author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2014-03-19 10:54:37 -0400 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2014-03-19 10:54:37 -0400 |
commit | b80d6c781e7eb16e24c2a04a88ab6b230bcbbb35 (patch) | |
tree | aeb0885a6a3499ef96b2472be323965db8e1295e | |
parent | 262ca2b08fbdb9346e66ef30424b2226a00e0ffc (diff) | |
parent | 0b99836f238f37a8632a3ab4f9a8cc2346a36d40 (diff) |
Merge branch 'topic/dp-aux-rework' into drm-intel-next-queued
Conflicts:
drivers/gpu/drm/i915/intel_dp.c
A bit of a mess with reverts which differ in detail between -fixes and
-next, and some other unrelated shuffling.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
252 files changed, 7837 insertions, 4231 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index ed1d6d289022..9f5457ac0373 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl | |||
@@ -29,12 +29,26 @@ | |||
29 | </address> | 29 | </address> |
30 | </affiliation> | 30 | </affiliation> |
31 | </author> | 31 | </author> |
32 | <author> | ||
33 | <firstname>Daniel</firstname> | ||
34 | <surname>Vetter</surname> | ||
35 | <contrib>Contributions all over the place</contrib> | ||
36 | <affiliation> | ||
37 | <orgname>Intel Corporation</orgname> | ||
38 | <address> | ||
39 | <email>daniel.vetter@ffwll.ch</email> | ||
40 | </address> | ||
41 | </affiliation> | ||
42 | </author> | ||
32 | </authorgroup> | 43 | </authorgroup> |
33 | 44 | ||
34 | <copyright> | 45 | <copyright> |
35 | <year>2008-2009</year> | 46 | <year>2008-2009</year> |
36 | <year>2012</year> | 47 | <year>2013-2014</year> |
37 | <holder>Intel Corporation</holder> | 48 | <holder>Intel Corporation</holder> |
49 | </copyright> | ||
50 | <copyright> | ||
51 | <year>2012</year> | ||
38 | <holder>Laurent Pinchart</holder> | 52 | <holder>Laurent Pinchart</holder> |
39 | </copyright> | 53 | </copyright> |
40 | 54 | ||
@@ -60,7 +74,15 @@ | |||
60 | 74 | ||
61 | <toc></toc> | 75 | <toc></toc> |
62 | 76 | ||
63 | <!-- Introduction --> | 77 | <part id="drmCore"> |
78 | <title>DRM Core</title> | ||
79 | <partintro> | ||
80 | <para> | ||
81 | This first part of the DRM Developer's Guide documents core DRM code, | ||
82 | helper libraries for writing drivers and generic userspace interfaces | ||
83 | exposed by DRM drivers. | ||
84 | </para> | ||
85 | </partintro> | ||
64 | 86 | ||
65 | <chapter id="drmIntroduction"> | 87 | <chapter id="drmIntroduction"> |
66 | <title>Introduction</title> | 88 | <title>Introduction</title> |
@@ -264,8 +286,8 @@ char *date;</synopsis> | |||
264 | <para> | 286 | <para> |
265 | The <methodname>load</methodname> method is the driver and device | 287 | The <methodname>load</methodname> method is the driver and device |
266 | initialization entry point. The method is responsible for allocating and | 288 | initialization entry point. The method is responsible for allocating and |
267 | initializing driver private data, specifying supported performance | 289 | initializing driver private data, performing resource allocation and |
268 | counters, performing resource allocation and mapping (e.g. acquiring | 290 | mapping (e.g. acquiring |
269 | clocks, mapping registers or allocating command buffers), initializing | 291 | clocks, mapping registers or allocating command buffers), initializing |
270 | the memory manager (<xref linkend="drm-memory-management"/>), installing | 292 | the memory manager (<xref linkend="drm-memory-management"/>), installing |
271 | the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up | 293 | the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up |
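To make the load-time responsibilities listed above concrete, here is a minimal sketch of a load implementation; the "foo" driver name, the foo_device type and the setup steps are illustrative assumptions, not part of this patch.

```c
/* Minimal sketch of a drm_driver .load hook (hypothetical "foo" driver). */
static int foo_load(struct drm_device *dev, unsigned long flags)
{
	struct foo_device *fdev;		/* assumed driver-private type */

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;
	dev->dev_private = fdev;		/* hang private data off drm_device */

	/* Resource allocation and mapping (clocks, registers, command
	 * buffers), memory manager and IRQ setup would follow here, as
	 * described in the surrounding text. */
	return 0;
}
```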
@@ -295,7 +317,7 @@ char *date;</synopsis> | |||
295 | their <methodname>load</methodname> method called with flags to 0. | 317 | their <methodname>load</methodname> method called with flags to 0. |
296 | </para> | 318 | </para> |
297 | <sect3> | 319 | <sect3> |
298 | <title>Driver Private & Performance Counters</title> | 320 | <title>Driver Private Data</title> |
299 | <para> | 321 | <para> |
300 | The driver private hangs off the main | 322 | The driver private hangs off the main |
301 | <structname>drm_device</structname> structure and can be used for | 323 | <structname>drm_device</structname> structure and can be used for |
@@ -307,14 +329,6 @@ char *date;</synopsis> | |||
307 | <structname>drm_device</structname>.<structfield>dev_priv</structfield> | 329 | <structname>drm_device</structname>.<structfield>dev_priv</structfield> |
308 | set to NULL when the driver is unloaded. | 330 | set to NULL when the driver is unloaded. |
309 | </para> | 331 | </para> |
310 | <para> | ||
311 | DRM supports several counters which were used for rough performance | ||
312 | characterization. This stat counter system is deprecated and should not | ||
313 | be used. If performance monitoring is desired, the developer should | ||
314 | investigate and potentially enhance the kernel perf and tracing | ||
315 | infrastructure to export GPU related performance information for | ||
316 | consumption by performance monitoring tools and applications. | ||
317 | </para> | ||
318 | </sect3> | 332 | </sect3> |
319 | <sect3 id="drm-irq-registration"> | 333 | <sect3 id="drm-irq-registration"> |
320 | <title>IRQ Registration</title> | 334 | <title>IRQ Registration</title> |
@@ -697,55 +711,16 @@ char *date;</synopsis> | |||
697 | respectively. The conversion is handled by the DRM core without any | 711 | respectively. The conversion is handled by the DRM core without any |
698 | driver-specific support. | 712 | driver-specific support. |
699 | </para> | 713 | </para> |
700 | <para> | 714 | <para> |
701 | Similar to global names, GEM file descriptors are also used to share GEM | 715 | GEM also supports buffer sharing with dma-buf file descriptors through |
702 | objects across processes. They offer additional security: as file | 716 | PRIME. GEM-based drivers must use the provided helper functions to |
703 | descriptors must be explicitly sent over UNIX domain sockets to be shared | 717 | implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />. |
704 | between applications, they can't be guessed like the globally unique GEM | 718 | Since sharing file descriptors is inherently more secure than the |
705 | names. | 719 | easily guessable and global GEM names, it is the preferred buffer |
706 | </para> | 720 | sharing mechanism. Sharing buffers through GEM names is only supported |
707 | <para> | 721 | for legacy userspace. Furthermore, PRIME also allows cross-device |
708 | Drivers that support GEM file descriptors, also known as the DRM PRIME | 722 | buffer sharing since it is based on dma-bufs. |
709 | API, must set the DRIVER_PRIME bit in the struct | 723 | </para> |
710 | <structname>drm_driver</structname> | ||
711 | <structfield>driver_features</structfield> field, and implement the | ||
712 | <methodname>prime_handle_to_fd</methodname> and | ||
713 | <methodname>prime_fd_to_handle</methodname> operations. | ||
714 | </para> | ||
715 | <para> | ||
716 | <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev, | ||
717 | struct drm_file *file_priv, uint32_t handle, | ||
718 | uint32_t flags, int *prime_fd); | ||
719 | int (*prime_fd_to_handle)(struct drm_device *dev, | ||
720 | struct drm_file *file_priv, int prime_fd, | ||
721 | uint32_t *handle);</synopsis> | ||
722 | Those two operations convert a handle to a PRIME file descriptor and | ||
723 | vice versa. Drivers must use the kernel dma-buf buffer sharing framework | ||
724 | to manage the PRIME file descriptors. | ||
725 | </para> | ||
726 | <para> | ||
727 | While non-GEM drivers must implement the operations themselves, GEM | ||
728 | drivers must use the <function>drm_gem_prime_handle_to_fd</function> | ||
729 | and <function>drm_gem_prime_fd_to_handle</function> helper functions. | ||
730 | Those helpers rely on the driver | ||
731 | <methodname>gem_prime_export</methodname> and | ||
732 | <methodname>gem_prime_import</methodname> operations to create a dma-buf | ||
733 | instance from a GEM object (dma-buf exporter role) and to create a GEM | ||
734 | object from a dma-buf instance (dma-buf importer role). | ||
735 | </para> | ||
736 | <para> | ||
737 | <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev, | ||
738 | struct drm_gem_object *obj, | ||
739 | int flags); | ||
740 | struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, | ||
741 | struct dma_buf *dma_buf);</synopsis> | ||
742 | These two operations are mandatory for GEM drivers that support DRM | ||
743 | PRIME. | ||
744 | </para> | ||
745 | <sect4> | ||
746 | <title>DRM PRIME Helper Functions Reference</title> | ||
747 | !Pdrivers/gpu/drm/drm_prime.c PRIME Helpers | ||
748 | </sect4> | ||
749 | </sect3> | 724 | </sect3> |
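As a hedged sketch of what this looks like from userspace, the libdrm wrappers around the PRIME ioctls convert a GEM handle into a shareable dma-buf file descriptor and back; the two open DRM fds and the error handling here are illustrative only.

```c
#include <stdint.h>
#include <xf86drm.h>

/* Sketch: export a GEM handle as a dma-buf fd and re-import it,
 * e.g. on another device. fd and other_fd are open DRM fds. */
int share_buffer(int fd, int other_fd, uint32_t handle)
{
	int prime_fd;
	uint32_t imported;

	if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd))
		return -1;			/* export failed */
	if (drmPrimeFDToHandle(other_fd, prime_fd, &imported))
		return -1;			/* import failed */
	/* 'imported' is now a GEM handle valid on other_fd */
	return 0;
}
```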
750 | <sect3 id="drm-gem-objects-mapping"> | 725 | <sect3 id="drm-gem-objects-mapping"> |
751 | <title>GEM Objects Mapping</title> | 726 | <title>GEM Objects Mapping</title> |
@@ -830,62 +805,6 @@ char *date;</synopsis> | |||
830 | </para> | 805 | </para> |
831 | </sect3> | 806 | </sect3> |
832 | <sect3> | 807 | <sect3> |
833 | <title>Dumb GEM Objects</title> | ||
834 | <para> | ||
835 | The GEM API doesn't standardize GEM objects creation and leaves it to | ||
836 | driver-specific ioctls. While not an issue for full-fledged graphics | ||
837 | stacks that include device-specific userspace components (in libdrm for | ||
838 | instance), this limit makes DRM-based early boot graphics unnecessarily | ||
839 | complex. | ||
840 | </para> | ||
841 | <para> | ||
842 | Dumb GEM objects partly alleviate the problem by providing a standard | ||
843 | API to create dumb buffers suitable for scanout, which can then be used | ||
844 | to create KMS frame buffers. | ||
845 | </para> | ||
846 | <para> | ||
847 | To support dumb GEM objects drivers must implement the | ||
848 | <methodname>dumb_create</methodname>, | ||
849 | <methodname>dumb_destroy</methodname> and | ||
850 | <methodname>dumb_map_offset</methodname> operations. | ||
851 | </para> | ||
852 | <itemizedlist> | ||
853 | <listitem> | ||
854 | <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, | ||
855 | struct drm_mode_create_dumb *args);</synopsis> | ||
856 | <para> | ||
857 | The <methodname>dumb_create</methodname> operation creates a GEM | ||
858 | object suitable for scanout based on the width, height and depth | ||
859 | from the struct <structname>drm_mode_create_dumb</structname> | ||
860 | argument. It fills the argument's <structfield>handle</structfield>, | ||
861 | <structfield>pitch</structfield> and <structfield>size</structfield> | ||
862 | fields with a handle for the newly created GEM object and its line | ||
863 | pitch and size in bytes. | ||
864 | </para> | ||
865 | </listitem> | ||
866 | <listitem> | ||
867 | <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, | ||
868 | uint32_t handle);</synopsis> | ||
869 | <para> | ||
870 | The <methodname>dumb_destroy</methodname> operation destroys a dumb | ||
871 | GEM object created by <methodname>dumb_create</methodname>. | ||
872 | </para> | ||
873 | </listitem> | ||
874 | <listitem> | ||
875 | <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, | ||
876 | uint32_t handle, uint64_t *offset);</synopsis> | ||
877 | <para> | ||
878 | The <methodname>dumb_map_offset</methodname> operation associates an | ||
879 | mmap fake offset with the GEM object given by the handle and returns | ||
880 | it. Drivers must use the | ||
881 | <function>drm_gem_create_mmap_offset</function> function to | ||
882 | associate the fake offset as described in | ||
883 | <xref linkend="drm-gem-objects-mapping"/>. | ||
884 | </para> | ||
885 | </listitem> | ||
886 | </itemizedlist> | ||
887 | </sect3> | ||
888 | <sect3> | ||
889 | <title>Memory Coherency</title> | 808 | <title>Memory Coherency</title> |
890 | <para> | 809 | <para> |
891 | When mapped to the device or used in a command buffer, backing pages | 810 | When mapped to the device or used in a command buffer, backing pages |
@@ -924,7 +843,99 @@ char *date;</synopsis> | |||
924 | abstracted from the client in libdrm. | 843 | abstracted from the client in libdrm. |
925 | </para> | 844 | </para> |
926 | </sect3> | 845 | </sect3> |
927 | </sect2> | 846 | <sect3> |
847 | <title>GEM Function Reference</title> | ||
848 | !Edrivers/gpu/drm/drm_gem.c | ||
849 | </sect3> | ||
850 | </sect2> | ||
851 | <sect2> | ||
852 | <title>VMA Offset Manager</title> | ||
853 | !Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager | ||
854 | !Edrivers/gpu/drm/drm_vma_manager.c | ||
855 | !Iinclude/drm/drm_vma_manager.h | ||
856 | </sect2> | ||
857 | <sect2 id="drm-prime-support"> | ||
858 | <title>PRIME Buffer Sharing</title> | ||
859 | <para> | ||
860 | PRIME is the cross-device buffer sharing framework in drm, originally | ||
861 | created for the OPTIMUS range of multi-gpu platforms. To userspace, | ||
862 | PRIME buffers are dma-buf based file descriptors. | ||
863 | </para> | ||
864 | <sect3> | ||
865 | <title>Overview and Driver Interface</title> | ||
866 | <para> | ||
867 | Similar to GEM global names, PRIME file descriptors are | ||
868 | also used to share buffer objects across processes. They offer | ||
869 | additional security: as file descriptors must be explicitly sent over | ||
870 | UNIX domain sockets to be shared between applications, they can't be | ||
871 | guessed like the globally unique GEM names. | ||
872 | </para> | ||
873 | <para> | ||
874 | Drivers that support the PRIME | ||
875 | API must set the DRIVER_PRIME bit in the struct | ||
876 | <structname>drm_driver</structname> | ||
877 | <structfield>driver_features</structfield> field, and implement the | ||
878 | <methodname>prime_handle_to_fd</methodname> and | ||
879 | <methodname>prime_fd_to_handle</methodname> operations. | ||
880 | </para> | ||
881 | <para> | ||
882 | <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev, | ||
883 | struct drm_file *file_priv, uint32_t handle, | ||
884 | uint32_t flags, int *prime_fd); | ||
885 | int (*prime_fd_to_handle)(struct drm_device *dev, | ||
886 | struct drm_file *file_priv, int prime_fd, | ||
887 | uint32_t *handle);</synopsis> | ||
888 | Those two operations convert a handle to a PRIME file descriptor and | ||
889 | vice versa. Drivers must use the kernel dma-buf buffer sharing framework | ||
890 | to manage the PRIME file descriptors. Similar to the mode setting | ||
891 | API, PRIME is agnostic to the underlying buffer object manager, as | ||
892 | long as handles are 32-bit unsigned integers. | ||
893 | </para> | ||
894 | <para> | ||
895 | While non-GEM drivers must implement the operations themselves, GEM | ||
896 | drivers must use the <function>drm_gem_prime_handle_to_fd</function> | ||
897 | and <function>drm_gem_prime_fd_to_handle</function> helper functions. | ||
898 | Those helpers rely on the driver | ||
899 | <methodname>gem_prime_export</methodname> and | ||
900 | <methodname>gem_prime_import</methodname> operations to create a dma-buf | ||
901 | instance from a GEM object (dma-buf exporter role) and to create a GEM | ||
902 | object from a dma-buf instance (dma-buf importer role). | ||
903 | </para> | ||
904 | <para> | ||
905 | <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev, | ||
906 | struct drm_gem_object *obj, | ||
907 | int flags); | ||
908 | struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, | ||
909 | struct dma_buf *dma_buf);</synopsis> | ||
910 | These two operations are mandatory for GEM drivers that support | ||
911 | PRIME. | ||
912 | </para> | ||
913 | </sect3> | ||
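For GEM drivers, wiring this up amounts to setting a few fields in struct drm_driver using the helpers named above; the following is a sketch for a hypothetical "foo" driver, where foo_gem_prime_export/import stand in for the driver's own dma-buf hooks.

```c
/* Sketch of a GEM driver advertising PRIME (Linux 3.14-era API,
 * hypothetical "foo" driver). */
static struct drm_driver foo_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,  /* core helper */
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,  /* core helper */
	.gem_prime_export   = foo_gem_prime_export,  /* driver exporter hook */
	.gem_prime_import   = foo_gem_prime_import,  /* driver importer hook */
};
```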
914 | <sect3> | ||
915 | <title>PRIME Helper Functions</title> | ||
916 | !Pdrivers/gpu/drm/drm_prime.c PRIME Helpers | ||
917 | </sect3> | ||
918 | </sect2> | ||
919 | <sect2> | ||
920 | <title>PRIME Function References</title> | ||
921 | !Edrivers/gpu/drm/drm_prime.c | ||
922 | </sect2> | ||
923 | <sect2> | ||
924 | <title>DRM MM Range Allocator</title> | ||
925 | <sect3> | ||
926 | <title>Overview</title> | ||
927 | !Pdrivers/gpu/drm/drm_mm.c Overview | ||
928 | </sect3> | ||
929 | <sect3> | ||
930 | <title>LRU Scan/Eviction Support</title> | ||
931 | !Pdrivers/gpu/drm/drm_mm.c lru scan roaster | ||
932 | </sect3> | ||
933 | </sect2> | ||
934 | <sect2> | ||
935 | <title>DRM MM Range Allocator Function References</title> | ||
936 | !Edrivers/gpu/drm/drm_mm.c | ||
937 | !Iinclude/drm/drm_mm.h | ||
938 | </sect2> | ||
928 | </sect1> | 939 | </sect1> |
929 | 940 | ||
930 | <!-- Internals: mode setting --> | 941 | <!-- Internals: mode setting --> |
@@ -953,6 +964,11 @@ int max_width, max_height;</synopsis> | |||
953 | </listitem> | 964 | </listitem> |
954 | </itemizedlist> | 965 | </itemizedlist> |
955 | <sect2> | 966 | <sect2> |
967 | <title>Display Modes Function Reference</title> | ||
968 | !Iinclude/drm/drm_modes.h | ||
969 | !Edrivers/gpu/drm/drm_modes.c | ||
970 | </sect2> | ||
971 | <sect2> | ||
956 | <title>Frame Buffer Creation</title> | 972 | <title>Frame Buffer Creation</title> |
957 | <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev, | 973 | <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev, |
958 | struct drm_file *file_priv, | 974 | struct drm_file *file_priv, |
@@ -968,9 +984,11 @@ int max_width, max_height;</synopsis> | |||
968 | Frame buffers rely on the underlying memory manager for low-level memory | 984 | Frame buffers rely on the underlying memory manager for low-level memory |
969 | operations. When creating a frame buffer applications pass a memory | 985 | operations. When creating a frame buffer applications pass a memory |
970 | handle (or a list of memory handles for multi-planar formats) through | 986 | handle (or a list of memory handles for multi-planar formats) through |
971 | the <parameter>drm_mode_fb_cmd2</parameter> argument. This document | 987 | the <parameter>drm_mode_fb_cmd2</parameter> argument. For drivers using |
972 | assumes that the driver uses GEM, those handles thus reference GEM | 988 | GEM as their userspace buffer management interface, this would be a GEM |
973 | objects. | 989 | handle. Drivers are, however, free to use their own backing storage object |
990 | handles, e.g. vmwgfx directly exposes special TTM handles to userspace | ||
991 | and so expects TTM handles in the create ioctl and not GEM handles. | ||
974 | </para> | 992 | </para> |
975 | <para> | 993 | <para> |
976 | Drivers must first validate the requested frame buffer parameters passed | 994 | Drivers must first validate the requested frame buffer parameters passed |
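Pulling the steps from this hunk together, a sketch of an fb_create implementation might look like the following; the "foo" names and foo_fb_funcs are hypothetical, and the validation and object lookup are driver-specific.

```c
/* Sketch of a .fb_create implementation outline (hypothetical "foo"
 * driver, 3.14-era helpers). */
static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	      struct drm_mode_fb_cmd2 *cmd)
{
	struct foo_framebuffer *ffb;	/* embeds struct drm_framebuffer base */
	int ret;

	/* 1. validate cmd->width/height/pixel_format against hw limits */
	/* 2. look up the backing object from cmd->handles[0] */

	ffb = kzalloc(sizeof(*ffb), GFP_KERNEL);
	if (!ffb)
		return ERR_PTR(-ENOMEM);

	drm_helper_mode_fill_fb_struct(&ffb->base, cmd);
	ret = drm_framebuffer_init(dev, &ffb->base, &foo_fb_funcs);
	if (ret) {
		kfree(ffb);
		return ERR_PTR(ret);
	}
	return &ffb->base;
}
```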
@@ -992,7 +1010,7 @@ int max_width, max_height;</synopsis> | |||
992 | </para> | 1010 | </para> |
993 | 1011 | ||
994 | <para> | 1012 | <para> |
995 | The initailization of the new framebuffer instance is finalized with a | 1013 | The initialization of the new framebuffer instance is finalized with a |
996 | call to <function>drm_framebuffer_init</function> which takes a pointer | 1014 | call to <function>drm_framebuffer_init</function> which takes a pointer |
997 | to DRM frame buffer operations (struct | 1015 | to DRM frame buffer operations (struct |
998 | <structname>drm_framebuffer_funcs</structname>). Note that this function | 1016 | <structname>drm_framebuffer_funcs</structname>). Note that this function |
@@ -1042,7 +1060,7 @@ int max_width, max_height;</synopsis> | |||
1042 | <para> | 1060 | <para> |
1043 | The lifetime of a drm framebuffer is controlled with a reference count, | 1061 | The lifetime of a drm framebuffer is controlled with a reference count, |
1044 | drivers can grab additional references with | 1062 | drivers can grab additional references with |
1045 | <function>drm_framebuffer_reference</function> </para> and drop them | 1063 | <function>drm_framebuffer_reference</function> and drop them |
1046 | again with <function>drm_framebuffer_unreference</function>. For | 1064 | again with <function>drm_framebuffer_unreference</function>. For |
1047 | driver-private framebuffers for which the last reference is never | 1065 | driver-private framebuffers for which the last reference is never |
1048 | dropped (e.g. for the fbdev framebuffer when the struct | 1066 | dropped (e.g. for the fbdev framebuffer when the struct |
@@ -1050,6 +1068,72 @@ int max_width, max_height;</synopsis> | |||
1050 | helper struct) drivers can manually clean up a framebuffer at module | 1068 | helper struct) drivers can manually clean up a framebuffer at module |
1051 | unload time with | 1069 | unload time with |
1052 | <function>drm_framebuffer_unregister_private</function>. | 1070 | <function>drm_framebuffer_unregister_private</function>. |
1071 | </para> | ||
1072 | </sect2> | ||
1073 | <sect2> | ||
1074 | <title>Dumb Buffer Objects</title> | ||
1075 | <para> | ||
1076 | The KMS API doesn't standardize backing storage object creation and | ||
1077 | leaves it to driver-specific ioctls. Furthermore actually creating a | ||
1078 | buffer object even for GEM-based drivers is done through a | ||
1079 | driver-specific ioctl - GEM only has a common userspace interface for | ||
1080 | sharing and destroying objects. While not an issue for full-fledged | ||
1081 | graphics stacks that include device-specific userspace components (in | ||
1082 | libdrm for instance), this limit makes DRM-based early boot graphics | ||
1083 | unnecessarily complex. | ||
1084 | </para> | ||
1085 | <para> | ||
1086 | Dumb objects partly alleviate the problem by providing a standard | ||
1087 | API to create dumb buffers suitable for scanout, which can then be used | ||
1088 | to create KMS frame buffers. | ||
1089 | </para> | ||
1090 | <para> | ||
1091 | To support dumb objects drivers must implement the | ||
1092 | <methodname>dumb_create</methodname>, | ||
1093 | <methodname>dumb_destroy</methodname> and | ||
1094 | <methodname>dumb_map_offset</methodname> operations. | ||
1095 | </para> | ||
1096 | <itemizedlist> | ||
1097 | <listitem> | ||
1098 | <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, | ||
1099 | struct drm_mode_create_dumb *args);</synopsis> | ||
1100 | <para> | ||
1101 | The <methodname>dumb_create</methodname> operation creates a driver | ||
1102 | object (GEM or TTM handle) suitable for scanout based on the | ||
1103 | width, height and depth from the struct | ||
1104 | <structname>drm_mode_create_dumb</structname> argument. It fills the | ||
1105 | argument's <structfield>handle</structfield>, | ||
1106 | <structfield>pitch</structfield> and <structfield>size</structfield> | ||
1107 | fields with a handle for the newly created object and its line | ||
1108 | pitch and size in bytes. | ||
1109 | </para> | ||
1110 | </listitem> | ||
1111 | <listitem> | ||
1112 | <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, | ||
1113 | uint32_t handle);</synopsis> | ||
1114 | <para> | ||
1115 | The <methodname>dumb_destroy</methodname> operation destroys a dumb | ||
1116 | object created by <methodname>dumb_create</methodname>. | ||
1117 | </para> | ||
1118 | </listitem> | ||
1119 | <listitem> | ||
1120 | <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, | ||
1121 | uint32_t handle, uint64_t *offset);</synopsis> | ||
1122 | <para> | ||
1123 | The <methodname>dumb_map_offset</methodname> operation associates an | ||
1124 | mmap fake offset with the object given by the handle and returns | ||
1125 | it. Drivers must use the | ||
1126 | <function>drm_gem_create_mmap_offset</function> function to | ||
1127 | associate the fake offset as described in | ||
1128 | <xref linkend="drm-gem-objects-mapping"/>. | ||
1129 | </para> | ||
1130 | </listitem> | ||
1131 | </itemizedlist> | ||
1132 | <para> | ||
1133 | Note that dumb objects may not be used for gpu acceleration, as has been | ||
1134 | attempted on some ARM embedded platforms. Such drivers really must have | ||
1135 | a hardware-specific ioctl to allocate suitable buffer objects. | ||
1136 | </para> | ||
1053 | </sect2> | 1137 | </sect2> |
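A short userspace sketch may help here: allocating a dumb buffer and obtaining its mmap fake offset goes through the standardized ioctls documented above. The resolution values are arbitrary and error handling is abbreviated.

```c
#include <string.h>
#include <xf86drm.h>	/* pulls in drm.h / drm_mode.h */

/* Sketch: allocate a dumb scanout buffer and get its mmap offset.
 * 'fd' is an open DRM primary node. */
int create_dumb_buffer(int fd)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;

	memset(&create, 0, sizeof(create));
	create.width  = 1024;
	create.height = 768;
	create.bpp    = 32;
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return -1;	/* on success fills create.handle/.pitch/.size */

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return -1;	/* fills map.offset for use with mmap() */
	return 0;
}
```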
1054 | <sect2> | 1138 | <sect2> |
1055 | <title>Output Polling</title> | 1139 | <title>Output Polling</title> |
@@ -1130,8 +1214,11 @@ int max_width, max_height;</synopsis> | |||
1130 | This operation is called with the mode config lock held. | 1214 | This operation is called with the mode config lock held. |
1131 | </para> | 1215 | </para> |
1132 | <note><para> | 1216 | <note><para> |
1133 | FIXME: How should set_config interact with DPMS? If the CRTC is | 1217 | Note that the drm core has no notion of restoring the mode setting |
1134 | suspended, should it be resumed? | 1218 | state after resume, since resume handling is entirely the |
1219 | responsibility of the driver. The common mode setting helper library, |
1220 | however, provides a helper which can be used for this: |
1221 | <function>drm_helper_resume_force_mode</function>. | ||
1135 | </para></note> | 1222 | </para></note> |
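As a sketch of how a driver might use that helper in its resume path (the "foo" names and bus glue are hypothetical):

```c
/* Sketch of a driver resume path using drm_helper_resume_force_mode
 * (hypothetical "foo" driver; bus-specific glue omitted). */
static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* ... re-program clocks, registers, etc. ... */
	drm_helper_resume_force_mode(drm);	/* restore mode setting state */
	return 0;
}
```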
1136 | </sect4> | 1223 | </sect4> |
1137 | <sect4> | 1224 | <sect4> |
@@ -2134,7 +2221,7 @@ void intel_crt_init(struct drm_device *dev) | |||
2134 | set the <structfield>display_info</structfield> | 2221 | set the <structfield>display_info</structfield> |
2135 | <structfield>width_mm</structfield> and | 2222 | <structfield>width_mm</structfield> and |
2136 | <structfield>height_mm</structfield> fields if they haven't been set | 2223 | <structfield>height_mm</structfield> fields if they haven't been set |
2137 | already (for instance at initilization time when a fixed-size panel is | 2224 | already (for instance at initialization time when a fixed-size panel is |
2138 | attached to the connector). The mode <structfield>width_mm</structfield> | 2225 | attached to the connector). The mode <structfield>width_mm</structfield> |
2139 | and <structfield>height_mm</structfield> fields are only used internally | 2226 | and <structfield>height_mm</structfield> fields are only used internally |
2140 | during EDID parsing and should not be set when creating modes manually. | 2227 | during EDID parsing and should not be set when creating modes manually. |
@@ -2196,10 +2283,15 @@ void intel_crt_init(struct drm_device *dev) | |||
2196 | !Edrivers/gpu/drm/drm_flip_work.c | 2283 | !Edrivers/gpu/drm/drm_flip_work.c |
2197 | </sect2> | 2284 | </sect2> |
2198 | <sect2> | 2285 | <sect2> |
2199 | <title>VMA Offset Manager</title> | 2286 | <title>HDMI Infoframes Helper Reference</title> |
2200 | !Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager | 2287 | <para> |
2201 | !Edrivers/gpu/drm/drm_vma_manager.c | 2288 | Strictly speaking this is not a DRM helper library but generally useable |
2202 | !Iinclude/drm/drm_vma_manager.h | 2289 | by any driver interfacing with HDMI outputs like v4l or alsa drivers. |
2290 | But it nicely fits into the overall topic of mode setting helper | ||
2291 | libraries and hence is also included here. | ||
2292 | </para> | ||
2293 | !Iinclude/linux/hdmi.h | ||
2294 | !Edrivers/video/hdmi.c | ||
2203 | </sect2> | 2295 | </sect2> |
2204 | </sect1> | 2296 | </sect1> |
2205 | 2297 | ||
@@ -2561,42 +2653,44 @@ int num_ioctls;</synopsis> | |||
2561 | </para> | 2653 | </para> |
2562 | </sect2> | 2654 | </sect2> |
2563 | </sect1> | 2655 | </sect1> |
2564 | |||
2565 | <sect1> | 2656 | <sect1> |
2566 | <title>Command submission & fencing</title> | 2657 | <title>Legacy Support Code</title> |
2567 | <para> | 2658 | <para> |
2568 | This should cover a few device-specific command submission | 2659 | This section very briefly covers some of the old legacy support code which |
2569 | implementations. | 2660 | is only used by old DRM drivers which have done a so-called shadow-attach |
2661 | to the underlying device instead of registering as a real driver. This |
2662 | also includes some of the old generic buffer management and command |
2663 | submission code. Do not use any of this in new and modern drivers. | ||
2570 | </para> | 2664 | </para> |
2571 | </sect1> | ||
2572 | |||
2573 | <!-- Internals: suspend/resume --> | ||
2574 | 2665 | ||
2575 | <sect1> | 2666 | <sect2> |
2576 | <title>Suspend/Resume</title> | 2667 | <title>Legacy Suspend/Resume</title> |
2577 | <para> | 2668 | <para> |
2578 | The DRM core provides some suspend/resume code, but drivers wanting full | 2669 | The DRM core provides some suspend/resume code, but drivers wanting full |
2579 | suspend/resume support should provide save() and restore() functions. | 2670 | suspend/resume support should provide save() and restore() functions. |
2580 | These are called at suspend, hibernate, or resume time, and should perform | 2671 | These are called at suspend, hibernate, or resume time, and should perform |
2581 | any state save or restore required by your device across suspend or | 2672 | any state save or restore required by your device across suspend or |
2582 | hibernate states. | 2673 | hibernate states. |
2583 | </para> | 2674 | </para> |
2584 | <synopsis>int (*suspend) (struct drm_device *, pm_message_t state); | 2675 | <synopsis>int (*suspend) (struct drm_device *, pm_message_t state); |
2585 | int (*resume) (struct drm_device *);</synopsis> | 2676 | int (*resume) (struct drm_device *);</synopsis> |
2586 | <para> | 2677 | <para> |
2587 | Those are legacy suspend and resume methods. New driver should use the | 2678 | Those are legacy suspend and resume methods which |
2588 | power management interface provided by their bus type (usually through | 2679 | <emphasis>only</emphasis> work with the legacy shadow-attach driver |
2589 | the struct <structname>device_driver</structname> dev_pm_ops) and set | 2680 | registration functions. New drivers should use the power management |
2590 | these methods to NULL. | 2681 | interface provided by their bus type (usually through |
2591 | </para> | 2682 | the struct <structname>device_driver</structname> dev_pm_ops) and set |
2592 | </sect1> | 2683 | these methods to NULL. |
2684 | </para> | ||
2685 | </sect2> | ||
2593 | 2686 | ||
2594 | <sect1> | 2687 | <sect2> |
2595 | <title>DMA services</title> | 2688 | <title>Legacy DMA Services</title> |
2596 | <para> | 2689 | <para> |
2597 | This should cover how DMA mapping etc. is supported by the core. | 2690 | This should cover how DMA mapping etc. is supported by the core. |
2598 | These functions are deprecated and should not be used. | 2691 | These functions are deprecated and should not be used. |
2599 | </para> | 2692 | </para> |
2693 | </sect2> | ||
2600 | </sect1> | 2694 | </sect1> |
2601 | </chapter> | 2695 | </chapter> |
2602 | 2696 | ||
@@ -2658,8 +2752,8 @@ int (*resume) (struct drm_device *);</synopsis> | |||
2658 | DRM core provides multiple character-devices for user-space to use. | 2752 | DRM core provides multiple character-devices for user-space to use. |
2659 | Depending on which device is opened, user-space can perform a different | 2753 | Depending on which device is opened, user-space can perform a different |
2660 | set of operations (mainly ioctls). The primary node is always created | 2754 | set of operations (mainly ioctls). The primary node is always created |
2661 | and called <term>card<num></term>. Additionally, a currently | 2755 | and called card<num>. Additionally, a currently |
2662 | unused control node, called <term>controlD<num></term> is also | 2756 | unused control node, called controlD<num> is also |
2663 | created. The primary node provides all legacy operations and | 2757 | created. The primary node provides all legacy operations and |
2664 | historically was the only interface used by userspace. With KMS, the | 2758 | historically was the only interface used by userspace. With KMS, the |
2665 | control node was introduced. However, the planned KMS control interface | 2759 | control node was introduced. However, the planned KMS control interface |
@@ -2674,21 +2768,21 @@ int (*resume) (struct drm_device *);</synopsis> | |||
2674 | nodes were introduced. Render nodes solely serve render clients, that | 2768 | nodes were introduced. Render nodes solely serve render clients, that |
2675 | is, no modesetting or privileged ioctls can be issued on render nodes. | 2769 | is, no modesetting or privileged ioctls can be issued on render nodes. |
2676 | Only non-global rendering commands are allowed. If a driver supports | 2770 | Only non-global rendering commands are allowed. If a driver supports |
2677 | render nodes, it must advertise it via the <term>DRIVER_RENDER</term> | 2771 | render nodes, it must advertise it via the DRIVER_RENDER |
2678 | DRM driver capability. If not supported, the primary node must be used | 2772 | DRM driver capability. If not supported, the primary node must be used |
2679 | for render clients together with the legacy drmAuth authentication | 2773 | for render clients together with the legacy drmAuth authentication |
2680 | procedure. | 2774 | procedure. |
2681 | </para> | 2775 | </para> |
2682 | <para> | 2776 | <para> |
2683 | If a driver advertises render node support, DRM core will create a | 2777 | If a driver advertises render node support, DRM core will create a |
2684 | separate render node called <term>renderD<num></term>. There will | 2778 | separate render node called renderD<num>. There will |
2685 | be one render node per device. No ioctls except PRIME-related ioctls | 2779 | be one render node per device. No ioctls except PRIME-related ioctls |
2686 | will be allowed on this node. Especially <term>GEM_OPEN</term> will be | 2780 | will be allowed on this node. Especially GEM_OPEN will be |
2687 | explicitly prohibited. Render nodes are designed to avoid the | 2781 | explicitly prohibited. Render nodes are designed to avoid the |
2688 | buffer-leaks, which occur if clients guess the flink names or mmap | 2782 | buffer-leaks, which occur if clients guess the flink names or mmap |
2689 | offsets on the legacy interface. In addition to this basic interface, | 2783 | offsets on the legacy interface. In addition to this basic interface, |
2690 | drivers must mark their driver-dependent render-only ioctls as | 2784 | drivers must mark their driver-dependent render-only ioctls as |
2691 | <term>DRM_RENDER_ALLOW</term> so render clients can use them. Driver | 2785 | DRM_RENDER_ALLOW so render clients can use them. Driver |
2692 | authors must be careful not to allow any privileged ioctls on render | 2786 | authors must be careful not to allow any privileged ioctls on render |
2693 | nodes. | 2787 | nodes. |
2694 | </para> | 2788 | </para> |
@@ -2749,15 +2843,73 @@ int (*resume) (struct drm_device *);</synopsis> | |||
2749 | </sect1> | 2843 | </sect1> |
2750 | 2844 | ||
2751 | </chapter> | 2845 | </chapter> |
2846 | </part> | ||
2847 | <part id="drmDrivers"> | ||
2848 | <title>DRM Drivers</title> | ||
2752 | 2849 | ||
2753 | <!-- API reference --> | 2850 | <partintro> |
2851 | <para> | ||
2852 | This second part of the DRM Developer's Guide documents driver code, | ||
2853 | implementation details and also all the driver-specific userspace | ||
2854 | interfaces. Especially since all hardware-acceleration interfaces to | ||
2855 | userspace are driver specific for efficiency and other reasons these | ||
2856 | interfaces can be rather substantial. Hence every driver has its own | ||
2857 | chapter. | ||
2858 | </para> | ||
2859 | </partintro> | ||
2754 | 2860 | ||
2755 | <appendix id="drmDriverApi"> | 2861 | <chapter id="drmI915"> |
2756 | <title>DRM Driver API</title> | 2862 | <title>drm/i915 Intel GFX Driver</title> |
2757 | <para> | 2863 | <para> |
2758 | Include auto-generated API reference here (need to reference it | 2864 | The drm/i915 driver supports all (with the exception of some very early |
2759 | from paragraphs above too). | 2865 | models) integrated GFX chipsets with both Intel display and rendering |
2866 | blocks. This excludes a set of SoC platforms with an SGX rendering unit, | ||
2867 | those have basic support through the gma500 drm driver. | ||
2760 | </para> | 2868 | </para> |
2761 | </appendix> | 2869 | <sect1> |
2870 | <title>Display Hardware Handling</title> | ||
2871 | <para> | ||
2872 | This section covers everything related to the display hardware including | ||
2873 | the mode setting infrastructure, plane, sprite and cursor handling and | ||
2874 | display, output probing and related topics. | ||
2875 | </para> | ||
2876 | <sect2> | ||
2877 | <title>Mode Setting Infrastructure</title> | ||
2878 | <para> | ||
2879 | The i915 driver is thus far the only DRM driver which doesn't use the | ||
2880 | common DRM helper code to implement mode setting sequences. Thus it | ||
2881 | has its own tailor-made infrastructure for executing a display | ||
2882 | configuration change. | ||
2883 | </para> | ||
2884 | </sect2> | ||
2885 | <sect2> | ||
2886 | <title>Plane Configuration</title> | ||
2887 | <para> | ||
2888 | This section covers plane configuration and composition with the | ||
2889 | primary plane, sprites, cursors and overlays. This includes the | ||
2890 | infrastructure to do atomic vsync'ed updates of all this state and | ||
2891 | also tightly coupled topics like watermark setup and computation, | ||
2892 | framebuffer compression and panel self refresh. | ||
2893 | </para> | ||
2894 | </sect2> | ||
2895 | <sect2> | ||
2896 | <title>Output Probing</title> | ||
2897 | <para> | ||
2898 | This section covers output probing and related infrastructure like the | ||
2899 | hotplug interrupt storm detection and mitigation code. Note that the | ||
2900 | i915 driver still uses most of the common DRM helper code for output | ||
2901 | probing, so those sections fully apply. | ||
2902 | </para> | ||
2903 | </sect2> | ||
2904 | </sect1> | ||
2762 | 2905 | ||
2906 | <sect1> | ||
2907 | <title>Memory Management and Command Submission</title> | ||
2908 | <para> | ||
2909 | This section covers all things related to the GEM implementation in the | ||
2910 | i915 driver. | ||
2911 | </para> | ||
2912 | </sect1> | ||
2913 | </chapter> | ||
2914 | </part> | ||
2763 | </book> | 2915 | </book> |
diff --git a/Documentation/devicetree/bindings/drm/i2c/tda998x.txt b/Documentation/devicetree/bindings/drm/i2c/tda998x.txt new file mode 100644 index 000000000000..d7df01c5bb3a --- /dev/null +++ b/Documentation/devicetree/bindings/drm/i2c/tda998x.txt | |||
@@ -0,0 +1,27 @@ | |||
1 | Device-Tree bindings for the NXP TDA998x HDMI transmitter | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: must be "nxp,tda998x" | ||
5 | |||
6 | Optional properties: | ||
7 | - interrupts: interrupt number and trigger type | ||
8 | default: polling | ||
9 | |||
10 | - pinctrl-0: pin control group to be used for | ||
11 | screen plug/unplug interrupt. | ||
12 | |||
13 | - pinctrl-names: must contain a "default" entry. | ||
14 | |||
15 | - video-ports: 24-bit value which defines how the video controller | ||
16 | output is wired to the TDA998x input - default: <0x230145> | ||
17 | |||
18 | Example: | ||
19 | |||
20 | tda998x: hdmi-encoder { | ||
21 | compatible = "nxp,tda998x"; | ||
22 | reg = <0x70>; | ||
23 | interrupt-parent = <&gpio0>; | ||
24 | interrupts = <27 2>; /* falling edge */ | ||
25 | pinctrl-0 = <&pmx_camera>; | ||
26 | pinctrl-names = "default"; | ||
27 | }; | ||
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index 1404674c0a02..6fea79efb4cb 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt | |||
@@ -453,7 +453,7 @@ TP_STATUS_COPY : This flag indicates that the frame (and associated | |||
453 | enabled previously with setsockopt() and | 453 | enabled previously with setsockopt() and |
454 | the PACKET_COPY_THRESH option. | 454 | the PACKET_COPY_THRESH option. |
455 | 455 | ||
456 | The number of frames than can be buffered to | 456 | The number of frames that can be buffered to |
457 | be read with recvfrom is limited like a normal socket. | 457 | be read with recvfrom is limited like a normal socket. |
458 | See the SO_RCVBUF option in the socket (7) man page. | 458 | See the SO_RCVBUF option in the socket (7) man page. |
459 | 459 | ||
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 661d3c316a17..048c92b487f6 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt | |||
@@ -21,26 +21,38 @@ has such a feature). | |||
21 | 21 | ||
22 | SO_TIMESTAMPING: | 22 | SO_TIMESTAMPING: |
23 | 23 | ||
24 | Instructs the socket layer which kind of information is wanted. The | 24 | Instructs the socket layer which kind of information should be collected |
25 | parameter is an integer with some of the following bits set. Setting | 25 | and/or reported. The parameter is an integer with some of the following |
26 | other bits is an error and doesn't change the current state. | 26 | bits set. Setting other bits is an error and doesn't change the current |
27 | 27 | state. | |
28 | SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamp in hardware | 28 | |
29 | SOF_TIMESTAMPING_TX_SOFTWARE: if SOF_TIMESTAMPING_TX_HARDWARE is off or | 29 | Four of the bits are requests to the stack to try to generate |
30 | fails, then do it in software | 30 | timestamps. Any combination of them is valid. |
31 | SOF_TIMESTAMPING_RX_HARDWARE: return the original, unmodified time stamp | 31 | |
32 | as generated by the hardware | 32 | SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware |
33 | SOF_TIMESTAMPING_RX_SOFTWARE: if SOF_TIMESTAMPING_RX_HARDWARE is off or | 33 | SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software |
34 | fails, then do it in software | 34 | SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware |
35 | SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp | 35 | SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software |
36 | SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to | 36 | |
37 | the system time base | 37 | The other three bits control which timestamps will be reported in a |
38 | SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in | 38 | generated control message. If none of these bits are set or if none of |
39 | software | 39 | the set bits correspond to data that is available, then the control |
40 | 40 | message will not be generated: | |
41 | SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. | 41 | |
42 | SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the | 42 | SOF_TIMESTAMPING_SOFTWARE: report systime if available |
43 | following control message: | 43 | SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available |
44 | SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available | ||
45 | |||
46 | It is worth noting that timestamps may be collected for reasons other | ||
47 | than being requested by a particular socket with | ||
48 | SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that | ||
49 | can generate hardware receive timestamps ignore | ||
50 | SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag | ||
51 | in case future drivers pay attention. | ||
52 | |||
53 | If timestamps are reported, they will appear in a control message with | ||
54 | cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like | ||
55 | this: | ||
44 | 56 | ||
45 | struct scm_timestamping { | 57 | struct scm_timestamping { |
46 | struct timespec systime; | 58 | struct timespec systime; |
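To tie the flag semantics above together, here is a minimal userspace sketch (assuming an already-open socket) that requests hardware timestamp generation plus raw-hardware and software reporting; the resulting timestamps then arrive in the scm_timestamping control message shown above.

```c
#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Sketch: enable TX/RX hardware timestamp generation and report both
 * raw hardware and software timestamps on socket 'sock'. */
int enable_timestamping(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE |
		    SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
```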
diff --git a/MAINTAINERS b/MAINTAINERS index b7befe758429..b3fdb0f004ba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1738,6 +1738,7 @@ F: include/uapi/linux/bfs_fs.h | |||
1738 | BLACKFIN ARCHITECTURE | 1738 | BLACKFIN ARCHITECTURE |
1739 | M: Steven Miao <realmz6@gmail.com> | 1739 | M: Steven Miao <realmz6@gmail.com> |
1740 | L: adi-buildroot-devel@lists.sourceforge.net | 1740 | L: adi-buildroot-devel@lists.sourceforge.net |
1741 | T: git git://git.code.sf.net/p/adi-linux/code | ||
1741 | W: http://blackfin.uclinux.org | 1742 | W: http://blackfin.uclinux.org |
1742 | S: Supported | 1743 | S: Supported |
1743 | F: arch/blackfin/ | 1744 | F: arch/blackfin/ |
@@ -6002,6 +6003,8 @@ F: include/linux/netdevice.h | |||
6002 | F: include/uapi/linux/in.h | 6003 | F: include/uapi/linux/in.h |
6003 | F: include/uapi/linux/net.h | 6004 | F: include/uapi/linux/net.h |
6004 | F: include/uapi/linux/netdevice.h | 6005 | F: include/uapi/linux/netdevice.h |
6006 | F: tools/net/ | ||
6007 | F: tools/testing/selftests/net/ | ||
6005 | 6008 | ||
6006 | NETWORKING [IPv4/IPv6] | 6009 | NETWORKING [IPv4/IPv6] |
6007 | M: "David S. Miller" <davem@davemloft.net> | 6010 | M: "David S. Miller" <davem@davemloft.net> |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 14 | 2 | PATCHLEVEL = 14 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Shuffling Zombie Juror | 5 | NAME = Shuffling Zombie Juror |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h index 184066ceb1f6..053c17b36559 100644 --- a/arch/cris/include/asm/bitops.h +++ b/arch/cris/include/asm/bitops.h | |||
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
144 | * definition, which doesn't have the same semantics. We don't want to | 144 | * definition, which doesn't have the same semantics. We don't want to |
145 | * use -fno-builtin, so just hide the name ffs. | 145 | * use -fno-builtin, so just hide the name ffs. |
146 | */ | 146 | */ |
147 | #define ffs kernel_ffs | 147 | #define ffs(x) kernel_ffs(x) |
148 | 148 | ||
149 | #include <asm-generic/bitops/fls.h> | 149 | #include <asm-generic/bitops/fls.h> |
150 | #include <asm-generic/bitops/__fls.h> | 150 | #include <asm-generic/bitops/__fls.h> |
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a96bcf83a735..20e8a9b21d75 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) | |||
98 | /* attempt to allocate a granule's worth of cached memory pages */ | 98 | /* attempt to allocate a granule's worth of cached memory pages */ |
99 | 99 | ||
100 | page = alloc_pages_exact_node(nid, | 100 | page = alloc_pages_exact_node(nid, |
101 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 101 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
102 | IA64_GRANULE_SHIFT-PAGE_SHIFT); | 102 | IA64_GRANULE_SHIFT-PAGE_SHIFT); |
103 | if (!page) { | 103 | if (!page) { |
104 | mutex_unlock(&uc_pool->add_chunk_mutex); | 104 | mutex_unlock(&uc_pool->add_chunk_mutex); |
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 5ec1e47a0d77..e865d748179b 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c | |||
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) | |||
123 | 123 | ||
124 | area->nid = nid; | 124 | area->nid = nid; |
125 | area->order = order; | 125 | area->order = order; |
126 | area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, | 126 | area->pages = alloc_pages_exact_node(area->nid, |
127 | GFP_KERNEL|__GFP_THISNODE, | ||
127 | area->order); | 128 | area->order); |
128 | 129 | ||
129 | if (!area->pages) { | 130 | if (!area->pages) { |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index c026cca5602c..f3aaf231b4e5 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -341,10 +341,6 @@ config X86_USE_3DNOW | |||
341 | def_bool y | 341 | def_bool y |
342 | depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML | 342 | depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML |
343 | 343 | ||
344 | config X86_OOSTORE | ||
345 | def_bool y | ||
346 | depends on (MWINCHIP3D || MWINCHIPC6) && MTRR | ||
347 | |||
348 | # | 344 | # |
349 | # P6_NOPs are a relatively minor optimization that require a family >= | 345 | # P6_NOPs are a relatively minor optimization that require a family >= |
350 | # 6 processor, except that it is broken on certain VIA chips. | 346 | # 6 processor, except that it is broken on certain VIA chips. |
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 04a48903b2eb..69bbb4845020 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
@@ -85,11 +85,7 @@ | |||
85 | #else | 85 | #else |
86 | # define smp_rmb() barrier() | 86 | # define smp_rmb() barrier() |
87 | #endif | 87 | #endif |
88 | #ifdef CONFIG_X86_OOSTORE | 88 | #define smp_wmb() barrier() |
89 | # define smp_wmb() wmb() | ||
90 | #else | ||
91 | # define smp_wmb() barrier() | ||
92 | #endif | ||
93 | #define smp_read_barrier_depends() read_barrier_depends() | 89 | #define smp_read_barrier_depends() read_barrier_depends() |
94 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 90 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
95 | #else /* !SMP */ | 91 | #else /* !SMP */ |
@@ -100,7 +96,7 @@ | |||
100 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | 96 | #define set_mb(var, value) do { var = value; barrier(); } while (0) |
101 | #endif /* SMP */ | 97 | #endif /* SMP */ |
102 | 98 | ||
103 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | 99 | #if defined(CONFIG_X86_PPRO_FENCE) |
104 | 100 | ||
105 | /* | 101 | /* |
106 | * For either of these options x86 doesn't have a strong TSO memory | 102 | * For either of these options x86 doesn't have a strong TSO memory |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 34f69cb9350a..91d9c69a629e 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) | |||
237 | 237 | ||
238 | static inline void flush_write_buffers(void) | 238 | static inline void flush_write_buffers(void) |
239 | { | 239 | { |
240 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | 240 | #if defined(CONFIG_X86_PPRO_FENCE) |
241 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); | 241 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); |
242 | #endif | 242 | #endif |
243 | } | 243 | } |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index bf156ded74b5..0f62f5482d91 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -26,10 +26,9 @@ | |||
26 | # define LOCK_PTR_REG "D" | 26 | # define LOCK_PTR_REG "D" |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | #if defined(CONFIG_X86_32) && \ | 29 | #if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE)) |
30 | (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)) | ||
31 | /* | 30 | /* |
32 | * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock | 31 | * On PPro SMP, we use a locked operation to unlock |
33 | * (PPro errata 66, 92) | 32 | * (PPro errata 66, 92) |
34 | */ | 33 | */ |
35 | # define UNLOCK_LOCK_PREFIX LOCK_PREFIX | 34 | # define UNLOCK_LOCK_PREFIX LOCK_PREFIX |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 8779edab684e..d8fba5c15fbd 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -8,236 +8,6 @@ | |||
8 | 8 | ||
9 | #include "cpu.h" | 9 | #include "cpu.h" |
10 | 10 | ||
11 | #ifdef CONFIG_X86_OOSTORE | ||
12 | |||
13 | static u32 power2(u32 x) | ||
14 | { | ||
15 | u32 s = 1; | ||
16 | |||
17 | while (s <= x) | ||
18 | s <<= 1; | ||
19 | |||
20 | return s >>= 1; | ||
21 | } | ||
22 | |||
23 | |||
24 | /* | ||
25 | * Set up an actual MCR | ||
26 | */ | ||
27 | static void centaur_mcr_insert(int reg, u32 base, u32 size, int key) | ||
28 | { | ||
29 | u32 lo, hi; | ||
30 | |||
31 | hi = base & ~0xFFF; | ||
32 | lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ | ||
33 | lo &= ~0xFFF; /* Remove the ctrl value bits */ | ||
34 | lo |= key; /* Attribute we wish to set */ | ||
35 | wrmsr(reg+MSR_IDT_MCR0, lo, hi); | ||
36 | mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */ | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Figure what we can cover with MCR's | ||
41 | * | ||
42 | * Shortcut: We know you can't put 4Gig of RAM on a winchip | ||
43 | */ | ||
44 | static u32 ramtop(void) | ||
45 | { | ||
46 | u32 clip = 0xFFFFFFFFUL; | ||
47 | u32 top = 0; | ||
48 | int i; | ||
49 | |||
50 | for (i = 0; i < e820.nr_map; i++) { | ||
51 | unsigned long start, end; | ||
52 | |||
53 | if (e820.map[i].addr > 0xFFFFFFFFUL) | ||
54 | continue; | ||
55 | /* | ||
56 | * Don't MCR over reserved space. Ignore the ISA hole | ||
57 | * we frob around that catastrophe already | ||
58 | */ | ||
59 | if (e820.map[i].type == E820_RESERVED) { | ||
60 | if (e820.map[i].addr >= 0x100000UL && | ||
61 | e820.map[i].addr < clip) | ||
62 | clip = e820.map[i].addr; | ||
63 | continue; | ||
64 | } | ||
65 | start = e820.map[i].addr; | ||
66 | end = e820.map[i].addr + e820.map[i].size; | ||
67 | if (start >= end) | ||
68 | continue; | ||
69 | if (end > top) | ||
70 | top = end; | ||
71 | } | ||
72 | /* | ||
73 | * Everything below 'top' should be RAM except for the ISA hole. | ||
74 | * Because of the limited MCR's we want to map NV/ACPI into our | ||
75 | * MCR range for gunk in RAM | ||
76 | * | ||
77 | * Clip might cause us to MCR insufficient RAM but that is an | ||
78 | * acceptable failure mode and should only bite obscure boxes with | ||
79 | * a VESA hole at 15Mb | ||
80 | * | ||
81 | * The second case Clip sometimes kicks in is when the EBDA is marked | ||
82 | * as reserved. Again we fail safe with reasonable results | ||
83 | */ | ||
84 | if (top > clip) | ||
85 | top = clip; | ||
86 | |||
87 | return top; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Compute a set of MCR's to give maximum coverage | ||
92 | */ | ||
93 | static int centaur_mcr_compute(int nr, int key) | ||
94 | { | ||
95 | u32 mem = ramtop(); | ||
96 | u32 root = power2(mem); | ||
97 | u32 base = root; | ||
98 | u32 top = root; | ||
99 | u32 floor = 0; | ||
100 | int ct = 0; | ||
101 | |||
102 | while (ct < nr) { | ||
103 | u32 fspace = 0; | ||
104 | u32 high; | ||
105 | u32 low; | ||
106 | |||
107 | /* | ||
108 | * Find the largest block we will fill going upwards | ||
109 | */ | ||
110 | high = power2(mem-top); | ||
111 | |||
112 | /* | ||
113 | * Find the largest block we will fill going downwards | ||
114 | */ | ||
115 | low = base/2; | ||
116 | |||
117 | /* | ||
118 | * Don't fill below 1Mb going downwards as there | ||
119 | * is an ISA hole in the way. | ||
120 | */ | ||
121 | if (base <= 1024*1024) | ||
122 | low = 0; | ||
123 | |||
124 | /* | ||
125 | * See how much space we could cover by filling below | ||
126 | * the ISA hole | ||
127 | */ | ||
128 | |||
129 | if (floor == 0) | ||
130 | fspace = 512*1024; | ||
131 | else if (floor == 512*1024) | ||
132 | fspace = 128*1024; | ||
133 | |||
134 | /* And forget ROM space */ | ||
135 | |||
136 | /* | ||
137 | * Now install the largest coverage we get | ||
138 | */ | ||
139 | if (fspace > high && fspace > low) { | ||
140 | centaur_mcr_insert(ct, floor, fspace, key); | ||
141 | floor += fspace; | ||
142 | } else if (high > low) { | ||
143 | centaur_mcr_insert(ct, top, high, key); | ||
144 | top += high; | ||
145 | } else if (low > 0) { | ||
146 | base -= low; | ||
147 | centaur_mcr_insert(ct, base, low, key); | ||
148 | } else | ||
149 | break; | ||
150 | ct++; | ||
151 | } | ||
152 | /* | ||
153 | * We loaded ct values. We now need to set the mask. The caller | ||
154 | * must do this bit. | ||
155 | */ | ||
156 | return ct; | ||
157 | } | ||
158 | |||
159 | static void centaur_create_optimal_mcr(void) | ||
160 | { | ||
161 | int used; | ||
162 | int i; | ||
163 | |||
164 | /* | ||
165 | * Allocate up to 6 mcrs to mark as much of ram as possible | ||
166 | * as write combining and weak write ordered. | ||
167 | * | ||
168 | * To experiment with: Linux never uses stack operations for | ||
169 | * mmio spaces so we could globally enable stack operation wc | ||
170 | * | ||
171 | * Load the registers with type 31 - full write combining, all | ||
172 | * writes weakly ordered. | ||
173 | */ | ||
174 | used = centaur_mcr_compute(6, 31); | ||
175 | |||
176 | /* | ||
177 | * Wipe unused MCRs | ||
178 | */ | ||
179 | for (i = used; i < 8; i++) | ||
180 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | ||
181 | } | ||
182 | |||
183 | static void winchip2_create_optimal_mcr(void) | ||
184 | { | ||
185 | u32 lo, hi; | ||
186 | int used; | ||
187 | int i; | ||
188 | |||
189 | /* | ||
190 | * Allocate up to 6 mcrs to mark as much of ram as possible | ||
191 | * as write combining, weak store ordered. | ||
192 | * | ||
193 | * Load the registers with type 25 | ||
194 | * 8 - weak write ordering | ||
195 | * 16 - weak read ordering | ||
196 | * 1 - write combining | ||
197 | */ | ||
198 | used = centaur_mcr_compute(6, 25); | ||
199 | |||
200 | /* | ||
201 | * Mark the registers we are using. | ||
202 | */ | ||
203 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
204 | for (i = 0; i < used; i++) | ||
205 | lo |= 1<<(9+i); | ||
206 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
207 | |||
208 | /* | ||
209 | * Wipe unused MCRs | ||
210 | */ | ||
211 | |||
212 | for (i = used; i < 8; i++) | ||
213 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Handle the MCR key on the Winchip 2. | ||
218 | */ | ||
219 | static void winchip2_unprotect_mcr(void) | ||
220 | { | ||
221 | u32 lo, hi; | ||
222 | u32 key; | ||
223 | |||
224 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
225 | lo &= ~0x1C0; /* blank bits 8-6 */ | ||
226 | key = (lo>>17) & 7; | ||
227 | lo |= key<<6; /* replace with unlock key */ | ||
228 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
229 | } | ||
230 | |||
231 | static void winchip2_protect_mcr(void) | ||
232 | { | ||
233 | u32 lo, hi; | ||
234 | |||
235 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
236 | lo &= ~0x1C0; /* blank bits 8-6 */ | ||
237 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
238 | } | ||
239 | #endif /* CONFIG_X86_OOSTORE */ | ||
240 | |||
241 | #define ACE_PRESENT (1 << 6) | 11 | #define ACE_PRESENT (1 << 6) |
242 | #define ACE_ENABLED (1 << 7) | 12 | #define ACE_ENABLED (1 << 7) |
243 | #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ | 13 | #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ |
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c) | |||
362 | fcr_clr = DPDC; | 132 | fcr_clr = DPDC; |
363 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); | 133 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); |
364 | clear_cpu_cap(c, X86_FEATURE_TSC); | 134 | clear_cpu_cap(c, X86_FEATURE_TSC); |
365 | #ifdef CONFIG_X86_OOSTORE | ||
366 | centaur_create_optimal_mcr(); | ||
367 | /* | ||
368 | * Enable: | ||
369 | * write combining on non-stack, non-string | ||
370 | * write combining on string, all types | ||
371 | * weak write ordering | ||
372 | * | ||
373 | * The C6 original lacks weak read order | ||
374 | * | ||
375 | * Note 0x120 is write only on Winchip 1 | ||
376 | */ | ||
377 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); | ||
378 | #endif | ||
379 | break; | 135 | break; |
380 | case 8: | 136 | case 8: |
381 | switch (c->x86_mask) { | 137 | switch (c->x86_mask) { |
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c) | |||
392 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| | 148 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
393 | E2MMX|EAMD3D; | 149 | E2MMX|EAMD3D; |
394 | fcr_clr = DPDC; | 150 | fcr_clr = DPDC; |
395 | #ifdef CONFIG_X86_OOSTORE | ||
396 | winchip2_unprotect_mcr(); | ||
397 | winchip2_create_optimal_mcr(); | ||
398 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
399 | /* | ||
400 | * Enable: | ||
401 | * write combining on non-stack, non-string | ||
402 | * write combining on string, all types | ||
403 | * weak write ordering | ||
404 | */ | ||
405 | lo |= 31; | ||
406 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
407 | winchip2_protect_mcr(); | ||
408 | #endif | ||
409 | break; | 151 | break; |
410 | case 9: | 152 | case 9: |
411 | name = "3"; | 153 | name = "3"; |
412 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| | 154 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
413 | E2MMX|EAMD3D; | 155 | E2MMX|EAMD3D; |
414 | fcr_clr = DPDC; | 156 | fcr_clr = DPDC; |
415 | #ifdef CONFIG_X86_OOSTORE | ||
416 | winchip2_unprotect_mcr(); | ||
417 | winchip2_create_optimal_mcr(); | ||
418 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
419 | /* | ||
420 | * Enable: | ||
421 | * write combining on non-stack, non-string | ||
422 | * write combining on string, all types | ||
423 | * weak write ordering | ||
424 | */ | ||
425 | lo |= 31; | ||
426 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
427 | winchip2_protect_mcr(); | ||
428 | #endif | ||
429 | break; | 157 | break; |
430 | default: | 158 | default: |
431 | name = "??"; | 159 | name = "??"; |
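For readers tracking the removal: the deleted CONFIG_X86_OOSTORE support boiled down to one fixed sequence per WinChip 2/3 model, combining the helpers deleted above. A sketch that restates the deleted logic in one place (not a drop-in replacement):

static void winchip2_enable_oostore(void)
{
	u32 lo, hi;

	winchip2_unprotect_mcr();	/* copy key bits 17-19 into bits 8-6 */
	winchip2_create_optimal_mcr();	/* compute and load up to 6 type-25 MCRs */
	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	lo |= 31;			/* write combining + weak write ordering */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
	winchip2_protect_mcr();		/* blank the key bits again */
}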
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index c88f7f4b03ee..047f540cf3f7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -3334,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type) | |||
3334 | if (!pmus) | 3334 | if (!pmus) |
3335 | return -ENOMEM; | 3335 | return -ENOMEM; |
3336 | 3336 | ||
3337 | type->pmus = pmus; | ||
3338 | |||
3337 | type->unconstrainted = (struct event_constraint) | 3339 | type->unconstrainted = (struct event_constraint) |
3338 | __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, | 3340 | __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, |
3339 | 0, type->num_counters, 0, 0); | 3341 | 0, type->num_counters, 0, 0); |
@@ -3369,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type) | |||
3369 | } | 3371 | } |
3370 | 3372 | ||
3371 | type->pmu_group = &uncore_pmu_attr_group; | 3373 | type->pmu_group = &uncore_pmu_attr_group; |
3372 | type->pmus = pmus; | ||
3373 | return 0; | 3374 | return 0; |
3374 | fail: | 3375 | fail: |
3375 | uncore_type_exit(type); | 3376 | uncore_type_exit(type); |
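The uncore fix above applies the usual ownership-transfer pattern: publish a fresh allocation into its owning structure before the first failure point, so a single shared error path can free it. A minimal sketch of the pattern, with hypothetical example_* names:

static int example_type_init(struct example_type *type)
{
	struct example_pmu *pmus;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;
	type->pmus = pmus;		/* owned by @type from here on */

	if (example_setup_events(type))
		goto fail;
	return 0;
fail:
	example_type_exit(type);	/* frees type->pmus with everything else */
	return -ENOMEM;
}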
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index e8368c6dd2a2..d5dd80814419 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin); | |||
86 | 86 | ||
87 | void __kernel_fpu_end(void) | 87 | void __kernel_fpu_end(void) |
88 | { | 88 | { |
89 | if (use_eager_fpu()) | 89 | if (use_eager_fpu()) { |
90 | math_state_restore(); | 90 | /* |
91 | else | 91 | * For eager fpu, most of the time, tsk_used_math() is true. |
92 | * Restore the user math as we are done with the kernel usage. | ||
93 | * In a few instances, during thread exit, signal handling etc., | ||
94 | * tsk_used_math() is false. Those few places will take proper | ||
95 | * actions, so we don't need to restore the math here. | ||
96 | */ | ||
97 | if (likely(tsk_used_math(current))) | ||
98 | math_state_restore(); | ||
99 | } else { | ||
92 | stts(); | 100 | stts(); |
101 | } | ||
93 | } | 102 | } |
94 | EXPORT_SYMBOL(__kernel_fpu_end); | 103 | EXPORT_SYMBOL(__kernel_fpu_end); |
95 | 104 | ||
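__kernel_fpu_end() above is the tail of the kernel_fpu_begin()/kernel_fpu_end() bracket that any kernel code touching FPU or SIMD registers must use. A usage sketch (example_* is hypothetical; the header location varies across kernel versions):

#include <asm/i387.h>

static void example_simd_op(void *dst, const void *src, size_t len)
{
	kernel_fpu_begin();	/* save user FPU state, allow in-kernel SIMD */
	/* ... SSE/AVX work would go here ... */
	kernel_fpu_end();	/* with eager FPU, now restores user math only
				 * when tsk_used_math(current) holds */
}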
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 7c6acd4b8995..ff898bbf579d 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev) | |||
529 | return; | 529 | return; |
530 | 530 | ||
531 | pci_read_config_dword(nb_ht, 0x60, &val); | 531 | pci_read_config_dword(nb_ht, 0x60, &val); |
532 | node = val & 7; | 532 | node = pcibus_to_node(dev->bus) | (val & 7); |
533 | /* | 533 | /* |
534 | * Some hardware may return an invalid node ID, | 534 | * Some hardware may return an invalid node ID, |
535 | * so check it first: | 535 | * so check it first: |
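The quirk fix above matters on boxes with more than eight NUMA nodes: the HT config register carries only the low three bits of the node id, so the upper bits are recovered from the bus. A worked example with hypothetical values:

int bus_node = pcibus_to_node(dev->bus);	/* e.g.  8 = 0b1000 */
u32 low_bits = val & 7;				/* e.g.  2 = 0b0010 */
int node     = bus_node | low_bits;		/* 10 = 0b1010      */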
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e81df8fce027..2de1bc09a8d4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm) | |||
3002 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); | 3002 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); |
3003 | /* instruction emulation calls kvm_set_cr8() */ | 3003 | /* instruction emulation calls kvm_set_cr8() */ |
3004 | r = cr_interception(svm); | 3004 | r = cr_interception(svm); |
3005 | if (irqchip_in_kernel(svm->vcpu.kvm)) { | 3005 | if (irqchip_in_kernel(svm->vcpu.kvm)) |
3006 | clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); | ||
3007 | return r; | 3006 | return r; |
3008 | } | ||
3009 | if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) | 3007 | if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) |
3010 | return r; | 3008 | return r; |
3011 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | 3009 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; |
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) | |||
3567 | if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) | 3565 | if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) |
3568 | return; | 3566 | return; |
3569 | 3567 | ||
3568 | clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); | ||
3569 | |||
3570 | if (irr == -1) | 3570 | if (irr == -1) |
3571 | return; | 3571 | return; |
3572 | 3572 | ||
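After the change above, update_cr8_intercept() recomputes the CR8-write intercept from scratch on each call. The resulting flow, sketched (the final comparison is recalled from the rest of the function, which this hunk does not show):

clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);	/* default: writes pass through */

if (irr == -1)					/* no interrupt pending */
	return;

if (tpr >= irr)					/* TPR masks it: trap CR8 again */
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);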
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 877b9a1b2152..01495755701b 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S | |||
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh: | |||
140 | push %r9; \ | 140 | push %r9; \ |
141 | push SKBDATA; \ | 141 | push SKBDATA; \ |
142 | /* rsi already has offset */ \ | 142 | /* rsi already has offset */ \ |
143 | mov $SIZE,%ecx; /* size */ \ | 143 | mov $SIZE,%edx; /* size */ \ |
144 | call bpf_internal_load_pointer_neg_helper; \ | 144 | call bpf_internal_load_pointer_neg_helper; \ |
145 | test %rax,%rax; \ | 145 | test %rax,%rax; \ |
146 | pop SKBDATA; \ | 146 | pop SKBDATA; \ |
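The one-register JIT fix above is a calling-convention bug: the SysV x86-64 ABI passes integer arguments in %rdi, %rsi, %rdx, %rcx, %r8, %r9, and size is the helper's third parameter, so it belongs in %edx; loading it into %ecx left %edx holding stale data. The callee's prototype, from net/core/filter.c:

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,	/* %rdi */
					   int k,			/* %esi */
					   unsigned int size);		/* %edx */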
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index 7d01b8c56c00..cc04e67bfd05 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h | |||
@@ -40,11 +40,7 @@ | |||
40 | #define smp_rmb() barrier() | 40 | #define smp_rmb() barrier() |
41 | #endif /* CONFIG_X86_PPRO_FENCE */ | 41 | #endif /* CONFIG_X86_PPRO_FENCE */ |
42 | 42 | ||
43 | #ifdef CONFIG_X86_OOSTORE | ||
44 | #define smp_wmb() wmb() | ||
45 | #else /* CONFIG_X86_OOSTORE */ | ||
46 | #define smp_wmb() barrier() | 43 | #define smp_wmb() barrier() |
47 | #endif /* CONFIG_X86_OOSTORE */ | ||
48 | 44 | ||
49 | #define smp_read_barrier_depends() read_barrier_depends() | 45 | #define smp_read_barrier_depends() read_barrier_depends() |
50 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 46 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
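With the WinChip OOSTORE case gone, UML's smp_wmb() is unconditionally a compiler-only barrier, matching the rest of x86. For reference, barrier() is the usual empty asm with a memory clobber:

#define barrier() __asm__ __volatile__("" : : : "memory")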
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index b718806657cd..c40fb2e81bbc 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -71,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | static bool acpi_sleep_state_supported(u8 sleep_state) | ||
75 | { | ||
76 | acpi_status status; | ||
77 | u8 type_a, type_b; | ||
78 | |||
79 | status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b); | ||
80 | return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware | ||
81 | || (acpi_gbl_FADT.sleep_control.address | ||
82 | && acpi_gbl_FADT.sleep_status.address)); | ||
83 | } | ||
84 | |||
74 | #ifdef CONFIG_ACPI_SLEEP | 85 | #ifdef CONFIG_ACPI_SLEEP |
75 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 86 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
76 | 87 | ||
@@ -604,15 +615,9 @@ static void acpi_sleep_suspend_setup(void) | |||
604 | { | 615 | { |
605 | int i; | 616 | int i; |
606 | 617 | ||
607 | for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { | 618 | for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) |
608 | acpi_status status; | 619 | if (acpi_sleep_state_supported(i)) |
609 | u8 type_a, type_b; | ||
610 | |||
611 | status = acpi_get_sleep_type_data(i, &type_a, &type_b); | ||
612 | if (ACPI_SUCCESS(status)) { | ||
613 | sleep_states[i] = 1; | 620 | sleep_states[i] = 1; |
614 | } | ||
615 | } | ||
616 | 621 | ||
617 | suspend_set_ops(old_suspend_ordering ? | 622 | suspend_set_ops(old_suspend_ordering ? |
618 | &acpi_suspend_ops_old : &acpi_suspend_ops); | 623 | &acpi_suspend_ops_old : &acpi_suspend_ops); |
@@ -740,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = { | |||
740 | 745 | ||
741 | static void acpi_sleep_hibernate_setup(void) | 746 | static void acpi_sleep_hibernate_setup(void) |
742 | { | 747 | { |
743 | acpi_status status; | 748 | if (!acpi_sleep_state_supported(ACPI_STATE_S4)) |
744 | u8 type_a, type_b; | ||
745 | |||
746 | status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); | ||
747 | if (ACPI_FAILURE(status)) | ||
748 | return; | 749 | return; |
749 | 750 | ||
750 | hibernation_set_ops(old_suspend_ordering ? | 751 | hibernation_set_ops(old_suspend_ordering ? |
@@ -793,8 +794,6 @@ static void acpi_power_off(void) | |||
793 | 794 | ||
794 | int __init acpi_sleep_init(void) | 795 | int __init acpi_sleep_init(void) |
795 | { | 796 | { |
796 | acpi_status status; | ||
797 | u8 type_a, type_b; | ||
798 | char supported[ACPI_S_STATE_COUNT * 3 + 1]; | 797 | char supported[ACPI_S_STATE_COUNT * 3 + 1]; |
799 | char *pos = supported; | 798 | char *pos = supported; |
800 | int i; | 799 | int i; |
@@ -806,8 +805,7 @@ int __init acpi_sleep_init(void) | |||
806 | acpi_sleep_suspend_setup(); | 805 | acpi_sleep_suspend_setup(); |
807 | acpi_sleep_hibernate_setup(); | 806 | acpi_sleep_hibernate_setup(); |
808 | 807 | ||
809 | status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); | 808 | if (acpi_sleep_state_supported(ACPI_STATE_S5)) { |
810 | if (ACPI_SUCCESS(status)) { | ||
811 | sleep_states[ACPI_STATE_S5] = 1; | 809 | sleep_states[ACPI_STATE_S5] = 1; |
812 | pm_power_off_prepare = acpi_power_off_prepare; | 810 | pm_power_off_prepare = acpi_power_off_prepare; |
813 | pm_power_off = acpi_power_off; | 811 | pm_power_off = acpi_power_off; |
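The new acpi_sleep_state_supported() helper centralizes a predicate that was open-coded three times above. Decomposed with the clauses named (a restatement of the helper, not new logic): a state is usable when its _Sx package exists and, on ACPI reduced-hardware platforms, when the FADT also provides both sleep registers.

bool has_sx         = ACPI_SUCCESS(status);		/* _Sx package found */
bool fixed_hw       = !acpi_gbl_reduced_hardware;	/* classic machine   */
bool has_sleep_regs = acpi_gbl_FADT.sleep_control.address &&
		      acpi_gbl_FADT.sleep_status.address;

return has_sx && (fixed_hw || has_sleep_regs);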
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 65d3f1b5966c..8cb2522d592a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4225,8 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4225 | 4225 | ||
4226 | /* devices that don't properly handle queued TRIM commands */ | 4226 | /* devices that don't properly handle queued TRIM commands */ |
4227 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4227 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
4228 | { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4228 | { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
4229 | { "Crucial_CT???M500SSD3", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
4230 | 4229 | ||
4231 | /* | 4230 | /* |
4232 | * Some WD SATA-I drives spin up and down erratically when the link | 4231 | * Some WD SATA-I drives spin up and down erratically when the link |
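libata matches these blacklist entries with its own small glob matcher, where '?' matches exactly one character and '*' any run, so the single consolidated pattern above covers both former entries and any further suffix. Illustrative model strings (hypothetical sizes):

/* "Crucial_CT???M500SSD*" matches, for example:
 *   "Crucial_CT120M500SSD1"  (??? = "120", * = "1", the old first entry)
 *   "Crucial_CT240M500SSD3"  (??? = "240", * = "3", the old second entry)
 *   "Crucial_CT960M500SSD4"  (other trailing variants, now covered too)
 */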
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cf485d928903..199b52b7c3e1 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1129,7 +1129,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
1129 | per_cpu(cpufreq_cpu_data, j) = policy; | 1129 | per_cpu(cpufreq_cpu_data, j) = policy; |
1130 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1130 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1131 | 1131 | ||
1132 | if (cpufreq_driver->get) { | 1132 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
1133 | policy->cur = cpufreq_driver->get(policy->cpu); | 1133 | policy->cur = cpufreq_driver->get(policy->cpu); |
1134 | if (!policy->cur) { | 1134 | if (!policy->cur) { |
1135 | pr_err("%s: ->get() failed\n", __func__); | 1135 | pr_err("%s: ->get() failed\n", __func__); |
@@ -2143,7 +2143,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
2143 | * BIOS might change freq behind our back | 2143 | * BIOS might change freq behind our back |
2144 | * -> ask driver for current freq and notify governors about a change | 2144 | * -> ask driver for current freq and notify governors about a change |
2145 | */ | 2145 | */ |
2146 | if (cpufreq_driver->get) { | 2146 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
2147 | new_policy.cur = cpufreq_driver->get(cpu); | 2147 | new_policy.cur = cpufreq_driver->get(cpu); |
2148 | if (!policy->cur) { | 2148 | if (!policy->cur) { |
2149 | pr_debug("Driver did not initialize current freq"); | 2149 | pr_debug("Driver did not initialize current freq"); |
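->setpolicy drivers (intel_pstate is the in-tree example) choose operating frequencies on their own, so the core must not overwrite policy->cur from ->get(); hence the extra check above. A sketch of such a registration, with hypothetical example_* callbacks:

static struct cpufreq_driver example_pstate_driver = {
	.name		= "example_pstate",
	.init		= example_cpu_init,
	.setpolicy	= example_set_policy,	/* driver manages freqs itself */
	.get		= example_get,		/* reporting only, per this fix */
};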
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 4ea9b17ac17a..b8246227bab0 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
@@ -259,7 +259,9 @@ int ast_mm_init(struct ast_private *ast) | |||
259 | 259 | ||
260 | ret = ttm_bo_device_init(&ast->ttm.bdev, | 260 | ret = ttm_bo_device_init(&ast->ttm.bdev, |
261 | ast->ttm.bo_global_ref.ref.object, | 261 | ast->ttm.bo_global_ref.ref.object, |
262 | &ast_bo_driver, DRM_FILE_PAGE_OFFSET, | 262 | &ast_bo_driver, |
263 | dev->anon_inode->i_mapping, | ||
264 | DRM_FILE_PAGE_OFFSET, | ||
263 | true); | 265 | true); |
264 | if (ret) { | 266 | if (ret) { |
265 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | 267 | DRM_ERROR("Error initialising bo driver; %d\n", ret); |
@@ -324,7 +326,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align, | |||
324 | } | 326 | } |
325 | 327 | ||
326 | astbo->bo.bdev = &ast->ttm.bdev; | 328 | astbo->bo.bdev = &ast->ttm.bdev; |
327 | astbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
328 | 329 | ||
329 | ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 330 | ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
330 | 331 | ||
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index ce6858765b37..f488be55d650 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c | |||
@@ -225,7 +225,9 @@ int bochs_mm_init(struct bochs_device *bochs) | |||
225 | 225 | ||
226 | ret = ttm_bo_device_init(&bochs->ttm.bdev, | 226 | ret = ttm_bo_device_init(&bochs->ttm.bdev, |
227 | bochs->ttm.bo_global_ref.ref.object, | 227 | bochs->ttm.bo_global_ref.ref.object, |
228 | &bochs_bo_driver, DRM_FILE_PAGE_OFFSET, | 228 | &bochs_bo_driver, |
229 | bochs->dev->anon_inode->i_mapping, | ||
230 | DRM_FILE_PAGE_OFFSET, | ||
229 | true); | 231 | true); |
230 | if (ret) { | 232 | if (ret) { |
231 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | 233 | DRM_ERROR("Error initialising bo driver; %d\n", ret); |
@@ -359,7 +361,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align, | |||
359 | } | 361 | } |
360 | 362 | ||
361 | bochsbo->bo.bdev = &bochs->ttm.bdev; | 363 | bochsbo->bo.bdev = &bochs->ttm.bdev; |
362 | bochsbo->bo.bdev->dev_mapping = dev->dev_mapping; | 364 | bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; |
363 | 365 | ||
364 | bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 366 | bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
365 | 367 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 8b37c25ff9bd..92e6b7786097 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
@@ -259,7 +259,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus) | |||
259 | 259 | ||
260 | ret = ttm_bo_device_init(&cirrus->ttm.bdev, | 260 | ret = ttm_bo_device_init(&cirrus->ttm.bdev, |
261 | cirrus->ttm.bo_global_ref.ref.object, | 261 | cirrus->ttm.bo_global_ref.ref.object, |
262 | &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET, | 262 | &cirrus_bo_driver, |
263 | dev->anon_inode->i_mapping, | ||
264 | DRM_FILE_PAGE_OFFSET, | ||
263 | true); | 265 | true); |
264 | if (ret) { | 266 | if (ret) { |
265 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | 267 | DRM_ERROR("Error initialising bo driver; %d\n", ret); |
@@ -329,7 +331,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, | |||
329 | } | 331 | } |
330 | 332 | ||
331 | cirrusbo->bo.bdev = &cirrus->ttm.bdev; | 333 | cirrusbo->bo.bdev = &cirrus->ttm.bdev; |
332 | cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
333 | 334 | ||
334 | cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 335 | cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
335 | 336 | ||
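The ast, bochs and cirrus hunks above all pick up the same TTM interface change: ttm_bo_device_init() now takes the DRM device's anon-inode address_space, so TTM sets up dev_mapping once at device init instead of at every buffer-object creation (note the per-BO assignments deleted above). The updated signature, reconstructed from these call sites:

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,	/* new parameter */
		       uint64_t file_page_offset,
		       bool need_dma32);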
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 35ea15d5ffff..16ca28ed5ee8 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -38,12 +38,15 @@ | |||
38 | #include <drm/drm_edid.h> | 38 | #include <drm/drm_edid.h> |
39 | #include <drm/drm_fourcc.h> | 39 | #include <drm/drm_fourcc.h> |
40 | 40 | ||
41 | #include "drm_crtc_internal.h" | ||
42 | |||
41 | /** | 43 | /** |
42 | * drm_modeset_lock_all - take all modeset locks | 44 | * drm_modeset_lock_all - take all modeset locks |
43 | * @dev: drm device | 45 | * @dev: drm device |
44 | * | 46 | * |
45 | * This function takes all modeset locks, suitable where a more fine-grained | 47 | * This function takes all modeset locks, suitable where a more fine-grained |
46 | * scheme isn't (yet) implemented. | 48 | * scheme isn't (yet) implemented. Locks must be dropped with |
49 | * drm_modeset_unlock_all(). | ||
47 | */ | 50 | */ |
48 | void drm_modeset_lock_all(struct drm_device *dev) | 51 | void drm_modeset_lock_all(struct drm_device *dev) |
49 | { | 52 | { |
@@ -59,6 +62,8 @@ EXPORT_SYMBOL(drm_modeset_lock_all); | |||
59 | /** | 62 | /** |
60 | * drm_modeset_unlock_all - drop all modeset locks | 63 | * drm_modeset_unlock_all - drop all modeset locks |
61 | * @dev: device | 64 | * @dev: device |
65 | * | ||
66 | * This function drops all modeset locks taken by drm_modeset_lock_all(). | ||
62 | */ | 67 | */ |
63 | void drm_modeset_unlock_all(struct drm_device *dev) | 68 | void drm_modeset_unlock_all(struct drm_device *dev) |
64 | { | 69 | { |
@@ -74,6 +79,8 @@ EXPORT_SYMBOL(drm_modeset_unlock_all); | |||
74 | /** | 79 | /** |
75 | * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked | 80 | * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked |
76 | * @dev: device | 81 | * @dev: device |
82 | * | ||
83 | * Useful as a debug assert. | ||
77 | */ | 84 | */ |
78 | void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) | 85 | void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) |
79 | { | 86 | { |
@@ -241,6 +248,15 @@ void drm_connector_ida_destroy(void) | |||
241 | ida_destroy(&drm_connector_enum_list[i].ida); | 248 | ida_destroy(&drm_connector_enum_list[i].ida); |
242 | } | 249 | } |
243 | 250 | ||
251 | /** | ||
252 | * drm_get_encoder_name - return a string for encoder | ||
253 | * @encoder: encoder to compute name of | ||
254 | * | ||
255 | * Note that the buffer used by this function is globally shared and owned by | ||
256 | * the function itself. | ||
257 | * | ||
258 | * FIXME: This isn't really multithreading safe. | ||
259 | */ | ||
244 | const char *drm_get_encoder_name(const struct drm_encoder *encoder) | 260 | const char *drm_get_encoder_name(const struct drm_encoder *encoder) |
245 | { | 261 | { |
246 | static char buf[32]; | 262 | static char buf[32]; |
@@ -252,6 +268,15 @@ const char *drm_get_encoder_name(const struct drm_encoder *encoder) | |||
252 | } | 268 | } |
253 | EXPORT_SYMBOL(drm_get_encoder_name); | 269 | EXPORT_SYMBOL(drm_get_encoder_name); |
254 | 270 | ||
271 | /** | ||
272 | * drm_get_connector_name - return a string for connector | ||
273 | * @connector: connector to compute name of | ||
274 | * | ||
275 | * Note that the buffer used by this function is globally shared and owned by | ||
276 | * the function itself. | ||
277 | * | ||
278 | * FIXME: This isn't really multithreading safe. | ||
279 | */ | ||
255 | const char *drm_get_connector_name(const struct drm_connector *connector) | 280 | const char *drm_get_connector_name(const struct drm_connector *connector) |
256 | { | 281 | { |
257 | static char buf[32]; | 282 | static char buf[32]; |
@@ -263,6 +288,13 @@ const char *drm_get_connector_name(const struct drm_connector *connector) | |||
263 | } | 288 | } |
264 | EXPORT_SYMBOL(drm_get_connector_name); | 289 | EXPORT_SYMBOL(drm_get_connector_name); |
265 | 290 | ||
291 | /** | ||
292 | * drm_get_connector_status_name - return a string for connector status | ||
293 | * @status: connector status to compute name of | ||
294 | * | ||
295 | * In contrast to the other drm_get_*_name functions this one here returns a | ||
296 | * const pointer and hence is threadsafe. | ||
297 | */ | ||
266 | const char *drm_get_connector_status_name(enum drm_connector_status status) | 298 | const char *drm_get_connector_status_name(enum drm_connector_status status) |
267 | { | 299 | { |
268 | if (status == connector_status_connected) | 300 | if (status == connector_status_connected) |
@@ -292,6 +324,15 @@ static char printable_char(int c) | |||
292 | return isascii(c) && isprint(c) ? c : '?'; | 324 | return isascii(c) && isprint(c) ? c : '?'; |
293 | } | 325 | } |
294 | 326 | ||
327 | /** | ||
328 | * drm_get_format_name - return a string for drm fourcc format | ||
329 | * @format: format to compute name of | ||
330 | * | ||
331 | * Note that the buffer used by this function is globally shared and owned by | ||
332 | * the function itself. | ||
333 | * | ||
334 | * FIXME: This isn't really multithreading safe. | ||
335 | */ | ||
295 | const char *drm_get_format_name(uint32_t format) | 336 | const char *drm_get_format_name(uint32_t format) |
296 | { | 337 | { |
297 | static char buf[32]; | 338 | static char buf[32]; |
@@ -316,14 +357,16 @@ EXPORT_SYMBOL(drm_get_format_name); | |||
316 | * @obj_type: object type | 357 | * @obj_type: object type |
317 | * | 358 | * |
318 | * Create a unique identifier based on @ptr in @dev's identifier space. Used | 359 | * Create a unique identifier based on @ptr in @dev's identifier space. Used |
319 | * for tracking modes, CRTCs and connectors. | 360 | * for tracking modes, CRTCs and connectors. Note that despite the _get postfix, |
361 | * modeset identifiers are _not_ reference counted. Hence don't use this for | ||
362 | * reference counted modeset objects like framebuffers. | ||
320 | * | 363 | * |
321 | * RETURNS: | 364 | * Returns: |
322 | * New unique (relative to other objects in @dev) integer identifier for the | 365 | * New unique (relative to other objects in @dev) integer identifier for the |
323 | * object. | 366 | * object. |
324 | */ | 367 | */ |
325 | static int drm_mode_object_get(struct drm_device *dev, | 368 | int drm_mode_object_get(struct drm_device *dev, |
326 | struct drm_mode_object *obj, uint32_t obj_type) | 369 | struct drm_mode_object *obj, uint32_t obj_type) |
327 | { | 370 | { |
328 | int ret; | 371 | int ret; |
329 | 372 | ||
@@ -347,10 +390,12 @@ static int drm_mode_object_get(struct drm_device *dev, | |||
347 | * @dev: DRM device | 390 | * @dev: DRM device |
348 | * @object: object to free | 391 | * @object: object to free |
349 | * | 392 | * |
350 | * Free @id from @dev's unique identifier pool. | 393 | * Free @id from @dev's unique identifier pool. Note that despite the _get |
394 | * postfix, modeset identifiers are _not_ reference counted. Hence don't use this | ||
395 | * for reference counted modeset objects like framebuffers. | ||
351 | */ | 396 | */ |
352 | static void drm_mode_object_put(struct drm_device *dev, | 397 | void drm_mode_object_put(struct drm_device *dev, |
353 | struct drm_mode_object *object) | 398 | struct drm_mode_object *object) |
354 | { | 399 | { |
355 | mutex_lock(&dev->mode_config.idr_mutex); | 400 | mutex_lock(&dev->mode_config.idr_mutex); |
356 | idr_remove(&dev->mode_config.crtc_idr, object->id); | 401 | idr_remove(&dev->mode_config.crtc_idr, object->id); |
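Behind both helpers the identifier pool is a plain idr guarded by idr_mutex, which is why the kerneldoc stresses that these IDs are not reference counted. The allocation side, sketched from the function above:

mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
if (ret >= 0)
	obj->id = ret;		/* _put later simply does idr_remove() */
mutex_unlock(&dev->mode_config.idr_mutex);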
@@ -400,7 +445,7 @@ EXPORT_SYMBOL(drm_mode_object_find); | |||
400 | * since all the fb attributes are invariant over its lifetime, no further | 445 | * since all the fb attributes are invariant over its lifetime, no further |
401 | * locking but only correct reference counting is required. | 446 | * locking but only correct reference counting is required. |
402 | * | 447 | * |
403 | * RETURNS: | 448 | * Returns: |
404 | * Zero on success, error code on failure. | 449 | * Zero on success, error code on failure. |
405 | */ | 450 | */ |
406 | int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, | 451 | int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, |
@@ -461,7 +506,7 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev, | |||
461 | * | 506 | * |
462 | * If successful, this grabs an additional reference to the framebuffer - | 507 | * If successful, this grabs an additional reference to the framebuffer - |
463 | * callers need to make sure to eventually unreference the returned framebuffer | 508 | * callers need to make sure to eventually unreference the returned framebuffer |
464 | * again. | 509 | * again, using drm_framebuffer_unreference(). |
465 | */ | 510 | */ |
466 | struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, | 511 | struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, |
467 | uint32_t id) | 512 | uint32_t id) |
@@ -494,6 +539,8 @@ EXPORT_SYMBOL(drm_framebuffer_unreference); | |||
494 | /** | 539 | /** |
495 | * drm_framebuffer_reference - incr the fb refcnt | 540 | * drm_framebuffer_reference - incr the fb refcnt |
496 | * @fb: framebuffer | 541 | * @fb: framebuffer |
542 | * | ||
543 | * This functions increments the fb's refcount. | ||
497 | */ | 544 | */ |
498 | void drm_framebuffer_reference(struct drm_framebuffer *fb) | 545 | void drm_framebuffer_reference(struct drm_framebuffer *fb) |
499 | { | 546 | { |
@@ -550,8 +597,9 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); | |||
550 | * drm_framebuffer_cleanup - remove a framebuffer object | 597 | * drm_framebuffer_cleanup - remove a framebuffer object |
551 | * @fb: framebuffer to remove | 598 | * @fb: framebuffer to remove |
552 | * | 599 | * |
553 | * Cleanup references to a user-created framebuffer. This function is intended | 600 | * Cleanup framebuffer. This function is intended to be used from the driver's |
554 | * to be used from the drivers ->destroy callback. | 601 | * ->destroy callback. It can also be used to clean up driver private |
602 | * framebuffers embedded into a larger structure. | ||
555 | * | 603 | * |
556 | * Note that this function does not remove the fb from active usage - if it is | 604 |
557 | * still used anywhere, hilarity can ensue since userspace could call getfb on | 605 | * still used anywhere, hilarity can ensue since userspace could call getfb on |
@@ -644,7 +692,7 @@ EXPORT_SYMBOL(drm_framebuffer_remove); | |||
644 | * | 692 | * |
645 | * Inits a new object created as base part of a driver crtc object. | 693 | * Inits a new object created as base part of a driver crtc object. |
646 | * | 694 | * |
647 | * RETURNS: | 695 | * Returns: |
648 | * Zero on success, error code on failure. | 696 | * Zero on success, error code on failure. |
649 | */ | 697 | */ |
650 | int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, | 698 | int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, |
@@ -720,20 +768,6 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc) | |||
720 | } | 768 | } |
721 | EXPORT_SYMBOL(drm_crtc_index); | 769 | EXPORT_SYMBOL(drm_crtc_index); |
722 | 770 | ||
723 | /** | ||
724 | * drm_mode_probed_add - add a mode to a connector's probed mode list | ||
725 | * @connector: connector the new mode | ||
726 | * @mode: mode data | ||
727 | * | ||
728 | * Add @mode to @connector's mode list for later use. | ||
729 | */ | ||
730 | void drm_mode_probed_add(struct drm_connector *connector, | ||
731 | struct drm_display_mode *mode) | ||
732 | { | ||
733 | list_add_tail(&mode->head, &connector->probed_modes); | ||
734 | } | ||
735 | EXPORT_SYMBOL(drm_mode_probed_add); | ||
736 | |||
737 | /* | 771 | /* |
738 | * drm_mode_remove - remove and free a mode | 772 | * drm_mode_remove - remove and free a mode |
739 | * @connector: connector list to modify | 773 | * @connector: connector list to modify |
@@ -758,7 +792,7 @@ static void drm_mode_remove(struct drm_connector *connector, | |||
758 | * Initialises a preallocated connector. Connectors should be | 792 | * Initialises a preallocated connector. Connectors should be |
759 | * subclassed as part of driver connector objects. | 793 | * subclassed as part of driver connector objects. |
760 | * | 794 | * |
761 | * RETURNS: | 795 | * Returns: |
762 | * Zero on success, error code on failure. | 796 | * Zero on success, error code on failure. |
763 | */ | 797 | */ |
764 | int drm_connector_init(struct drm_device *dev, | 798 | int drm_connector_init(struct drm_device *dev, |
@@ -836,6 +870,14 @@ void drm_connector_cleanup(struct drm_connector *connector) | |||
836 | } | 870 | } |
837 | EXPORT_SYMBOL(drm_connector_cleanup); | 871 | EXPORT_SYMBOL(drm_connector_cleanup); |
838 | 872 | ||
873 | /** | ||
874 | * drm_connector_unplug_all - unregister connector userspace interfaces | ||
875 | * @dev: drm device | ||
876 | * | ||
877 | * This function unregisters all connector userspace interfaces in sysfs. Should | ||
878 | * be called when the device is disconnected, e.g. from a USB driver's | ||
879 | * ->disconnect callback. | ||
880 | */ | ||
839 | void drm_connector_unplug_all(struct drm_device *dev) | 881 | void drm_connector_unplug_all(struct drm_device *dev) |
840 | { | 882 | { |
841 | struct drm_connector *connector; | 883 | struct drm_connector *connector; |
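A typical caller of drm_connector_unplug_all() is the USB ->disconnect hook of a hot-unpluggable driver. A sketch with hypothetical example_* names (drm_unplug_dev() is the existing core helper udl uses for the same job):

static void example_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_connector_unplug_all(dev);	/* remove sysfs interfaces first */
	drm_unplug_dev(dev);		/* then mark the device as gone */
}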
@@ -847,6 +889,18 @@ void drm_connector_unplug_all(struct drm_device *dev) | |||
847 | } | 889 | } |
848 | EXPORT_SYMBOL(drm_connector_unplug_all); | 890 | EXPORT_SYMBOL(drm_connector_unplug_all); |
849 | 891 | ||
892 | /** | ||
893 | * drm_bridge_init - initialize a drm transcoder/bridge | ||
894 | * @dev: drm device | ||
895 | * @bridge: transcoder/bridge to set up | ||
896 | * @funcs: bridge function table | ||
897 | * | ||
898 | * Initialises a preallocated bridge. Bridges should be | ||
899 | * subclassed as part of driver connector objects. | ||
900 | * | ||
901 | * Returns: | ||
902 | * Zero on success, error code on failure. | ||
903 | */ | ||
850 | int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, | 904 | int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, |
851 | const struct drm_bridge_funcs *funcs) | 905 | const struct drm_bridge_funcs *funcs) |
852 | { | 906 | { |
@@ -870,6 +924,12 @@ int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, | |||
870 | } | 924 | } |
871 | EXPORT_SYMBOL(drm_bridge_init); | 925 | EXPORT_SYMBOL(drm_bridge_init); |
872 | 926 | ||
927 | /** | ||
928 | * drm_bridge_cleanup - cleans up an initialised bridge | ||
929 | * @bridge: bridge to cleanup | ||
930 | * | ||
931 | * Cleans up the bridge but doesn't free the object. | ||
932 | */ | ||
873 | void drm_bridge_cleanup(struct drm_bridge *bridge) | 933 | void drm_bridge_cleanup(struct drm_bridge *bridge) |
874 | { | 934 | { |
875 | struct drm_device *dev = bridge->dev; | 935 | struct drm_device *dev = bridge->dev; |
@@ -882,6 +942,19 @@ void drm_bridge_cleanup(struct drm_bridge *bridge) | |||
882 | } | 942 | } |
883 | EXPORT_SYMBOL(drm_bridge_cleanup); | 943 | EXPORT_SYMBOL(drm_bridge_cleanup); |
884 | 944 | ||
945 | /** | ||
946 | * drm_encoder_init - Init a preallocated encoder | ||
947 | * @dev: drm device | ||
948 | * @encoder: the encoder to init | ||
949 | * @funcs: callbacks for this encoder | ||
950 | * @encoder_type: user visible type of the encoder | ||
951 | * | ||
952 | * Initialises a preallocated encoder. Encoder should be | ||
953 | * subclassed as part of driver encoder objects. | ||
954 | * | ||
955 | * Returns: | ||
956 | * Zero on success, error code on failure. | ||
957 | */ | ||
885 | int drm_encoder_init(struct drm_device *dev, | 958 | int drm_encoder_init(struct drm_device *dev, |
886 | struct drm_encoder *encoder, | 959 | struct drm_encoder *encoder, |
887 | const struct drm_encoder_funcs *funcs, | 960 | const struct drm_encoder_funcs *funcs, |
@@ -909,6 +982,12 @@ int drm_encoder_init(struct drm_device *dev, | |||
909 | } | 982 | } |
910 | EXPORT_SYMBOL(drm_encoder_init); | 983 | EXPORT_SYMBOL(drm_encoder_init); |
911 | 984 | ||
985 | /** | ||
986 | * drm_encoder_cleanup - cleans up an initialised encoder | ||
987 | * @encoder: encoder to cleanup | ||
988 | * | ||
989 | * Cleans up the encoder but doesn't free the object. | ||
990 | */ | ||
912 | void drm_encoder_cleanup(struct drm_encoder *encoder) | 991 | void drm_encoder_cleanup(struct drm_encoder *encoder) |
913 | { | 992 | { |
914 | struct drm_device *dev = encoder->dev; | 993 | struct drm_device *dev = encoder->dev; |
@@ -930,9 +1009,10 @@ EXPORT_SYMBOL(drm_encoder_cleanup); | |||
930 | * @format_count: number of elements in @formats | 1009 | * @format_count: number of elements in @formats |
931 | * @priv: plane is private (hidden from userspace)? | 1010 | * @priv: plane is private (hidden from userspace)? |
932 | * | 1011 | * |
933 | * Inits a new object created as base part of a driver plane object. | 1012 | * Inits a preallocated plane object created as base part of a driver plane |
1013 | * object. | ||
934 | * | 1014 | * |
935 | * RETURNS: | 1015 | * Returns: |
936 | * Zero on success, error code on failure. | 1016 | * Zero on success, error code on failure. |
937 | */ | 1017 | */ |
938 | int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, | 1018 | int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, |
@@ -1033,50 +1113,6 @@ void drm_plane_force_disable(struct drm_plane *plane) | |||
1033 | } | 1113 | } |
1034 | EXPORT_SYMBOL(drm_plane_force_disable); | 1114 | EXPORT_SYMBOL(drm_plane_force_disable); |
1035 | 1115 | ||
1036 | /** | ||
1037 | * drm_mode_create - create a new display mode | ||
1038 | * @dev: DRM device | ||
1039 | * | ||
1040 | * Create a new drm_display_mode, give it an ID, and return it. | ||
1041 | * | ||
1042 | * RETURNS: | ||
1043 | * Pointer to new mode on success, NULL on error. | ||
1044 | */ | ||
1045 | struct drm_display_mode *drm_mode_create(struct drm_device *dev) | ||
1046 | { | ||
1047 | struct drm_display_mode *nmode; | ||
1048 | |||
1049 | nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL); | ||
1050 | if (!nmode) | ||
1051 | return NULL; | ||
1052 | |||
1053 | if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) { | ||
1054 | kfree(nmode); | ||
1055 | return NULL; | ||
1056 | } | ||
1057 | |||
1058 | return nmode; | ||
1059 | } | ||
1060 | EXPORT_SYMBOL(drm_mode_create); | ||
1061 | |||
1062 | /** | ||
1063 | * drm_mode_destroy - remove a mode | ||
1064 | * @dev: DRM device | ||
1065 | * @mode: mode to remove | ||
1066 | * | ||
1067 | * Free @mode's unique identifier, then free it. | ||
1068 | */ | ||
1069 | void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) | ||
1070 | { | ||
1071 | if (!mode) | ||
1072 | return; | ||
1073 | |||
1074 | drm_mode_object_put(dev, &mode->base); | ||
1075 | |||
1076 | kfree(mode); | ||
1077 | } | ||
1078 | EXPORT_SYMBOL(drm_mode_destroy); | ||
1079 | |||
1080 | static int drm_mode_create_standard_connector_properties(struct drm_device *dev) | 1116 | static int drm_mode_create_standard_connector_properties(struct drm_device *dev) |
1081 | { | 1117 | { |
1082 | struct drm_property *edid; | 1118 | struct drm_property *edid; |
@@ -1280,6 +1316,10 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr | |||
1280 | return 0; | 1316 | return 0; |
1281 | } | 1317 | } |
1282 | 1318 | ||
1319 | /* | ||
1320 | * NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is | ||
1321 | * the drm core's responsibility to set up mode control groups. | ||
1322 | */ | ||
1283 | int drm_mode_group_init_legacy_group(struct drm_device *dev, | 1323 | int drm_mode_group_init_legacy_group(struct drm_device *dev, |
1284 | struct drm_mode_group *group) | 1324 | struct drm_mode_group *group) |
1285 | { | 1325 | { |
@@ -1356,7 +1396,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, | |||
1356 | * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to | 1396 | * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to |
1357 | * the caller. | 1397 | * the caller. |
1358 | * | 1398 | * |
1359 | * RETURNS: | 1399 | * Returns: |
1360 | * Zero on success, errno on failure. | 1400 | * Zero on success, errno on failure. |
1361 | */ | 1401 | */ |
1362 | static int drm_crtc_convert_umode(struct drm_display_mode *out, | 1402 | static int drm_crtc_convert_umode(struct drm_display_mode *out, |
@@ -1399,7 +1439,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out, | |||
1399 | * | 1439 | * |
1400 | * Called by the user via ioctl. | 1440 | * Called by the user via ioctl. |
1401 | * | 1441 | * |
1402 | * RETURNS: | 1442 | * Returns: |
1403 | * Zero on success, errno on failure. | 1443 | * Zero on success, errno on failure. |
1404 | */ | 1444 | */ |
1405 | int drm_mode_getresources(struct drm_device *dev, void *data, | 1445 | int drm_mode_getresources(struct drm_device *dev, void *data, |
@@ -1584,7 +1624,7 @@ out: | |||
1584 | * | 1624 | * |
1585 | * Called by the user via ioctl. | 1625 | * Called by the user via ioctl. |
1586 | * | 1626 | * |
1587 | * RETURNS: | 1627 | * Returns: |
1588 | * Zero on success, errno on failure. | 1628 | * Zero on success, errno on failure. |
1589 | */ | 1629 | */ |
1590 | int drm_mode_getcrtc(struct drm_device *dev, | 1630 | int drm_mode_getcrtc(struct drm_device *dev, |
@@ -1653,7 +1693,7 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, | |||
1653 | * | 1693 | * |
1654 | * Called by the user via ioctl. | 1694 | * Called by the user via ioctl. |
1655 | * | 1695 | * |
1656 | * RETURNS: | 1696 | * Returns: |
1657 | * Zero on success, errno on failure. | 1697 | * Zero on success, errno on failure. |
1658 | */ | 1698 | */ |
1659 | int drm_mode_getconnector(struct drm_device *dev, void *data, | 1699 | int drm_mode_getconnector(struct drm_device *dev, void *data, |
@@ -1788,6 +1828,19 @@ out: | |||
1788 | return ret; | 1828 | return ret; |
1789 | } | 1829 | } |
1790 | 1830 | ||
1831 | /** | ||
1832 | * drm_mode_getencoder - get encoder configuration | ||
1833 | * @dev: drm device for the ioctl | ||
1834 | * @data: data pointer for the ioctl | ||
1835 | * @file_priv: drm file for the ioctl call | ||
1836 | * | ||
1837 | * Construct an encoder configuration structure to return to the user. | ||
1838 | * | ||
1839 | * Called by the user via ioctl. | ||
1840 | * | ||
1841 | * Returns: | ||
1842 | * Zero on success, errno on failure. | ||
1843 | */ | ||
1791 | int drm_mode_getencoder(struct drm_device *dev, void *data, | 1844 | int drm_mode_getencoder(struct drm_device *dev, void *data, |
1792 | struct drm_file *file_priv) | 1845 | struct drm_file *file_priv) |
1793 | { | 1846 | { |
@@ -1823,15 +1876,20 @@ out: | |||
1823 | } | 1876 | } |
1824 | 1877 | ||
1825 | /** | 1878 | /** |
1826 | * drm_mode_getplane_res - get plane info | 1879 | * drm_mode_getplane_res - enumerate all plane resources |
1827 | * @dev: DRM device | 1880 | * @dev: DRM device |
1828 | * @data: ioctl data | 1881 | * @data: ioctl data |
1829 | * @file_priv: DRM file info | 1882 | * @file_priv: DRM file info |
1830 | * | 1883 | * |
1831 | * Return an plane count and set of IDs. | 1884 | * Construct a list of plane ids to return to the user. |
1885 | * | ||
1886 | * Called by the user via ioctl. | ||
1887 | * | ||
1888 | * Returns: | ||
1889 | * Zero on success, errno on failure. | ||
1832 | */ | 1890 | */ |
1833 | int drm_mode_getplane_res(struct drm_device *dev, void *data, | 1891 | int drm_mode_getplane_res(struct drm_device *dev, void *data, |
1834 | struct drm_file *file_priv) | 1892 | struct drm_file *file_priv) |
1835 | { | 1893 | { |
1836 | struct drm_mode_get_plane_res *plane_resp = data; | 1894 | struct drm_mode_get_plane_res *plane_resp = data; |
1837 | struct drm_mode_config *config; | 1895 | struct drm_mode_config *config; |
@@ -1869,16 +1927,20 @@ out: | |||
1869 | } | 1927 | } |
1870 | 1928 | ||
1871 | /** | 1929 | /** |
1872 | * drm_mode_getplane - get plane info | 1930 | * drm_mode_getplane - get plane configuration |
1873 | * @dev: DRM device | 1931 | * @dev: DRM device |
1874 | * @data: ioctl data | 1932 | * @data: ioctl data |
1875 | * @file_priv: DRM file info | 1933 | * @file_priv: DRM file info |
1876 | * | 1934 | * |
1877 | * Return plane info, including formats supported, gamma size, any | 1935 | * Construct a plane configuration structure to return to the user. |
1878 | * current fb, etc. | 1936 | * |
1937 | * Called by the user via ioctl. | ||
1938 | * | ||
1939 | * Returns: | ||
1940 | * Zero on success, errno on failure. | ||
1879 | */ | 1941 | */ |
1880 | int drm_mode_getplane(struct drm_device *dev, void *data, | 1942 | int drm_mode_getplane(struct drm_device *dev, void *data, |
1881 | struct drm_file *file_priv) | 1943 | struct drm_file *file_priv) |
1882 | { | 1944 | { |
1883 | struct drm_mode_get_plane *plane_resp = data; | 1945 | struct drm_mode_get_plane *plane_resp = data; |
1884 | struct drm_mode_object *obj; | 1946 | struct drm_mode_object *obj; |
@@ -1934,16 +1996,19 @@ out: | |||
1934 | } | 1996 | } |
1935 | 1997 | ||
1936 | /** | 1998 | /** |
1937 | * drm_mode_setplane - set up or tear down an plane | 1999 | * drm_mode_setplane - set a plane's configuration |
1938 | * @dev: DRM device | 2000 | * @dev: DRM device |
1939 | * @data: ioctl data | 2001 | * @data: ioctl data |
1940 | * @file_priv: DRM file info | 2002 | * @file_priv: DRM file info |
1941 | * | 2003 | * |
1942 | * Set plane info, including placement, fb, scaling, and other factors. | 2004 | * Set plane configuration, including placement, fb, scaling, and other factors. |
1943 | * Or pass a NULL fb to disable. | 2005 | * Or pass a NULL fb to disable. |
2006 | * | ||
2007 | * Returns: | ||
2008 | * Zero on success, errno on failure. | ||
1944 | */ | 2009 | */ |
1945 | int drm_mode_setplane(struct drm_device *dev, void *data, | 2010 | int drm_mode_setplane(struct drm_device *dev, void *data, |
1946 | struct drm_file *file_priv) | 2011 | struct drm_file *file_priv) |
1947 | { | 2012 | { |
1948 | struct drm_mode_set_plane *plane_req = data; | 2013 | struct drm_mode_set_plane *plane_req = data; |
1949 | struct drm_mode_object *obj; | 2014 | struct drm_mode_object *obj; |
@@ -2073,6 +2138,9 @@ out: | |||
2073 | * | 2138 | * |
2074 | * This is a little helper to wrap internal calls to the ->set_config driver | 2139 | * This is a little helper to wrap internal calls to the ->set_config driver |
2075 | * interface. The only thing it adds is correct refcounting dance. | 2140 | * interface. The only thing it adds is correct refcounting dance. |
2141 | * | ||
2142 | * Returns: | ||
2143 | * Zero on success, errno on failure. | ||
2076 | */ | 2144 | */ |
2077 | int drm_mode_set_config_internal(struct drm_mode_set *set) | 2145 | int drm_mode_set_config_internal(struct drm_mode_set *set) |
2078 | { | 2146 | { |
@@ -2157,7 +2225,7 @@ static int drm_crtc_check_viewport(const struct drm_crtc *crtc, | |||
2157 | * | 2225 | * |
2158 | * Called by the user via ioctl. | 2226 | * Called by the user via ioctl. |
2159 | * | 2227 | * |
2160 | * RETURNS: | 2228 | * Returns: |
2161 | * Zero on success, errno on failure. | 2229 | * Zero on success, errno on failure. |
2162 | */ | 2230 | */ |
2163 | int drm_mode_setcrtc(struct drm_device *dev, void *data, | 2231 | int drm_mode_setcrtc(struct drm_device *dev, void *data, |
@@ -2359,8 +2427,23 @@ out: | |||
2359 | return ret; | 2427 | return ret; |
2360 | 2428 | ||
2361 | } | 2429 | } |
2430 | |||
2431 | |||
2432 | /** | ||
2433 | * drm_mode_cursor_ioctl - set CRTC's cursor configuration | ||
2434 | * @dev: drm device for the ioctl | ||
2435 | * @data: data pointer for the ioctl | ||
2436 | * @file_priv: drm file for the ioctl call | ||
2437 | * | ||
2438 | * Set the cursor configuration based on user request. | ||
2439 | * | ||
2440 | * Called by the user via ioctl. | ||
2441 | * | ||
2442 | * Returns: | ||
2443 | * Zero on success, errno on failure. | ||
2444 | */ | ||
2362 | int drm_mode_cursor_ioctl(struct drm_device *dev, | 2445 | int drm_mode_cursor_ioctl(struct drm_device *dev, |
2363 | void *data, struct drm_file *file_priv) | 2446 | void *data, struct drm_file *file_priv) |
2364 | { | 2447 | { |
2365 | struct drm_mode_cursor *req = data; | 2448 | struct drm_mode_cursor *req = data; |
2366 | struct drm_mode_cursor2 new_req; | 2449 | struct drm_mode_cursor2 new_req; |
@@ -2371,6 +2454,21 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, | |||
2371 | return drm_mode_cursor_common(dev, &new_req, file_priv); | 2454 | return drm_mode_cursor_common(dev, &new_req, file_priv); |
2372 | } | 2455 | } |
2373 | 2456 | ||
2457 | /** | ||
2458 | * drm_mode_cursor2_ioctl - set CRTC's cursor configuration | ||
2459 | * @dev: drm device for the ioctl | ||
2460 | * @data: data pointer for the ioctl | ||
2461 | * @file_priv: drm file for the ioctl call | ||
2462 | * | ||
2463 | * Set the cursor configuration based on user request. This implements the 2nd | ||
2464 | * version of the cursor ioctl, which allows userspace to additionally specify | ||
2465 | * the hotspot of the pointer. | ||
2466 | * | ||
2467 | * Called by the user via ioctl. | ||
2468 | * | ||
2469 | * Returns: | ||
2470 | * Zero on success, errno on failure. | ||
2471 | */ | ||
2374 | int drm_mode_cursor2_ioctl(struct drm_device *dev, | 2472 | int drm_mode_cursor2_ioctl(struct drm_device *dev, |
2375 | void *data, struct drm_file *file_priv) | 2473 | void *data, struct drm_file *file_priv) |
2376 | { | 2474 | { |
@@ -2378,7 +2476,14 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev, | |||
2378 | return drm_mode_cursor_common(dev, req, file_priv); | 2476 | return drm_mode_cursor_common(dev, req, file_priv); |
2379 | } | 2477 | } |
2380 | 2478 | ||
2381 | /* Original addfb only supported RGB formats, so figure out which one */ | 2479 | /** |
2480 | * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description | ||
2481 | * @bpp: bits per pixel | ||
2482 | * @depth: bit depth per pixel | ||
2483 | * | ||
2484 | * Computes a drm fourcc pixel format code for the given @bpp/@depth values. | ||
2485 | * Useful in fbdev emulation code, since that deals in those values. | ||
2486 | */ | ||
2382 | uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) | 2487 | uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) |
2383 | { | 2488 | { |
2384 | uint32_t fmt; | 2489 | uint32_t fmt; |
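A worked example of the mapping: the classic 32 bpp, depth 24 framebuffer that fbdev emulation deals in comes back as the XRGB8888 fourcc:

uint32_t fmt = drm_mode_legacy_fb_format(32, 24);	/* DRM_FORMAT_XRGB8888 */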
@@ -2420,11 +2525,12 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format); | |||
2420 | * @data: data pointer for the ioctl | 2525 | * @data: data pointer for the ioctl |
2421 | * @file_priv: drm file for the ioctl call | 2526 | * @file_priv: drm file for the ioctl call |
2422 | * | 2527 | * |
2423 | * Add a new FB to the specified CRTC, given a user request. | 2528 | * Add a new FB to the specified CRTC, given a user request. This is the |
2529 | * original addfb ioctl which only supported RGB formats. | ||
2424 | * | 2530 | * |
2425 | * Called by the user via ioctl. | 2531 | * Called by the user via ioctl. |
2426 | * | 2532 | * |
2427 | * RETURNS: | 2533 | * Returns: |
2428 | * Zero on success, errno on failure. | 2534 | * Zero on success, errno on failure. |
2429 | */ | 2535 | */ |
2430 | int drm_mode_addfb(struct drm_device *dev, | 2536 | int drm_mode_addfb(struct drm_device *dev, |
@@ -2597,11 +2703,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) | |||
2597 | * @data: data pointer for the ioctl | 2703 | * @data: data pointer for the ioctl |
2598 | * @file_priv: drm file for the ioctl call | 2704 | * @file_priv: drm file for the ioctl call |
2599 | * | 2705 | * |
2600 | * Add a new FB to the specified CRTC, given a user request with format. | 2706 | * Add a new FB to the specified CRTC, given a user request with format. This is |
2707 | * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers | ||
2708 | * and uses fourcc codes as pixel format specifiers. | ||
2601 | * | 2709 | * |
2602 | * Called by the user via ioctl. | 2710 | * Called by the user via ioctl. |
2603 | * | 2711 | * |
2604 | * RETURNS: | 2712 | * Returns: |
2605 | * Zero on success, errno on failure. | 2713 | * Zero on success, errno on failure. |
2606 | */ | 2714 | */ |
2607 | int drm_mode_addfb2(struct drm_device *dev, | 2715 | int drm_mode_addfb2(struct drm_device *dev, |
@@ -2661,7 +2769,7 @@ int drm_mode_addfb2(struct drm_device *dev, | |||
2661 | * | 2769 | * |
2662 | * Called by the user via ioctl. | 2770 | * Called by the user via ioctl. |
2663 | * | 2771 | * |
2664 | * RETURNS: | 2772 | * Returns: |
2665 | * Zero on success, errno on failure. | 2773 | * Zero on success, errno on failure. |
2666 | */ | 2774 | */ |
2667 | int drm_mode_rmfb(struct drm_device *dev, | 2775 | int drm_mode_rmfb(struct drm_device *dev, |
@@ -2715,7 +2823,7 @@ fail_lookup: | |||
2715 | * | 2823 | * |
2716 | * Called by the user via ioctl. | 2824 | * Called by the user via ioctl. |
2717 | * | 2825 | * |
2718 | * RETURNS: | 2826 | * Returns: |
2719 | * Zero on success, errno on failure. | 2827 | * Zero on success, errno on failure. |
2720 | */ | 2828 | */ |
2721 | int drm_mode_getfb(struct drm_device *dev, | 2829 | int drm_mode_getfb(struct drm_device *dev, |
@@ -2759,6 +2867,25 @@ int drm_mode_getfb(struct drm_device *dev, | |||
2759 | return ret; | 2867 | return ret; |
2760 | } | 2868 | } |
2761 | 2869 | ||
2870 | /** | ||
2871 | * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB | ||
2872 | * @dev: drm device for the ioctl | ||
2873 | * @data: data pointer for the ioctl | ||
2874 | * @file_priv: drm file for the ioctl call | ||
2875 | * | ||
2876 | * Lookup the FB and flush out the damaged area supplied by userspace as a clip | ||
2877 | * rectangle list. Generic userspace which does frontbuffer rendering must call | ||
2878 | * this ioctl to flush out the changes on manual-update display outputs, e.g. | ||
2879 | * usb display-link, mipi manual update panels or edp panel self refresh modes. | ||
2880 | * | ||
2881 | * Modesetting drivers which always update the frontbuffer do not need to | ||
2882 | * implement the corresponding ->dirty framebuffer callback. | ||
2883 | * | ||
2884 | * Called by the user via ioctl. | ||
2885 | * | ||
2886 | * Returns: | ||
2887 | * Zero on success, errno on failure. | ||
2888 | */ | ||
2762 | int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | 2889 | int drm_mode_dirtyfb_ioctl(struct drm_device *dev, |
2763 | void *data, struct drm_file *file_priv) | 2890 | void *data, struct drm_file *file_priv) |
2764 | { | 2891 | { |
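From the userspace side, the dirtyfb call carries a framebuffer id plus a clip-rectangle list. A hedged sketch in libdrm style (fd and fb_id are assumed to come from earlier setup):

struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
struct drm_mode_fb_dirty_cmd cmd = {
	.fb_id		= fb_id,
	.num_clips	= 1,
	.clips_ptr	= (uint64_t)(uintptr_t)&clip,
};
drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);	/* flush the damaged rect */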
@@ -2836,7 +2963,7 @@ out_err1: | |||
2836 | * | 2963 | * |
2837 | * Called by the user via ioctl. | 2964 | * Called by the user via ioctl. |
2838 | * | 2965 | * |
2839 | * RETURNS: | 2966 | * Returns: |
2840 | * Zero on success, errno on failure. | 2967 | * Zero on success, errno on failure. |
2841 | */ | 2968 | */ |
2842 | void drm_fb_release(struct drm_file *priv) | 2969 | void drm_fb_release(struct drm_file *priv) |
@@ -2860,6 +2987,20 @@ void drm_fb_release(struct drm_file *priv) | |||
2860 | mutex_unlock(&priv->fbs_lock); | 2987 | mutex_unlock(&priv->fbs_lock); |
2861 | } | 2988 | } |
2862 | 2989 | ||
2990 | /** | ||
2991 | * drm_property_create - create a new property type | ||
2992 | * @dev: drm device | ||
2993 | * @flags: flags specifying the property type | ||
2994 | * @name: name of the property | ||
2995 | * @num_values: number of pre-defined values | ||
2996 | * | ||
2997 | * This creates a new generic drm property which can then be attached to a drm | ||
2998 | * object with drm_object_attach_property. The returned property object must be | ||
2999 | * freed with drm_property_destroy. | ||
3000 | * | ||
3001 | * Returns: | ||
3002 | * A pointer to the newly created property on success, NULL on failure. | ||
3003 | */ | ||
2863 | struct drm_property *drm_property_create(struct drm_device *dev, int flags, | 3004 | struct drm_property *drm_property_create(struct drm_device *dev, int flags, |
2864 | const char *name, int num_values) | 3005 | const char *name, int num_values) |
2865 | { | 3006 | { |
@@ -2898,6 +3039,24 @@ fail: | |||
2898 | } | 3039 | } |
2899 | EXPORT_SYMBOL(drm_property_create); | 3040 | EXPORT_SYMBOL(drm_property_create); |
2900 | 3041 | ||
3042 | /** | ||
3043 | * drm_property_create_enum - create a new enumeration property type | ||
3044 | * @dev: drm device | ||
3045 | * @flags: flags specifying the property type | ||
3046 | * @name: name of the property | ||
3047 | * @props: enumeration lists with property values | ||
3048 | * @num_values: number of pre-defined values | ||
3049 | * | ||
3050 | * This creates a new generic drm property which can then be attached to a drm | ||
3051 | * object with drm_object_attach_property. The returned property object must be | ||
3052 | * freed with drm_property_destroy. | ||
3053 | * | ||
3054 | * Userspace is only allowed to set one of the predefined values for enumeration | ||
3055 | * properties. | ||
3056 | * | ||
3057 | * Returns: | ||
3058 | * A pointer to the newly created property on success, NULL on failure. | ||
3059 | */ | ||
2901 | struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, | 3060 | struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, |
2902 | const char *name, | 3061 | const char *name, |
2903 | const struct drm_prop_enum_list *props, | 3062 | const struct drm_prop_enum_list *props, |
@@ -2926,6 +3085,24 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, | |||
2926 | } | 3085 | } |
2927 | EXPORT_SYMBOL(drm_property_create_enum); | 3086 | EXPORT_SYMBOL(drm_property_create_enum); |
2928 | 3087 | ||
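
As an illustration, a minimal sketch of connector setup code using this helper; the property name, values and function names are made up for the example:

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static const struct drm_prop_enum_list foo_scaling_modes[] = {
		{ 0, "full" },
		{ 1, "center" },
		{ 2, "aspect" },
	};

	/* Hypothetical connector init: create the enum property and attach
	 * it with "full" (value 0) as the initial setting. */
	static void foo_add_scaling_property(struct drm_device *dev,
					     struct drm_connector *connector)
	{
		struct drm_property *prop;

		prop = drm_property_create_enum(dev, 0, "scaling mode",
						foo_scaling_modes,
						ARRAY_SIZE(foo_scaling_modes));
		if (prop)
			drm_object_attach_property(&connector->base, prop, 0);
	}
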
3088 | /** | ||
3089 | * drm_property_create_bitmask - create a new bitmask property type | ||
3090 | * @dev: drm device | ||
3091 | * @flags: flags specifying the property type | ||
3092 | * @name: name of the property | ||
3093 | * @props: enumeration lists with property bitflags | ||
3094 | * @num_values: number of pre-defined values | ||
3095 | * | ||
3096 | * This creates a new generic drm property which can then be attached to a drm | ||
3097 | * object with drm_object_attach_property. The returned property object must be | ||
3098 | * freed with drm_property_destroy. | ||
3099 | * | ||
3100 | * Compared to plain enumeration properties, userspace is allowed to set any | ||
3101 | * or'ed together combination of the predefined property bitflag values. | ||
3102 | * | ||
3103 | * Returns: | ||
3104 | * A pointer to the newly created property on success, NULL on failure. | ||
3105 | */ | ||
2929 | struct drm_property *drm_property_create_bitmask(struct drm_device *dev, | 3106 | struct drm_property *drm_property_create_bitmask(struct drm_device *dev, |
2930 | int flags, const char *name, | 3107 | int flags, const char *name, |
2931 | const struct drm_prop_enum_list *props, | 3108 | const struct drm_prop_enum_list *props, |
@@ -2954,6 +3131,24 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev, | |||
2954 | } | 3131 | } |
2955 | EXPORT_SYMBOL(drm_property_create_bitmask); | 3132 | EXPORT_SYMBOL(drm_property_create_bitmask); |
2956 | 3133 | ||
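
A sketch along the same lines for a bitmask property; the .type field of each entry is the bit position, so userspace may set any or'ed subset of BIT(0) and BIT(1) here. Names are hypothetical:

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static const struct drm_prop_enum_list foo_rotations[] = {
		{ 0, "rotate-0" },	/* bit 0 */
		{ 1, "rotate-180" },	/* bit 1 */
	};

	static struct drm_property *foo_create_rotation_prop(struct drm_device *dev)
	{
		return drm_property_create_bitmask(dev, 0, "rotation",
						   foo_rotations,
						   ARRAY_SIZE(foo_rotations));
	}
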
3134 | /** | ||
3135 | * drm_property_create_range - create a new ranged property type | ||
3136 | * @dev: drm device | ||
3137 | * @flags: flags specifying the property type | ||
3138 | * @name: name of the property | ||
3139 | * @min: minimum value of the property | ||
3140 | * @max: maximum value of the property | ||
3141 | * | ||
3142 | * This creates a new generic drm property which can then be attached to a drm | ||
3143 | * object with drm_object_attach_property. The returned property object must be | ||
3144 | * freed with drm_property_destroy. | ||
3145 | * | ||
3146 | * Userspace is allowed to set any integer value in the (min, max) range, | ||
3147 | * inclusive. | ||
3148 | * | ||
3149 | * Returns: | ||
3150 | * A pointer to the newly created property on success, NULL on failure. | ||
3151 | */ | ||
2957 | struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, | 3152 | struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, |
2958 | const char *name, | 3153 | const char *name, |
2959 | uint64_t min, uint64_t max) | 3154 | uint64_t min, uint64_t max) |
@@ -2973,6 +3168,21 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags | |||
2973 | } | 3168 | } |
2974 | EXPORT_SYMBOL(drm_property_create_range); | 3169 | EXPORT_SYMBOL(drm_property_create_range); |
2975 | 3170 | ||
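
And a sketch for a range property, again with a made-up name; the initial value handed to drm_object_attach_property() must lie within the range:

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	/* Hypothetical: expose a 0..255 "brightness" knob on a connector,
	 * starting at mid scale. */
	static void foo_add_brightness_property(struct drm_device *dev,
						struct drm_connector *connector)
	{
		struct drm_property *prop;

		prop = drm_property_create_range(dev, 0, "brightness", 0, 255);
		if (prop)
			drm_object_attach_property(&connector->base, prop, 128);
	}
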
3171 | /** | ||
3172 | * drm_property_add_enum - add a possible value to an enumeration property | ||
3173 | * @property: enumeration property to change | ||
3174 | * @index: index of the new enumeration | ||
3175 | * @value: value of the new enumeration | ||
3176 | * @name: symbolic name of the new enumeration | ||
3177 | * | ||
3178 | * This function adds enumerations to a property. | ||
3179 | * | ||
3180 | * Its use is deprecated; drivers should use one of the more specific helpers | ||
3181 | * to directly create the property with all enumerations already attached. | ||
3182 | * | ||
3183 | * Returns: | ||
3184 | * Zero on success, error code on failure. | ||
3185 | */ | ||
2976 | int drm_property_add_enum(struct drm_property *property, int index, | 3186 | int drm_property_add_enum(struct drm_property *property, int index, |
2977 | uint64_t value, const char *name) | 3187 | uint64_t value, const char *name) |
2978 | { | 3188 | { |
@@ -3012,6 +3222,14 @@ int drm_property_add_enum(struct drm_property *property, int index, | |||
3012 | } | 3222 | } |
3013 | EXPORT_SYMBOL(drm_property_add_enum); | 3223 | EXPORT_SYMBOL(drm_property_add_enum); |
3014 | 3224 | ||
3225 | /** | ||
3226 | * drm_property_destroy - destroy a drm property | ||
3227 | * @dev: drm device | ||
3228 | * @property: property to destroy | ||
3229 | * | ||
3230 | * This function frees a property including any attached resources like | ||
3231 | * enumeration values. | ||
3232 | */ | ||
3015 | void drm_property_destroy(struct drm_device *dev, struct drm_property *property) | 3233 | void drm_property_destroy(struct drm_device *dev, struct drm_property *property) |
3016 | { | 3234 | { |
3017 | struct drm_property_enum *prop_enum, *pt; | 3235 | struct drm_property_enum *prop_enum, *pt; |
@@ -3029,6 +3247,16 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property) | |||
3029 | } | 3247 | } |
3030 | EXPORT_SYMBOL(drm_property_destroy); | 3248 | EXPORT_SYMBOL(drm_property_destroy); |
3031 | 3249 | ||
3250 | /** | ||
3251 | * drm_object_attach_property - attach a property to a modeset object | ||
3252 | * @obj: drm modeset object | ||
3253 | * @property: property to attach | ||
3254 | * @init_val: initial value of the property | ||
3255 | * | ||
3256 | * This attaches the given property to the modeset object with the given initial | ||
3257 | * value. Currently this function cannot fail since the properties are stored in | ||
3258 | * a statically sized array. | ||
3259 | */ | ||
3032 | void drm_object_attach_property(struct drm_mode_object *obj, | 3260 | void drm_object_attach_property(struct drm_mode_object *obj, |
3033 | struct drm_property *property, | 3261 | struct drm_property *property, |
3034 | uint64_t init_val) | 3262 | uint64_t init_val) |
@@ -3049,6 +3277,19 @@ void drm_object_attach_property(struct drm_mode_object *obj, | |||
3049 | } | 3277 | } |
3050 | EXPORT_SYMBOL(drm_object_attach_property); | 3278 | EXPORT_SYMBOL(drm_object_attach_property); |
3051 | 3279 | ||
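
To make the software-state semantics of the attach and the following two accessors concrete, a small sketch (function name hypothetical):

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static void foo_attach_and_check(struct drm_crtc *crtc,
					 struct drm_property *prop)
	{
		uint64_t val;

		drm_object_attach_property(&crtc->base, prop, 0);

		/* Reflects the software state only; may lag the hardware. */
		if (drm_object_property_get_value(&crtc->base, prop, &val) == 0)
			DRM_DEBUG_KMS("property value: %llu\n",
				      (unsigned long long)val);
	}
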
3280 | /** | ||
3281 | * drm_object_property_set_value - set the value of a property | ||
3282 | * @obj: drm mode object to set property value for | ||
3283 | * @property: property to set | ||
3284 | * @val: value the property should be set to | ||
3285 | * | ||
3286 | * This function sets a given property on a given object. It only changes the | ||
3287 | * software state of the property; it does not call into the driver's | ||
3288 | * ->set_property callback. | ||
3289 | * | ||
3290 | * Returns: | ||
3291 | * Zero on success, error code on failure. | ||
3292 | */ | ||
3052 | int drm_object_property_set_value(struct drm_mode_object *obj, | 3293 | int drm_object_property_set_value(struct drm_mode_object *obj, |
3053 | struct drm_property *property, uint64_t val) | 3294 | struct drm_property *property, uint64_t val) |
3054 | { | 3295 | { |
@@ -3065,6 +3306,20 @@ int drm_object_property_set_value(struct drm_mode_object *obj, | |||
3065 | } | 3306 | } |
3066 | EXPORT_SYMBOL(drm_object_property_set_value); | 3307 | EXPORT_SYMBOL(drm_object_property_set_value); |
3067 | 3308 | ||
3309 | /** | ||
3310 | * drm_object_property_get_value - retrieve the value of a property | ||
3311 | * @obj: drm mode object to get property value from | ||
3312 | * @property: property to retrieve | ||
3313 | * @val: storage for the property value | ||
3314 | * | ||
3315 | * This function retrieves the software state of the given property for the | ||
3316 | * given object. Since there is no driver callback to retrieve the current | ||
3317 | * property value, this might be out of sync with the hardware, depending | ||
3318 | * upon the driver and property. | ||
3319 | * | ||
3320 | * Returns: | ||
3321 | * Zero on success, error code on failure. | ||
3322 | */ | ||
3068 | int drm_object_property_get_value(struct drm_mode_object *obj, | 3323 | int drm_object_property_get_value(struct drm_mode_object *obj, |
3069 | struct drm_property *property, uint64_t *val) | 3324 | struct drm_property *property, uint64_t *val) |
3070 | { | 3325 | { |
@@ -3081,6 +3336,19 @@ int drm_object_property_get_value(struct drm_mode_object *obj, | |||
3081 | } | 3336 | } |
3082 | EXPORT_SYMBOL(drm_object_property_get_value); | 3337 | EXPORT_SYMBOL(drm_object_property_get_value); |
3083 | 3338 | ||
3339 | /** | ||
3340 | * drm_mode_getproperty_ioctl - get the current value of a connector's property | ||
3341 | * @dev: DRM device | ||
3342 | * @data: ioctl data | ||
3343 | * @file_priv: DRM file info | ||
3344 | * | ||
3345 | * This function retrieves the current value for a connector's property. | ||
3346 | * | ||
3347 | * Called by the user via ioctl. | ||
3348 | * | ||
3349 | * Returns: | ||
3350 | * Zero on success, errno on failure. | ||
3351 | */ | ||
3084 | int drm_mode_getproperty_ioctl(struct drm_device *dev, | 3352 | int drm_mode_getproperty_ioctl(struct drm_device *dev, |
3085 | void *data, struct drm_file *file_priv) | 3353 | void *data, struct drm_file *file_priv) |
3086 | { | 3354 | { |
@@ -3219,6 +3487,20 @@ static void drm_property_destroy_blob(struct drm_device *dev, | |||
3219 | kfree(blob); | 3487 | kfree(blob); |
3220 | } | 3488 | } |
3221 | 3489 | ||
3490 | /** | ||
3491 | * drm_mode_getblob_ioctl - get the contents of a blob property value | ||
3492 | * @dev: DRM device | ||
3493 | * @data: ioctl data | ||
3494 | * @file_priv: DRM file info | ||
3495 | * | ||
3496 | * This function retrieves the contents of a blob property. The value stored in | ||
3497 | * an object's blob property is just a normal modeset object id. | ||
3498 | * | ||
3499 | * Called by the user via ioctl. | ||
3500 | * | ||
3501 | * Returns: | ||
3502 | * Zero on success, errno on failure. | ||
3503 | */ | ||
3222 | int drm_mode_getblob_ioctl(struct drm_device *dev, | 3504 | int drm_mode_getblob_ioctl(struct drm_device *dev, |
3223 | void *data, struct drm_file *file_priv) | 3505 | void *data, struct drm_file *file_priv) |
3224 | { | 3506 | { |
@@ -3253,6 +3535,17 @@ done: | |||
3253 | return ret; | 3535 | return ret; |
3254 | } | 3536 | } |
3255 | 3537 | ||
3538 | /** | ||
3539 | * drm_mode_connector_update_edid_property - update the edid property of a connector | ||
3540 | * @connector: drm connector | ||
3541 | * @edid: new value of the edid property | ||
3542 | * | ||
3543 | * This function creates a new blob modeset object and assigns its id to the | ||
3544 | * connector's edid property. | ||
3545 | * | ||
3546 | * Returns: | ||
3547 | * Zero on success, errno on failure. | ||
3548 | */ | ||
3256 | int drm_mode_connector_update_edid_property(struct drm_connector *connector, | 3549 | int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
3257 | struct edid *edid) | 3550 | struct edid *edid) |
3258 | { | 3551 | { |
@@ -3310,6 +3603,20 @@ static bool drm_property_change_is_valid(struct drm_property *property, | |||
3310 | } | 3603 | } |
3311 | } | 3604 | } |
3312 | 3605 | ||
3606 | /** | ||
3607 | * drm_mode_connector_property_set_ioctl - set the current value of a connector property | ||
3608 | * @dev: DRM device | ||
3609 | * @data: ioctl data | ||
3610 | * @file_priv: DRM file info | ||
3611 | * | ||
3612 | * This function sets the current value for a connector's property. It also | ||
3613 | * calls into a driver's ->set_property callback to update the hardware state. | ||
3614 | * | ||
3615 | * Called by the user via ioctl. | ||
3616 | * | ||
3617 | * Returns: | ||
3618 | * Zero on success, errno on failure. | ||
3619 | */ | ||
3313 | int drm_mode_connector_property_set_ioctl(struct drm_device *dev, | 3620 | int drm_mode_connector_property_set_ioctl(struct drm_device *dev, |
3314 | void *data, struct drm_file *file_priv) | 3621 | void *data, struct drm_file *file_priv) |
3315 | { | 3622 | { |
@@ -3376,6 +3683,21 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj, | |||
3376 | return ret; | 3683 | return ret; |
3377 | } | 3684 | } |
3378 | 3685 | ||
3686 | /** | ||
3687 | * drm_mode_obj_get_properties_ioctl - get the current values of an object's properties | ||
3688 | * @dev: DRM device | ||
3689 | * @data: ioctl data | ||
3690 | * @file_priv: DRM file info | ||
3691 | * | ||
3692 | * This function retrieves the current values of an object's properties. Compared | ||
3693 | * to the connector specific ioctl this one is extended to also work on crtc and | ||
3694 | * plane objects. | ||
3695 | * | ||
3696 | * Called by the user via ioctl. | ||
3697 | * | ||
3698 | * Returns: | ||
3699 | * Zero on success, errno on failure. | ||
3700 | */ | ||
3379 | int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, | 3701 | int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, |
3380 | struct drm_file *file_priv) | 3702 | struct drm_file *file_priv) |
3381 | { | 3703 | { |
@@ -3432,6 +3754,22 @@ out: | |||
3432 | return ret; | 3754 | return ret; |
3433 | } | 3755 | } |
3434 | 3756 | ||
3757 | /** | ||
3758 | * drm_mode_obj_set_property_ioctl - set the current value of an object's property | ||
3759 | * @dev: DRM device | ||
3760 | * @data: ioctl data | ||
3761 | * @file_priv: DRM file info | ||
3762 | * | ||
3763 | * This function sets the current value for an object's property. It also calls | ||
3764 | * into a driver's ->set_property callback to update the hardware state. | ||
3765 | * Compared to the connector specific ioctl this one is extended to also work on | ||
3766 | * crtc and plane objects. | ||
3767 | * | ||
3768 | * Called by the user via ioctl. | ||
3769 | * | ||
3770 | * Returns: | ||
3771 | * Zero on success, errno on failure. | ||
3772 | */ | ||
3435 | int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, | 3773 | int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, |
3436 | struct drm_file *file_priv) | 3774 | struct drm_file *file_priv) |
3437 | { | 3775 | { |
@@ -3491,6 +3829,18 @@ out: | |||
3491 | return ret; | 3829 | return ret; |
3492 | } | 3830 | } |
3493 | 3831 | ||
3832 | /** | ||
3833 | * drm_mode_connector_attach_encoder - attach a connector to an encoder | ||
3834 | * @connector: connector to attach | ||
3835 | * @encoder: encoder to attach @connector to | ||
3836 | * | ||
3837 | * This function links up a connector to an encoder. Note that the routing | ||
3838 | * restrictions between encoders and crtcs are exposed to userspace through the | ||
3839 | * possible_clones and possible_crtcs bitmasks. | ||
3840 | * | ||
3841 | * Returns: | ||
3842 | * Zero on success, errno on failure. | ||
3843 | */ | ||
3494 | int drm_mode_connector_attach_encoder(struct drm_connector *connector, | 3844 | int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
3495 | struct drm_encoder *encoder) | 3845 | struct drm_encoder *encoder) |
3496 | { | 3846 | { |
@@ -3506,23 +3856,20 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector, | |||
3506 | } | 3856 | } |
3507 | EXPORT_SYMBOL(drm_mode_connector_attach_encoder); | 3857 | EXPORT_SYMBOL(drm_mode_connector_attach_encoder); |
3508 | 3858 | ||
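
For illustration, a hedged sketch of typical output setup code: the driver fills in the routing mask and then links the connector to its encoder (names hypothetical):

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static int foo_output_init(struct drm_encoder *encoder,
				   struct drm_connector *connector)
	{
		/* Bit n set means this encoder can drive crtc index n. */
		encoder->possible_crtcs = 0x1;

		return drm_mode_connector_attach_encoder(connector, encoder);
	}
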
3509 | void drm_mode_connector_detach_encoder(struct drm_connector *connector, | 3859 | /** |
3510 | struct drm_encoder *encoder) | 3860 | * drm_mode_crtc_set_gamma_size - set the gamma table size |
3511 | { | 3861 | * @crtc: CRTC to set the gamma table size for |
3512 | int i; | 3862 | * @gamma_size: size of the gamma table |
3513 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 3863 | * |
3514 | if (connector->encoder_ids[i] == encoder->base.id) { | 3864 | * Drivers which support gamma tables should set this to the supported gamma |
3515 | connector->encoder_ids[i] = 0; | 3865 | * table size when initializing the CRTC. Currently the drm core only supports a |
3516 | if (connector->encoder == encoder) | 3866 | * fixed gamma table size. |
3517 | connector->encoder = NULL; | 3867 | * |
3518 | break; | 3868 | * Returns: |
3519 | } | 3869 | * Zero on success, errno on failure. |
3520 | } | 3870 | */ |
3521 | } | ||
3522 | EXPORT_SYMBOL(drm_mode_connector_detach_encoder); | ||
3523 | |||
3524 | int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | 3871 | int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
3525 | int gamma_size) | 3872 | int gamma_size) |
3526 | { | 3873 | { |
3527 | crtc->gamma_size = gamma_size; | 3874 | crtc->gamma_size = gamma_size; |
3528 | 3875 | ||
@@ -3536,6 +3883,20 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | |||
3536 | } | 3883 | } |
3537 | EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); | 3884 | EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); |
3538 | 3885 | ||
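
A minimal sketch of a driver's CRTC init code using this, assuming the common 256-entry table for 8 bits per color channel:

	#include <drm/drm_crtc.h>

	static int foo_crtc_init_gamma(struct drm_crtc *crtc)
	{
		/* Allocates crtc->gamma_store; returns an errno (allocation
		 * failure) or zero. */
		return drm_mode_crtc_set_gamma_size(crtc, 256);
	}
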
3886 | /** | ||
3887 | * drm_mode_gamma_set_ioctl - set the gamma table | ||
3888 | * @dev: DRM device | ||
3889 | * @data: ioctl data | ||
3890 | * @file_priv: DRM file info | ||
3891 | * | ||
3892 | * Set the gamma table of a CRTC to the one passed in by the user. Userspace can | ||
3893 | * query the required gamma table size through drm_mode_gamma_get_ioctl. | ||
3894 | * | ||
3895 | * Called by the user via ioctl. | ||
3896 | * | ||
3897 | * Returns: | ||
3898 | * Zero on success, errno on failure. | ||
3899 | */ | ||
3539 | int drm_mode_gamma_set_ioctl(struct drm_device *dev, | 3900 | int drm_mode_gamma_set_ioctl(struct drm_device *dev, |
3540 | void *data, struct drm_file *file_priv) | 3901 | void *data, struct drm_file *file_priv) |
3541 | { | 3902 | { |
@@ -3595,6 +3956,21 @@ out: | |||
3595 | 3956 | ||
3596 | } | 3957 | } |
3597 | 3958 | ||
3959 | /** | ||
3960 | * drm_mode_gamma_get_ioctl - get the gamma table | ||
3961 | * @dev: DRM device | ||
3962 | * @data: ioctl data | ||
3963 | * @file_priv: DRM file info | ||
3964 | * | ||
3965 | * Copy the current gamma table into the storage provided. This also provides | ||
3966 | * the gamma table size the driver expects, which can be used to size the | ||
3967 | * allocated storage. | ||
3968 | * | ||
3969 | * Called by the user via ioctl. | ||
3970 | * | ||
3971 | * Returns: | ||
3972 | * Zero on success, errno on failure. | ||
3973 | */ | ||
3598 | int drm_mode_gamma_get_ioctl(struct drm_device *dev, | 3974 | int drm_mode_gamma_get_ioctl(struct drm_device *dev, |
3599 | void *data, struct drm_file *file_priv) | 3975 | void *data, struct drm_file *file_priv) |
3600 | { | 3976 | { |
@@ -3645,6 +4021,24 @@ out: | |||
3645 | return ret; | 4021 | return ret; |
3646 | } | 4022 | } |
3647 | 4023 | ||
4024 | /** | ||
4025 | * drm_mode_page_flip_ioctl - schedule an asynchronous fb update | ||
4026 | * @dev: DRM device | ||
4027 | * @data: ioctl data | ||
4028 | * @file_priv: DRM file info | ||
4029 | * | ||
4030 | * This schedules an asynchronous update on a given CRTC, called a page flip. | ||
4031 | * Optionally a drm event is generated to signal the completion of the update. | ||
4032 | * Generic drivers cannot assume that a pageflip with changed framebuffer | ||
4033 | * properties (including driver specific metadata like tiling layout) will work, | ||
4034 | * but some drivers support e.g. pixel format changes through the pageflip | ||
4035 | * ioctl. | ||
4036 | * | ||
4037 | * Called by the user via ioctl. | ||
4038 | * | ||
4039 | * Returns: | ||
4040 | * Zero on success, errno on failure. | ||
4041 | */ | ||
3648 | int drm_mode_page_flip_ioctl(struct drm_device *dev, | 4042 | int drm_mode_page_flip_ioctl(struct drm_device *dev, |
3649 | void *data, struct drm_file *file_priv) | 4043 | void *data, struct drm_file *file_priv) |
3650 | { | 4044 | { |
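
From the userspace side, a hedged sketch of driving this ioctl through libdrm, requesting a completion event and then consuming it; fd, crtc_id and fb_id are assumed from the caller's setup:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
	{
		drmEventContext evctx = {
			.version = DRM_EVENT_CONTEXT_VERSION,
			/* .page_flip_handler may be set to consume user_data */
		};
		int ret;

		ret = drmModePageFlip(fd, crtc_id, fb_id,
				      DRM_MODE_PAGE_FLIP_EVENT, NULL);
		if (ret)
			return ret;

		/* Reads the flip-complete event back from the fd. */
		return drmHandleEvent(fd, &evctx);
	}
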
@@ -3757,6 +4151,14 @@ out: | |||
3757 | return ret; | 4151 | return ret; |
3758 | } | 4152 | } |
3759 | 4153 | ||
4154 | /** | ||
4155 | * drm_mode_config_reset - call ->reset callbacks | ||
4156 | * @dev: drm device | ||
4157 | * | ||
4158 | * This function calls the ->reset callback of all crtcs, encoders and | ||
4159 | * connectors. Drivers can use this in e.g. their driver load or resume code to | ||
4160 | * reset hardware and software state. | ||
4161 | */ | ||
3760 | void drm_mode_config_reset(struct drm_device *dev) | 4162 | void drm_mode_config_reset(struct drm_device *dev) |
3761 | { | 4163 | { |
3762 | struct drm_crtc *crtc; | 4164 | struct drm_crtc *crtc; |
@@ -3780,16 +4182,66 @@ void drm_mode_config_reset(struct drm_device *dev) | |||
3780 | } | 4182 | } |
3781 | EXPORT_SYMBOL(drm_mode_config_reset); | 4183 | EXPORT_SYMBOL(drm_mode_config_reset); |
3782 | 4184 | ||
4185 | /** | ||
4186 | * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer | ||
4187 | * @dev: DRM device | ||
4188 | * @data: ioctl data | ||
4189 | * @file_priv: DRM file info | ||
4190 | * | ||
4191 | * This creates a new dumb buffer in the driver's backing storage manager (GEM, | ||
4192 | * TTM or something else entirely) and returns the resulting buffer handle. This | ||
4193 | * handle can then be wrapped up into a framebuffer modeset object. | ||
4194 | * | ||
4195 | * Note that userspace is not allowed to use such objects for render | ||
4196 | * acceleration - drivers must create their own private ioctls for such a use | ||
4197 | * case. | ||
4198 | * | ||
4199 | * Called by the user via ioctl. | ||
4200 | * | ||
4201 | * Returns: | ||
4202 | * Zero on success, errno on failure. | ||
4203 | */ | ||
3783 | int drm_mode_create_dumb_ioctl(struct drm_device *dev, | 4204 | int drm_mode_create_dumb_ioctl(struct drm_device *dev, |
3784 | void *data, struct drm_file *file_priv) | 4205 | void *data, struct drm_file *file_priv) |
3785 | { | 4206 | { |
3786 | struct drm_mode_create_dumb *args = data; | 4207 | struct drm_mode_create_dumb *args = data; |
4208 | u32 cpp, stride, size; | ||
3787 | 4209 | ||
3788 | if (!dev->driver->dumb_create) | 4210 | if (!dev->driver->dumb_create) |
3789 | return -ENOSYS; | 4211 | return -ENOSYS; |
4212 | if (!args->width || !args->height || !args->bpp) | ||
4213 | return -EINVAL; | ||
4214 | |||
4215 | /* overflow checks for 32bit size calculations */ | ||
4216 | cpp = DIV_ROUND_UP(args->bpp, 8); | ||
4217 | if (cpp > 0xffffffffU / args->width) | ||
4218 | return -EINVAL; | ||
4219 | stride = cpp * args->width; | ||
4220 | if (args->height > 0xffffffffU / stride) | ||
4221 | return -EINVAL; | ||
4222 | |||
4223 | /* test for wrap-around */ | ||
4224 | size = args->height * stride; | ||
4225 | if (PAGE_ALIGN(size) == 0) | ||
4226 | return -EINVAL; | ||
4227 | |||
3790 | return dev->driver->dumb_create(file_priv, dev, args); | 4228 | return dev->driver->dumb_create(file_priv, dev, args); |
3791 | } | 4229 | } |
3792 | 4230 | ||
4231 | /** | ||
4232 | * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer | ||
4233 | * @dev: DRM device | ||
4234 | * @data: ioctl data | ||
4235 | * @file_priv: DRM file info | ||
4236 | * | ||
4237 | * Allocate an offset in the drm device node's address space to be able to | ||
4238 | * memory map a dumb buffer. | ||
4239 | * | ||
4240 | * Called by the user via ioctl. | ||
4241 | * | ||
4242 | * Returns: | ||
4243 | * Zero on success, errno on failure. | ||
4244 | */ | ||
3793 | int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, | 4245 | int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, |
3794 | void *data, struct drm_file *file_priv) | 4246 | void *data, struct drm_file *file_priv) |
3795 | { | 4247 | { |
@@ -3802,6 +4254,21 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, | |||
3802 | return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); | 4254 | return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); |
3803 | } | 4255 | } |
3804 | 4256 | ||
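
Putting the two ioctls together, a hedged sketch of the canonical userspace dumb-buffer flow: create the buffer, request an mmap offset, then map it:

	#include <stdint.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <xf86drm.h>	/* pulls in drm.h and drm_mode.h */

	static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height,
				     uint32_t *handle, uint32_t *pitch)
	{
		struct drm_mode_create_dumb creq;
		struct drm_mode_map_dumb mreq;
		void *map;

		memset(&creq, 0, sizeof(creq));
		creq.width = width;
		creq.height = height;
		creq.bpp = 32;
		if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
			return NULL;

		memset(&mreq, 0, sizeof(mreq));
		mreq.handle = creq.handle;
		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
			return NULL;

		map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, mreq.offset);
		if (map == MAP_FAILED)
			return NULL;

		*handle = creq.handle;
		*pitch = creq.pitch;
		return map;
	}
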
4257 | /** | ||
4258 | * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer | ||
4259 | * @dev: DRM device | ||
4260 | * @data: ioctl data | ||
4261 | * @file_priv: DRM file info | ||
4262 | * | ||
4263 | * This destroys the userspace handle for the given dumb backing storage buffer. | ||
4264 | * Since buffer objects must be reference counted in the kernel a buffer object | ||
4265 | * won't be immediately freed if a framebuffer modeset object still uses it. | ||
4266 | * | ||
4267 | * Called by the user via ioctl. | ||
4268 | * | ||
4269 | * Returns: | ||
4270 | * Zero on success, errno on failure. | ||
4271 | */ | ||
3805 | int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, | 4272 | int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, |
3806 | void *data, struct drm_file *file_priv) | 4273 | void *data, struct drm_file *file_priv) |
3807 | { | 4274 | { |
@@ -3813,9 +4280,14 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, | |||
3813 | return dev->driver->dumb_destroy(file_priv, dev, args->handle); | 4280 | return dev->driver->dumb_destroy(file_priv, dev, args->handle); |
3814 | } | 4281 | } |
3815 | 4282 | ||
3816 | /* | 4283 | /** |
3817 | * Just need to support RGB formats here for compat with code that doesn't | 4284 | * drm_fb_get_bpp_depth - get the bpp/depth values for format |
3818 | * use pixel formats directly yet. | 4285 | * @format: pixel format (DRM_FORMAT_*) |
4286 | * @depth: storage for the depth value | ||
4287 | * @bpp: storage for the bpp value | ||
4288 | * | ||
4289 | * This only supports RGB formats here for compat with code that doesn't use | ||
4290 | * pixel formats directly yet. | ||
3819 | */ | 4291 | */ |
3820 | void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, | 4292 | void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, |
3821 | int *bpp) | 4293 | int *bpp) |
@@ -3887,7 +4359,7 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth); | |||
3887 | * drm_format_num_planes - get the number of planes for format | 4359 | * drm_format_num_planes - get the number of planes for format |
3888 | * @format: pixel format (DRM_FORMAT_*) | 4360 | * @format: pixel format (DRM_FORMAT_*) |
3889 | * | 4361 | * |
3890 | * RETURNS: | 4362 | * Returns: |
3891 | * The number of planes used by the specified pixel format. | 4363 | * The number of planes used by the specified pixel format. |
3892 | */ | 4364 | */ |
3893 | int drm_format_num_planes(uint32_t format) | 4365 | int drm_format_num_planes(uint32_t format) |
@@ -3922,7 +4394,7 @@ EXPORT_SYMBOL(drm_format_num_planes); | |||
3922 | * @format: pixel format (DRM_FORMAT_*) | 4394 | * @format: pixel format (DRM_FORMAT_*) |
3923 | * @plane: plane index | 4395 | * @plane: plane index |
3924 | * | 4396 | * |
3925 | * RETURNS: | 4397 | * Returns: |
3926 | * The bytes per pixel value for the specified plane. | 4398 | * The bytes per pixel value for the specified plane. |
3927 | */ | 4399 | */ |
3928 | int drm_format_plane_cpp(uint32_t format, int plane) | 4400 | int drm_format_plane_cpp(uint32_t format, int plane) |
@@ -3968,7 +4440,7 @@ EXPORT_SYMBOL(drm_format_plane_cpp); | |||
3968 | * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor | 4440 | * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor |
3969 | * @format: pixel format (DRM_FORMAT_*) | 4441 | * @format: pixel format (DRM_FORMAT_*) |
3970 | * | 4442 | * |
3971 | * RETURNS: | 4443 | * Returns: |
3972 | * The horizontal chroma subsampling factor for the | 4444 | * The horizontal chroma subsampling factor for the |
3973 | * specified pixel format. | 4445 | * specified pixel format. |
3974 | */ | 4446 | */ |
@@ -4003,7 +4475,7 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); | |||
4003 | * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor | 4475 | * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor |
4004 | * @format: pixel format (DRM_FORMAT_*) | 4476 | * @format: pixel format (DRM_FORMAT_*) |
4005 | * | 4477 | * |
4006 | * RETURNS: | 4478 | * Returns: |
4007 | * The vertical chroma subsampling factor for the | 4479 | * The vertical chroma subsampling factor for the |
4008 | * specified pixel format. | 4480 | * specified pixel format. |
4009 | */ | 4481 | */ |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index ea92b827e787..a85517854073 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -105,9 +105,6 @@ static void drm_mode_validate_flag(struct drm_connector *connector, | |||
105 | * @maxX: max width for modes | 105 | * @maxX: max width for modes |
106 | * @maxY: max height for modes | 106 | * @maxY: max height for modes |
107 | * | 107 | * |
108 | * LOCKING: | ||
109 | * Caller must hold mode config lock. | ||
110 | * | ||
111 | * Based on the helper callbacks implemented by @connector try to detect all | 108 | * Based on the helper callbacks implemented by @connector try to detect all |
112 | * valid modes. Modes will first be added to the connector's probed_modes list, | 109 | * valid modes. Modes will first be added to the connector's probed_modes list, |
113 | * then culled (based on validity and the @maxX, @maxY parameters) and put into | 110 | * then culled (based on validity and the @maxX, @maxY parameters) and put into |
@@ -117,8 +114,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector, | |||
117 | * @connector vfunc for drivers that use the crtc helpers for output mode | 114 | * @connector vfunc for drivers that use the crtc helpers for output mode |
118 | * filtering and detection. | 115 | * filtering and detection. |
119 | * | 116 | * |
120 | * RETURNS: | 117 | * Returns: |
121 | * Number of modes found on @connector. | 118 | * The number of modes found on @connector. |
122 | */ | 119 | */ |
123 | int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | 120 | int drm_helper_probe_single_connector_modes(struct drm_connector *connector, |
124 | uint32_t maxX, uint32_t maxY) | 121 | uint32_t maxX, uint32_t maxY) |
@@ -131,6 +128,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
131 | int mode_flags = 0; | 128 | int mode_flags = 0; |
132 | bool verbose_prune = true; | 129 | bool verbose_prune = true; |
133 | 130 | ||
131 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
132 | |||
134 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, | 133 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, |
135 | drm_get_connector_name(connector)); | 134 | drm_get_connector_name(connector)); |
136 | /* set all modes to the unverified state */ | 135 | /* set all modes to the unverified state */ |
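
Since the documented locking rule is replaced by a runtime check here, callers are now expected to look roughly like this sketch (the size limits are arbitrary):

	#include <drm/drmP.h>
	#include <drm/drm_crtc_helper.h>

	static int foo_probe_connector(struct drm_device *dev,
				       struct drm_connector *connector)
	{
		int count;

		mutex_lock(&dev->mode_config.mutex);
		count = drm_helper_probe_single_connector_modes(connector,
								8192, 8192);
		mutex_unlock(&dev->mode_config.mutex);

		return count;
	}
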
@@ -176,8 +175,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
176 | drm_mode_connector_list_update(connector); | 175 | drm_mode_connector_list_update(connector); |
177 | 176 | ||
178 | if (maxX && maxY) | 177 | if (maxX && maxY) |
179 | drm_mode_validate_size(dev, &connector->modes, maxX, | 178 | drm_mode_validate_size(dev, &connector->modes, maxX, maxY); |
180 | maxY, 0); | ||
181 | 179 | ||
182 | if (connector->interlace_allowed) | 180 | if (connector->interlace_allowed) |
183 | mode_flags |= DRM_MODE_FLAG_INTERLACE; | 181 | mode_flags |= DRM_MODE_FLAG_INTERLACE; |
@@ -219,18 +217,19 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); | |||
219 | * drm_helper_encoder_in_use - check if a given encoder is in use | 217 | * drm_helper_encoder_in_use - check if a given encoder is in use |
220 | * @encoder: encoder to check | 218 | * @encoder: encoder to check |
221 | * | 219 | * |
222 | * | 220 | * Checks whether @encoder is used by any connector in the current mode setting |
223 | * Caller must hold mode config lock. | 221 | * output configuration. This doesn't mean that it is actually enabled since |
222 | * the DPMS state is tracked separately. | ||
224 | * | 223 | * |
225 | * Walk @encoders's DRM device's mode_config and see if it's in use. | 224 | * Returns: |
226 | * | 225 | * True if @encoder is used, false otherwise. |
227 | * RETURNS: | ||
228 | * True if @encoder is part of the mode_config, false otherwise. | ||
229 | */ | 226 | */ |
230 | bool drm_helper_encoder_in_use(struct drm_encoder *encoder) | 227 | bool drm_helper_encoder_in_use(struct drm_encoder *encoder) |
231 | { | 228 | { |
232 | struct drm_connector *connector; | 229 | struct drm_connector *connector; |
233 | struct drm_device *dev = encoder->dev; | 230 | struct drm_device *dev = encoder->dev; |
231 | |||
232 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
234 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | 233 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) |
235 | if (connector->encoder == encoder) | 234 | if (connector->encoder == encoder) |
236 | return true; | 235 | return true; |
@@ -242,19 +241,19 @@ EXPORT_SYMBOL(drm_helper_encoder_in_use); | |||
242 | * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config | 241 | * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config |
243 | * @crtc: CRTC to check | 242 | * @crtc: CRTC to check |
244 | * | 243 | * |
245 | * LOCKING: | 244 | * Checks whether @crtc is used by any connector in the current mode setting |
246 | * Caller must hold mode config lock. | 245 | * output configuration. This doesn't mean that it is actually enabled since |
246 | * the DPMS state is tracked separately. | ||
247 | * | 247 | * |
248 | * Walk @crtc's DRM device's mode_config and see if it's in use. | 248 | * Returns: |
249 | * | 249 | * True if @crtc is used, false otherwise. |
250 | * RETURNS: | ||
251 | * True if @crtc is part of the mode_config, false otherwise. | ||
252 | */ | 250 | */ |
253 | bool drm_helper_crtc_in_use(struct drm_crtc *crtc) | 251 | bool drm_helper_crtc_in_use(struct drm_crtc *crtc) |
254 | { | 252 | { |
255 | struct drm_encoder *encoder; | 253 | struct drm_encoder *encoder; |
256 | struct drm_device *dev = crtc->dev; | 254 | struct drm_device *dev = crtc->dev; |
257 | /* FIXME: Locking around list access? */ | 255 | |
256 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | ||
258 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) | 257 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) |
259 | if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) | 258 | if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) |
260 | return true; | 259 | return true; |
@@ -283,11 +282,11 @@ drm_encoder_disable(struct drm_encoder *encoder) | |||
283 | * drm_helper_disable_unused_functions - disable unused objects | 282 | * drm_helper_disable_unused_functions - disable unused objects |
284 | * @dev: DRM device | 283 | * @dev: DRM device |
285 | * | 284 | * |
286 | * LOCKING: | 285 | * This function walks through the entire mode setting configuration of @dev. It |
287 | * Caller must hold mode config lock. | 286 | * will remove any crtc links of unused encoders and encoder links of |
288 | * | 287 | * disconnected connectors. Then it will disable all unused encoders and crtcs |
289 | * If an connector or CRTC isn't part of @dev's mode_config, it can be disabled | 288 | * either by calling their disable callback if available or by calling their |
290 | * by calling its dpms function, which should power it off. | 289 | * dpms callback with DRM_MODE_DPMS_OFF. |
291 | */ | 290 | */ |
292 | void drm_helper_disable_unused_functions(struct drm_device *dev) | 291 | void drm_helper_disable_unused_functions(struct drm_device *dev) |
293 | { | 292 | { |
@@ -295,6 +294,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) | |||
295 | struct drm_connector *connector; | 294 | struct drm_connector *connector; |
296 | struct drm_crtc *crtc; | 295 | struct drm_crtc *crtc; |
297 | 296 | ||
297 | drm_warn_on_modeset_not_all_locked(dev); | ||
298 | |||
298 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 299 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
299 | if (!connector->encoder) | 300 | if (!connector->encoder) |
300 | continue; | 301 | continue; |
@@ -355,9 +356,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev) | |||
355 | * @y: vertical offset into the surface | 356 | * @y: vertical offset into the surface |
356 | * @old_fb: old framebuffer, for cleanup | 357 | * @old_fb: old framebuffer, for cleanup |
357 | * | 358 | * |
358 | * LOCKING: | ||
359 | * Caller must hold mode config lock. | ||
360 | * | ||
361 | * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance | 359 | * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance |
362 | * to fixup or reject the mode prior to trying to set it. This is an internal | 360 | * to fixup or reject the mode prior to trying to set it. This is an internal |
363 | * helper that drivers could e.g. use to update properties that require the | 361 | * helper that drivers could e.g. use to update properties that require the |
@@ -367,8 +365,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev) | |||
367 | * drm_crtc_helper_set_config() helper function to drive the mode setting | 365 | * drm_crtc_helper_set_config() helper function to drive the mode setting |
368 | * sequence. | 366 | * sequence. |
369 | * | 367 | * |
370 | * RETURNS: | 368 | * Returns: |
371 | * True if the mode was set successfully, or false otherwise. | 369 | * True if the mode was set successfully, false otherwise. |
372 | */ | 370 | */ |
373 | bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | 371 | bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, |
374 | struct drm_display_mode *mode, | 372 | struct drm_display_mode *mode, |
@@ -384,6 +382,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
384 | struct drm_encoder *encoder; | 382 | struct drm_encoder *encoder; |
385 | bool ret = true; | 383 | bool ret = true; |
386 | 384 | ||
385 | drm_warn_on_modeset_not_all_locked(dev); | ||
386 | |||
387 | saved_enabled = crtc->enabled; | 387 | saved_enabled = crtc->enabled; |
388 | crtc->enabled = drm_helper_crtc_in_use(crtc); | 388 | crtc->enabled = drm_helper_crtc_in_use(crtc); |
389 | if (!crtc->enabled) | 389 | if (!crtc->enabled) |
@@ -560,17 +560,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) | |||
560 | * drm_crtc_helper_set_config - set a new config from userspace | 560 | * drm_crtc_helper_set_config - set a new config from userspace |
561 | * @set: mode set configuration | 561 | * @set: mode set configuration |
562 | * | 562 | * |
563 | * LOCKING: | ||
564 | * Caller must hold mode config lock. | ||
565 | * | ||
566 | * Setup a new configuration, provided by the upper layers (either an ioctl call | 563 | * Setup a new configuration, provided by the upper layers (either an ioctl call |
567 | from userspace or internally e.g. from the fbdev support code) in @set, and | 564 | from userspace or internally e.g. from the fbdev support code) in @set, and |
568 | enable it. This is the main helper function for drivers that implement | 565 | enable it. This is the main helper function for drivers that implement |
569 | * kernel mode setting with the crtc helper functions and the assorted | 566 | * kernel mode setting with the crtc helper functions and the assorted |
570 | * ->prepare(), ->modeset() and ->commit() helper callbacks. | 567 | * ->prepare(), ->modeset() and ->commit() helper callbacks. |
571 | * | 568 | * |
572 | * RETURNS: | 569 | * Returns: |
573 | * Returns 0 on success, -ERRNO on failure. | 570 | * Returns 0 on success, negative errno numbers on failure. |
574 | */ | 571 | */ |
575 | int drm_crtc_helper_set_config(struct drm_mode_set *set) | 572 | int drm_crtc_helper_set_config(struct drm_mode_set *set) |
576 | { | 573 | { |
@@ -612,6 +609,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
612 | 609 | ||
613 | dev = set->crtc->dev; | 610 | dev = set->crtc->dev; |
614 | 611 | ||
612 | drm_warn_on_modeset_not_all_locked(dev); | ||
613 | |||
615 | /* | 614 | /* |
616 | * Allocate space for the backup of all (non-pointer) encoder and | 615 | * Allocate space for the backup of all (non-pointer) encoder and |
617 | * connector data. | 616 | * connector data. |
@@ -924,8 +923,16 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) | |||
924 | } | 923 | } |
925 | EXPORT_SYMBOL(drm_helper_connector_dpms); | 924 | EXPORT_SYMBOL(drm_helper_connector_dpms); |
926 | 925 | ||
927 | int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, | 926 | /** |
928 | struct drm_mode_fb_cmd2 *mode_cmd) | 927 | * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata |
928 | * @fb: drm_framebuffer object to fill out | ||
929 | * @mode_cmd: metadata from the userspace fb creation request | ||
930 | * | ||
931 | * This helper can be used in a driver's fb_create callback to pre-fill the fb's | ||
932 | * metadata fields. | ||
933 | */ | ||
934 | void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, | ||
935 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
929 | { | 936 | { |
930 | int i; | 937 | int i; |
931 | 938 | ||
@@ -938,17 +945,36 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, | |||
938 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, | 945 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, |
939 | &fb->bits_per_pixel); | 946 | &fb->bits_per_pixel); |
940 | fb->pixel_format = mode_cmd->pixel_format; | 947 | fb->pixel_format = mode_cmd->pixel_format; |
941 | |||
942 | return 0; | ||
943 | } | 948 | } |
944 | EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); | 949 | EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); |
945 | 950 | ||
946 | int drm_helper_resume_force_mode(struct drm_device *dev) | 951 | /** |
952 | * drm_helper_resume_force_mode - force-restore mode setting configuration | ||
953 | * @dev: drm_device which should be restored | ||
954 | * | ||
955 | * Drivers which use the mode setting helpers can use this function to | ||
956 | * force-restore the mode setting configuration e.g. on resume or when something | ||
957 | * else might have trampled over the hw state (like some overzealous old BIOSen | ||
958 | * tended to do). | ||
959 | * | ||
960 | * This helper doesn't provide an error return value since restoring the old | ||
961 | * config should never fail due to resource allocation issues since the driver | ||
962 | * has successfully set the restored configuration already. Hence this should | ||
963 | * boil down to the equivalent of a few dpms on calls, which also don't provide | ||
964 | * an error code. | ||
965 | * | ||
966 | * Drivers where simply restoring an old configuration again might fail (e.g. | ||
967 | * due to slight differences in allocating shared resources when the | ||
968 | * configuration is restored in a different order than when userspace set it up) | ||
969 | * need to use their own restore logic. | ||
970 | */ | ||
971 | void drm_helper_resume_force_mode(struct drm_device *dev) | ||
947 | { | 972 | { |
948 | struct drm_crtc *crtc; | 973 | struct drm_crtc *crtc; |
949 | struct drm_encoder *encoder; | 974 | struct drm_encoder *encoder; |
950 | struct drm_crtc_helper_funcs *crtc_funcs; | 975 | struct drm_crtc_helper_funcs *crtc_funcs; |
951 | int ret, encoder_dpms; | 976 | int encoder_dpms; |
977 | bool ret; | ||
952 | 978 | ||
953 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 979 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
954 | 980 | ||
@@ -958,6 +984,7 @@ int drm_helper_resume_force_mode(struct drm_device *dev) | |||
958 | ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, | 984 | ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, |
959 | crtc->x, crtc->y, crtc->fb); | 985 | crtc->x, crtc->y, crtc->fb); |
960 | 986 | ||
987 | /* Restoring the old config should never fail! */ | ||
961 | if (ret == false) | 988 | if (ret == false) |
962 | DRM_ERROR("failed to set mode on crtc %p\n", crtc); | 989 | DRM_ERROR("failed to set mode on crtc %p\n", crtc); |
963 | 990 | ||
@@ -980,12 +1007,28 @@ int drm_helper_resume_force_mode(struct drm_device *dev) | |||
980 | drm_helper_choose_crtc_dpms(crtc)); | 1007 | drm_helper_choose_crtc_dpms(crtc)); |
981 | } | 1008 | } |
982 | } | 1009 | } |
1010 | |||
983 | /* disable the unused connectors while restoring the modesetting */ | 1011 | /* disable the unused connectors while restoring the modesetting */ |
984 | drm_helper_disable_unused_functions(dev); | 1012 | drm_helper_disable_unused_functions(dev); |
985 | return 0; | ||
986 | } | 1013 | } |
987 | EXPORT_SYMBOL(drm_helper_resume_force_mode); | 1014 | EXPORT_SYMBOL(drm_helper_resume_force_mode); |
988 | 1015 | ||
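
A hedged sketch of a resume path built on these helpers; foo_hw_init() stands in for the driver's own hardware reinitialization, and the modeset locks are taken around the restore since the helpers above assert them:

	#include <drm/drmP.h>
	#include <drm/drm_crtc_helper.h>

	static int foo_pm_resume(struct drm_device *dev)
	{
		foo_hw_init(dev);		/* hypothetical hw reinit */

		drm_modeset_lock_all(dev);
		drm_mode_config_reset(dev);
		drm_helper_resume_force_mode(dev);
		drm_modeset_unlock_all(dev);

		return 0;
	}
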
1016 | /** | ||
1017 | * drm_kms_helper_hotplug_event - fire off KMS hotplug events | ||
1018 | * @dev: drm_device whose connector state changed | ||
1019 | * | ||
1020 | * This function fires off the uevent for userspace and also calls the | ||
1021 | * output_poll_changed function, which is most commonly used to inform the fbdev | ||
1022 | * emulation code and allow it to update the fbcon output configuration. | ||
1023 | * | ||
1024 | * Drivers should call this from their hotplug handling code when a change is | ||
1025 | * detected. Note that this function does not do any output detection of its | ||
1026 | * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the | ||
1027 | * driver already. | ||
1028 | * | ||
1029 | * This function must be called from process context with no mode | ||
1030 | * setting locks held. | ||
1031 | */ | ||
989 | void drm_kms_helper_hotplug_event(struct drm_device *dev) | 1032 | void drm_kms_helper_hotplug_event(struct drm_device *dev) |
990 | { | 1033 | { |
991 | /* send a uevent + call fbdev */ | 1034 | /* send a uevent + call fbdev */ |
@@ -1054,6 +1097,16 @@ static void output_poll_execute(struct work_struct *work) | |||
1054 | schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); | 1097 | schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); |
1055 | } | 1098 | } |
1056 | 1099 | ||
1100 | /** | ||
1101 | * drm_kms_helper_poll_disable - disable output polling | ||
1102 | * @dev: drm_device | ||
1103 | * | ||
1104 | * This function disables the output polling work. | ||
1105 | * | ||
1106 | * Drivers can call this helper from their device suspend implementation. It is | ||
1107 | * not an error to call this even when output polling isn't enabled or already | ||
1108 | * disabled. | ||
1109 | */ | ||
1057 | void drm_kms_helper_poll_disable(struct drm_device *dev) | 1110 | void drm_kms_helper_poll_disable(struct drm_device *dev) |
1058 | { | 1111 | { |
1059 | if (!dev->mode_config.poll_enabled) | 1112 | if (!dev->mode_config.poll_enabled) |
@@ -1062,6 +1115,16 @@ void drm_kms_helper_poll_disable(struct drm_device *dev) | |||
1062 | } | 1115 | } |
1063 | EXPORT_SYMBOL(drm_kms_helper_poll_disable); | 1116 | EXPORT_SYMBOL(drm_kms_helper_poll_disable); |
1064 | 1117 | ||
1118 | /** | ||
1119 | * drm_kms_helper_poll_enable - re-enable output polling. | ||
1120 | * @dev: drm_device | ||
1121 | * | ||
1122 | * This function re-enables the output polling work. | ||
1123 | * | ||
1124 | * Drivers can call this helper from their device resume implementation. It is | ||
1125 | * an error to call this when the output polling support has not yet been set | ||
1126 | * up. | ||
1127 | */ | ||
1065 | void drm_kms_helper_poll_enable(struct drm_device *dev) | 1128 | void drm_kms_helper_poll_enable(struct drm_device *dev) |
1066 | { | 1129 | { |
1067 | bool poll = false; | 1130 | bool poll = false; |
@@ -1081,6 +1144,25 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) | |||
1081 | } | 1144 | } |
1082 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); | 1145 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); |
1083 | 1146 | ||
1147 | /** | ||
1148 | * drm_kms_helper_poll_init - initialize and enable output polling | ||
1149 | * @dev: drm_device | ||
1150 | * | ||
1151 | * This function initializes and then also enables output polling support for | ||
1152 | * @dev. Drivers which do not have reliable hotplug support in hardware can use | ||
1153 | * this helper infrastructure to regularly poll such connectors for changes in | ||
1154 | * their connection state. | ||
1155 | * | ||
1156 | * Drivers can control which connectors are polled by setting the | ||
1157 | * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On | ||
1158 | * connectors where probing live outputs can result in visual distortion, drivers | ||
1159 | * should not set the DRM_CONNECTOR_POLL_DISCONNECT flag to avoid this. | ||
1160 | * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are | ||
1161 | * completely ignored by the polling logic. | ||
1162 | * | ||
1163 | * Note that a connector can be both polled and probed from the hotplug handler, | ||
1164 | * in case the hotplug interrupt is known to be unreliable. | ||
1165 | */ | ||
1084 | void drm_kms_helper_poll_init(struct drm_device *dev) | 1166 | void drm_kms_helper_poll_init(struct drm_device *dev) |
1085 | { | 1167 | { |
1086 | INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute); | 1168 | INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute); |
@@ -1090,12 +1172,39 @@ void drm_kms_helper_poll_init(struct drm_device *dev) | |||
1090 | } | 1172 | } |
1091 | EXPORT_SYMBOL(drm_kms_helper_poll_init); | 1173 | EXPORT_SYMBOL(drm_kms_helper_poll_init); |
1092 | 1174 | ||
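
For illustration, a sketch of driver load code opting one connector into polling before starting the worker (names hypothetical):

	#include <drm/drmP.h>
	#include <drm/drm_crtc_helper.h>

	static void foo_init_polling(struct drm_device *dev,
				     struct drm_connector *vga)
	{
		/* No reliable HPD on this output: poll both directions. */
		vga->polled = DRM_CONNECTOR_POLL_CONNECT |
			      DRM_CONNECTOR_POLL_DISCONNECT;

		drm_kms_helper_poll_init(dev);
	}
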
1175 | /** | ||
1176 | * drm_kms_helper_poll_fini - disable output polling and clean it up | ||
1177 | * @dev: drm_device | ||
1178 | */ | ||
1093 | void drm_kms_helper_poll_fini(struct drm_device *dev) | 1179 | void drm_kms_helper_poll_fini(struct drm_device *dev) |
1094 | { | 1180 | { |
1095 | drm_kms_helper_poll_disable(dev); | 1181 | drm_kms_helper_poll_disable(dev); |
1096 | } | 1182 | } |
1097 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); | 1183 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); |
1098 | 1184 | ||
1185 | /** | ||
1186 | * drm_helper_hpd_irq_event - hotplug processing | ||
1187 | * @dev: drm_device | ||
1188 | * | ||
1189 | * Drivers can use this helper function to run a detect cycle on all connectors | ||
1190 | * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All | ||
1191 | * other connectors are ignored, which is useful to avoid reprobing fixed | ||
1192 | * panels. | ||
1193 | * | ||
1194 | * This helper function is useful for drivers which can't or don't track hotplug | ||
1195 | * interrupts for each connector. | ||
1196 | * | ||
1197 | * Drivers which support hotplug interrupts for each connector individually and | ||
1198 | * which have a more fine-grained detect logic should bypass this code and | ||
1199 | * directly call drm_kms_helper_hotplug_event() in case the connector state | ||
1200 | * changed. | ||
1201 | * | ||
1202 | * This function must be called from process context with no mode | ||
1203 | * setting locks held. | ||
1204 | * | ||
1205 | * Note that a connector can be both polled and probed from the hotplug handler, | ||
1206 | * in case the hotplug interrupt is known to be unreliable. | ||
1207 | */ | ||
1099 | bool drm_helper_hpd_irq_event(struct drm_device *dev) | 1208 | bool drm_helper_hpd_irq_event(struct drm_device *dev) |
1100 | { | 1209 | { |
1101 | struct drm_connector *connector; | 1210 | struct drm_connector *connector; |
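
Since this must run in process context without modeset locks held, hotplug interrupts are typically bounced through a work item; a hedged sketch with a hypothetical driver structure:

	#include <linux/workqueue.h>
	#include <drm/drmP.h>
	#include <drm/drm_crtc_helper.h>

	struct foo_device {
		struct drm_device *drm;
		struct work_struct hotplug_work;
	};

	static void foo_hotplug_work_func(struct work_struct *work)
	{
		struct foo_device *foo =
			container_of(work, struct foo_device, hotplug_work);

		/* Reprobes all DRM_CONNECTOR_POLL_HPD connectors and fires
		 * the uevent if anything changed. */
		drm_helper_hpd_irq_event(foo->drm);
	}
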
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h new file mode 100644 index 000000000000..a2945ee6d675 --- /dev/null +++ b/drivers/gpu/drm/drm_crtc_internal.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Keith Packard | ||
3 | * Copyright © 2007-2008 Dave Airlie | ||
4 | * Copyright © 2007-2008 Intel Corporation | ||
5 | * Jesse Barnes <jesse.barnes@intel.com> | ||
6 | * Copyright © 2014 Intel Corporation | ||
7 | * Daniel Vetter <daniel.vetter@ffwll.ch> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice shall be included in | ||
17 | * all copies or substantial portions of the Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * This header file contains mode setting related functions and definitions | ||
30 | * which are only used within the drm module as internal implementation details | ||
31 | * and are not exported to drivers. | ||
32 | */ | ||
33 | |||
34 | int drm_mode_object_get(struct drm_device *dev, | ||
35 | struct drm_mode_object *obj, uint32_t obj_type); | ||
36 | void drm_mode_object_put(struct drm_device *dev, | ||
37 | struct drm_mode_object *object); | ||
38 | |||
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 9e978aae8972..17832d048147 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -346,3 +346,399 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw) | |||
346 | } | 346 | } |
347 | } | 347 | } |
348 | EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); | 348 | EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); |
349 | |||
350 | /** | ||
351 | * DOC: dp helpers | ||
352 | * | ||
353 | * The DisplayPort AUX channel is an abstraction to allow generic, driver- | ||
354 | * independent access to AUX functionality. Drivers can take advantage of | ||
355 | * this by filling in the fields of the drm_dp_aux structure. | ||
356 | * | ||
357 | * Transactions are described using a hardware-independent drm_dp_aux_msg | ||
358 | * structure, which is passed into a driver's .transfer() implementation. | ||
359 | * Both native and I2C-over-AUX transactions are supported. | ||
360 | */ | ||
361 | |||
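
To make the structure-filling concrete, a hedged sketch of a driver wiring its hardware into these helpers; foo_hw_aux_xfer() is a stand-in for the register-level transfer and is assumed to return the number of bytes transferred or a negative error code, and the fields set here are the ones described above:

	#include <drm/drm_dp_helper.h>

	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
					struct drm_dp_aux_msg *msg)
	{
		return foo_hw_aux_xfer(aux, msg);	/* hypothetical */
	}

	static void foo_dp_aux_init(struct drm_dp_aux *aux, struct device *dev)
	{
		aux->dev = dev;
		aux->transfer = foo_aux_transfer;
	}

After this, drm_dp_dpcd_read()/drm_dp_dpcd_write() and the link helpers below can be used on the AUX channel.
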
362 | static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, | ||
363 | unsigned int offset, void *buffer, size_t size) | ||
364 | { | ||
365 | struct drm_dp_aux_msg msg; | ||
366 | unsigned int retry; | ||
367 | int err; | ||
368 | |||
369 | memset(&msg, 0, sizeof(msg)); | ||
370 | msg.address = offset; | ||
371 | msg.request = request; | ||
372 | msg.buffer = buffer; | ||
373 | msg.size = size; | ||
374 | |||
375 | /* | ||
376 | * The specification doesn't give any recommendation on how often to | ||
377 | * retry native transactions, so retry 7 times like for I2C-over-AUX | ||
378 | * transactions. | ||
379 | */ | ||
380 | for (retry = 0; retry < 7; retry++) { | ||
381 | err = aux->transfer(aux, &msg); | ||
382 | if (err < 0) { | ||
383 | if (err == -EBUSY) | ||
384 | continue; | ||
385 | |||
386 | return err; | ||
387 | } | ||
388 | |||
389 | if (err < size) | ||
390 | return -EPROTO; | ||
391 | |||
392 | switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { | ||
393 | case DP_AUX_NATIVE_REPLY_ACK: | ||
394 | return err; | ||
395 | |||
396 | case DP_AUX_NATIVE_REPLY_NACK: | ||
397 | return -EIO; | ||
398 | |||
399 | case DP_AUX_NATIVE_REPLY_DEFER: | ||
400 | usleep_range(400, 500); | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | DRM_ERROR("too many retries, giving up\n"); | ||
406 | return -EIO; | ||
407 | } | ||
408 | |||
409 | /** | ||
410 | * drm_dp_dpcd_read() - read a series of bytes from the DPCD | ||
411 | * @aux: DisplayPort AUX channel | ||
412 | * @offset: address of the (first) register to read | ||
413 | * @buffer: buffer to store the register values | ||
414 | * @size: number of bytes in @buffer | ||
415 | * | ||
416 | * Returns the number of bytes transferred on success, or a negative error | ||
417 | * code on failure. -EIO is returned if the request was NAKed by the sink or | ||
418 | * if the retry count was exceeded. If not all bytes were transferred, this | ||
419 | * function returns -EPROTO. Errors from the underlying AUX channel transfer | ||
420 | * function, with the exception of -EBUSY (which causes the transaction to | ||
421 | * be retried), are propagated to the caller. | ||
422 | */ | ||
423 | ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, | ||
424 | void *buffer, size_t size) | ||
425 | { | ||
426 | return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, | ||
427 | size); | ||
428 | } | ||
429 | EXPORT_SYMBOL(drm_dp_dpcd_read); | ||
430 | |||
431 | /** | ||
432 | * drm_dp_dpcd_write() - write a series of bytes to the DPCD | ||
433 | * @aux: DisplayPort AUX channel | ||
434 | * @offset: address of the (first) register to write | ||
435 | * @buffer: buffer containing the values to write | ||
436 | * @size: number of bytes in @buffer | ||
437 | * | ||
438 | * Returns the number of bytes transferred on success, or a negative error | ||
439 | * code on failure. -EIO is returned if the request was NAKed by the sink or | ||
440 | * if the retry count was exceeded. If not all bytes were transferred, this | ||
441 | * function returns -EPROTO. Errors from the underlying AUX channel transfer | ||
442 | * function, with the exception of -EBUSY (which causes the transaction to | ||
443 | * be retried), are propagated to the caller. | ||
444 | */ | ||
445 | ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, | ||
446 | void *buffer, size_t size) | ||
447 | { | ||
448 | return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, | ||
449 | size); | ||
450 | } | ||
451 | EXPORT_SYMBOL(drm_dp_dpcd_write); | ||
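A usage sketch for the two accessors above (the foo_* helper is hypothetical; the DPCD register names are the standard definitions from <drm/drm_dp_helper.h>):

        static int foo_set_training_pattern(struct drm_dp_aux *aux)
        {
                u8 pattern = DP_TRAINING_PATTERN_1;
                ssize_t err;

                /* single-byte write to the DPCD training pattern register */
                err = drm_dp_dpcd_write(aux, DP_TRAINING_PATTERN_SET,
                                        &pattern, 1);
                if (err < 0)
                        return err;

                /* read it back to double-check the sink accepted it */
                err = drm_dp_dpcd_read(aux, DP_TRAINING_PATTERN_SET,
                                       &pattern, 1);
                return err < 0 ? err : 0;
        }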
452 | |||
453 | /** | ||
454 | * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207) | ||
455 | * @aux: DisplayPort AUX channel | ||
456 | * @status: buffer to store the link status in (must be at least 6 bytes) | ||
457 | * | ||
458 | * Returns the number of bytes transferred on success or a negative error | ||
459 | * code on failure. | ||
460 | */ | ||
461 | int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, | ||
462 | u8 status[DP_LINK_STATUS_SIZE]) | ||
463 | { | ||
464 | return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status, | ||
465 | DP_LINK_STATUS_SIZE); | ||
466 | } | ||
467 | EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); | ||
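This pairs naturally with the existing status-parsing helpers in this file; a hedged sketch (foo_* hypothetical):

        static bool foo_link_trained(struct drm_dp_aux *aux, int lane_count)
        {
                u8 status[DP_LINK_STATUS_SIZE];

                if (drm_dp_dpcd_read_link_status(aux, status) < 0)
                        return false;

                /* drm_dp_channel_eq_ok() is an existing helper in this file */
                return drm_dp_channel_eq_ok(status, lane_count);
        }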
468 | |||
469 | /** | ||
470 | * drm_dp_link_probe() - probe a DisplayPort link for capabilities | ||
471 | * @aux: DisplayPort AUX channel | ||
472 | * @link: pointer to structure in which to return link capabilities | ||
473 | * | ||
474 | * The structure filled in by this function can usually be passed directly | ||
475 | * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and | ||
476 | * configure the link based on the link's capabilities. | ||
477 | * | ||
478 | * Returns 0 on success or a negative error code on failure. | ||
479 | */ | ||
480 | int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) | ||
481 | { | ||
482 | u8 values[3]; | ||
483 | int err; | ||
484 | |||
485 | memset(link, 0, sizeof(*link)); | ||
486 | |||
487 | err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); | ||
488 | if (err < 0) | ||
489 | return err; | ||
490 | |||
491 | link->revision = values[0]; | ||
492 | link->rate = drm_dp_bw_code_to_link_rate(values[1]); | ||
493 | link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; | ||
494 | |||
495 | if (values[2] & DP_ENHANCED_FRAME_CAP) | ||
496 | link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; | ||
497 | |||
498 | return 0; | ||
499 | } | ||
500 | EXPORT_SYMBOL(drm_dp_link_probe); | ||
501 | |||
502 | /** | ||
503 | * drm_dp_link_power_up() - power up a DisplayPort link | ||
504 | * @aux: DisplayPort AUX channel | ||
505 | * @link: pointer to a structure containing the link configuration | ||
506 | * | ||
507 | * Returns 0 on success or a negative error code on failure. | ||
508 | */ | ||
509 | int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) | ||
510 | { | ||
511 | u8 value; | ||
512 | int err; | ||
513 | |||
514 | /* DP_SET_POWER register is only available on DPCD v1.1 and later */ | ||
515 | if (link->revision < 0x11) | ||
516 | return 0; | ||
517 | |||
518 | err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); | ||
519 | if (err < 0) | ||
520 | return err; | ||
521 | |||
522 | value &= ~DP_SET_POWER_MASK; | ||
523 | value |= DP_SET_POWER_D0; | ||
524 | |||
525 | err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); | ||
526 | if (err < 0) | ||
527 | return err; | ||
528 | |||
529 | /* | ||
530 | * According to the DP 1.1 specification, a "Sink Device must exit the | ||
531 | * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink | ||
532 | * Control Field" (register 0x600)). | ||
533 | */ | ||
534 | usleep_range(1000, 2000); | ||
535 | |||
536 | return 0; | ||
537 | } | ||
538 | EXPORT_SYMBOL(drm_dp_link_power_up); | ||
539 | |||
540 | /** | ||
541 | * drm_dp_link_configure() - configure a DisplayPort link | ||
542 | * @aux: DisplayPort AUX channel | ||
543 | * @link: pointer to a structure containing the link configuration | ||
544 | * | ||
545 | * Returns 0 on success or a negative error code on failure. | ||
546 | */ | ||
547 | int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) | ||
548 | { | ||
549 | u8 values[2]; | ||
550 | int err; | ||
551 | |||
552 | values[0] = drm_dp_link_rate_to_bw_code(link->rate); | ||
553 | values[1] = link->num_lanes; | ||
554 | |||
555 | if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) | ||
556 | values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
557 | |||
558 | err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); | ||
559 | if (err < 0) | ||
560 | return err; | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | EXPORT_SYMBOL(drm_dp_link_configure); | ||
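The three link helpers are meant to be chained, as the drm_dp_link_probe() comment says; a minimal sketch (foo_* hypothetical):

        static int foo_bring_up_link(struct drm_dp_aux *aux,
                                     struct drm_dp_link *link)
        {
                int err;

                err = drm_dp_link_probe(aux, link);     /* fill *link from DPCD */
                if (err < 0)
                        return err;

                err = drm_dp_link_power_up(aux, link);  /* D0, waits for sink */
                if (err < 0)
                        return err;

                /* program link rate, lane count and enhanced framing */
                return drm_dp_link_configure(aux, link);
        }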
565 | |||
566 | /* | ||
567 | * I2C-over-AUX implementation | ||
568 | */ | ||
569 | |||
570 | static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter) | ||
571 | { | ||
572 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | ||
573 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | ||
574 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | | ||
575 | I2C_FUNC_10BIT_ADDR; | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Transfer a single I2C-over-AUX message and handle various error conditions, | ||
580 | * retrying the transaction as appropriate. | ||
581 | */ | ||
582 | static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | ||
583 | { | ||
584 | unsigned int retry; | ||
585 | int err; | ||
586 | |||
587 | /* | ||
588 | * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device | ||
589 | * is required to retry at least seven times upon receiving AUX_DEFER | ||
590 | * before giving up the AUX transaction. | ||
591 | */ | ||
592 | for (retry = 0; retry < 7; retry++) { | ||
593 | err = aux->transfer(aux, msg); | ||
594 | if (err < 0) { | ||
595 | if (err == -EBUSY) | ||
596 | continue; | ||
597 | |||
598 | DRM_DEBUG_KMS("transaction failed: %d\n", err); | ||
599 | return err; | ||
600 | } | ||
601 | |||
602 | if (err < msg->size) | ||
603 | return -EPROTO; | ||
604 | |||
605 | switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) { | ||
606 | case DP_AUX_NATIVE_REPLY_ACK: | ||
607 | /* | ||
608 | * For I2C-over-AUX transactions this isn't enough, we | ||
609 | * need to check for the I2C ACK reply. | ||
610 | */ | ||
611 | break; | ||
612 | |||
613 | case DP_AUX_NATIVE_REPLY_NACK: | ||
614 | DRM_DEBUG_KMS("native nack\n"); | ||
615 | return -EREMOTEIO; | ||
616 | |||
617 | case DP_AUX_NATIVE_REPLY_DEFER: | ||
618 | DRM_DEBUG_KMS("native defer\n"); | ||
619 | /* | ||
620 | * We could check for I2C bit rate capabilities and if | ||
621 | * available adjust this interval. We could also be | ||
622 | * more careful with DP-to-legacy adapters where a | ||
623 | * long legacy cable may force very low I2C bit rates. | ||
624 | * | ||
625 | * For now just defer for long enough to hopefully be | ||
626 | * safe for all use-cases. | ||
627 | */ | ||
628 | usleep_range(500, 600); | ||
629 | continue; | ||
630 | |||
631 | default: | ||
632 | DRM_ERROR("invalid native reply %#04x\n", msg->reply); | ||
633 | return -EREMOTEIO; | ||
634 | } | ||
635 | |||
636 | switch (msg->reply & DP_AUX_I2C_REPLY_MASK) { | ||
637 | case DP_AUX_I2C_REPLY_ACK: | ||
638 | /* | ||
639 | * Both native ACK and I2C ACK replies received. We | ||
640 | * can assume the transfer was successful. | ||
641 | */ | ||
642 | return 0; | ||
643 | |||
644 | case DP_AUX_I2C_REPLY_NACK: | ||
645 | DRM_DEBUG_KMS("I2C nack\n"); | ||
646 | return -EREMOTEIO; | ||
647 | |||
648 | case DP_AUX_I2C_REPLY_DEFER: | ||
649 | DRM_DEBUG_KMS("I2C defer\n"); | ||
650 | usleep_range(400, 500); | ||
651 | continue; | ||
652 | |||
653 | default: | ||
654 | DRM_ERROR("invalid I2C reply %#04x\n", msg->reply); | ||
655 | return -EREMOTEIO; | ||
656 | } | ||
657 | } | ||
658 | |||
659 | DRM_ERROR("too many retries, giving up\n"); | ||
660 | return -EREMOTEIO; | ||
661 | } | ||
662 | |||
663 | static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, | ||
664 | int num) | ||
665 | { | ||
666 | struct drm_dp_aux *aux = adapter->algo_data; | ||
667 | unsigned int i, j; | ||
668 | |||
669 | for (i = 0; i < num; i++) { | ||
670 | struct drm_dp_aux_msg msg; | ||
671 | int err; | ||
672 | |||
673 | /* | ||
674 | * Many hardware implementations support FIFOs larger than a | ||
675 | * single byte, but it has been empirically determined that | ||
676 | * transferring data in larger chunks can actually lead to | ||
677 | * decreased performance. Therefore each message is simply | ||
678 | * transferred byte-by-byte. | ||
679 | */ | ||
680 | for (j = 0; j < msgs[i].len; j++) { | ||
681 | memset(&msg, 0, sizeof(msg)); | ||
682 | msg.address = msgs[i].addr; | ||
683 | |||
684 | msg.request = (msgs[i].flags & I2C_M_RD) ? | ||
685 | DP_AUX_I2C_READ : | ||
686 | DP_AUX_I2C_WRITE; | ||
687 | |||
688 | /* | ||
689 | * All messages except the last one are middle-of- | ||
690 | * transfer messages. | ||
691 | */ | ||
692 | if ((i < num - 1) || (j < msgs[i].len - 1)) | ||
693 | msg.request |= DP_AUX_I2C_MOT; | ||
694 | |||
695 | msg.buffer = msgs[i].buf + j; | ||
696 | msg.size = 1; | ||
697 | |||
698 | err = drm_dp_i2c_do_msg(aux, &msg); | ||
699 | if (err < 0) | ||
700 | return err; | ||
701 | } | ||
702 | } | ||
703 | |||
704 | return num; | ||
705 | } | ||
706 | |||
707 | static const struct i2c_algorithm drm_dp_i2c_algo = { | ||
708 | .functionality = drm_dp_i2c_functionality, | ||
709 | .master_xfer = drm_dp_i2c_xfer, | ||
710 | }; | ||
711 | |||
712 | /** | ||
713 | * drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX | ||
714 | * @aux: DisplayPort AUX channel | ||
715 | * | ||
716 | * Returns 0 on success or a negative error code on failure. | ||
717 | */ | ||
718 | int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux) | ||
719 | { | ||
720 | aux->ddc.algo = &drm_dp_i2c_algo; | ||
721 | aux->ddc.algo_data = aux; | ||
722 | aux->ddc.retries = 3; | ||
723 | |||
724 | aux->ddc.class = I2C_CLASS_DDC; | ||
725 | aux->ddc.owner = THIS_MODULE; | ||
726 | aux->ddc.dev.parent = aux->dev; | ||
727 | aux->ddc.dev.of_node = aux->dev->of_node; | ||
728 | |||
729 | strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), | ||
730 | sizeof(aux->ddc.name)); | ||
731 | |||
732 | return i2c_add_adapter(&aux->ddc); | ||
733 | } | ||
734 | EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus); | ||
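Once registered, aux->ddc is an ordinary i2c adapter, so EDID can be fetched through the stock helpers; a hedged sketch (foo_get_modes is hypothetical):

        static int foo_get_modes(struct drm_connector *connector,
                                 struct drm_dp_aux *aux)
        {
                struct edid *edid;
                int count;

                /* aux->ddc was registered via drm_dp_aux_register_i2c_bus() */
                edid = drm_get_edid(connector, &aux->ddc);
                if (!edid)
                        return 0;

                drm_mode_connector_update_edid_property(connector, edid);
                count = drm_add_edid_modes(connector, edid);
                kfree(edid);

                return count;
        }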
735 | |||
736 | /** | ||
737 | * drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter | ||
738 | * @aux: DisplayPort AUX channel | ||
739 | */ | ||
740 | void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux) | ||
741 | { | ||
742 | i2c_del_adapter(&aux->ddc); | ||
743 | } | ||
744 | EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus); | ||
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 345be03c23db..ec651be2f3cb 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -344,7 +344,7 @@ long drm_ioctl(struct file *filp, | |||
344 | 344 | ||
345 | DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", | 345 | DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", |
346 | task_pid_nr(current), | 346 | task_pid_nr(current), |
347 | (long)old_encode_dev(file_priv->minor->device), | 347 | (long)old_encode_dev(file_priv->minor->kdev->devt), |
348 | file_priv->authenticated, ioctl->name); | 348 | file_priv->authenticated, ioctl->name); |
349 | 349 | ||
350 | /* Do not trust userspace, use our own definition */ | 350 | /* Do not trust userspace, use our own definition */ |
@@ -402,7 +402,7 @@ long drm_ioctl(struct file *filp, | |||
402 | if (!ioctl) | 402 | if (!ioctl) |
403 | DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", | 403 | DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", |
404 | task_pid_nr(current), | 404 | task_pid_nr(current), |
405 | (long)old_encode_dev(file_priv->minor->device), | 405 | (long)old_encode_dev(file_priv->minor->kdev->devt), |
406 | file_priv->authenticated, cmd, nr); | 406 | file_priv->authenticated, cmd, nr); |
407 | 407 | ||
408 | if (kdata != stack_kdata) | 408 | if (kdata != stack_kdata) |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index b924306b8477..d4e3f9d9370f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -1098,10 +1098,14 @@ EXPORT_SYMBOL(drm_edid_is_valid); | |||
1098 | /** | 1098 | /** |
1099 | * Get EDID information via I2C. | 1099 | * Get EDID information via I2C. |
1100 | * | 1100 | * |
1101 | * \param adapter : i2c device adaptor | 1101 | * @adapter: i2c device adapter |
1102 | * \param buf : EDID data buffer to be filled | 1102 | * @buf: EDID data buffer to be filled |
1103 | * \param len : EDID data buffer length | 1103 | * @block: 128 byte EDID block to start fetching from |
1104 | * \return 0 on success or -1 on failure. | 1104 | * @len: EDID data buffer length to fetch |
1105 | * | ||
1106 | * Returns: | ||
1107 | * | ||
1108 | * 0 on success or -1 on failure. | ||
1105 | * | 1109 | * |
1106 | * Try to fetch EDID information by calling i2c driver function. | 1110 | * Try to fetch EDID information by calling i2c driver function. |
1107 | */ | 1111 | */ |
@@ -1243,9 +1247,11 @@ out: | |||
1243 | 1247 | ||
1244 | /** | 1248 | /** |
1245 | * Probe DDC presence. | 1249 | * Probe DDC presence. |
1250 | * @adapter: i2c adapter to probe | ||
1251 | * | ||
1252 | * Returns: | ||
1246 | * | 1253 | * |
1247 | * \param adapter : i2c device adaptor | 1254 | * 1 on success |
1248 | * \return 1 on success | ||
1249 | */ | 1255 | */ |
1250 | bool | 1256 | bool |
1251 | drm_probe_ddc(struct i2c_adapter *adapter) | 1257 | drm_probe_ddc(struct i2c_adapter *adapter) |
@@ -1586,8 +1592,10 @@ bad_std_timing(u8 a, u8 b) | |||
1586 | 1592 | ||
1587 | /** | 1593 | /** |
1588 | * drm_mode_std - convert standard mode info (width, height, refresh) into mode | 1594 | * drm_mode_std - convert standard mode info (width, height, refresh) into mode |
1595 | * @connector: connector for the EDID block | ||
1596 | * @edid: EDID block to scan | ||
1589 | * @t: standard timing params | 1597 | * @t: standard timing params |
1590 | * @timing_level: standard timing level | 1598 | * @revision: standard timing level |
1591 | * | 1599 | * |
1592 | * Take the standard timing params (in this case width, aspect, and refresh) | 1600 | * Take the standard timing params (in this case width, aspect, and refresh) |
1593 | * and convert them into a real mode using CVT/GTF/DMT. | 1601 | * and convert them into a real mode using CVT/GTF/DMT. |
@@ -2132,6 +2140,7 @@ do_established_modes(struct detailed_timing *timing, void *c) | |||
2132 | 2140 | ||
2133 | /** | 2141 | /** |
2134 | * add_established_modes - get est. modes from EDID and add them | 2142 | * add_established_modes - get est. modes from EDID and add them |
2143 | * @connector: connector for the EDID block | ||
2135 | * @edid: EDID block to scan | 2144 | * @edid: EDID block to scan |
2136 | * | 2145 | * |
2137 | * Each EDID block contains a bitmap of the supported "established modes" list | 2146 | * Each EDID block contains a bitmap of the supported "established modes" list |
@@ -2194,6 +2203,7 @@ do_standard_modes(struct detailed_timing *timing, void *c) | |||
2194 | 2203 | ||
2195 | /** | 2204 | /** |
2196 | * add_standard_modes - get std. modes from EDID and add them | 2205 | * add_standard_modes - get std. modes from EDID and add them |
2206 | * @connector: connector for the EDID block | ||
2197 | * @edid: EDID block to scan | 2207 | * @edid: EDID block to scan |
2198 | * | 2208 | * |
2199 | * Standard modes can be calculated using the appropriate standard (DMT, | 2209 | * Standard modes can be calculated using the appropriate standard (DMT, |
@@ -2580,6 +2590,9 @@ drm_display_mode_from_vic_index(struct drm_connector *connector, | |||
2580 | return NULL; | 2590 | return NULL; |
2581 | 2591 | ||
2582 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); | 2592 | newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); |
2593 | if (!newmode) | ||
2594 | return NULL; | ||
2595 | |||
2583 | newmode->vrefresh = 0; | 2596 | newmode->vrefresh = 0; |
2584 | 2597 | ||
2585 | return newmode; | 2598 | return newmode; |
@@ -3300,6 +3313,7 @@ EXPORT_SYMBOL(drm_detect_hdmi_monitor); | |||
3300 | 3313 | ||
3301 | /** | 3314 | /** |
3302 | * drm_detect_monitor_audio - check monitor audio capability | 3315 | * drm_detect_monitor_audio - check monitor audio capability |
3316 | * @edid: EDID block to scan | ||
3303 | * | 3317 | * |
3304 | * Monitor should have CEA extension block. | 3318 | * Monitor should have CEA extension block. |
3305 | * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic | 3319 | * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic |
@@ -3345,6 +3359,7 @@ EXPORT_SYMBOL(drm_detect_monitor_audio); | |||
3345 | 3359 | ||
3346 | /** | 3360 | /** |
3347 | * drm_rgb_quant_range_selectable - is RGB quantization range selectable? | 3361 | * drm_rgb_quant_range_selectable - is RGB quantization range selectable? |
3362 | * @edid: EDID block to scan | ||
3348 | * | 3363 | * |
3349 | * Check whether the monitor reports the RGB quantization range selection | 3364 | * Check whether the monitor reports the RGB quantization range selection |
3350 | * as supported. The AVI infoframe can then be used to inform the monitor | 3365 | * as supported. The AVI infoframe can then be used to inform the monitor |
@@ -3564,8 +3579,8 @@ void drm_set_preferred_mode(struct drm_connector *connector, | |||
3564 | struct drm_display_mode *mode; | 3579 | struct drm_display_mode *mode; |
3565 | 3580 | ||
3566 | list_for_each_entry(mode, &connector->probed_modes, head) { | 3581 | list_for_each_entry(mode, &connector->probed_modes, head) { |
3567 | if (drm_mode_width(mode) == hpref && | 3582 | if (mode->hdisplay == hpref && |
3568 | drm_mode_height(mode) == vpref) | 3583 | mode->vdisplay == vpref) |
3569 | mode->type |= DRM_MODE_TYPE_PREFERRED; | 3584 | mode->type |= DRM_MODE_TYPE_PREFERRED; |
3570 | } | 3585 | } |
3571 | } | 3586 | } |
@@ -3599,6 +3614,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, | |||
3599 | 3614 | ||
3600 | frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; | 3615 | frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; |
3601 | frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; | 3616 | frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; |
3617 | frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; | ||
3602 | 3618 | ||
3603 | return 0; | 3619 | return 0; |
3604 | } | 3620 | } |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d99df15a78bc..87876198801d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -516,6 +516,9 @@ int drm_fb_helper_init(struct drm_device *dev, | |||
516 | struct drm_crtc *crtc; | 516 | struct drm_crtc *crtc; |
517 | int i; | 517 | int i; |
518 | 518 | ||
519 | if (!max_conn_count) | ||
520 | return -EINVAL; | ||
521 | |||
519 | fb_helper->dev = dev; | 522 | fb_helper->dev = dev; |
520 | 523 | ||
521 | INIT_LIST_HEAD(&fb_helper->kernel_fb_list); | 524 | INIT_LIST_HEAD(&fb_helper->kernel_fb_list); |
@@ -809,8 +812,6 @@ int drm_fb_helper_set_par(struct fb_info *info) | |||
809 | struct drm_fb_helper *fb_helper = info->par; | 812 | struct drm_fb_helper *fb_helper = info->par; |
810 | struct drm_device *dev = fb_helper->dev; | 813 | struct drm_device *dev = fb_helper->dev; |
811 | struct fb_var_screeninfo *var = &info->var; | 814 | struct fb_var_screeninfo *var = &info->var; |
812 | int ret; | ||
813 | int i; | ||
814 | 815 | ||
815 | if (var->pixclock != 0) { | 816 | if (var->pixclock != 0) { |
816 | DRM_ERROR("PIXEL CLOCK SET\n"); | 817 | DRM_ERROR("PIXEL CLOCK SET\n"); |
@@ -818,13 +819,7 @@ int drm_fb_helper_set_par(struct fb_info *info) | |||
818 | } | 819 | } |
819 | 820 | ||
820 | drm_modeset_lock_all(dev); | 821 | drm_modeset_lock_all(dev); |
821 | for (i = 0; i < fb_helper->crtc_count; i++) { | 822 | drm_fb_helper_restore_fbdev_mode(fb_helper); |
822 | ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set); | ||
823 | if (ret) { | ||
824 | drm_modeset_unlock_all(dev); | ||
825 | return ret; | ||
826 | } | ||
827 | } | ||
828 | drm_modeset_unlock_all(dev); | 823 | drm_modeset_unlock_all(dev); |
829 | 824 | ||
830 | if (fb_helper->delayed_hotplug) { | 825 | if (fb_helper->delayed_hotplug) { |
@@ -1141,8 +1136,8 @@ struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector * | |||
1141 | struct drm_display_mode *mode; | 1136 | struct drm_display_mode *mode; |
1142 | 1137 | ||
1143 | list_for_each_entry(mode, &fb_connector->connector->modes, head) { | 1138 | list_for_each_entry(mode, &fb_connector->connector->modes, head) { |
1144 | if (drm_mode_width(mode) > width || | 1139 | if (mode->hdisplay > width || |
1145 | drm_mode_height(mode) > height) | 1140 | mode->vdisplay > height) |
1146 | continue; | 1141 | continue; |
1147 | if (mode->type & DRM_MODE_TYPE_PREFERRED) | 1142 | if (mode->type & DRM_MODE_TYPE_PREFERRED) |
1148 | return mode; | 1143 | return mode; |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 7f2af9aca038..9b02f126fb0d 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -39,12 +39,12 @@ | |||
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | 41 | ||
42 | /* from BKL pushdown: note that nothing else serializes idr_find() */ | 42 | /* from BKL pushdown */ |
43 | DEFINE_MUTEX(drm_global_mutex); | 43 | DEFINE_MUTEX(drm_global_mutex); |
44 | EXPORT_SYMBOL(drm_global_mutex); | 44 | EXPORT_SYMBOL(drm_global_mutex); |
45 | 45 | ||
46 | static int drm_open_helper(struct inode *inode, struct file *filp, | 46 | static int drm_open_helper(struct inode *inode, struct file *filp, |
47 | struct drm_device * dev); | 47 | struct drm_minor *minor); |
48 | 48 | ||
49 | static int drm_setup(struct drm_device * dev) | 49 | static int drm_setup(struct drm_device * dev) |
50 | { | 50 | { |
@@ -79,38 +79,23 @@ static int drm_setup(struct drm_device * dev) | |||
79 | */ | 79 | */ |
80 | int drm_open(struct inode *inode, struct file *filp) | 80 | int drm_open(struct inode *inode, struct file *filp) |
81 | { | 81 | { |
82 | struct drm_device *dev = NULL; | 82 | struct drm_device *dev; |
83 | int minor_id = iminor(inode); | ||
84 | struct drm_minor *minor; | 83 | struct drm_minor *minor; |
85 | int retcode = 0; | 84 | int retcode; |
86 | int need_setup = 0; | 85 | int need_setup = 0; |
87 | struct address_space *old_mapping; | ||
88 | struct address_space *old_imapping; | ||
89 | |||
90 | minor = idr_find(&drm_minors_idr, minor_id); | ||
91 | if (!minor) | ||
92 | return -ENODEV; | ||
93 | |||
94 | if (!(dev = minor->dev)) | ||
95 | return -ENODEV; | ||
96 | 86 | ||
97 | if (drm_device_is_unplugged(dev)) | 87 | minor = drm_minor_acquire(iminor(inode)); |
98 | return -ENODEV; | 88 | if (IS_ERR(minor)) |
89 | return PTR_ERR(minor); | ||
99 | 90 | ||
91 | dev = minor->dev; | ||
100 | if (!dev->open_count++) | 92 | if (!dev->open_count++) |
101 | need_setup = 1; | 93 | need_setup = 1; |
102 | mutex_lock(&dev->struct_mutex); | ||
103 | old_imapping = inode->i_mapping; | ||
104 | old_mapping = dev->dev_mapping; | ||
105 | if (old_mapping == NULL) | ||
106 | dev->dev_mapping = &inode->i_data; | ||
107 | /* ihold ensures nobody can remove inode with our i_data */ | ||
108 | ihold(container_of(dev->dev_mapping, struct inode, i_data)); | ||
109 | inode->i_mapping = dev->dev_mapping; | ||
110 | filp->f_mapping = dev->dev_mapping; | ||
111 | mutex_unlock(&dev->struct_mutex); | ||
112 | 94 | ||
113 | retcode = drm_open_helper(inode, filp, dev); | 95 | /* share address_space across all char-devs of a single device */ |
96 | filp->f_mapping = dev->anon_inode->i_mapping; | ||
97 | |||
98 | retcode = drm_open_helper(inode, filp, minor); | ||
114 | if (retcode) | 99 | if (retcode) |
115 | goto err_undo; | 100 | goto err_undo; |
116 | if (need_setup) { | 101 | if (need_setup) { |
@@ -121,13 +106,8 @@ int drm_open(struct inode *inode, struct file *filp) | |||
121 | return 0; | 106 | return 0; |
122 | 107 | ||
123 | err_undo: | 108 | err_undo: |
124 | mutex_lock(&dev->struct_mutex); | ||
125 | filp->f_mapping = old_imapping; | ||
126 | inode->i_mapping = old_imapping; | ||
127 | iput(container_of(dev->dev_mapping, struct inode, i_data)); | ||
128 | dev->dev_mapping = old_mapping; | ||
129 | mutex_unlock(&dev->struct_mutex); | ||
130 | dev->open_count--; | 109 | dev->open_count--; |
110 | drm_minor_release(minor); | ||
131 | return retcode; | 111 | return retcode; |
132 | } | 112 | } |
133 | EXPORT_SYMBOL(drm_open); | 113 | EXPORT_SYMBOL(drm_open); |
@@ -143,33 +123,30 @@ EXPORT_SYMBOL(drm_open); | |||
143 | */ | 123 | */ |
144 | int drm_stub_open(struct inode *inode, struct file *filp) | 124 | int drm_stub_open(struct inode *inode, struct file *filp) |
145 | { | 125 | { |
146 | struct drm_device *dev = NULL; | 126 | struct drm_device *dev; |
147 | struct drm_minor *minor; | 127 | struct drm_minor *minor; |
148 | int minor_id = iminor(inode); | ||
149 | int err = -ENODEV; | 128 | int err = -ENODEV; |
150 | const struct file_operations *new_fops; | 129 | const struct file_operations *new_fops; |
151 | 130 | ||
152 | DRM_DEBUG("\n"); | 131 | DRM_DEBUG("\n"); |
153 | 132 | ||
154 | mutex_lock(&drm_global_mutex); | 133 | mutex_lock(&drm_global_mutex); |
155 | minor = idr_find(&drm_minors_idr, minor_id); | 134 | minor = drm_minor_acquire(iminor(inode)); |
156 | if (!minor) | 135 | if (IS_ERR(minor)) |
157 | goto out; | 136 | goto out_unlock; |
158 | |||
159 | if (!(dev = minor->dev)) | ||
160 | goto out; | ||
161 | |||
162 | if (drm_device_is_unplugged(dev)) | ||
163 | goto out; | ||
164 | 137 | ||
138 | dev = minor->dev; | ||
165 | new_fops = fops_get(dev->driver->fops); | 139 | new_fops = fops_get(dev->driver->fops); |
166 | if (!new_fops) | 140 | if (!new_fops) |
167 | goto out; | 141 | goto out_release; |
168 | 142 | ||
169 | replace_fops(filp, new_fops); | 143 | replace_fops(filp, new_fops); |
170 | if (filp->f_op->open) | 144 | if (filp->f_op->open) |
171 | err = filp->f_op->open(inode, filp); | 145 | err = filp->f_op->open(inode, filp); |
172 | out: | 146 | |
147 | out_release: | ||
148 | drm_minor_release(minor); | ||
149 | out_unlock: | ||
173 | mutex_unlock(&drm_global_mutex); | 150 | mutex_unlock(&drm_global_mutex); |
174 | return err; | 151 | return err; |
175 | } | 152 | } |
@@ -196,16 +173,16 @@ static int drm_cpu_valid(void) | |||
196 | * | 173 | * |
197 | * \param inode device inode. | 174 | * \param inode device inode. |
198 | * \param filp file pointer. | 175 | * \param filp file pointer. |
199 | * \param dev device. | 176 | * \param minor acquired minor-object. |
200 | * \return zero on success or a negative number on failure. | 177 | * \return zero on success or a negative number on failure. |
201 | * | 178 | * |
202 | * Creates and initializes a drm_file structure for the file private data in \p | 179 | * Creates and initializes a drm_file structure for the file private data in \p |
203 | * filp and add it into the double linked list in \p dev. | 180 | * filp and add it into the double linked list in \p dev. |
204 | */ | 181 | */ |
205 | static int drm_open_helper(struct inode *inode, struct file *filp, | 182 | static int drm_open_helper(struct inode *inode, struct file *filp, |
206 | struct drm_device * dev) | 183 | struct drm_minor *minor) |
207 | { | 184 | { |
208 | int minor_id = iminor(inode); | 185 | struct drm_device *dev = minor->dev; |
209 | struct drm_file *priv; | 186 | struct drm_file *priv; |
210 | int ret; | 187 | int ret; |
211 | 188 | ||
@@ -216,7 +193,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
216 | if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) | 193 | if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) |
217 | return -EINVAL; | 194 | return -EINVAL; |
218 | 195 | ||
219 | DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); | 196 | DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index); |
220 | 197 | ||
221 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 198 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
222 | if (!priv) | 199 | if (!priv) |
@@ -226,11 +203,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
226 | priv->filp = filp; | 203 | priv->filp = filp; |
227 | priv->uid = current_euid(); | 204 | priv->uid = current_euid(); |
228 | priv->pid = get_pid(task_pid(current)); | 205 | priv->pid = get_pid(task_pid(current)); |
229 | priv->minor = idr_find(&drm_minors_idr, minor_id); | 206 | priv->minor = minor; |
230 | if (!priv->minor) { | ||
231 | ret = -ENODEV; | ||
232 | goto out_put_pid; | ||
233 | } | ||
234 | 207 | ||
235 | /* for compatibility root is always authenticated */ | 208 | /* for compatibility root is always authenticated */ |
236 | priv->always_authenticated = capable(CAP_SYS_ADMIN); | 209 | priv->always_authenticated = capable(CAP_SYS_ADMIN); |
@@ -336,7 +309,6 @@ out_prime_destroy: | |||
336 | drm_prime_destroy_file_private(&priv->prime); | 309 | drm_prime_destroy_file_private(&priv->prime); |
337 | if (dev->driver->driver_features & DRIVER_GEM) | 310 | if (dev->driver->driver_features & DRIVER_GEM) |
338 | drm_gem_release(dev, priv); | 311 | drm_gem_release(dev, priv); |
339 | out_put_pid: | ||
340 | put_pid(priv->pid); | 312 | put_pid(priv->pid); |
341 | kfree(priv); | 313 | kfree(priv); |
342 | filp->private_data = NULL; | 314 | filp->private_data = NULL; |
@@ -434,7 +406,6 @@ int drm_lastclose(struct drm_device * dev) | |||
434 | 406 | ||
435 | drm_legacy_dma_takedown(dev); | 407 | drm_legacy_dma_takedown(dev); |
436 | 408 | ||
437 | dev->dev_mapping = NULL; | ||
438 | mutex_unlock(&dev->struct_mutex); | 409 | mutex_unlock(&dev->struct_mutex); |
439 | 410 | ||
440 | drm_legacy_dev_reinit(dev); | 411 | drm_legacy_dev_reinit(dev); |
@@ -458,7 +429,8 @@ int drm_lastclose(struct drm_device * dev) | |||
458 | int drm_release(struct inode *inode, struct file *filp) | 429 | int drm_release(struct inode *inode, struct file *filp) |
459 | { | 430 | { |
460 | struct drm_file *file_priv = filp->private_data; | 431 | struct drm_file *file_priv = filp->private_data; |
461 | struct drm_device *dev = file_priv->minor->dev; | 432 | struct drm_minor *minor = file_priv->minor; |
433 | struct drm_device *dev = minor->dev; | ||
462 | int retcode = 0; | 434 | int retcode = 0; |
463 | 435 | ||
464 | mutex_lock(&drm_global_mutex); | 436 | mutex_lock(&drm_global_mutex); |
@@ -474,7 +446,7 @@ int drm_release(struct inode *inode, struct file *filp) | |||
474 | 446 | ||
475 | DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", | 447 | DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", |
476 | task_pid_nr(current), | 448 | task_pid_nr(current), |
477 | (long)old_encode_dev(file_priv->minor->device), | 449 | (long)old_encode_dev(file_priv->minor->kdev->devt), |
478 | dev->open_count); | 450 | dev->open_count); |
479 | 451 | ||
480 | /* Release any auth tokens that might point to this file_priv, | 452 | /* Release any auth tokens that might point to this file_priv, |
@@ -549,9 +521,6 @@ int drm_release(struct inode *inode, struct file *filp) | |||
549 | } | 521 | } |
550 | } | 522 | } |
551 | 523 | ||
552 | BUG_ON(dev->dev_mapping == NULL); | ||
553 | iput(container_of(dev->dev_mapping, struct inode, i_data)); | ||
554 | |||
555 | /* drop the reference held by the file priv | 524 | /* drop the reference held by the file priv |
556 | if (file_priv->master) | 525 | if (file_priv->master) |
557 | drm_master_put(&file_priv->master); | 526 | drm_master_put(&file_priv->master); |
@@ -580,6 +549,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
580 | } | 549 | } |
581 | mutex_unlock(&drm_global_mutex); | 550 | mutex_unlock(&drm_global_mutex); |
582 | 551 | ||
552 | drm_minor_release(minor); | ||
553 | |||
583 | return retcode; | 554 | return retcode; |
584 | } | 555 | } |
585 | EXPORT_SYMBOL(drm_release); | 556 | EXPORT_SYMBOL(drm_release); |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 5bbad873c798..9909bef59800 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -85,9 +85,9 @@ | |||
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | /** | 87 | /** |
88 | * Initialize the GEM device fields | 88 | * drm_gem_init - Initialize the GEM device fields |
89 | * @dev: drm_devic structure to initialize | ||
89 | */ | 90 | */ |
90 | |||
91 | int | 91 | int |
92 | drm_gem_init(struct drm_device *dev) | 92 | drm_gem_init(struct drm_device *dev) |
93 | { | 93 | { |
@@ -120,6 +120,11 @@ drm_gem_destroy(struct drm_device *dev) | |||
120 | } | 120 | } |
121 | 121 | ||
122 | /** | 122 | /** |
123 | * drm_gem_object_init - initialize an allocated shmem-backed GEM object | ||
124 | * @dev: drm_device the object should be initialized for | ||
125 | * @obj: drm_gem_object to initialize | ||
126 | * @size: object size | ||
127 | * | ||
123 | * Initialize an already allocated GEM object of the specified size with | 128 | * Initialize an already allocated GEM object of the specified size with |
124 | * shmfs backing store. | 129 | * shmfs backing store. |
125 | */ | 130 | */ |
@@ -141,6 +146,11 @@ int drm_gem_object_init(struct drm_device *dev, | |||
141 | EXPORT_SYMBOL(drm_gem_object_init); | 146 | EXPORT_SYMBOL(drm_gem_object_init); |
142 | 147 | ||
143 | /** | 148 | /** |
149 | * drm_gem_private_object_init - initialize an allocated private GEM object | ||
150 | * @dev: drm_device the object should be initialized for | ||
151 | * @obj: drm_gem_object to initialize | ||
152 | * @size: object size | ||
153 | * | ||
144 | * Initialize an already allocated GEM object of the specified size with | 154 | * Initialize an already allocated GEM object of the specified size with |
145 | * no GEM provided backing store. Instead the caller is responsible for | 155 | * no GEM provided backing store. Instead the caller is responsible for |
146 | * backing the object and handling it. | 156 | * backing the object and handling it. |
@@ -176,6 +186,9 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) | |||
176 | } | 186 | } |
177 | 187 | ||
178 | /** | 188 | /** |
189 | * drm_gem_object_handle_free - release resources bound to userspace handles | ||
190 | * @obj: GEM object to clean up. | ||
191 | * | ||
179 | * Called after the last handle to the object has been closed | 192 | * Called after the last handle to the object has been closed |
180 | * | 193 | * |
181 | * Removes any name for the object. Note that this must be | 194 | * Removes any name for the object. Note that this must be |
@@ -225,7 +238,12 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) | |||
225 | } | 238 | } |
226 | 239 | ||
227 | /** | 240 | /** |
228 | * Removes the mapping from handle to filp for this object. | 241 | * drm_gem_handle_delete - deletes the given file-private handle |
242 | * @filp: drm file-private structure to use for the handle look up | ||
243 | * @handle: userspace handle to delete | ||
244 | * | ||
245 | * Removes the GEM handle from the @filp lookup table and if this is the last | ||
246 | * handle also cleans up linked resources like GEM names. | ||
229 | */ | 247 | */ |
230 | int | 248 | int |
231 | drm_gem_handle_delete(struct drm_file *filp, u32 handle) | 249 | drm_gem_handle_delete(struct drm_file *filp, u32 handle) |
@@ -270,6 +288,9 @@ EXPORT_SYMBOL(drm_gem_handle_delete); | |||
270 | 288 | ||
271 | /** | 289 | /** |
272 | * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers | 290 | * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers |
291 | * @file: drm file-private structure to remove the dumb handle from | ||
292 | * @dev: corresponding drm_device | ||
293 | * @handle: the dumb handle to remove | ||
273 | * | 294 | * |
274 | * This implements the ->dumb_destroy kms driver callback for drivers which use | 295 | * This implements the ->dumb_destroy kms driver callback for drivers which use |
275 | * gem to manage their backing storage. | 296 | * gem to manage their backing storage. |
@@ -284,6 +305,9 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy); | |||
284 | 305 | ||
285 | /** | 306 | /** |
286 | * drm_gem_handle_create_tail - internal functions to create a handle | 307 | * drm_gem_handle_create_tail - internal functions to create a handle |
308 | * @file_priv: drm file-private structure to register the handle for | ||
309 | * @obj: object to register | ||
310 | * @handlep: pointer to return the created handle to the caller | ||
287 | * | 311 | * |
288 | * This expects the dev->object_name_lock to be held already and will drop it | 312 | * This expects the dev->object_name_lock to be held already and will drop it |
289 | * before returning. Used to avoid races in establishing new handles when | 313 | * before returning. Used to avoid races in establishing new handles when |
@@ -336,6 +360,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, | |||
336 | } | 360 | } |
337 | 361 | ||
338 | /** | 362 | /** |
363 | * drm_gem_handle_create - create a gem handle for an object | ||
364 | * @file_priv: drm file-private structure to register the handle for | ||
365 | * @obj: object to register | ||
366 | * @handlep: pointer to return the created handle to the caller | ||
367 | * | ||
339 | * Create a handle for this object. This adds a handle reference | 368 | * Create a handle for this object. This adds a handle reference |
340 | * to the object, which includes a regular reference count. Callers | 369 | * to the object, which includes a regular reference count. Callers |
341 | * will likely want to dereference the object afterwards. | 370 | * will likely want to dereference the object afterwards. |
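A typical caller, sketched (foo_gem_alloc is hypothetical; the reference handling follows the comment above):

        static int foo_dumb_create(struct drm_file *file_priv,
                                   struct drm_device *dev,
                                   struct drm_mode_create_dumb *args)
        {
                struct drm_gem_object *obj;
                int ret;

                obj = foo_gem_alloc(dev, args->size);  /* hypothetical allocator */
                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                ret = drm_gem_handle_create(file_priv, obj, &args->handle);
                /* the handle now holds its own reference, drop ours */
                drm_gem_object_unreference_unlocked(obj);

                return ret;
        }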
@@ -536,6 +565,11 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, | |||
536 | EXPORT_SYMBOL(drm_gem_object_lookup); | 565 | EXPORT_SYMBOL(drm_gem_object_lookup); |
537 | 566 | ||
538 | /** | 567 | /** |
568 | * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl | ||
569 | * @dev: drm_device | ||
570 | * @data: ioctl data | ||
571 | * @file_priv: drm file-private structure | ||
572 | * | ||
539 | * Releases the handle to an mm object. | 573 | * Releases the handle to an mm object. |
540 | */ | 574 | */ |
541 | int | 575 | int |
@@ -554,6 +588,11 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data, | |||
554 | } | 588 | } |
555 | 589 | ||
556 | /** | 590 | /** |
591 | * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl | ||
592 | * @dev: drm_device | ||
593 | * @data: ioctl data | ||
594 | * @file_priv: drm file-private structure | ||
595 | * | ||
557 | * Create a global name for an object, returning the name. | 596 | * Create a global name for an object, returning the name. |
558 | * | 597 | * |
559 | * Note that the name does not hold a reference; when the object | 598 | * Note that the name does not hold a reference; when the object |
@@ -601,6 +640,11 @@ err: | |||
601 | } | 640 | } |
602 | 641 | ||
603 | /** | 642 | /** |
643 | * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl | ||
644 | * @dev: drm_device | ||
645 | * @data: ioctl data | ||
646 | * @file_priv: drm file-private structure | ||
647 | * | ||
604 | * Open an object using the global name, returning a handle and the size. | 648 | * Open an object using the global name, returning a handle and the size. |
605 | * | 649 | * |
606 | * This handle (of course) holds a reference to the object, so the object | 650 | * This handle (of course) holds a reference to the object, so the object |
@@ -640,6 +684,10 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, | |||
640 | } | 684 | } |
641 | 685 | ||
642 | /** | 686 | /** |
687 | * drm_gem_open - initializes GEM file-private structures at devnode open time | ||
688 | * @dev: drm_device which is being opened by userspace | ||
689 | * @file_private: drm file-private structure to set up | ||
690 | * | ||
643 | * Called at device open time, sets up the structure for handling refcounting | 691 | * Called at device open time, sets up the structure for handling refcounting |
644 | * of mm objects. | 692 | * of mm objects. |
645 | */ | 693 | */ |
@@ -650,7 +698,7 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private) | |||
650 | spin_lock_init(&file_private->table_lock); | 698 | spin_lock_init(&file_private->table_lock); |
651 | } | 699 | } |
652 | 700 | ||
653 | /** | 701 | /* |
654 | * Called at device close to release the file's | 702 | * Called at device close to release the file's |
655 | * handle references on objects. | 703 | * handle references on objects. |
656 | */ | 704 | */ |
@@ -674,6 +722,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) | |||
674 | } | 722 | } |
675 | 723 | ||
676 | /** | 724 | /** |
725 | * drm_gem_release - release file-private GEM resources | ||
726 | * @dev: drm_device which is being closed by userspace | ||
727 | * @file_private: drm file-private structure to clean up | ||
728 | * | ||
677 | * Called at close time when the filp is going away. | 729 | * Called at close time when the filp is going away. |
678 | * | 730 | * |
679 | * Releases any remaining references on objects by this filp. | 731 | * Releases any remaining references on objects by this filp. |
@@ -692,11 +744,16 @@ drm_gem_object_release(struct drm_gem_object *obj) | |||
692 | WARN_ON(obj->dma_buf); | 744 | WARN_ON(obj->dma_buf); |
693 | 745 | ||
694 | if (obj->filp) | 746 | if (obj->filp) |
695 | fput(obj->filp); | 747 | fput(obj->filp); |
748 | |||
749 | drm_gem_free_mmap_offset(obj); | ||
696 | } | 750 | } |
697 | EXPORT_SYMBOL(drm_gem_object_release); | 751 | EXPORT_SYMBOL(drm_gem_object_release); |
698 | 752 | ||
699 | /** | 753 | /** |
754 | * drm_gem_object_free - free a GEM object | ||
755 | * @kref: kref of the object to free | ||
756 | * | ||
700 | * Called after the last reference to the object has been lost. | 757 | * Called after the last reference to the object has been lost. |
701 | * Must be called holding struct_ mutex | 758 | * Must be called holding struct_ mutex |
702 | * | 759 | * |
@@ -782,7 +839,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, | |||
782 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; | 839 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; |
783 | vma->vm_ops = dev->driver->gem_vm_ops; | 840 | vma->vm_ops = dev->driver->gem_vm_ops; |
784 | vma->vm_private_data = obj; | 841 | vma->vm_private_data = obj; |
785 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | 842 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
786 | 843 | ||
787 | /* Take a ref for this mapping of the object, so that the fault | 844 | /* Take a ref for this mapping of the object, so that the fault |
788 | * handler can dereference the mmap offset's pointer to the object. | 845 | * handler can dereference the mmap offset's pointer to the object. |
@@ -818,7 +875,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
818 | struct drm_device *dev = priv->minor->dev; | 875 | struct drm_device *dev = priv->minor->dev; |
819 | struct drm_gem_object *obj; | 876 | struct drm_gem_object *obj; |
820 | struct drm_vma_offset_node *node; | 877 | struct drm_vma_offset_node *node; |
821 | int ret = 0; | 878 | int ret; |
822 | 879 | ||
823 | if (drm_device_is_unplugged(dev)) | 880 | if (drm_device_is_unplugged(dev)) |
824 | return -ENODEV; | 881 | return -ENODEV; |
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 6b51bf90df0e..2c07cb9550ef 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c | |||
@@ -79,7 +79,6 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, | |||
79 | unsigned int size) | 79 | unsigned int size) |
80 | { | 80 | { |
81 | struct drm_gem_cma_object *cma_obj; | 81 | struct drm_gem_cma_object *cma_obj; |
82 | struct sg_table *sgt = NULL; | ||
83 | int ret; | 82 | int ret; |
84 | 83 | ||
85 | size = round_up(size, PAGE_SIZE); | 84 | size = round_up(size, PAGE_SIZE); |
@@ -97,23 +96,9 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, | |||
97 | goto error; | 96 | goto error; |
98 | } | 97 | } |
99 | 98 | ||
100 | sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL); | ||
101 | if (sgt == NULL) { | ||
102 | ret = -ENOMEM; | ||
103 | goto error; | ||
104 | } | ||
105 | |||
106 | ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr, | ||
107 | cma_obj->paddr, size); | ||
108 | if (ret < 0) | ||
109 | goto error; | ||
110 | |||
111 | cma_obj->sgt = sgt; | ||
112 | |||
113 | return cma_obj; | 99 | return cma_obj; |
114 | 100 | ||
115 | error: | 101 | error: |
116 | kfree(sgt); | ||
117 | drm_gem_cma_free_object(&cma_obj->base); | 102 | drm_gem_cma_free_object(&cma_obj->base); |
118 | return ERR_PTR(ret); | 103 | return ERR_PTR(ret); |
119 | } | 104 | } |
@@ -175,10 +160,6 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) | |||
175 | if (cma_obj->vaddr) { | 160 | if (cma_obj->vaddr) { |
176 | dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size, | 161 | dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size, |
177 | cma_obj->vaddr, cma_obj->paddr); | 162 | cma_obj->vaddr, cma_obj->paddr); |
178 | if (cma_obj->sgt) { | ||
179 | sg_free_table(cma_obj->sgt); | ||
180 | kfree(cma_obj->sgt); | ||
181 | } | ||
182 | } else if (gem_obj->import_attach) { | 163 | } else if (gem_obj->import_attach) { |
183 | drm_prime_gem_destroy(gem_obj, cma_obj->sgt); | 164 | drm_prime_gem_destroy(gem_obj, cma_obj->sgt); |
184 | } | 165 | } |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index af93cc55259f..a2d45b748f86 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -47,7 +47,44 @@ | |||
47 | #include <linux/seq_file.h> | 47 | #include <linux/seq_file.h> |
48 | #include <linux/export.h> | 48 | #include <linux/export.h> |
49 | 49 | ||
50 | #define MM_UNUSED_TARGET 4 | 50 | /** |
51 | * DOC: Overview | ||
52 | * | ||
53 | * drm_mm provides a simple range allocator. The drivers are free to use the | ||
54 | * resource allocator from the linux core if it suits them, the upside of drm_mm | ||
55 | * is that it's in the DRM core, which makes it easier to extend for | ||
56 | * some of the crazier special-purpose needs of GPUs. | ||
57 | * | ||
58 | * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node. | ||
59 | * Drivers are free to embed either of them into their own suitable | ||
60 | * data structures. drm_mm itself will not do any allocations of its own, so if | ||
61 | * drivers choose not to embed nodes they still need to allocate them | ||
62 | * themselves. | ||
63 | * | ||
64 | * The range allocator also supports reservation of preallocated blocks. This is | ||
65 | * useful for taking over initial mode setting configurations from the firmware, | ||
66 | * where an object needs to be created which exactly matches the firmware's | ||
67 | * scanout target. As long as the range is still free it can be inserted anytime | ||
68 | * after the allocator is initialized, which helps with avoiding looped | ||
69 | * dependencies in the driver load sequence. | ||
70 | * | ||
71 | * drm_mm maintains a stack of most recently freed holes, which of all | ||
72 | * simplistic data structures seems to be a fairly decent approach to clustering | ||
73 | * allocations and avoiding too much fragmentation. This means free space | ||
74 | * searches are O(num_holes). Given all the fancy features drm_mm supports, | ||
75 | * something better would be fairly complex, and since gfx thrashing is a fairly | ||
76 | * steep cliff it's not a real concern. Removing a node again is O(1). | ||
77 | * | ||
78 | * drm_mm supports a few features: Alignment and range restrictions can be | ||
79 | * supplied. Furthermore, every &drm_mm_node has a color value (which is just an | ||
80 | * opaque unsigned long) which in conjunction with a driver callback can be used | ||
81 | * to implement sophisticated placement restrictions. The i915 DRM driver uses | ||
82 | * this to implement guard pages between incompatible caching domains in the | ||
83 | * graphics TT. | ||
84 | * | ||
85 | * Finally iteration helpers to walk all nodes and all holes are provided as are | ||
86 | * some basic allocator dumpers for debugging. | ||
87 | */ | ||
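The embedding pattern described above, sketched (the foo_* names are hypothetical; the single search-flags parameter matches the signatures documented below):

        struct foo_buffer {
                struct drm_mm_node node;  /* embedded, no extra allocation */
                /* ... driver-private data ... */
        };

        static int foo_alloc_range(struct drm_mm *mm, struct foo_buffer *buf,
                                   unsigned long size)
        {
                /* nodes must be zeroed before insertion */
                memset(&buf->node, 0, sizeof(buf->node));

                return drm_mm_insert_node_generic(mm, &buf->node, size,
                                                  0 /* alignment */,
                                                  0 /* color */,
                                                  DRM_MM_SEARCH_DEFAULT);
        }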
51 | 88 | ||
52 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 89 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
53 | unsigned long size, | 90 | unsigned long size, |
@@ -107,6 +144,20 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
107 | } | 144 | } |
108 | } | 145 | } |
109 | 146 | ||
147 | /** | ||
148 | * drm_mm_reserve_node - insert a pre-initialized node | ||
149 | * @mm: drm_mm allocator to insert @node into | ||
150 | * @node: drm_mm_node to insert | ||
151 | * | ||
152 | * This functions inserts an already set-up drm_mm_node into the allocator, | ||
153 | * meaning that start, size and color must be set by the caller. This is useful | ||
154 | * to initialize the allocator with preallocated objects which must be set-up | ||
155 | * before the range allocator can be set-up, e.g. when taking over a firmware | ||
156 | * framebuffer. | ||
157 | * | ||
158 | * Returns: | ||
159 | * 0 on success, -ENOSPC if there's no hole where @node is. | ||
160 | */ | ||
110 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) | 161 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
111 | { | 162 | { |
112 | struct drm_mm_node *hole; | 163 | struct drm_mm_node *hole; |
@@ -148,9 +199,18 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) | |||
148 | EXPORT_SYMBOL(drm_mm_reserve_node); | 199 | EXPORT_SYMBOL(drm_mm_reserve_node); |
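The firmware-framebuffer takeover described in the kernel-doc, sketched (the start/size values are hypothetical, e.g. read from the firmware scanout setup):

        static int foo_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
                                     unsigned long start, unsigned long size)
        {
                memset(node, 0, sizeof(*node));
                node->start = start;    /* e.g. firmware scanout base */
                node->size = size;

                /* fails with -ENOSPC if the range is no longer free */
                return drm_mm_reserve_node(mm, node);
        }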
149 | 200 | ||
150 | /** | 201 | /** |
151 | * Search for free space and insert a preallocated memory node. Returns | 202 | * drm_mm_insert_node_generic - search for space and insert @node |
152 | * -ENOSPC if no suitable free area is available. The preallocated memory node | 203 | * @mm: drm_mm to allocate from |
153 | * must be cleared. | 204 | * @node: preallocated node to insert |
205 | * @size: size of the allocation | ||
206 | * @alignment: alignment of the allocation | ||
207 | * @color: opaque tag value to use for this node | ||
208 | * @flags: flags to fine-tune the allocation | ||
209 | * | ||
210 | * The preallocated node must be cleared to 0. | ||
211 | * | ||
212 | * Returns: | ||
213 | * 0 on success, -ENOSPC if there's no suitable hole. | ||
154 | */ | 214 | */ |
155 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, | 215 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
156 | unsigned long size, unsigned alignment, | 216 | unsigned long size, unsigned alignment, |
@@ -222,9 +282,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
222 | } | 282 | } |
223 | 283 | ||
224 | /** | 284 | /** |
225 | * Search for free space and insert a preallocated memory node. Returns | 285 | * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node |
226 | * -ENOSPC if no suitable free area is available. This is for range | 286 | * @mm: drm_mm to allocate from |
227 | * restricted allocations. The preallocated memory node must be cleared. | 287 | * @node: preallocated node to insert |
288 | * @size: size of the allocation | ||
289 | * @alignment: alignment of the allocation | ||
290 | * @color: opaque tag value to use for this node | ||
291 | * @start: start of the allowed range for this node | ||
292 | * @end: end of the allowed range for this node | ||
293 | * @flags: flags to fine-tune the allocation | ||
294 | * | ||
295 | * The preallocated node must be cleared to 0. | ||
296 | * | ||
297 | * Returns: | ||
298 | * 0 on success, -ENOSPC if there's no suitable hole. | ||
228 | */ | 299 | */ |
229 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, | 300 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, |
230 | unsigned long size, unsigned alignment, unsigned long color, | 301 | unsigned long size, unsigned alignment, unsigned long color, |
@@ -247,7 +318,12 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n | |||
247 | EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); | 318 | EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); |
248 | 319 | ||
249 | /** | 320 | /** |
250 | * Remove a memory node from the allocator. | 321 | * drm_mm_remove_node - Remove a memory node from the allocator. |
322 | * @node: drm_mm_node to remove | ||
323 | * | ||
324 | * This just removes a node from its drm_mm allocator. The node does not need to | ||
325 | * be cleared again before it can be re-inserted into this or any other drm_mm | ||
326 | * allocator. It is a bug to call this function on an un-allocated node. | ||
251 | */ | 327 | */ |
252 | void drm_mm_remove_node(struct drm_mm_node *node) | 328 | void drm_mm_remove_node(struct drm_mm_node *node) |
253 | { | 329 | { |
@@ -384,7 +460,13 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ | |||
384 | } | 460 | } |
385 | 461 | ||
386 | /** | 462 | /** |
387 | * Moves an allocation. To be used with embedded struct drm_mm_node. | 463 | * drm_mm_replace_node - move an allocation from @old to @new |
464 | * @old: drm_mm_node to remove from the allocator | ||
465 | * @new: drm_mm_node which should inherit @old's allocation | ||
466 | * | ||
467 | * This is useful for when drivers embed the drm_mm_node structure and hence | ||
468 | * can't move allocations by reassigning pointers. It's a combination of remove | ||
469 | * and insert with the guarantee that the allocation start will match. | ||
388 | */ | 470 | */ |
389 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) | 471 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) |
390 | { | 472 | { |
@@ -402,12 +484,46 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) | |||
402 | EXPORT_SYMBOL(drm_mm_replace_node); | 484 | EXPORT_SYMBOL(drm_mm_replace_node); |
403 | 485 | ||
404 | /** | 486 | /** |
405 | * Initializa lru scanning. | 487 | * DOC: lru scan roaster |
488 | * | ||
489 | * Very often GPUs need to have continuous allocations for a given object. When | ||
490 | * evicting objects to make space for a new one it is therefore not very | ||
491 | * efficient to simply select all objects from the tail of an LRU | ||
492 | * until there's a suitable hole: especially for big objects or nodes that | ||
493 | * otherwise have special allocation constraints there's a good chance we evict | ||
494 | * lots of (smaller) objects unnecessarily. | ||
495 | * | ||
496 | * The DRM range allocator supports this use-case through the scanning | ||
497 | * interfaces. First a scan operation needs to be initialized with | ||
498 | * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds | ||
499 | * objects to the roster (probably by walking an LRU list, but this can be | ||
500 | * freely implemented) until a suitable hole is found or there's no further | ||
501 | * evictable object. | ||
502 | * | ||
503 | * The the driver must walk through all objects again in exactly the reverse | ||
504 | * order to restore the allocator state. Note that while the allocator is used | ||
505 | * in the scan mode no other operation is allowed. | ||
506 | * | ||
507 | * Finally the driver evicts all objects selected in the scan. Adding and | ||
508 | * removing an object is O(1), and since freeing a node is also O(1) the overall | ||
509 | * complexity is O(scanned_objects). So, like the free stack which needs to be | ||
510 | * walked before a scan operation even begins, this is linear in the number of | ||
511 | * objects. It doesn't seem to hurt badly. | ||
512 | */ | ||
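[Editor's note] A sketch of the two-phase scan described above, assuming a driver-private LRU of objects with an embedded drm_mm_node; names, locking and eviction details are illustrative only:

	struct my_obj {
		struct drm_mm_node node;
		struct list_head lru;
	};

	static bool my_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				      unsigned long size, unsigned alignment)
	{
		struct my_obj *obj, *tmp;
		LIST_HEAD(scan_list);
		bool found = false;

		drm_mm_init_scan(mm, size, alignment, 0);

		/* Phase 1: feed the roster from the LRU until a hole shows up.
		 * list_move() prepends, so scan_list ends up in reverse
		 * addition order. */
		list_for_each_entry_safe(obj, tmp, lru, lru) {
			list_move(&obj->lru, &scan_list);
			if (drm_mm_scan_add_block(&obj->node)) {
				found = true;
				break;
			}
		}

		/* Phase 2: every added block must be removed again, in exactly
		 * the reverse order; evict the blocks the scan picked. */
		list_for_each_entry_safe(obj, tmp, &scan_list, lru) {
			bool evict = drm_mm_scan_remove_block(&obj->node);

			list_move(&obj->lru, lru);
			if (evict)
				drm_mm_remove_node(&obj->node); /* + driver unbind */
		}

		return found;
	}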
513 | |||
514 | /** | ||
515 | * drm_mm_init_scan - initialize lru scanning | ||
516 | * @mm: drm_mm to scan | ||
517 | * @size: size of the allocation | ||
518 | * @alignment: alignment of the allocation | ||
519 | * @color: opaque tag value to use for the allocation | ||
406 | * | 520 | * |
407 | * This simply sets up the scanning routines with the parameters for the desired | 521 | * This simply sets up the scanning routines with the parameters for the desired |
408 | * hole. | 522 | * hole. Note that there's no need to specify allocation flags, since they only |
523 | * change the place a node is allocated from within a suitable hole. | ||
409 | * | 524 | * |
410 | * Warning: As long as the scan list is non-empty, no other operations than | 525 | * Warning: |
526 | * As long as the scan list is non-empty, no other operations than | ||
411 | * adding/removing nodes to/from the scan list are allowed. | 527 | * adding/removing nodes to/from the scan list are allowed. |
412 | */ | 528 | */ |
413 | void drm_mm_init_scan(struct drm_mm *mm, | 529 | void drm_mm_init_scan(struct drm_mm *mm, |
@@ -427,12 +543,20 @@ void drm_mm_init_scan(struct drm_mm *mm, | |||
427 | EXPORT_SYMBOL(drm_mm_init_scan); | 543 | EXPORT_SYMBOL(drm_mm_init_scan); |
428 | 544 | ||
429 | /** | 545 | /** |
430 | * Initializa lru scanning. | 546 | * drm_mm_init_scan_with_range - initialize range-restricted lru scanning |
547 | * @mm: drm_mm to scan | ||
548 | * @size: size of the allocation | ||
549 | * @alignment: alignment of the allocation | ||
550 | * @color: opaque tag value to use for the allocation | ||
551 | * @start: start of the allowed range for the allocation | ||
552 | * @end: end of the allowed range for the allocation | ||
431 | * | 553 | * |
432 | * This simply sets up the scanning routines with the parameters for the desired | 554 | * This simply sets up the scanning routines with the parameters for the desired |
433 | * hole. This version is for range-restricted scans. | 555 | * hole. Note that there's no need to specify allocation flags, since they only |
556 | * change the place a node is allocated from within a suitable hole. | ||
434 | * | 557 | * |
435 | * Warning: As long as the scan list is non-empty, no other operations than | 558 | * Warning: |
559 | * As long as the scan list is non-empty, no other operations than | ||
436 | * adding/removing nodes to/from the scan list are allowed. | 560 | * adding/removing nodes to/from the scan list are allowed. |
437 | */ | 561 | */ |
438 | void drm_mm_init_scan_with_range(struct drm_mm *mm, | 562 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
@@ -456,12 +580,16 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, | |||
456 | EXPORT_SYMBOL(drm_mm_init_scan_with_range); | 580 | EXPORT_SYMBOL(drm_mm_init_scan_with_range); |
457 | 581 | ||
458 | /** | 582 | /** |
583 | * drm_mm_scan_add_block - add a node to the scan list | ||
584 | * @node: drm_mm_node to add | ||
585 | * | ||
459 | * Add a node to the scan list that might be freed to make space for the desired | 586 | * Add a node to the scan list that might be freed to make space for the desired |
460 | * hole. | 587 | * hole. |
461 | * | 588 | * |
462 | * Returns non-zero, if a hole has been found, zero otherwise. | 589 | * Returns: |
590 | * True if a hole has been found, false otherwise. | ||
463 | */ | 591 | */ |
464 | int drm_mm_scan_add_block(struct drm_mm_node *node) | 592 | bool drm_mm_scan_add_block(struct drm_mm_node *node) |
465 | { | 593 | { |
466 | struct drm_mm *mm = node->mm; | 594 | struct drm_mm *mm = node->mm; |
467 | struct drm_mm_node *prev_node; | 595 | struct drm_mm_node *prev_node; |
@@ -501,15 +629,16 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) | |||
501 | mm->scan_size, mm->scan_alignment)) { | 629 | mm->scan_size, mm->scan_alignment)) { |
502 | mm->scan_hit_start = hole_start; | 630 | mm->scan_hit_start = hole_start; |
503 | mm->scan_hit_end = hole_end; | 631 | mm->scan_hit_end = hole_end; |
504 | return 1; | 632 | return true; |
505 | } | 633 | } |
506 | 634 | ||
507 | return 0; | 635 | return false; |
508 | } | 636 | } |
509 | EXPORT_SYMBOL(drm_mm_scan_add_block); | 637 | EXPORT_SYMBOL(drm_mm_scan_add_block); |
510 | 638 | ||
511 | /** | 639 | /** |
512 | * Remove a node from the scan list. | 640 | * drm_mm_scan_remove_block - remove a node from the scan list |
641 | * @node: drm_mm_node to remove | ||
513 | * | 642 | * |
514 | * Nodes _must_ be removed in the exact same order from the scan list as they | 643 | * Nodes _must_ be removed in the exact same order from the scan list as they |
515 | * have been added, otherwise the internal state of the memory manager will be | 644 | * have been added, otherwise the internal state of the memory manager will be |
@@ -519,10 +648,11 @@ EXPORT_SYMBOL(drm_mm_scan_add_block); | |||
519 | * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then | 648 | * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then |
520 | * return the just freed block (because it's at the top of the free_stack list). | 649 | * return the just freed block (because it's at the top of the free_stack list). |
521 | * | 650 | * |
522 | * Returns one if this block should be evicted, zero otherwise. Will always | 651 | * Returns: |
523 | * return zero when no hole has been found. | 652 | * True if this block should be evicted, false otherwise. Will always |
653 | * return false when no hole has been found. | ||
524 | */ | 654 | */ |
525 | int drm_mm_scan_remove_block(struct drm_mm_node *node) | 655 | bool drm_mm_scan_remove_block(struct drm_mm_node *node) |
526 | { | 656 | { |
527 | struct drm_mm *mm = node->mm; | 657 | struct drm_mm *mm = node->mm; |
528 | struct drm_mm_node *prev_node; | 658 | struct drm_mm_node *prev_node; |
@@ -543,7 +673,15 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node) | |||
543 | } | 673 | } |
544 | EXPORT_SYMBOL(drm_mm_scan_remove_block); | 674 | EXPORT_SYMBOL(drm_mm_scan_remove_block); |
545 | 675 | ||
546 | int drm_mm_clean(struct drm_mm * mm) | 676 | /** |
677 | * drm_mm_clean - checks whether an allocator is clean | ||
678 | * @mm: drm_mm allocator to check | ||
679 | * | ||
680 | * Returns: | ||
681 | * True if the allocator is completely free, false if there's still a node | ||
682 | * allocated in it. | ||
683 | */ | ||
684 | bool drm_mm_clean(struct drm_mm * mm) | ||
547 | { | 685 | { |
548 | struct list_head *head = &mm->head_node.node_list; | 686 | struct list_head *head = &mm->head_node.node_list; |
549 | 687 | ||
@@ -551,6 +689,14 @@ int drm_mm_clean(struct drm_mm * mm) | |||
551 | } | 689 | } |
552 | EXPORT_SYMBOL(drm_mm_clean); | 690 | EXPORT_SYMBOL(drm_mm_clean); |
553 | 691 | ||
692 | /** | ||
693 | * drm_mm_init - initialize a drm-mm allocator | ||
694 | * @mm: the drm_mm structure to initialize | ||
695 | * @start: start of the range managed by @mm | ||
696 | * @size: end of the range managed by @mm | ||
697 | * | ||
698 | * Note that @mm must be cleared to 0 before calling this function. | ||
699 | */ | ||
554 | void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | 700 | void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) |
555 | { | 701 | { |
556 | INIT_LIST_HEAD(&mm->hole_stack); | 702 | INIT_LIST_HEAD(&mm->hole_stack); |
@@ -572,6 +718,13 @@ void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | |||
572 | } | 718 | } |
573 | EXPORT_SYMBOL(drm_mm_init); | 719 | EXPORT_SYMBOL(drm_mm_init); |
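[Editor's note] Init/teardown sketch matching the notes above; struct my_private and the 256 MiB range are illustrative:

	struct my_private {
		struct drm_mm mm;	/* must be zeroed before drm_mm_init() */
	};

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	drm_mm_init(&priv->mm, 0, 256 << 20);	/* manage [0, 256 MiB) */

	/* ... on unload, after all nodes have been removed: */
	drm_mm_takedown(&priv->mm);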
574 | 720 | ||
721 | /** | ||
722 | * drm_mm_takedown - clean up a drm_mm allocator | ||
723 | * @mm: drm_mm allocator to clean up | ||
724 | * | ||
725 | * Note that it is a bug to call this function on an allocator which is not | ||
726 | * clean. | ||
727 | */ | ||
575 | void drm_mm_takedown(struct drm_mm * mm) | 728 | void drm_mm_takedown(struct drm_mm * mm) |
576 | { | 729 | { |
577 | WARN(!list_empty(&mm->head_node.node_list), | 730 | WARN(!list_empty(&mm->head_node.node_list), |
@@ -597,6 +750,11 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | |||
597 | return 0; | 750 | return 0; |
598 | } | 751 | } |
599 | 752 | ||
753 | /** | ||
754 | * drm_mm_debug_table - dump allocator state to dmesg | ||
755 | * @mm: drm_mm allocator to dump | ||
756 | * @prefix: prefix to use for dumping to dmesg | ||
757 | */ | ||
600 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | 758 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
601 | { | 759 | { |
602 | struct drm_mm_node *entry; | 760 | struct drm_mm_node *entry; |
@@ -635,6 +793,11 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en | |||
635 | return 0; | 793 | return 0; |
636 | } | 794 | } |
637 | 795 | ||
796 | /** | ||
797 | * drm_mm_dump_table - dump allocator state to a seq_file | ||
798 | * @m: seq_file to dump to | ||
799 | * @mm: drm_mm allocator to dump | ||
800 | */ | ||
638 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | 801 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
639 | { | 802 | { |
640 | struct drm_mm_node *entry; | 803 | struct drm_mm_node *entry; |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index b0733153dfd2..8b410576fce4 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -37,15 +37,14 @@ | |||
37 | #include <drm/drm_crtc.h> | 37 | #include <drm/drm_crtc.h> |
38 | #include <video/of_videomode.h> | 38 | #include <video/of_videomode.h> |
39 | #include <video/videomode.h> | 39 | #include <video/videomode.h> |
40 | #include <drm/drm_modes.h> | ||
41 | |||
42 | #include "drm_crtc_internal.h" | ||
40 | 43 | ||
41 | /** | 44 | /** |
42 | * drm_mode_debug_printmodeline - debug print a mode | 45 | * drm_mode_debug_printmodeline - print a mode to dmesg |
43 | * @dev: DRM device | ||
44 | * @mode: mode to print | 46 | * @mode: mode to print |
45 | * | 47 | * |
46 | * LOCKING: | ||
47 | * None. | ||
48 | * | ||
49 | * Describe @mode using DRM_DEBUG. | 48 | * Describe @mode using DRM_DEBUG. |
50 | */ | 49 | */ |
51 | void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) | 50 | void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) |
@@ -61,18 +60,77 @@ void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) | |||
61 | EXPORT_SYMBOL(drm_mode_debug_printmodeline); | 60 | EXPORT_SYMBOL(drm_mode_debug_printmodeline); |
62 | 61 | ||
63 | /** | 62 | /** |
64 | * drm_cvt_mode -create a modeline based on CVT algorithm | 63 | * drm_mode_create - create a new display mode |
65 | * @dev: DRM device | 64 | * @dev: DRM device |
66 | * @hdisplay: hdisplay size | ||
67 | * @vdisplay: vdisplay size | ||
68 | * @vrefresh : vrefresh rate | ||
69 | * @reduced : Whether the GTF calculation is simplified | ||
70 | * @interlaced:Whether the interlace is supported | ||
71 | * | 65 | * |
72 | * LOCKING: | 66 | * Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it |
73 | * none. | 67 | * and return it. |
74 | * | 68 | * |
75 | * return the modeline based on CVT algorithm | 69 | * Returns: |
70 | * Pointer to new mode on success, NULL on error. | ||
71 | */ | ||
72 | struct drm_display_mode *drm_mode_create(struct drm_device *dev) | ||
73 | { | ||
74 | struct drm_display_mode *nmode; | ||
75 | |||
76 | nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL); | ||
77 | if (!nmode) | ||
78 | return NULL; | ||
79 | |||
80 | if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) { | ||
81 | kfree(nmode); | ||
82 | return NULL; | ||
83 | } | ||
84 | |||
85 | return nmode; | ||
86 | } | ||
87 | EXPORT_SYMBOL(drm_mode_create); | ||
88 | |||
89 | /** | ||
90 | * drm_mode_destroy - remove a mode | ||
91 | * @dev: DRM device | ||
92 | * @mode: mode to remove | ||
93 | * | ||
94 | * Release @mode's unique ID, then free the @mode structure itself using kfree. | ||
95 | */ | ||
96 | void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) | ||
97 | { | ||
98 | if (!mode) | ||
99 | return; | ||
100 | |||
101 | drm_mode_object_put(dev, &mode->base); | ||
102 | |||
103 | kfree(mode); | ||
104 | } | ||
105 | EXPORT_SYMBOL(drm_mode_destroy); | ||
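[Editor's note] The create/destroy pair above is typically used like this; timings elided:

	struct drm_display_mode *mode = drm_mode_create(dev);

	if (!mode)
		return -ENOMEM;
	/* ... fill in timings, set the name with drm_mode_set_name() ... */
	drm_mode_destroy(dev, mode);	/* drops the ID and kfrees the mode */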
106 | |||
107 | /** | ||
108 | * drm_mode_probed_add - add a mode to a connector's probed_mode list | ||
109 | * @connector: connector the new mode is added to | ||
110 | * @mode: mode data | ||
111 | * | ||
112 | * Add @mode to @connector's probed_modes list for later use. This list should | ||
113 | * then be filtered in a second step, with all the modes actually supported by | ||
114 | * the hardware moved to @connector's modes list. | ||
115 | */ | ||
116 | void drm_mode_probed_add(struct drm_connector *connector, | ||
117 | struct drm_display_mode *mode) | ||
118 | { | ||
119 | WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex)); | ||
120 | |||
121 | list_add_tail(&mode->head, &connector->probed_modes); | ||
122 | } | ||
123 | EXPORT_SYMBOL(drm_mode_probed_add); | ||
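[Editor's note] A sketch of the intended use from a connector's ->get_modes() hook; my_fixed_mode is a hypothetical driver-provided template:

	static int my_connector_get_modes(struct drm_connector *connector)
	{
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev, &my_fixed_mode);
		if (!mode)
			return 0;

		/* mode_config.mutex is held by the probe helpers here */
		drm_mode_probed_add(connector, mode);
		return 1;
	}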
124 | |||
125 | /** | ||
126 | * drm_cvt_mode - create a modeline based on the CVT algorithm | ||
127 | * @dev: drm device | ||
128 | * @hdisplay: hdisplay size | ||
129 | * @vdisplay: vdisplay size | ||
130 | * @vrefresh: vrefresh rate | ||
131 | * @reduced: whether to use reduced blanking | ||
132 | * @interlaced: whether to compute an interlaced mode | ||
133 | * @margins: whether to add margins (borders) | ||
76 | * | 134 | * |
77 | * This function is called to generate the modeline based on the CVT algorithm | 135 | * This function is called to generate the modeline based on the CVT algorithm |
78 | * according to the hdisplay, vdisplay, vrefresh. | 136 | * according to the hdisplay, vdisplay, vrefresh. |
@@ -82,12 +140,17 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline); | |||
82 | * | 140 | * |
83 | * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. | 141 | * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. |
84 | * What I have done is to translate it by using integer calculation. | 142 | * What I have done is to translate it by using integer calculation. |
143 | * | ||
144 | * Returns: | ||
145 | * The modeline based on the CVT algorithm stored in a drm_display_mode object. | ||
146 | * The display mode object is allocated with drm_mode_create(). Returns NULL | ||
147 | * when no mode could be allocated. | ||
85 | */ | 148 | */ |
86 | #define HV_FACTOR 1000 | ||
87 | struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, | 149 | struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, |
88 | int vdisplay, int vrefresh, | 150 | int vdisplay, int vrefresh, |
89 | bool reduced, bool interlaced, bool margins) | 151 | bool reduced, bool interlaced, bool margins) |
90 | { | 152 | { |
153 | #define HV_FACTOR 1000 | ||
91 | /* 1) top/bottom margin size (% of height) - default: 1.8, */ | 154 | /* 1) top/bottom margin size (% of height) - default: 1.8, */ |
92 | #define CVT_MARGIN_PERCENTAGE 18 | 155 | #define CVT_MARGIN_PERCENTAGE 18 |
93 | /* 2) character cell horizontal granularity (pixels) - default 8 */ | 156 | /* 2) character cell horizontal granularity (pixels) - default 8 */ |
@@ -281,23 +344,25 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, | |||
281 | EXPORT_SYMBOL(drm_cvt_mode); | 344 | EXPORT_SYMBOL(drm_cvt_mode); |
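[Editor's note] Example call matching the parameters documented above; the values are arbitrary:

	/* 1920x1080 at 60 Hz, reduced blanking, progressive, no margins */
	struct drm_display_mode *mode =
		drm_cvt_mode(dev, 1920, 1080, 60, true, false, false);

	if (!mode)
		return -ENOMEM;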
282 | 345 | ||
283 | /** | 346 | /** |
284 | * drm_gtf_mode_complex - create the modeline based on full GTF algorithm | 347 | * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm |
285 | * | 348 | * @dev: drm device |
286 | * @dev :drm device | 349 | * @hdisplay: hdisplay size |
287 | * @hdisplay :hdisplay size | 350 | * @vdisplay: vdisplay size |
288 | * @vdisplay :vdisplay size | 351 | * @vrefresh: vrefresh rate. |
289 | * @vrefresh :vrefresh rate. | 352 | * @interlaced: whether to compute an interlaced mode |
290 | * @interlaced :whether the interlace is supported | 353 | * @margins: desired margin (borders) size |
291 | * @margins :desired margin size | 354 | * @GTF_M: extended GTF formula parameters |
292 | * @GTF_[MCKJ] :extended GTF formula parameters | 355 | * @GTF_2C: extended GTF formula parameters |
293 | * | 356 | * @GTF_K: extended GTF formula parameters |
294 | * LOCKING. | 357 | * @GTF_2J: extended GTF formula parameters |
295 | * none. | ||
296 | * | ||
297 | * return the modeline based on full GTF algorithm. | ||
298 | * | 358 | * |
299 | * GTF feature blocks specify C and J in multiples of 0.5, so we pass them | 359 | * GTF feature blocks specify C and J in multiples of 0.5, so we pass them |
300 | * in here multiplied by two. For a C of 40, pass in 80. | 360 | * in here multiplied by two. For a C of 40, pass in 80. |
361 | * | ||
362 | * Returns: | ||
363 | * The modeline based on the full GTF algorithm stored in a drm_display_mode object. | ||
364 | * The display mode object is allocated with drm_mode_create(). Returns NULL | ||
365 | * when no mode could be allocated. | ||
301 | */ | 366 | */ |
302 | struct drm_display_mode * | 367 | struct drm_display_mode * |
303 | drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, | 368 | drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, |
@@ -467,17 +532,13 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, | |||
467 | EXPORT_SYMBOL(drm_gtf_mode_complex); | 532 | EXPORT_SYMBOL(drm_gtf_mode_complex); |
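[Editor's note] Since C and J are passed in doubled, the default GTF parameters (M=600, C=40, K=128, J=20) documented further down are supplied as follows; this is exactly what drm_gtf_mode() below does (the display parameters are as in the caller):

	struct drm_display_mode *mode;

	mode = drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
				    interlaced, margins,
				    600, 40 * 2, 128, 20 * 2);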
468 | 533 | ||
469 | /** | 534 | /** |
470 | * drm_gtf_mode - create the modeline based on GTF algorithm | 535 | * drm_gtf_mode - create the modeline based on the GTF algorithm |
471 | * | 536 | * @dev: drm device |
472 | * @dev :drm device | 537 | * @hdisplay: hdisplay size |
473 | * @hdisplay :hdisplay size | 538 | * @vdisplay: vdisplay size |
474 | * @vdisplay :vdisplay size | 539 | * @vrefresh: vrefresh rate. |
475 | * @vrefresh :vrefresh rate. | 540 | * @interlaced: whether to compute an interlaced mode |
476 | * @interlaced :whether the interlace is supported | 541 | * @margins: desired margin (borders) size |
477 | * @margins :whether the margin is supported | ||
478 | * | ||
479 | * LOCKING. | ||
480 | * none. | ||
481 | * | 542 | * |
482 | * return the modeline based on GTF algorithm | 543 | * return the modeline based on GTF algorithm |
483 | * | 544 | * |
@@ -496,19 +557,32 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); | |||
496 | * C = 40 | 557 | * C = 40 |
497 | * K = 128 | 558 | * K = 128 |
498 | * J = 20 | 559 | * J = 20 |
560 | * | ||
561 | * Returns: | ||
562 | * The modeline based on the GTF algorithm stored in a drm_display_mode object. | ||
563 | * The display mode object is allocated with drm_mode_create(). Returns NULL | ||
564 | * when no mode could be allocated. | ||
499 | */ | 565 | */ |
500 | struct drm_display_mode * | 566 | struct drm_display_mode * |
501 | drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, | 567 | drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, |
502 | bool lace, int margins) | 568 | bool interlaced, int margins) |
503 | { | 569 | { |
504 | return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, | 570 | return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, |
505 | margins, 600, 40 * 2, 128, 20 * 2); | 571 | interlaced, margins, |
572 | 600, 40 * 2, 128, 20 * 2); | ||
506 | } | 573 | } |
507 | EXPORT_SYMBOL(drm_gtf_mode); | 574 | EXPORT_SYMBOL(drm_gtf_mode); |
508 | 575 | ||
509 | #ifdef CONFIG_VIDEOMODE_HELPERS | 576 | #ifdef CONFIG_VIDEOMODE_HELPERS |
510 | int drm_display_mode_from_videomode(const struct videomode *vm, | 578 | * drm_display_mode_from_videomode - fill in @dmode using @vm |
511 | struct drm_display_mode *dmode) | 578 | * drm_display_mode_from_videomode - fill in @dmode using @vm, |
579 | * @vm: videomode structure to use as source | ||
580 | * @dmode: drm_display_mode structure to use as destination | ||
581 | * | ||
582 | * Fills out @dmode using the display mode specified in @vm. | ||
583 | */ | ||
584 | void drm_display_mode_from_videomode(const struct videomode *vm, | ||
585 | struct drm_display_mode *dmode) | ||
512 | { | 586 | { |
513 | dmode->hdisplay = vm->hactive; | 587 | dmode->hdisplay = vm->hactive; |
514 | dmode->hsync_start = dmode->hdisplay + vm->hfront_porch; | 588 | dmode->hsync_start = dmode->hdisplay + vm->hfront_porch; |
@@ -538,8 +612,6 @@ int drm_display_mode_from_videomode(const struct videomode *vm, | |||
538 | if (vm->flags & DISPLAY_FLAGS_DOUBLECLK) | 612 | if (vm->flags & DISPLAY_FLAGS_DOUBLECLK) |
539 | dmode->flags |= DRM_MODE_FLAG_DBLCLK; | 613 | dmode->flags |= DRM_MODE_FLAG_DBLCLK; |
540 | drm_mode_set_name(dmode); | 614 | drm_mode_set_name(dmode); |
541 | |||
542 | return 0; | ||
543 | } | 615 | } |
544 | EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode); | 616 | EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode); |
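[Editor's note] Conversion sketch; the videomode would typically come from device tree timings, error handling elided:

	struct videomode vm;
	struct drm_display_mode *dmode = drm_mode_create(dev);

	/* ... fill vm, e.g. with videomode_from_timings() ... */
	if (dmode)
		drm_display_mode_from_videomode(&vm, dmode);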
545 | 617 | ||
@@ -553,6 +625,9 @@ EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode); | |||
553 | * This function is expensive and should only be used, if only one mode is to be | 625 | * This function is expensive and should only be used, if only one mode is to be |
554 | * read from DT. To get multiple modes start with of_get_display_timings and | 626 | * read from DT. To get multiple modes start with of_get_display_timings and |
555 | * work with that instead. | 627 | * work with that instead. |
628 | * | ||
629 | * Returns: | ||
630 | * 0 on success, a negative errno code when no OF videomode node was found. | ||
556 | */ | 631 | */ |
557 | int of_get_drm_display_mode(struct device_node *np, | 632 | int of_get_drm_display_mode(struct device_node *np, |
558 | struct drm_display_mode *dmode, int index) | 633 | struct drm_display_mode *dmode, int index) |
@@ -580,10 +655,8 @@ EXPORT_SYMBOL_GPL(of_get_drm_display_mode); | |||
580 | * drm_mode_set_name - set the name on a mode | 655 | * drm_mode_set_name - set the name on a mode |
581 | * @mode: name will be set in this mode | 656 | * @mode: name will be set in this mode |
582 | * | 657 | * |
583 | * LOCKING: | 658 | * Set the name of @mode to a standard format which is <hdisplay>x<vdisplay> |
584 | * None. | 659 | * with an optional 'i' suffix for interlaced modes. |
585 | * | ||
586 | * Set the name of @mode to a standard format. | ||
587 | */ | 660 | */ |
588 | void drm_mode_set_name(struct drm_display_mode *mode) | 661 | void drm_mode_set_name(struct drm_display_mode *mode) |
589 | { | 662 | { |
@@ -595,54 +668,12 @@ void drm_mode_set_name(struct drm_display_mode *mode) | |||
595 | } | 668 | } |
596 | EXPORT_SYMBOL(drm_mode_set_name); | 669 | EXPORT_SYMBOL(drm_mode_set_name); |
597 | 670 | ||
598 | /** | ||
599 | * drm_mode_width - get the width of a mode | ||
600 | * @mode: mode | ||
601 | * | ||
602 | * LOCKING: | ||
603 | * None. | ||
604 | * | ||
605 | * Return @mode's width (hdisplay) value. | ||
606 | * | ||
607 | * FIXME: is this needed? | ||
608 | * | ||
609 | * RETURNS: | ||
610 | * @mode->hdisplay | ||
611 | */ | ||
612 | int drm_mode_width(const struct drm_display_mode *mode) | ||
613 | { | ||
614 | return mode->hdisplay; | ||
615 | |||
616 | } | ||
617 | EXPORT_SYMBOL(drm_mode_width); | ||
618 | |||
619 | /** | ||
620 | * drm_mode_height - get the height of a mode | ||
621 | * @mode: mode | ||
622 | * | ||
623 | * LOCKING: | ||
624 | * None. | ||
625 | * | ||
626 | * Return @mode's height (vdisplay) value. | ||
627 | * | ||
628 | * FIXME: is this needed? | ||
629 | * | ||
630 | * RETURNS: | ||
631 | * @mode->vdisplay | ||
632 | */ | ||
633 | int drm_mode_height(const struct drm_display_mode *mode) | ||
634 | { | ||
635 | return mode->vdisplay; | ||
636 | } | ||
637 | EXPORT_SYMBOL(drm_mode_height); | ||
638 | |||
639 | /** drm_mode_hsync - get the hsync of a mode | 671 | /** drm_mode_hsync - get the hsync of a mode |
640 | * @mode: mode | 672 | * @mode: mode |
641 | * | 673 | * |
642 | * LOCKING: | 674 | * Returns: |
643 | * None. | 675 | * @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the |
644 | * | 676 | * value first if it is not yet set. |
645 | * Return @modes's hsync rate in kHz, rounded to the nearest int. | ||
646 | */ | 677 | */ |
647 | int drm_mode_hsync(const struct drm_display_mode *mode) | 678 | int drm_mode_hsync(const struct drm_display_mode *mode) |
648 | { | 679 | { |
@@ -666,17 +697,9 @@ EXPORT_SYMBOL(drm_mode_hsync); | |||
666 | * drm_mode_vrefresh - get the vrefresh of a mode | 697 | * drm_mode_vrefresh - get the vrefresh of a mode |
667 | * @mode: mode | 698 | * @mode: mode |
668 | * | 699 | * |
669 | * LOCKING: | 700 | * Returns: |
670 | * None. | 701 | * @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the |
671 | * | 702 | * value first if it is not yet set. |
672 | * Return @mode's vrefresh rate in Hz or calculate it if necessary. | ||
673 | * | ||
674 | * FIXME: why is this needed? shouldn't vrefresh be set already? | ||
675 | * | ||
676 | * RETURNS: | ||
677 | * Vertical refresh rate. It will be the result of actual value plus 0.5. | ||
678 | * If it is 70.288, it will return 70Hz. | ||
679 | * If it is 59.6, it will return 60Hz. | ||
680 | */ | 703 | */ |
681 | int drm_mode_vrefresh(const struct drm_display_mode *mode) | 704 | int drm_mode_vrefresh(const struct drm_display_mode *mode) |
682 | { | 705 | { |
@@ -705,14 +728,11 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode) | |||
705 | EXPORT_SYMBOL(drm_mode_vrefresh); | 728 | EXPORT_SYMBOL(drm_mode_vrefresh); |
706 | 729 | ||
707 | /** | 730 | /** |
708 | * drm_mode_set_crtcinfo - set CRTC modesetting parameters | 731 | * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters |
709 | * @p: mode | 732 | * @p: mode |
710 | * @adjust_flags: a combination of adjustment flags | 733 | * @adjust_flags: a combination of adjustment flags |
711 | * | 734 | * |
712 | * LOCKING: | 735 | * Setup the CRTC modesetting timing parameters for @p, adjusting if necessary. |
713 | * None. | ||
714 | * | ||
715 | * Setup the CRTC modesetting parameters for @p, adjusting if necessary. | ||
716 | * | 736 | * |
717 | * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of | 737 | * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of |
718 | * interlaced modes. | 738 | * interlaced modes. |
@@ -780,15 +800,11 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) | |||
780 | } | 800 | } |
781 | EXPORT_SYMBOL(drm_mode_set_crtcinfo); | 801 | EXPORT_SYMBOL(drm_mode_set_crtcinfo); |
782 | 802 | ||
783 | |||
784 | /** | 803 | /** |
785 | * drm_mode_copy - copy the mode | 804 | * drm_mode_copy - copy the mode |
786 | * @dst: mode to overwrite | 805 | * @dst: mode to overwrite |
787 | * @src: mode to copy | 806 | * @src: mode to copy |
788 | * | 807 | * |
789 | * LOCKING: | ||
790 | * None. | ||
791 | * | ||
792 | * Copy an existing mode into another mode, preserving the object id and | 808 | * Copy an existing mode into another mode, preserving the object id and |
793 | * list head of the destination mode. | 809 | * list head of the destination mode. |
794 | */ | 810 | */ |
@@ -805,13 +821,14 @@ EXPORT_SYMBOL(drm_mode_copy); | |||
805 | 821 | ||
806 | /** | 822 | /** |
807 | * drm_mode_duplicate - allocate and duplicate an existing mode | 823 | * drm_mode_duplicate - allocate and duplicate an existing mode |
808 | * @m: mode to duplicate | 824 | * @dev: drm_device to allocate the duplicated mode for |
809 | * | 825 | * @mode: mode to duplicate |
810 | * LOCKING: | ||
811 | * None. | ||
812 | * | 826 | * |
813 | * Just allocate a new mode, copy the existing mode into it, and return | 827 | * Just allocate a new mode, copy the existing mode into it, and return |
814 | * a pointer to it. Used to create new instances of established modes. | 828 | * a pointer to it. Used to create new instances of established modes. |
829 | * | ||
830 | * Returns: | ||
831 | * Pointer to duplicated mode on success, NULL on error. | ||
815 | */ | 832 | */ |
816 | struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | 833 | struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
817 | const struct drm_display_mode *mode) | 834 | const struct drm_display_mode *mode) |
@@ -833,12 +850,9 @@ EXPORT_SYMBOL(drm_mode_duplicate); | |||
833 | * @mode1: first mode | 850 | * @mode1: first mode |
834 | * @mode2: second mode | 851 | * @mode2: second mode |
835 | * | 852 | * |
836 | * LOCKING: | ||
837 | * None. | ||
838 | * | ||
839 | * Check to see if @mode1 and @mode2 are equivalent. | 853 | * Check to see if @mode1 and @mode2 are equivalent. |
840 | * | 854 | * |
841 | * RETURNS: | 855 | * Returns: |
842 | * True if the modes are equal, false otherwise. | 856 | * True if the modes are equal, false otherwise. |
843 | */ | 857 | */ |
844 | bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) | 858 | bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) |
@@ -864,13 +878,10 @@ EXPORT_SYMBOL(drm_mode_equal); | |||
864 | * @mode1: first mode | 878 | * @mode1: first mode |
865 | * @mode2: second mode | 879 | * @mode2: second mode |
866 | * | 880 | * |
867 | * LOCKING: | ||
868 | * None. | ||
869 | * | ||
870 | * Check to see if @mode1 and @mode2 are equivalent, but | 881 | * Check to see if @mode1 and @mode2 are equivalent, but |
871 | * don't check the pixel clocks nor the stereo layout. | 882 | * don't check the pixel clocks nor the stereo layout. |
872 | * | 883 | * |
873 | * RETURNS: | 884 | * Returns: |
874 | * True if the modes are equal, false otherwise. | 885 | * True if the modes are equal, false otherwise. |
875 | */ | 886 | */ |
876 | bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, | 887 | bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, |
@@ -900,25 +911,19 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo); | |||
900 | * @mode_list: list of modes to check | 911 | * @mode_list: list of modes to check |
901 | * @maxX: maximum width | 912 | * @maxX: maximum width |
902 | * @maxY: maximum height | 913 | * @maxY: maximum height |
903 | * @maxPitch: max pitch | ||
904 | * | 914 | * |
905 | * LOCKING: | 915 | * This function is a helper which can be used to validate modes against size |
906 | * Caller must hold a lock protecting @mode_list. | 916 | * limitations of the DRM device/connector. If a mode is too big its status |
907 | * | 917 | * member is updated with the appropriate validation failure code. The list |
908 | * The DRM device (@dev) has size and pitch limits. Here we validate the | 918 | * itself is not changed. |
909 | * modes we probed for @dev against those limits and set their status as | ||
910 | * necessary. | ||
911 | */ | 919 | */ |
912 | void drm_mode_validate_size(struct drm_device *dev, | 920 | void drm_mode_validate_size(struct drm_device *dev, |
913 | struct list_head *mode_list, | 921 | struct list_head *mode_list, |
914 | int maxX, int maxY, int maxPitch) | 922 | int maxX, int maxY) |
915 | { | 923 | { |
916 | struct drm_display_mode *mode; | 924 | struct drm_display_mode *mode; |
917 | 925 | ||
918 | list_for_each_entry(mode, mode_list, head) { | 926 | list_for_each_entry(mode, mode_list, head) { |
919 | if (maxPitch > 0 && mode->hdisplay > maxPitch) | ||
920 | mode->status = MODE_BAD_WIDTH; | ||
921 | |||
922 | if (maxX > 0 && mode->hdisplay > maxX) | 927 | if (maxX > 0 && mode->hdisplay > maxX) |
923 | mode->status = MODE_VIRTUAL_X; | 928 | mode->status = MODE_VIRTUAL_X; |
924 | 929 | ||
@@ -934,12 +939,10 @@ EXPORT_SYMBOL(drm_mode_validate_size); | |||
934 | * @mode_list: list of modes to check | 939 | * @mode_list: list of modes to check |
935 | * @verbose: be verbose about it | 940 | * @verbose: be verbose about it |
936 | * | 941 | * |
937 | * LOCKING: | 942 | * This helper function can be used to prune a display mode list after |
938 | * Caller must hold a lock protecting @mode_list. | 943 | * validation has been completed. All modes whose status is not MODE_OK will be |
939 | * | 944 | * removed from the list, and if @verbose the status code and mode name are also |
940 | * Once mode list generation is complete, a caller can use this routine to | 945 | * printed to dmesg. |
941 | * remove invalid modes from a mode list. If any of the modes have a | ||
942 | * status other than %MODE_OK, they are removed from @mode_list and freed. | ||
943 | */ | 946 | */ |
944 | void drm_mode_prune_invalid(struct drm_device *dev, | 947 | void drm_mode_prune_invalid(struct drm_device *dev, |
945 | struct list_head *mode_list, bool verbose) | 948 | struct list_head *mode_list, bool verbose) |
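[Editor's note] The two helpers above are meant to run back to back after probing; a sketch, with illustrative limits:

	/* flag modes that exceed the hardware limits ... */
	drm_mode_validate_size(dev, &connector->modes, max_width, max_height);
	/* ... then drop everything whose status is not MODE_OK, verbosely */
	drm_mode_prune_invalid(dev, &connector->modes, true);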
@@ -966,13 +969,10 @@ EXPORT_SYMBOL(drm_mode_prune_invalid); | |||
966 | * @lh_a: list_head for first mode | 969 | * @lh_a: list_head for first mode |
967 | * @lh_b: list_head for second mode | 970 | * @lh_b: list_head for second mode |
968 | * | 971 | * |
969 | * LOCKING: | ||
970 | * None. | ||
971 | * | ||
972 | * Compare two modes, given by @lh_a and @lh_b, returning a value indicating | 972 | * Compare two modes, given by @lh_a and @lh_b, returning a value indicating |
973 | * which is better. | 973 | * which is better. |
974 | * | 974 | * |
975 | * RETURNS: | 975 | * Returns: |
976 | * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or | 976 | * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or |
977 | * positive if @lh_b is better than @lh_a. | 977 | * positive if @lh_b is better than @lh_a. |
978 | */ | 978 | */ |
@@ -1000,12 +1000,9 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head | |||
1000 | 1000 | ||
1001 | /** | 1001 | /** |
1002 | * drm_mode_sort - sort mode list | 1002 | * drm_mode_sort - sort mode list |
1003 | * @mode_list: list to sort | 1003 | * @mode_list: list of drm_display_mode structures to sort |
1004 | * | 1004 | * |
1005 | * LOCKING: | 1005 | * Sort @mode_list by favorability, moving good modes to the head of the list. |
1006 | * Caller must hold a lock protecting @mode_list. | ||
1007 | * | ||
1008 | * Sort @mode_list by favorability, putting good modes first. | ||
1009 | */ | 1006 | */ |
1010 | void drm_mode_sort(struct list_head *mode_list) | 1007 | void drm_mode_sort(struct list_head *mode_list) |
1011 | { | 1008 | { |
@@ -1017,13 +1014,12 @@ EXPORT_SYMBOL(drm_mode_sort); | |||
1017 | * drm_mode_connector_list_update - update the mode list for the connector | 1014 | * drm_mode_connector_list_update - update the mode list for the connector |
1018 | * @connector: the connector to update | 1015 | * @connector: the connector to update |
1019 | * | 1016 | * |
1020 | * LOCKING: | ||
1021 | * Caller must hold a lock protecting @mode_list. | ||
1022 | * | ||
1023 | * This moves the modes from the @connector probed_modes list | 1017 | * This moves the modes from the @connector probed_modes list |
1024 | * to the actual mode list. It compares the probed mode against the current | 1018 | * to the actual mode list. It compares the probed mode against the current |
1025 | * list and only adds different modes. All modes unverified after this point | 1019 | * list and only adds different/new modes. |
1026 | * will be removed by the prune invalid modes. | 1020 | * |
1021 | * This is just a helper functions doesn't validate any modes itself and also | ||
1022 | * doesn't prune any invalid modes. Callers need to do that themselves. | ||
1027 | */ | 1023 | */ |
1028 | void drm_mode_connector_list_update(struct drm_connector *connector) | 1024 | void drm_mode_connector_list_update(struct drm_connector *connector) |
1029 | { | 1025 | { |
@@ -1031,6 +1027,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector) | |||
1031 | struct drm_display_mode *pmode, *pt; | 1027 | struct drm_display_mode *pmode, *pt; |
1032 | int found_it; | 1028 | int found_it; |
1033 | 1029 | ||
1030 | WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex)); | ||
1031 | |||
1034 | list_for_each_entry_safe(pmode, pt, &connector->probed_modes, | 1032 | list_for_each_entry_safe(pmode, pt, &connector->probed_modes, |
1035 | head) { | 1033 | head) { |
1036 | found_it = 0; | 1034 | found_it = 0; |
@@ -1056,17 +1054,25 @@ void drm_mode_connector_list_update(struct drm_connector *connector) | |||
1056 | EXPORT_SYMBOL(drm_mode_connector_list_update); | 1054 | EXPORT_SYMBOL(drm_mode_connector_list_update); |
1057 | 1055 | ||
1058 | /** | 1056 | /** |
1059 | * drm_mode_parse_command_line_for_connector - parse command line for connector | 1057 | * drm_mode_parse_command_line_for_connector - parse command line modeline for connector |
1060 | * @mode_option - per connector mode option | 1058 | * @mode_option: optional per connector mode option |
1061 | * @connector - connector to parse line for | 1059 | * @connector: connector to parse modeline for |
1060 | * @mode: preallocated drm_cmdline_mode structure to fill out | ||
1061 | * | ||
1062 | * This parses @mode_option command line modeline for modes and options to | ||
1063 | * configure the connector. If @mode_option is NULL the default command line | ||
1064 | * modeline in fb_mode_option will be parsed instead. | ||
1062 | * | 1065 | * |
1063 | * This parses the connector specific then generic command lines for | 1066 | * This uses the same parameters as the fb modedb.c, except for an extra |
1064 | * modes and options to configure the connector. | 1067 | * force-enable, force-enable-digital and force-disable bit at the end: |
1065 | * | 1068 | * |
1066 | * This uses the same parameters as the fb modedb.c, except for extra | ||
1067 | * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] | 1069 | * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] |
1068 | * | 1070 | * |
1069 | * enable/enable Digital/disable bit at the end | 1071 | * The intermediate drm_cmdline_mode structure is required to store additional |
1072 | * options from the command line modeline like the force-enable/disable flag. | ||
1073 | * | ||
1074 | * Returns: | ||
1075 | * True if a valid modeline has been parsed, false otherwise. | ||
1070 | */ | 1076 | */ |
1071 | bool drm_mode_parse_command_line_for_connector(const char *mode_option, | 1077 | bool drm_mode_parse_command_line_for_connector(const char *mode_option, |
1072 | struct drm_connector *connector, | 1078 | struct drm_connector *connector, |
@@ -1219,6 +1225,14 @@ done: | |||
1219 | } | 1225 | } |
1220 | EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector); | 1226 | EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector); |
1221 | 1227 | ||
1228 | /** | ||
1229 | * drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode | ||
1230 | * @dev: DRM device to create the new mode for | ||
1231 | * @cmd: input command line modeline | ||
1232 | * | ||
1233 | * Returns: | ||
1234 | * Pointer to converted mode on success, NULL on error. | ||
1235 | */ | ||
1222 | struct drm_display_mode * | 1236 | struct drm_display_mode * |
1223 | drm_mode_create_from_cmdline_mode(struct drm_device *dev, | 1237 | drm_mode_create_from_cmdline_mode(struct drm_device *dev, |
1224 | struct drm_cmdline_mode *cmd) | 1238 | struct drm_cmdline_mode *cmd) |
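[Editor's note] Parsing and conversion go together; a sketch using a made-up modeline in the format documented above:

	struct drm_cmdline_mode cmdline;
	struct drm_display_mode *mode = NULL;

	/* "1920x1080@60e": 1080p at 60 Hz, force-enabled */
	if (drm_mode_parse_command_line_for_connector("1920x1080@60e",
						      connector, &cmdline))
		mode = drm_mode_create_from_cmdline_mode(connector->dev,
							 &cmdline);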
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 5736aaa7e86c..9c696a5ad74d 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -351,7 +351,7 @@ err_agp: | |||
351 | drm_pci_agp_destroy(dev); | 351 | drm_pci_agp_destroy(dev); |
352 | pci_disable_device(pdev); | 352 | pci_disable_device(pdev); |
353 | err_free: | 353 | err_free: |
354 | drm_dev_free(dev); | 354 | drm_dev_unref(dev); |
355 | return ret; | 355 | return ret; |
356 | } | 356 | } |
357 | EXPORT_SYMBOL(drm_get_pci_dev); | 357 | EXPORT_SYMBOL(drm_get_pci_dev); |
@@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) | |||
468 | } else { | 468 | } else { |
469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, | 469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, |
470 | legacy_dev_list) { | 470 | legacy_dev_list) { |
471 | drm_put_dev(dev); | ||
472 | list_del(&dev->legacy_dev_list); | 471 | list_del(&dev->legacy_dev_list); |
472 | drm_put_dev(dev); | ||
473 | } | 473 | } |
474 | } | 474 | } |
475 | DRM_INFO("Module unloaded\n"); | 475 | DRM_INFO("Module unloaded\n"); |
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 21fc82006b78..319ff5385601 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c | |||
@@ -64,7 +64,7 @@ static int drm_get_platform_dev(struct platform_device *platdev, | |||
64 | return 0; | 64 | return 0; |
65 | 65 | ||
66 | err_free: | 66 | err_free: |
67 | drm_dev_free(dev); | 67 | drm_dev_unref(dev); |
68 | return ret; | 68 | return ret; |
69 | } | 69 | } |
70 | 70 | ||
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 56805c39c906..f1437b6c8dbf 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c | |||
@@ -68,7 +68,8 @@ struct drm_prime_attachment { | |||
68 | enum dma_data_direction dir; | 68 | enum dma_data_direction dir; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) | 71 | static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, |
72 | struct dma_buf *dma_buf, uint32_t handle) | ||
72 | { | 73 | { |
73 | struct drm_prime_member *member; | 74 | struct drm_prime_member *member; |
74 | 75 | ||
@@ -174,7 +175,7 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr | |||
174 | } | 175 | } |
175 | 176 | ||
176 | static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, | 177 | static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, |
177 | enum dma_data_direction dir) | 178 | enum dma_data_direction dir) |
178 | { | 179 | { |
179 | struct drm_prime_attachment *prime_attach = attach->priv; | 180 | struct drm_prime_attachment *prime_attach = attach->priv; |
180 | struct drm_gem_object *obj = attach->dmabuf->priv; | 181 | struct drm_gem_object *obj = attach->dmabuf->priv; |
@@ -211,11 +212,19 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, | |||
211 | } | 212 | } |
212 | 213 | ||
213 | static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, | 214 | static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, |
214 | struct sg_table *sgt, enum dma_data_direction dir) | 215 | struct sg_table *sgt, |
216 | enum dma_data_direction dir) | ||
215 | { | 217 | { |
216 | /* nothing to be done here */ | 218 | /* nothing to be done here */ |
217 | } | 219 | } |
218 | 220 | ||
221 | /** | ||
222 | * drm_gem_dmabuf_release - dma_buf release implementation for GEM | ||
223 | * @dma_buf: buffer to be released | ||
224 | * | ||
225 | * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers | ||
226 | * must use this in their dma_buf ops structure as the release callback. | ||
227 | */ | ||
219 | void drm_gem_dmabuf_release(struct dma_buf *dma_buf) | 228 | void drm_gem_dmabuf_release(struct dma_buf *dma_buf) |
220 | { | 229 | { |
221 | struct drm_gem_object *obj = dma_buf->priv; | 230 | struct drm_gem_object *obj = dma_buf->priv; |
@@ -242,30 +251,30 @@ static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) | |||
242 | } | 251 | } |
243 | 252 | ||
244 | static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, | 253 | static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, |
245 | unsigned long page_num) | 254 | unsigned long page_num) |
246 | { | 255 | { |
247 | return NULL; | 256 | return NULL; |
248 | } | 257 | } |
249 | 258 | ||
250 | static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, | 259 | static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, |
251 | unsigned long page_num, void *addr) | 260 | unsigned long page_num, void *addr) |
252 | { | 261 | { |
253 | 262 | ||
254 | } | 263 | } |
255 | static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, | 264 | static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, |
256 | unsigned long page_num) | 265 | unsigned long page_num) |
257 | { | 266 | { |
258 | return NULL; | 267 | return NULL; |
259 | } | 268 | } |
260 | 269 | ||
261 | static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, | 270 | static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, |
262 | unsigned long page_num, void *addr) | 271 | unsigned long page_num, void *addr) |
263 | { | 272 | { |
264 | 273 | ||
265 | } | 274 | } |
266 | 275 | ||
267 | static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, | 276 | static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, |
268 | struct vm_area_struct *vma) | 277 | struct vm_area_struct *vma) |
269 | { | 278 | { |
270 | struct drm_gem_object *obj = dma_buf->priv; | 279 | struct drm_gem_object *obj = dma_buf->priv; |
271 | struct drm_device *dev = obj->dev; | 280 | struct drm_device *dev = obj->dev; |
@@ -315,6 +324,15 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { | |||
315 | * driver's scatter/gather table | 324 | * driver's scatter/gather table |
316 | */ | 325 | */ |
317 | 326 | ||
327 | /** | ||
328 | * drm_gem_prime_export - helper library implementation of the export callback | ||
329 | * @dev: drm_device to export from | ||
330 | * @obj: GEM object to export | ||
331 | * @flags: flags like DRM_CLOEXEC | ||
332 | * | ||
333 | * This is the implementation of the gem_prime_export functions for GEM drivers | ||
334 | * using the PRIME helpers. | ||
335 | */ | ||
318 | struct dma_buf *drm_gem_prime_export(struct drm_device *dev, | 336 | struct dma_buf *drm_gem_prime_export(struct drm_device *dev, |
319 | struct drm_gem_object *obj, int flags) | 337 | struct drm_gem_object *obj, int flags) |
320 | { | 338 | { |
@@ -355,9 +373,23 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, | |||
355 | return dmabuf; | 373 | return dmabuf; |
356 | } | 374 | } |
357 | 375 | ||
376 | /** | ||
377 | * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers | ||
378 | * @dev: dev to export the buffer from | ||
379 | * @file_priv: drm file-private structure | ||
380 | * @handle: buffer handle to export | ||
381 | * @flags: flags like DRM_CLOEXEC | ||
382 | * @prime_fd: pointer to storage for the fd id of the created dma-buf | ||
383 | * | ||
384 | * This is the PRIME export function which GEM drivers must use to ensure | ||
385 | * correct lifetime management of the underlying GEM object. | ||
386 | * The actual exporting from GEM object to a dma-buf is done through the | ||
387 | * gem_prime_export driver callback. | ||
388 | */ | ||
358 | int drm_gem_prime_handle_to_fd(struct drm_device *dev, | 389 | int drm_gem_prime_handle_to_fd(struct drm_device *dev, |
359 | struct drm_file *file_priv, uint32_t handle, uint32_t flags, | 390 | struct drm_file *file_priv, uint32_t handle, |
360 | int *prime_fd) | 391 | uint32_t flags, |
392 | int *prime_fd) | ||
361 | { | 393 | { |
362 | struct drm_gem_object *obj; | 394 | struct drm_gem_object *obj; |
363 | int ret = 0; | 395 | int ret = 0; |
@@ -441,6 +473,14 @@ out_unlock: | |||
441 | } | 473 | } |
442 | EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); | 474 | EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); |
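[Editor's note] A sketch of how a GEM driver wires these helpers into its drm_driver, as the documentation above mandates (fields as they exist at this point in the tree):

	static struct drm_driver my_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_export	= drm_gem_prime_export,
		.gem_prime_import	= drm_gem_prime_import,
		/* ... */
	};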
443 | 475 | ||
476 | /** | ||
477 | * drm_gem_prime_import - helper library implementation of the import callback | ||
478 | * @dev: drm_device to import into | ||
479 | * @dma_buf: dma-buf object to import | ||
480 | * | ||
481 | * This is the implementation of the gem_prime_import functions for GEM drivers | ||
482 | * using the PRIME helpers. | ||
483 | */ | ||
444 | struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, | 484 | struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, |
445 | struct dma_buf *dma_buf) | 485 | struct dma_buf *dma_buf) |
446 | { | 486 | { |
@@ -496,8 +536,21 @@ fail_detach: | |||
496 | } | 536 | } |
497 | EXPORT_SYMBOL(drm_gem_prime_import); | 537 | EXPORT_SYMBOL(drm_gem_prime_import); |
498 | 538 | ||
539 | /** | ||
540 | * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers | ||
541 | * @dev: drm device to import the buffer into | ||
542 | * @file_priv: drm file-private structure | ||
543 | * @prime_fd: fd id of the dma-buf which should be imported | ||
544 | * @handle: pointer to storage for the handle of the imported buffer object | ||
545 | * | ||
546 | * This is the PRIME import function which GEM drivers must use to ensure | ||
547 | * correct lifetime management of the underlying GEM object. | ||
548 | * The actual importing of the GEM object from the dma-buf is done through | ||
549 | * the gem_prime_import driver callback. | ||
550 | */ | ||
499 | int drm_gem_prime_fd_to_handle(struct drm_device *dev, | 551 | int drm_gem_prime_fd_to_handle(struct drm_device *dev, |
500 | struct drm_file *file_priv, int prime_fd, uint32_t *handle) | 552 | struct drm_file *file_priv, int prime_fd, |
553 | uint32_t *handle) | ||
501 | { | 554 | { |
502 | struct dma_buf *dma_buf; | 555 | struct dma_buf *dma_buf; |
503 | struct drm_gem_object *obj; | 556 | struct drm_gem_object *obj; |
@@ -598,12 +651,14 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, | |||
598 | args->fd, &args->handle); | 651 | args->fd, &args->handle); |
599 | } | 652 | } |
600 | 653 | ||
601 | /* | 654 | /** |
602 | * drm_prime_pages_to_sg | 655 | * drm_prime_pages_to_sg - converts a page array into an sg list |
656 | * @pages: pointer to the array of page pointers to convert | ||
657 | * @nr_pages: length of the page vector | ||
603 | * | 658 | * |
604 | * this helper creates an sg table object from a set of pages | 659 | * This helper creates an sg table object from a set of pages; |
605 | * the driver is responsible for mapping the pages into the | 660 | * the driver is responsible for mapping the pages into the |
606 | * importers address space | 661 | * importer's address space for use with dma_buf itself. |
607 | */ | 662 | */ |
608 | struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) | 663 | struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) |
609 | { | 664 | { |
@@ -628,9 +683,16 @@ out: | |||
628 | } | 683 | } |
629 | EXPORT_SYMBOL(drm_prime_pages_to_sg); | 684 | EXPORT_SYMBOL(drm_prime_pages_to_sg); |
630 | 685 | ||
631 | /* export an sg table into an array of pages and addresses | 686 | /** |
632 | this is currently required by the TTM driver in order to do correct fault | 687 | * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array |
633 | handling */ | 688 | * @sgt: scatter-gather table to convert |
689 | * @pages: array of page pointers to store the page array in | ||
690 | * @addrs: optional array to store the dma bus address of each page | ||
691 | * @max_pages: size of both the passed-in arrays | ||
692 | * | ||
693 | * Exports an sg table into an array of pages and addresses. This is currently | ||
694 | * required by the TTM driver in order to do correct fault handling. | ||
695 | */ | ||
634 | int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, | 696 | int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, |
635 | dma_addr_t *addrs, int max_pages) | 697 | dma_addr_t *addrs, int max_pages) |
636 | { | 698 | { |
@@ -663,7 +725,15 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, | |||
663 | return 0; | 725 | return 0; |
664 | } | 726 | } |
665 | EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); | 727 | EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); |
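[Editor's note] The two conversions above are roughly inverses of each other; a sketch of both directions (npages and the allocations are illustrative, error handling elided):

	/* export: wrap the driver's page array into an sg table */
	struct sg_table *sgt = drm_prime_pages_to_sg(pages, npages);

	/* import: unpack an sg table into page and DMA-address arrays */
	struct page **parr = kcalloc(npages, sizeof(*parr), GFP_KERNEL);
	dma_addr_t *addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);

	if (parr && addrs)
		drm_prime_sg_to_page_addr_arrays(sgt, parr, addrs, npages);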
666 | /* helper function to cleanup a GEM/prime object */ | 728 | |
729 | /** | ||
730 | * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object | ||
731 | * @obj: GEM object which was created from a dma-buf | ||
732 | * @sg: the sg-table which was pinned at import time | ||
733 | * | ||
734 | * This is the cleanup function which GEM drivers need to call when they use | ||
735 | * drm_gem_prime_import() to import dma-bufs. | ||
736 | */ | ||
667 | void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) | 737 | void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) |
668 | { | 738 | { |
669 | struct dma_buf_attachment *attach; | 739 | struct dma_buf_attachment *attach; |
@@ -683,11 +753,9 @@ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) | |||
683 | INIT_LIST_HEAD(&prime_fpriv->head); | 753 | INIT_LIST_HEAD(&prime_fpriv->head); |
684 | mutex_init(&prime_fpriv->lock); | 754 | mutex_init(&prime_fpriv->lock); |
685 | } | 755 | } |
686 | EXPORT_SYMBOL(drm_prime_init_file_private); | ||
687 | 756 | ||
688 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) | 757 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) |
689 | { | 758 | { |
690 | /* by now drm_gem_release should've made sure the list is empty */ | 759 | /* by now drm_gem_release should've made sure the list is empty */ |
691 | WARN_ON(!list_empty(&prime_fpriv->head)); | 760 | WARN_ON(!list_empty(&prime_fpriv->head)); |
692 | } | 761 | } |
693 | EXPORT_SYMBOL(drm_prime_destroy_file_private); | ||
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 98a33c580ca1..dc2c6095d850 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -31,8 +31,10 @@ | |||
31 | * DEALINGS IN THE SOFTWARE. | 31 | * DEALINGS IN THE SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/fs.h> | ||
34 | #include <linux/module.h> | 35 | #include <linux/module.h> |
35 | #include <linux/moduleparam.h> | 36 | #include <linux/moduleparam.h> |
37 | #include <linux/mount.h> | ||
36 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
37 | #include <drm/drmP.h> | 39 | #include <drm/drmP.h> |
38 | #include <drm/drm_core.h> | 40 | #include <drm/drm_core.h> |
@@ -70,6 +72,7 @@ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); | |||
70 | module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); | 72 | module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); |
71 | module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); | 73 | module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); |
72 | 74 | ||
75 | static DEFINE_SPINLOCK(drm_minor_lock); | ||
73 | struct idr drm_minors_idr; | 76 | struct idr drm_minors_idr; |
74 | 77 | ||
75 | struct class *drm_class; | 78 | struct class *drm_class; |
@@ -117,26 +120,6 @@ void drm_ut_debug_printk(unsigned int request_level, | |||
117 | } | 120 | } |
118 | EXPORT_SYMBOL(drm_ut_debug_printk); | 121 | EXPORT_SYMBOL(drm_ut_debug_printk); |
119 | 122 | ||
120 | static int drm_minor_get_id(struct drm_device *dev, int type) | ||
121 | { | ||
122 | int ret; | ||
123 | int base = 0, limit = 63; | ||
124 | |||
125 | if (type == DRM_MINOR_CONTROL) { | ||
126 | base += 64; | ||
127 | limit = base + 63; | ||
128 | } else if (type == DRM_MINOR_RENDER) { | ||
129 | base += 128; | ||
130 | limit = base + 63; | ||
131 | } | ||
132 | |||
133 | mutex_lock(&dev->struct_mutex); | ||
134 | ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL); | ||
135 | mutex_unlock(&dev->struct_mutex); | ||
136 | |||
137 | return ret == -ENOSPC ? -EINVAL : ret; | ||
138 | } | ||
139 | |||
140 | struct drm_master *drm_master_create(struct drm_minor *minor) | 123 | struct drm_master *drm_master_create(struct drm_minor *minor) |
141 | { | 124 | { |
142 | struct drm_master *master; | 125 | struct drm_master *master; |
@@ -260,119 +243,183 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | |||
260 | return 0; | 243 | return 0; |
261 | } | 244 | } |
262 | 245 | ||
263 | /** | 246 | /* |
264 | * drm_get_minor - Allocate and register new DRM minor | 247 | * DRM Minors |
265 | * @dev: DRM device | 248 | * A DRM device can provide several char-dev interfaces on the DRM-Major. Each |
266 | * @minor: Pointer to where new minor is stored | 249 | * of them is represented by a drm_minor object. Depending on the capabilities |
267 | * @type: Type of minor | 250 | * of the device-driver, different interfaces are registered. |
268 | * | ||
269 | * Allocate a new minor of the given type and register it. A pointer to the new | ||
270 | * minor is returned in @minor. | ||
271 | * Caller must hold the global DRM mutex. | ||
272 | * | 251 | * |
273 | * RETURNS: | 252 | * Minors can be accessed via dev->$minor_name. This pointer is either |
274 | * 0 on success, negative error code on failure. | 253 | * NULL or a valid drm_minor pointer and stays valid as long as the device is |
254 | * valid. This means DRM minors have the same life-time as the underlying | ||
255 | * device. However, this doesn't mean that the minor is active. Minors are | ||
256 | * registered and unregistered dynamically according to device-state. | ||
275 | */ | 257 | */ |
276 | static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, | 258 | |
277 | int type) | 259 | static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, |
260 | unsigned int type) | ||
261 | { | ||
262 | switch (type) { | ||
263 | case DRM_MINOR_LEGACY: | ||
264 | return &dev->primary; | ||
265 | case DRM_MINOR_RENDER: | ||
266 | return &dev->render; | ||
267 | case DRM_MINOR_CONTROL: | ||
268 | return &dev->control; | ||
269 | default: | ||
270 | return NULL; | ||
271 | } | ||
272 | } | ||
273 | |||
274 | static int drm_minor_alloc(struct drm_device *dev, unsigned int type) | ||
275 | { | ||
276 | struct drm_minor *minor; | ||
277 | |||
278 | minor = kzalloc(sizeof(*minor), GFP_KERNEL); | ||
279 | if (!minor) | ||
280 | return -ENOMEM; | ||
281 | |||
282 | minor->type = type; | ||
283 | minor->dev = dev; | ||
284 | INIT_LIST_HEAD(&minor->master_list); | ||
285 | |||
286 | *drm_minor_get_slot(dev, type) = minor; | ||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static void drm_minor_free(struct drm_device *dev, unsigned int type) | ||
291 | { | ||
292 | struct drm_minor **slot; | ||
293 | |||
294 | slot = drm_minor_get_slot(dev, type); | ||
295 | if (*slot) { | ||
296 | kfree(*slot); | ||
297 | *slot = NULL; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | static int drm_minor_register(struct drm_device *dev, unsigned int type) | ||
278 | { | 302 | { |
279 | struct drm_minor *new_minor; | 303 | struct drm_minor *new_minor; |
304 | unsigned long flags; | ||
280 | int ret; | 305 | int ret; |
281 | int minor_id; | 306 | int minor_id; |
282 | 307 | ||
283 | DRM_DEBUG("\n"); | 308 | DRM_DEBUG("\n"); |
284 | 309 | ||
285 | minor_id = drm_minor_get_id(dev, type); | 310 | new_minor = *drm_minor_get_slot(dev, type); |
311 | if (!new_minor) | ||
312 | return 0; | ||
313 | |||
314 | idr_preload(GFP_KERNEL); | ||
315 | spin_lock_irqsave(&drm_minor_lock, flags); | ||
316 | minor_id = idr_alloc(&drm_minors_idr, | ||
317 | NULL, | ||
318 | 64 * type, | ||
319 | 64 * (type + 1), | ||
320 | GFP_NOWAIT); | ||
321 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||
322 | idr_preload_end(); | ||
323 | |||
286 | if (minor_id < 0) | 324 | if (minor_id < 0) |
287 | return minor_id; | 325 | return minor_id; |
288 | 326 | ||
289 | new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL); | ||
290 | if (!new_minor) { | ||
291 | ret = -ENOMEM; | ||
292 | goto err_idr; | ||
293 | } | ||
294 | |||
295 | new_minor->type = type; | ||
296 | new_minor->device = MKDEV(DRM_MAJOR, minor_id); | ||
297 | new_minor->dev = dev; | ||
298 | new_minor->index = minor_id; | 327 | new_minor->index = minor_id; |
299 | INIT_LIST_HEAD(&new_minor->master_list); | ||
300 | |||
301 | idr_replace(&drm_minors_idr, new_minor, minor_id); | ||
302 | 328 | ||
303 | #if defined(CONFIG_DEBUG_FS) | ||
304 | ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); | 329 | ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); |
305 | if (ret) { | 330 | if (ret) { |
306 | DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); | 331 | DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); |
307 | goto err_mem; | 332 | goto err_id; |
308 | } | 333 | } |
309 | #endif | ||
310 | 334 | ||
311 | ret = drm_sysfs_device_add(new_minor); | 335 | ret = drm_sysfs_device_add(new_minor); |
312 | if (ret) { | 336 | if (ret) { |
313 | printk(KERN_ERR | 337 | DRM_ERROR("DRM: Error sysfs_device_add.\n"); |
314 | "DRM: Error sysfs_device_add.\n"); | ||
315 | goto err_debugfs; | 338 | goto err_debugfs; |
316 | } | 339 | } |
317 | *minor = new_minor; | 340 | |
341 | /* replace NULL with @minor so lookups will succeed from now on */ | ||
342 | spin_lock_irqsave(&drm_minor_lock, flags); | ||
343 | idr_replace(&drm_minors_idr, new_minor, new_minor->index); | ||
344 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||
318 | 345 | ||
319 | DRM_DEBUG("new minor assigned %d\n", minor_id); | 346 | DRM_DEBUG("new minor assigned %d\n", minor_id); |
320 | return 0; | 347 | return 0; |
321 | 348 | ||
322 | |||
323 | err_debugfs: | 349 | err_debugfs: |
324 | #if defined(CONFIG_DEBUG_FS) | ||
325 | drm_debugfs_cleanup(new_minor); | 350 | drm_debugfs_cleanup(new_minor); |
326 | err_mem: | 351 | err_id: |
327 | #endif | 352 | spin_lock_irqsave(&drm_minor_lock, flags); |
328 | kfree(new_minor); | ||
329 | err_idr: | ||
330 | idr_remove(&drm_minors_idr, minor_id); | 353 | idr_remove(&drm_minors_idr, minor_id); |
331 | *minor = NULL; | 354 | spin_unlock_irqrestore(&drm_minor_lock, flags); |
355 | new_minor->index = 0; | ||
332 | return ret; | 356 | return ret; |
333 | } | 357 | } |
334 | 358 | ||
335 | /** | 359 | static void drm_minor_unregister(struct drm_device *dev, unsigned int type) |
336 | * drm_unplug_minor - Unplug DRM minor | ||
337 | * @minor: Minor to unplug | ||
338 | * | ||
339 | * Unplugs the given DRM minor but keeps the object. So after this returns, | ||
340 | * minor->dev is still valid so existing open-files can still access it to get | ||
341 | * device information from their drm_file objects. | ||
342 | * If the minor is already unplugged or if @minor is NULL, nothing is done. | ||
343 | * The global DRM mutex must be held by the caller. | ||
344 | */ | ||
345 | static void drm_unplug_minor(struct drm_minor *minor) | ||
346 | { | 360 | { |
361 | struct drm_minor *minor; | ||
362 | unsigned long flags; | ||
363 | |||
364 | minor = *drm_minor_get_slot(dev, type); | ||
347 | if (!minor || !minor->kdev) | 365 | if (!minor || !minor->kdev) |
348 | return; | 366 | return; |
349 | 367 | ||
350 | #if defined(CONFIG_DEBUG_FS) | 368 | spin_lock_irqsave(&drm_minor_lock, flags); |
351 | drm_debugfs_cleanup(minor); | 369 | idr_remove(&drm_minors_idr, minor->index); |
352 | #endif | 370 | spin_unlock_irqrestore(&drm_minor_lock, flags); |
371 | minor->index = 0; | ||
353 | 372 | ||
373 | drm_debugfs_cleanup(minor); | ||
354 | drm_sysfs_device_remove(minor); | 374 | drm_sysfs_device_remove(minor); |
355 | idr_remove(&drm_minors_idr, minor->index); | ||
356 | } | 375 | } |
357 | 376 | ||
358 | /** | 377 | /** |
359 | * drm_put_minor - Destroy DRM minor | 378 | * drm_minor_acquire - Acquire a DRM minor |
360 | * @minor: Minor to destroy | 379 | * @minor_id: Minor ID of the DRM-minor |
380 | * | ||
381 | * Looks up the given minor-ID and returns the respective DRM-minor object. The | ||
382 | * reference-count of the underlying device is increased so you must release this | ||
383 | * object with drm_minor_release(). | ||
384 | * | ||
385 | * As long as you hold this minor, it is guaranteed that the object and the | ||
386 | * minor->dev pointer will stay valid! However, the device may get unplugged and | ||
387 | * unregistered while you hold the minor. | ||
361 | * | 388 | * |
362 | * This calls drm_unplug_minor() on the given minor and then frees it. Nothing | 389 | * Returns: |
363 | * is done if @minor is NULL. It is fine to call this on already unplugged | 390 | * Pointer to minor-object with increased device-refcount, or PTR_ERR on |
364 | * minors. | 391 | * failure. |
365 | * The global DRM mutex must be held by the caller. | ||
366 | */ | 392 | */ |
367 | static void drm_put_minor(struct drm_minor *minor) | 393 | struct drm_minor *drm_minor_acquire(unsigned int minor_id) |
368 | { | 394 | { |
369 | if (!minor) | 395 | struct drm_minor *minor; |
370 | return; | 396 | unsigned long flags; |
397 | |||
398 | spin_lock_irqsave(&drm_minor_lock, flags); | ||
399 | minor = idr_find(&drm_minors_idr, minor_id); | ||
400 | if (minor) | ||
401 | drm_dev_ref(minor->dev); | ||
402 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||
403 | |||
404 | if (!minor) { | ||
405 | return ERR_PTR(-ENODEV); | ||
406 | } else if (drm_device_is_unplugged(minor->dev)) { | ||
407 | drm_dev_unref(minor->dev); | ||
408 | return ERR_PTR(-ENODEV); | ||
409 | } | ||
371 | 410 | ||
372 | DRM_DEBUG("release secondary minor %d\n", minor->index); | 411 | return minor; |
412 | } | ||
373 | 413 | ||
374 | drm_unplug_minor(minor); | 414 | /** |
375 | kfree(minor); | 415 | * drm_minor_release - Release DRM minor |
416 | * @minor: Pointer to DRM minor object | ||
417 | * | ||
418 | * Release a minor that was previously acquired via drm_minor_acquire(). | ||
419 | */ | ||
420 | void drm_minor_release(struct drm_minor *minor) | ||
421 | { | ||
422 | drm_dev_unref(minor->dev); | ||
376 | } | 423 | } |
377 | 424 | ||
378 | /** | 425 | /** |
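drm_minor_acquire() is the heart of the new lookup scheme: find the minor in the IDR under drm_minor_lock, take a device reference before dropping the lock, then fail with -ENODEV if the device was concurrently unplugged. A userspace model of that lock-for-lookup, refcount-for-lifetime split, with pthreads and C11 atomics standing in for the spinlock and the kref:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct device {
        atomic_int ref;
        bool unplugged;
    };

    static struct device *minor_table[64];
    static pthread_mutex_t minor_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct device *device_acquire(int id)
    {
        struct device *dev;

        pthread_mutex_lock(&minor_lock);        /* lock covers the lookup... */
        dev = minor_table[id];
        if (dev)
            atomic_fetch_add(&dev->ref, 1);     /* ...and the drm_dev_ref() */
        pthread_mutex_unlock(&minor_lock);

        if (!dev)
            return NULL;                        /* ERR_PTR(-ENODEV) upstream */
        if (dev->unplugged) {                   /* raced with an unplug */
            atomic_fetch_sub(&dev->ref, 1);     /* drm_dev_unref() */
            return NULL;
        }
        return dev;                             /* caller must release later */
    }

    int main(void)
    {
        static struct device d = { .ref = 1, .unplugged = false };

        minor_table[0] = &d;
        printf("acquire(0): %s\n", device_acquire(0) ? "ok" : "ENODEV");
        printf("acquire(1): %s\n", device_acquire(1) ? "ok" : "ENODEV");
        return 0;
    }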
@@ -392,18 +439,16 @@ void drm_put_dev(struct drm_device *dev) | |||
392 | } | 439 | } |
393 | 440 | ||
394 | drm_dev_unregister(dev); | 441 | drm_dev_unregister(dev); |
395 | drm_dev_free(dev); | 442 | drm_dev_unref(dev); |
396 | } | 443 | } |
397 | EXPORT_SYMBOL(drm_put_dev); | 444 | EXPORT_SYMBOL(drm_put_dev); |
398 | 445 | ||
399 | void drm_unplug_dev(struct drm_device *dev) | 446 | void drm_unplug_dev(struct drm_device *dev) |
400 | { | 447 | { |
401 | /* for a USB device */ | 448 | /* for a USB device */ |
402 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 449 | drm_minor_unregister(dev, DRM_MINOR_LEGACY); |
403 | drm_unplug_minor(dev->control); | 450 | drm_minor_unregister(dev, DRM_MINOR_RENDER); |
404 | if (dev->render) | 451 | drm_minor_unregister(dev, DRM_MINOR_CONTROL); |
405 | drm_unplug_minor(dev->render); | ||
406 | drm_unplug_minor(dev->primary); | ||
407 | 452 | ||
408 | mutex_lock(&drm_global_mutex); | 453 | mutex_lock(&drm_global_mutex); |
409 | 454 | ||
@@ -416,6 +461,78 @@ void drm_unplug_dev(struct drm_device *dev) | |||
416 | } | 461 | } |
417 | EXPORT_SYMBOL(drm_unplug_dev); | 462 | EXPORT_SYMBOL(drm_unplug_dev); |
418 | 463 | ||
464 | /* | ||
465 | * DRM internal mount | ||
466 | * We want to be able to allocate our own "struct address_space" to control | ||
467 | * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow | ||
468 | * stand-alone address_space objects, so we need an underlying inode. As there | ||
469 | * is no way to allocate an independent inode easily, we need a fake internal | ||
470 | * VFS mount-point. | ||
471 | * | ||
472 | * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() | ||
473 | * frees it again. You are allowed to use iget() and iput() to get references to | ||
474 | * the inode. But each drm_fs_inode_new() call must be paired with exactly one | ||
475 | * drm_fs_inode_free() call (which does not have to be the last iput()). | ||
476 | * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it | ||
477 | * between multiple inode-users. You could, technically, call | ||
478 | * iget() + drm_fs_inode_free() directly after alloc and sometime later do an | ||
479 | * iput(), but this way you'd end up with a new vfsmount for each inode. | ||
480 | */ | ||
481 | |||
482 | static int drm_fs_cnt; | ||
483 | static struct vfsmount *drm_fs_mnt; | ||
484 | |||
485 | static const struct dentry_operations drm_fs_dops = { | ||
486 | .d_dname = simple_dname, | ||
487 | }; | ||
488 | |||
489 | static const struct super_operations drm_fs_sops = { | ||
490 | .statfs = simple_statfs, | ||
491 | }; | ||
492 | |||
493 | static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags, | ||
494 | const char *dev_name, void *data) | ||
495 | { | ||
496 | return mount_pseudo(fs_type, | ||
497 | "drm:", | ||
498 | &drm_fs_sops, | ||
499 | &drm_fs_dops, | ||
500 | 0x010203ff); | ||
501 | } | ||
502 | |||
503 | static struct file_system_type drm_fs_type = { | ||
504 | .name = "drm", | ||
505 | .owner = THIS_MODULE, | ||
506 | .mount = drm_fs_mount, | ||
507 | .kill_sb = kill_anon_super, | ||
508 | }; | ||
509 | |||
510 | static struct inode *drm_fs_inode_new(void) | ||
511 | { | ||
512 | struct inode *inode; | ||
513 | int r; | ||
514 | |||
515 | r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); | ||
516 | if (r < 0) { | ||
517 | DRM_ERROR("Cannot mount pseudo fs: %d\n", r); | ||
518 | return ERR_PTR(r); | ||
519 | } | ||
520 | |||
521 | inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); | ||
522 | if (IS_ERR(inode)) | ||
523 | simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); | ||
524 | |||
525 | return inode; | ||
526 | } | ||
527 | |||
528 | static void drm_fs_inode_free(struct inode *inode) | ||
529 | { | ||
530 | if (inode) { | ||
531 | iput(inode); | ||
532 | simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); | ||
533 | } | ||
534 | } | ||
535 | |||
419 | /** | 536 | /** |
420 | * drm_dev_alloc - Allocate new drm device | 537 | * drm_dev_alloc - Allocate new drm device |
421 | * @driver: DRM driver to allocate device for | 538 | * @driver: DRM driver to allocate device for |
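The drm_fs_inode_new()/drm_fs_inode_free() comment above describes a pin-counted mount: the first caller mounts the pseudo filesystem, later callers share it, and the last release tears it down, which is why every new() must be balanced by exactly one free(). A minimal model of that simple_pin_fs()/simple_release_fs() pairing:

    #include <assert.h>
    #include <stdio.h>

    static int fs_cnt;        /* drm_fs_cnt */
    static int mounted;       /* stands in for drm_fs_mnt being non-NULL */

    static int inode_new(void)
    {
        if (fs_cnt++ == 0)
            mounted = 1;      /* first pin mounts the pseudo filesystem */
        return 0;             /* would hand back a fresh anonymous inode */
    }

    static void inode_free(void)
    {
        assert(fs_cnt > 0);   /* every new() gets exactly one free() */
        if (--fs_cnt == 0)
            mounted = 0;      /* last release unmounts */
    }

    int main(void)
    {
        inode_new();          /* device A */
        inode_new();          /* device B shares the same vfsmount */
        inode_free();
        printf("after one free: mounted=%d cnt=%d\n", mounted, fs_cnt);
        inode_free();
        printf("after both:     mounted=%d cnt=%d\n", mounted, fs_cnt);
        return 0;
    }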
@@ -425,6 +542,9 @@ EXPORT_SYMBOL(drm_unplug_dev); | |||
425 | * Call drm_dev_register() to advertise the device to user space and register it | 542 | * Call drm_dev_register() to advertise the device to user space and register it |
426 | * with other core subsystems. | 543 | * with other core subsystems. |
427 | * | 544 | * |
545 | * The initial ref-count of the object is 1. Use drm_dev_ref() and | ||
546 | * drm_dev_unref() to take and drop further ref-counts. | ||
547 | * | ||
428 | * RETURNS: | 548 | * RETURNS: |
429 | * Pointer to new DRM device, or NULL if out of memory. | 549 | * Pointer to new DRM device, or NULL if out of memory. |
430 | */ | 550 | */ |
@@ -438,6 +558,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, | |||
438 | if (!dev) | 558 | if (!dev) |
439 | return NULL; | 559 | return NULL; |
440 | 560 | ||
561 | kref_init(&dev->ref); | ||
441 | dev->dev = parent; | 562 | dev->dev = parent; |
442 | dev->driver = driver; | 563 | dev->driver = driver; |
443 | 564 | ||
@@ -452,8 +573,31 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, | |||
452 | mutex_init(&dev->struct_mutex); | 573 | mutex_init(&dev->struct_mutex); |
453 | mutex_init(&dev->ctxlist_mutex); | 574 | mutex_init(&dev->ctxlist_mutex); |
454 | 575 | ||
455 | if (drm_ht_create(&dev->map_hash, 12)) | 576 | dev->anon_inode = drm_fs_inode_new(); |
577 | if (IS_ERR(dev->anon_inode)) { | ||
578 | ret = PTR_ERR(dev->anon_inode); | ||
579 | DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); | ||
456 | goto err_free; | 580 | goto err_free; |
581 | } | ||
582 | |||
583 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
584 | ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); | ||
585 | if (ret) | ||
586 | goto err_minors; | ||
587 | } | ||
588 | |||
589 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | ||
590 | ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); | ||
591 | if (ret) | ||
592 | goto err_minors; | ||
593 | } | ||
594 | |||
595 | ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY); | ||
596 | if (ret) | ||
597 | goto err_minors; | ||
598 | |||
599 | if (drm_ht_create(&dev->map_hash, 12)) | ||
600 | goto err_minors; | ||
457 | 601 | ||
458 | ret = drm_ctxbitmap_init(dev); | 602 | ret = drm_ctxbitmap_init(dev); |
459 | if (ret) { | 603 | if (ret) { |
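drm_dev_alloc() now sets up the anonymous inode and all minor slots before the map hash, and every failure branch funnels into a single unwind ladder (err_minors, err_free below) that releases earlier steps in reverse order. A self-contained sketch of that goto-ladder idiom; the three resources are placeholders for the inode, minors, and hash table:

    #include <stdio.h>
    #include <stdlib.h>

    /* Three fake setup steps; a failure at step N must undo 1..N-1 only. */
    static int dev_alloc(int fail_at)
    {
        void *inode, *minors, *hash;

        inode = (fail_at == 1) ? NULL : malloc(1);   /* drm_fs_inode_new() */
        if (!inode)
            goto err_free;
        minors = (fail_at == 2) ? NULL : malloc(1);  /* drm_minor_alloc() x3 */
        if (!minors)
            goto err_inode;
        hash = (fail_at == 3) ? NULL : malloc(1);    /* drm_ht_create() */
        if (!hash)
            goto err_minors;

        free(hash); free(minors); free(inode);       /* demo: success path */
        return 0;

    err_minors:
        free(minors);
    err_inode:
        free(inode);
    err_free:
        return -1;
    }

    int main(void)
    {
        printf("fail_at=2 -> %d, fail_at=0 -> %d\n", dev_alloc(2), dev_alloc(0));
        return 0;
    }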
@@ -475,38 +619,68 @@ err_ctxbitmap: | |||
475 | drm_ctxbitmap_cleanup(dev); | 619 | drm_ctxbitmap_cleanup(dev); |
476 | err_ht: | 620 | err_ht: |
477 | drm_ht_remove(&dev->map_hash); | 621 | drm_ht_remove(&dev->map_hash); |
622 | err_minors: | ||
623 | drm_minor_free(dev, DRM_MINOR_LEGACY); | ||
624 | drm_minor_free(dev, DRM_MINOR_RENDER); | ||
625 | drm_minor_free(dev, DRM_MINOR_CONTROL); | ||
626 | drm_fs_inode_free(dev->anon_inode); | ||
478 | err_free: | 627 | err_free: |
479 | kfree(dev); | 628 | kfree(dev); |
480 | return NULL; | 629 | return NULL; |
481 | } | 630 | } |
482 | EXPORT_SYMBOL(drm_dev_alloc); | 631 | EXPORT_SYMBOL(drm_dev_alloc); |
483 | 632 | ||
484 | /** | 633 | static void drm_dev_release(struct kref *ref) |
485 | * drm_dev_free - Free DRM device | ||
486 | * @dev: DRM device to free | ||
487 | * | ||
488 | * Free a DRM device that has previously been allocated via drm_dev_alloc(). | ||
489 | * You must not use kfree() instead or you will leak memory. | ||
490 | * | ||
491 | * This must not be called once the device got registered. Use drm_put_dev() | ||
492 | * instead, which then calls drm_dev_free(). | ||
493 | */ | ||
494 | void drm_dev_free(struct drm_device *dev) | ||
495 | { | 634 | { |
496 | drm_put_minor(dev->control); | 635 | struct drm_device *dev = container_of(ref, struct drm_device, ref); |
497 | drm_put_minor(dev->render); | ||
498 | drm_put_minor(dev->primary); | ||
499 | 636 | ||
500 | if (dev->driver->driver_features & DRIVER_GEM) | 637 | if (dev->driver->driver_features & DRIVER_GEM) |
501 | drm_gem_destroy(dev); | 638 | drm_gem_destroy(dev); |
502 | 639 | ||
503 | drm_ctxbitmap_cleanup(dev); | 640 | drm_ctxbitmap_cleanup(dev); |
504 | drm_ht_remove(&dev->map_hash); | 641 | drm_ht_remove(&dev->map_hash); |
642 | drm_fs_inode_free(dev->anon_inode); | ||
643 | |||
644 | drm_minor_free(dev, DRM_MINOR_LEGACY); | ||
645 | drm_minor_free(dev, DRM_MINOR_RENDER); | ||
646 | drm_minor_free(dev, DRM_MINOR_CONTROL); | ||
505 | 647 | ||
506 | kfree(dev->devname); | 648 | kfree(dev->devname); |
507 | kfree(dev); | 649 | kfree(dev); |
508 | } | 650 | } |
509 | EXPORT_SYMBOL(drm_dev_free); | 651 | |
652 | /** | ||
653 | * drm_dev_ref - Take reference of a DRM device | ||
654 | * @dev: device to take reference of or NULL | ||
655 | * | ||
656 | * This increases the ref-count of @dev by one. You *must* already own a | ||
657 | * reference when calling this. Use drm_dev_unref() to drop this reference | ||
658 | * again. | ||
659 | * | ||
660 | * This function never fails. However, this function does not provide *any* | ||
661 | * guarantee whether the device is alive or running. It only provides a | ||
662 | * reference to the object and the memory associated with it. | ||
663 | */ | ||
664 | void drm_dev_ref(struct drm_device *dev) | ||
665 | { | ||
666 | if (dev) | ||
667 | kref_get(&dev->ref); | ||
668 | } | ||
669 | EXPORT_SYMBOL(drm_dev_ref); | ||
670 | |||
671 | /** | ||
672 | * drm_dev_unref - Drop reference of a DRM device | ||
673 | * @dev: device to drop reference of or NULL | ||
674 | * | ||
675 | * This decreases the ref-count of @dev by one. The device is destroyed if the | ||
676 | * ref-count drops to zero. | ||
677 | */ | ||
678 | void drm_dev_unref(struct drm_device *dev) | ||
679 | { | ||
680 | if (dev) | ||
681 | kref_put(&dev->ref, drm_dev_release); | ||
682 | } | ||
683 | EXPORT_SYMBOL(drm_dev_unref); | ||
510 | 684 | ||
511 | /** | 685 | /** |
512 | * drm_dev_register - Register DRM device | 686 | * drm_dev_register - Register DRM device |
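drm_dev_ref()/drm_dev_unref() replace the old drm_dev_free(): the device starts life with one reference from kref_init() in drm_dev_alloc(), and drm_dev_release() runs only when the last holder drops out. A userspace model of this lifecycle with a C11 atomic counter in place of the kref:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        atomic_int ref;
    };

    static void dev_release(struct dev *d)
    {
        printf("released\n");    /* drm_dev_release(): free minors, inode, ... */
        free(d);
    }

    static void dev_ref(struct dev *d)
    {
        if (d)
            atomic_fetch_add(&d->ref, 1);
    }

    static void dev_unref(struct dev *d)
    {
        if (d && atomic_fetch_sub(&d->ref, 1) == 1)
            dev_release(d);      /* last reference gone */
    }

    int main(void)
    {
        struct dev *d = malloc(sizeof(*d));
        atomic_init(&d->ref, 1); /* kref_init() in drm_dev_alloc() */
        dev_ref(d);              /* e.g. drm_minor_acquire() */
        dev_unref(d);            /* drm_minor_release() */
        dev_unref(d);            /* drm_put_dev() drops the last reference */
        return 0;
    }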
@@ -527,26 +701,22 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | |||
527 | 701 | ||
528 | mutex_lock(&drm_global_mutex); | 702 | mutex_lock(&drm_global_mutex); |
529 | 703 | ||
530 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 704 | ret = drm_minor_register(dev, DRM_MINOR_CONTROL); |
531 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | 705 | if (ret) |
532 | if (ret) | 706 | goto err_minors; |
533 | goto out_unlock; | ||
534 | } | ||
535 | 707 | ||
536 | if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { | 708 | ret = drm_minor_register(dev, DRM_MINOR_RENDER); |
537 | ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER); | 709 | if (ret) |
538 | if (ret) | 710 | goto err_minors; |
539 | goto err_control_node; | ||
540 | } | ||
541 | 711 | ||
542 | ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); | 712 | ret = drm_minor_register(dev, DRM_MINOR_LEGACY); |
543 | if (ret) | 713 | if (ret) |
544 | goto err_render_node; | 714 | goto err_minors; |
545 | 715 | ||
546 | if (dev->driver->load) { | 716 | if (dev->driver->load) { |
547 | ret = dev->driver->load(dev, flags); | 717 | ret = dev->driver->load(dev, flags); |
548 | if (ret) | 718 | if (ret) |
549 | goto err_primary_node; | 719 | goto err_minors; |
550 | } | 720 | } |
551 | 721 | ||
552 | /* setup grouping for legacy outputs */ | 722 | /* setup grouping for legacy outputs */ |
@@ -563,12 +733,10 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | |||
563 | err_unload: | 733 | err_unload: |
564 | if (dev->driver->unload) | 734 | if (dev->driver->unload) |
565 | dev->driver->unload(dev); | 735 | dev->driver->unload(dev); |
566 | err_primary_node: | 736 | err_minors: |
567 | drm_unplug_minor(dev->primary); | 737 | drm_minor_unregister(dev, DRM_MINOR_LEGACY); |
568 | err_render_node: | 738 | drm_minor_unregister(dev, DRM_MINOR_RENDER); |
569 | drm_unplug_minor(dev->render); | 739 | drm_minor_unregister(dev, DRM_MINOR_CONTROL); |
570 | err_control_node: | ||
571 | drm_unplug_minor(dev->control); | ||
572 | out_unlock: | 740 | out_unlock: |
573 | mutex_unlock(&drm_global_mutex); | 741 | mutex_unlock(&drm_global_mutex); |
574 | return ret; | 742 | return ret; |
@@ -581,7 +749,7 @@ EXPORT_SYMBOL(drm_dev_register); | |||
581 | * | 749 | * |
582 | * Unregister the DRM device from the system. This does the reverse of | 750 | * Unregister the DRM device from the system. This does the reverse of |
583 | * drm_dev_register() but does not deallocate the device. The caller must call | 751 | * drm_dev_register() but does not deallocate the device. The caller must call |
584 | * drm_dev_free() to free all resources. | 752 | * drm_dev_unref() to drop their final reference. |
585 | */ | 753 | */ |
586 | void drm_dev_unregister(struct drm_device *dev) | 754 | void drm_dev_unregister(struct drm_device *dev) |
587 | { | 755 | { |
@@ -600,8 +768,8 @@ void drm_dev_unregister(struct drm_device *dev) | |||
600 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) | 768 | list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) |
601 | drm_rmmap(dev, r_list->map); | 769 | drm_rmmap(dev, r_list->map); |
602 | 770 | ||
603 | drm_unplug_minor(dev->control); | 771 | drm_minor_unregister(dev, DRM_MINOR_LEGACY); |
604 | drm_unplug_minor(dev->render); | 772 | drm_minor_unregister(dev, DRM_MINOR_RENDER); |
605 | drm_unplug_minor(dev->primary); | 773 | drm_minor_unregister(dev, DRM_MINOR_CONTROL); |
606 | } | 774 | } |
607 | EXPORT_SYMBOL(drm_dev_unregister); | 775 | EXPORT_SYMBOL(drm_dev_unregister); |
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 0f8cb1ae7607..c3406aad2944 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c | |||
@@ -30,7 +30,7 @@ int drm_get_usb_dev(struct usb_interface *interface, | |||
30 | return 0; | 30 | return 0; |
31 | 31 | ||
32 | err_free: | 32 | err_free: |
33 | drm_dev_free(dev); | 33 | drm_dev_unref(dev); |
34 | return ret; | 34 | return ret; |
35 | 35 | ||
36 | } | 36 | } |
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index faa77f543a07..48af5cac1902 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -19,6 +19,8 @@ | |||
19 | 19 | ||
20 | #include <linux/hdmi.h> | 20 | #include <linux/hdmi.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/irq.h> | ||
23 | #include <sound/asoundef.h> | ||
22 | 24 | ||
23 | #include <drm/drmP.h> | 25 | #include <drm/drmP.h> |
24 | #include <drm/drm_crtc_helper.h> | 26 | #include <drm/drm_crtc_helper.h> |
@@ -30,6 +32,7 @@ | |||
30 | 32 | ||
31 | struct tda998x_priv { | 33 | struct tda998x_priv { |
32 | struct i2c_client *cec; | 34 | struct i2c_client *cec; |
35 | struct i2c_client *hdmi; | ||
33 | uint16_t rev; | 36 | uint16_t rev; |
34 | uint8_t current_page; | 37 | uint8_t current_page; |
35 | int dpms; | 38 | int dpms; |
@@ -38,6 +41,10 @@ struct tda998x_priv { | |||
38 | u8 vip_cntrl_1; | 41 | u8 vip_cntrl_1; |
39 | u8 vip_cntrl_2; | 42 | u8 vip_cntrl_2; |
40 | struct tda998x_encoder_params params; | 43 | struct tda998x_encoder_params params; |
44 | |||
45 | wait_queue_head_t wq_edid; | ||
46 | volatile int wq_edid_wait; | ||
47 | struct drm_encoder *encoder; | ||
41 | }; | 48 | }; |
42 | 49 | ||
43 | #define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) | 50 | #define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv) |
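The new wq_edid/wq_edid_wait fields support a classic announce-then-sleep handshake: the EDID reader raises the flag before kicking off the transfer, and the interrupt thread added further down clears it and wakes the waiter once INT_FLAGS_2_EDID_BLK_RD fires. A userspace model with a condition variable in place of the kernel wait queue (the driver itself would use wait_event_timeout()/wake_up()):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int edid_wait;

    static void *irq_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        edid_wait = 0;               /* priv->wq_edid_wait = 0 */
        pthread_cond_signal(&cond);  /* wake_up(&priv->wq_edid) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);
        edid_wait = 1;               /* announce interest before starting I/O */
        pthread_create(&t, NULL, irq_thread, NULL);
        while (edid_wait)            /* wait_event_timeout() in the driver */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("EDID block ready\n");
        return 0;
    }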
@@ -120,6 +127,8 @@ struct tda998x_priv { | |||
120 | # define VIP_CNTRL_5_CKCASE (1 << 0) | 127 | # define VIP_CNTRL_5_CKCASE (1 << 0) |
121 | # define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) | 128 | # define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1) |
122 | #define REG_MUX_AP REG(0x00, 0x26) /* read/write */ | 129 | #define REG_MUX_AP REG(0x00, 0x26) /* read/write */ |
130 | # define MUX_AP_SELECT_I2S 0x64 | ||
131 | # define MUX_AP_SELECT_SPDIF 0x40 | ||
123 | #define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */ | 132 | #define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */ |
124 | #define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ | 133 | #define REG_MAT_CONTRL REG(0x00, 0x80) /* write */ |
125 | # define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) | 134 | # define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0) |
@@ -197,10 +206,11 @@ struct tda998x_priv { | |||
197 | #define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */ | 206 | #define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */ |
198 | # define I2S_FORMAT(x) (((x) & 3) << 0) | 207 | # define I2S_FORMAT(x) (((x) & 3) << 0) |
199 | #define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */ | 208 | #define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */ |
200 | # define AIP_CLKSEL_FS(x) (((x) & 3) << 0) | 209 | # define AIP_CLKSEL_AIP_SPDIF (0 << 3) |
201 | # define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2) | 210 | # define AIP_CLKSEL_AIP_I2S (1 << 3) |
202 | # define AIP_CLKSEL_AIP(x) (((x) & 7) << 3) | 211 | # define AIP_CLKSEL_FS_ACLK (0 << 0) |
203 | 212 | # define AIP_CLKSEL_FS_MCLK (1 << 0) | |
213 | # define AIP_CLKSEL_FS_FS64SPDIF (2 << 0) | ||
204 | 214 | ||
205 | /* Page 02h: PLL settings */ | 215 | /* Page 02h: PLL settings */ |
206 | #define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */ | 216 | #define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */ |
@@ -304,11 +314,16 @@ struct tda998x_priv { | |||
304 | 314 | ||
305 | /* CEC registers: (not paged) | 315 | /* CEC registers: (not paged) |
306 | */ | 316 | */ |
317 | #define REG_CEC_INTSTATUS 0xee /* read */ | ||
318 | # define CEC_INTSTATUS_CEC (1 << 0) | ||
319 | # define CEC_INTSTATUS_HDMI (1 << 1) | ||
307 | #define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */ | 320 | #define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */ |
308 | # define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7) | 321 | # define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7) |
309 | # define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6) | 322 | # define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6) |
310 | # define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1) | 323 | # define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1) |
311 | # define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0) | 324 | # define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0) |
325 | #define REG_CEC_RXSHPDINTENA 0xfc /* read/write */ | ||
326 | #define REG_CEC_RXSHPDINT 0xfd /* read */ | ||
312 | #define REG_CEC_RXSHPDLEV 0xfe /* read */ | 327 | #define REG_CEC_RXSHPDLEV 0xfe /* read */ |
313 | # define CEC_RXSHPDLEV_RXSENS (1 << 0) | 328 | # define CEC_RXSHPDLEV_RXSENS (1 << 0) |
314 | # define CEC_RXSHPDLEV_HPD (1 << 1) | 329 | # define CEC_RXSHPDLEV_HPD (1 << 1) |
@@ -328,21 +343,21 @@ struct tda998x_priv { | |||
328 | #define TDA19988 0x0301 | 343 | #define TDA19988 0x0301 |
329 | 344 | ||
330 | static void | 345 | static void |
331 | cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val) | 346 | cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val) |
332 | { | 347 | { |
333 | struct i2c_client *client = to_tda998x_priv(encoder)->cec; | 348 | struct i2c_client *client = priv->cec; |
334 | uint8_t buf[] = {addr, val}; | 349 | uint8_t buf[] = {addr, val}; |
335 | int ret; | 350 | int ret; |
336 | 351 | ||
337 | ret = i2c_master_send(client, buf, ARRAY_SIZE(buf)); | 352 | ret = i2c_master_send(client, buf, sizeof(buf)); |
338 | if (ret < 0) | 353 | if (ret < 0) |
339 | dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr); | 354 | dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr); |
340 | } | 355 | } |
341 | 356 | ||
342 | static uint8_t | 357 | static uint8_t |
343 | cec_read(struct drm_encoder *encoder, uint8_t addr) | 358 | cec_read(struct tda998x_priv *priv, uint8_t addr) |
344 | { | 359 | { |
345 | struct i2c_client *client = to_tda998x_priv(encoder)->cec; | 360 | struct i2c_client *client = priv->cec; |
346 | uint8_t val; | 361 | uint8_t val; |
347 | int ret; | 362 | int ret; |
348 | 363 | ||
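cec_write() switching from ARRAY_SIZE(buf) to sizeof(buf) is behavior-neutral here because buf is a uint8_t array, but sizeof() states the intent correctly: i2c_master_send() takes a byte count, not an element count. The two only diverge for wider element types, as this standalone check shows:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        uint8_t  b8[2]  = { 0x10, 0x20 };
        uint16_t b16[2] = { 0x10, 0x20 };

        /* identical for bytes... */
        printf("u8:  ARRAY_SIZE=%zu sizeof=%zu\n", ARRAY_SIZE(b8), sizeof(b8));
        /* ...but an element count would under-send wider buffers */
        printf("u16: ARRAY_SIZE=%zu sizeof=%zu\n", ARRAY_SIZE(b16), sizeof(b16));
        return 0;
    }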
@@ -361,32 +376,36 @@ fail: | |||
361 | return 0; | 376 | return 0; |
362 | } | 377 | } |
363 | 378 | ||
364 | static void | 379 | static int |
365 | set_page(struct drm_encoder *encoder, uint16_t reg) | 380 | set_page(struct tda998x_priv *priv, uint16_t reg) |
366 | { | 381 | { |
367 | struct tda998x_priv *priv = to_tda998x_priv(encoder); | ||
368 | |||
369 | if (REG2PAGE(reg) != priv->current_page) { | 382 | if (REG2PAGE(reg) != priv->current_page) { |
370 | struct i2c_client *client = drm_i2c_encoder_get_client(encoder); | 383 | struct i2c_client *client = priv->hdmi; |
371 | uint8_t buf[] = { | 384 | uint8_t buf[] = { |
372 | REG_CURPAGE, REG2PAGE(reg) | 385 | REG_CURPAGE, REG2PAGE(reg) |
373 | }; | 386 | }; |
374 | int ret = i2c_master_send(client, buf, sizeof(buf)); | 387 | int ret = i2c_master_send(client, buf, sizeof(buf)); |
375 | if (ret < 0) | 388 | if (ret < 0) { |
376 | dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret); | 389 | dev_err(&client->dev, "setpage %04x err %d\n", |
390 | reg, ret); | ||
391 | return ret; | ||
392 | } | ||
377 | 393 | ||
378 | priv->current_page = REG2PAGE(reg); | 394 | priv->current_page = REG2PAGE(reg); |
379 | } | 395 | } |
396 | return 0; | ||
380 | } | 397 | } |
381 | 398 | ||
382 | static int | 399 | static int |
383 | reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt) | 400 | reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) |
384 | { | 401 | { |
385 | struct i2c_client *client = drm_i2c_encoder_get_client(encoder); | 402 | struct i2c_client *client = priv->hdmi; |
386 | uint8_t addr = REG2ADDR(reg); | 403 | uint8_t addr = REG2ADDR(reg); |
387 | int ret; | 404 | int ret; |
388 | 405 | ||
389 | set_page(encoder, reg); | 406 | ret = set_page(priv, reg); |
407 | if (ret < 0) | ||
408 | return ret; | ||
390 | 409 | ||
391 | ret = i2c_master_send(client, &addr, sizeof(addr)); | 410 | ret = i2c_master_send(client, &addr, sizeof(addr)); |
392 | if (ret < 0) | 411 | if (ret < 0) |
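set_page() now returns an error instead of silently caching a page it may not have selected, and the register helpers below check it before touching the bus. A userspace model of the paged-access scheme, assuming the driver's REG() encoding (page in the high byte, register address in the low byte) and using a stub in place of the real I2C transfer:

    #include <stdint.h>
    #include <stdio.h>

    #define REG(page, addr) ((uint16_t)(((page) << 8) | (addr)))
    #define REG2PAGE(reg)   (((reg) >> 8) & 0xff)
    #define REG2ADDR(reg)   ((reg) & 0xff)

    static int current_page = -1;     /* priv->current_page */

    static int bus_write(uint8_t addr, uint8_t val)
    {
        printf("i2c write %02x <- %02x\n", addr, val);
        return 0;                     /* pretend the transfer succeeded */
    }

    static int set_page(uint16_t reg)
    {
        if (REG2PAGE(reg) != current_page) {
            int ret = bus_write(0xff /* CURPAGE */, (uint8_t)REG2PAGE(reg));
            if (ret < 0)
                return ret;           /* callers now see the failure */
            current_page = REG2PAGE(reg);
        }
        return 0;                     /* page already selected, or switched */
    }

    int main(void)
    {
        uint16_t reg = REG(0x02, 0x00);   /* e.g. a page-2 PLL register */
        if (set_page(reg) == 0)
            bus_write((uint8_t)REG2ADDR(reg), 0x00);
        return 0;
    }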
@@ -404,100 +423,147 @@ fail: | |||
404 | } | 423 | } |
405 | 424 | ||
406 | static void | 425 | static void |
407 | reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt) | 426 | reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) |
408 | { | 427 | { |
409 | struct i2c_client *client = drm_i2c_encoder_get_client(encoder); | 428 | struct i2c_client *client = priv->hdmi; |
410 | uint8_t buf[cnt+1]; | 429 | uint8_t buf[cnt+1]; |
411 | int ret; | 430 | int ret; |
412 | 431 | ||
413 | buf[0] = REG2ADDR(reg); | 432 | buf[0] = REG2ADDR(reg); |
414 | memcpy(&buf[1], p, cnt); | 433 | memcpy(&buf[1], p, cnt); |
415 | 434 | ||
416 | set_page(encoder, reg); | 435 | ret = set_page(priv, reg); |
436 | if (ret < 0) | ||
437 | return; | ||
417 | 438 | ||
418 | ret = i2c_master_send(client, buf, cnt + 1); | 439 | ret = i2c_master_send(client, buf, cnt + 1); |
419 | if (ret < 0) | 440 | if (ret < 0) |
420 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 441 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
421 | } | 442 | } |
422 | 443 | ||
423 | static uint8_t | 444 | static int |
424 | reg_read(struct drm_encoder *encoder, uint16_t reg) | 445 | reg_read(struct tda998x_priv *priv, uint16_t reg) |
425 | { | 446 | { |
426 | uint8_t val = 0; | 447 | uint8_t val = 0; |
427 | reg_read_range(encoder, reg, &val, sizeof(val)); | 448 | int ret; |
449 | |||
450 | ret = reg_read_range(priv, reg, &val, sizeof(val)); | ||
451 | if (ret < 0) | ||
452 | return ret; | ||
428 | return val; | 453 | return val; |
429 | } | 454 | } |
430 | 455 | ||
431 | static void | 456 | static void |
432 | reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val) | 457 | reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) |
433 | { | 458 | { |
434 | struct i2c_client *client = drm_i2c_encoder_get_client(encoder); | 459 | struct i2c_client *client = priv->hdmi; |
435 | uint8_t buf[] = {REG2ADDR(reg), val}; | 460 | uint8_t buf[] = {REG2ADDR(reg), val}; |
436 | int ret; | 461 | int ret; |
437 | 462 | ||
438 | set_page(encoder, reg); | 463 | ret = set_page(priv, reg); |
464 | if (ret < 0) | ||
465 | return; | ||
439 | 466 | ||
440 | ret = i2c_master_send(client, buf, ARRAY_SIZE(buf)); | 467 | ret = i2c_master_send(client, buf, sizeof(buf)); |
441 | if (ret < 0) | 468 | if (ret < 0) |
442 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 469 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
443 | } | 470 | } |
444 | 471 | ||
445 | static void | 472 | static void |
446 | reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val) | 473 | reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) |
447 | { | 474 | { |
448 | struct i2c_client *client = drm_i2c_encoder_get_client(encoder); | 475 | struct i2c_client *client = priv->hdmi; |
449 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; | 476 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; |
450 | int ret; | 477 | int ret; |
451 | 478 | ||
452 | set_page(encoder, reg); | 479 | ret = set_page(priv, reg); |
480 | if (ret < 0) | ||
481 | return; | ||
453 | 482 | ||
454 | ret = i2c_master_send(client, buf, ARRAY_SIZE(buf)); | 483 | ret = i2c_master_send(client, buf, sizeof(buf)); |
455 | if (ret < 0) | 484 | if (ret < 0) |
456 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 485 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
457 | } | 486 | } |
458 | 487 | ||
459 | static void | 488 | static void |
460 | reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val) | 489 | reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val) |
461 | { | 490 | { |
462 | reg_write(encoder, reg, reg_read(encoder, reg) | val); | 491 | int old_val; |
492 | |||
493 | old_val = reg_read(priv, reg); | ||
494 | if (old_val >= 0) | ||
495 | reg_write(priv, reg, old_val | val); | ||
463 | } | 496 | } |
464 | 497 | ||
465 | static void | 498 | static void |
466 | reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val) | 499 | reg_clear(struct tda998x_priv *priv, uint16_t reg, uint8_t val) |
467 | { | 500 | { |
468 | reg_write(encoder, reg, reg_read(encoder, reg) & ~val); | 501 | int old_val; |
502 | |||
503 | old_val = reg_read(priv, reg); | ||
504 | if (old_val >= 0) | ||
505 | reg_write(priv, reg, old_val & ~val); | ||
469 | } | 506 | } |
470 | 507 | ||
471 | static void | 508 | static void |
472 | tda998x_reset(struct drm_encoder *encoder) | 509 | tda998x_reset(struct tda998x_priv *priv) |
473 | { | 510 | { |
474 | /* reset audio and i2c master: */ | 511 | /* reset audio and i2c master: */ |
475 | reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER); | 512 | reg_write(priv, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER); |
476 | msleep(50); | 513 | msleep(50); |
477 | reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER); | 514 | reg_write(priv, REG_SOFTRESET, 0); |
478 | msleep(50); | 515 | msleep(50); |
479 | 516 | ||
480 | /* reset transmitter: */ | 517 | /* reset transmitter: */ |
481 | reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR); | 518 | reg_set(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR); |
482 | reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR); | 519 | reg_clear(priv, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR); |
483 | 520 | ||
484 | /* PLL registers common configuration */ | 521 | /* PLL registers common configuration */ |
485 | reg_write(encoder, REG_PLL_SERIAL_1, 0x00); | 522 | reg_write(priv, REG_PLL_SERIAL_1, 0x00); |
486 | reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1)); | 523 | reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1)); |
487 | reg_write(encoder, REG_PLL_SERIAL_3, 0x00); | 524 | reg_write(priv, REG_PLL_SERIAL_3, 0x00); |
488 | reg_write(encoder, REG_SERIALIZER, 0x00); | 525 | reg_write(priv, REG_SERIALIZER, 0x00); |
489 | reg_write(encoder, REG_BUFFER_OUT, 0x00); | 526 | reg_write(priv, REG_BUFFER_OUT, 0x00); |
490 | reg_write(encoder, REG_PLL_SCG1, 0x00); | 527 | reg_write(priv, REG_PLL_SCG1, 0x00); |
491 | reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8); | 528 | reg_write(priv, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8); |
492 | reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); | 529 | reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); |
493 | reg_write(encoder, REG_PLL_SCGN1, 0xfa); | 530 | reg_write(priv, REG_PLL_SCGN1, 0xfa); |
494 | reg_write(encoder, REG_PLL_SCGN2, 0x00); | 531 | reg_write(priv, REG_PLL_SCGN2, 0x00); |
495 | reg_write(encoder, REG_PLL_SCGR1, 0x5b); | 532 | reg_write(priv, REG_PLL_SCGR1, 0x5b); |
496 | reg_write(encoder, REG_PLL_SCGR2, 0x00); | 533 | reg_write(priv, REG_PLL_SCGR2, 0x00); |
497 | reg_write(encoder, REG_PLL_SCG2, 0x10); | 534 | reg_write(priv, REG_PLL_SCG2, 0x10); |
498 | 535 | ||
499 | /* Write the default value MUX register */ | 536 | /* Write the default value MUX register */ |
500 | reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24); | 537 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); |
538 | } | ||
539 | |||
540 | /* | ||
541 | * only 2 interrupts may occur: screen plug/unplug and EDID read | ||
542 | */ | ||
543 | static irqreturn_t tda998x_irq_thread(int irq, void *data) | ||
544 | { | ||
545 | struct tda998x_priv *priv = data; | ||
546 | u8 sta, cec, lvl, flag0, flag1, flag2; | ||
547 | |||
548 | if (!priv) | ||
549 | return IRQ_HANDLED; | ||
550 | sta = cec_read(priv, REG_CEC_INTSTATUS); | ||
551 | cec = cec_read(priv, REG_CEC_RXSHPDINT); | ||
552 | lvl = cec_read(priv, REG_CEC_RXSHPDLEV); | ||
553 | flag0 = reg_read(priv, REG_INT_FLAGS_0); | ||
554 | flag1 = reg_read(priv, REG_INT_FLAGS_1); | ||
555 | flag2 = reg_read(priv, REG_INT_FLAGS_2); | ||
556 | DRM_DEBUG_DRIVER( | ||
557 | "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n", | ||
558 | sta, cec, lvl, flag0, flag1, flag2); | ||
559 | if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) { | ||
560 | priv->wq_edid_wait = 0; | ||
561 | wake_up(&priv->wq_edid); | ||
562 | } else if (cec != 0) { /* HPD change */ | ||
563 | if (priv->encoder && priv->encoder->dev) | ||
564 | drm_helper_hpd_irq_event(priv->encoder->dev); | ||
565 | } | ||
566 | return IRQ_HANDLED; | ||
501 | } | 567 | } |
502 | 568 | ||
503 | static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes) | 569 | static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes) |
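With reg_read() returning a negative errno, the read-modify-write helpers reg_set()/reg_clear() must no longer fold a failure into the register image; they now skip the write-back when the read failed. A small sketch of that guard with a stubbed-out bus:

    #include <stdint.h>
    #include <stdio.h>

    static int reg_read(uint16_t reg)
    {
        (void)reg;
        return -5;               /* simulate -EIO from the I2C bus */
    }

    static void reg_write(uint16_t reg, uint8_t val)
    {
        printf("write %04x <- %02x\n", reg, val);
    }

    static void reg_set(uint16_t reg, uint8_t val)
    {
        int old = reg_read(reg);

        if (old >= 0)            /* only write back a valid register image */
            reg_write(reg, (uint8_t)old | val);
    }

    int main(void)
    {
        reg_set(0x0200, 0x01);   /* no write: the failed read is not masked in */
        printf("done\n");
        return 0;
    }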
@@ -513,91 +579,88 @@ static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes) | |||
513 | #define PB(x) (HB(2) + 1 + (x)) | 579 | #define PB(x) (HB(2) + 1 + (x)) |
514 | 580 | ||
515 | static void | 581 | static void |
516 | tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr, | 582 | tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, |
517 | uint8_t *buf, size_t size) | 583 | uint8_t *buf, size_t size) |
518 | { | 584 | { |
519 | buf[PB(0)] = tda998x_cksum(buf, size); | 585 | buf[PB(0)] = tda998x_cksum(buf, size); |
520 | 586 | ||
521 | reg_clear(encoder, REG_DIP_IF_FLAGS, bit); | 587 | reg_clear(priv, REG_DIP_IF_FLAGS, bit); |
522 | reg_write_range(encoder, addr, buf, size); | 588 | reg_write_range(priv, addr, buf, size); |
523 | reg_set(encoder, REG_DIP_IF_FLAGS, bit); | 589 | reg_set(priv, REG_DIP_IF_FLAGS, bit); |
524 | } | 590 | } |
525 | 591 | ||
526 | static void | 592 | static void |
527 | tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p) | 593 | tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p) |
528 | { | 594 | { |
529 | uint8_t buf[PB(5) + 1]; | 595 | u8 buf[PB(HDMI_AUDIO_INFOFRAME_SIZE) + 1]; |
530 | 596 | ||
531 | memset(buf, 0, sizeof(buf)); | 597 | memset(buf, 0, sizeof(buf)); |
532 | buf[HB(0)] = 0x84; | 598 | buf[HB(0)] = HDMI_INFOFRAME_TYPE_AUDIO; |
533 | buf[HB(1)] = 0x01; | 599 | buf[HB(1)] = 0x01; |
534 | buf[HB(2)] = 10; | 600 | buf[HB(2)] = HDMI_AUDIO_INFOFRAME_SIZE; |
535 | buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ | 601 | buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ |
536 | buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ | 602 | buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ |
537 | buf[PB(4)] = p->audio_frame[4]; | 603 | buf[PB(4)] = p->audio_frame[4]; |
538 | buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ | 604 | buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ |
539 | 605 | ||
540 | tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, | 606 | tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, |
541 | sizeof(buf)); | 607 | sizeof(buf)); |
542 | } | 608 | } |
543 | 609 | ||
544 | static void | 610 | static void |
545 | tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode) | 611 | tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode) |
546 | { | 612 | { |
547 | uint8_t buf[PB(13) + 1]; | 613 | u8 buf[PB(HDMI_AVI_INFOFRAME_SIZE) + 1]; |
548 | 614 | ||
549 | memset(buf, 0, sizeof(buf)); | 615 | memset(buf, 0, sizeof(buf)); |
550 | buf[HB(0)] = 0x82; | 616 | buf[HB(0)] = HDMI_INFOFRAME_TYPE_AVI; |
551 | buf[HB(1)] = 0x02; | 617 | buf[HB(1)] = 0x02; |
552 | buf[HB(2)] = 13; | 618 | buf[HB(2)] = HDMI_AVI_INFOFRAME_SIZE; |
553 | buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN; | 619 | buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN; |
620 | buf[PB(2)] = HDMI_ACTIVE_ASPECT_PICTURE; | ||
554 | buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2; | 621 | buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2; |
555 | buf[PB(4)] = drm_match_cea_mode(mode); | 622 | buf[PB(4)] = drm_match_cea_mode(mode); |
556 | 623 | ||
557 | tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, | 624 | tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, |
558 | sizeof(buf)); | 625 | sizeof(buf)); |
559 | } | 626 | } |
560 | 627 | ||
561 | static void tda998x_audio_mute(struct drm_encoder *encoder, bool on) | 628 | static void tda998x_audio_mute(struct tda998x_priv *priv, bool on) |
562 | { | 629 | { |
563 | if (on) { | 630 | if (on) { |
564 | reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO); | 631 | reg_set(priv, REG_SOFTRESET, SOFTRESET_AUDIO); |
565 | reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO); | 632 | reg_clear(priv, REG_SOFTRESET, SOFTRESET_AUDIO); |
566 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); | 633 | reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); |
567 | } else { | 634 | } else { |
568 | reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); | 635 | reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); |
569 | } | 636 | } |
570 | } | 637 | } |
571 | 638 | ||
572 | static void | 639 | static void |
573 | tda998x_configure_audio(struct drm_encoder *encoder, | 640 | tda998x_configure_audio(struct tda998x_priv *priv, |
574 | struct drm_display_mode *mode, struct tda998x_encoder_params *p) | 641 | struct drm_display_mode *mode, struct tda998x_encoder_params *p) |
575 | { | 642 | { |
576 | uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv; | 643 | uint8_t buf[6], clksel_aip, clksel_fs, cts_n, adiv; |
577 | uint32_t n; | 644 | uint32_t n; |
578 | 645 | ||
579 | /* Enable audio ports */ | 646 | /* Enable audio ports */ |
580 | reg_write(encoder, REG_ENA_AP, p->audio_cfg); | 647 | reg_write(priv, REG_ENA_AP, p->audio_cfg); |
581 | reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg); | 648 | reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg); |
582 | 649 | ||
583 | /* Set audio input source */ | 650 | /* Set audio input source */ |
584 | switch (p->audio_format) { | 651 | switch (p->audio_format) { |
585 | case AFMT_SPDIF: | 652 | case AFMT_SPDIF: |
586 | reg_write(encoder, REG_MUX_AP, 0x40); | 653 | reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF); |
587 | clksel_aip = AIP_CLKSEL_AIP(0); | 654 | clksel_aip = AIP_CLKSEL_AIP_SPDIF; |
588 | /* FS64SPDIF */ | 655 | clksel_fs = AIP_CLKSEL_FS_FS64SPDIF; |
589 | clksel_fs = AIP_CLKSEL_FS(2); | ||
590 | cts_n = CTS_N_M(3) | CTS_N_K(3); | 656 | cts_n = CTS_N_M(3) | CTS_N_K(3); |
591 | ca_i2s = 0; | ||
592 | break; | 657 | break; |
593 | 658 | ||
594 | case AFMT_I2S: | 659 | case AFMT_I2S: |
595 | reg_write(encoder, REG_MUX_AP, 0x64); | 660 | reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S); |
596 | clksel_aip = AIP_CLKSEL_AIP(1); | 661 | clksel_aip = AIP_CLKSEL_AIP_I2S; |
597 | /* ACLK */ | 662 | clksel_fs = AIP_CLKSEL_FS_ACLK; |
598 | clksel_fs = AIP_CLKSEL_FS(0); | ||
599 | cts_n = CTS_N_M(3) | CTS_N_K(3); | 663 | cts_n = CTS_N_M(3) | CTS_N_K(3); |
600 | ca_i2s = CA_I2S_CA_I2S(0); | ||
601 | break; | 664 | break; |
602 | 665 | ||
603 | default: | 666 | default: |
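tda998x_write_aif()/tda998x_write_avi() above build HDMI infoframes and let tda998x_write_if() stamp the checksum into PB(0); per the HDMI convention the checksum byte makes all frame bytes sum to zero modulo 256. A standalone check of that computation (the sample payload bytes are arbitrary; the header bytes match the AVI values in this hunk):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t if_cksum(const uint8_t *buf, size_t bytes)
    {
        uint8_t sum = 0;

        while (bytes--)
            sum += *buf++;
        return (uint8_t)(0x100 - sum);    /* makes the total wrap to 0x00 */
    }

    int main(void)
    {
        /* AVI type 0x82, version 0x02, length 0x0d, checksum slot, payload */
        uint8_t frame[6] = { 0x82, 0x02, 0x0d, 0x00, 0x10, 0x00 };
        uint8_t total = 0;

        frame[3] = if_cksum(frame, sizeof(frame));
        for (size_t i = 0; i < sizeof(frame); i++)
            total += frame[i];
        printf("checksum %02x, total %02x\n", frame[3], total);  /* total == 00 */
        return 0;
    }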
@@ -605,12 +668,10 @@ tda998x_configure_audio(struct drm_encoder *encoder, | |||
605 | return; | 668 | return; |
606 | } | 669 | } |
607 | 670 | ||
608 | reg_write(encoder, REG_AIP_CLKSEL, clksel_aip); | 671 | reg_write(priv, REG_AIP_CLKSEL, clksel_aip); |
609 | reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT); | 672 | reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT | |
610 | 673 | AIP_CNTRL_0_ACR_MAN); /* auto CTS */ | |
611 | /* Enable automatic CTS generation */ | 674 | reg_write(priv, REG_CTS_N, cts_n); |
612 | reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN); | ||
613 | reg_write(encoder, REG_CTS_N, cts_n); | ||
614 | 675 | ||
615 | /* | 676 | /* |
616 | * Audio input somehow depends on HDMI line rate which is | 677 | * Audio input somehow depends on HDMI line rate which is |
@@ -619,11 +680,15 @@ tda998x_configure_audio(struct drm_encoder *encoder, | |||
619 | * There is no detailed info in the datasheet, so we just | 680 | * There is no detailed info in the datasheet, so we just |
620 | * assume 100MHz requires larger divider. | 681 | * assume 100MHz requires larger divider. |
621 | */ | 682 | */ |
683 | adiv = AUDIO_DIV_SERCLK_8; | ||
622 | if (mode->clock > 100000) | 684 | if (mode->clock > 100000) |
623 | adiv = AUDIO_DIV_SERCLK_16; | 685 | adiv++; /* AUDIO_DIV_SERCLK_16 */ |
624 | else | 686 | |
625 | adiv = AUDIO_DIV_SERCLK_8; | 687 | /* S/PDIF asks for a larger divider */ |
626 | reg_write(encoder, REG_AUDIO_DIV, adiv); | 688 | if (p->audio_format == AFMT_SPDIF) |
689 | adiv++; /* AUDIO_DIV_SERCLK_16 or _32 */ | ||
690 | |||
691 | reg_write(priv, REG_AUDIO_DIV, adiv); | ||
627 | 692 | ||
628 | /* | 693 | /* |
629 | * This is the approximate value of N, which happens to be | 694 | * This is the approximate value of N, which happens to be |
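The rewritten divider selection starts at AUDIO_DIV_SERCLK_8 and increments once for pixel clocks above 100 MHz and once more for S/PDIF, which only works because the AUDIO_DIV_SERCLK_* codes are consecutive. A sketch of the selection logic with placeholder codes (the real register values are not shown in this hunk):

    #include <stdio.h>

    enum { SERCLK_8, SERCLK_16, SERCLK_32 };   /* assumed consecutive codes */

    static int pick_audio_div(int clock_khz, int is_spdif)
    {
        int adiv = SERCLK_8;

        if (clock_khz > 100000)   /* fast HDMI clock needs a larger divider */
            adiv++;
        if (is_spdif)             /* S/PDIF asks for a larger divider again */
            adiv++;
        return adiv;
    }

    int main(void)
    {
        printf("148.5 MHz + S/PDIF -> divider code %d\n",
               pick_audio_div(148500, 1));
        return 0;
    }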
@@ -638,28 +703,29 @@ tda998x_configure_audio(struct drm_encoder *encoder, | |||
638 | buf[3] = n; | 703 | buf[3] = n; |
639 | buf[4] = n >> 8; | 704 | buf[4] = n >> 8; |
640 | buf[5] = n >> 16; | 705 | buf[5] = n >> 16; |
641 | reg_write_range(encoder, REG_ACR_CTS_0, buf, 6); | 706 | reg_write_range(priv, REG_ACR_CTS_0, buf, 6); |
642 | 707 | ||
643 | /* Set CTS clock reference */ | 708 | /* Set CTS clock reference */ |
644 | reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs); | 709 | reg_write(priv, REG_AIP_CLKSEL, clksel_aip | clksel_fs); |
645 | 710 | ||
646 | /* Reset CTS generator */ | 711 | /* Reset CTS generator */ |
647 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS); | 712 | reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS); |
648 | reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS); | 713 | reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS); |
649 | 714 | ||
650 | /* Write the channel status */ | 715 | /* Write the channel status */ |
651 | buf[0] = 0x04; | 716 | buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT; |
652 | buf[1] = 0x00; | 717 | buf[1] = 0x00; |
653 | buf[2] = 0x00; | 718 | buf[2] = IEC958_AES3_CON_FS_NOTID; |
654 | buf[3] = 0xf1; | 719 | buf[3] = IEC958_AES4_CON_ORIGFS_NOTID | |
655 | reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4); | 720 | IEC958_AES4_CON_MAX_WORDLEN_24; |
721 | reg_write_range(priv, REG_CH_STAT_B(0), buf, 4); | ||
656 | 722 | ||
657 | tda998x_audio_mute(encoder, true); | 723 | tda998x_audio_mute(priv, true); |
658 | mdelay(20); | 724 | msleep(20); |
659 | tda998x_audio_mute(encoder, false); | 725 | tda998x_audio_mute(priv, false); |
660 | 726 | ||
661 | /* Write the audio information packet */ | 727 | /* Write the audio information packet */ |
662 | tda998x_write_aif(encoder, p); | 728 | tda998x_write_aif(priv, p); |
663 | } | 729 | } |
664 | 730 | ||
665 | /* DRM encoder functions */ | 731 | /* DRM encoder functions */ |
@@ -701,19 +767,19 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
701 | switch (mode) { | 767 | switch (mode) { |
702 | case DRM_MODE_DPMS_ON: | 768 | case DRM_MODE_DPMS_ON: |
703 | /* enable video ports, audio will be enabled later */ | 769 | /* enable video ports, audio will be enabled later */ |
704 | reg_write(encoder, REG_ENA_VP_0, 0xff); | 770 | reg_write(priv, REG_ENA_VP_0, 0xff); |
705 | reg_write(encoder, REG_ENA_VP_1, 0xff); | 771 | reg_write(priv, REG_ENA_VP_1, 0xff); |
706 | reg_write(encoder, REG_ENA_VP_2, 0xff); | 772 | reg_write(priv, REG_ENA_VP_2, 0xff); |
707 | /* set muxing after enabling ports: */ | 773 | /* set muxing after enabling ports: */ |
708 | reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0); | 774 | reg_write(priv, REG_VIP_CNTRL_0, priv->vip_cntrl_0); |
709 | reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1); | 775 | reg_write(priv, REG_VIP_CNTRL_1, priv->vip_cntrl_1); |
710 | reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); | 776 | reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2); |
711 | break; | 777 | break; |
712 | case DRM_MODE_DPMS_OFF: | 778 | case DRM_MODE_DPMS_OFF: |
713 | /* disable video ports */ | 779 | /* disable video ports */ |
714 | reg_write(encoder, REG_ENA_VP_0, 0x00); | 780 | reg_write(priv, REG_ENA_VP_0, 0x00); |
715 | reg_write(encoder, REG_ENA_VP_1, 0x00); | 781 | reg_write(priv, REG_ENA_VP_1, 0x00); |
716 | reg_write(encoder, REG_ENA_VP_2, 0x00); | 782 | reg_write(priv, REG_ENA_VP_2, 0x00); |
717 | break; | 783 | break; |
718 | } | 784 | } |
719 | 785 | ||
@@ -831,110 +897,110 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, | |||
831 | } | 897 | } |
832 | 898 | ||
833 | /* mute the audio FIFO: */ | 899 | /* mute the audio FIFO: */ |
834 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); | 900 | reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); |
835 | 901 | ||
836 | /* set HDMI HDCP mode off: */ | 902 | /* set HDMI HDCP mode off: */ |
837 | reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS); | 903 | reg_write(priv, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS); |
838 | reg_clear(encoder, REG_TX33, TX33_HDMI); | 904 | reg_clear(priv, REG_TX33, TX33_HDMI); |
905 | reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0)); | ||
839 | 906 | ||
840 | reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0)); | ||
841 | /* no pre-filter or interpolator: */ | 907 | /* no pre-filter or interpolator: */ |
842 | reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) | | 908 | reg_write(priv, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) | |
843 | HVF_CNTRL_0_INTPOL(0)); | 909 | HVF_CNTRL_0_INTPOL(0)); |
844 | reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0)); | 910 | reg_write(priv, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0)); |
845 | reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) | | 911 | reg_write(priv, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) | |
846 | VIP_CNTRL_4_BLC(0)); | 912 | VIP_CNTRL_4_BLC(0)); |
847 | reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR); | ||
848 | 913 | ||
849 | reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ); | 914 | reg_clear(priv, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ); |
850 | reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE); | 915 | reg_clear(priv, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR | |
851 | reg_write(encoder, REG_SERIALIZER, 0); | 916 | PLL_SERIAL_3_SRL_DE); |
852 | reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0)); | 917 | reg_write(priv, REG_SERIALIZER, 0); |
918 | reg_write(priv, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0)); | ||
853 | 919 | ||
854 | /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */ | 920 | /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */ |
855 | rep = 0; | 921 | rep = 0; |
856 | reg_write(encoder, REG_RPT_CNTRL, 0); | 922 | reg_write(priv, REG_RPT_CNTRL, 0); |
857 | reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) | | 923 | reg_write(priv, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) | |
858 | SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); | 924 | SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); |
859 | 925 | ||
860 | reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | | 926 | reg_write(priv, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | |
861 | PLL_SERIAL_2_SRL_PR(rep)); | 927 | PLL_SERIAL_2_SRL_PR(rep)); |
862 | 928 | ||
863 | /* set color matrix bypass flag: */ | 929 | /* set color matrix bypass flag: */ |
864 | reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP); | 930 | reg_write(priv, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP | |
931 | MAT_CONTRL_MAT_SC(1)); | ||
865 | 932 | ||
866 | /* set BIAS tmds value: */ | 933 | /* set BIAS tmds value: */ |
867 | reg_write(encoder, REG_ANA_GENERAL, 0x09); | 934 | reg_write(priv, REG_ANA_GENERAL, 0x09); |
868 | |||
869 | reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD); | ||
870 | 935 | ||
871 | /* | 936 | /* |
872 | * Sync on rising HSYNC/VSYNC | 937 | * Sync on rising HSYNC/VSYNC |
873 | */ | 938 | */ |
874 | reg_write(encoder, REG_VIP_CNTRL_3, 0); | 939 | reg = VIP_CNTRL_3_SYNC_HS; |
875 | reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS); | ||
876 | 940 | ||
877 | /* | 941 | /* |
878 | * TDA19988 requires high-active sync at input stage, | 942 | * TDA19988 requires high-active sync at input stage, |
879 | * so invert low-active sync provided by master encoder here | 943 | * so invert low-active sync provided by master encoder here |
880 | */ | 944 | */ |
881 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 945 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
882 | reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL); | 946 | reg |= VIP_CNTRL_3_H_TGL; |
883 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 947 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
884 | reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL); | 948 | reg |= VIP_CNTRL_3_V_TGL; |
949 | reg_write(priv, REG_VIP_CNTRL_3, reg); | ||
950 | |||
951 | reg_write(priv, REG_VIDFORMAT, 0x00); | ||
952 | reg_write16(priv, REG_REFPIX_MSB, ref_pix); | ||
953 | reg_write16(priv, REG_REFLINE_MSB, ref_line); | ||
954 | reg_write16(priv, REG_NPIX_MSB, n_pix); | ||
955 | reg_write16(priv, REG_NLINE_MSB, n_line); | ||
956 | reg_write16(priv, REG_VS_LINE_STRT_1_MSB, vs1_line_s); | ||
957 | reg_write16(priv, REG_VS_PIX_STRT_1_MSB, vs1_pix_s); | ||
958 | reg_write16(priv, REG_VS_LINE_END_1_MSB, vs1_line_e); | ||
959 | reg_write16(priv, REG_VS_PIX_END_1_MSB, vs1_pix_e); | ||
960 | reg_write16(priv, REG_VS_LINE_STRT_2_MSB, vs2_line_s); | ||
961 | reg_write16(priv, REG_VS_PIX_STRT_2_MSB, vs2_pix_s); | ||
962 | reg_write16(priv, REG_VS_LINE_END_2_MSB, vs2_line_e); | ||
963 | reg_write16(priv, REG_VS_PIX_END_2_MSB, vs2_pix_e); | ||
964 | reg_write16(priv, REG_HS_PIX_START_MSB, hs_pix_s); | ||
965 | reg_write16(priv, REG_HS_PIX_STOP_MSB, hs_pix_e); | ||
966 | reg_write16(priv, REG_VWIN_START_1_MSB, vwin1_line_s); | ||
967 | reg_write16(priv, REG_VWIN_END_1_MSB, vwin1_line_e); | ||
968 | reg_write16(priv, REG_VWIN_START_2_MSB, vwin2_line_s); | ||
969 | reg_write16(priv, REG_VWIN_END_2_MSB, vwin2_line_e); | ||
970 | reg_write16(priv, REG_DE_START_MSB, de_pix_s); | ||
971 | reg_write16(priv, REG_DE_STOP_MSB, de_pix_e); | ||
972 | |||
973 | if (priv->rev == TDA19988) { | ||
974 | /* let incoming pixels fill the active space (if any) */ | ||
975 | reg_write(priv, REG_ENABLE_SPACE, 0x00); | ||
976 | } | ||
885 | 977 | ||
886 | /* | 978 | /* |
887 | * Always generate sync polarity relative to input sync and | 979 | * Always generate sync polarity relative to input sync and |
888 | * revert input stage toggled sync at output stage | 980 | * revert input stage toggled sync at output stage |
889 | */ | 981 | */ |
890 | reg = TBG_CNTRL_1_TGL_EN; | 982 | reg = TBG_CNTRL_1_DWIN_DIS | TBG_CNTRL_1_TGL_EN; |
891 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 983 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
892 | reg |= TBG_CNTRL_1_H_TGL; | 984 | reg |= TBG_CNTRL_1_H_TGL; |
893 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 985 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
894 | reg |= TBG_CNTRL_1_V_TGL; | 986 | reg |= TBG_CNTRL_1_V_TGL; |
895 | reg_write(encoder, REG_TBG_CNTRL_1, reg); | 987 | reg_write(priv, REG_TBG_CNTRL_1, reg); |
896 | |||
897 | reg_write(encoder, REG_VIDFORMAT, 0x00); | ||
898 | reg_write16(encoder, REG_REFPIX_MSB, ref_pix); | ||
899 | reg_write16(encoder, REG_REFLINE_MSB, ref_line); | ||
900 | reg_write16(encoder, REG_NPIX_MSB, n_pix); | ||
901 | reg_write16(encoder, REG_NLINE_MSB, n_line); | ||
902 | reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s); | ||
903 | reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s); | ||
904 | reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e); | ||
905 | reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e); | ||
906 | reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s); | ||
907 | reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s); | ||
908 | reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e); | ||
909 | reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e); | ||
910 | reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s); | ||
911 | reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e); | ||
912 | reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s); | ||
913 | reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e); | ||
914 | reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s); | ||
915 | reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e); | ||
916 | reg_write16(encoder, REG_DE_START_MSB, de_pix_s); | ||
917 | reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e); | ||
918 | |||
919 | if (priv->rev == TDA19988) { | ||
920 | /* let incoming pixels fill the active space (if any) */ | ||
921 | reg_write(encoder, REG_ENABLE_SPACE, 0x00); | ||
922 | } | ||
923 | 988 | ||
924 | /* must be last register set: */ | 989 | /* must be last register set: */ |
925 | reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); | 990 | reg_write(priv, REG_TBG_CNTRL_0, 0); |
926 | 991 | ||
927 | /* Only setup the info frames if the sink is HDMI */ | 992 | /* Only setup the info frames if the sink is HDMI */ |
928 | if (priv->is_hdmi_sink) { | 993 | if (priv->is_hdmi_sink) { |
929 | /* We need to turn HDMI HDCP stuff on to get audio through */ | 994 | /* We need to turn HDMI HDCP stuff on to get audio through */ |
930 | reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS); | 995 | reg &= ~TBG_CNTRL_1_DWIN_DIS; |
931 | reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1)); | 996 | reg_write(priv, REG_TBG_CNTRL_1, reg); |
932 | reg_set(encoder, REG_TX33, TX33_HDMI); | 997 | reg_write(priv, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1)); |
998 | reg_set(priv, REG_TX33, TX33_HDMI); | ||
933 | 999 | ||
934 | tda998x_write_avi(encoder, adjusted_mode); | 1000 | tda998x_write_avi(priv, adjusted_mode); |
935 | 1001 | ||
936 | if (priv->params.audio_cfg) | 1002 | if (priv->params.audio_cfg) |
937 | tda998x_configure_audio(encoder, adjusted_mode, | 1003 | tda998x_configure_audio(priv, adjusted_mode, |
938 | &priv->params); | 1004 | &priv->params); |
939 | } | 1005 | } |
940 | } | 1006 | } |
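
A note on the mode_set hunk above: the rework stops issuing reg_write()/reg_set() pairs for REG_VIP_CNTRL_3 and REG_TBG_CNTRL_1 and instead builds each value in a local reg before a single write, which avoids transient register states on the bus. A minimal user-space sketch of that flag-accumulation idiom; the bit positions below are placeholders, the real ones live in the driver's register header and the DRM mode flags:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in bit definitions; the real values come from the TDA998x
     * register header and from the DRM mode flags -- placeholders only. */
    #define VIP_CNTRL_3_SYNC_HS	(1 << 0)
    #define VIP_CNTRL_3_H_TGL	(1 << 1)
    #define VIP_CNTRL_3_V_TGL	(1 << 2)
    #define MODE_FLAG_NHSYNC	(1 << 0)
    #define MODE_FLAG_NVSYNC	(1 << 1)

    /* Build the VIP_CNTRL_3 value: sync on HSYNC, toggling the polarity
     * of any low-active input sync so the chip sees high-active sync. */
    static uint8_t vip_cntrl_3_value(unsigned int mode_flags)
    {
    	uint8_t reg = VIP_CNTRL_3_SYNC_HS;

    	if (mode_flags & MODE_FLAG_NHSYNC)
    		reg |= VIP_CNTRL_3_H_TGL;
    	if (mode_flags & MODE_FLAG_NVSYNC)
    		reg |= VIP_CNTRL_3_V_TGL;
    	return reg;	/* written once, with a single reg_write() */
    }

    int main(void)
    {
    	printf("VIP_CNTRL_3 = 0x%02x\n",
    	       vip_cntrl_3_value(MODE_FLAG_NHSYNC));
    	return 0;
    }
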
@@ -943,7 +1009,9 @@ static enum drm_connector_status | |||
943 | tda998x_encoder_detect(struct drm_encoder *encoder, | 1009 | tda998x_encoder_detect(struct drm_encoder *encoder, |
944 | struct drm_connector *connector) | 1010 | struct drm_connector *connector) |
945 | { | 1011 | { |
946 | uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV); | 1012 | struct tda998x_priv *priv = to_tda998x_priv(encoder); |
1013 | uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV); | ||
1014 | |||
947 | return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected : | 1015 | return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected : |
948 | connector_status_disconnected; | 1016 | connector_status_disconnected; |
949 | } | 1017 | } |
@@ -951,46 +1019,57 @@ tda998x_encoder_detect(struct drm_encoder *encoder, | |||
951 | static int | 1019 | static int |
952 | read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) | 1020 | read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) |
953 | { | 1021 | { |
1022 | struct tda998x_priv *priv = to_tda998x_priv(encoder); | ||
954 | uint8_t offset, segptr; | 1023 | uint8_t offset, segptr; |
955 | int ret, i; | 1024 | int ret, i; |
956 | 1025 | ||
957 | /* enable EDID read irq: */ | ||
958 | reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | ||
959 | |||
960 | offset = (blk & 1) ? 128 : 0; | 1026 | offset = (blk & 1) ? 128 : 0; |
961 | segptr = blk / 2; | 1027 | segptr = blk / 2; |
962 | 1028 | ||
963 | reg_write(encoder, REG_DDC_ADDR, 0xa0); | 1029 | reg_write(priv, REG_DDC_ADDR, 0xa0); |
964 | reg_write(encoder, REG_DDC_OFFS, offset); | 1030 | reg_write(priv, REG_DDC_OFFS, offset); |
965 | reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60); | 1031 | reg_write(priv, REG_DDC_SEGM_ADDR, 0x60); |
966 | reg_write(encoder, REG_DDC_SEGM, segptr); | 1032 | reg_write(priv, REG_DDC_SEGM, segptr); |
967 | 1033 | ||
968 | /* enable reading EDID: */ | 1034 | /* enable reading EDID: */ |
969 | reg_write(encoder, REG_EDID_CTRL, 0x1); | 1035 | priv->wq_edid_wait = 1; |
1036 | reg_write(priv, REG_EDID_CTRL, 0x1); | ||
970 | 1037 | ||
971 | /* flag must be cleared by sw: */ | 1038 | /* flag must be cleared by sw: */ |
972 | reg_write(encoder, REG_EDID_CTRL, 0x0); | 1039 | reg_write(priv, REG_EDID_CTRL, 0x0); |
973 | 1040 | ||
974 | /* wait for block read to complete: */ | 1041 | /* wait for block read to complete: */ |
975 | for (i = 100; i > 0; i--) { | 1042 | if (priv->hdmi->irq) { |
976 | uint8_t val = reg_read(encoder, REG_INT_FLAGS_2); | 1043 | i = wait_event_timeout(priv->wq_edid, |
977 | if (val & INT_FLAGS_2_EDID_BLK_RD) | 1044 | !priv->wq_edid_wait, |
978 | break; | 1045 | msecs_to_jiffies(100)); |
979 | msleep(1); | 1046 | if (i < 0) { |
1047 | dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i); | ||
1048 | return i; | ||
1049 | } | ||
1050 | } else { | ||
1051 | for (i = 10; i > 0; i--) { | ||
1052 | msleep(10); | ||
1053 | ret = reg_read(priv, REG_INT_FLAGS_2); | ||
1054 | if (ret < 0) | ||
1055 | return ret; | ||
1056 | if (ret & INT_FLAGS_2_EDID_BLK_RD) | ||
1057 | break; | ||
1058 | } | ||
980 | } | 1059 | } |
981 | 1060 | ||
982 | if (i == 0) | 1061 | if (i == 0) { |
1062 | dev_err(&priv->hdmi->dev, "read edid timeout\n"); | ||
983 | return -ETIMEDOUT; | 1063 | return -ETIMEDOUT; |
1064 | } | ||
984 | 1065 | ||
985 | ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH); | 1066 | ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH); |
986 | if (ret != EDID_LENGTH) { | 1067 | if (ret != EDID_LENGTH) { |
987 | dev_err(encoder->dev->dev, "failed to read edid block %d: %d", | 1068 | dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n", |
988 | blk, ret); | 1069 | blk, ret); |
989 | return ret; | 1070 | return ret; |
990 | } | 1071 | } |
991 | 1072 | ||
992 | reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | ||
993 | |||
994 | return 0; | 1073 | return 0; |
995 | } | 1074 | } |
996 | 1075 | ||
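
The read_edid_block() rework above waits for INT_FLAGS_2_EDID_BLK_RD on an IRQ-driven waitqueue when an interrupt line is wired up, and falls back to polling the flag register otherwise. A self-contained sketch of just the polling fallback, with the register access stubbed out; read_int_flags_2() is a hypothetical stand-in for the driver's reg_read() helper and the bit value is a placeholder:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define INT_FLAGS_2_EDID_BLK_RD	(1 << 1)	/* placeholder bit */

    /* Hypothetical stand-in for reg_read(priv, REG_INT_FLAGS_2): returns
     * the flag register contents, or a negative errno on a bus error.
     * Here it simulates the EDID-ready flag going high on the third poll. */
    static int polls;
    static int read_int_flags_2(void)
    {
    	return (++polls >= 3) ? INT_FLAGS_2_EDID_BLK_RD : 0;
    }

    /* Polling fallback: up to 10 tries, 10 ms apart, roughly the same
     * 100 ms budget the IRQ path gets from wait_event_timeout(). */
    static int wait_edid_block_polled(void)
    {
    	int i, ret;

    	for (i = 10; i > 0; i--) {
    		usleep(10 * 1000);
    		ret = read_int_flags_2();
    		if (ret < 0)
    			return ret;	/* bus error, bail out */
    		if (ret & INT_FLAGS_2_EDID_BLK_RD)
    			return 0;	/* block is ready to fetch */
    	}
    	return -ETIMEDOUT;
    }

    int main(void)
    {
    	printf("wait: %d after %d polls\n",
    	       wait_edid_block_polled(), polls);
    	return 0;
    }
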
@@ -998,7 +1077,7 @@ static uint8_t * | |||
998 | do_get_edid(struct drm_encoder *encoder) | 1077 | do_get_edid(struct drm_encoder *encoder) |
999 | { | 1078 | { |
1000 | struct tda998x_priv *priv = to_tda998x_priv(encoder); | 1079 | struct tda998x_priv *priv = to_tda998x_priv(encoder); |
1001 | int j = 0, valid_extensions = 0; | 1080 | int j, valid_extensions = 0; |
1002 | uint8_t *block, *new; | 1081 | uint8_t *block, *new; |
1003 | bool print_bad_edid = drm_debug & DRM_UT_KMS; | 1082 | bool print_bad_edid = drm_debug & DRM_UT_KMS; |
1004 | 1083 | ||
@@ -1006,7 +1085,7 @@ do_get_edid(struct drm_encoder *encoder) | |||
1006 | return NULL; | 1085 | return NULL; |
1007 | 1086 | ||
1008 | if (priv->rev == TDA19988) | 1087 | if (priv->rev == TDA19988) |
1009 | reg_clear(encoder, REG_TX4, TX4_PD_RAM); | 1088 | reg_clear(priv, REG_TX4, TX4_PD_RAM); |
1010 | 1089 | ||
1011 | /* base block fetch */ | 1090 | /* base block fetch */ |
1012 | if (read_edid_block(encoder, block, 0)) | 1091 | if (read_edid_block(encoder, block, 0)) |
@@ -1046,14 +1125,14 @@ do_get_edid(struct drm_encoder *encoder) | |||
1046 | 1125 | ||
1047 | done: | 1126 | done: |
1048 | if (priv->rev == TDA19988) | 1127 | if (priv->rev == TDA19988) |
1049 | reg_set(encoder, REG_TX4, TX4_PD_RAM); | 1128 | reg_set(priv, REG_TX4, TX4_PD_RAM); |
1050 | 1129 | ||
1051 | return block; | 1130 | return block; |
1052 | 1131 | ||
1053 | fail: | 1132 | fail: |
1054 | if (priv->rev == TDA19988) | 1133 | if (priv->rev == TDA19988) |
1055 | reg_set(encoder, REG_TX4, TX4_PD_RAM); | 1134 | reg_set(priv, REG_TX4, TX4_PD_RAM); |
1056 | dev_warn(encoder->dev->dev, "failed to read EDID\n"); | 1135 | dev_warn(&priv->hdmi->dev, "failed to read EDID\n"); |
1057 | kfree(block); | 1136 | kfree(block); |
1058 | return NULL; | 1137 | return NULL; |
1059 | } | 1138 | } |
@@ -1080,7 +1159,13 @@ static int | |||
1080 | tda998x_encoder_create_resources(struct drm_encoder *encoder, | 1159 | tda998x_encoder_create_resources(struct drm_encoder *encoder, |
1081 | struct drm_connector *connector) | 1160 | struct drm_connector *connector) |
1082 | { | 1161 | { |
1083 | DBG(""); | 1162 | struct tda998x_priv *priv = to_tda998x_priv(encoder); |
1163 | |||
1164 | if (priv->hdmi->irq) | ||
1165 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
1166 | else | ||
1167 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | | ||
1168 | DRM_CONNECTOR_POLL_DISCONNECT; | ||
1084 | return 0; | 1169 | return 0; |
1085 | } | 1170 | } |
1086 | 1171 | ||
@@ -1099,6 +1184,13 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) | |||
1099 | { | 1184 | { |
1100 | struct tda998x_priv *priv = to_tda998x_priv(encoder); | 1185 | struct tda998x_priv *priv = to_tda998x_priv(encoder); |
1101 | drm_i2c_encoder_destroy(encoder); | 1186 | drm_i2c_encoder_destroy(encoder); |
1187 | |||
1188 | /* disable all IRQs and free the IRQ handler */ | ||
1189 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); | ||
1190 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | ||
1191 | if (priv->hdmi->irq) | ||
1192 | free_irq(priv->hdmi->irq, priv); | ||
1193 | |||
1102 | if (priv->cec) | 1194 | if (priv->cec) |
1103 | i2c_unregister_device(priv->cec); | 1195 | i2c_unregister_device(priv->cec); |
1104 | kfree(priv); | 1196 | kfree(priv); |
@@ -1138,8 +1230,10 @@ tda998x_encoder_init(struct i2c_client *client, | |||
1138 | struct drm_device *dev, | 1230 | struct drm_device *dev, |
1139 | struct drm_encoder_slave *encoder_slave) | 1231 | struct drm_encoder_slave *encoder_slave) |
1140 | { | 1232 | { |
1141 | struct drm_encoder *encoder = &encoder_slave->base; | ||
1142 | struct tda998x_priv *priv; | 1233 | struct tda998x_priv *priv; |
1234 | struct device_node *np = client->dev.of_node; | ||
1235 | u32 video; | ||
1236 | int rev_lo, rev_hi, ret; | ||
1143 | 1237 | ||
1144 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 1238 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
1145 | if (!priv) | 1239 | if (!priv) |
@@ -1150,52 +1244,113 @@ tda998x_encoder_init(struct i2c_client *client, | |||
1150 | priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); | 1244 | priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); |
1151 | 1245 | ||
1152 | priv->current_page = 0xff; | 1246 | priv->current_page = 0xff; |
1247 | priv->hdmi = client; | ||
1153 | priv->cec = i2c_new_dummy(client->adapter, 0x34); | 1248 | priv->cec = i2c_new_dummy(client->adapter, 0x34); |
1154 | if (!priv->cec) { | 1249 | if (!priv->cec) { |
1155 | kfree(priv); | 1250 | kfree(priv); |
1156 | return -ENODEV; | 1251 | return -ENODEV; |
1157 | } | 1252 | } |
1253 | |||
1254 | priv->encoder = &encoder_slave->base; | ||
1158 | priv->dpms = DRM_MODE_DPMS_OFF; | 1255 | priv->dpms = DRM_MODE_DPMS_OFF; |
1159 | 1256 | ||
1160 | encoder_slave->slave_priv = priv; | 1257 | encoder_slave->slave_priv = priv; |
1161 | encoder_slave->slave_funcs = &tda998x_encoder_funcs; | 1258 | encoder_slave->slave_funcs = &tda998x_encoder_funcs; |
1162 | 1259 | ||
1163 | /* wake up the device: */ | 1260 | /* wake up the device: */ |
1164 | cec_write(encoder, REG_CEC_ENAMODS, | 1261 | cec_write(priv, REG_CEC_ENAMODS, |
1165 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); | 1262 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); |
1166 | 1263 | ||
1167 | tda998x_reset(encoder); | 1264 | tda998x_reset(priv); |
1168 | 1265 | ||
1169 | /* read version: */ | 1266 | /* read version: */ |
1170 | priv->rev = reg_read(encoder, REG_VERSION_LSB) | | 1267 | rev_lo = reg_read(priv, REG_VERSION_LSB); |
1171 | reg_read(encoder, REG_VERSION_MSB) << 8; | 1268 | rev_hi = reg_read(priv, REG_VERSION_MSB); |
1269 | if (rev_lo < 0 || rev_hi < 0) { | ||
1270 | ret = rev_lo < 0 ? rev_lo : rev_hi; | ||
1271 | goto fail; | ||
1272 | } | ||
1273 | |||
1274 | priv->rev = rev_lo | rev_hi << 8; | ||
1172 | 1275 | ||
1173 | /* mask off feature bits: */ | 1276 | /* mask off feature bits: */ |
1174 | priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */ | 1277 | priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */ |
1175 | 1278 | ||
1176 | switch (priv->rev) { | 1279 | switch (priv->rev) { |
1177 | case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break; | 1280 | case TDA9989N2: |
1178 | case TDA19989: dev_info(dev->dev, "found TDA19989"); break; | 1281 | dev_info(&client->dev, "found TDA9989 n2"); |
1179 | case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break; | 1282 | break; |
1180 | case TDA19988: dev_info(dev->dev, "found TDA19988"); break; | 1283 | case TDA19989: |
1284 | dev_info(&client->dev, "found TDA19989"); | ||
1285 | break; | ||
1286 | case TDA19989N2: | ||
1287 | dev_info(&client->dev, "found TDA19989 n2"); | ||
1288 | break; | ||
1289 | case TDA19988: | ||
1290 | dev_info(&client->dev, "found TDA19988"); | ||
1291 | break; | ||
1181 | default: | 1292 | default: |
1182 | DBG("found unsupported device: %04x", priv->rev); | 1293 | dev_err(&client->dev, "found unsupported device: %04x\n", |
1294 | priv->rev); | ||
1183 | goto fail; | 1295 | goto fail; |
1184 | } | 1296 | } |
1185 | 1297 | ||
1186 | /* after reset, enable DDC: */ | 1298 | /* after reset, enable DDC: */ |
1187 | reg_write(encoder, REG_DDC_DISABLE, 0x00); | 1299 | reg_write(priv, REG_DDC_DISABLE, 0x00); |
1188 | 1300 | ||
1189 | /* set clock on DDC channel: */ | 1301 | /* set clock on DDC channel: */ |
1190 | reg_write(encoder, REG_TX3, 39); | 1302 | reg_write(priv, REG_TX3, 39); |
1191 | 1303 | ||
1192 | /* if necessary, disable multi-master: */ | 1304 | /* if necessary, disable multi-master: */ |
1193 | if (priv->rev == TDA19989) | 1305 | if (priv->rev == TDA19989) |
1194 | reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM); | 1306 | reg_set(priv, REG_I2C_MASTER, I2C_MASTER_DIS_MM); |
1195 | 1307 | ||
1196 | cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL, | 1308 | cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL, |
1197 | CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL); | 1309 | CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL); |
1198 | 1310 | ||
1311 | /* initialize the optional IRQ */ | ||
1312 | if (client->irq) { | ||
1313 | int irqf_trigger; | ||
1314 | |||
1315 | /* init read EDID waitqueue */ | ||
1316 | init_waitqueue_head(&priv->wq_edid); | ||
1317 | |||
1318 | /* clear pending interrupts */ | ||
1319 | reg_read(priv, REG_INT_FLAGS_0); | ||
1320 | reg_read(priv, REG_INT_FLAGS_1); | ||
1321 | reg_read(priv, REG_INT_FLAGS_2); | ||
1322 | |||
1323 | irqf_trigger = | ||
1324 | irqd_get_trigger_type(irq_get_irq_data(client->irq)); | ||
1325 | ret = request_threaded_irq(client->irq, NULL, | ||
1326 | tda998x_irq_thread, | ||
1327 | irqf_trigger | IRQF_ONESHOT, | ||
1328 | "tda998x", priv); | ||
1329 | if (ret) { | ||
1330 | dev_err(&client->dev, | ||
1331 | "failed to request IRQ#%u: %d\n", | ||
1332 | client->irq, ret); | ||
1333 | goto fail; | ||
1334 | } | ||
1335 | |||
1336 | /* enable HPD irq */ | ||
1337 | cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD); | ||
1338 | } | ||
1339 | |||
1340 | /* enable EDID read irq: */ | ||
1341 | reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | ||
1342 | |||
1343 | if (!np) | ||
1344 | return 0; /* non-DT */ | ||
1345 | |||
1346 | /* get the optional video properties */ | ||
1347 | ret = of_property_read_u32(np, "video-ports", &video); | ||
1348 | if (ret == 0) { | ||
1349 | priv->vip_cntrl_0 = video >> 16; | ||
1350 | priv->vip_cntrl_1 = video >> 8; | ||
1351 | priv->vip_cntrl_2 = video; | ||
1352 | } | ||
1353 | |||
1199 | return 0; | 1354 | return 0; |
1200 | 1355 | ||
1201 | fail: | 1356 | fail: |
@@ -1210,6 +1365,14 @@ fail: | |||
1210 | return -ENXIO; | 1365 | return -ENXIO; |
1211 | } | 1366 | } |
1212 | 1367 | ||
1368 | #ifdef CONFIG_OF | ||
1369 | static const struct of_device_id tda998x_dt_ids[] = { | ||
1370 | { .compatible = "nxp,tda998x", }, | ||
1371 | { } | ||
1372 | }; | ||
1373 | MODULE_DEVICE_TABLE(of, tda998x_dt_ids); | ||
1374 | #endif | ||
1375 | |||
1213 | static struct i2c_device_id tda998x_ids[] = { | 1376 | static struct i2c_device_id tda998x_ids[] = { |
1214 | { "tda998x", 0 }, | 1377 | { "tda998x", 0 }, |
1215 | { } | 1378 | { } |
@@ -1222,6 +1385,7 @@ static struct drm_i2c_encoder_driver tda998x_driver = { | |||
1222 | .remove = tda998x_remove, | 1385 | .remove = tda998x_remove, |
1223 | .driver = { | 1386 | .driver = { |
1224 | .name = "tda998x", | 1387 | .name = "tda998x", |
1388 | .of_match_table = of_match_ptr(tda998x_dt_ids), | ||
1225 | }, | 1389 | }, |
1226 | .id_table = tda998x_ids, | 1390 | .id_table = tda998x_ids, |
1227 | }, | 1391 | }, |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 70384c8d1404..338fa6799ecf 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1526,7 +1526,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) | |||
1526 | if (!obj->fault_mappable) | 1526 | if (!obj->fault_mappable) |
1527 | return; | 1527 | return; |
1528 | 1528 | ||
1529 | drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); | 1529 | drm_vma_node_unmap(&obj->base.vma_node, |
1530 | obj->base.dev->anon_inode->i_mapping); | ||
1530 | obj->fault_mappable = false; | 1531 | obj->fault_mappable = false; |
1531 | } | 1532 | } |
1532 | 1533 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fb8a967df027..bbb1327644d4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -91,6 +91,7 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 93 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
94 | static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); | ||
94 | static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | 95 | static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); |
95 | 96 | ||
96 | static int | 97 | static int |
@@ -459,6 +460,9 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
459 | uint32_t status; | 460 | uint32_t status; |
460 | int try, clock = 0; | 461 | int try, clock = 0; |
461 | bool has_aux_irq = HAS_AUX_IRQ(dev); | 462 | bool has_aux_irq = HAS_AUX_IRQ(dev); |
463 | bool vdd; | ||
464 | |||
465 | vdd = _edp_panel_vdd_on(intel_dp); | ||
462 | 466 | ||
463 | /* dp aux is extremely sensitive to irq latency, hence request the | 467 | /* dp aux is extremely sensitive to irq latency, hence request the |
464 | * lowest possible wakeup latency and so prevent the cpu from going into | 468 | * lowest possible wakeup latency and so prevent the cpu from going into |
@@ -564,223 +568,130 @@ out: | |||
564 | pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); | 568 | pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); |
565 | intel_aux_display_runtime_put(dev_priv); | 569 | intel_aux_display_runtime_put(dev_priv); |
566 | 570 | ||
571 | if (vdd) | ||
572 | edp_panel_vdd_off(intel_dp, false); | ||
573 | |||
567 | return ret; | 574 | return ret; |
568 | } | 575 | } |
569 | 576 | ||
570 | /* Write data to the aux channel in native mode */ | 577 | #define HEADER_SIZE 4 |
571 | static int | 578 | static ssize_t |
572 | intel_dp_aux_native_write(struct intel_dp *intel_dp, | 579 | intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) |
573 | uint16_t address, uint8_t *send, int send_bytes) | ||
574 | { | 580 | { |
581 | struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); | ||
582 | uint8_t txbuf[20], rxbuf[20]; | ||
583 | size_t txsize, rxsize; | ||
575 | int ret; | 584 | int ret; |
576 | uint8_t msg[20]; | ||
577 | int msg_bytes; | ||
578 | uint8_t ack; | ||
579 | int retry; | ||
580 | 585 | ||
581 | if (WARN_ON(send_bytes > 16)) | 586 | txbuf[0] = msg->request << 4; |
582 | return -E2BIG; | 587 | txbuf[1] = msg->address >> 8; |
588 | txbuf[2] = msg->address & 0xff; | ||
589 | txbuf[3] = msg->size - 1; | ||
583 | 590 | ||
584 | intel_dp_check_edp(intel_dp); | 591 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
585 | msg[0] = DP_AUX_NATIVE_WRITE << 4; | 592 | case DP_AUX_NATIVE_WRITE: |
586 | msg[1] = address >> 8; | 593 | case DP_AUX_I2C_WRITE: |
587 | msg[2] = address & 0xff; | 594 | txsize = HEADER_SIZE + msg->size; |
588 | msg[3] = send_bytes - 1; | 595 | rxsize = 1; |
589 | memcpy(&msg[4], send, send_bytes); | ||
590 | msg_bytes = send_bytes + 4; | ||
591 | for (retry = 0; retry < 7; retry++) { | ||
592 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); | ||
593 | if (ret < 0) | ||
594 | return ret; | ||
595 | ack >>= 4; | ||
596 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | ||
597 | return send_bytes; | ||
598 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) | ||
599 | usleep_range(400, 500); | ||
600 | else | ||
601 | return -EIO; | ||
602 | } | ||
603 | 596 | ||
604 | DRM_ERROR("too many retries, giving up\n"); | 597 | if (WARN_ON(txsize > 20)) |
605 | return -EIO; | 598 | return -E2BIG; |
606 | } | ||
607 | 599 | ||
608 | /* Write a single byte to the aux channel in native mode */ | 600 | memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); |
609 | static int | ||
610 | intel_dp_aux_native_write_1(struct intel_dp *intel_dp, | ||
611 | uint16_t address, uint8_t byte) | ||
612 | { | ||
613 | return intel_dp_aux_native_write(intel_dp, address, &byte, 1); | ||
614 | } | ||
615 | 601 | ||
616 | /* read bytes from a native aux channel */ | 602 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); |
617 | static int | 603 | if (ret > 0) { |
618 | intel_dp_aux_native_read(struct intel_dp *intel_dp, | 604 | msg->reply = rxbuf[0] >> 4; |
619 | uint16_t address, uint8_t *recv, int recv_bytes) | ||
620 | { | ||
621 | uint8_t msg[4]; | ||
622 | int msg_bytes; | ||
623 | uint8_t reply[20]; | ||
624 | int reply_bytes; | ||
625 | uint8_t ack; | ||
626 | int ret; | ||
627 | int retry; | ||
628 | 605 | ||
629 | if (WARN_ON(recv_bytes > 19)) | 606 | /* Return payload size. */ |
630 | return -E2BIG; | 607 | ret = msg->size; |
608 | } | ||
609 | break; | ||
631 | 610 | ||
632 | intel_dp_check_edp(intel_dp); | 611 | case DP_AUX_NATIVE_READ: |
633 | msg[0] = DP_AUX_NATIVE_READ << 4; | 612 | case DP_AUX_I2C_READ: |
634 | msg[1] = address >> 8; | 613 | txsize = HEADER_SIZE; |
635 | msg[2] = address & 0xff; | 614 | rxsize = msg->size + 1; |
636 | msg[3] = recv_bytes - 1; | 615 | |
637 | 616 | if (WARN_ON(rxsize > 20)) | |
638 | msg_bytes = 4; | 617 | return -E2BIG; |
639 | reply_bytes = recv_bytes + 1; | 618 | |
640 | 619 | ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); | |
641 | for (retry = 0; retry < 7; retry++) { | 620 | if (ret > 0) { |
642 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, | 621 | msg->reply = rxbuf[0] >> 4; |
643 | reply, reply_bytes); | 622 | /* |
644 | if (ret == 0) | 623 | * Assume happy day, and copy the data. The caller is |
645 | return -EPROTO; | 624 | * expected to check msg->reply before touching it. |
646 | if (ret < 0) | 625 | * |
647 | return ret; | 626 | * Return payload size. |
648 | ack = reply[0] >> 4; | 627 | */ |
649 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) { | 628 | ret--; |
650 | memcpy(recv, reply + 1, ret - 1); | 629 | memcpy(msg->buffer, rxbuf + 1, ret); |
651 | return ret - 1; | ||
652 | } | 630 | } |
653 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) | 631 | break; |
654 | usleep_range(400, 500); | 632 | |
655 | else | 633 | default: |
656 | return -EIO; | 634 | ret = -EINVAL; |
635 | break; | ||
657 | } | 636 | } |
658 | 637 | ||
659 | DRM_ERROR("too many retries, giving up\n"); | 638 | return ret; |
660 | return -EIO; | ||
661 | } | 639 | } |
662 | 640 | ||
663 | static int | 641 | static void |
664 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 642 | intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) |
665 | uint8_t write_byte, uint8_t *read_byte) | 643 | { |
666 | { | 644 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
667 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 645 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
668 | struct intel_dp *intel_dp = container_of(adapter, | 646 | enum port port = intel_dig_port->port; |
669 | struct intel_dp, | 647 | const char *name = NULL; |
670 | adapter); | ||
671 | uint16_t address = algo_data->address; | ||
672 | uint8_t msg[5]; | ||
673 | uint8_t reply[2]; | ||
674 | unsigned retry; | ||
675 | int msg_bytes; | ||
676 | int reply_bytes; | ||
677 | int ret; | 648 | int ret; |
678 | 649 | ||
679 | intel_edp_panel_vdd_on(intel_dp); | 650 | switch (port) { |
680 | intel_dp_check_edp(intel_dp); | 651 | case PORT_A: |
681 | /* Set up the command byte */ | 652 | intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; |
682 | if (mode & MODE_I2C_READ) | 653 | name = "DPDDC-A"; |
683 | msg[0] = DP_AUX_I2C_READ << 4; | ||
684 | else | ||
685 | msg[0] = DP_AUX_I2C_WRITE << 4; | ||
686 | |||
687 | if (!(mode & MODE_I2C_STOP)) | ||
688 | msg[0] |= DP_AUX_I2C_MOT << 4; | ||
689 | |||
690 | msg[1] = address >> 8; | ||
691 | msg[2] = address; | ||
692 | |||
693 | switch (mode) { | ||
694 | case MODE_I2C_WRITE: | ||
695 | msg[3] = 0; | ||
696 | msg[4] = write_byte; | ||
697 | msg_bytes = 5; | ||
698 | reply_bytes = 1; | ||
699 | break; | 654 | break; |
700 | case MODE_I2C_READ: | 655 | case PORT_B: |
701 | msg[3] = 0; | 656 | intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; |
702 | msg_bytes = 4; | 657 | name = "DPDDC-B"; |
703 | reply_bytes = 2; | ||
704 | break; | 658 | break; |
705 | default: | 659 | case PORT_C: |
706 | msg_bytes = 3; | 660 | intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; |
707 | reply_bytes = 1; | 661 | name = "DPDDC-C"; |
708 | break; | 662 | break; |
663 | case PORT_D: | ||
664 | intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; | ||
665 | name = "DPDDC-D"; | ||
666 | break; | ||
667 | default: | ||
668 | BUG(); | ||
709 | } | 669 | } |
710 | 670 | ||
711 | /* | 671 | if (!HAS_DDI(dev)) |
712 | * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is | 672 | intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; |
713 | * required to retry at least seven times upon receiving AUX_DEFER | ||
714 | * before giving up the AUX transaction. | ||
715 | */ | ||
716 | for (retry = 0; retry < 7; retry++) { | ||
717 | ret = intel_dp_aux_ch(intel_dp, | ||
718 | msg, msg_bytes, | ||
719 | reply, reply_bytes); | ||
720 | if (ret < 0) { | ||
721 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | ||
722 | goto out; | ||
723 | } | ||
724 | 673 | ||
725 | switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { | 674 | intel_dp->aux.name = name; |
726 | case DP_AUX_NATIVE_REPLY_ACK: | 675 | intel_dp->aux.dev = dev->dev; |
727 | /* I2C-over-AUX Reply field is only valid | 676 | intel_dp->aux.transfer = intel_dp_aux_transfer; |
728 | * when paired with AUX ACK. | ||
729 | */ | ||
730 | break; | ||
731 | case DP_AUX_NATIVE_REPLY_NACK: | ||
732 | DRM_DEBUG_KMS("aux_ch native nack\n"); | ||
733 | ret = -EREMOTEIO; | ||
734 | goto out; | ||
735 | case DP_AUX_NATIVE_REPLY_DEFER: | ||
736 | /* | ||
737 | * For now, just give more slack to branch devices. We | ||
738 | * could check the DPCD for I2C bit rate capabilities, | ||
739 | * and if available, adjust the interval. We could also | ||
740 | * be more careful with DP-to-Legacy adapters where a | ||
741 | * long legacy cable may force very low I2C bit rates. | ||
742 | */ | ||
743 | if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | ||
744 | DP_DWN_STRM_PORT_PRESENT) | ||
745 | usleep_range(500, 600); | ||
746 | else | ||
747 | usleep_range(300, 400); | ||
748 | continue; | ||
749 | default: | ||
750 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", | ||
751 | reply[0]); | ||
752 | ret = -EREMOTEIO; | ||
753 | goto out; | ||
754 | } | ||
755 | 677 | ||
756 | switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { | 678 | DRM_DEBUG_KMS("registering %s bus for %s\n", name, |
757 | case DP_AUX_I2C_REPLY_ACK: | 679 | connector->base.kdev->kobj.name); |
758 | if (mode == MODE_I2C_READ) { | ||
759 | *read_byte = reply[1]; | ||
760 | } | ||
761 | ret = reply_bytes - 1; | ||
762 | goto out; | ||
763 | case DP_AUX_I2C_REPLY_NACK: | ||
764 | DRM_DEBUG_KMS("aux_i2c nack\n"); | ||
765 | ret = -EREMOTEIO; | ||
766 | goto out; | ||
767 | case DP_AUX_I2C_REPLY_DEFER: | ||
768 | DRM_DEBUG_KMS("aux_i2c defer\n"); | ||
769 | udelay(100); | ||
770 | break; | ||
771 | default: | ||
772 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); | ||
773 | ret = -EREMOTEIO; | ||
774 | goto out; | ||
775 | } | ||
776 | } | ||
777 | 680 | ||
778 | DRM_ERROR("too many retries, giving up\n"); | 681 | ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux); |
779 | ret = -EREMOTEIO; | 682 | if (ret < 0) { |
683 | DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n", | ||
684 | name, ret); | ||
685 | return; | ||
686 | } | ||
780 | 687 | ||
781 | out: | 688 | ret = sysfs_create_link(&connector->base.kdev->kobj, |
782 | edp_panel_vdd_off(intel_dp, false); | 689 | &intel_dp->aux.ddc.dev.kobj, |
783 | return ret; | 690 | intel_dp->aux.ddc.dev.kobj.name); |
691 | if (ret < 0) { | ||
692 | DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); | ||
693 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); | ||
694 | } | ||
784 | } | 695 | } |
785 | 696 | ||
786 | static void | 697 | static void |
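
The new intel_dp_aux_transfer() above packs every request into a 4-byte AUX header: the request nibble in the top half of byte 0, sixteen address bits split across bytes 1-2, and the transfer length stored as size - 1, matching what the removed native read/write helpers built by hand. A self-contained sketch of that packing; the request values match drm_dp_helper.h, and note the hunk carries only 16 address bits even though AUX addresses are 20 bits wide:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DP_AUX_NATIVE_READ	0x9	/* request nibble, per the DP spec */
    #define HEADER_SIZE		4

    /* Pack the 4-byte AUX header the way intel_dp_aux_transfer() does. */
    static void pack_aux_header(uint8_t *buf, uint8_t request,
    			    uint16_t address, size_t size)
    {
    	buf[0] = request << 4;
    	buf[1] = address >> 8;
    	buf[2] = address & 0xff;
    	buf[3] = size - 1;	/* length is encoded as size minus one */
    }

    int main(void)
    {
    	uint8_t hdr[HEADER_SIZE];

    	/* e.g. a native read of the 15-byte receiver caps at DPCD 0x000 */
    	pack_aux_header(hdr, DP_AUX_NATIVE_READ, 0x0000, 15);
    	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
    	return 0;
    }
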
@@ -789,43 +700,10 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) | |||
789 | struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); | 700 | struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); |
790 | 701 | ||
791 | sysfs_remove_link(&intel_connector->base.kdev->kobj, | 702 | sysfs_remove_link(&intel_connector->base.kdev->kobj, |
792 | intel_dp->adapter.dev.kobj.name); | 703 | intel_dp->aux.ddc.dev.kobj.name); |
793 | intel_connector_unregister(intel_connector); | 704 | intel_connector_unregister(intel_connector); |
794 | } | 705 | } |
795 | 706 | ||
796 | static int | ||
797 | intel_dp_i2c_init(struct intel_dp *intel_dp, | ||
798 | struct intel_connector *intel_connector, const char *name) | ||
799 | { | ||
800 | int ret; | ||
801 | |||
802 | DRM_DEBUG_KMS("i2c_init %s\n", name); | ||
803 | intel_dp->algo.running = false; | ||
804 | intel_dp->algo.address = 0; | ||
805 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; | ||
806 | |||
807 | memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); | ||
808 | intel_dp->adapter.owner = THIS_MODULE; | ||
809 | intel_dp->adapter.class = I2C_CLASS_DDC; | ||
810 | strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); | ||
811 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; | ||
812 | intel_dp->adapter.algo_data = &intel_dp->algo; | ||
813 | intel_dp->adapter.dev.parent = intel_connector->base.dev->dev; | ||
814 | |||
815 | ret = i2c_dp_aux_add_bus(&intel_dp->adapter); | ||
816 | if (ret < 0) | ||
817 | return ret; | ||
818 | |||
819 | ret = sysfs_create_link(&intel_connector->base.kdev->kobj, | ||
820 | &intel_dp->adapter.dev.kobj, | ||
821 | intel_dp->adapter.dev.kobj.name); | ||
822 | |||
823 | if (ret < 0) | ||
824 | i2c_del_adapter(&intel_dp->adapter); | ||
825 | |||
826 | return ret; | ||
827 | } | ||
828 | |||
829 | static void | 707 | static void |
830 | intel_dp_set_clock(struct intel_encoder *encoder, | 708 | intel_dp_set_clock(struct intel_encoder *encoder, |
831 | struct intel_crtc_config *pipe_config, int link_bw) | 709 | struct intel_crtc_config *pipe_config, int link_bw) |
@@ -1161,23 +1039,21 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) | |||
1161 | return control; | 1039 | return control; |
1162 | } | 1040 | } |
1163 | 1041 | ||
1164 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) | 1042 | static bool _edp_panel_vdd_on(struct intel_dp *intel_dp) |
1165 | { | 1043 | { |
1166 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 1044 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1167 | struct drm_i915_private *dev_priv = dev->dev_private; | 1045 | struct drm_i915_private *dev_priv = dev->dev_private; |
1168 | u32 pp; | 1046 | u32 pp; |
1169 | u32 pp_stat_reg, pp_ctrl_reg; | 1047 | u32 pp_stat_reg, pp_ctrl_reg; |
1048 | bool need_to_disable = !intel_dp->want_panel_vdd; | ||
1170 | 1049 | ||
1171 | if (!is_edp(intel_dp)) | 1050 | if (!is_edp(intel_dp)) |
1172 | return; | 1051 | return false; |
1173 | |||
1174 | WARN(intel_dp->want_panel_vdd, | ||
1175 | "eDP VDD already requested on\n"); | ||
1176 | 1052 | ||
1177 | intel_dp->want_panel_vdd = true; | 1053 | intel_dp->want_panel_vdd = true; |
1178 | 1054 | ||
1179 | if (edp_have_panel_vdd(intel_dp)) | 1055 | if (edp_have_panel_vdd(intel_dp)) |
1180 | return; | 1056 | return need_to_disable; |
1181 | 1057 | ||
1182 | intel_runtime_pm_get(dev_priv); | 1058 | intel_runtime_pm_get(dev_priv); |
1183 | 1059 | ||
@@ -1203,6 +1079,17 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
1203 | DRM_DEBUG_KMS("eDP was not running\n"); | 1079 | DRM_DEBUG_KMS("eDP was not running\n"); |
1204 | msleep(intel_dp->panel_power_up_delay); | 1080 | msleep(intel_dp->panel_power_up_delay); |
1205 | } | 1081 | } |
1082 | |||
1083 | return need_to_disable; | ||
1084 | } | ||
1085 | |||
1086 | void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) | ||
1087 | { | ||
1088 | if (is_edp(intel_dp)) { | ||
1089 | bool vdd = _edp_panel_vdd_on(intel_dp); | ||
1090 | |||
1091 | WARN(!vdd, "eDP VDD already requested on\n"); | ||
1092 | } | ||
1206 | } | 1093 | } |
1207 | 1094 | ||
1208 | static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) | 1095 | static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) |
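
The _edp_panel_vdd_on() split above changes the VDD contract: instead of WARNing whenever VDD is already requested, the helper reports whether this call is the one that actually asserted want_panel_vdd, so intel_dp_aux_ch() can drop VDD only if it turned it on. A toy model of that "did I enable it?" idiom:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the VDD handshake: vdd_on() reports whether this
     * call is the one that owes a matching vdd_off() later. */
    static bool vdd_wanted;

    static bool vdd_on(void)
    {
    	bool need_to_disable = !vdd_wanted;

    	vdd_wanted = true;
    	/* ... power up the panel here if it was off ... */
    	return need_to_disable;
    }

    static void vdd_off(void)
    {
    	vdd_wanted = false;
    	/* ... schedule the actual power-down ... */
    }

    int main(void)
    {
    	bool vdd = vdd_on();	/* e.g. bracketing an AUX transfer */
    	/* ... do the transfer ... */
    	if (vdd)
    		vdd_off();	/* only the enabling caller disables */
    	printf("owned the enable: %s\n", vdd ? "yes" : "no");
    	return 0;
    }
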
@@ -1465,8 +1352,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
1465 | return; | 1352 | return; |
1466 | 1353 | ||
1467 | if (mode != DRM_MODE_DPMS_ON) { | 1354 | if (mode != DRM_MODE_DPMS_ON) { |
1468 | ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, | 1355 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
1469 | DP_SET_POWER_D3); | 1356 | DP_SET_POWER_D3); |
1470 | if (ret != 1) | 1357 | if (ret != 1) |
1471 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); | 1358 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); |
1472 | } else { | 1359 | } else { |
@@ -1475,9 +1362,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
1475 | * time to wake up. | 1362 | * time to wake up. |
1476 | */ | 1363 | */ |
1477 | for (i = 0; i < 3; i++) { | 1364 | for (i = 0; i < 3; i++) { |
1478 | ret = intel_dp_aux_native_write_1(intel_dp, | 1365 | ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, |
1479 | DP_SET_POWER, | 1366 | DP_SET_POWER_D0); |
1480 | DP_SET_POWER_D0); | ||
1481 | if (ret == 1) | 1367 | if (ret == 1) |
1482 | break; | 1368 | break; |
1483 | msleep(1); | 1369 | msleep(1); |
@@ -1701,13 +1587,11 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) | |||
1701 | 1587 | ||
1702 | /* Enable PSR in sink */ | 1588 | /* Enable PSR in sink */ |
1703 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) | 1589 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) |
1704 | intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, | 1590 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
1705 | DP_PSR_ENABLE & | 1591 | DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); |
1706 | ~DP_PSR_MAIN_LINK_ACTIVE); | ||
1707 | else | 1592 | else |
1708 | intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, | 1593 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, |
1709 | DP_PSR_ENABLE | | 1594 | DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); |
1710 | DP_PSR_MAIN_LINK_ACTIVE); | ||
1711 | 1595 | ||
1712 | /* Setup AUX registers */ | 1596 | /* Setup AUX registers */ |
1713 | I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); | 1597 | I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND); |
@@ -2018,26 +1902,25 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) | |||
2018 | /* | 1902 | /* |
2019 | * Native read with retry for link status and receiver capability reads for | 1903 | * Native read with retry for link status and receiver capability reads for |
2020 | * cases where the sink may still be asleep. | 1904 | * cases where the sink may still be asleep. |
1905 | * | ||
1906 | * Sinks are *supposed* to come up within 1ms from an off state, but we're also | ||
1907 | * supposed to retry 3 times per the spec. | ||
2021 | */ | 1908 | */ |
2022 | static bool | 1909 | static ssize_t |
2023 | intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, | 1910 | intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, |
2024 | uint8_t *recv, int recv_bytes) | 1911 | void *buffer, size_t size) |
2025 | { | 1912 | { |
2026 | int ret, i; | 1913 | ssize_t ret; |
1914 | int i; | ||
2027 | 1915 | ||
2028 | /* | ||
2029 | * Sinks are *supposed* to come up within 1ms from an off state, | ||
2030 | * but we're also supposed to retry 3 times per the spec. | ||
2031 | */ | ||
2032 | for (i = 0; i < 3; i++) { | 1916 | for (i = 0; i < 3; i++) { |
2033 | ret = intel_dp_aux_native_read(intel_dp, address, recv, | 1917 | ret = drm_dp_dpcd_read(aux, offset, buffer, size); |
2034 | recv_bytes); | 1918 | if (ret == size) |
2035 | if (ret == recv_bytes) | 1919 | return ret; |
2036 | return true; | ||
2037 | msleep(1); | 1920 | msleep(1); |
2038 | } | 1921 | } |
2039 | 1922 | ||
2040 | return false; | 1923 | return ret; |
2041 | } | 1924 | } |
2042 | 1925 | ||
2043 | /* | 1926 | /* |
@@ -2047,10 +1930,10 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, | |||
2047 | static bool | 1930 | static bool |
2048 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) | 1931 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
2049 | { | 1932 | { |
2050 | return intel_dp_aux_native_read_retry(intel_dp, | 1933 | return intel_dp_dpcd_read_wake(&intel_dp->aux, |
2051 | DP_LANE0_1_STATUS, | 1934 | DP_LANE0_1_STATUS, |
2052 | link_status, | 1935 | link_status, |
2053 | DP_LINK_STATUS_SIZE); | 1936 | DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; |
2054 | } | 1937 | } |
2055 | 1938 | ||
2056 | /* | 1939 | /* |
@@ -2564,8 +2447,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
2564 | len = intel_dp->lane_count + 1; | 2447 | len = intel_dp->lane_count + 1; |
2565 | } | 2448 | } |
2566 | 2449 | ||
2567 | ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, | 2450 | ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, |
2568 | buf, len); | 2451 | buf, len); |
2569 | 2452 | ||
2570 | return ret == len; | 2453 | return ret == len; |
2571 | } | 2454 | } |
@@ -2594,9 +2477,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, | |||
2594 | I915_WRITE(intel_dp->output_reg, *DP); | 2477 | I915_WRITE(intel_dp->output_reg, *DP); |
2595 | POSTING_READ(intel_dp->output_reg); | 2478 | POSTING_READ(intel_dp->output_reg); |
2596 | 2479 | ||
2597 | ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, | 2480 | ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, |
2598 | intel_dp->train_set, | 2481 | intel_dp->train_set, intel_dp->lane_count); |
2599 | intel_dp->lane_count); | ||
2600 | 2482 | ||
2601 | return ret == intel_dp->lane_count; | 2483 | return ret == intel_dp->lane_count; |
2602 | } | 2484 | } |
@@ -2652,11 +2534,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
2652 | link_config[1] = intel_dp->lane_count; | 2534 | link_config[1] = intel_dp->lane_count; |
2653 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) | 2535 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
2654 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 2536 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
2655 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2); | 2537 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); |
2656 | 2538 | ||
2657 | link_config[0] = 0; | 2539 | link_config[0] = 0; |
2658 | link_config[1] = DP_SET_ANSI_8B10B; | 2540 | link_config[1] = DP_SET_ANSI_8B10B; |
2659 | intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2); | 2541 | drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); |
2660 | 2542 | ||
2661 | DP |= DP_PORT_EN; | 2543 | DP |= DP_PORT_EN; |
2662 | 2544 | ||
@@ -2899,8 +2781,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2899 | 2781 | ||
2900 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; | 2782 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; |
2901 | 2783 | ||
2902 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, | 2784 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, |
2903 | sizeof(intel_dp->dpcd)) == 0) | 2785 | sizeof(intel_dp->dpcd)) < 0) |
2904 | return false; /* aux transfer failed */ | 2786 | return false; /* aux transfer failed */ |
2905 | 2787 | ||
2906 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), | 2788 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
@@ -2913,9 +2795,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2913 | /* Check if the panel supports PSR */ | 2795 | /* Check if the panel supports PSR */ |
2914 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); | 2796 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); |
2915 | if (is_edp(intel_dp)) { | 2797 | if (is_edp(intel_dp)) { |
2916 | intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, | 2798 | intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT, |
2917 | intel_dp->psr_dpcd, | 2799 | intel_dp->psr_dpcd, |
2918 | sizeof(intel_dp->psr_dpcd)); | 2800 | sizeof(intel_dp->psr_dpcd)); |
2919 | if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { | 2801 | if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { |
2920 | dev_priv->psr.sink_support = true; | 2802 | dev_priv->psr.sink_support = true; |
2921 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); | 2803 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); |
@@ -2937,9 +2819,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2937 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) | 2819 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) |
2938 | return true; /* no per-port downstream info */ | 2820 | return true; /* no per-port downstream info */ |
2939 | 2821 | ||
2940 | if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, | 2822 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, |
2941 | intel_dp->downstream_ports, | 2823 | intel_dp->downstream_ports, |
2942 | DP_MAX_DOWNSTREAM_PORTS) == 0) | 2824 | DP_MAX_DOWNSTREAM_PORTS) < 0) |
2943 | return false; /* downstream port status fetch failed */ | 2825 | return false; /* downstream port status fetch failed */ |
2944 | 2826 | ||
2945 | return true; | 2827 | return true; |
@@ -2955,11 +2837,11 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) | |||
2955 | 2837 | ||
2956 | intel_edp_panel_vdd_on(intel_dp); | 2838 | intel_edp_panel_vdd_on(intel_dp); |
2957 | 2839 | ||
2958 | if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) | 2840 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3) |
2959 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", | 2841 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
2960 | buf[0], buf[1], buf[2]); | 2842 | buf[0], buf[1], buf[2]); |
2961 | 2843 | ||
2962 | if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) | 2844 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3) |
2963 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", | 2845 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
2964 | buf[0], buf[1], buf[2]); | 2846 | buf[0], buf[1], buf[2]); |
2965 | 2847 | ||
@@ -2974,46 +2856,40 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) | |||
2974 | to_intel_crtc(intel_dig_port->base.base.crtc); | 2856 | to_intel_crtc(intel_dig_port->base.base.crtc); |
2975 | u8 buf[1]; | 2857 | u8 buf[1]; |
2976 | 2858 | ||
2977 | if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1)) | 2859 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) |
2978 | return -EAGAIN; | 2860 | return -EAGAIN; |
2979 | 2861 | ||
2980 | if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) | 2862 | if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) |
2981 | return -ENOTTY; | 2863 | return -ENOTTY; |
2982 | 2864 | ||
2983 | if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, | 2865 | if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, |
2984 | DP_TEST_SINK_START)) | 2866 | DP_TEST_SINK_START) < 0) |
2985 | return -EAGAIN; | 2867 | return -EAGAIN; |
2986 | 2868 | ||
2987 | /* Wait 2 vblanks to be sure we will have the correct CRC value */ | 2869 | /* Wait 2 vblanks to be sure we will have the correct CRC value */ |
2988 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2870 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2989 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2871 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2990 | 2872 | ||
2991 | if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6)) | 2873 | if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) |
2992 | return -EAGAIN; | 2874 | return -EAGAIN; |
2993 | 2875 | ||
2994 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0); | 2876 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); |
2995 | return 0; | 2877 | return 0; |
2996 | } | 2878 | } |
2997 | 2879 | ||
2998 | static bool | 2880 | static bool |
2999 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) | 2881 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) |
3000 | { | 2882 | { |
3001 | int ret; | 2883 | return intel_dp_dpcd_read_wake(&intel_dp->aux, |
3002 | 2884 | DP_DEVICE_SERVICE_IRQ_VECTOR, | |
3003 | ret = intel_dp_aux_native_read_retry(intel_dp, | 2885 | sink_irq_vector, 1) == 1; |
3004 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
3005 | sink_irq_vector, 1); | ||
3006 | if (!ret) | ||
3007 | return false; | ||
3008 | |||
3009 | return true; | ||
3010 | } | 2886 | } |
3011 | 2887 | ||
3012 | static void | 2888 | static void |
3013 | intel_dp_handle_test_request(struct intel_dp *intel_dp) | 2889 | intel_dp_handle_test_request(struct intel_dp *intel_dp) |
3014 | { | 2890 | { |
3015 | /* NAK by default */ | 2891 | /* NAK by default */ |
3016 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); | 2892 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK); |
3017 | } | 2893 | } |
3018 | 2894 | ||
3019 | /* | 2895 | /* |
@@ -3052,9 +2928,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
3052 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | 2928 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
3053 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { | 2929 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { |
3054 | /* Clear interrupt source */ | 2930 | /* Clear interrupt source */ |
3055 | intel_dp_aux_native_write_1(intel_dp, | 2931 | drm_dp_dpcd_writeb(&intel_dp->aux, |
3056 | DP_DEVICE_SERVICE_IRQ_VECTOR, | 2932 | DP_DEVICE_SERVICE_IRQ_VECTOR, |
3057 | sink_irq_vector); | 2933 | sink_irq_vector); |
3058 | 2934 | ||
3059 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) | 2935 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) |
3060 | intel_dp_handle_test_request(intel_dp); | 2936 | intel_dp_handle_test_request(intel_dp); |
@@ -3089,15 +2965,17 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
3089 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | 2965 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
3090 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { | 2966 | intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { |
3091 | uint8_t reg; | 2967 | uint8_t reg; |
3092 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, | 2968 | |
3093 | ®, 1)) | 2969 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, |
2970 | ®, 1) < 0) | ||
3094 | return connector_status_unknown; | 2971 | return connector_status_unknown; |
2972 | |||
3095 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected | 2973 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected |
3096 | : connector_status_disconnected; | 2974 | : connector_status_disconnected; |
3097 | } | 2975 | } |
3098 | 2976 | ||
3099 | /* If no HPD, poke DDC gently */ | 2977 | /* If no HPD, poke DDC gently */ |
3100 | if (drm_probe_ddc(&intel_dp->adapter)) | 2978 | if (drm_probe_ddc(&intel_dp->aux.ddc)) |
3101 | return connector_status_connected; | 2979 | return connector_status_connected; |
3102 | 2980 | ||
3103 | /* Well we tried, say unknown for unreliable port types */ | 2981 | /* Well we tried, say unknown for unreliable port types */ |
@@ -3265,7 +3143,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
3265 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { | 3143 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { |
3266 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); | 3144 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); |
3267 | } else { | 3145 | } else { |
3268 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 3146 | edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); |
3269 | if (edid) { | 3147 | if (edid) { |
3270 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | 3148 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
3271 | kfree(edid); | 3149 | kfree(edid); |
@@ -3301,7 +3179,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
3301 | power_domain = intel_display_port_power_domain(intel_encoder); | 3179 | power_domain = intel_display_port_power_domain(intel_encoder); |
3302 | intel_display_power_get(dev_priv, power_domain); | 3180 | intel_display_power_get(dev_priv, power_domain); |
3303 | 3181 | ||
3304 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); | 3182 | ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); |
3305 | intel_display_power_put(dev_priv, power_domain); | 3183 | intel_display_power_put(dev_priv, power_domain); |
3306 | if (ret) | 3184 | if (ret) |
3307 | return ret; | 3185 | return ret; |
@@ -3334,7 +3212,7 @@ intel_dp_detect_audio(struct drm_connector *connector) | |||
3334 | power_domain = intel_display_port_power_domain(intel_encoder); | 3212 | power_domain = intel_display_port_power_domain(intel_encoder); |
3335 | intel_display_power_get(dev_priv, power_domain); | 3213 | intel_display_power_get(dev_priv, power_domain); |
3336 | 3214 | ||
3337 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 3215 | edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc); |
3338 | if (edid) { | 3216 | if (edid) { |
3339 | has_audio = drm_detect_monitor_audio(edid); | 3217 | has_audio = drm_detect_monitor_audio(edid); |
3340 | kfree(edid); | 3218 | kfree(edid); |
@@ -3456,7 +3334,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
3456 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 3334 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
3457 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 3335 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
3458 | 3336 | ||
3459 | i2c_del_adapter(&intel_dp->adapter); | 3337 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); |
3460 | drm_encoder_cleanup(encoder); | 3338 | drm_encoder_cleanup(encoder); |
3461 | if (is_edp(intel_dp)) { | 3339 | if (is_edp(intel_dp)) { |
3462 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 3340 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
@@ -3768,7 +3646,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
3768 | /* We now know it's not a ghost, init power sequence regs. */ | 3646 | /* We now know it's not a ghost, init power sequence regs. */ |
3769 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); | 3647 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); |
3770 | 3648 | ||
3771 | edid = drm_get_edid(connector, &intel_dp->adapter); | 3649 | edid = drm_get_edid(connector, &intel_dp->aux.ddc); |
3772 | if (edid) { | 3650 | if (edid) { |
3773 | if (drm_add_edid_modes(connector, edid)) { | 3651 | if (drm_add_edid_modes(connector, edid)) { |
3774 | drm_mode_connector_update_edid_property(connector, | 3652 | drm_mode_connector_update_edid_property(connector, |
@@ -3816,8 +3694,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3816 | struct drm_i915_private *dev_priv = dev->dev_private; | 3694 | struct drm_i915_private *dev_priv = dev->dev_private; |
3817 | enum port port = intel_dig_port->port; | 3695 | enum port port = intel_dig_port->port; |
3818 | struct edp_power_seq power_seq = { 0 }; | 3696 | struct edp_power_seq power_seq = { 0 }; |
3819 | const char *name = NULL; | 3697 | int type; |
3820 | int type, error; | ||
3821 | 3698 | ||
3822 | /* intel_dp vfuncs */ | 3699 | /* intel_dp vfuncs */ |
3823 | if (IS_VALLEYVIEW(dev)) | 3700 | if (IS_VALLEYVIEW(dev)) |
@@ -3870,43 +3747,19 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3870 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 3747 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
3871 | intel_connector->unregister = intel_dp_connector_unregister; | 3748 | intel_connector->unregister = intel_dp_connector_unregister; |
3872 | 3749 | ||
3873 | intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; | 3750 | /* Set up the hotplug pin. */ |
3874 | if (HAS_DDI(dev)) { | ||
3875 | switch (intel_dig_port->port) { | ||
3876 | case PORT_A: | ||
3877 | intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; | ||
3878 | break; | ||
3879 | case PORT_B: | ||
3880 | intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; | ||
3881 | break; | ||
3882 | case PORT_C: | ||
3883 | intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; | ||
3884 | break; | ||
3885 | case PORT_D: | ||
3886 | intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; | ||
3887 | break; | ||
3888 | default: | ||
3889 | BUG(); | ||
3890 | } | ||
3891 | } | ||
3892 | |||
3893 | /* Set up the DDC bus. */ | ||
3894 | switch (port) { | 3751 | switch (port) { |
3895 | case PORT_A: | 3752 | case PORT_A: |
3896 | intel_encoder->hpd_pin = HPD_PORT_A; | 3753 | intel_encoder->hpd_pin = HPD_PORT_A; |
3897 | name = "DPDDC-A"; | ||
3898 | break; | 3754 | break; |
3899 | case PORT_B: | 3755 | case PORT_B: |
3900 | intel_encoder->hpd_pin = HPD_PORT_B; | 3756 | intel_encoder->hpd_pin = HPD_PORT_B; |
3901 | name = "DPDDC-B"; | ||
3902 | break; | 3757 | break; |
3903 | case PORT_C: | 3758 | case PORT_C: |
3904 | intel_encoder->hpd_pin = HPD_PORT_C; | 3759 | intel_encoder->hpd_pin = HPD_PORT_C; |
3905 | name = "DPDDC-C"; | ||
3906 | break; | 3760 | break; |
3907 | case PORT_D: | 3761 | case PORT_D: |
3908 | intel_encoder->hpd_pin = HPD_PORT_D; | 3762 | intel_encoder->hpd_pin = HPD_PORT_D; |
3909 | name = "DPDDC-D"; | ||
3910 | break; | 3763 | break; |
3911 | default: | 3764 | default: |
3912 | BUG(); | 3765 | BUG(); |
@@ -3917,14 +3770,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3917 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); | 3770 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
3918 | } | 3771 | } |
3919 | 3772 | ||
3920 | error = intel_dp_i2c_init(intel_dp, intel_connector, name); | 3773 | intel_dp_aux_init(intel_dp, intel_connector); |
3921 | WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", | ||
3922 | error, port_name(port)); | ||
3923 | 3774 | ||
3924 | intel_dp->psr_setup_done = false; | 3775 | intel_dp->psr_setup_done = false; |
3925 | 3776 | ||
3926 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { | 3777 | if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { |
3927 | i2c_del_adapter(&intel_dp->adapter); | 3778 | drm_dp_aux_unregister_i2c_bus(&intel_dp->aux); |
3928 | if (is_edp(intel_dp)) { | 3779 | if (is_edp(intel_dp)) { |
3929 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 3780 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
3930 | mutex_lock(&dev->mode_config.mutex); | 3781 | mutex_lock(&dev->mode_config.mutex); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e0064a18352d..890c5cd98268 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -489,8 +489,7 @@ struct intel_dp { | |||
489 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 489 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
490 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; | 490 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; |
491 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | 491 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; |
492 | struct i2c_adapter adapter; | 492 | struct drm_dp_aux aux; |
493 | struct i2c_algo_dp_aux_data algo; | ||
494 | uint8_t train_set[4]; | 493 | uint8_t train_set[4]; |
495 | int panel_power_up_delay; | 494 | int panel_power_up_delay; |
496 | int panel_power_down_delay; | 495 | int panel_power_down_delay; |
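The two hunks above are the heart of the dp-aux-rework topic: the hand-rolled i2c_adapter plus i2c_algo_dp_aux_data pair in struct intel_dp is replaced by a single struct drm_dp_aux, the per-port AUX_CH_CTL register table moves out of connector init, and EDID reads go through the helper-owned aux.ddc adapter. As a hedged sketch (the callback name and surrounding glue are illustrative, not i915's actual code), a driver wires into the new helper roughly like this:

	static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
				       struct drm_dp_aux_msg *msg)
	{
		/* perform one native or i2c-over-AUX transaction and
		 * return the number of payload bytes transferred */
		return msg->size;
	}

	intel_dp->aux.name = "DPDDC-B";	/* names the exported i2c bus */
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = my_aux_transfer;
	ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);

	/* EDID then comes through the helper's adapter, as above: */
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);

On failure the bus is dropped again with drm_dp_aux_unregister_i2c_bus(), exactly as the intel_edp_init_connector() error path above now does.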
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index adb5166a5dfd..5a00e90696de 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
@@ -259,7 +259,9 @@ int mgag200_mm_init(struct mga_device *mdev) | |||
259 | 259 | ||
260 | ret = ttm_bo_device_init(&mdev->ttm.bdev, | 260 | ret = ttm_bo_device_init(&mdev->ttm.bdev, |
261 | mdev->ttm.bo_global_ref.ref.object, | 261 | mdev->ttm.bo_global_ref.ref.object, |
262 | &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET, | 262 | &mgag200_bo_driver, |
263 | dev->anon_inode->i_mapping, | ||
264 | DRM_FILE_PAGE_OFFSET, | ||
263 | true); | 265 | true); |
264 | if (ret) { | 266 | if (ret) { |
265 | DRM_ERROR("Error initialising bo driver; %d\n", ret); | 267 | DRM_ERROR("Error initialising bo driver; %d\n", ret); |
@@ -324,7 +326,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, | |||
324 | } | 326 | } |
325 | 327 | ||
326 | mgabo->bo.bdev = &mdev->ttm.bdev; | 328 | mgabo->bo.bdev = &mdev->ttm.bdev; |
327 | mgabo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
328 | 329 | ||
329 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 330 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
330 | 331 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 27c3fd89e8ce..c90c0dc0afe8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -228,8 +228,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
228 | struct nouveau_bo *nvbo = NULL; | 228 | struct nouveau_bo *nvbo = NULL; |
229 | int ret = 0; | 229 | int ret = 0; |
230 | 230 | ||
231 | drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping; | ||
232 | |||
233 | if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { | 231 | if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { |
234 | NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags); | 232 | NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags); |
235 | return -EINVAL; | 233 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index d45d50da978f..be3a3c9feafa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
@@ -376,7 +376,9 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
376 | 376 | ||
377 | ret = ttm_bo_device_init(&drm->ttm.bdev, | 377 | ret = ttm_bo_device_init(&drm->ttm.bdev, |
378 | drm->ttm.bo_global_ref.ref.object, | 378 | drm->ttm.bo_global_ref.ref.object, |
379 | &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET, | 379 | &nouveau_bo_driver, |
380 | dev->anon_inode->i_mapping, | ||
381 | DRM_FILE_PAGE_OFFSET, | ||
380 | bits <= 32 ? true : false); | 382 | bits <= 32 ? true : false); |
381 | if (ret) { | 383 | if (ret) { |
382 | NV_ERROR(drm, "error initialising bo driver, %d\n", ret); | 384 | NV_ERROR(drm, "error initialising bo driver, %d\n", ret); |
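The mgag200 and nouveau conversions above (and the qxl one below) follow the same recipe: ttm_bo_device_init() gained an address_space argument, so a driver now hands TTM the DRM device's anon-inode mapping once at init time instead of patching bdev->dev_mapping or each BO's mapping after the fact. A hedged sketch of the new call, with illustrative driver names:

	ret = ttm_bo_device_init(&priv->ttm.bdev,
				 priv->ttm.bo_global_ref.ref.object,
				 &mydrv_bo_driver,		/* illustrative */
				 ddev->anon_inode->i_mapping,	/* shared mapping */
				 DRM_FILE_PAGE_OFFSET,
				 true);				/* need_dma32 */
	if (ret)
		DRM_ERROR("Error initialising bo driver: %d\n", ret);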
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 5aec3e81fe24..c8d972763889 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
@@ -153,24 +153,24 @@ static struct { | |||
153 | static void evict_entry(struct drm_gem_object *obj, | 153 | static void evict_entry(struct drm_gem_object *obj, |
154 | enum tiler_fmt fmt, struct usergart_entry *entry) | 154 | enum tiler_fmt fmt, struct usergart_entry *entry) |
155 | { | 155 | { |
156 | if (obj->dev->dev_mapping) { | 156 | struct omap_gem_object *omap_obj = to_omap_bo(obj); |
157 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | 157 | int n = usergart[fmt].height; |
158 | int n = usergart[fmt].height; | 158 | size_t size = PAGE_SIZE * n; |
159 | size_t size = PAGE_SIZE * n; | 159 | loff_t off = mmap_offset(obj) + |
160 | loff_t off = mmap_offset(obj) + | 160 | (entry->obj_pgoff << PAGE_SHIFT); |
161 | (entry->obj_pgoff << PAGE_SHIFT); | 161 | const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); |
162 | const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); | 162 | |
163 | if (m > 1) { | 163 | if (m > 1) { |
164 | int i; | 164 | int i; |
166 | /* if stride > PAGE_SIZE then sparse mapping: */ | 165 | /* if stride > PAGE_SIZE then sparse mapping: */ |
166 | for (i = n; i > 0; i--) { | 166 | for (i = n; i > 0; i--) { |
167 | unmap_mapping_range(obj->dev->dev_mapping, | 167 | unmap_mapping_range(obj->dev->anon_inode->i_mapping, |
168 | off, PAGE_SIZE, 1); | 168 | off, PAGE_SIZE, 1); |
169 | off += PAGE_SIZE * m; | 169 | off += PAGE_SIZE * m; |
170 | } | ||
171 | } else { | ||
172 | unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); | ||
173 | } | 170 | } |
171 | } else { | ||
172 | unmap_mapping_range(obj->dev->anon_inode->i_mapping, | ||
173 | off, size, 1); | ||
174 | } | 174 | } |
175 | 175 | ||
176 | entry->obj = NULL; | 176 | entry->obj = NULL; |
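With all GEM mmaps now backed by that single anon-inode address_space, evict_entry() above reduces to a hole punch at the object's fake mmap offset. unmap_mapping_range() tears down every user PTE covering the given byte range in every VMA of the mapping, so the next CPU access faults back into the driver; the final argument (even_cows = 1) also zaps private COW copies. The dense-stride case, assuming off and size are computed as in the hunk:

	struct address_space *mapping = obj->dev->anon_inode->i_mapping;

	/* drop all user mappings of [off, off + size) */
	unmap_mapping_range(mapping, off, size, 1);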
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 8691c76c5ef0..b95f144f0b49 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c | |||
@@ -82,8 +82,6 @@ int qxl_bo_create(struct qxl_device *qdev, | |||
82 | enum ttm_bo_type type; | 82 | enum ttm_bo_type type; |
83 | int r; | 83 | int r; |
84 | 84 | ||
85 | if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) | ||
86 | qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; | ||
87 | if (kernel) | 85 | if (kernel) |
88 | type = ttm_bo_type_kernel; | 86 | type = ttm_bo_type_kernel; |
89 | else | 87 | else |
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index c7e7e6590c2b..29c02e0e857f 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -493,7 +493,9 @@ int qxl_ttm_init(struct qxl_device *qdev) | |||
493 | /* No other users of the address space, so set it to 0 */ | 493 | /* No other users of the address space, so set it to 0 */ |
494 | r = ttm_bo_device_init(&qdev->mman.bdev, | 494 | r = ttm_bo_device_init(&qdev->mman.bdev, |
495 | qdev->mman.bo_global_ref.ref.object, | 495 | qdev->mman.bo_global_ref.ref.object, |
496 | &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0); | 496 | &qxl_bo_driver, |
497 | qdev->ddev->anon_inode->i_mapping, | ||
498 | DRM_FILE_PAGE_OFFSET, 0); | ||
497 | if (r) { | 499 | if (r) { |
498 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 500 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
499 | return r; | 501 | return r; |
@@ -518,8 +520,6 @@ int qxl_ttm_init(struct qxl_device *qdev) | |||
518 | ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); | 520 | ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); |
519 | DRM_INFO("qxl: %uM of Surface memory size\n", | 521 | DRM_INFO("qxl: %uM of Surface memory size\n", |
520 | (unsigned)qdev->surfaceram_size / (1024 * 1024)); | 522 | (unsigned)qdev->surfaceram_size / (1024 * 1024)); |
521 | if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) | ||
522 | qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; | ||
523 | r = qxl_ttm_debugfs_init(qdev); | 523 | r = qxl_ttm_debugfs_init(qdev); |
524 | if (r) { | 524 | if (r) { |
525 | DRM_ERROR("Failed to init debugfs\n"); | 525 | DRM_ERROR("Failed to init debugfs\n"); |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 306364a1ecda..09433534dc47 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
80 | r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ | 80 | r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ |
81 | rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ | 81 | rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ |
82 | trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ | 82 | trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ |
83 | ci_dpm.o dce6_afmt.o | 83 | ci_dpm.o dce6_afmt.o radeon_vm.o |
84 | 84 | ||
85 | # add async DMA block | 85 | # add async DMA block |
86 | radeon-y += \ | 86 | radeon-y += \ |
@@ -99,6 +99,12 @@ radeon-y += \ | |||
99 | uvd_v3_1.o \ | 99 | uvd_v3_1.o \ |
100 | uvd_v4_2.o | 100 | uvd_v4_2.o |
101 | 101 | ||
102 | # add VCE block | ||
103 | radeon-y += \ | ||
104 | radeon_vce.o \ | ||
105 | vce_v1_0.o \ | ||
106 | vce_v2_0.o \ | ||
107 | |||
102 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 108 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
103 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 109 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
104 | radeon-$(CONFIG_ACPI) += radeon_acpi.o | 110 | radeon-$(CONFIG_ACPI) += radeon_acpi.o |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index ea103ccdf4bd..f81d7ca134db 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -2601,6 +2601,10 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2601 | pi->min_vddc_in_table = 0; | 2601 | pi->min_vddc_in_table = 0; |
2602 | pi->max_vddc_in_table = 0; | 2602 | pi->max_vddc_in_table = 0; |
2603 | 2603 | ||
2604 | ret = r600_get_platform_caps(rdev); | ||
2605 | if (ret) | ||
2606 | return ret; | ||
2607 | |||
2604 | ret = rv7xx_parse_power_table(rdev); | 2608 | ret = rv7xx_parse_power_table(rdev); |
2605 | if (ret) | 2609 | if (ret) |
2606 | return ret; | 2610 | return ret; |
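r600_get_platform_caps() is new in this series: the pplib header fields that every ASIC-specific parser used to read inline (exactly the three lines dropped from ci_parse_power_table() further down) are hoisted into one shared helper, called up front by each dpm_init. A hedged reconstruction, with the ATOM table lookup abbreviated to the usual boilerplate:

	int r600_get_platform_caps(struct radeon_device *rdev)
	{
		struct radeon_mode_info *mode_info = &rdev->mode_info;
		union power_info *power_info;
		int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
		u16 data_offset;
		u8 frev, crev;

		if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
					    &frev, &crev, &data_offset))
			return -EINVAL;
		power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

		rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
		rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
		rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

		return 0;
	}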
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 8d49104ca6c2..cad89a977527 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
@@ -172,6 +172,8 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, | |||
172 | extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); | 172 | extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); |
173 | extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); | 173 | extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); |
174 | extern int ci_mc_load_microcode(struct radeon_device *rdev); | 174 | extern int ci_mc_load_microcode(struct radeon_device *rdev); |
175 | extern void cik_update_cg(struct radeon_device *rdev, | ||
176 | u32 block, bool enable); | ||
175 | 177 | ||
176 | static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, | 178 | static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, |
177 | struct atom_voltage_table_entry *voltage_table, | 179 | struct atom_voltage_table_entry *voltage_table, |
@@ -746,6 +748,14 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, | |||
746 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | 748 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
747 | int i; | 749 | int i; |
748 | 750 | ||
751 | if (rps->vce_active) { | ||
752 | rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; | ||
753 | rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; | ||
754 | } else { | ||
755 | rps->evclk = 0; | ||
756 | rps->ecclk = 0; | ||
757 | } | ||
758 | |||
749 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 759 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
750 | ci_dpm_vblank_too_short(rdev)) | 760 | ci_dpm_vblank_too_short(rdev)) |
751 | disable_mclk_switching = true; | 761 | disable_mclk_switching = true; |
@@ -804,6 +814,13 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, | |||
804 | sclk = ps->performance_levels[0].sclk; | 814 | sclk = ps->performance_levels[0].sclk; |
805 | } | 815 | } |
806 | 816 | ||
817 | if (rps->vce_active) { | ||
818 | if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) | ||
819 | sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; | ||
820 | if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk) | ||
821 | mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk; | ||
822 | } | ||
823 | |||
807 | ps->performance_levels[0].sclk = sclk; | 824 | ps->performance_levels[0].sclk = sclk; |
808 | ps->performance_levels[0].mclk = mclk; | 825 | ps->performance_levels[0].mclk = mclk; |
809 | 826 | ||
@@ -3468,7 +3485,6 @@ static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) | |||
3468 | 0 : -EINVAL; | 3485 | 0 : -EINVAL; |
3469 | } | 3486 | } |
3470 | 3487 | ||
3471 | #if 0 | ||
3472 | static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) | 3488 | static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) |
3473 | { | 3489 | { |
3474 | struct ci_power_info *pi = ci_get_pi(rdev); | 3490 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -3501,6 +3517,7 @@ static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) | |||
3501 | 0 : -EINVAL; | 3517 | 0 : -EINVAL; |
3502 | } | 3518 | } |
3503 | 3519 | ||
3520 | #if 0 | ||
3504 | static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) | 3521 | static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) |
3505 | { | 3522 | { |
3506 | struct ci_power_info *pi = ci_get_pi(rdev); | 3523 | struct ci_power_info *pi = ci_get_pi(rdev); |
@@ -3587,7 +3604,6 @@ static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
3587 | return ci_enable_uvd_dpm(rdev, !gate); | 3604 | return ci_enable_uvd_dpm(rdev, !gate); |
3588 | } | 3605 | } |
3589 | 3606 | ||
3590 | #if 0 | ||
3591 | static u8 ci_get_vce_boot_level(struct radeon_device *rdev) | 3607 | static u8 ci_get_vce_boot_level(struct radeon_device *rdev) |
3592 | { | 3608 | { |
3593 | u8 i; | 3609 | u8 i; |
@@ -3608,15 +3624,15 @@ static int ci_update_vce_dpm(struct radeon_device *rdev, | |||
3608 | struct radeon_ps *radeon_current_state) | 3624 | struct radeon_ps *radeon_current_state) |
3609 | { | 3625 | { |
3610 | struct ci_power_info *pi = ci_get_pi(rdev); | 3626 | struct ci_power_info *pi = ci_get_pi(rdev); |
3611 | bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0); | ||
3612 | bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0); | ||
3613 | int ret = 0; | 3627 | int ret = 0; |
3614 | u32 tmp; | 3628 | u32 tmp; |
3615 | 3629 | ||
3616 | if (new_vce_clock_non_zero != old_vce_clock_non_zero) { | 3630 | if (radeon_current_state->evclk != radeon_new_state->evclk) { |
3617 | if (new_vce_clock_non_zero) { | 3631 | if (radeon_new_state->evclk) { |
3618 | pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); | 3632 | /* turn the clocks on when encoding */ |
3633 | cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); | ||
3619 | 3634 | ||
3635 | pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); | ||
3620 | tmp = RREG32_SMC(DPM_TABLE_475); | 3636 | tmp = RREG32_SMC(DPM_TABLE_475); |
3621 | tmp &= ~VceBootLevel_MASK; | 3637 | tmp &= ~VceBootLevel_MASK; |
3622 | tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); | 3638 | tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); |
@@ -3624,12 +3640,16 @@ static int ci_update_vce_dpm(struct radeon_device *rdev, | |||
3624 | 3640 | ||
3625 | ret = ci_enable_vce_dpm(rdev, true); | 3641 | ret = ci_enable_vce_dpm(rdev, true); |
3626 | } else { | 3642 | } else { |
3643 | /* turn the clocks off when not encoding */ | ||
3644 | cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); | ||
3645 | |||
3627 | ret = ci_enable_vce_dpm(rdev, false); | 3646 | ret = ci_enable_vce_dpm(rdev, false); |
3628 | } | 3647 | } |
3629 | } | 3648 | } |
3630 | return ret; | 3649 | return ret; |
3631 | } | 3650 | } |
3632 | 3651 | ||
3652 | #if 0 | ||
3633 | static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) | 3653 | static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) |
3634 | { | 3654 | { |
3635 | return ci_enable_samu_dpm(rdev, gate); | 3655 | return ci_enable_samu_dpm(rdev, gate); |
@@ -4752,13 +4772,13 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) | |||
4752 | DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); | 4772 | DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); |
4753 | return ret; | 4773 | return ret; |
4754 | } | 4774 | } |
4755 | #if 0 | 4775 | |
4756 | ret = ci_update_vce_dpm(rdev, new_ps, old_ps); | 4776 | ret = ci_update_vce_dpm(rdev, new_ps, old_ps); |
4757 | if (ret) { | 4777 | if (ret) { |
4758 | DRM_ERROR("ci_update_vce_dpm failed\n"); | 4778 | DRM_ERROR("ci_update_vce_dpm failed\n"); |
4759 | return ret; | 4779 | return ret; |
4760 | } | 4780 | } |
4761 | #endif | 4781 | |
4762 | ret = ci_update_sclk_t(rdev); | 4782 | ret = ci_update_sclk_t(rdev); |
4763 | if (ret) { | 4783 | if (ret) { |
4764 | DRM_ERROR("ci_update_sclk_t failed\n"); | 4784 | DRM_ERROR("ci_update_sclk_t failed\n"); |
@@ -4959,9 +4979,6 @@ static int ci_parse_power_table(struct radeon_device *rdev) | |||
4959 | if (!rdev->pm.dpm.ps) | 4979 | if (!rdev->pm.dpm.ps) |
4960 | return -ENOMEM; | 4980 | return -ENOMEM; |
4961 | power_state_offset = (u8 *)state_array->states; | 4981 | power_state_offset = (u8 *)state_array->states; |
4962 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
4963 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
4964 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
4965 | for (i = 0; i < state_array->ucNumEntries; i++) { | 4982 | for (i = 0; i < state_array->ucNumEntries; i++) { |
4966 | u8 *idx; | 4983 | u8 *idx; |
4967 | power_state = (union pplib_power_state *)power_state_offset; | 4984 | power_state = (union pplib_power_state *)power_state_offset; |
@@ -4998,6 +5015,21 @@ static int ci_parse_power_table(struct radeon_device *rdev) | |||
4998 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; | 5015 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
4999 | } | 5016 | } |
5000 | rdev->pm.dpm.num_ps = state_array->ucNumEntries; | 5017 | rdev->pm.dpm.num_ps = state_array->ucNumEntries; |
5018 | |||
5019 | /* fill in the vce power states */ | ||
5020 | for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { | ||
5021 | u32 sclk, mclk; | ||
5022 | clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; | ||
5023 | clock_info = (union pplib_clock_info *) | ||
5024 | &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; | ||
5025 | sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); | ||
5026 | sclk |= clock_info->ci.ucEngineClockHigh << 16; | ||
5027 | mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); | ||
5028 | mclk |= clock_info->ci.ucMemoryClockHigh << 16; | ||
5029 | rdev->pm.dpm.vce_states[i].sclk = sclk; | ||
5030 | rdev->pm.dpm.vce_states[i].mclk = mclk; | ||
5031 | } | ||
5032 | |||
5001 | return 0; | 5033 | return 0; |
5002 | } | 5034 | } |
5003 | 5035 | ||
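The fill loop above resolves each VCE state's clk_idx into the pplib clock-info array and decodes the CI layout, where every clock is split into a 16-bit little-endian low word plus an 8-bit high byte. A hedged helper making the decode explicit (the struct tag follows the clock_info->ci usage above):

	/* returns the engine clock in the table's native units */
	static u32 ci_clk_from_pplib(const ATOM_PPLIB_CI_CLOCK_INFO *ci)
	{
		return le16_to_cpu(ci->usEngineClockLow) |
		       ((u32)ci->ucEngineClockHigh << 16);
	}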
@@ -5077,17 +5109,25 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5077 | ci_dpm_fini(rdev); | 5109 | ci_dpm_fini(rdev); |
5078 | return ret; | 5110 | return ret; |
5079 | } | 5111 | } |
5080 | ret = ci_parse_power_table(rdev); | 5112 | |
5113 | ret = r600_get_platform_caps(rdev); | ||
5081 | if (ret) { | 5114 | if (ret) { |
5082 | ci_dpm_fini(rdev); | 5115 | ci_dpm_fini(rdev); |
5083 | return ret; | 5116 | return ret; |
5084 | } | 5117 | } |
5118 | |||
5085 | ret = r600_parse_extended_power_table(rdev); | 5119 | ret = r600_parse_extended_power_table(rdev); |
5086 | if (ret) { | 5120 | if (ret) { |
5087 | ci_dpm_fini(rdev); | 5121 | ci_dpm_fini(rdev); |
5088 | return ret; | 5122 | return ret; |
5089 | } | 5123 | } |
5090 | 5124 | ||
5125 | ret = ci_parse_power_table(rdev); | ||
5126 | if (ret) { | ||
5127 | ci_dpm_fini(rdev); | ||
5128 | return ret; | ||
5129 | } | ||
5130 | |||
5091 | pi->dll_default_on = false; | 5131 | pi->dll_default_on = false; |
5092 | pi->sram_end = SMC_RAM_END; | 5132 | pi->sram_end = SMC_RAM_END; |
5093 | 5133 | ||
@@ -5120,6 +5160,7 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5120 | pi->caps_sclk_throttle_low_notification = false; | 5160 | pi->caps_sclk_throttle_low_notification = false; |
5121 | 5161 | ||
5122 | pi->caps_uvd_dpm = true; | 5162 | pi->caps_uvd_dpm = true; |
5163 | pi->caps_vce_dpm = true; | ||
5123 | 5164 | ||
5124 | ci_get_leakage_voltages(rdev); | 5165 | ci_get_leakage_voltages(rdev); |
5125 | ci_patch_dependency_tables_with_leakage(rdev); | 5166 | ci_patch_dependency_tables_with_leakage(rdev); |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e22be8458d92..0ae991d3289a 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -75,6 +75,7 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev); | |||
75 | extern int cik_sdma_resume(struct radeon_device *rdev); | 75 | extern int cik_sdma_resume(struct radeon_device *rdev); |
76 | extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); | 76 | extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); |
77 | extern void cik_sdma_fini(struct radeon_device *rdev); | 77 | extern void cik_sdma_fini(struct radeon_device *rdev); |
78 | extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable); | ||
78 | static void cik_rlc_stop(struct radeon_device *rdev); | 79 | static void cik_rlc_stop(struct radeon_device *rdev); |
79 | static void cik_pcie_gen3_enable(struct radeon_device *rdev); | 80 | static void cik_pcie_gen3_enable(struct radeon_device *rdev); |
80 | static void cik_program_aspm(struct radeon_device *rdev); | 81 | static void cik_program_aspm(struct radeon_device *rdev); |
@@ -4030,8 +4031,6 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev) | |||
4030 | WREG32(CP_RB0_BASE, rb_addr); | 4031 | WREG32(CP_RB0_BASE, rb_addr); |
4031 | WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr)); | 4032 | WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr)); |
4032 | 4033 | ||
4033 | ring->rptr = RREG32(CP_RB0_RPTR); | ||
4034 | |||
4035 | /* start the ring */ | 4034 | /* start the ring */ |
4036 | cik_cp_gfx_start(rdev); | 4035 | cik_cp_gfx_start(rdev); |
4037 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; | 4036 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; |
@@ -4134,8 +4133,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) | |||
4134 | { | 4133 | { |
4135 | if (enable) | 4134 | if (enable) |
4136 | WREG32(CP_MEC_CNTL, 0); | 4135 | WREG32(CP_MEC_CNTL, 0); |
4137 | else | 4136 | else { |
4138 | WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); | 4137 | WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); |
4138 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | ||
4139 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | ||
4140 | } | ||
4139 | udelay(50); | 4141 | udelay(50); |
4140 | } | 4142 | } |
4141 | 4143 | ||
@@ -4586,8 +4588,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
4586 | rdev->ring[idx].wptr = 0; | 4588 | rdev->ring[idx].wptr = 0; |
4587 | mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr; | 4589 | mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr; |
4588 | WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); | 4590 | WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); |
4589 | rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR); | 4591 | mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR); |
4590 | mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr; | ||
4591 | 4592 | ||
4592 | /* set the vmid for the queue */ | 4593 | /* set the vmid for the queue */ |
4593 | mqd->queue_state.cp_hqd_vmid = 0; | 4594 | mqd->queue_state.cp_hqd_vmid = 0; |
@@ -5117,11 +5118,9 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
5117 | if (!(reset_mask & (RADEON_RESET_GFX | | 5118 | if (!(reset_mask & (RADEON_RESET_GFX | |
5118 | RADEON_RESET_COMPUTE | | 5119 | RADEON_RESET_COMPUTE | |
5119 | RADEON_RESET_CP))) { | 5120 | RADEON_RESET_CP))) { |
5120 | radeon_ring_lockup_update(ring); | 5121 | radeon_ring_lockup_update(rdev, ring); |
5121 | return false; | 5122 | return false; |
5122 | } | 5123 | } |
5123 | /* force CP activities */ | ||
5124 | radeon_ring_force_activity(rdev, ring); | ||
5125 | return radeon_ring_test_lockup(rdev, ring); | 5124 | return radeon_ring_test_lockup(rdev, ring); |
5126 | } | 5125 | } |
5127 | 5126 | ||
@@ -6141,6 +6140,10 @@ void cik_update_cg(struct radeon_device *rdev, | |||
6141 | cik_enable_hdp_mgcg(rdev, enable); | 6140 | cik_enable_hdp_mgcg(rdev, enable); |
6142 | cik_enable_hdp_ls(rdev, enable); | 6141 | cik_enable_hdp_ls(rdev, enable); |
6143 | } | 6142 | } |
6143 | |||
6144 | if (block & RADEON_CG_BLOCK_VCE) { | ||
6145 | vce_v2_0_enable_mgcg(rdev, enable); | ||
6146 | } | ||
6144 | } | 6147 | } |
6145 | 6148 | ||
6146 | static void cik_init_cg(struct radeon_device *rdev) | 6149 | static void cik_init_cg(struct radeon_device *rdev) |
@@ -7490,6 +7493,20 @@ restart_ih: | |||
7490 | /* reset addr and status */ | 7493 | /* reset addr and status */ |
7491 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); | 7494 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
7492 | break; | 7495 | break; |
7496 | case 167: /* VCE */ | ||
7497 | DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data); | ||
7498 | switch (src_data) { | ||
7499 | case 0: | ||
7500 | radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX); | ||
7501 | break; | ||
7502 | case 1: | ||
7503 | radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX); | ||
7504 | break; | ||
7505 | default: | ||
7506 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
7507 | break; | ||
7508 | } | ||
7509 | break; | ||
7493 | case 176: /* GFX RB CP_INT */ | 7510 | case 176: /* GFX RB CP_INT */ |
7494 | case 177: /* GFX IB CP_INT */ | 7511 | case 177: /* GFX IB CP_INT */ |
7495 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); | 7512 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
@@ -7789,6 +7806,22 @@ static int cik_startup(struct radeon_device *rdev) | |||
7789 | if (r) | 7806 | if (r) |
7790 | rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; | 7807 | rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
7791 | 7808 | ||
7809 | r = radeon_vce_resume(rdev); | ||
7810 | if (!r) { | ||
7811 | r = vce_v2_0_resume(rdev); | ||
7812 | if (!r) | ||
7813 | r = radeon_fence_driver_start_ring(rdev, | ||
7814 | TN_RING_TYPE_VCE1_INDEX); | ||
7815 | if (!r) | ||
7816 | r = radeon_fence_driver_start_ring(rdev, | ||
7817 | TN_RING_TYPE_VCE2_INDEX); | ||
7818 | } | ||
7819 | if (r) { | ||
7820 | dev_err(rdev->dev, "VCE init error (%d).\n", r); | ||
7821 | rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; | ||
7822 | rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; | ||
7823 | } | ||
7824 | |||
7792 | /* Enable IRQ */ | 7825 | /* Enable IRQ */ |
7793 | if (!rdev->irq.installed) { | 7826 | if (!rdev->irq.installed) { |
7794 | r = radeon_irq_kms_init(rdev); | 7827 | r = radeon_irq_kms_init(rdev); |
@@ -7864,6 +7897,23 @@ static int cik_startup(struct radeon_device *rdev) | |||
7864 | DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); | 7897 | DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); |
7865 | } | 7898 | } |
7866 | 7899 | ||
7900 | r = -ENOENT; | ||
7901 | |||
7902 | ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; | ||
7903 | if (ring->ring_size) | ||
7904 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | ||
7905 | VCE_CMD_NO_OP); | ||
7906 | |||
7907 | ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; | ||
7908 | if (ring->ring_size) | ||
7909 | r = radeon_ring_init(rdev, ring, ring->ring_size, 0, | ||
7910 | VCE_CMD_NO_OP); | ||
7911 | |||
7912 | if (!r) | ||
7913 | r = vce_v1_0_init(rdev); | ||
7914 | else if (r != -ENOENT) | ||
7915 | DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); | ||
7916 | |||
7867 | r = radeon_ib_pool_init(rdev); | 7917 | r = radeon_ib_pool_init(rdev); |
7868 | if (r) { | 7918 | if (r) { |
7869 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | 7919 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
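The startup hunk above leans on a small sentinel idiom worth spelling out: r is preset to -ENOENT, and any radeon_ring_init() call for a sized ring overwrites it. Afterwards 0 means "both rings are up, initialize the block", a bare -ENOENT means "no VCE rings were sized, skip silently", and anything else is a real failure worth logging. A minimal, hedged illustration of the pattern in plain C (not kernel code):

	#include <errno.h>
	#include <stdio.h>

	static int bring_up_vce(int ring_size)
	{
		int r = -ENOENT;	/* sentinel: nothing configured yet */

		if (ring_size)
			r = 0;		/* stands in for radeon_ring_init() */

		if (!r)
			printf("rings up, init VCE block\n");
		else if (r != -ENOENT)
			printf("real init error %d\n", r);
		/* bare -ENOENT: VCE absent, silently skip */

		return r == -ENOENT ? 0 : r;
	}

	int main(void)
	{
		return bring_up_vce(0);	/* no ring sized: quiet no-op */
	}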
@@ -7935,6 +7985,7 @@ int cik_suspend(struct radeon_device *rdev) | |||
7935 | cik_sdma_enable(rdev, false); | 7985 | cik_sdma_enable(rdev, false); |
7936 | uvd_v1_0_fini(rdev); | 7986 | uvd_v1_0_fini(rdev); |
7937 | radeon_uvd_suspend(rdev); | 7987 | radeon_uvd_suspend(rdev); |
7988 | radeon_vce_suspend(rdev); | ||
7938 | cik_fini_pg(rdev); | 7989 | cik_fini_pg(rdev); |
7939 | cik_fini_cg(rdev); | 7990 | cik_fini_cg(rdev); |
7940 | cik_irq_suspend(rdev); | 7991 | cik_irq_suspend(rdev); |
@@ -8067,6 +8118,17 @@ int cik_init(struct radeon_device *rdev) | |||
8067 | r600_ring_init(rdev, ring, 4096); | 8118 | r600_ring_init(rdev, ring, 4096); |
8068 | } | 8119 | } |
8069 | 8120 | ||
8121 | r = radeon_vce_init(rdev); | ||
8122 | if (!r) { | ||
8123 | ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; | ||
8124 | ring->ring_obj = NULL; | ||
8125 | r600_ring_init(rdev, ring, 4096); | ||
8126 | |||
8127 | ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; | ||
8128 | ring->ring_obj = NULL; | ||
8129 | r600_ring_init(rdev, ring, 4096); | ||
8130 | } | ||
8131 | |||
8070 | rdev->ih.ring_obj = NULL; | 8132 | rdev->ih.ring_obj = NULL; |
8071 | r600_ih_ring_init(rdev, 64 * 1024); | 8133 | r600_ih_ring_init(rdev, 64 * 1024); |
8072 | 8134 | ||
@@ -8128,6 +8190,7 @@ void cik_fini(struct radeon_device *rdev) | |||
8128 | radeon_irq_kms_fini(rdev); | 8190 | radeon_irq_kms_fini(rdev); |
8129 | uvd_v1_0_fini(rdev); | 8191 | uvd_v1_0_fini(rdev); |
8130 | radeon_uvd_fini(rdev); | 8192 | radeon_uvd_fini(rdev); |
8193 | radeon_vce_fini(rdev); | ||
8131 | cik_pcie_gart_fini(rdev); | 8194 | cik_pcie_gart_fini(rdev); |
8132 | r600_vram_scratch_fini(rdev); | 8195 | r600_vram_scratch_fini(rdev); |
8133 | radeon_gem_fini(rdev); | 8196 | radeon_gem_fini(rdev); |
@@ -8866,6 +8929,41 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
8866 | return r; | 8929 | return r; |
8867 | } | 8930 | } |
8868 | 8931 | ||
8932 | int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk) | ||
8933 | { | ||
8934 | int r, i; | ||
8935 | struct atom_clock_dividers dividers; | ||
8936 | u32 tmp; | ||
8937 | |||
8938 | r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
8939 | ecclk, false, &dividers); | ||
8940 | if (r) | ||
8941 | return r; | ||
8942 | |||
8943 | for (i = 0; i < 100; i++) { | ||
8944 | if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS) | ||
8945 | break; | ||
8946 | mdelay(10); | ||
8947 | } | ||
8948 | if (i == 100) | ||
8949 | return -ETIMEDOUT; | ||
8950 | |||
8951 | tmp = RREG32_SMC(CG_ECLK_CNTL); | ||
8952 | tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK); | ||
8953 | tmp |= dividers.post_divider; | ||
8954 | WREG32_SMC(CG_ECLK_CNTL, tmp); | ||
8955 | |||
8956 | for (i = 0; i < 100; i++) { | ||
8957 | if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS) | ||
8958 | break; | ||
8959 | mdelay(10); | ||
8960 | } | ||
8961 | if (i == 100) | ||
8962 | return -ETIMEDOUT; | ||
8963 | |||
8964 | return 0; | ||
8965 | } | ||
8966 | |||
8869 | static void cik_pcie_gen3_enable(struct radeon_device *rdev) | 8967 | static void cik_pcie_gen3_enable(struct radeon_device *rdev) |
8870 | { | 8968 | { |
8871 | struct pci_dev *root = rdev->pdev->bus->self; | 8969 | struct pci_dev *root = rdev->pdev->bus->self; |
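cik_set_vce_clocks() above follows a wait/program/wait shape: poll until the ECLK status bit is set, write the new post-divider into CG_ECLK_CNTL, then poll the bit again before declaring the switch complete. The polling step factors out naturally; a hedged helper (name illustrative), bounded at roughly one second like the loops above:

	static int cik_eclk_wait(struct radeon_device *rdev)
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
				return 0;
			mdelay(10);	/* 100 * 10ms = ~1s worst case */
		}
		return -ETIMEDOUT;
	}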
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 1ecb3f1070e3..89b4afa5041c 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) | |||
264 | WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); | 264 | WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); |
265 | WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); | 265 | WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); |
266 | } | 266 | } |
267 | rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; | ||
268 | rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; | ||
267 | } | 269 | } |
268 | 270 | ||
269 | /** | 271 | /** |
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable) | |||
291 | u32 me_cntl, reg_offset; | 293 | u32 me_cntl, reg_offset; |
292 | int i; | 294 | int i; |
293 | 295 | ||
296 | if (enable == false) { | ||
297 | cik_sdma_gfx_stop(rdev); | ||
298 | cik_sdma_rlc_stop(rdev); | ||
299 | } | ||
300 | |||
294 | for (i = 0; i < 2; i++) { | 301 | for (i = 0; i < 2; i++) { |
295 | if (i == 0) | 302 | if (i == 0) |
296 | reg_offset = SDMA0_REGISTER_OFFSET; | 303 | reg_offset = SDMA0_REGISTER_OFFSET; |
@@ -362,8 +369,6 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev) | |||
362 | ring->wptr = 0; | 369 | ring->wptr = 0; |
363 | WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2); | 370 | WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2); |
364 | 371 | ||
365 | ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2; | ||
366 | |||
367 | /* enable DMA RB */ | 372 | /* enable DMA RB */ |
368 | WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE); | 373 | WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE); |
369 | 374 | ||
@@ -420,10 +425,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev) | |||
420 | if (!rdev->sdma_fw) | 425 | if (!rdev->sdma_fw) |
421 | return -EINVAL; | 426 | return -EINVAL; |
422 | 427 | ||
423 | /* stop the gfx rings and rlc compute queues */ | ||
424 | cik_sdma_gfx_stop(rdev); | ||
425 | cik_sdma_rlc_stop(rdev); | ||
426 | |||
427 | /* halt the MEs */ | 428 | /* halt the MEs */ |
428 | cik_sdma_enable(rdev, false); | 429 | cik_sdma_enable(rdev, false); |
429 | 430 | ||
@@ -492,9 +493,6 @@ int cik_sdma_resume(struct radeon_device *rdev) | |||
492 | */ | 493 | */ |
493 | void cik_sdma_fini(struct radeon_device *rdev) | 494 | void cik_sdma_fini(struct radeon_device *rdev) |
494 | { | 495 | { |
495 | /* stop the gfx rings and rlc compute queues */ | ||
496 | cik_sdma_gfx_stop(rdev); | ||
497 | cik_sdma_rlc_stop(rdev); | ||
498 | /* halt the MEs */ | 496 | /* halt the MEs */ |
499 | cik_sdma_enable(rdev, false); | 497 | cik_sdma_enable(rdev, false); |
500 | radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); | 498 | radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); |
@@ -713,11 +711,9 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
713 | mask = RADEON_RESET_DMA1; | 711 | mask = RADEON_RESET_DMA1; |
714 | 712 | ||
715 | if (!(reset_mask & mask)) { | 713 | if (!(reset_mask & mask)) { |
716 | radeon_ring_lockup_update(ring); | 714 | radeon_ring_lockup_update(rdev, ring); |
717 | return false; | 715 | return false; |
718 | } | 716 | } |
719 | /* force ring activities */ | ||
720 | radeon_ring_force_activity(rdev, ring); | ||
721 | return radeon_ring_test_lockup(rdev, ring); | 717 | return radeon_ring_test_lockup(rdev, ring); |
722 | } | 718 | } |
723 | 719 | ||
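The cik_sdma changes re-home queue teardown: rather than every caller remembering to stop the GFX rings and RLC compute queues before halting the microengines, cik_sdma_enable(rdev, false) now does it, so cik_sdma_load_microcode() and cik_sdma_fini() simply call enable(false). In hedged sketch form:

	void cik_sdma_enable(struct radeon_device *rdev, bool enable)
	{
		if (!enable) {
			/* quiesce first; gfx_stop also marks both
			 * DMA rings !ready now */
			cik_sdma_gfx_stop(rdev);
			cik_sdma_rlc_stop(rdev);
		}
		/* ... then program the SDMA0/SDMA1 ME_CNTL halt bits
		 * for both engines, as before ... */
	}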
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 98bae9d7b74d..213873270d5f 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
@@ -203,6 +203,12 @@ | |||
203 | #define CTF_TEMP_MASK 0x0003fe00 | 203 | #define CTF_TEMP_MASK 0x0003fe00 |
204 | #define CTF_TEMP_SHIFT 9 | 204 | #define CTF_TEMP_SHIFT 9 |
205 | 205 | ||
206 | #define CG_ECLK_CNTL 0xC05000AC | ||
207 | # define ECLK_DIVIDER_MASK 0x7f | ||
208 | # define ECLK_DIR_CNTL_EN (1 << 8) | ||
209 | #define CG_ECLK_STATUS 0xC05000B0 | ||
210 | # define ECLK_STATUS (1 << 0) | ||
211 | |||
206 | #define CG_SPLL_FUNC_CNTL 0xC0500140 | 212 | #define CG_SPLL_FUNC_CNTL 0xC0500140 |
207 | #define SPLL_RESET (1 << 0) | 213 | #define SPLL_RESET (1 << 0) |
208 | #define SPLL_PWRON (1 << 1) | 214 | #define SPLL_PWRON (1 << 1) |
@@ -2010,4 +2016,47 @@ | |||
2010 | /* UVD CTX indirect */ | 2016 | /* UVD CTX indirect */ |
2011 | #define UVD_CGC_MEM_CTRL 0xC0 | 2017 | #define UVD_CGC_MEM_CTRL 0xC0 |
2012 | 2018 | ||
2019 | /* VCE */ | ||
2020 | |||
2021 | #define VCE_VCPU_CACHE_OFFSET0 0x20024 | ||
2022 | #define VCE_VCPU_CACHE_SIZE0 0x20028 | ||
2023 | #define VCE_VCPU_CACHE_OFFSET1 0x2002c | ||
2024 | #define VCE_VCPU_CACHE_SIZE1 0x20030 | ||
2025 | #define VCE_VCPU_CACHE_OFFSET2 0x20034 | ||
2026 | #define VCE_VCPU_CACHE_SIZE2 0x20038 | ||
2027 | #define VCE_RB_RPTR2 0x20178 | ||
2028 | #define VCE_RB_WPTR2 0x2017c | ||
2029 | #define VCE_RB_RPTR 0x2018c | ||
2030 | #define VCE_RB_WPTR 0x20190 | ||
2031 | #define VCE_CLOCK_GATING_A 0x202f8 | ||
2032 | # define CGC_CLK_GATE_DLY_TIMER_MASK (0xf << 0) | ||
2033 | # define CGC_CLK_GATE_DLY_TIMER(x) ((x) << 0) | ||
2034 | # define CGC_CLK_GATER_OFF_DLY_TIMER_MASK (0xff << 4) | ||
2035 | # define CGC_CLK_GATER_OFF_DLY_TIMER(x) ((x) << 4) | ||
2036 | # define CGC_UENC_WAIT_AWAKE (1 << 18) | ||
2037 | #define VCE_CLOCK_GATING_B 0x202fc | ||
2038 | #define VCE_CGTT_CLK_OVERRIDE 0x207a0 | ||
2039 | #define VCE_UENC_CLOCK_GATING 0x207bc | ||
2040 | # define CLOCK_ON_DELAY_MASK (0xf << 0) | ||
2041 | # define CLOCK_ON_DELAY(x) ((x) << 0) | ||
2042 | # define CLOCK_OFF_DELAY_MASK (0xff << 4) | ||
2043 | # define CLOCK_OFF_DELAY(x) ((x) << 4) | ||
2044 | #define VCE_UENC_REG_CLOCK_GATING 0x207c0 | ||
2045 | #define VCE_SYS_INT_EN 0x21300 | ||
2046 | # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) | ||
2047 | #define VCE_LMI_CTRL2 0x21474 | ||
2048 | #define VCE_LMI_CTRL 0x21498 | ||
2049 | #define VCE_LMI_VM_CTRL 0x214a0 | ||
2050 | #define VCE_LMI_SWAP_CNTL 0x214b4 | ||
2051 | #define VCE_LMI_SWAP_CNTL1 0x214b8 | ||
2052 | #define VCE_LMI_CACHE_CTRL 0x214f4 | ||
2053 | |||
2054 | #define VCE_CMD_NO_OP 0x00000000 | ||
2055 | #define VCE_CMD_END 0x00000001 | ||
2056 | #define VCE_CMD_IB 0x00000002 | ||
2057 | #define VCE_CMD_FENCE 0x00000003 | ||
2058 | #define VCE_CMD_TRAP 0x00000004 | ||
2059 | #define VCE_CMD_IB_AUTO 0x00000005 | ||
2060 | #define VCE_CMD_SEMAPHORE 0x00000006 | ||
2061 | |||
2013 | #endif | 2062 | #endif |
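The register block above gives each of the two VCE rings its own ring-buffer pointer pair: VCE_RB_RPTR/VCE_RB_WPTR for the first ring and the *2 variants for the second. A hedged sketch of a read-pointer accessor selecting between them (illustrative name; it assumes the TN_RING_TYPE_VCE*_INDEX indices used in cik.c above):

	static uint32_t vce_ring_get_rptr(struct radeon_device *rdev,
					  struct radeon_ring *ring)
	{
		if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
			return RREG32(VCE_RB_RPTR);
		else
			return RREG32(VCE_RB_RPTR2);
	}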
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index cf783fc0ef21..5a9a5f4d7888 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
@@ -2036,6 +2036,10 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2036 | pi->min_vddc_in_table = 0; | 2036 | pi->min_vddc_in_table = 0; |
2037 | pi->max_vddc_in_table = 0; | 2037 | pi->max_vddc_in_table = 0; |
2038 | 2038 | ||
2039 | ret = r600_get_platform_caps(rdev); | ||
2040 | if (ret) | ||
2041 | return ret; | ||
2042 | |||
2039 | ret = rv7xx_parse_power_table(rdev); | 2043 | ret = rv7xx_parse_power_table(rdev); |
2040 | if (ret) | 2044 | if (ret) |
2041 | return ret; | 2045 | return ret; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 27b0ff16082e..b406546440da 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2990,8 +2990,6 @@ static int evergreen_cp_resume(struct radeon_device *rdev) | |||
2990 | WREG32(CP_RB_BASE, ring->gpu_addr >> 8); | 2990 | WREG32(CP_RB_BASE, ring->gpu_addr >> 8); |
2991 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 2991 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
2992 | 2992 | ||
2993 | ring->rptr = RREG32(CP_RB_RPTR); | ||
2994 | |||
2995 | evergreen_cp_start(rdev); | 2993 | evergreen_cp_start(rdev); |
2996 | ring->ready = true; | 2994 | ring->ready = true; |
2997 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); | 2995 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
@@ -3952,11 +3950,9 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin | |||
3952 | if (!(reset_mask & (RADEON_RESET_GFX | | 3950 | if (!(reset_mask & (RADEON_RESET_GFX | |
3953 | RADEON_RESET_COMPUTE | | 3951 | RADEON_RESET_COMPUTE | |
3954 | RADEON_RESET_CP))) { | 3952 | RADEON_RESET_CP))) { |
3955 | radeon_ring_lockup_update(ring); | 3953 | radeon_ring_lockup_update(rdev, ring); |
3956 | return false; | 3954 | return false; |
3957 | } | 3955 | } |
3958 | /* force CP activities */ | ||
3959 | radeon_ring_force_activity(rdev, ring); | ||
3960 | return radeon_ring_test_lockup(rdev, ring); | 3956 | return radeon_ring_test_lockup(rdev, ring); |
3961 | } | 3957 | } |
3962 | 3958 | ||
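This hunk shows the tree-wide shape of the lockup-detection rework, already seen in cik.c and cik_sdma.c above: radeon_ring_lockup_update() now takes rdev and records the ring's progress itself, and the radeon_ring_force_activity() nudge is gone entirely. Every per-ring is_lockup hook collapses to the same few lines; a hedged sketch with illustrative names:

	bool mydrv_gfx_is_lockup(struct radeon_device *rdev,
				 struct radeon_ring *ring)
	{
		u32 reset_mask = mydrv_gpu_check_soft_reset(rdev); /* illustrative */

		if (!(reset_mask & MYDRV_RESET_GFX_BITS)) {
			/* engine looks healthy: note forward progress */
			radeon_ring_lockup_update(rdev, ring);
			return false;
		}
		return radeon_ring_test_lockup(rdev, ring);
	}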
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index c7cac07f139b..5c8b358f9fba 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -1165,7 +1165,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1165 | "0x%04X\n", reg); | 1165 | "0x%04X\n", reg); |
1166 | return -EINVAL; | 1166 | return -EINVAL; |
1167 | } | 1167 | } |
1168 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1168 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1169 | break; | 1169 | break; |
1170 | case DB_DEPTH_CONTROL: | 1170 | case DB_DEPTH_CONTROL: |
1171 | track->db_depth_control = radeon_get_ib_value(p, idx); | 1171 | track->db_depth_control = radeon_get_ib_value(p, idx); |
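From here to the end of the evergreen_cs.c diff the hunks are one mechanical rename: reloc->lobj.gpu_offset and reloc->lobj.tiling_flags become reloc->gpu_offset and reloc->tiling_flags. The series flattens the validation-list fields into the reloc entry itself; roughly (a hedged sketch, trimmed to the fields these hunks touch):

	struct radeon_cs_reloc {
		struct radeon_bo	*robj;
		/* previously nested as a struct radeon_bo_list lobj,
		 * i.e. reloc->lobj.gpu_offset, reloc->lobj.tiling_flags */
		uint64_t		gpu_offset;
		uint32_t		tiling_flags;
		/* ... */
	};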
@@ -1196,12 +1196,12 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1196 | } | 1196 | } |
1197 | ib[idx] &= ~Z_ARRAY_MODE(0xf); | 1197 | ib[idx] &= ~Z_ARRAY_MODE(0xf); |
1198 | track->db_z_info &= ~Z_ARRAY_MODE(0xf); | 1198 | track->db_z_info &= ~Z_ARRAY_MODE(0xf); |
1199 | ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1199 | ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
1200 | track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1200 | track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
1201 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1201 | if (reloc->tiling_flags & RADEON_TILING_MACRO) { |
1202 | unsigned bankw, bankh, mtaspect, tile_split; | 1202 | unsigned bankw, bankh, mtaspect, tile_split; |
1203 | 1203 | ||
1204 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | 1204 | evergreen_tiling_fields(reloc->tiling_flags, |
1205 | &bankw, &bankh, &mtaspect, | 1205 | &bankw, &bankh, &mtaspect, |
1206 | &tile_split); | 1206 | &tile_split); |
1207 | ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | 1207 | ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); |
@@ -1237,7 +1237,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1237 | return -EINVAL; | 1237 | return -EINVAL; |
1238 | } | 1238 | } |
1239 | track->db_z_read_offset = radeon_get_ib_value(p, idx); | 1239 | track->db_z_read_offset = radeon_get_ib_value(p, idx); |
1240 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1240 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1241 | track->db_z_read_bo = reloc->robj; | 1241 | track->db_z_read_bo = reloc->robj; |
1242 | track->db_dirty = true; | 1242 | track->db_dirty = true; |
1243 | break; | 1243 | break; |
@@ -1249,7 +1249,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1249 | return -EINVAL; | 1249 | return -EINVAL; |
1250 | } | 1250 | } |
1251 | track->db_z_write_offset = radeon_get_ib_value(p, idx); | 1251 | track->db_z_write_offset = radeon_get_ib_value(p, idx); |
1252 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1252 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1253 | track->db_z_write_bo = reloc->robj; | 1253 | track->db_z_write_bo = reloc->robj; |
1254 | track->db_dirty = true; | 1254 | track->db_dirty = true; |
1255 | break; | 1255 | break; |
@@ -1261,7 +1261,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1261 | return -EINVAL; | 1261 | return -EINVAL; |
1262 | } | 1262 | } |
1263 | track->db_s_read_offset = radeon_get_ib_value(p, idx); | 1263 | track->db_s_read_offset = radeon_get_ib_value(p, idx); |
1264 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1264 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1265 | track->db_s_read_bo = reloc->robj; | 1265 | track->db_s_read_bo = reloc->robj; |
1266 | track->db_dirty = true; | 1266 | track->db_dirty = true; |
1267 | break; | 1267 | break; |
@@ -1273,7 +1273,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1273 | return -EINVAL; | 1273 | return -EINVAL; |
1274 | } | 1274 | } |
1275 | track->db_s_write_offset = radeon_get_ib_value(p, idx); | 1275 | track->db_s_write_offset = radeon_get_ib_value(p, idx); |
1276 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1276 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1277 | track->db_s_write_bo = reloc->robj; | 1277 | track->db_s_write_bo = reloc->robj; |
1278 | track->db_dirty = true; | 1278 | track->db_dirty = true; |
1279 | break; | 1279 | break; |
@@ -1297,7 +1297,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1297 | } | 1297 | } |
1298 | tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; | 1298 | tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; |
1299 | track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; | 1299 | track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; |
1300 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1300 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1301 | track->vgt_strmout_bo[tmp] = reloc->robj; | 1301 | track->vgt_strmout_bo[tmp] = reloc->robj; |
1302 | track->streamout_dirty = true; | 1302 | track->streamout_dirty = true; |
1303 | break; | 1303 | break; |
@@ -1317,7 +1317,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1317 | "0x%04X\n", reg); | 1317 | "0x%04X\n", reg); |
1318 | return -EINVAL; | 1318 | return -EINVAL; |
1319 | } | 1319 | } |
1320 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1320 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1321 | case CB_TARGET_MASK: | 1321 | case CB_TARGET_MASK: |
1322 | track->cb_target_mask = radeon_get_ib_value(p, idx); | 1322 | track->cb_target_mask = radeon_get_ib_value(p, idx); |
1323 | track->cb_dirty = true; | 1323 | track->cb_dirty = true; |
@@ -1381,8 +1381,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1381 | "0x%04X\n", reg); | 1381 | "0x%04X\n", reg); |
1382 | return -EINVAL; | 1382 | return -EINVAL; |
1383 | } | 1383 | } |
1384 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1384 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
1385 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1385 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
1386 | } | 1386 | } |
1387 | track->cb_dirty = true; | 1387 | track->cb_dirty = true; |
1388 | break; | 1388 | break; |
@@ -1399,8 +1399,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1399 | "0x%04X\n", reg); | 1399 | "0x%04X\n", reg); |
1400 | return -EINVAL; | 1400 | return -EINVAL; |
1401 | } | 1401 | } |
1402 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1402 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
1403 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1403 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
1404 | } | 1404 | } |
1405 | track->cb_dirty = true; | 1405 | track->cb_dirty = true; |
1406 | break; | 1406 | break; |
@@ -1461,10 +1461,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1461 | return -EINVAL; | 1461 | return -EINVAL; |
1462 | } | 1462 | } |
1463 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 1463 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1464 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1464 | if (reloc->tiling_flags & RADEON_TILING_MACRO) { |
1465 | unsigned bankw, bankh, mtaspect, tile_split; | 1465 | unsigned bankw, bankh, mtaspect, tile_split; |
1466 | 1466 | ||
1467 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | 1467 | evergreen_tiling_fields(reloc->tiling_flags, |
1468 | &bankw, &bankh, &mtaspect, | 1468 | &bankw, &bankh, &mtaspect, |
1469 | &tile_split); | 1469 | &tile_split); |
1470 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | 1470 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); |
@@ -1489,10 +1489,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1489 | return -EINVAL; | 1489 | return -EINVAL; |
1490 | } | 1490 | } |
1491 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 1491 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1492 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1492 | if (reloc->tiling_flags & RADEON_TILING_MACRO) { |
1493 | unsigned bankw, bankh, mtaspect, tile_split; | 1493 | unsigned bankw, bankh, mtaspect, tile_split; |
1494 | 1494 | ||
1495 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | 1495 | evergreen_tiling_fields(reloc->tiling_flags, |
1496 | &bankw, &bankh, &mtaspect, | 1496 | &bankw, &bankh, &mtaspect, |
1497 | &tile_split); | 1497 | &tile_split); |
1498 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | 1498 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); |
@@ -1520,7 +1520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1520 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | 1520 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); |
1521 | return -EINVAL; | 1521 | return -EINVAL; |
1522 | } | 1522 | } |
1523 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1523 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1524 | track->cb_color_fmask_bo[tmp] = reloc->robj; | 1524 | track->cb_color_fmask_bo[tmp] = reloc->robj; |
1525 | break; | 1525 | break; |
1526 | case CB_COLOR0_CMASK: | 1526 | case CB_COLOR0_CMASK: |
@@ -1537,7 +1537,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1537 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | 1537 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); |
1538 | return -EINVAL; | 1538 | return -EINVAL; |
1539 | } | 1539 | } |
1540 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1540 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1541 | track->cb_color_cmask_bo[tmp] = reloc->robj; | 1541 | track->cb_color_cmask_bo[tmp] = reloc->robj; |
1542 | break; | 1542 | break; |
1543 | case CB_COLOR0_FMASK_SLICE: | 1543 | case CB_COLOR0_FMASK_SLICE: |
@@ -1578,7 +1578,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1578 | } | 1578 | } |
1579 | tmp = (reg - CB_COLOR0_BASE) / 0x3c; | 1579 | tmp = (reg - CB_COLOR0_BASE) / 0x3c; |
1580 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | 1580 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); |
1581 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1581 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1582 | track->cb_color_bo[tmp] = reloc->robj; | 1582 | track->cb_color_bo[tmp] = reloc->robj; |
1583 | track->cb_dirty = true; | 1583 | track->cb_dirty = true; |
1584 | break; | 1584 | break; |
@@ -1594,7 +1594,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1594 | } | 1594 | } |
1595 | tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; | 1595 | tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; |
1596 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | 1596 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); |
1597 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1597 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1598 | track->cb_color_bo[tmp] = reloc->robj; | 1598 | track->cb_color_bo[tmp] = reloc->robj; |
1599 | track->cb_dirty = true; | 1599 | track->cb_dirty = true; |
1600 | break; | 1600 | break; |
@@ -1606,7 +1606,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1606 | return -EINVAL; | 1606 | return -EINVAL; |
1607 | } | 1607 | } |
1608 | track->htile_offset = radeon_get_ib_value(p, idx); | 1608 | track->htile_offset = radeon_get_ib_value(p, idx); |
1609 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1609 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1610 | track->htile_bo = reloc->robj; | 1610 | track->htile_bo = reloc->robj; |
1611 | track->db_dirty = true; | 1611 | track->db_dirty = true; |
1612 | break; | 1612 | break; |
@@ -1723,7 +1723,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1723 | "0x%04X\n", reg); | 1723 | "0x%04X\n", reg); |
1724 | return -EINVAL; | 1724 | return -EINVAL; |
1725 | } | 1725 | } |
1726 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1726 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1727 | break; | 1727 | break; |
1728 | case SX_MEMORY_EXPORT_BASE: | 1728 | case SX_MEMORY_EXPORT_BASE: |
1729 | if (p->rdev->family >= CHIP_CAYMAN) { | 1729 | if (p->rdev->family >= CHIP_CAYMAN) { |
@@ -1737,7 +1737,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1737 | "0x%04X\n", reg); | 1737 | "0x%04X\n", reg); |
1738 | return -EINVAL; | 1738 | return -EINVAL; |
1739 | } | 1739 | } |
1740 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1740 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1741 | break; | 1741 | break; |
1742 | case CAYMAN_SX_SCATTER_EXPORT_BASE: | 1742 | case CAYMAN_SX_SCATTER_EXPORT_BASE: |
1743 | if (p->rdev->family < CHIP_CAYMAN) { | 1743 | if (p->rdev->family < CHIP_CAYMAN) { |
@@ -1751,7 +1751,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1751 | "0x%04X\n", reg); | 1751 | "0x%04X\n", reg); |
1752 | return -EINVAL; | 1752 | return -EINVAL; |
1753 | } | 1753 | } |
1754 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1754 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1755 | break; | 1755 | break; |
1756 | case SX_MISC: | 1756 | case SX_MISC: |
1757 | track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; | 1757 | track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; |
@@ -1836,7 +1836,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1836 | return -EINVAL; | 1836 | return -EINVAL; |
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | offset = reloc->lobj.gpu_offset + | 1839 | offset = reloc->gpu_offset + |
1840 | (idx_value & 0xfffffff0) + | 1840 | (idx_value & 0xfffffff0) + |
1841 | ((u64)(tmp & 0xff) << 32); | 1841 | ((u64)(tmp & 0xff) << 32); |
1842 | 1842 | ||
@@ -1882,7 +1882,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1882 | return -EINVAL; | 1882 | return -EINVAL; |
1883 | } | 1883 | } |
1884 | 1884 | ||
1885 | offset = reloc->lobj.gpu_offset + | 1885 | offset = reloc->gpu_offset + |
1886 | idx_value + | 1886 | idx_value + |
1887 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | 1887 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
1888 | 1888 | ||
@@ -1909,7 +1909,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1909 | return -EINVAL; | 1909 | return -EINVAL; |
1910 | } | 1910 | } |
1911 | 1911 | ||
1912 | offset = reloc->lobj.gpu_offset + | 1912 | offset = reloc->gpu_offset + |
1913 | idx_value + | 1913 | idx_value + |
1914 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | 1914 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
1915 | 1915 | ||
@@ -1937,7 +1937,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1937 | return -EINVAL; | 1937 | return -EINVAL; |
1938 | } | 1938 | } |
1939 | 1939 | ||
1940 | offset = reloc->lobj.gpu_offset + | 1940 | offset = reloc->gpu_offset + |
1941 | radeon_get_ib_value(p, idx+1) + | 1941 | radeon_get_ib_value(p, idx+1) + |
1942 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 1942 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
1943 | 1943 | ||
@@ -2027,7 +2027,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2027 | DRM_ERROR("bad DISPATCH_INDIRECT\n"); | 2027 | DRM_ERROR("bad DISPATCH_INDIRECT\n"); |
2028 | return -EINVAL; | 2028 | return -EINVAL; |
2029 | } | 2029 | } |
2030 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 2030 | ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff); |
2031 | r = evergreen_cs_track_check(p); | 2031 | r = evergreen_cs_track_check(p); |
2032 | if (r) { | 2032 | if (r) { |
2033 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | 2033 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); |
@@ -2049,7 +2049,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2049 | return -EINVAL; | 2049 | return -EINVAL; |
2050 | } | 2050 | } |
2051 | 2051 | ||
2052 | offset = reloc->lobj.gpu_offset + | 2052 | offset = reloc->gpu_offset + |
2053 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | 2053 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
2054 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 2054 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
2055 | 2055 | ||
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2106 | tmp = radeon_get_ib_value(p, idx) + | 2106 | tmp = radeon_get_ib_value(p, idx) + |
2107 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | 2107 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
2108 | 2108 | ||
2109 | offset = reloc->lobj.gpu_offset + tmp; | 2109 | offset = reloc->gpu_offset + tmp; |
2110 | 2110 | ||
2111 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { | 2111 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { |
2112 | dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", | 2112 | dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", |
@@ -2144,7 +2144,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2144 | tmp = radeon_get_ib_value(p, idx+2) + | 2144 | tmp = radeon_get_ib_value(p, idx+2) + |
2145 | ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); | 2145 | ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); |
2146 | 2146 | ||
2147 | offset = reloc->lobj.gpu_offset + tmp; | 2147 | offset = reloc->gpu_offset + tmp; |
2148 | 2148 | ||
2149 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { | 2149 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { |
2150 | dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", | 2150 | dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", |
@@ -2174,7 +2174,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2174 | DRM_ERROR("bad SURFACE_SYNC\n"); | 2174 | DRM_ERROR("bad SURFACE_SYNC\n"); |
2175 | return -EINVAL; | 2175 | return -EINVAL; |
2176 | } | 2176 | } |
2177 | ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 2177 | ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
2178 | } | 2178 | } |
2179 | break; | 2179 | break; |
2180 | case PACKET3_EVENT_WRITE: | 2180 | case PACKET3_EVENT_WRITE: |
@@ -2190,7 +2190,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2190 | DRM_ERROR("bad EVENT_WRITE\n"); | 2190 | DRM_ERROR("bad EVENT_WRITE\n"); |
2191 | return -EINVAL; | 2191 | return -EINVAL; |
2192 | } | 2192 | } |
2193 | offset = reloc->lobj.gpu_offset + | 2193 | offset = reloc->gpu_offset + |
2194 | (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + | 2194 | (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
2195 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 2195 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
2196 | 2196 | ||
@@ -2212,7 +2212,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2212 | return -EINVAL; | 2212 | return -EINVAL; |
2213 | } | 2213 | } |
2214 | 2214 | ||
2215 | offset = reloc->lobj.gpu_offset + | 2215 | offset = reloc->gpu_offset + |
2216 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | 2216 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
2217 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 2217 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
2218 | 2218 | ||
@@ -2234,7 +2234,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2234 | return -EINVAL; | 2234 | return -EINVAL; |
2235 | } | 2235 | } |
2236 | 2236 | ||
2237 | offset = reloc->lobj.gpu_offset + | 2237 | offset = reloc->gpu_offset + |
2238 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | 2238 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
2239 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 2239 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
2240 | 2240 | ||
@@ -2302,11 +2302,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2302 | } | 2302 | } |
2303 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 2303 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
2304 | ib[idx+1+(i*8)+1] |= | 2304 | ib[idx+1+(i*8)+1] |= |
2305 | TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 2305 | TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); |
2306 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 2306 | if (reloc->tiling_flags & RADEON_TILING_MACRO) { |
2307 | unsigned bankw, bankh, mtaspect, tile_split; | 2307 | unsigned bankw, bankh, mtaspect, tile_split; |
2308 | 2308 | ||
2309 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | 2309 | evergreen_tiling_fields(reloc->tiling_flags, |
2310 | &bankw, &bankh, &mtaspect, | 2310 | &bankw, &bankh, &mtaspect, |
2311 | &tile_split); | 2311 | &tile_split); |
2312 | ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split); | 2312 | ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split); |
@@ -2318,7 +2318,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2318 | } | 2318 | } |
2319 | } | 2319 | } |
2320 | texture = reloc->robj; | 2320 | texture = reloc->robj; |
2321 | toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 2321 | toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
2322 | 2322 | ||
2323 | /* tex mip base */ | 2323 | /* tex mip base */ |
2324 | tex_dim = ib[idx+1+(i*8)+0] & 0x7; | 2324 | tex_dim = ib[idx+1+(i*8)+0] & 0x7; |
@@ -2337,7 +2337,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2337 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 2337 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
2338 | return -EINVAL; | 2338 | return -EINVAL; |
2339 | } | 2339 | } |
2340 | moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 2340 | moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
2341 | mipmap = reloc->robj; | 2341 | mipmap = reloc->robj; |
2342 | } | 2342 | } |
2343 | 2343 | ||
@@ -2364,7 +2364,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2364 | ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; | 2364 | ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; |
2365 | } | 2365 | } |
2366 | 2366 | ||
2367 | offset64 = reloc->lobj.gpu_offset + offset; | 2367 | offset64 = reloc->gpu_offset + offset; |
2368 | ib[idx+1+(i*8)+0] = offset64; | 2368 | ib[idx+1+(i*8)+0] = offset64; |
2369 | ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | | 2369 | ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | |
2370 | (upper_32_bits(offset64) & 0xff); | 2370 | (upper_32_bits(offset64) & 0xff); |
@@ -2445,7 +2445,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2445 | offset + 4, radeon_bo_size(reloc->robj)); | 2445 | offset + 4, radeon_bo_size(reloc->robj)); |
2446 | return -EINVAL; | 2446 | return -EINVAL; |
2447 | } | 2447 | } |
2448 | offset += reloc->lobj.gpu_offset; | 2448 | offset += reloc->gpu_offset; |
2449 | ib[idx+1] = offset; | 2449 | ib[idx+1] = offset; |
2450 | ib[idx+2] = upper_32_bits(offset) & 0xff; | 2450 | ib[idx+2] = upper_32_bits(offset) & 0xff; |
2451 | } | 2451 | } |
@@ -2464,7 +2464,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2464 | offset + 4, radeon_bo_size(reloc->robj)); | 2464 | offset + 4, radeon_bo_size(reloc->robj)); |
2465 | return -EINVAL; | 2465 | return -EINVAL; |
2466 | } | 2466 | } |
2467 | offset += reloc->lobj.gpu_offset; | 2467 | offset += reloc->gpu_offset; |
2468 | ib[idx+3] = offset; | 2468 | ib[idx+3] = offset; |
2469 | ib[idx+4] = upper_32_bits(offset) & 0xff; | 2469 | ib[idx+4] = upper_32_bits(offset) & 0xff; |
2470 | } | 2470 | } |
@@ -2493,7 +2493,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2493 | offset + 8, radeon_bo_size(reloc->robj)); | 2493 | offset + 8, radeon_bo_size(reloc->robj)); |
2494 | return -EINVAL; | 2494 | return -EINVAL; |
2495 | } | 2495 | } |
2496 | offset += reloc->lobj.gpu_offset; | 2496 | offset += reloc->gpu_offset; |
2497 | ib[idx+0] = offset; | 2497 | ib[idx+0] = offset; |
2498 | ib[idx+1] = upper_32_bits(offset) & 0xff; | 2498 | ib[idx+1] = upper_32_bits(offset) & 0xff; |
2499 | break; | 2499 | break; |
@@ -2518,7 +2518,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2518 | offset + 4, radeon_bo_size(reloc->robj)); | 2518 | offset + 4, radeon_bo_size(reloc->robj)); |
2519 | return -EINVAL; | 2519 | return -EINVAL; |
2520 | } | 2520 | } |
2521 | offset += reloc->lobj.gpu_offset; | 2521 | offset += reloc->gpu_offset; |
2522 | ib[idx+1] = offset; | 2522 | ib[idx+1] = offset; |
2523 | ib[idx+2] = upper_32_bits(offset) & 0xff; | 2523 | ib[idx+2] = upper_32_bits(offset) & 0xff; |
2524 | } else { | 2524 | } else { |
@@ -2542,7 +2542,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
2542 | offset + 4, radeon_bo_size(reloc->robj)); | 2542 | offset + 4, radeon_bo_size(reloc->robj)); |
2543 | return -EINVAL; | 2543 | return -EINVAL; |
2544 | } | 2544 | } |
2545 | offset += reloc->lobj.gpu_offset; | 2545 | offset += reloc->gpu_offset; |
2546 | ib[idx+3] = offset; | 2546 | ib[idx+3] = offset; |
2547 | ib[idx+4] = upper_32_bits(offset) & 0xff; | 2547 | ib[idx+4] = upper_32_bits(offset) & 0xff; |
2548 | } else { | 2548 | } else { |
@@ -2717,7 +2717,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2717 | dst_offset = radeon_get_ib_value(p, idx+1); | 2717 | dst_offset = radeon_get_ib_value(p, idx+1); |
2718 | dst_offset <<= 8; | 2718 | dst_offset <<= 8; |
2719 | 2719 | ||
2720 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2720 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2721 | p->idx += count + 7; | 2721 | p->idx += count + 7; |
2722 | break; | 2722 | break; |
2723 | /* linear */ | 2723 | /* linear */ |
@@ -2725,8 +2725,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2725 | dst_offset = radeon_get_ib_value(p, idx+1); | 2725 | dst_offset = radeon_get_ib_value(p, idx+1); |
2726 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; | 2726 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
2727 | 2727 | ||
2728 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2728 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2729 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2729 | ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2730 | p->idx += count + 3; | 2730 | p->idx += count + 3; |
2731 | break; | 2731 | break; |
2732 | default: | 2732 | default: |
@@ -2768,10 +2768,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2768 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); | 2768 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); |
2769 | return -EINVAL; | 2769 | return -EINVAL; |
2770 | } | 2770 | } |
2771 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2771 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2772 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2772 | ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2773 | ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2773 | ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2774 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2774 | ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2775 | p->idx += 5; | 2775 | p->idx += 5; |
2776 | break; | 2776 | break; |
2777 | /* Copy L2T/T2L */ | 2777 | /* Copy L2T/T2L */ |
@@ -2781,22 +2781,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2781 | /* tiled src, linear dst */ | 2781 | /* tiled src, linear dst */ |
2782 | src_offset = radeon_get_ib_value(p, idx+1); | 2782 | src_offset = radeon_get_ib_value(p, idx+1); |
2783 | src_offset <<= 8; | 2783 | src_offset <<= 8; |
2784 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2784 | ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); |
2785 | 2785 | ||
2786 | dst_offset = radeon_get_ib_value(p, idx + 7); | 2786 | dst_offset = radeon_get_ib_value(p, idx + 7); |
2787 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; | 2787 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
2788 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2788 | ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2789 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2789 | ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2790 | } else { | 2790 | } else { |
2791 | /* linear src, tiled dst */ | 2791 | /* linear src, tiled dst */ |
2792 | src_offset = radeon_get_ib_value(p, idx+7); | 2792 | src_offset = radeon_get_ib_value(p, idx+7); |
2793 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; | 2793 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
2794 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2794 | ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2795 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2795 | ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2796 | 2796 | ||
2797 | dst_offset = radeon_get_ib_value(p, idx+1); | 2797 | dst_offset = radeon_get_ib_value(p, idx+1); |
2798 | dst_offset <<= 8; | 2798 | dst_offset <<= 8; |
2799 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2799 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2800 | } | 2800 | } |
2801 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 2801 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
2802 | dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", | 2802 | dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", |
@@ -2827,10 +2827,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2827 | dst_offset + count, radeon_bo_size(dst_reloc->robj)); | 2827 | dst_offset + count, radeon_bo_size(dst_reloc->robj)); |
2828 | return -EINVAL; | 2828 | return -EINVAL; |
2829 | } | 2829 | } |
2830 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); | 2830 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff); |
2831 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); | 2831 | ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff); |
2832 | ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2832 | ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2833 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2833 | ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2834 | p->idx += 5; | 2834 | p->idx += 5; |
2835 | break; | 2835 | break; |
2836 | /* Copy L2L, partial */ | 2836 | /* Copy L2L, partial */ |
@@ -2840,10 +2840,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2840 | DRM_ERROR("L2L Partial is cayman only !\n"); | 2840 | DRM_ERROR("L2L Partial is cayman only !\n"); |
2841 | return -EINVAL; | 2841 | return -EINVAL; |
2842 | } | 2842 | } |
2843 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); | 2843 | ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff); |
2844 | ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2844 | ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2845 | ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); | 2845 | ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff); |
2846 | ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2846 | ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2847 | 2847 | ||
2848 | p->idx += 9; | 2848 | p->idx += 9; |
2849 | break; | 2849 | break; |
@@ -2876,12 +2876,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2876 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); | 2876 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); |
2877 | return -EINVAL; | 2877 | return -EINVAL; |
2878 | } | 2878 | } |
2879 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2879 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2880 | ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc); | 2880 | ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc); |
2881 | ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2881 | ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2882 | ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2882 | ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2883 | ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff; | 2883 | ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff; |
2884 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2884 | ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2885 | p->idx += 7; | 2885 | p->idx += 7; |
2886 | break; | 2886 | break; |
2887 | /* Copy L2T Frame to Field */ | 2887 | /* Copy L2T Frame to Field */ |
@@ -2916,10 +2916,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2916 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); | 2916 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); |
2917 | return -EINVAL; | 2917 | return -EINVAL; |
2918 | } | 2918 | } |
2919 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2919 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2920 | ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); | 2920 | ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8); |
2921 | ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2921 | ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2922 | ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2922 | ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2923 | p->idx += 10; | 2923 | p->idx += 10; |
2924 | break; | 2924 | break; |
2925 | /* Copy L2T/T2L, partial */ | 2925 | /* Copy L2T/T2L, partial */ |
@@ -2932,16 +2932,16 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2932 | /* detile bit */ | 2932 | /* detile bit */ |
2933 | if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { | 2933 | if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { |
2934 | /* tiled src, linear dst */ | 2934 | /* tiled src, linear dst */ |
2935 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2935 | ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); |
2936 | 2936 | ||
2937 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2937 | ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2938 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2938 | ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2939 | } else { | 2939 | } else { |
2940 | /* linear src, tiled dst */ | 2940 | /* linear src, tiled dst */ |
2941 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2941 | ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2942 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2942 | ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2943 | 2943 | ||
2944 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2944 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2945 | } | 2945 | } |
2946 | p->idx += 12; | 2946 | p->idx += 12; |
2947 | break; | 2947 | break; |
@@ -2978,10 +2978,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2978 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); | 2978 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); |
2979 | return -EINVAL; | 2979 | return -EINVAL; |
2980 | } | 2980 | } |
2981 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2981 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2982 | ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); | 2982 | ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8); |
2983 | ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2983 | ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2984 | ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2984 | ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2985 | p->idx += 10; | 2985 | p->idx += 10; |
2986 | break; | 2986 | break; |
2987 | /* Copy L2T/T2L (tile units) */ | 2987 | /* Copy L2T/T2L (tile units) */ |
@@ -2992,22 +2992,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2992 | /* tiled src, linear dst */ | 2992 | /* tiled src, linear dst */ |
2993 | src_offset = radeon_get_ib_value(p, idx+1); | 2993 | src_offset = radeon_get_ib_value(p, idx+1); |
2994 | src_offset <<= 8; | 2994 | src_offset <<= 8; |
2995 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2995 | ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); |
2996 | 2996 | ||
2997 | dst_offset = radeon_get_ib_value(p, idx+7); | 2997 | dst_offset = radeon_get_ib_value(p, idx+7); |
2998 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; | 2998 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
2999 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2999 | ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
3000 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3000 | ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
3001 | } else { | 3001 | } else { |
3002 | /* linear src, tiled dst */ | 3002 | /* linear src, tiled dst */ |
3003 | src_offset = radeon_get_ib_value(p, idx+7); | 3003 | src_offset = radeon_get_ib_value(p, idx+7); |
3004 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; | 3004 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3005 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3005 | ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
3006 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3006 | ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
3007 | 3007 | ||
3008 | dst_offset = radeon_get_ib_value(p, idx+1); | 3008 | dst_offset = radeon_get_ib_value(p, idx+1); |
3009 | dst_offset <<= 8; | 3009 | dst_offset <<= 8; |
3010 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3010 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
3011 | } | 3011 | } |
3012 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3012 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3013 | dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", | 3013 | dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", |
@@ -3028,8 +3028,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3028 | DRM_ERROR("L2T, T2L Partial is cayman only !\n"); | 3028 | DRM_ERROR("L2T, T2L Partial is cayman only !\n"); |
3029 | return -EINVAL; | 3029 | return -EINVAL; |
3030 | } | 3030 | } |
3031 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3031 | ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); |
3032 | ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3032 | ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8); |
3033 | p->idx += 13; | 3033 | p->idx += 13; |
3034 | break; | 3034 | break; |
3035 | /* Copy L2T broadcast (tile units) */ | 3035 | /* Copy L2T broadcast (tile units) */ |
@@ -3065,10 +3065,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3065 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); | 3065 | dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); |
3066 | return -EINVAL; | 3066 | return -EINVAL; |
3067 | } | 3067 | } |
3068 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3068 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
3069 | ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); | 3069 | ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8); |
3070 | ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3070 | ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
3071 | ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3071 | ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
3072 | p->idx += 10; | 3072 | p->idx += 10; |
3073 | break; | 3073 | break; |
3074 | default: | 3074 | default: |
@@ -3089,8 +3089,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3089 | dst_offset, radeon_bo_size(dst_reloc->robj)); | 3089 | dst_offset, radeon_bo_size(dst_reloc->robj)); |
3090 | return -EINVAL; | 3090 | return -EINVAL; |
3091 | } | 3091 | } |
3092 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3092 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
3093 | ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; | 3093 | ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000; |
3094 | p->idx += 4; | 3094 | p->idx += 4; |
3095 | break; | 3095 | break; |
3096 | case DMA_PACKET_NOP: | 3096 | case DMA_PACKET_NOP: |
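The evergreen_cs.c hunks above are a mechanical rename: relocation fields formerly reached through an embedded list entry (reloc->lobj.gpu_offset, reloc->lobj.tiling_flags) become direct members (reloc->gpu_offset, reloc->tiling_flags), which suggests the buffer-list entry was folded into the reloc structure itself. The patching arithmetic is unchanged: base registers take the GPU address in 256-byte units (offset >> 8), while packet operands split what the 0xff masks imply is a 40-bit address into an alignment-masked low dword plus an 8-bit high dword, added into the dwords already in the IB. A small user-space sketch of that split (split_gpu_addr is illustrative, not a driver function):

    #include <stdint.h>
    #include <stdio.h>

    /* Split a GPU address the way the CS checker patches indirect-buffer
     * dwords: an alignment-masked low dword plus bits 39:32 in the next
     * dword (the 0xff masks above imply a 40-bit address space). */
    static void split_gpu_addr(uint64_t gpu_offset, uint32_t align_mask,
                               uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)gpu_offset & align_mask;
            *hi = (uint32_t)(gpu_offset >> 32) & 0xff;
    }

    int main(void)
    {
            uint64_t addr = 0x1234567890ULL;    /* example 40-bit address */
            uint32_t lo, hi;

            split_gpu_addr(addr, 0xfffffffc, &lo, &hi); /* dword-aligned */
            printf("operand: lo=0x%08x hi=0x%02x\n", lo, hi);

            /* Base registers such as CB_COLOR*_BASE store the address in
             * 256-byte units instead. */
            printf("base reg: 0x%08x\n", (uint32_t)(addr >> 8));
            return 0;
    }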
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index a37b54436382..287fe966d7de 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c | |||
@@ -174,11 +174,9 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin | |||
174 | u32 reset_mask = evergreen_gpu_check_soft_reset(rdev); | 174 | u32 reset_mask = evergreen_gpu_check_soft_reset(rdev); |
175 | 175 | ||
176 | if (!(reset_mask & RADEON_RESET_DMA)) { | 176 | if (!(reset_mask & RADEON_RESET_DMA)) { |
177 | radeon_ring_lockup_update(ring); | 177 | radeon_ring_lockup_update(rdev, ring); |
178 | return false; | 178 | return false; |
179 | } | 179 | } |
180 | /* force ring activities */ | ||
181 | radeon_ring_force_activity(rdev, ring); | ||
182 | return radeon_ring_test_lockup(rdev, ring); | 180 | return radeon_ring_test_lockup(rdev, ring); |
183 | } | 181 | } |
184 | 182 | ||
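radeon_ring_lockup_update() gains an rdev argument and the radeon_ring_force_activity() call is dropped, so the detector no longer nudges the ring with dummy work before re-testing; it apparently just records the last point of observed progress and reports a lockup once that record goes stale. A hedged, user-space sketch of that style of detector (the struct layout, timeout and helpers are invented for illustration, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    #define LOCKUP_TIMEOUT_MS 10000         /* illustrative threshold */

    struct ring_state {
            uint32_t last_rptr;             /* read pointer at last update */
            uint64_t last_activity;         /* ms timestamp of last progress */
    };

    static uint64_t now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Stand-in for reading the ring's hardware read pointer. */
    static uint32_t read_rptr(void) { static uint32_t rptr; return rptr; }

    static void lockup_update(struct ring_state *rs)
    {
            rs->last_rptr = read_rptr();
            rs->last_activity = now_ms();
    }

    static bool test_lockup(struct ring_state *rs)
    {
            if (read_rptr() != rs->last_rptr) {
                    lockup_update(rs);      /* the ring advanced */
                    return false;
            }
            return now_ms() - rs->last_activity > LOCKUP_TIMEOUT_MS;
    }

    int main(void)
    {
            struct ring_state rs = { 0, 0 };
            lockup_update(&rs);
            return test_lockup(&rs) ? 1 : 0;
    }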
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 351db361239d..16ec9d56a234 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -1338,13 +1338,11 @@ static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable) | |||
1338 | PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); | 1338 | PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | #if 0 | ||
1342 | static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable) | 1341 | static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable) |
1343 | { | 1342 | { |
1344 | return kv_notify_message_to_smu(rdev, enable ? | 1343 | return kv_notify_message_to_smu(rdev, enable ? |
1345 | PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); | 1344 | PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); |
1346 | } | 1345 | } |
1347 | #endif | ||
1348 | 1346 | ||
1349 | static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable) | 1347 | static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable) |
1350 | { | 1348 | { |
@@ -1389,7 +1387,6 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
1389 | return kv_enable_uvd_dpm(rdev, !gate); | 1387 | return kv_enable_uvd_dpm(rdev, !gate); |
1390 | } | 1388 | } |
1391 | 1389 | ||
1392 | #if 0 | ||
1393 | static u8 kv_get_vce_boot_level(struct radeon_device *rdev) | 1390 | static u8 kv_get_vce_boot_level(struct radeon_device *rdev) |
1394 | { | 1391 | { |
1395 | u8 i; | 1392 | u8 i; |
@@ -1414,6 +1411,9 @@ static int kv_update_vce_dpm(struct radeon_device *rdev, | |||
1414 | int ret; | 1411 | int ret; |
1415 | 1412 | ||
1416 | if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) { | 1413 | if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) { |
1414 | kv_dpm_powergate_vce(rdev, false); | ||
1415 | /* turn the clocks on when encoding */ | ||
1416 | cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); | ||
1417 | if (pi->caps_stable_p_state) | 1417 | if (pi->caps_stable_p_state) |
1418 | pi->vce_boot_level = table->count - 1; | 1418 | pi->vce_boot_level = table->count - 1; |
1419 | else | 1419 | else |
@@ -1436,11 +1436,13 @@ static int kv_update_vce_dpm(struct radeon_device *rdev, | |||
1436 | kv_enable_vce_dpm(rdev, true); | 1436 | kv_enable_vce_dpm(rdev, true); |
1437 | } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) { | 1437 | } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) { |
1438 | kv_enable_vce_dpm(rdev, false); | 1438 | kv_enable_vce_dpm(rdev, false); |
1439 | /* turn the clocks off when not encoding */ | ||
1440 | cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); | ||
1441 | kv_dpm_powergate_vce(rdev, true); | ||
1439 | } | 1442 | } |
1440 | 1443 | ||
1441 | return 0; | 1444 | return 0; |
1442 | } | 1445 | } |
1443 | #endif | ||
1444 | 1446 | ||
1445 | static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) | 1447 | static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) |
1446 | { | 1448 | { |
@@ -1575,11 +1577,16 @@ static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate) | |||
1575 | pi->vce_power_gated = gate; | 1577 | pi->vce_power_gated = gate; |
1576 | 1578 | ||
1577 | if (gate) { | 1579 | if (gate) { |
1578 | if (pi->caps_vce_pg) | 1580 | if (pi->caps_vce_pg) { |
1581 | /* XXX do we need a vce_v1_0_stop() ? */ | ||
1579 | kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF); | 1582 | kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF); |
1583 | } | ||
1580 | } else { | 1584 | } else { |
1581 | if (pi->caps_vce_pg) | 1585 | if (pi->caps_vce_pg) { |
1582 | kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON); | 1586 | kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON); |
1587 | vce_v2_0_resume(rdev); | ||
1588 | vce_v1_0_start(rdev); | ||
1589 | } | ||
1583 | } | 1590 | } |
1584 | } | 1591 | } |
1585 | 1592 | ||
@@ -1768,7 +1775,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1768 | { | 1775 | { |
1769 | struct kv_power_info *pi = kv_get_pi(rdev); | 1776 | struct kv_power_info *pi = kv_get_pi(rdev); |
1770 | struct radeon_ps *new_ps = &pi->requested_rps; | 1777 | struct radeon_ps *new_ps = &pi->requested_rps; |
1771 | /*struct radeon_ps *old_ps = &pi->current_rps;*/ | 1778 | struct radeon_ps *old_ps = &pi->current_rps; |
1772 | int ret; | 1779 | int ret; |
1773 | 1780 | ||
1774 | if (pi->bapm_enable) { | 1781 | if (pi->bapm_enable) { |
@@ -1798,13 +1805,12 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1798 | kv_set_enabled_levels(rdev); | 1805 | kv_set_enabled_levels(rdev); |
1799 | kv_force_lowest_valid(rdev); | 1806 | kv_force_lowest_valid(rdev); |
1800 | kv_unforce_levels(rdev); | 1807 | kv_unforce_levels(rdev); |
1801 | #if 0 | 1808 | |
1802 | ret = kv_update_vce_dpm(rdev, new_ps, old_ps); | 1809 | ret = kv_update_vce_dpm(rdev, new_ps, old_ps); |
1803 | if (ret) { | 1810 | if (ret) { |
1804 | DRM_ERROR("kv_update_vce_dpm failed\n"); | 1811 | DRM_ERROR("kv_update_vce_dpm failed\n"); |
1805 | return ret; | 1812 | return ret; |
1806 | } | 1813 | } |
1807 | #endif | ||
1808 | kv_update_sclk_t(rdev); | 1814 | kv_update_sclk_t(rdev); |
1809 | } | 1815 | } |
1810 | } else { | 1816 | } else { |
@@ -1823,13 +1829,11 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1823 | kv_program_nbps_index_settings(rdev, new_ps); | 1829 | kv_program_nbps_index_settings(rdev, new_ps); |
1824 | kv_freeze_sclk_dpm(rdev, false); | 1830 | kv_freeze_sclk_dpm(rdev, false); |
1825 | kv_set_enabled_levels(rdev); | 1831 | kv_set_enabled_levels(rdev); |
1826 | #if 0 | ||
1827 | ret = kv_update_vce_dpm(rdev, new_ps, old_ps); | 1832 | ret = kv_update_vce_dpm(rdev, new_ps, old_ps); |
1828 | if (ret) { | 1833 | if (ret) { |
1829 | DRM_ERROR("kv_update_vce_dpm failed\n"); | 1834 | DRM_ERROR("kv_update_vce_dpm failed\n"); |
1830 | return ret; | 1835 | return ret; |
1831 | } | 1836 | } |
1832 | #endif | ||
1833 | kv_update_acp_boot_level(rdev); | 1837 | kv_update_acp_boot_level(rdev); |
1834 | kv_update_sclk_t(rdev); | 1838 | kv_update_sclk_t(rdev); |
1835 | kv_enable_nb_dpm(rdev); | 1839 | kv_enable_nb_dpm(rdev); |
@@ -2037,6 +2041,14 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2037 | struct radeon_clock_and_voltage_limits *max_limits = | 2041 | struct radeon_clock_and_voltage_limits *max_limits = |
2038 | &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | 2042 | &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
2039 | 2043 | ||
2044 | if (new_rps->vce_active) { | ||
2045 | new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; | ||
2046 | new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; | ||
2047 | } else { | ||
2048 | new_rps->evclk = 0; | ||
2049 | new_rps->ecclk = 0; | ||
2050 | } | ||
2051 | |||
2040 | mclk = max_limits->mclk; | 2052 | mclk = max_limits->mclk; |
2041 | sclk = min_sclk; | 2053 | sclk = min_sclk; |
2042 | 2054 | ||
@@ -2056,6 +2068,11 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2056 | sclk = stable_p_state_sclk; | 2068 | sclk = stable_p_state_sclk; |
2057 | } | 2069 | } |
2058 | 2070 | ||
2071 | if (new_rps->vce_active) { | ||
2072 | if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) | ||
2073 | sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; | ||
2074 | } | ||
2075 | |||
2059 | ps->need_dfs_bypass = true; | 2076 | ps->need_dfs_bypass = true; |
2060 | 2077 | ||
2061 | for (i = 0; i < ps->num_levels; i++) { | 2078 | for (i = 0; i < ps->num_levels; i++) { |
@@ -2092,7 +2109,8 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2092 | } | 2109 | } |
2093 | } | 2110 | } |
2094 | 2111 | ||
2095 | pi->video_start = new_rps->dclk || new_rps->vclk; | 2112 | pi->video_start = new_rps->dclk || new_rps->vclk || |
2113 | new_rps->evclk || new_rps->ecclk; | ||
2096 | 2114 | ||
2097 | if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == | 2115 | if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == |
2098 | ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) | 2116 | ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) |
@@ -2538,9 +2556,6 @@ static int kv_parse_power_table(struct radeon_device *rdev) | |||
2538 | if (!rdev->pm.dpm.ps) | 2556 | if (!rdev->pm.dpm.ps) |
2539 | return -ENOMEM; | 2557 | return -ENOMEM; |
2540 | power_state_offset = (u8 *)state_array->states; | 2558 | power_state_offset = (u8 *)state_array->states; |
2541 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
2542 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
2543 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
2544 | for (i = 0; i < state_array->ucNumEntries; i++) { | 2559 | for (i = 0; i < state_array->ucNumEntries; i++) { |
2545 | u8 *idx; | 2560 | u8 *idx; |
2546 | power_state = (union pplib_power_state *)power_state_offset; | 2561 | power_state = (union pplib_power_state *)power_state_offset; |
@@ -2577,6 +2592,19 @@ static int kv_parse_power_table(struct radeon_device *rdev) | |||
2577 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; | 2592 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
2578 | } | 2593 | } |
2579 | rdev->pm.dpm.num_ps = state_array->ucNumEntries; | 2594 | rdev->pm.dpm.num_ps = state_array->ucNumEntries; |
2595 | |||
2596 | /* fill in the vce power states */ | ||
2597 | for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { | ||
2598 | u32 sclk; | ||
2599 | clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; | ||
2600 | clock_info = (union pplib_clock_info *) | ||
2601 | &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; | ||
2602 | sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); | ||
2603 | sclk |= clock_info->sumo.ucEngineClockHigh << 16; | ||
2604 | rdev->pm.dpm.vce_states[i].sclk = sclk; | ||
2605 | rdev->pm.dpm.vce_states[i].mclk = 0; | ||
2606 | } | ||
2607 | |||
2580 | return 0; | 2608 | return 0; |
2581 | } | 2609 | } |
2582 | 2610 | ||
@@ -2590,6 +2618,10 @@ int kv_dpm_init(struct radeon_device *rdev) | |||
2590 | return -ENOMEM; | 2618 | return -ENOMEM; |
2591 | rdev->pm.dpm.priv = pi; | 2619 | rdev->pm.dpm.priv = pi; |
2592 | 2620 | ||
2621 | ret = r600_get_platform_caps(rdev); | ||
2622 | if (ret) | ||
2623 | return ret; | ||
2624 | |||
2593 | ret = r600_parse_extended_power_table(rdev); | 2625 | ret = r600_parse_extended_power_table(rdev); |
2594 | if (ret) | 2626 | if (ret) |
2595 | return ret; | 2627 | return ret; |
@@ -2623,7 +2655,7 @@ int kv_dpm_init(struct radeon_device *rdev) | |||
2623 | pi->caps_fps = false; /* true? */ | 2655 | pi->caps_fps = false; /* true? */ |
2624 | pi->caps_uvd_pg = true; | 2656 | pi->caps_uvd_pg = true; |
2625 | pi->caps_uvd_dpm = true; | 2657 | pi->caps_uvd_dpm = true; |
2626 | pi->caps_vce_pg = false; | 2658 | pi->caps_vce_pg = false; /* XXX true */ |
2627 | pi->caps_samu_pg = false; | 2659 | pi->caps_samu_pg = false; |
2628 | pi->caps_acp_pg = false; | 2660 | pi->caps_acp_pg = false; |
2629 | pi->caps_stable_p_state = false; | 2661 | pi->caps_stable_p_state = false; |
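In kv_dpm.c the #if 0 guards around the VCE DPM helpers are removed and kv_update_vce_dpm() is wired into both branches of kv_dpm_set_power_state(); the requested state now carries the VCE clocks (evclk/ecclk), sclk is raised to the VCE state's floor, and video_start also considers evclk/ecclk. Note that pi->caps_vce_pg stays false (/* XXX true */), so the power-off/on SMU messages are still gated off. A condensed sketch of the on/off ordering the hunks establish, with the driver calls (kv_dpm_powergate_vce(), cik_update_cg(), kv_enable_vce_dpm()) reduced to stubs:

    static void powergate_vce(int gate)  { /* PPSMC_MSG_VCEPower{OFF,ON} */ }
    static void update_vce_cg(int gate)  { /* cik_update_cg(..., VCE, gate) */ }
    static void enable_vce_dpm(int on)   { /* PPSMC_MSG_VCEDPM_{Enable,Disable} */ }

    static void vce_dpm_transition(unsigned int new_evclk,
                                   unsigned int cur_evclk)
    {
            if (new_evclk > 0 && cur_evclk == 0) {
                    powergate_vce(0);       /* power the block up first */
                    update_vce_cg(0);       /* clocks on when encoding */
                    enable_vce_dpm(1);
            } else if (new_evclk == 0 && cur_evclk > 0) {
                    enable_vce_dpm(0);
                    update_vce_cg(1);       /* clocks off when not encoding */
                    powergate_vce(1);       /* power the block down last */
            }
    }

    int main(void)
    {
            vce_dpm_transition(40000, 0);   /* encode state entered */
            vce_dpm_transition(0, 40000);   /* encode state left */
            return 0;
    }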
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index bf6300cfd62d..d246e043421a 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -1642,8 +1642,8 @@ static int cayman_cp_resume(struct radeon_device *rdev) | |||
1642 | ring = &rdev->ring[ridx[i]]; | 1642 | ring = &rdev->ring[ridx[i]]; |
1643 | WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); | 1643 | WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); |
1644 | 1644 | ||
1645 | ring->rptr = ring->wptr = 0; | 1645 | ring->wptr = 0; |
1646 | WREG32(cp_rb_rptr[i], ring->rptr); | 1646 | WREG32(cp_rb_rptr[i], 0); |
1647 | WREG32(cp_rb_wptr[i], ring->wptr); | 1647 | WREG32(cp_rb_wptr[i], ring->wptr); |
1648 | 1648 | ||
1649 | mdelay(1); | 1649 | mdelay(1); |
@@ -1917,11 +1917,9 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
1917 | if (!(reset_mask & (RADEON_RESET_GFX | | 1917 | if (!(reset_mask & (RADEON_RESET_GFX | |
1918 | RADEON_RESET_COMPUTE | | 1918 | RADEON_RESET_COMPUTE | |
1919 | RADEON_RESET_CP))) { | 1919 | RADEON_RESET_CP))) { |
1920 | radeon_ring_lockup_update(ring); | 1920 | radeon_ring_lockup_update(rdev, ring); |
1921 | return false; | 1921 | return false; |
1922 | } | 1922 | } |
1923 | /* force CP activities */ | ||
1924 | radeon_ring_force_activity(rdev, ring); | ||
1925 | return radeon_ring_test_lockup(rdev, ring); | 1923 | return radeon_ring_test_lockup(rdev, ring); |
1926 | } | 1924 | } |
1927 | 1925 | ||
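cayman_cp_resume() stops seeding a software copy of the read pointer: only wptr is kept in struct radeon_ring, and the rptr register is written as a plain 0. The same deletion of ring->rptr = RREG32(...) appears in cayman_dma_resume() and r100_cp_init() below, which suggests the read pointer is now always fetched from the hardware rather than shadowed in the ring structure. An invented sketch of what that means for a free-space computation (all names and the layout are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: with no software rptr shadow, free-space math reads
     * the hardware register every time. */
    struct ring { uint32_t wptr, size_dw; };

    static uint32_t read_rptr_reg(void) { static uint32_t r; return r; }

    static uint32_t ring_free_dw(const struct ring *ring)
    {
            uint32_t rptr = read_rptr_reg();        /* always ask the hw */
            uint32_t free = (rptr + ring->size_dw - ring->wptr) &
                            (ring->size_dw - 1);    /* size is a power of 2 */
            return free ? free - 1 : ring->size_dw - 1;
    }

    int main(void)
    {
            struct ring r = { .wptr = 16, .size_dw = 1024 };
            printf("free dwords: %u\n", ring_free_dw(&r));
            return 0;
    }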
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 7cf96b15377f..6378e0276691 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
@@ -248,8 +248,6 @@ int cayman_dma_resume(struct radeon_device *rdev) | |||
248 | ring->wptr = 0; | 248 | ring->wptr = 0; |
249 | WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); | 249 | WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); |
250 | 250 | ||
251 | ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; | ||
252 | |||
253 | WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); | 251 | WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); |
254 | 252 | ||
255 | ring->ready = true; | 253 | ring->ready = true; |
@@ -302,11 +300,9 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
302 | mask = RADEON_RESET_DMA1; | 300 | mask = RADEON_RESET_DMA1; |
303 | 301 | ||
304 | if (!(reset_mask & mask)) { | 302 | if (!(reset_mask & mask)) { |
305 | radeon_ring_lockup_update(ring); | 303 | radeon_ring_lockup_update(rdev, ring); |
306 | return false; | 304 | return false; |
307 | } | 305 | } |
308 | /* force ring activities */ | ||
309 | radeon_ring_force_activity(rdev, ring); | ||
310 | return radeon_ring_test_lockup(rdev, ring); | 306 | return radeon_ring_test_lockup(rdev, ring); |
311 | } | 307 | } |
312 | 308 | ||
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index ca814276b075..004c931606c4 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -4025,9 +4025,6 @@ static int ni_parse_power_table(struct radeon_device *rdev) | |||
4025 | power_info->pplib.ucNumStates, GFP_KERNEL); | 4025 | power_info->pplib.ucNumStates, GFP_KERNEL); |
4026 | if (!rdev->pm.dpm.ps) | 4026 | if (!rdev->pm.dpm.ps) |
4027 | return -ENOMEM; | 4027 | return -ENOMEM; |
4028 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
4029 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
4030 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
4031 | 4028 | ||
4032 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | 4029 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
4033 | power_state = (union pplib_power_state *) | 4030 | power_state = (union pplib_power_state *) |
@@ -4089,6 +4086,10 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4089 | pi->min_vddc_in_table = 0; | 4086 | pi->min_vddc_in_table = 0; |
4090 | pi->max_vddc_in_table = 0; | 4087 | pi->max_vddc_in_table = 0; |
4091 | 4088 | ||
4089 | ret = r600_get_platform_caps(rdev); | ||
4090 | if (ret) | ||
4091 | return ret; | ||
4092 | |||
4092 | ret = ni_parse_power_table(rdev); | 4093 | ret = ni_parse_power_table(rdev); |
4093 | if (ret) | 4094 | if (ret) |
4094 | return ret; | 4095 | return ret; |
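Both power-table parsers stop reading ulPlatformCaps, usBackbiasTime and usVoltageTime inline, and kv_dpm_init()/ni_dpm_init() now call r600_get_platform_caps() first, so those three fields are presumably filled by one shared helper instead of being duplicated per ASIC. A user-space sketch of what such a helper centralizes, based on the removed lines (the PPLib structures are abbreviated):

    #include <stdint.h>

    /* Abbreviated stand-ins for the ATOM PPLib fields the removed
     * per-driver lines were reading. */
    struct pplib_info {
            uint32_t ulPlatformCaps;        /* little-endian in the VBIOS */
            uint16_t usBackbiasTime;
            uint16_t usVoltageTime;
    };

    struct dpm_caps {
            uint32_t platform_caps;
            uint16_t backbias_response_time;
            uint16_t voltage_response_time;
    };

    /* Identity conversions, assuming a little-endian host. */
    static uint32_t le32_to_host(uint32_t v) { return v; }
    static uint16_t le16_to_host(uint16_t v) { return v; }

    /* What a shared r600_get_platform_caps()-style helper centralizes. */
    static int get_platform_caps(struct dpm_caps *dpm,
                                 const struct pplib_info *pplib)
    {
            dpm->platform_caps = le32_to_host(pplib->ulPlatformCaps);
            dpm->backbias_response_time = le16_to_host(pplib->usBackbiasTime);
            dpm->voltage_response_time = le16_to_host(pplib->usVoltageTime);
            return 0;
    }

    int main(void)
    {
            struct pplib_info info = { 0x1234, 100, 200 };
            struct dpm_caps dpm;
            return get_platform_caps(&dpm, &info);
    }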
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 3cc78bb66042..030f8e49c5ee 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1193,7 +1193,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1193 | 1193 | ||
1194 | WREG32(RADEON_CP_RB_CNTL, tmp); | 1194 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1195 | udelay(10); | 1195 | udelay(10); |
1196 | ring->rptr = RREG32(RADEON_CP_RB_RPTR); | ||
1197 | /* Set cp mode to bus mastering & enable cp*/ | 1196 | /* Set cp mode to bus mastering & enable cp*/ |
1198 | WREG32(RADEON_CP_CSQ_MODE, | 1197 | WREG32(RADEON_CP_CSQ_MODE, |
1199 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1198 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
@@ -1275,12 +1274,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
1275 | 1274 | ||
1276 | value = radeon_get_ib_value(p, idx); | 1275 | value = radeon_get_ib_value(p, idx); |
1277 | tmp = value & 0x003fffff; | 1276 | tmp = value & 0x003fffff; |
1278 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 1277 | tmp += (((u32)reloc->gpu_offset) >> 10); |
1279 | 1278 | ||
1280 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 1279 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1281 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1280 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1282 | tile_flags |= RADEON_DST_TILE_MACRO; | 1281 | tile_flags |= RADEON_DST_TILE_MACRO; |
1283 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | 1282 | if (reloc->tiling_flags & RADEON_TILING_MICRO) { |
1284 | if (reg == RADEON_SRC_PITCH_OFFSET) { | 1283 | if (reg == RADEON_SRC_PITCH_OFFSET) { |
1285 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | 1284 | DRM_ERROR("Cannot src blit from microtiled surface\n"); |
1286 | radeon_cs_dump_packet(p, pkt); | 1285 | radeon_cs_dump_packet(p, pkt); |
@@ -1326,7 +1325,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | |||
1326 | return r; | 1325 | return r; |
1327 | } | 1326 | } |
1328 | idx_value = radeon_get_ib_value(p, idx); | 1327 | idx_value = radeon_get_ib_value(p, idx); |
1329 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | 1328 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); |
1330 | 1329 | ||
1331 | track->arrays[i + 0].esize = idx_value >> 8; | 1330 | track->arrays[i + 0].esize = idx_value >> 8; |
1332 | track->arrays[i + 0].robj = reloc->robj; | 1331 | track->arrays[i + 0].robj = reloc->robj; |
@@ -1338,7 +1337,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | |||
1338 | radeon_cs_dump_packet(p, pkt); | 1337 | radeon_cs_dump_packet(p, pkt); |
1339 | return r; | 1338 | return r; |
1340 | } | 1339 | } |
1341 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); | 1340 | ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset); |
1342 | track->arrays[i + 1].robj = reloc->robj; | 1341 | track->arrays[i + 1].robj = reloc->robj; |
1343 | track->arrays[i + 1].esize = idx_value >> 24; | 1342 | track->arrays[i + 1].esize = idx_value >> 24; |
1344 | track->arrays[i + 1].esize &= 0x7F; | 1343 | track->arrays[i + 1].esize &= 0x7F; |
@@ -1352,7 +1351,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, | |||
1352 | return r; | 1351 | return r; |
1353 | } | 1352 | } |
1354 | idx_value = radeon_get_ib_value(p, idx); | 1353 | idx_value = radeon_get_ib_value(p, idx); |
1355 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | 1354 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); |
1356 | track->arrays[i + 0].robj = reloc->robj; | 1355 | track->arrays[i + 0].robj = reloc->robj; |
1357 | track->arrays[i + 0].esize = idx_value >> 8; | 1356 | track->arrays[i + 0].esize = idx_value >> 8; |
1358 | track->arrays[i + 0].esize &= 0x7F; | 1357 | track->arrays[i + 0].esize &= 0x7F; |
@@ -1595,7 +1594,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1595 | track->zb.robj = reloc->robj; | 1594 | track->zb.robj = reloc->robj; |
1596 | track->zb.offset = idx_value; | 1595 | track->zb.offset = idx_value; |
1597 | track->zb_dirty = true; | 1596 | track->zb_dirty = true; |
1598 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1597 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1599 | break; | 1598 | break; |
1600 | case RADEON_RB3D_COLOROFFSET: | 1599 | case RADEON_RB3D_COLOROFFSET: |
1601 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); | 1600 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
@@ -1608,7 +1607,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1608 | track->cb[0].robj = reloc->robj; | 1607 | track->cb[0].robj = reloc->robj; |
1609 | track->cb[0].offset = idx_value; | 1608 | track->cb[0].offset = idx_value; |
1610 | track->cb_dirty = true; | 1609 | track->cb_dirty = true; |
1611 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1610 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1612 | break; | 1611 | break; |
1613 | case RADEON_PP_TXOFFSET_0: | 1612 | case RADEON_PP_TXOFFSET_0: |
1614 | case RADEON_PP_TXOFFSET_1: | 1613 | case RADEON_PP_TXOFFSET_1: |
@@ -1622,16 +1621,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1622 | return r; | 1621 | return r; |
1623 | } | 1622 | } |
1624 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 1623 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1625 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1624 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1626 | tile_flags |= RADEON_TXO_MACRO_TILE; | 1625 | tile_flags |= RADEON_TXO_MACRO_TILE; |
1627 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1626 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
1628 | tile_flags |= RADEON_TXO_MICRO_TILE_X2; | 1627 | tile_flags |= RADEON_TXO_MICRO_TILE_X2; |
1629 | 1628 | ||
1630 | tmp = idx_value & ~(0x7 << 2); | 1629 | tmp = idx_value & ~(0x7 << 2); |
1631 | tmp |= tile_flags; | 1630 | tmp |= tile_flags; |
1632 | ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); | 1631 | ib[idx] = tmp + ((u32)reloc->gpu_offset); |
1633 | } else | 1632 | } else |
1634 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1633 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1635 | track->textures[i].robj = reloc->robj; | 1634 | track->textures[i].robj = reloc->robj; |
1636 | track->tex_dirty = true; | 1635 | track->tex_dirty = true; |
1637 | break; | 1636 | break; |
@@ -1649,7 +1648,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1649 | return r; | 1648 | return r; |
1650 | } | 1649 | } |
1651 | track->textures[0].cube_info[i].offset = idx_value; | 1650 | track->textures[0].cube_info[i].offset = idx_value; |
1652 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1651 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1653 | track->textures[0].cube_info[i].robj = reloc->robj; | 1652 | track->textures[0].cube_info[i].robj = reloc->robj; |
1654 | track->tex_dirty = true; | 1653 | track->tex_dirty = true; |
1655 | break; | 1654 | break; |
@@ -1667,7 +1666,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1667 | return r; | 1666 | return r; |
1668 | } | 1667 | } |
1669 | track->textures[1].cube_info[i].offset = idx_value; | 1668 | track->textures[1].cube_info[i].offset = idx_value; |
1670 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1669 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1671 | track->textures[1].cube_info[i].robj = reloc->robj; | 1670 | track->textures[1].cube_info[i].robj = reloc->robj; |
1672 | track->tex_dirty = true; | 1671 | track->tex_dirty = true; |
1673 | break; | 1672 | break; |
@@ -1685,7 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1685 | return r; | 1684 | return r; |
1686 | } | 1685 | } |
1687 | track->textures[2].cube_info[i].offset = idx_value; | 1686 | track->textures[2].cube_info[i].offset = idx_value; |
1688 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1687 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1689 | track->textures[2].cube_info[i].robj = reloc->robj; | 1688 | track->textures[2].cube_info[i].robj = reloc->robj; |
1690 | track->tex_dirty = true; | 1689 | track->tex_dirty = true; |
1691 | break; | 1690 | break; |
@@ -1703,9 +1702,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1703 | return r; | 1702 | return r; |
1704 | } | 1703 | } |
1705 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 1704 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1706 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1705 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1707 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | 1706 | tile_flags |= RADEON_COLOR_TILE_ENABLE; |
1708 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1707 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
1709 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 1708 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
1710 | 1709 | ||
1711 | tmp = idx_value & ~(0x7 << 16); | 1710 | tmp = idx_value & ~(0x7 << 16); |
@@ -1773,7 +1772,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1773 | radeon_cs_dump_packet(p, pkt); | 1772 | radeon_cs_dump_packet(p, pkt); |
1774 | return r; | 1773 | return r; |
1775 | } | 1774 | } |
1776 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1775 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1777 | break; | 1776 | break; |
1778 | case RADEON_PP_CNTL: | 1777 | case RADEON_PP_CNTL: |
1779 | { | 1778 | { |
@@ -1933,7 +1932,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1933 | radeon_cs_dump_packet(p, pkt); | 1932 | radeon_cs_dump_packet(p, pkt); |
1934 | return r; | 1933 | return r; |
1935 | } | 1934 | } |
1936 | ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); | 1935 | ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); |
1937 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); | 1936 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1938 | if (r) { | 1937 | if (r) { |
1939 | return r; | 1938 | return r; |
@@ -1947,7 +1946,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1947 | radeon_cs_dump_packet(p, pkt); | 1946 | radeon_cs_dump_packet(p, pkt); |
1948 | return r; | 1947 | return r; |
1949 | } | 1948 | } |
1950 | ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); | 1949 | ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); |
1951 | track->num_arrays = 1; | 1950 | track->num_arrays = 1; |
1952 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); | 1951 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); |
1953 | 1952 | ||
@@ -2523,11 +2522,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
2523 | 2522 | ||
2524 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); | 2523 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); |
2525 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { | 2524 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { |
2526 | radeon_ring_lockup_update(ring); | 2525 | radeon_ring_lockup_update(rdev, ring); |
2527 | return false; | 2526 | return false; |
2528 | } | 2527 | } |
2529 | /* force CP activities */ | ||
2530 | radeon_ring_force_activity(rdev, ring); | ||
2531 | return radeon_ring_test_lockup(rdev, ring); | 2528 | return radeon_ring_test_lockup(rdev, ring); |
2532 | } | 2529 | } |
2533 | 2530 | ||
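r100.c gets the same reloc->lobj flattening, but its patch math differs from evergreen: r100_reloc_pitch_offset() keeps the low 22 bits of the packet dword as the offset field, adds the buffer's GPU address in 1 KiB units (gpu_offset >> 10) and ORs in tiling flags, leaving the pitch bits above untouched. A standalone sketch of that dword patch (the flag position and the recombination with the upper bits are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define DST_TILE_MACRO (1u << 30)       /* illustrative flag position */

    /* Patch an r100-style pitch/offset dword: the offset lives in the low
     * 22 bits in 1 KiB units; the bits above carry pitch and tile flags. */
    static uint32_t patch_pitch_offset(uint32_t value, uint64_t gpu_offset,
                                       uint32_t tile_flags)
    {
            uint32_t offset = (value & 0x003fffff) +
                              (uint32_t)(gpu_offset >> 10);

            return (value & ~0x003fffffu) | (offset & 0x003fffff) | tile_flags;
    }

    int main(void)
    {
            /* BO at 1 MiB, packet offset field 0x123, macro-tiled target */
            printf("0x%08x\n",
                   patch_pitch_offset(0x00400123, 0x100000, DST_TILE_MACRO));
            return 0;
    }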
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index b3807edb1936..58f0473aa73f 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
185 | track->zb.robj = reloc->robj; | 185 | track->zb.robj = reloc->robj; |
186 | track->zb.offset = idx_value; | 186 | track->zb.offset = idx_value; |
187 | track->zb_dirty = true; | 187 | track->zb_dirty = true; |
188 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 188 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
189 | break; | 189 | break; |
190 | case RADEON_RB3D_COLOROFFSET: | 190 | case RADEON_RB3D_COLOROFFSET: |
191 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); | 191 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
@@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
198 | track->cb[0].robj = reloc->robj; | 198 | track->cb[0].robj = reloc->robj; |
199 | track->cb[0].offset = idx_value; | 199 | track->cb[0].offset = idx_value; |
200 | track->cb_dirty = true; | 200 | track->cb_dirty = true; |
201 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 201 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
202 | break; | 202 | break; |
203 | case R200_PP_TXOFFSET_0: | 203 | case R200_PP_TXOFFSET_0: |
204 | case R200_PP_TXOFFSET_1: | 204 | case R200_PP_TXOFFSET_1: |
@@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
215 | return r; | 215 | return r; |
216 | } | 216 | } |
217 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 217 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
218 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 218 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
219 | tile_flags |= R200_TXO_MACRO_TILE; | 219 | tile_flags |= R200_TXO_MACRO_TILE; |
220 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 220 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
221 | tile_flags |= R200_TXO_MICRO_TILE; | 221 | tile_flags |= R200_TXO_MICRO_TILE; |
222 | 222 | ||
223 | tmp = idx_value & ~(0x7 << 2); | 223 | tmp = idx_value & ~(0x7 << 2); |
224 | tmp |= tile_flags; | 224 | tmp |= tile_flags; |
225 | ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); | 225 | ib[idx] = tmp + ((u32)reloc->gpu_offset); |
226 | } else | 226 | } else |
227 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 227 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
228 | track->textures[i].robj = reloc->robj; | 228 | track->textures[i].robj = reloc->robj; |
229 | track->tex_dirty = true; | 229 | track->tex_dirty = true; |
230 | break; | 230 | break; |
@@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
268 | return r; | 268 | return r; |
269 | } | 269 | } |
270 | track->textures[i].cube_info[face - 1].offset = idx_value; | 270 | track->textures[i].cube_info[face - 1].offset = idx_value; |
271 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 271 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
272 | track->textures[i].cube_info[face - 1].robj = reloc->robj; | 272 | track->textures[i].cube_info[face - 1].robj = reloc->robj; |
273 | track->tex_dirty = true; | 273 | track->tex_dirty = true; |
274 | break; | 274 | break; |
@@ -287,9 +287,9 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
287 | } | 287 | } |
288 | 288 | ||
289 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 289 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
290 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 290 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
291 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | 291 | tile_flags |= RADEON_COLOR_TILE_ENABLE; |
292 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 292 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
293 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 293 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
294 | 294 | ||
295 | tmp = idx_value & ~(0x7 << 16); | 295 | tmp = idx_value & ~(0x7 << 16); |
@@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
362 | radeon_cs_dump_packet(p, pkt); | 362 | radeon_cs_dump_packet(p, pkt); |
363 | return r; | 363 | return r; |
364 | } | 364 | } |
365 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 365 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
366 | break; | 366 | break; |
367 | case RADEON_PP_CNTL: | 367 | case RADEON_PP_CNTL: |
368 | { | 368 | { |
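The r200 texture-offset hunk above shows the recurring tiling pattern: texture bases are aligned, so the low bits of the offset dword double as flag bits, and unless userspace passed RADEON_CS_KEEP_TILING_FLAGS the kernel rebuilds those bits from the BO's verified tiling state before patching in the GPU offset. Condensed from the hunk (the alignment rationale is an inference, not stated in the commit):

    u32 tile_flags = 0, tmp;

    if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
        if (reloc->tiling_flags & RADEON_TILING_MACRO)
            tile_flags |= R200_TXO_MACRO_TILE;
        if (reloc->tiling_flags & RADEON_TILING_MICRO)
            tile_flags |= R200_TXO_MICRO_TILE;

        tmp = idx_value & ~(0x7 << 2);  /* clear the tiling field, bits [4:2] */
        tmp |= tile_flags;              /* insert the kernel-checked flags    */
        ib[idx] = tmp + (u32)reloc->gpu_offset;
    } else {
        ib[idx] = idx_value + (u32)reloc->gpu_offset;
    }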
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 0b658b34b33a..206caf9700b7 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -640,7 +640,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
640 | track->cb[i].robj = reloc->robj; | 640 | track->cb[i].robj = reloc->robj; |
641 | track->cb[i].offset = idx_value; | 641 | track->cb[i].offset = idx_value; |
642 | track->cb_dirty = true; | 642 | track->cb_dirty = true; |
643 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 643 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
644 | break; | 644 | break; |
645 | case R300_ZB_DEPTHOFFSET: | 645 | case R300_ZB_DEPTHOFFSET: |
646 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); | 646 | r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
@@ -653,7 +653,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
653 | track->zb.robj = reloc->robj; | 653 | track->zb.robj = reloc->robj; |
654 | track->zb.offset = idx_value; | 654 | track->zb.offset = idx_value; |
655 | track->zb_dirty = true; | 655 | track->zb_dirty = true; |
656 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 656 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
657 | break; | 657 | break; |
658 | case R300_TX_OFFSET_0: | 658 | case R300_TX_OFFSET_0: |
659 | case R300_TX_OFFSET_0+4: | 659 | case R300_TX_OFFSET_0+4: |
@@ -682,16 +682,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
682 | 682 | ||
683 | if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { | 683 | if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { |
684 | ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ | 684 | ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ |
685 | ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); | 685 | ((idx_value & ~31) + (u32)reloc->gpu_offset); |
686 | } else { | 686 | } else { |
687 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 687 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
688 | tile_flags |= R300_TXO_MACRO_TILE; | 688 | tile_flags |= R300_TXO_MACRO_TILE; |
689 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 689 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
690 | tile_flags |= R300_TXO_MICRO_TILE; | 690 | tile_flags |= R300_TXO_MICRO_TILE; |
691 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | 691 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
692 | tile_flags |= R300_TXO_MICRO_TILE_SQUARE; | 692 | tile_flags |= R300_TXO_MICRO_TILE_SQUARE; |
693 | 693 | ||
694 | tmp = idx_value + ((u32)reloc->lobj.gpu_offset); | 694 | tmp = idx_value + ((u32)reloc->gpu_offset); |
695 | tmp |= tile_flags; | 695 | tmp |= tile_flags; |
696 | ib[idx] = tmp; | 696 | ib[idx] = tmp; |
697 | } | 697 | } |
@@ -753,11 +753,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
753 | return r; | 753 | return r; |
754 | } | 754 | } |
755 | 755 | ||
756 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 756 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
757 | tile_flags |= R300_COLOR_TILE_ENABLE; | 757 | tile_flags |= R300_COLOR_TILE_ENABLE; |
758 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 758 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
759 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | 759 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
760 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | 760 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
761 | tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; | 761 | tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; |
762 | 762 | ||
763 | tmp = idx_value & ~(0x7 << 16); | 763 | tmp = idx_value & ~(0x7 << 16); |
@@ -838,11 +838,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
838 | return r; | 838 | return r; |
839 | } | 839 | } |
840 | 840 | ||
841 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 841 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
842 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; | 842 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; |
843 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 843 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
844 | tile_flags |= R300_DEPTHMICROTILE_TILED; | 844 | tile_flags |= R300_DEPTHMICROTILE_TILED; |
845 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) | 845 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
846 | tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; | 846 | tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; |
847 | 847 | ||
848 | tmp = idx_value & ~(0x7 << 16); | 848 | tmp = idx_value & ~(0x7 << 16); |
@@ -1052,7 +1052,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1052 | radeon_cs_dump_packet(p, pkt); | 1052 | radeon_cs_dump_packet(p, pkt); |
1053 | return r; | 1053 | return r; |
1054 | } | 1054 | } |
1055 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1055 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1056 | break; | 1056 | break; |
1057 | case 0x4e0c: | 1057 | case 0x4e0c: |
1058 | /* RB3D_COLOR_CHANNEL_MASK */ | 1058 | /* RB3D_COLOR_CHANNEL_MASK */ |
@@ -1097,7 +1097,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1097 | track->aa.robj = reloc->robj; | 1097 | track->aa.robj = reloc->robj; |
1098 | track->aa.offset = idx_value; | 1098 | track->aa.offset = idx_value; |
1099 | track->aa_dirty = true; | 1099 | track->aa_dirty = true; |
1100 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1100 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1101 | break; | 1101 | break; |
1102 | case R300_RB3D_AARESOLVE_PITCH: | 1102 | case R300_RB3D_AARESOLVE_PITCH: |
1103 | track->aa.pitch = idx_value & 0x3FFE; | 1103 | track->aa.pitch = idx_value & 0x3FFE; |
@@ -1162,7 +1162,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1162 | radeon_cs_dump_packet(p, pkt); | 1162 | radeon_cs_dump_packet(p, pkt); |
1163 | return r; | 1163 | return r; |
1164 | } | 1164 | } |
1165 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); | 1165 | ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); |
1166 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); | 1166 | r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); |
1167 | if (r) { | 1167 | if (r) { |
1168 | return r; | 1168 | return r; |
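When userspace keeps its own tiling flags, r300's TX_OFFSET path still has to relocate the address while leaving the five flag bits untouched, which is what the (idx_value & 31) | ((idx_value & ~31) + gpu_offset) expression above does. A worked example with hypothetical values:

    u32 idx_value  = 0x00001013;  /* offset 0x1000 with flag bits 0x13   */
    u32 gpu_offset = 0x80000000;  /* where the BO landed after validation */

    u32 patched = (idx_value & 31)                  /* keep flag bits [4:0] */
                | ((idx_value & ~31) + gpu_offset); /* relocate the rest    */
    /* patched == 0x80001013: new base address, original flags preserved */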
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 647ef4079217..6e887d004eba 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1748,11 +1748,9 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
1748 | if (!(reset_mask & (RADEON_RESET_GFX | | 1748 | if (!(reset_mask & (RADEON_RESET_GFX | |
1749 | RADEON_RESET_COMPUTE | | 1749 | RADEON_RESET_COMPUTE | |
1750 | RADEON_RESET_CP))) { | 1750 | RADEON_RESET_CP))) { |
1751 | radeon_ring_lockup_update(ring); | 1751 | radeon_ring_lockup_update(rdev, ring); |
1752 | return false; | 1752 | return false; |
1753 | } | 1753 | } |
1754 | /* force CP activities */ | ||
1755 | radeon_ring_force_activity(rdev, ring); | ||
1756 | return radeon_ring_test_lockup(rdev, ring); | 1754 | return radeon_ring_test_lockup(rdev, ring); |
1757 | } | 1755 | } |
1758 | 1756 | ||
@@ -2604,8 +2602,6 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2604 | WREG32(CP_RB_BASE, ring->gpu_addr >> 8); | 2602 | WREG32(CP_RB_BASE, ring->gpu_addr >> 8); |
2605 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 2603 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
2606 | 2604 | ||
2607 | ring->rptr = RREG32(CP_RB_RPTR); | ||
2608 | |||
2609 | r600_cp_start(rdev); | 2605 | r600_cp_start(rdev); |
2610 | ring->ready = true; | 2606 | ring->ready = true; |
2611 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); | 2607 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
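r600_cp_resume() stops seeding ring->rptr from CP_RB_RPTR because, per the radeon_ring change later in this merge, the read pointer is no longer cached in the ring structure at all. Consumers fetch it on demand instead; a hedged sketch of the free-space accounting that results (radeon_ring_get_rptr() is assumed here to be the asic-dispatch accessor):

    /* read the hw rptr fresh rather than trusting a cached copy that
     * could go stale across a suspend or reset */
    u32 rptr = radeon_ring_get_rptr(rdev, ring);

    ring->ring_free_dw = rptr + (ring->ring_size / 4);
    ring->ring_free_dw -= ring->wptr;
    ring->ring_free_dw &= ring->ptr_mask;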
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 2812c7d1ae6f..12511bb5fd6f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -1022,7 +1022,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1022 | "0x%04X\n", reg); | 1022 | "0x%04X\n", reg); |
1023 | return -EINVAL; | 1023 | return -EINVAL; |
1024 | } | 1024 | } |
1025 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1025 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1026 | break; | 1026 | break; |
1027 | case SQ_CONFIG: | 1027 | case SQ_CONFIG: |
1028 | track->sq_config = radeon_get_ib_value(p, idx); | 1028 | track->sq_config = radeon_get_ib_value(p, idx); |
@@ -1043,7 +1043,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1043 | track->db_depth_info = radeon_get_ib_value(p, idx); | 1043 | track->db_depth_info = radeon_get_ib_value(p, idx); |
1044 | ib[idx] &= C_028010_ARRAY_MODE; | 1044 | ib[idx] &= C_028010_ARRAY_MODE; |
1045 | track->db_depth_info &= C_028010_ARRAY_MODE; | 1045 | track->db_depth_info &= C_028010_ARRAY_MODE; |
1046 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1046 | if (reloc->tiling_flags & RADEON_TILING_MACRO) { |
1047 | ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); | 1047 | ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); |
1048 | track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); | 1048 | track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); |
1049 | } else { | 1049 | } else { |
@@ -1084,9 +1084,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1084 | } | 1084 | } |
1085 | tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; | 1085 | tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; |
1086 | track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; | 1086 | track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; |
1087 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1087 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1088 | track->vgt_strmout_bo[tmp] = reloc->robj; | 1088 | track->vgt_strmout_bo[tmp] = reloc->robj; |
1089 | track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; | 1089 | track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset; |
1090 | track->streamout_dirty = true; | 1090 | track->streamout_dirty = true; |
1091 | break; | 1091 | break; |
1092 | case VGT_STRMOUT_BUFFER_SIZE_0: | 1092 | case VGT_STRMOUT_BUFFER_SIZE_0: |
@@ -1105,7 +1105,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1105 | "0x%04X\n", reg); | 1105 | "0x%04X\n", reg); |
1106 | return -EINVAL; | 1106 | return -EINVAL; |
1107 | } | 1107 | } |
1108 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1108 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1109 | break; | 1109 | break; |
1110 | case R_028238_CB_TARGET_MASK: | 1110 | case R_028238_CB_TARGET_MASK: |
1111 | track->cb_target_mask = radeon_get_ib_value(p, idx); | 1111 | track->cb_target_mask = radeon_get_ib_value(p, idx); |
@@ -1142,10 +1142,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1142 | } | 1142 | } |
1143 | tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; | 1143 | tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; |
1144 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | 1144 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); |
1145 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1145 | if (reloc->tiling_flags & RADEON_TILING_MACRO) { |
1146 | ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); | 1146 | ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); |
1147 | track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); | 1147 | track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); |
1148 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | 1148 | } else if (reloc->tiling_flags & RADEON_TILING_MICRO) { |
1149 | ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); | 1149 | ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); |
1150 | track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); | 1150 | track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); |
1151 | } | 1151 | } |
@@ -1214,7 +1214,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1214 | } | 1214 | } |
1215 | track->cb_color_frag_bo[tmp] = reloc->robj; | 1215 | track->cb_color_frag_bo[tmp] = reloc->robj; |
1216 | track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; | 1216 | track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; |
1217 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1217 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1218 | } | 1218 | } |
1219 | if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { | 1219 | if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { |
1220 | track->cb_dirty = true; | 1220 | track->cb_dirty = true; |
@@ -1245,7 +1245,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1245 | } | 1245 | } |
1246 | track->cb_color_tile_bo[tmp] = reloc->robj; | 1246 | track->cb_color_tile_bo[tmp] = reloc->robj; |
1247 | track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; | 1247 | track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; |
1248 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1248 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1249 | } | 1249 | } |
1250 | if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { | 1250 | if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { |
1251 | track->cb_dirty = true; | 1251 | track->cb_dirty = true; |
@@ -1281,10 +1281,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1281 | } | 1281 | } |
1282 | tmp = (reg - CB_COLOR0_BASE) / 4; | 1282 | tmp = (reg - CB_COLOR0_BASE) / 4; |
1283 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; | 1283 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; |
1284 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1284 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1285 | track->cb_color_base_last[tmp] = ib[idx]; | 1285 | track->cb_color_base_last[tmp] = ib[idx]; |
1286 | track->cb_color_bo[tmp] = reloc->robj; | 1286 | track->cb_color_bo[tmp] = reloc->robj; |
1287 | track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; | 1287 | track->cb_color_bo_mc[tmp] = reloc->gpu_offset; |
1288 | track->cb_dirty = true; | 1288 | track->cb_dirty = true; |
1289 | break; | 1289 | break; |
1290 | case DB_DEPTH_BASE: | 1290 | case DB_DEPTH_BASE: |
@@ -1295,9 +1295,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1295 | return -EINVAL; | 1295 | return -EINVAL; |
1296 | } | 1296 | } |
1297 | track->db_offset = radeon_get_ib_value(p, idx) << 8; | 1297 | track->db_offset = radeon_get_ib_value(p, idx) << 8; |
1298 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1298 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1299 | track->db_bo = reloc->robj; | 1299 | track->db_bo = reloc->robj; |
1300 | track->db_bo_mc = reloc->lobj.gpu_offset; | 1300 | track->db_bo_mc = reloc->gpu_offset; |
1301 | track->db_dirty = true; | 1301 | track->db_dirty = true; |
1302 | break; | 1302 | break; |
1303 | case DB_HTILE_DATA_BASE: | 1303 | case DB_HTILE_DATA_BASE: |
@@ -1308,7 +1308,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1308 | return -EINVAL; | 1308 | return -EINVAL; |
1309 | } | 1309 | } |
1310 | track->htile_offset = radeon_get_ib_value(p, idx) << 8; | 1310 | track->htile_offset = radeon_get_ib_value(p, idx) << 8; |
1311 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1311 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1312 | track->htile_bo = reloc->robj; | 1312 | track->htile_bo = reloc->robj; |
1313 | track->db_dirty = true; | 1313 | track->db_dirty = true; |
1314 | break; | 1314 | break; |
@@ -1377,7 +1377,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1377 | "0x%04X\n", reg); | 1377 | "0x%04X\n", reg); |
1378 | return -EINVAL; | 1378 | return -EINVAL; |
1379 | } | 1379 | } |
1380 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1380 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1381 | break; | 1381 | break; |
1382 | case SX_MEMORY_EXPORT_BASE: | 1382 | case SX_MEMORY_EXPORT_BASE: |
1383 | r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); | 1383 | r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); |
@@ -1386,7 +1386,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1386 | "0x%04X\n", reg); | 1386 | "0x%04X\n", reg); |
1387 | return -EINVAL; | 1387 | return -EINVAL; |
1388 | } | 1388 | } |
1389 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1389 | ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1390 | break; | 1390 | break; |
1391 | case SX_MISC: | 1391 | case SX_MISC: |
1392 | track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; | 1392 | track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; |
@@ -1672,7 +1672,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1672 | return -EINVAL; | 1672 | return -EINVAL; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | offset = reloc->lobj.gpu_offset + | 1675 | offset = reloc->gpu_offset + |
1676 | (idx_value & 0xfffffff0) + | 1676 | (idx_value & 0xfffffff0) + |
1677 | ((u64)(tmp & 0xff) << 32); | 1677 | ((u64)(tmp & 0xff) << 32); |
1678 | 1678 | ||
@@ -1713,7 +1713,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1713 | return -EINVAL; | 1713 | return -EINVAL; |
1714 | } | 1714 | } |
1715 | 1715 | ||
1716 | offset = reloc->lobj.gpu_offset + | 1716 | offset = reloc->gpu_offset + |
1717 | idx_value + | 1717 | idx_value + |
1718 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | 1718 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
1719 | 1719 | ||
@@ -1765,7 +1765,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1765 | return -EINVAL; | 1765 | return -EINVAL; |
1766 | } | 1766 | } |
1767 | 1767 | ||
1768 | offset = reloc->lobj.gpu_offset + | 1768 | offset = reloc->gpu_offset + |
1769 | (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + | 1769 | (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + |
1770 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 1770 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
1771 | 1771 | ||
@@ -1805,7 +1805,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1805 | tmp = radeon_get_ib_value(p, idx) + | 1805 | tmp = radeon_get_ib_value(p, idx) + |
1806 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | 1806 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
1807 | 1807 | ||
1808 | offset = reloc->lobj.gpu_offset + tmp; | 1808 | offset = reloc->gpu_offset + tmp; |
1809 | 1809 | ||
1810 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { | 1810 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { |
1811 | dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", | 1811 | dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", |
@@ -1835,7 +1835,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1835 | tmp = radeon_get_ib_value(p, idx+2) + | 1835 | tmp = radeon_get_ib_value(p, idx+2) + |
1836 | ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); | 1836 | ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); |
1837 | 1837 | ||
1838 | offset = reloc->lobj.gpu_offset + tmp; | 1838 | offset = reloc->gpu_offset + tmp; |
1839 | 1839 | ||
1840 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { | 1840 | if ((tmp + size) > radeon_bo_size(reloc->robj)) { |
1841 | dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", | 1841 | dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", |
@@ -1861,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1861 | DRM_ERROR("bad SURFACE_SYNC\n"); | 1861 | DRM_ERROR("bad SURFACE_SYNC\n"); |
1862 | return -EINVAL; | 1862 | return -EINVAL; |
1863 | } | 1863 | } |
1864 | ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1864 | ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1865 | } | 1865 | } |
1866 | break; | 1866 | break; |
1867 | case PACKET3_EVENT_WRITE: | 1867 | case PACKET3_EVENT_WRITE: |
@@ -1877,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1877 | DRM_ERROR("bad EVENT_WRITE\n"); | 1877 | DRM_ERROR("bad EVENT_WRITE\n"); |
1878 | return -EINVAL; | 1878 | return -EINVAL; |
1879 | } | 1879 | } |
1880 | offset = reloc->lobj.gpu_offset + | 1880 | offset = reloc->gpu_offset + |
1881 | (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + | 1881 | (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
1882 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 1882 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
1883 | 1883 | ||
@@ -1899,7 +1899,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1899 | return -EINVAL; | 1899 | return -EINVAL; |
1900 | } | 1900 | } |
1901 | 1901 | ||
1902 | offset = reloc->lobj.gpu_offset + | 1902 | offset = reloc->gpu_offset + |
1903 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | 1903 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
1904 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | 1904 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
1905 | 1905 | ||
@@ -1964,11 +1964,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1964 | DRM_ERROR("bad SET_RESOURCE\n"); | 1964 | DRM_ERROR("bad SET_RESOURCE\n"); |
1965 | return -EINVAL; | 1965 | return -EINVAL; |
1966 | } | 1966 | } |
1967 | base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1967 | base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1968 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 1968 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1969 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1969 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
1970 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); | 1970 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); |
1971 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1971 | else if (reloc->tiling_flags & RADEON_TILING_MICRO) |
1972 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); | 1972 | ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); |
1973 | } | 1973 | } |
1974 | texture = reloc->robj; | 1974 | texture = reloc->robj; |
@@ -1978,13 +1978,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1978 | DRM_ERROR("bad SET_RESOURCE\n"); | 1978 | DRM_ERROR("bad SET_RESOURCE\n"); |
1979 | return -EINVAL; | 1979 | return -EINVAL; |
1980 | } | 1980 | } |
1981 | mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1981 | mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
1982 | mipmap = reloc->robj; | 1982 | mipmap = reloc->robj; |
1983 | r = r600_check_texture_resource(p, idx+(i*7)+1, | 1983 | r = r600_check_texture_resource(p, idx+(i*7)+1, |
1984 | texture, mipmap, | 1984 | texture, mipmap, |
1985 | base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), | 1985 | base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), |
1986 | mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), | 1986 | mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), |
1987 | reloc->lobj.tiling_flags); | 1987 | reloc->tiling_flags); |
1988 | if (r) | 1988 | if (r) |
1989 | return r; | 1989 | return r; |
1990 | ib[idx+1+(i*7)+2] += base_offset; | 1990 | ib[idx+1+(i*7)+2] += base_offset; |
@@ -2008,7 +2008,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2008 | ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; | 2008 | ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | offset64 = reloc->lobj.gpu_offset + offset; | 2011 | offset64 = reloc->gpu_offset + offset; |
2012 | ib[idx+1+(i*8)+0] = offset64; | 2012 | ib[idx+1+(i*8)+0] = offset64; |
2013 | ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | | 2013 | ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | |
2014 | (upper_32_bits(offset64) & 0xff); | 2014 | (upper_32_bits(offset64) & 0xff); |
@@ -2118,7 +2118,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2118 | offset + 4, radeon_bo_size(reloc->robj)); | 2118 | offset + 4, radeon_bo_size(reloc->robj)); |
2119 | return -EINVAL; | 2119 | return -EINVAL; |
2120 | } | 2120 | } |
2121 | ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 2121 | ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); |
2122 | } | 2122 | } |
2123 | break; | 2123 | break; |
2124 | case PACKET3_SURFACE_BASE_UPDATE: | 2124 | case PACKET3_SURFACE_BASE_UPDATE: |
@@ -2151,7 +2151,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2151 | offset + 4, radeon_bo_size(reloc->robj)); | 2151 | offset + 4, radeon_bo_size(reloc->robj)); |
2152 | return -EINVAL; | 2152 | return -EINVAL; |
2153 | } | 2153 | } |
2154 | offset += reloc->lobj.gpu_offset; | 2154 | offset += reloc->gpu_offset; |
2155 | ib[idx+1] = offset; | 2155 | ib[idx+1] = offset; |
2156 | ib[idx+2] = upper_32_bits(offset) & 0xff; | 2156 | ib[idx+2] = upper_32_bits(offset) & 0xff; |
2157 | } | 2157 | } |
@@ -2170,7 +2170,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2170 | offset + 4, radeon_bo_size(reloc->robj)); | 2170 | offset + 4, radeon_bo_size(reloc->robj)); |
2171 | return -EINVAL; | 2171 | return -EINVAL; |
2172 | } | 2172 | } |
2173 | offset += reloc->lobj.gpu_offset; | 2173 | offset += reloc->gpu_offset; |
2174 | ib[idx+3] = offset; | 2174 | ib[idx+3] = offset; |
2175 | ib[idx+4] = upper_32_bits(offset) & 0xff; | 2175 | ib[idx+4] = upper_32_bits(offset) & 0xff; |
2176 | } | 2176 | } |
@@ -2199,7 +2199,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2199 | offset + 8, radeon_bo_size(reloc->robj)); | 2199 | offset + 8, radeon_bo_size(reloc->robj)); |
2200 | return -EINVAL; | 2200 | return -EINVAL; |
2201 | } | 2201 | } |
2202 | offset += reloc->lobj.gpu_offset; | 2202 | offset += reloc->gpu_offset; |
2203 | ib[idx+0] = offset; | 2203 | ib[idx+0] = offset; |
2204 | ib[idx+1] = upper_32_bits(offset) & 0xff; | 2204 | ib[idx+1] = upper_32_bits(offset) & 0xff; |
2205 | break; | 2205 | break; |
@@ -2224,7 +2224,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2224 | offset + 4, radeon_bo_size(reloc->robj)); | 2224 | offset + 4, radeon_bo_size(reloc->robj)); |
2225 | return -EINVAL; | 2225 | return -EINVAL; |
2226 | } | 2226 | } |
2227 | offset += reloc->lobj.gpu_offset; | 2227 | offset += reloc->gpu_offset; |
2228 | ib[idx+1] = offset; | 2228 | ib[idx+1] = offset; |
2229 | ib[idx+2] = upper_32_bits(offset) & 0xff; | 2229 | ib[idx+2] = upper_32_bits(offset) & 0xff; |
2230 | } else { | 2230 | } else { |
@@ -2248,7 +2248,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2248 | offset + 4, radeon_bo_size(reloc->robj)); | 2248 | offset + 4, radeon_bo_size(reloc->robj)); |
2249 | return -EINVAL; | 2249 | return -EINVAL; |
2250 | } | 2250 | } |
2251 | offset += reloc->lobj.gpu_offset; | 2251 | offset += reloc->gpu_offset; |
2252 | ib[idx+3] = offset; | 2252 | ib[idx+3] = offset; |
2253 | ib[idx+4] = upper_32_bits(offset) & 0xff; | 2253 | ib[idx+4] = upper_32_bits(offset) & 0xff; |
2254 | } else { | 2254 | } else { |
@@ -2505,14 +2505,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2505 | dst_offset = radeon_get_ib_value(p, idx+1); | 2505 | dst_offset = radeon_get_ib_value(p, idx+1); |
2506 | dst_offset <<= 8; | 2506 | dst_offset <<= 8; |
2507 | 2507 | ||
2508 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2508 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2509 | p->idx += count + 5; | 2509 | p->idx += count + 5; |
2510 | } else { | 2510 | } else { |
2511 | dst_offset = radeon_get_ib_value(p, idx+1); | 2511 | dst_offset = radeon_get_ib_value(p, idx+1); |
2512 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; | 2512 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
2513 | 2513 | ||
2514 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2514 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2515 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2515 | ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2516 | p->idx += count + 3; | 2516 | p->idx += count + 3; |
2517 | } | 2517 | } |
2518 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 2518 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
@@ -2539,22 +2539,22 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2539 | /* tiled src, linear dst */ | 2539 | /* tiled src, linear dst */ |
2540 | src_offset = radeon_get_ib_value(p, idx+1); | 2540 | src_offset = radeon_get_ib_value(p, idx+1); |
2541 | src_offset <<= 8; | 2541 | src_offset <<= 8; |
2542 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2542 | ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); |
2543 | 2543 | ||
2544 | dst_offset = radeon_get_ib_value(p, idx+5); | 2544 | dst_offset = radeon_get_ib_value(p, idx+5); |
2545 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; | 2545 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
2546 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2546 | ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2547 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2547 | ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2548 | } else { | 2548 | } else { |
2549 | /* linear src, tiled dst */ | 2549 | /* linear src, tiled dst */ |
2550 | src_offset = radeon_get_ib_value(p, idx+5); | 2550 | src_offset = radeon_get_ib_value(p, idx+5); |
2551 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; | 2551 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
2552 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2552 | ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2553 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2553 | ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2554 | 2554 | ||
2555 | dst_offset = radeon_get_ib_value(p, idx+1); | 2555 | dst_offset = radeon_get_ib_value(p, idx+1); |
2556 | dst_offset <<= 8; | 2556 | dst_offset <<= 8; |
2557 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2557 | ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); |
2558 | } | 2558 | } |
2559 | p->idx += 7; | 2559 | p->idx += 7; |
2560 | } else { | 2560 | } else { |
@@ -2564,10 +2564,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2564 | dst_offset = radeon_get_ib_value(p, idx+1); | 2564 | dst_offset = radeon_get_ib_value(p, idx+1); |
2565 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; | 2565 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
2566 | 2566 | ||
2567 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2567 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2568 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2568 | ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2569 | ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2569 | ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; |
2570 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2570 | ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2571 | p->idx += 5; | 2571 | p->idx += 5; |
2572 | } else { | 2572 | } else { |
2573 | src_offset = radeon_get_ib_value(p, idx+2); | 2573 | src_offset = radeon_get_ib_value(p, idx+2); |
@@ -2575,10 +2575,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2575 | dst_offset = radeon_get_ib_value(p, idx+1); | 2575 | dst_offset = radeon_get_ib_value(p, idx+1); |
2576 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; | 2576 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; |
2577 | 2577 | ||
2578 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2578 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2579 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2579 | ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); |
2580 | ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2580 | ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff; |
2581 | ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16; | 2581 | ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16; |
2582 | p->idx += 4; | 2582 | p->idx += 4; |
2583 | } | 2583 | } |
2584 | } | 2584 | } |
@@ -2610,8 +2610,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2610 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); | 2610 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); |
2611 | return -EINVAL; | 2611 | return -EINVAL; |
2612 | } | 2612 | } |
2613 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2613 | ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); |
2614 | ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; | 2614 | ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000; |
2615 | p->idx += 4; | 2615 | p->idx += 4; |
2616 | break; | 2616 | break; |
2617 | case DMA_PACKET_NOP: | 2617 | case DMA_PACKET_NOP: |
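Several of the r600_cs.c handlers above split a relocated 64-bit address across two command dwords: a full low word plus only eight high bits, matching the upper_32_bits(offset) & 0xff masks in the hunks (the address space reachable through the MC is narrower than 64 bits). A sketch using the kernel's split helpers, where reloc_gpu_offset stands in for reloc->gpu_offset:

    u64 offset = reloc_gpu_offset + tmp;        /* relocated address */

    ib[idx + 0] = lower_32_bits(offset);        /* bits [31:0]  */
    ib[idx + 1] = upper_32_bits(offset) & 0xff; /* bits [39:32] */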
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index b2d4c91e6272..53fcb28f5578 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
@@ -176,8 +176,6 @@ int r600_dma_resume(struct radeon_device *rdev) | |||
176 | ring->wptr = 0; | 176 | ring->wptr = 0; |
177 | WREG32(DMA_RB_WPTR, ring->wptr << 2); | 177 | WREG32(DMA_RB_WPTR, ring->wptr << 2); |
178 | 178 | ||
179 | ring->rptr = RREG32(DMA_RB_RPTR) >> 2; | ||
180 | |||
181 | WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); | 179 | WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); |
182 | 180 | ||
183 | ring->ready = true; | 181 | ring->ready = true; |
@@ -221,11 +219,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
221 | u32 reset_mask = r600_gpu_check_soft_reset(rdev); | 219 | u32 reset_mask = r600_gpu_check_soft_reset(rdev); |
222 | 220 | ||
223 | if (!(reset_mask & RADEON_RESET_DMA)) { | 221 | if (!(reset_mask & RADEON_RESET_DMA)) { |
224 | radeon_ring_lockup_update(ring); | 222 | radeon_ring_lockup_update(rdev, ring); |
225 | return false; | 223 | return false; |
226 | } | 224 | } |
227 | /* force ring activities */ | ||
228 | radeon_ring_force_activity(rdev, ring); | ||
229 | return radeon_ring_test_lockup(rdev, ring); | 225 | return radeon_ring_test_lockup(rdev, ring); |
230 | } | 226 | } |
231 | 227 | ||
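All three is_lockup routines touched by this merge (r100 gfx, r600 gfx, r600 DMA) lose the "force activity" step: instead of poking the ring to provoke progress, the new scheme timestamps real activity in radeon_ring_lockup_update() (which now takes rdev so it can read the hardware rptr) and lets radeon_ring_test_lockup() compare against it. The resulting pattern, condensed from these hunks:

    bool ring_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
    {
        u32 reset_mask = r600_gpu_check_soft_reset(rdev);

        if (!(reset_mask & RADEON_RESET_DMA)) {
            /* engine is idle or healthy: remember that and report no hang */
            radeon_ring_lockup_update(rdev, ring);
            return false;
        }
        /* engine looks stuck: check whether the rptr moved since last update */
        return radeon_ring_test_lockup(rdev, ring);
    }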
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e4cc9b314ce9..cbf7e3269f84 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -834,6 +834,26 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen | |||
834 | return 0; | 834 | return 0; |
835 | } | 835 | } |
836 | 836 | ||
837 | int r600_get_platform_caps(struct radeon_device *rdev) | ||
838 | { | ||
839 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
840 | union power_info *power_info; | ||
841 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
842 | u16 data_offset; | ||
843 | u8 frev, crev; | ||
844 | |||
845 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
846 | &frev, &crev, &data_offset)) | ||
847 | return -EINVAL; | ||
848 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
849 | |||
850 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
851 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
852 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
853 | |||
854 | return 0; | ||
855 | } | ||
856 | |||
837 | /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ | 857 | /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ |
838 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 | 858 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 |
839 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 | 859 | #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 |
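The new r600_get_platform_caps() factors the PowerPlay header parse (platform caps plus back-bias and voltage response times) out of the per-ASIC dpm code. A hedged sketch of how a dpm init path might call it before walking the power states (both functions in the sketch are hypothetical callers, not from this commit):

    int example_dpm_init(struct radeon_device *rdev)
    {
        int ret = r600_get_platform_caps(rdev);
        if (ret)
            return ret;  /* no PowerPlay table in the vbios */

        /* rdev->pm.dpm.platform_caps and the response times are now valid */
        return example_parse_power_table(rdev);
    }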
@@ -1043,7 +1063,15 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) | |||
1043 | (mode_info->atom_context->bios + data_offset + | 1063 | (mode_info->atom_context->bios + data_offset + |
1044 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + | 1064 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + |
1045 | 1 + array->ucNumEntries * sizeof(VCEClockInfo)); | 1065 | 1 + array->ucNumEntries * sizeof(VCEClockInfo)); |
1066 | ATOM_PPLIB_VCE_State_Table *states = | ||
1067 | (ATOM_PPLIB_VCE_State_Table *) | ||
1068 | (mode_info->atom_context->bios + data_offset + | ||
1069 | le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + | ||
1070 | 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + | ||
1071 | 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); | ||
1046 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; | 1072 | ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; |
1073 | ATOM_PPLIB_VCE_State_Record *state_entry; | ||
1074 | VCEClockInfo *vce_clk; | ||
1047 | u32 size = limits->numEntries * | 1075 | u32 size = limits->numEntries * |
1048 | sizeof(struct radeon_vce_clock_voltage_dependency_entry); | 1076 | sizeof(struct radeon_vce_clock_voltage_dependency_entry); |
1049 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = | 1077 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = |
@@ -1055,8 +1083,9 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) | |||
1055 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = | 1083 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = |
1056 | limits->numEntries; | 1084 | limits->numEntries; |
1057 | entry = &limits->entries[0]; | 1085 | entry = &limits->entries[0]; |
1086 | state_entry = &states->entries[0]; | ||
1058 | for (i = 0; i < limits->numEntries; i++) { | 1087 | for (i = 0; i < limits->numEntries; i++) { |
1059 | VCEClockInfo *vce_clk = (VCEClockInfo *) | 1088 | vce_clk = (VCEClockInfo *) |
1060 | ((u8 *)&array->entries[0] + | 1089 | ((u8 *)&array->entries[0] + |
1061 | (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | 1090 | (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); |
1062 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = | 1091 | rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = |
@@ -1068,6 +1097,23 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) | |||
1068 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) | 1097 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) |
1069 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); | 1098 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); |
1070 | } | 1099 | } |
1100 | for (i = 0; i < states->numEntries; i++) { | ||
1101 | if (i >= RADEON_MAX_VCE_LEVELS) | ||
1102 | break; | ||
1103 | vce_clk = (VCEClockInfo *) | ||
1104 | ((u8 *)&array->entries[0] + | ||
1105 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | ||
1106 | rdev->pm.dpm.vce_states[i].evclk = | ||
1107 | le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); | ||
1108 | rdev->pm.dpm.vce_states[i].ecclk = | ||
1109 | le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); | ||
1110 | rdev->pm.dpm.vce_states[i].clk_idx = | ||
1111 | state_entry->ucClockInfoIndex & 0x3f; | ||
1112 | rdev->pm.dpm.vce_states[i].pstate = | ||
1113 | (state_entry->ucClockInfoIndex & 0xc0) >> 6; | ||
1114 | state_entry = (ATOM_PPLIB_VCE_State_Record *) | ||
1115 | ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); | ||
1116 | } | ||
1071 | } | 1117 | } |
1072 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && | 1118 | if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && |
1073 | ext_hdr->usUVDTableOffset) { | 1119 | ext_hdr->usUVDTableOffset) { |
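The new VCE-state loop decodes two fields packed into ucClockInfoIndex: the low six bits index the clock-info array, the top two bits carry the pstate. A worked decode with a hypothetical table byte, plus the matching clock reassembly used just above it:

    u8 uc      = 0x83;             /* example ucClockInfoIndex value */
    u8 clk_idx = uc & 0x3f;        /* == 0x03, index into VCEClockInfo[] */
    u8 pstate  = (uc & 0xc0) >> 6; /* == 2, associated power state */

    /* evclk arrives split across a 16-bit low part and an 8-bit high part */
    u32 evclk = le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);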
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 07eab2b04e81..46b9d2a03018 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h | |||
@@ -215,6 +215,8 @@ void r600_stop_dpm(struct radeon_device *rdev); | |||
215 | 215 | ||
216 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); | 216 | bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); |
217 | 217 | ||
218 | int r600_get_platform_caps(struct radeon_device *rdev); | ||
219 | |||
218 | int r600_parse_extended_power_table(struct radeon_device *rdev); | 220 | int r600_parse_extended_power_table(struct radeon_device *rdev); |
219 | void r600_free_extended_power_table(struct radeon_device *rdev); | 221 | void r600_free_extended_power_table(struct radeon_device *rdev); |
220 | 222 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index e887d027b6d0..f21db7a0b34d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -113,19 +113,16 @@ extern int radeon_hard_reset; | |||
113 | #define RADEONFB_CONN_LIMIT 4 | 113 | #define RADEONFB_CONN_LIMIT 4 |
114 | #define RADEON_BIOS_NUM_SCRATCH 8 | 114 | #define RADEON_BIOS_NUM_SCRATCH 8 |
115 | 115 | ||
116 | /* max number of rings */ | ||
117 | #define RADEON_NUM_RINGS 6 | ||
118 | |||
119 | /* fence seq are set to this number when signaled */ | 116 | /* fence seq are set to this number when signaled */ |
120 | #define RADEON_FENCE_SIGNALED_SEQ 0LL | 117 | #define RADEON_FENCE_SIGNALED_SEQ 0LL |
121 | 118 | ||
122 | /* internal ring indices */ | 119 | /* internal ring indices */ |
123 | /* r1xx+ has gfx CP ring */ | 120 | /* r1xx+ has gfx CP ring */ |
124 | #define RADEON_RING_TYPE_GFX_INDEX 0 | 121 | #define RADEON_RING_TYPE_GFX_INDEX 0 |
125 | 122 | ||
126 | /* cayman has 2 compute CP rings */ | 123 | /* cayman has 2 compute CP rings */ |
127 | #define CAYMAN_RING_TYPE_CP1_INDEX 1 | 124 | #define CAYMAN_RING_TYPE_CP1_INDEX 1 |
128 | #define CAYMAN_RING_TYPE_CP2_INDEX 2 | 125 | #define CAYMAN_RING_TYPE_CP2_INDEX 2 |
129 | 126 | ||
130 | /* R600+ has an async dma ring */ | 127 | /* R600+ has an async dma ring */ |
131 | #define R600_RING_TYPE_DMA_INDEX 3 | 128 | #define R600_RING_TYPE_DMA_INDEX 3 |
@@ -133,7 +130,17 @@ extern int radeon_hard_reset; | |||
133 | #define CAYMAN_RING_TYPE_DMA1_INDEX 4 | 130 | #define CAYMAN_RING_TYPE_DMA1_INDEX 4 |
134 | 131 | ||
135 | /* R600+ */ | 132 | /* R600+ */ |
136 | #define R600_RING_TYPE_UVD_INDEX 5 | 133 | #define R600_RING_TYPE_UVD_INDEX 5 |
134 | |||
135 | /* TN+ */ | ||
136 | #define TN_RING_TYPE_VCE1_INDEX 6 | ||
137 | #define TN_RING_TYPE_VCE2_INDEX 7 | ||
138 | |||
139 | /* max number of rings */ | ||
140 | #define RADEON_NUM_RINGS 8 | ||
141 | |||
142 | /* number of hw syncs before falling back on blocking */ | ||
143 | #define RADEON_NUM_SYNCS 4 | ||
137 | 144 | ||
138 | /* number of hw syncs before falling back on blocking */ | 145 | /* number of hw syncs before falling back on blocking */ |
139 | #define RADEON_NUM_SYNCS 4 | 146 | #define RADEON_NUM_SYNCS 4 |
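With the two VCE rings appended, the ring index table now reads, in full (values straight from this hunk), and RADEON_NUM_RINGS moves below the list so the count sits next to the indices it bounds:

    GFX CP       0   (r1xx+)
    compute CP1  1   (cayman)
    compute CP2  2   (cayman)
    async DMA    3   (r600+)
    async DMA1   4   (cayman)
    UVD          5   (r600+)
    VCE1         6   (TN+)
    VCE2         7   (TN+)
    => RADEON_NUM_RINGS = 8

Any loop bounded by RADEON_NUM_RINGS, such as per-ring fence processing, now covers the VCE rings automatically.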
@@ -356,9 +363,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i | |||
356 | void radeon_fence_process(struct radeon_device *rdev, int ring); | 363 | void radeon_fence_process(struct radeon_device *rdev, int ring); |
357 | bool radeon_fence_signaled(struct radeon_fence *fence); | 364 | bool radeon_fence_signaled(struct radeon_fence *fence); |
358 | int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); | 365 | int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); |
359 | int radeon_fence_wait_locked(struct radeon_fence *fence); | 366 | int radeon_fence_wait_next(struct radeon_device *rdev, int ring); |
360 | int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); | 367 | int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); |
361 | int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); | ||
362 | int radeon_fence_wait_any(struct radeon_device *rdev, | 368 | int radeon_fence_wait_any(struct radeon_device *rdev, |
363 | struct radeon_fence **fences, | 369 | struct radeon_fence **fences, |
364 | bool intr); | 370 | bool intr); |
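The fence wait entry points lose their _locked suffix, and radeon_fence_wait_locked gives way to radeon_fence_wait_next; the rename suggests callers no longer serialize these waits under a shared lock. A hedged usage sketch for draining every ring, e.g. before suspend (the loop is illustrative, not lifted from this commit):

    int i, r;

    for (i = 0; i < RADEON_NUM_RINGS; i++) {
        /* block until all fences emitted on this ring have signaled */
        r = radeon_fence_wait_empty(rdev, i);
        if (r)
            dev_err(rdev->dev, "failed to drain ring %d (%d)\n", i, r);
    }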
@@ -450,6 +456,7 @@ struct radeon_bo { | |||
450 | /* Protected by gem.mutex */ | 456 | /* Protected by gem.mutex */ |
451 | struct list_head list; | 457 | struct list_head list; |
452 | /* Protected by tbo.reserved */ | 458 | /* Protected by tbo.reserved */ |
459 | u32 initial_domain; | ||
453 | u32 placements[3]; | 460 | u32 placements[3]; |
454 | struct ttm_placement placement; | 461 | struct ttm_placement placement; |
455 | struct ttm_buffer_object tbo; | 462 | struct ttm_buffer_object tbo; |
@@ -472,16 +479,6 @@ struct radeon_bo { | |||
472 | }; | 479 | }; |
473 | #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) | 480 | #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) |
474 | 481 | ||
475 | struct radeon_bo_list { | ||
476 | struct ttm_validate_buffer tv; | ||
477 | struct radeon_bo *bo; | ||
478 | uint64_t gpu_offset; | ||
479 | bool written; | ||
480 | unsigned domain; | ||
481 | unsigned alt_domain; | ||
482 | u32 tiling_flags; | ||
483 | }; | ||
484 | |||
485 | int radeon_gem_debugfs_init(struct radeon_device *rdev); | 482 | int radeon_gem_debugfs_init(struct radeon_device *rdev); |
486 | 483 | ||
487 | /* sub-allocation manager, it has to be protected by another lock. | 484 | /* sub-allocation manager, it has to be protected by another lock. |
@@ -789,7 +786,6 @@ struct radeon_ib { | |||
789 | struct radeon_ring { | 786 | struct radeon_ring { |
790 | struct radeon_bo *ring_obj; | 787 | struct radeon_bo *ring_obj; |
791 | volatile uint32_t *ring; | 788 | volatile uint32_t *ring; |
792 | unsigned rptr; | ||
793 | unsigned rptr_offs; | 789 | unsigned rptr_offs; |
794 | unsigned rptr_save_reg; | 790 | unsigned rptr_save_reg; |
795 | u64 next_rptr_gpu_addr; | 791 | u64 next_rptr_gpu_addr; |
@@ -799,8 +795,8 @@ struct radeon_ring { | |||
799 | unsigned ring_size; | 795 | unsigned ring_size; |
800 | unsigned ring_free_dw; | 796 | unsigned ring_free_dw; |
801 | int count_dw; | 797 | int count_dw; |
802 | unsigned long last_activity; | 798 | atomic_t last_rptr; |
803 | unsigned last_rptr; | 799 | atomic64_t last_activity; |
804 | uint64_t gpu_addr; | 800 | uint64_t gpu_addr; |
805 | uint32_t align_mask; | 801 | uint32_t align_mask; |
806 | uint32_t ptr_mask; | 802 | uint32_t ptr_mask; |
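last_rptr and last_activity become atomic_t/atomic64_t, so the lockup bookkeeping can be refreshed from any context without holding the ring mutex. A sketch of the update side, consistent with the new fields and the radeon_ring_lockup_update() signature from this merge (the rptr accessor name is an assumption):

    void radeon_ring_lockup_update(struct radeon_device *rdev,
                                   struct radeon_ring *ring)
    {
        /* record the current hw read pointer and the time we saw it;
         * radeon_ring_test_lockup() later compares against both */
        atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
        atomic64_set(&ring->last_activity, jiffies_64);
    }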
@@ -852,17 +848,22 @@ struct radeon_mec { | |||
852 | #define R600_PTE_READABLE (1 << 5) | 848 | #define R600_PTE_READABLE (1 << 5) |
853 | #define R600_PTE_WRITEABLE (1 << 6) | 849 | #define R600_PTE_WRITEABLE (1 << 6) |
854 | 850 | ||
851 | struct radeon_vm_pt { | ||
852 | struct radeon_bo *bo; | ||
853 | uint64_t addr; | ||
854 | }; | ||
855 | |||
855 | struct radeon_vm { | 856 | struct radeon_vm { |
856 | struct list_head list; | ||
857 | struct list_head va; | 857 | struct list_head va; |
858 | unsigned id; | 858 | unsigned id; |
859 | 859 | ||
860 | /* contains the page directory */ | 860 | /* contains the page directory */ |
861 | struct radeon_sa_bo *page_directory; | 861 | struct radeon_bo *page_directory; |
862 | uint64_t pd_gpu_addr; | 862 | uint64_t pd_gpu_addr; |
863 | unsigned max_pde_used; | ||
863 | 864 | ||
864 | /* array of page tables, one for each page directory entry */ | 865 | /* array of page tables, one for each page directory entry */ |
865 | struct radeon_sa_bo **page_tables; | 866 | struct radeon_vm_pt *page_tables; |
866 | 867 | ||
867 | struct mutex mutex; | 868 | struct mutex mutex; |
868 | /* last fence for cs using this vm */ | 869 | /* last fence for cs using this vm */ |
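Per-VM page tables stop being sub-allocator handles and become an array of radeon_vm_pt, each pairing a real BO with the address last written into its page-directory entry, so the PDE only needs rewriting when the table actually moved. A hedged lookup sketch (the 21-bit shift is an assumed stand-in for the driver's directory-size math):

    /* hypothetical layout: 4 KiB pages, 512 PTEs per table => addr >> 21 */
    unsigned pde = (unsigned)(gpu_addr >> 21);
    struct radeon_vm_pt *pt = &vm->page_tables[pde];

    if (pt->bo && pt->addr == radeon_bo_gpu_offset(pt->bo))
        return 0;  /* directory entry is already up to date */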
@@ -874,10 +875,7 @@ struct radeon_vm { | |||
874 | }; | 875 | }; |
875 | 876 | ||
876 | struct radeon_vm_manager { | 877 | struct radeon_vm_manager { |
877 | struct mutex lock; | ||
878 | struct list_head lru_vm; | ||
879 | struct radeon_fence *active[RADEON_NUM_VM]; | 878 | struct radeon_fence *active[RADEON_NUM_VM]; |
880 | struct radeon_sa_manager sa_manager; | ||
881 | uint32_t max_pfn; | 879 | uint32_t max_pfn; |
882 | /* number of VMIDs */ | 880 | /* number of VMIDs */ |
883 | unsigned nvm; | 881 | unsigned nvm; |
@@ -953,8 +951,8 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *c | |||
953 | void radeon_ring_undo(struct radeon_ring *ring); | 951 | void radeon_ring_undo(struct radeon_ring *ring); |
954 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); | 952 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); |
955 | int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); | 953 | int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
956 | void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring); | 954 | void radeon_ring_lockup_update(struct radeon_device *rdev, |
957 | void radeon_ring_lockup_update(struct radeon_ring *ring); | 955 | struct radeon_ring *ring); |
958 | bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); | 956 | bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
959 | unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, | 957 | unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, |
960 | uint32_t **data); | 958 | uint32_t **data); |
@@ -980,9 +978,12 @@ void cayman_dma_fini(struct radeon_device *rdev); | |||
980 | struct radeon_cs_reloc { | 978 | struct radeon_cs_reloc { |
981 | struct drm_gem_object *gobj; | 979 | struct drm_gem_object *gobj; |
982 | struct radeon_bo *robj; | 980 | struct radeon_bo *robj; |
983 | struct radeon_bo_list lobj; | 981 | struct ttm_validate_buffer tv; |
982 | uint64_t gpu_offset; | ||
983 | unsigned domain; | ||
984 | unsigned alt_domain; | ||
985 | uint32_t tiling_flags; | ||
984 | uint32_t handle; | 986 | uint32_t handle; |
985 | uint32_t flags; | ||
986 | }; | 987 | }; |
987 | 988 | ||
988 | struct radeon_cs_chunk { | 989 | struct radeon_cs_chunk { |
@@ -1006,6 +1007,7 @@ struct radeon_cs_parser { | |||
1006 | unsigned nrelocs; | 1007 | unsigned nrelocs; |
1007 | struct radeon_cs_reloc *relocs; | 1008 | struct radeon_cs_reloc *relocs; |
1008 | struct radeon_cs_reloc **relocs_ptr; | 1009 | struct radeon_cs_reloc **relocs_ptr; |
1010 | struct radeon_cs_reloc *vm_bos; | ||
1009 | struct list_head validated; | 1011 | struct list_head validated; |
1010 | unsigned dma_reloc_idx; | 1012 | unsigned dma_reloc_idx; |
1011 | /* indices of various chunks */ | 1013 | /* indices of various chunks */ |
@@ -1255,6 +1257,17 @@ enum radeon_dpm_event_src { | |||
1255 | RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 | 1257 | RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 |
1256 | }; | 1258 | }; |
1257 | 1259 | ||
1260 | #define RADEON_MAX_VCE_LEVELS 6 | ||
1261 | |||
1262 | enum radeon_vce_level { | ||
1263 | RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
1264 | RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
1265 | RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
1266 | RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
1267 | RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
1268 | RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
1269 | }; | ||
1270 | |||
1258 | struct radeon_ps { | 1271 | struct radeon_ps { |
1259 | u32 caps; /* vbios flags */ | 1272 | u32 caps; /* vbios flags */ |
1260 | u32 class; /* vbios flags */ | 1273 | u32 class; /* vbios flags */ |
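The enum comments encode the selection rule: power source first (AC covers everything), then queue type, then a 720-line resolution split. A purely illustrative helper making that mapping explicit (vce_level_for() is hypothetical, and the entropy-encoding level is ignored for brevity):

    static enum radeon_vce_level
    vce_level_for(bool on_ac, bool low_latency, unsigned height)
    {
        if (on_ac)
            return RADEON_VCE_LEVEL_AC_ALL;   /* AC: one level for all cases */
        if (low_latency)
            return height <= 720 ? RADEON_VCE_LEVEL_DC_LL_LOW
                                 : RADEON_VCE_LEVEL_DC_LL_HIGH;
        return height <= 720 ? RADEON_VCE_LEVEL_DC_GP_LOW
                             : RADEON_VCE_LEVEL_DC_GP_HIGH;
    }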
@@ -1265,6 +1278,8 @@ struct radeon_ps { | |||
1265 | /* VCE clocks */ | 1278 | /* VCE clocks */ |
1266 | u32 evclk; | 1279 | u32 evclk; |
1267 | u32 ecclk; | 1280 | u32 ecclk; |
1281 | bool vce_active; | ||
1282 | enum radeon_vce_level vce_level; | ||
1268 | /* asic priv */ | 1283 | /* asic priv */ |
1269 | void *ps_priv; | 1284 | void *ps_priv; |
1270 | }; | 1285 | }; |
@@ -1439,6 +1454,17 @@ enum radeon_dpm_forced_level { | |||
1439 | RADEON_DPM_FORCED_LEVEL_HIGH = 2, | 1454 | RADEON_DPM_FORCED_LEVEL_HIGH = 2, |
1440 | }; | 1455 | }; |
1441 | 1456 | ||
1457 | struct radeon_vce_state { | ||
1458 | /* vce clocks */ | ||
1459 | u32 evclk; | ||
1460 | u32 ecclk; | ||
1461 | /* gpu clocks */ | ||
1462 | u32 sclk; | ||
1463 | u32 mclk; | ||
1464 | u8 clk_idx; | ||
1465 | u8 pstate; | ||
1466 | }; | ||
1467 | |||
1442 | struct radeon_dpm { | 1468 | struct radeon_dpm { |
1443 | struct radeon_ps *ps; | 1469 | struct radeon_ps *ps; |
1444 | /* number of valid power states */ | 1470 | /* number of valid power states */ |
@@ -1451,6 +1477,9 @@ struct radeon_dpm { | |||
1451 | struct radeon_ps *boot_ps; | 1477 | struct radeon_ps *boot_ps; |
1452 | /* default uvd power state */ | 1478 | /* default uvd power state */ |
1453 | struct radeon_ps *uvd_ps; | 1479 | struct radeon_ps *uvd_ps; |
1480 | /* vce requirements */ | ||
1481 | struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS]; | ||
1482 | enum radeon_vce_level vce_level; | ||
1454 | enum radeon_pm_state_type state; | 1483 | enum radeon_pm_state_type state; |
1455 | enum radeon_pm_state_type user_state; | 1484 | enum radeon_pm_state_type user_state; |
1456 | u32 platform_caps; | 1485 | u32 platform_caps; |
@@ -1476,6 +1505,7 @@ struct radeon_dpm { | |||
1476 | /* special states active */ | 1505 | /* special states active */ |
1477 | bool thermal_active; | 1506 | bool thermal_active; |
1478 | bool uvd_active; | 1507 | bool uvd_active; |
1508 | bool vce_active; | ||
1479 | /* thermal handling */ | 1509 | /* thermal handling */ |
1480 | struct radeon_dpm_thermal thermal; | 1510 | struct radeon_dpm_thermal thermal; |
1481 | /* forced levels */ | 1511 | /* forced levels */ |
@@ -1486,6 +1516,7 @@ struct radeon_dpm { | |||
1486 | }; | 1516 | }; |
1487 | 1517 | ||
1488 | void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable); | 1518 | void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable); |
1519 | void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable); | ||
1489 | 1520 | ||
1490 | struct radeon_pm { | 1521 | struct radeon_pm { |
1491 | struct mutex mutex; | 1522 | struct mutex mutex; |
@@ -1591,6 +1622,45 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, | |||
1591 | int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, | 1622 | int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, |
1592 | unsigned cg_upll_func_cntl); | 1623 | unsigned cg_upll_func_cntl); |
1593 | 1624 | ||
1625 | /* | ||
1626 | * VCE | ||
1627 | */ | ||
1628 | #define RADEON_MAX_VCE_HANDLES 16 | ||
1629 | #define RADEON_VCE_STACK_SIZE (1024*1024) | ||
1630 | #define RADEON_VCE_HEAP_SIZE (4*1024*1024) | ||
1631 | |||
1632 | struct radeon_vce { | ||
1633 | struct radeon_bo *vcpu_bo; | ||
1634 | uint64_t gpu_addr; | ||
1635 | unsigned fw_version; | ||
1636 | unsigned fb_version; | ||
1637 | atomic_t handles[RADEON_MAX_VCE_HANDLES]; | ||
1638 | struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; | ||
1639 | struct delayed_work idle_work; | ||
1640 | }; | ||
1641 | |||
1642 | int radeon_vce_init(struct radeon_device *rdev); | ||
1643 | void radeon_vce_fini(struct radeon_device *rdev); | ||
1644 | int radeon_vce_suspend(struct radeon_device *rdev); | ||
1645 | int radeon_vce_resume(struct radeon_device *rdev); | ||
1646 | int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, | ||
1647 | uint32_t handle, struct radeon_fence **fence); | ||
1648 | int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | ||
1649 | uint32_t handle, struct radeon_fence **fence); | ||
1650 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); | ||
1651 | void radeon_vce_note_usage(struct radeon_device *rdev); | ||
1652 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); | ||
1653 | int radeon_vce_cs_parse(struct radeon_cs_parser *p); | ||
1654 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | ||
1655 | struct radeon_ring *ring, | ||
1656 | struct radeon_semaphore *semaphore, | ||
1657 | bool emit_wait); | ||
1658 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | ||
1659 | void radeon_vce_fence_emit(struct radeon_device *rdev, | ||
1660 | struct radeon_fence *fence); | ||
1661 | int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); | ||
1662 | int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); | ||
1663 | |||
1594 | struct r600_audio_pin { | 1664 | struct r600_audio_pin { |
1595 | int channels; | 1665 | int channels; |
1596 | int rate; | 1666 | int rate; |
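Editor's note: the new handles[] array is a fixed table of up to 16 active VCE sessions per device, with filp[] remembering which file owns each one so radeon_vce_free_handles() can clean up on close. A minimal userspace sketch (not the radeon code, which presumably uses the kernel atomic_t API) of the lockless slot-claiming pattern such a table suggests, with C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

/* A slot is free while it holds 0, so 0 is not a valid handle here. */
static _Atomic uint32_t handles[MAX_HANDLES];

/* Returns the slot index claimed for 'handle', or -1 if the table is full. */
static int claim_handle(uint32_t handle)
{
        for (int i = 0; i < MAX_HANDLES; i++) {
                uint32_t expected = 0;  /* only a free slot may be taken */
                if (atomic_compare_exchange_strong(&handles[i], &expected,
                                                   handle))
                        return i;
        }
        return -1;
}

static void release_handle(uint32_t handle)
{
        for (int i = 0; i < MAX_HANDLES; i++) {
                uint32_t expected = handle;
                if (atomic_compare_exchange_strong(&handles[i], &expected, 0))
                        return;
        }
}

int main(void)
{
        printf("42 -> slot %d\n", claim_handle(42));    /* slot 0 */
        printf("43 -> slot %d\n", claim_handle(43));    /* slot 1 */
        release_handle(42);
        printf("44 -> slot %d\n", claim_handle(44));    /* slot 0 again */
        return 0;
}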
@@ -1780,6 +1850,7 @@ struct radeon_asic { | |||
1780 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | 1850 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
1781 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | 1851 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
1782 | int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk); | 1852 | int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk); |
1853 | int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk); | ||
1783 | int (*get_temperature)(struct radeon_device *rdev); | 1854 | int (*get_temperature)(struct radeon_device *rdev); |
1784 | } pm; | 1855 | } pm; |
1785 | /* dynamic power management */ | 1856 | /* dynamic power management */ |
@@ -2041,6 +2112,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
2041 | struct drm_file *filp); | 2112 | struct drm_file *filp); |
2042 | int radeon_gem_va_ioctl(struct drm_device *dev, void *data, | 2113 | int radeon_gem_va_ioctl(struct drm_device *dev, void *data, |
2043 | struct drm_file *filp); | 2114 | struct drm_file *filp); |
2115 | int radeon_gem_op_ioctl(struct drm_device *dev, void *data, | ||
2116 | struct drm_file *filp); | ||
2044 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 2117 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
2045 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | 2118 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, |
2046 | struct drm_file *filp); | 2119 | struct drm_file *filp); |
@@ -2186,6 +2259,7 @@ struct radeon_device { | |||
2186 | struct radeon_gem gem; | 2259 | struct radeon_gem gem; |
2187 | struct radeon_pm pm; | 2260 | struct radeon_pm pm; |
2188 | struct radeon_uvd uvd; | 2261 | struct radeon_uvd uvd; |
2262 | struct radeon_vce vce; | ||
2189 | uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; | 2263 | uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; |
2190 | struct radeon_wb wb; | 2264 | struct radeon_wb wb; |
2191 | struct radeon_dummy_page dummy_page; | 2265 | struct radeon_dummy_page dummy_page; |
@@ -2205,6 +2279,7 @@ struct radeon_device { | |||
2205 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ | 2279 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ |
2206 | const struct firmware *smc_fw; /* SMC firmware */ | 2280 | const struct firmware *smc_fw; /* SMC firmware */ |
2207 | const struct firmware *uvd_fw; /* UVD firmware */ | 2281 | const struct firmware *uvd_fw; /* UVD firmware */ |
2282 | const struct firmware *vce_fw; /* VCE firmware */ | ||
2208 | struct r600_vram_scratch vram_scratch; | 2283 | struct r600_vram_scratch vram_scratch; |
2209 | int msi_enabled; /* msi enabled */ | 2284 | int msi_enabled; /* msi enabled */ |
2210 | struct r600_ih ih; /* r6/700 interrupt ring */ | 2285 | struct r600_ih ih; /* r6/700 interrupt ring */ |
@@ -2229,6 +2304,10 @@ struct radeon_device { | |||
2229 | /* virtual memory */ | 2304 | /* virtual memory */ |
2230 | struct radeon_vm_manager vm_manager; | 2305 | struct radeon_vm_manager vm_manager; |
2231 | struct mutex gpu_clock_mutex; | 2306 | struct mutex gpu_clock_mutex; |
2307 | /* memory stats */ | ||
2308 | atomic64_t vram_usage; | ||
2309 | atomic64_t gtt_usage; | ||
2310 | atomic64_t num_bytes_moved; | ||
2232 | /* ACPI interface */ | 2311 | /* ACPI interface */ |
2233 | struct radeon_atif atif; | 2312 | struct radeon_atif atif; |
2234 | struct radeon_atcs atcs; | 2313 | struct radeon_atcs atcs; |
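Editor's note: the three new atomic64_t counters track how many bytes live in each domain and how much data TTM has migrated, updated as buffers are created, destroyed and moved. A rough userspace model of the accounting pattern (C11 atomics in place of atomic64_t; the helper name is made up):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t vram_usage, gtt_usage, num_bytes_moved;

/* Hypothetical bookkeeping hook called whenever a buffer migrates. */
static void account_move(int64_t size, int to_vram)
{
        if (to_vram) {
                atomic_fetch_add(&vram_usage, size);
                atomic_fetch_sub(&gtt_usage, size);
        } else {
                atomic_fetch_add(&gtt_usage, size);
                atomic_fetch_sub(&vram_usage, size);
        }
        atomic_fetch_add(&num_bytes_moved, size);
}

int main(void)
{
        atomic_store(&gtt_usage, 8 << 20);      /* pretend 8 MiB starts in GTT */
        account_move(8 << 20, 1);               /* migrate it all to VRAM */
        printf("vram=%lld gtt=%lld moved=%lld\n",
               (long long)atomic_load(&vram_usage),
               (long long)atomic_load(&gtt_usage),
               (long long)atomic_load(&num_bytes_moved));
        return 0;
}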
@@ -2639,6 +2718,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
2639 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) | 2718 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) |
2640 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) | 2719 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) |
2641 | #define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d)) | 2720 | #define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d)) |
2721 | #define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec)) | ||
2642 | #define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev)) | 2722 | #define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev)) |
2643 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) | 2723 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) |
2644 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) | 2724 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) |
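Editor's note: like its neighbours, the new radeon_set_vce_clocks() macro is a thin wrapper over a per-ASIC function pointer table (filled in by the radeon_asic.c hunks above). A compact sketch of that dispatch pattern in plain C; all names and types here are illustrative, not the radeon definitions:

#include <stdio.h>

struct dev;

struct pm_ops {
        int (*set_vce_clocks)(struct dev *d, unsigned evclk, unsigned ecclk);
};

struct dev { const struct pm_ops *pm; };

static int cik_like_set_vce_clocks(struct dev *d, unsigned evclk, unsigned ecclk)
{
        printf("program VCE PLLs: evclk=%u ecclk=%u\n", evclk, ecclk);
        return 0;
}

static const struct pm_ops cik_like_pm = {
        .set_vce_clocks = cik_like_set_vce_clocks,
};

/* The macro only hides the double indirection through the ops table. */
#define dev_set_vce_clocks(d, ev, ec) ((d)->pm->set_vce_clocks((d), (ev), (ec)))

int main(void)
{
        struct dev d = { .pm = &cik_like_pm };
        return dev_set_vce_clocks(&d, 853330, 853330); /* kHz, illustrative */
}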
@@ -2715,16 +2795,22 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev, | |||
2715 | */ | 2795 | */ |
2716 | int radeon_vm_manager_init(struct radeon_device *rdev); | 2796 | int radeon_vm_manager_init(struct radeon_device *rdev); |
2717 | void radeon_vm_manager_fini(struct radeon_device *rdev); | 2797 | void radeon_vm_manager_fini(struct radeon_device *rdev); |
2718 | void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); | 2798 | int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); |
2719 | void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); | 2799 | void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); |
2720 | int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); | 2800 | struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, |
2721 | void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm); | 2801 | struct radeon_vm *vm, |
2802 | struct list_head *head); | ||
2722 | struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, | 2803 | struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, |
2723 | struct radeon_vm *vm, int ring); | 2804 | struct radeon_vm *vm, int ring); |
2805 | void radeon_vm_flush(struct radeon_device *rdev, | ||
2806 | struct radeon_vm *vm, | ||
2807 | int ring); | ||
2724 | void radeon_vm_fence(struct radeon_device *rdev, | 2808 | void radeon_vm_fence(struct radeon_device *rdev, |
2725 | struct radeon_vm *vm, | 2809 | struct radeon_vm *vm, |
2726 | struct radeon_fence *fence); | 2810 | struct radeon_fence *fence); |
2727 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); | 2811 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); |
2812 | int radeon_vm_update_page_directory(struct radeon_device *rdev, | ||
2813 | struct radeon_vm *vm); | ||
2728 | int radeon_vm_bo_update(struct radeon_device *rdev, | 2814 | int radeon_vm_bo_update(struct radeon_device *rdev, |
2729 | struct radeon_vm *vm, | 2815 | struct radeon_vm *vm, |
2730 | struct radeon_bo *bo, | 2816 | struct radeon_bo *bo, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index dda02bfc10a4..b8a24a75d4ff 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1987,6 +1987,19 @@ static struct radeon_asic_ring ci_dma_ring = { | |||
1987 | .set_wptr = &cik_sdma_set_wptr, | 1987 | .set_wptr = &cik_sdma_set_wptr, |
1988 | }; | 1988 | }; |
1989 | 1989 | ||
1990 | static struct radeon_asic_ring ci_vce_ring = { | ||
1991 | .ib_execute = &radeon_vce_ib_execute, | ||
1992 | .emit_fence = &radeon_vce_fence_emit, | ||
1993 | .emit_semaphore = &radeon_vce_semaphore_emit, | ||
1994 | .cs_parse = &radeon_vce_cs_parse, | ||
1995 | .ring_test = &radeon_vce_ring_test, | ||
1996 | .ib_test = &radeon_vce_ib_test, | ||
1997 | .is_lockup = &radeon_ring_test_lockup, | ||
1998 | .get_rptr = &vce_v1_0_get_rptr, | ||
1999 | .get_wptr = &vce_v1_0_get_wptr, | ||
2000 | .set_wptr = &vce_v1_0_set_wptr, | ||
2001 | }; | ||
2002 | |||
1990 | static struct radeon_asic ci_asic = { | 2003 | static struct radeon_asic ci_asic = { |
1991 | .init = &cik_init, | 2004 | .init = &cik_init, |
1992 | .fini = &cik_fini, | 2005 | .fini = &cik_fini, |
@@ -2015,6 +2028,8 @@ static struct radeon_asic ci_asic = { | |||
2015 | [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, | 2028 | [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, |
2016 | [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, | 2029 | [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, |
2017 | [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, | 2030 | [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, |
2031 | [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring, | ||
2032 | [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring, | ||
2018 | }, | 2033 | }, |
2019 | .irq = { | 2034 | .irq = { |
2020 | .set = &cik_irq_set, | 2035 | .set = &cik_irq_set, |
@@ -2061,6 +2076,7 @@ static struct radeon_asic ci_asic = { | |||
2061 | .set_pcie_lanes = NULL, | 2076 | .set_pcie_lanes = NULL, |
2062 | .set_clock_gating = NULL, | 2077 | .set_clock_gating = NULL, |
2063 | .set_uvd_clocks = &cik_set_uvd_clocks, | 2078 | .set_uvd_clocks = &cik_set_uvd_clocks, |
2079 | .set_vce_clocks = &cik_set_vce_clocks, | ||
2064 | .get_temperature = &ci_get_temp, | 2080 | .get_temperature = &ci_get_temp, |
2065 | }, | 2081 | }, |
2066 | .dpm = { | 2082 | .dpm = { |
@@ -2117,6 +2133,8 @@ static struct radeon_asic kv_asic = { | |||
2117 | [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, | 2133 | [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, |
2118 | [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, | 2134 | [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, |
2119 | [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, | 2135 | [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, |
2136 | [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring, | ||
2137 | [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring, | ||
2120 | }, | 2138 | }, |
2121 | .irq = { | 2139 | .irq = { |
2122 | .set = &cik_irq_set, | 2140 | .set = &cik_irq_set, |
@@ -2163,6 +2181,7 @@ static struct radeon_asic kv_asic = { | |||
2163 | .set_pcie_lanes = NULL, | 2181 | .set_pcie_lanes = NULL, |
2164 | .set_clock_gating = NULL, | 2182 | .set_clock_gating = NULL, |
2165 | .set_uvd_clocks = &cik_set_uvd_clocks, | 2183 | .set_uvd_clocks = &cik_set_uvd_clocks, |
2184 | .set_vce_clocks = &cik_set_vce_clocks, | ||
2166 | .get_temperature = &kv_get_temp, | 2185 | .get_temperature = &kv_get_temp, |
2167 | }, | 2186 | }, |
2168 | .dpm = { | 2187 | .dpm = { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index ae637cfda783..3d55a3a39e82 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -717,6 +717,7 @@ u32 cik_get_xclk(struct radeon_device *rdev); | |||
717 | uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg); | 717 | uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg); |
718 | void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 718 | void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
719 | int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); | 719 | int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
720 | int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk); | ||
720 | void cik_sdma_fence_ring_emit(struct radeon_device *rdev, | 721 | void cik_sdma_fence_ring_emit(struct radeon_device *rdev, |
721 | struct radeon_fence *fence); | 722 | struct radeon_fence *fence); |
722 | bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, | 723 | bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, |
@@ -863,4 +864,17 @@ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, | |||
863 | /* uvd v4.2 */ | 864 | /* uvd v4.2 */ |
864 | int uvd_v4_2_resume(struct radeon_device *rdev); | 865 | int uvd_v4_2_resume(struct radeon_device *rdev); |
865 | 866 | ||
867 | /* vce v1.0 */ | ||
868 | uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev, | ||
869 | struct radeon_ring *ring); | ||
870 | uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev, | ||
871 | struct radeon_ring *ring); | ||
872 | void vce_v1_0_set_wptr(struct radeon_device *rdev, | ||
873 | struct radeon_ring *ring); | ||
874 | int vce_v1_0_init(struct radeon_device *rdev); | ||
875 | int vce_v1_0_start(struct radeon_device *rdev); | ||
876 | |||
877 | /* vce v2.0 */ | ||
878 | int vce_v2_0_resume(struct radeon_device *rdev); | ||
879 | |||
866 | #endif | 880 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index dfb5a1db87d4..2b6e0ebcc13a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -24,16 +24,59 @@ | |||
24 | * Authors: | 24 | * Authors: |
25 | * Jerome Glisse <glisse@freedesktop.org> | 25 | * Jerome Glisse <glisse@freedesktop.org> |
26 | */ | 26 | */ |
27 | #include <linux/list_sort.h> | ||
27 | #include <drm/drmP.h> | 28 | #include <drm/drmP.h> |
28 | #include <drm/radeon_drm.h> | 29 | #include <drm/radeon_drm.h> |
29 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 31 | #include "radeon.h" |
31 | #include "radeon_trace.h" | 32 | #include "radeon_trace.h" |
32 | 33 | ||
34 | #define RADEON_CS_MAX_PRIORITY 32u | ||
35 | #define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1) | ||
36 | |||
37 | /* This is based on the bucket sort with O(n) time complexity. | ||
38 | * An item with priority "i" is added to bucket[i]. The lists are then | ||
39 | * concatenated in descending order. | ||
40 | */ | ||
41 | struct radeon_cs_buckets { | ||
42 | struct list_head bucket[RADEON_CS_NUM_BUCKETS]; | ||
43 | }; | ||
44 | |||
45 | static void radeon_cs_buckets_init(struct radeon_cs_buckets *b) | ||
46 | { | ||
47 | unsigned i; | ||
48 | |||
49 | for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) | ||
50 | INIT_LIST_HEAD(&b->bucket[i]); | ||
51 | } | ||
52 | |||
53 | static void radeon_cs_buckets_add(struct radeon_cs_buckets *b, | ||
54 | struct list_head *item, unsigned priority) | ||
55 | { | ||
56 | /* Since buffers which appear sooner in the relocation list are | ||
57 | * likely to be used more often than buffers which appear later | ||
58 | * in the list, the sort mustn't change the ordering of buffers | ||
59 | * with the same priority, i.e. it must be stable. | ||
60 | */ | ||
61 | list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]); | ||
62 | } | ||
63 | |||
64 | static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b, | ||
65 | struct list_head *out_list) | ||
66 | { | ||
67 | unsigned i; | ||
68 | |||
69 | /* Connect the sorted buckets in the output list. */ | ||
70 | for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) { | ||
71 | list_splice(&b->bucket[i], out_list); | ||
72 | } | ||
73 | } | ||
74 | |||
33 | static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | 75 | static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) |
34 | { | 76 | { |
35 | struct drm_device *ddev = p->rdev->ddev; | 77 | struct drm_device *ddev = p->rdev->ddev; |
36 | struct radeon_cs_chunk *chunk; | 78 | struct radeon_cs_chunk *chunk; |
79 | struct radeon_cs_buckets buckets; | ||
37 | unsigned i, j; | 80 | unsigned i, j; |
38 | bool duplicate; | 81 | bool duplicate; |
39 | 82 | ||
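Editor's note: the helpers above implement the O(n) stable bucket sort described in the comment. One subtlety: the splice loop walks the buckets from lowest to highest index, but because list_splice() inserts each bucket at the head of out_list, every bucket lands in front of the previous one, so the final list is in descending priority order. A minimal userspace sketch of the same idea (plain C with arrays instead of list_heads, not the kernel code):

#include <stdio.h>

#define MAX_PRIORITY 32u
#define NUM_BUCKETS (MAX_PRIORITY + 1)

struct item { unsigned priority; const char *name; };

int main(void)
{
        struct item in[] = {
                { 14, "colorbuf" }, { 31, "cmdbuf" },
                { 14, "texture"  }, { 32, "uvd-msg" },
        };
        /* bucket[p] holds indices into in[], in insertion order (stable) */
        int bucket[NUM_BUCKETS][8], count[NUM_BUCKETS] = { 0 };

        for (unsigned i = 0; i < sizeof(in) / sizeof(in[0]); i++) {
                unsigned p = in[i].priority;
                if (p > MAX_PRIORITY)   /* mirrors min(priority, MAX) */
                        p = MAX_PRIORITY;
                bucket[p][count[p]++] = i;
        }
        /* emit highest priority first; equal priorities keep input order */
        for (int p = MAX_PRIORITY; p >= 0; p--)
                for (int j = 0; j < count[p]; j++)
                        printf("%s (prio %u)\n", in[bucket[p][j]].name,
                               in[bucket[p][j]].priority);
        return 0;   /* prints: uvd-msg, cmdbuf, colorbuf, texture */
}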
@@ -52,8 +95,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
52 | if (p->relocs == NULL) { | 95 | if (p->relocs == NULL) { |
53 | return -ENOMEM; | 96 | return -ENOMEM; |
54 | } | 97 | } |
98 | |||
99 | radeon_cs_buckets_init(&buckets); | ||
100 | |||
55 | for (i = 0; i < p->nrelocs; i++) { | 101 | for (i = 0; i < p->nrelocs; i++) { |
56 | struct drm_radeon_cs_reloc *r; | 102 | struct drm_radeon_cs_reloc *r; |
103 | unsigned priority; | ||
57 | 104 | ||
58 | duplicate = false; | 105 | duplicate = false; |
59 | r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; | 106 | r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; |
@@ -78,8 +125,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
78 | } | 125 | } |
79 | p->relocs_ptr[i] = &p->relocs[i]; | 126 | p->relocs_ptr[i] = &p->relocs[i]; |
80 | p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); | 127 | p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); |
81 | p->relocs[i].lobj.bo = p->relocs[i].robj; | 128 | |
82 | p->relocs[i].lobj.written = !!r->write_domain; | 129 | /* The userspace buffer priorities are from 0 to 15. A higher |
130 | * number means the buffer is more important. | ||
131 | * Also, the buffers used for write have a higher priority than | ||
132 | * the buffers used for read only, which doubles the range | ||
133 | * to 0 to 31. 32 is reserved for the kernel driver. | ||
134 | */ | ||
135 | priority = (r->flags & 0xf) * 2 + !!r->write_domain; | ||
83 | 136 | ||
84 | /* the first reloc of an UVD job is the msg and that must be in | 137 | /* the first reloc of an UVD job is the msg and that must be in |
85 | VRAM, also put everything into VRAM on AGP cards to avoid | 138 |
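Editor's note, a worked example of the priority formula above: a relocation with userspace priority 7 in the low bits of r->flags maps to bucket 14 when read-only and bucket 15 when it has a write domain; the maximum userspace value 15 with a write domain gives 31, leaving 32 (RADEON_CS_MAX_PRIORITY) reserved for the kernel, e.g. the UVD message buffer handled in the next hunk.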
@@ -87,29 +140,38 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
87 | if (p->ring == R600_RING_TYPE_UVD_INDEX && | 140 | if (p->ring == R600_RING_TYPE_UVD_INDEX && |
88 | (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { | 141 | (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { |
89 | /* TODO: is this still needed for NI+ ? */ | 142 | /* TODO: is this still needed for NI+ ? */ |
90 | p->relocs[i].lobj.domain = | 143 | p->relocs[i].domain = |
91 | RADEON_GEM_DOMAIN_VRAM; | 144 | RADEON_GEM_DOMAIN_VRAM; |
92 | 145 | ||
93 | p->relocs[i].lobj.alt_domain = | 146 | p->relocs[i].alt_domain = |
94 | RADEON_GEM_DOMAIN_VRAM; | 147 | RADEON_GEM_DOMAIN_VRAM; |
95 | 148 | ||
149 | /* prioritize this over any other relocation */ | ||
150 | priority = RADEON_CS_MAX_PRIORITY; | ||
96 | } else { | 151 | } else { |
97 | uint32_t domain = r->write_domain ? | 152 | uint32_t domain = r->write_domain ? |
98 | r->write_domain : r->read_domains; | 153 | r->write_domain : r->read_domains; |
99 | 154 | ||
100 | p->relocs[i].lobj.domain = domain; | 155 | p->relocs[i].domain = domain; |
101 | if (domain == RADEON_GEM_DOMAIN_VRAM) | 156 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
102 | domain |= RADEON_GEM_DOMAIN_GTT; | 157 | domain |= RADEON_GEM_DOMAIN_GTT; |
103 | p->relocs[i].lobj.alt_domain = domain; | 158 | p->relocs[i].alt_domain = domain; |
104 | } | 159 | } |
105 | 160 | ||
106 | p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; | 161 | p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; |
107 | p->relocs[i].handle = r->handle; | 162 | p->relocs[i].handle = r->handle; |
108 | 163 | ||
109 | radeon_bo_list_add_object(&p->relocs[i].lobj, | 164 | radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, |
110 | &p->validated); | 165 | priority); |
111 | } | 166 | } |
112 | return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring); | 167 | |
168 | radeon_cs_buckets_get_list(&buckets, &p->validated); | ||
169 | |||
170 | if (p->cs_flags & RADEON_CS_USE_VM) | ||
171 | p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, | ||
172 | &p->validated); | ||
173 | |||
174 | return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); | ||
113 | } | 175 | } |
114 | 176 | ||
115 | static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) | 177 | static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) |
@@ -147,6 +209,10 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority | |||
147 | case RADEON_CS_RING_UVD: | 209 | case RADEON_CS_RING_UVD: |
148 | p->ring = R600_RING_TYPE_UVD_INDEX; | 210 | p->ring = R600_RING_TYPE_UVD_INDEX; |
149 | break; | 211 | break; |
212 | case RADEON_CS_RING_VCE: | ||
213 | /* TODO: only use the low priority ring for now */ | ||
214 | p->ring = TN_RING_TYPE_VCE1_INDEX; | ||
215 | break; | ||
150 | } | 216 | } |
151 | return 0; | 217 | return 0; |
152 | } | 218 | } |
@@ -286,6 +352,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
286 | return 0; | 352 | return 0; |
287 | } | 353 | } |
288 | 354 | ||
355 | static int cmp_size_smaller_first(void *priv, struct list_head *a, | ||
356 | struct list_head *b) | ||
357 | { | ||
358 | struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); | ||
359 | struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); | ||
360 | |||
361 | /* Sort A before B if A is smaller. */ | ||
362 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; | ||
363 | } | ||
364 | |||
289 | /** | 365 | /** |
290 | * cs_parser_fini() - clean parser states | 366 | * cs_parser_fini() - clean parser states |
291 | * @parser: parser structure holding parsing context. | 367 | * @parser: parser structure holding parsing context. |
@@ -299,6 +375,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo | |||
299 | unsigned i; | 375 | unsigned i; |
300 | 376 | ||
301 | if (!error) { | 377 | if (!error) { |
378 | /* Sort the buffer list from the smallest to largest buffer, | ||
379 | * which affects the order of buffers in the LRU list. | ||
380 | * This ensures that the smallest buffers are added first ||
381 | * to the LRU list, so they are likely to be evicted first ||
382 | * later on, instead of large buffers whose eviction is more ||
383 | * expensive. | ||
384 | * | ||
385 | * This slightly lowers the number of bytes moved by TTM | ||
386 | * per frame under memory pressure. | ||
387 | */ | ||
388 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | ||
389 | |||
302 | ttm_eu_fence_buffer_objects(&parser->ticket, | 390 | ttm_eu_fence_buffer_objects(&parser->ticket, |
303 | &parser->validated, | 391 | &parser->validated, |
304 | parser->ib.fence); | 392 | parser->ib.fence); |
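Editor's note: the comparator above follows the usual contract, a negative return sorts a before b. A tiny standalone illustration of that contract with qsort(); note that list_sort() in the kernel is a stable merge sort, whereas qsort() guarantees no stability, and stability is what preserves the earlier bucket-sort order among buffers of equal size:

#include <stdio.h>
#include <stdlib.h>

static int cmp_size_smaller_first(const void *a, const void *b)
{
        unsigned long pa = *(const unsigned long *)a;
        unsigned long pb = *(const unsigned long *)b;

        /* Sort A before B if A has fewer pages; this three-way idiom
         * avoids the overflow a plain subtraction could hit. */
        return (pa > pb) - (pa < pb);
}

int main(void)
{
        unsigned long num_pages[] = { 4096, 1, 64, 16 };

        qsort(num_pages, 4, sizeof(num_pages[0]), cmp_size_smaller_first);
        for (int i = 0; i < 4; i++)
                printf("%lu\n", num_pages[i]);  /* 1, 16, 64, 4096 */
        return 0;
}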
@@ -316,6 +404,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo | |||
316 | kfree(parser->track); | 404 | kfree(parser->track); |
317 | kfree(parser->relocs); | 405 | kfree(parser->relocs); |
318 | kfree(parser->relocs_ptr); | 406 | kfree(parser->relocs_ptr); |
407 | kfree(parser->vm_bos); | ||
319 | for (i = 0; i < parser->nchunks; i++) | 408 | for (i = 0; i < parser->nchunks; i++) |
320 | drm_free_large(parser->chunks[i].kdata); | 409 | drm_free_large(parser->chunks[i].kdata); |
321 | kfree(parser->chunks); | 410 | kfree(parser->chunks); |
@@ -343,6 +432,9 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, | |||
343 | 432 | ||
344 | if (parser->ring == R600_RING_TYPE_UVD_INDEX) | 433 | if (parser->ring == R600_RING_TYPE_UVD_INDEX) |
345 | radeon_uvd_note_usage(rdev); | 434 | radeon_uvd_note_usage(rdev); |
435 | else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) || | ||
436 | (parser->ring == TN_RING_TYPE_VCE2_INDEX)) | ||
437 | radeon_vce_note_usage(rdev); | ||
346 | 438 | ||
347 | radeon_cs_sync_rings(parser); | 439 | radeon_cs_sync_rings(parser); |
348 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); | 440 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); |
@@ -352,24 +444,32 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, | |||
352 | return r; | 444 | return r; |
353 | } | 445 | } |
354 | 446 | ||
355 | static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, | 447 | static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, |
356 | struct radeon_vm *vm) | 448 | struct radeon_vm *vm) |
357 | { | 449 | { |
358 | struct radeon_device *rdev = parser->rdev; | 450 | struct radeon_device *rdev = p->rdev; |
359 | struct radeon_bo_list *lobj; | 451 | int i, r; |
360 | struct radeon_bo *bo; | ||
361 | int r; | ||
362 | 452 | ||
363 | r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); | 453 | r = radeon_vm_update_page_directory(rdev, vm); |
364 | if (r) { | 454 | if (r) |
365 | return r; | 455 | return r; |
366 | } | 456 | |
367 | list_for_each_entry(lobj, &parser->validated, tv.head) { | 457 | r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, |
368 | bo = lobj->bo; | 458 | &rdev->ring_tmp_bo.bo->tbo.mem); |
369 | r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem); | 459 | if (r) |
370 | if (r) { | 460 | return r; |
461 | |||
462 | for (i = 0; i < p->nrelocs; i++) { | ||
463 | struct radeon_bo *bo; | ||
464 | |||
465 | /* ignore duplicates */ | ||
466 | if (p->relocs_ptr[i] != &p->relocs[i]) | ||
467 | continue; | ||
468 | |||
469 | bo = p->relocs[i].robj; | ||
470 | r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); | ||
471 | if (r) | ||
371 | return r; | 472 | return r; |
372 | } | ||
373 | } | 473 | } |
374 | return 0; | 474 | return 0; |
375 | } | 475 | } |
@@ -401,20 +501,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, | |||
401 | if (parser->ring == R600_RING_TYPE_UVD_INDEX) | 501 | if (parser->ring == R600_RING_TYPE_UVD_INDEX) |
402 | radeon_uvd_note_usage(rdev); | 502 | radeon_uvd_note_usage(rdev); |
403 | 503 | ||
404 | mutex_lock(&rdev->vm_manager.lock); | ||
405 | mutex_lock(&vm->mutex); | 504 | mutex_lock(&vm->mutex); |
406 | r = radeon_vm_alloc_pt(rdev, vm); | ||
407 | if (r) { | ||
408 | goto out; | ||
409 | } | ||
410 | r = radeon_bo_vm_update_pte(parser, vm); | 505 | r = radeon_bo_vm_update_pte(parser, vm); |
411 | if (r) { | 506 | if (r) { |
412 | goto out; | 507 | goto out; |
413 | } | 508 | } |
414 | radeon_cs_sync_rings(parser); | 509 | radeon_cs_sync_rings(parser); |
415 | radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); | 510 | radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); |
416 | radeon_semaphore_sync_to(parser->ib.semaphore, | ||
417 | radeon_vm_grab_id(rdev, vm, parser->ring)); | ||
418 | 511 | ||
419 | if ((rdev->family >= CHIP_TAHITI) && | 512 | if ((rdev->family >= CHIP_TAHITI) && |
420 | (parser->chunk_const_ib_idx != -1)) { | 513 | (parser->chunk_const_ib_idx != -1)) { |
@@ -423,14 +516,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, | |||
423 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); | 516 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); |
424 | } | 517 | } |
425 | 518 | ||
426 | if (!r) { | ||
427 | radeon_vm_fence(rdev, vm, parser->ib.fence); | ||
428 | } | ||
429 | |||
430 | out: | 519 | out: |
431 | radeon_vm_add_to_lru(rdev, vm); | ||
432 | mutex_unlock(&vm->mutex); | 520 | mutex_unlock(&vm->mutex); |
433 | mutex_unlock(&rdev->vm_manager.lock); | ||
434 | return r; | 521 | return r; |
435 | } | 522 | } |
436 | 523 | ||
@@ -698,9 +785,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, | |||
698 | /* FIXME: we assume reloc size is 4 dwords */ | 785 | /* FIXME: we assume reloc size is 4 dwords */ |
699 | if (nomm) { | 786 | if (nomm) { |
700 | *cs_reloc = p->relocs; | 787 | *cs_reloc = p->relocs; |
701 | (*cs_reloc)->lobj.gpu_offset = | 788 | (*cs_reloc)->gpu_offset = |
702 | (u64)relocs_chunk->kdata[idx + 3] << 32; | 789 | (u64)relocs_chunk->kdata[idx + 3] << 32; |
703 | (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; | 790 | (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; |
704 | } else | 791 | } else |
705 | *cs_reloc = p->relocs_ptr[(idx / 4)]; | 792 | *cs_reloc = p->relocs_ptr[(idx / 4)]; |
706 | return 0; | 793 | return 0; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 044bc98fb459..2e72dcd94b13 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1191,14 +1191,12 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1191 | r = radeon_gem_init(rdev); | 1191 | r = radeon_gem_init(rdev); |
1192 | if (r) | 1192 | if (r) |
1193 | return r; | 1193 | return r; |
1194 | /* initialize vm here */ | 1194 | |
1195 | mutex_init(&rdev->vm_manager.lock); | ||
1196 | /* Adjust VM size here. | 1195 | /* Adjust VM size here. |
1197 | * Currently set to 4GB ((1 << 20) 4k pages). | 1196 | * Currently set to 4GB ((1 << 20) 4k pages). |
1198 | * Max GPUVM size for cayman and SI is 40 bits. | 1197 | * Max GPUVM size for cayman and SI is 40 bits. |
1199 | */ | 1198 | */ |
1200 | rdev->vm_manager.max_pfn = 1 << 20; | 1199 | rdev->vm_manager.max_pfn = 1 << 20; |
1201 | INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); | ||
1202 | 1200 | ||
1203 | /* Set asic functions */ | 1201 | /* Set asic functions */ |
1204 | r = radeon_asic_init(rdev); | 1202 | r = radeon_asic_init(rdev); |
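Editor's note, the arithmetic behind the comment: (1 << 20) pages at 4 KiB each is 4 GiB of per-VM address space, comfortably inside the 40-bit GPUVM limit mentioned for Cayman and SI.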
@@ -1445,10 +1443,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | |||
1445 | /* evict vram memory */ | 1443 | /* evict vram memory */ |
1446 | radeon_bo_evict_vram(rdev); | 1444 | radeon_bo_evict_vram(rdev); |
1447 | 1445 | ||
1448 | mutex_lock(&rdev->ring_lock); | ||
1449 | /* wait for gpu to finish processing current batch */ | 1446 | /* wait for gpu to finish processing current batch */ |
1450 | for (i = 0; i < RADEON_NUM_RINGS; i++) { | 1447 | for (i = 0; i < RADEON_NUM_RINGS; i++) { |
1451 | r = radeon_fence_wait_empty_locked(rdev, i); | 1448 | r = radeon_fence_wait_empty(rdev, i); |
1452 | if (r) { | 1449 | if (r) { |
1453 | /* delay GPU reset to resume */ | 1450 | /* delay GPU reset to resume */ |
1454 | force_completion = true; | 1451 | force_completion = true; |
@@ -1457,7 +1454,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) | |||
1457 | if (force_completion) { | 1454 | if (force_completion) { |
1458 | radeon_fence_driver_force_completion(rdev); | 1455 | radeon_fence_driver_force_completion(rdev); |
1459 | } | 1456 | } |
1460 | mutex_unlock(&rdev->ring_lock); | ||
1461 | 1457 | ||
1462 | radeon_save_bios_scratch_regs(rdev); | 1458 | radeon_save_bios_scratch_regs(rdev); |
1463 | 1459 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 84a1bbb75f91..4392b7c95ee6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -79,9 +79,10 @@ | |||
79 | * 2.35.0 - Add CIK macrotile mode array query | 79 | * 2.35.0 - Add CIK macrotile mode array query |
80 | * 2.36.0 - Fix CIK DCE tiling setup | 80 | * 2.36.0 - Fix CIK DCE tiling setup |
81 | * 2.37.0 - allow GS ring setup on r6xx/r7xx | 81 | * 2.37.0 - allow GS ring setup on r6xx/r7xx |
82 | * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN) | ||
82 | */ | 83 | */ |
83 | #define KMS_DRIVER_MAJOR 2 | 84 | #define KMS_DRIVER_MAJOR 2 |
84 | #define KMS_DRIVER_MINOR 37 | 85 | #define KMS_DRIVER_MINOR 38 |
85 | #define KMS_DRIVER_PATCHLEVEL 0 | 86 | #define KMS_DRIVER_PATCHLEVEL 0 |
86 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 87 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
87 | int radeon_driver_unload_kms(struct drm_device *dev); | 88 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index c37cb79a9489..a77b1c13ea43 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) | |||
288 | * @rdev: radeon device pointer | 288 | * @rdev: radeon device pointer |
289 | * @target_seq: sequence number(s) we want to wait for | 289 | * @target_seq: sequence number(s) we want to wait for |
290 | * @intr: use interruptible sleep | 290 |
291 | * @lock_ring: whether the ring should be locked or not | ||
292 | * | 291 | * |
293 | * Wait for the requested sequence number(s) to be written by any ring | 292 | * Wait for the requested sequence number(s) to be written by any ring |
294 | * (all asics). Sequence number array is indexed by ring id. | 293 |
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) | |||
299 | * -EDEADLK is returned when a GPU lockup has been detected. | 298 | * -EDEADLK is returned when a GPU lockup has been detected. |
300 | */ | 299 | */ |
301 | static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, | 300 | static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, |
302 | bool intr, bool lock_ring) | 301 | bool intr) |
303 | { | 302 | { |
304 | uint64_t last_seq[RADEON_NUM_RINGS]; | 303 | uint64_t last_seq[RADEON_NUM_RINGS]; |
305 | bool signaled; | 304 | bool signaled; |
@@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, | |||
358 | if (i != RADEON_NUM_RINGS) | 357 | if (i != RADEON_NUM_RINGS) |
359 | continue; | 358 | continue; |
360 | 359 | ||
361 | if (lock_ring) | ||
362 | mutex_lock(&rdev->ring_lock); | ||
363 | |||
364 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 360 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
365 | if (!target_seq[i]) | 361 | if (!target_seq[i]) |
366 | continue; | 362 | continue; |
@@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, | |||
378 | 374 | ||
379 | /* remember that we need a reset */ | 375 |
380 | rdev->needs_reset = true; | 376 | rdev->needs_reset = true; |
381 | if (lock_ring) | ||
382 | mutex_unlock(&rdev->ring_lock); | ||
383 | wake_up_all(&rdev->fence_queue); | 377 | wake_up_all(&rdev->fence_queue); |
384 | return -EDEADLK; | 378 | return -EDEADLK; |
385 | } | 379 | } |
386 | |||
387 | if (lock_ring) | ||
388 | mutex_unlock(&rdev->ring_lock); | ||
389 | } | 380 | } |
390 | } | 381 | } |
391 | return 0; | 382 | return 0; |
@@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) | |||
416 | if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) | 407 | if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) |
417 | return 0; | 408 | return 0; |
418 | 409 | ||
419 | r = radeon_fence_wait_seq(fence->rdev, seq, intr, true); | 410 | r = radeon_fence_wait_seq(fence->rdev, seq, intr); |
420 | if (r) | 411 | if (r) |
421 | return r; | 412 | return r; |
422 | 413 | ||
@@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, | |||
464 | if (num_rings == 0) | 455 | if (num_rings == 0) |
465 | return -ENOENT; | 456 | return -ENOENT; |
466 | 457 | ||
467 | r = radeon_fence_wait_seq(rdev, seq, intr, true); | 458 | r = radeon_fence_wait_seq(rdev, seq, intr); |
468 | if (r) { | 459 | if (r) { |
469 | return r; | 460 | return r; |
470 | } | 461 | } |
@@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, | |||
472 | } | 463 | } |
473 | 464 | ||
474 | /** | 465 | /** |
475 | * radeon_fence_wait_locked - wait for a fence to signal | 466 | * radeon_fence_wait_next - wait for the next fence to signal |
476 | * | ||
477 | * @fence: radeon fence object | ||
478 | * | ||
479 | * Wait for the requested fence to signal (all asics). | ||
480 | * Returns 0 if the fence has passed, error for all other cases. | ||
481 | */ | ||
482 | int radeon_fence_wait_locked(struct radeon_fence *fence) | ||
483 | { | ||
484 | uint64_t seq[RADEON_NUM_RINGS] = {}; | ||
485 | int r; | ||
486 | |||
487 | if (fence == NULL) { | ||
488 | WARN(1, "Querying an invalid fence : %p !\n", fence); | ||
489 | return -EINVAL; | ||
490 | } | ||
491 | |||
492 | seq[fence->ring] = fence->seq; | ||
493 | if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) | ||
494 | return 0; | ||
495 | |||
496 | r = radeon_fence_wait_seq(fence->rdev, seq, false, false); | ||
497 | if (r) | ||
498 | return r; | ||
499 | |||
500 | fence->seq = RADEON_FENCE_SIGNALED_SEQ; | ||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | /** | ||
505 | * radeon_fence_wait_next_locked - wait for the next fence to signal | ||
506 | * | 467 | * |
507 | * @rdev: radeon device pointer | 468 | * @rdev: radeon device pointer |
508 | * @ring: ring index the fence is associated with | 469 | * @ring: ring index the fence is associated with |
@@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence) | |||
511 | * Returns 0 if the next fence has passed, error for all other cases. | 472 | * Returns 0 if the next fence has passed, error for all other cases. |
512 | * Caller must hold ring lock. | 473 | * Caller must hold ring lock. |
513 | */ | 474 | */ |
514 | int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) | 475 | int radeon_fence_wait_next(struct radeon_device *rdev, int ring) |
515 | { | 476 | { |
516 | uint64_t seq[RADEON_NUM_RINGS] = {}; | 477 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
517 | 478 | ||
@@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) | |||
521 | already the last emitted fence */ | 482 |
522 | return -ENOENT; | 483 | return -ENOENT; |
523 | } | 484 | } |
524 | return radeon_fence_wait_seq(rdev, seq, false, false); | 485 | return radeon_fence_wait_seq(rdev, seq, false); |
525 | } | 486 | } |
526 | 487 | ||
527 | /** | 488 | /** |
528 | * radeon_fence_wait_empty_locked - wait for all fences to signal | 489 | * radeon_fence_wait_empty - wait for all fences to signal |
529 | * | 490 | * |
530 | * @rdev: radeon device pointer | 491 | * @rdev: radeon device pointer |
531 | * @ring: ring index the fence is associated with | 492 | * @ring: ring index the fence is associated with |
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) | |||
534 | * Returns 0 if the fences have passed, error for all other cases. | 495 | * Returns 0 if the fences have passed, error for all other cases. |
535 | * Caller must hold ring lock. | 496 | * Caller must hold ring lock. |
536 | */ | 497 | */ |
537 | int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) | 498 | int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) |
538 | { | 499 | { |
539 | uint64_t seq[RADEON_NUM_RINGS] = {}; | 500 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
540 | int r; | 501 | int r; |
@@ -543,7 +504,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) | |||
543 | if (!seq[ring]) | 504 | if (!seq[ring]) |
544 | return 0; | 505 | return 0; |
545 | 506 | ||
546 | r = radeon_fence_wait_seq(rdev, seq, false, false); | 507 | r = radeon_fence_wait_seq(rdev, seq, false); |
547 | if (r) { | 508 | if (r) { |
548 | if (r == -EDEADLK) | 509 | if (r == -EDEADLK) |
549 | return -EDEADLK; | 510 | return -EDEADLK; |
@@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev) | |||
794 | for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { | 755 | for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
795 | if (!rdev->fence_drv[ring].initialized) | 756 | if (!rdev->fence_drv[ring].initialized) |
796 | continue; | 757 | continue; |
797 | r = radeon_fence_wait_empty_locked(rdev, ring); | 758 | r = radeon_fence_wait_empty(rdev, ring); |
798 | if (r) { | 759 | if (r) { |
799 | /* no need to trigger GPU reset as we are unloading */ | 760 | /* no need to trigger GPU reset as we are unloading */ |
800 | radeon_fence_driver_force_completion(rdev); | 761 | radeon_fence_driver_force_completion(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a8f9b463bf2a..2e723651069b 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <drm/drmP.h> | 28 | #include <drm/drmP.h> |
29 | #include <drm/radeon_drm.h> | 29 | #include <drm/radeon_drm.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_reg.h" | ||
32 | #include "radeon_trace.h" | ||
33 | 31 | ||
34 | /* | 32 | /* |
35 | * GART | 33 | * GART |
@@ -394,959 +392,3 @@ void radeon_gart_fini(struct radeon_device *rdev) | |||
394 | 392 | ||
395 | radeon_dummy_page_fini(rdev); | 393 | radeon_dummy_page_fini(rdev); |
396 | } | 394 | } |
397 | |||
398 | /* | ||
399 | * GPUVM | ||
400 | * GPUVM is similar to the legacy gart on older asics, however | ||
401 | * rather than there being a single global gart table | ||
402 | * for the entire GPU, there are multiple VM page tables active | ||
403 | * at any given time. The VM page tables can contain a mix | ||
404 | * vram pages and system memory pages and system memory pages | ||
405 | * can be mapped as snooped (cached system pages) or unsnooped | ||
406 | * (uncached system pages). | ||
407 | * Each VM has an ID associated with it and there is a page table | ||
408 | * associated with each VMID. When execting a command buffer, | ||
409 | * the kernel tells the the ring what VMID to use for that command | ||
410 | * buffer. VMIDs are allocated dynamically as commands are submitted. | ||
411 | * The userspace drivers maintain their own address space and the kernel | ||
412 | * sets up their pages tables accordingly when they submit their | ||
413 | * command buffers and a VMID is assigned. | ||
414 | * Cayman/Trinity support up to 8 active VMs at any given time; | ||
415 | * SI supports 16. | ||
416 | */ | ||
417 | |||
418 | /* | ||
419 | * vm helpers | ||
420 | * | ||
421 | * TODO bind a default page at vm initialization for default address | ||
422 | */ | ||
423 | |||
424 | /** | ||
425 | * radeon_vm_num_pde - return the number of page directory entries | ||
426 | * | ||
427 | * @rdev: radeon_device pointer | ||
428 | * | ||
429 | * Calculate the number of page directory entries (cayman+). | ||
430 | */ | ||
431 | static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) | ||
432 | { | ||
433 | return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * radeon_vm_directory_size - returns the size of the page directory in bytes | ||
438 | * | ||
439 | * @rdev: radeon_device pointer | ||
440 | * | ||
441 | * Calculate the size of the page directory in bytes (cayman+). | ||
442 | */ | ||
443 | static unsigned radeon_vm_directory_size(struct radeon_device *rdev) | ||
444 | { | ||
445 | return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); | ||
446 | } | ||
447 | |||
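Editor's note, a worked example for the two size helpers above, assuming RADEON_VM_BLOCK_SIZE is 9 (i.e. each page directory entry covers 2^9 pages; check radeon.h of this series for the actual value): with max_pfn = 1 << 20 there are 1 << 11 = 2048 PDEs, so radeon_vm_directory_size() returns RADEON_GPU_PAGE_ALIGN(2048 * 8) = 16 KiB.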
448 | /** | ||
449 | * radeon_vm_manager_init - init the vm manager | ||
450 | * | ||
451 | * @rdev: radeon_device pointer | ||
452 | * | ||
453 | * Init the vm manager (cayman+). | ||
454 | * Returns 0 for success, error for failure. | ||
455 | */ | ||
456 | int radeon_vm_manager_init(struct radeon_device *rdev) | ||
457 | { | ||
458 | struct radeon_vm *vm; | ||
459 | struct radeon_bo_va *bo_va; | ||
460 | int r; | ||
461 | unsigned size; | ||
462 | |||
463 | if (!rdev->vm_manager.enabled) { | ||
464 | /* allocate enough for 2 full VM pts */ | ||
465 | size = radeon_vm_directory_size(rdev); | ||
466 | size += rdev->vm_manager.max_pfn * 8; | ||
467 | size *= 2; | ||
468 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, | ||
469 | RADEON_GPU_PAGE_ALIGN(size), | ||
470 | RADEON_VM_PTB_ALIGN_SIZE, | ||
471 | RADEON_GEM_DOMAIN_VRAM); | ||
472 | if (r) { | ||
473 | dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", | ||
474 | (rdev->vm_manager.max_pfn * 8) >> 10); | ||
475 | return r; | ||
476 | } | ||
477 | |||
478 | r = radeon_asic_vm_init(rdev); | ||
479 | if (r) | ||
480 | return r; | ||
481 | |||
482 | rdev->vm_manager.enabled = true; | ||
483 | |||
484 | r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); | ||
485 | if (r) | ||
486 | return r; | ||
487 | } | ||
488 | |||
489 | /* restore page table */ | ||
490 | list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { | ||
491 | if (vm->page_directory == NULL) | ||
492 | continue; | ||
493 | |||
494 | list_for_each_entry(bo_va, &vm->va, vm_list) { | ||
495 | bo_va->valid = false; | ||
496 | } | ||
497 | } | ||
498 | return 0; | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * radeon_vm_free_pt - free the page table for a specific vm | ||
503 | * | ||
504 | * @rdev: radeon_device pointer | ||
505 | * @vm: vm to unbind | ||
506 | * | ||
507 | * Free the page table of a specific vm (cayman+). | ||
508 | * | ||
509 | * Global and local mutex must be locked! ||
510 | */ | ||
511 | static void radeon_vm_free_pt(struct radeon_device *rdev, | ||
512 | struct radeon_vm *vm) | ||
513 | { | ||
514 | struct radeon_bo_va *bo_va; | ||
515 | int i; | ||
516 | |||
517 | if (!vm->page_directory) | ||
518 | return; | ||
519 | |||
520 | list_del_init(&vm->list); | ||
521 | radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); | ||
522 | |||
523 | list_for_each_entry(bo_va, &vm->va, vm_list) { | ||
524 | bo_va->valid = false; | ||
525 | } | ||
526 | |||
527 | if (vm->page_tables == NULL) | ||
528 | return; | ||
529 | |||
530 | for (i = 0; i < radeon_vm_num_pdes(rdev); i++) | ||
531 | radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence); | ||
532 | |||
533 | kfree(vm->page_tables); | ||
534 | } | ||
535 | |||
536 | /** | ||
537 | * radeon_vm_manager_fini - tear down the vm manager | ||
538 | * | ||
539 | * @rdev: radeon_device pointer | ||
540 | * | ||
541 | * Tear down the VM manager (cayman+). | ||
542 | */ | ||
543 | void radeon_vm_manager_fini(struct radeon_device *rdev) | ||
544 | { | ||
545 | struct radeon_vm *vm, *tmp; | ||
546 | int i; | ||
547 | |||
548 | if (!rdev->vm_manager.enabled) | ||
549 | return; | ||
550 | |||
551 | mutex_lock(&rdev->vm_manager.lock); | ||
552 | /* free all allocated page tables */ | ||
553 | list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { | ||
554 | mutex_lock(&vm->mutex); | ||
555 | radeon_vm_free_pt(rdev, vm); | ||
556 | mutex_unlock(&vm->mutex); | ||
557 | } | ||
558 | for (i = 0; i < RADEON_NUM_VM; ++i) { | ||
559 | radeon_fence_unref(&rdev->vm_manager.active[i]); | ||
560 | } | ||
561 | radeon_asic_vm_fini(rdev); | ||
562 | mutex_unlock(&rdev->vm_manager.lock); | ||
563 | |||
564 | radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); | ||
565 | radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager); | ||
566 | rdev->vm_manager.enabled = false; | ||
567 | } | ||
568 | |||
569 | /** | ||
570 | * radeon_vm_evict - evict page table to make room for new one | ||
571 | * | ||
572 | * @rdev: radeon_device pointer | ||
573 | * @vm: VM we want to allocate something for | ||
574 | * | ||
575 | * Evict a VM from the lru, making sure that it isn't @vm. (cayman+). | ||
576 | * Returns 0 for success, -ENOMEM for failure. | ||
577 | * | ||
578 | * Global and local mutex must be locked! | ||
579 | */ | ||
580 | static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm) | ||
581 | { | ||
582 | struct radeon_vm *vm_evict; | ||
583 | |||
584 | if (list_empty(&rdev->vm_manager.lru_vm)) | ||
585 | return -ENOMEM; | ||
586 | |||
587 | vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, | ||
588 | struct radeon_vm, list); | ||
589 | if (vm_evict == vm) | ||
590 | return -ENOMEM; | ||
591 | |||
592 | mutex_lock(&vm_evict->mutex); | ||
593 | radeon_vm_free_pt(rdev, vm_evict); | ||
594 | mutex_unlock(&vm_evict->mutex); | ||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /** | ||
599 | * radeon_vm_alloc_pt - allocates a page table for a VM | ||
600 | * | ||
601 | * @rdev: radeon_device pointer | ||
602 | * @vm: vm to bind | ||
603 | * | ||
604 | * Allocate a page table for the requested vm (cayman+). | ||
605 | * Returns 0 for success, error for failure. | ||
606 | * | ||
607 | * Global and local mutex must be locked! | ||
608 | */ | ||
609 | int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) | ||
610 | { | ||
611 | unsigned pd_size, pd_entries, pts_size; | ||
612 | struct radeon_ib ib; | ||
613 | int r; | ||
614 | |||
615 | if (vm == NULL) { | ||
616 | return -EINVAL; | ||
617 | } | ||
618 | |||
619 | if (vm->page_directory != NULL) { | ||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | pd_size = radeon_vm_directory_size(rdev); | ||
624 | pd_entries = radeon_vm_num_pdes(rdev); | ||
625 | |||
626 | retry: | ||
627 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | ||
628 | &vm->page_directory, pd_size, | ||
629 | RADEON_VM_PTB_ALIGN_SIZE, false); | ||
630 | if (r == -ENOMEM) { | ||
631 | r = radeon_vm_evict(rdev, vm); | ||
632 | if (r) | ||
633 | return r; | ||
634 | goto retry; | ||
635 | |||
636 | } else if (r) { | ||
637 | return r; | ||
638 | } | ||
639 | |||
640 | vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory); | ||
641 | |||
642 | /* Initially clear the page directory */ | ||
643 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, | ||
644 | NULL, pd_entries * 2 + 64); | ||
645 | if (r) { | ||
646 | radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); | ||
647 | return r; | ||
648 | } | ||
649 | |||
650 | ib.length_dw = 0; | ||
651 | |||
652 | radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr, | ||
653 | 0, pd_entries, 0, 0); | ||
654 | |||
655 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); | ||
656 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
657 | if (r) { | ||
658 | radeon_ib_free(rdev, &ib); | ||
659 | radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); | ||
660 | return r; | ||
661 | } | ||
662 | radeon_fence_unref(&vm->fence); | ||
663 | vm->fence = radeon_fence_ref(ib.fence); | ||
664 | radeon_ib_free(rdev, &ib); | ||
665 | radeon_fence_unref(&vm->last_flush); | ||
666 | |||
667 | /* allocate page table array */ | ||
668 | pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *); | ||
669 | vm->page_tables = kzalloc(pts_size, GFP_KERNEL); | ||
670 | |||
671 | if (vm->page_tables == NULL) { | ||
672 | DRM_ERROR("Cannot allocate memory for page table array\n"); | ||
673 | radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); | ||
674 | return -ENOMEM; | ||
675 | } | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | /** | ||
681 | * radeon_vm_add_to_lru - add VMs page table to LRU list | ||
682 | * | ||
683 | * @rdev: radeon_device pointer | ||
684 | * @vm: vm to add to LRU | ||
685 | * | ||
686 | * Add the allocated page table to the LRU list (cayman+). | ||
687 | * | ||
688 | * Global mutex must be locked! | ||
689 | */ | ||
690 | void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm) | ||
691 | { | ||
692 | list_del_init(&vm->list); | ||
693 | list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); | ||
694 | } | ||
695 | |||
696 | /** | ||
697 | * radeon_vm_grab_id - allocate the next free VMID | ||
698 | * | ||
699 | * @rdev: radeon_device pointer | ||
700 | * @vm: vm to allocate id for | ||
701 | * @ring: ring we want to submit job to | ||
702 | * | ||
703 | * Allocate an id for the vm (cayman+). | ||
704 | * Returns the fence we need to sync to (if any). | ||
705 | * | ||
706 | * Global and local mutex must be locked! | ||
707 | */ | ||
708 | struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, | ||
709 | struct radeon_vm *vm, int ring) | ||
710 | { | ||
711 | struct radeon_fence *best[RADEON_NUM_RINGS] = {}; | ||
712 | unsigned choices[2] = {}; | ||
713 | unsigned i; | ||
714 | |||
715 | /* check if the id is still valid */ | ||
716 | if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) | ||
717 | return NULL; | ||
718 | |||
719 | /* we definitely need to flush */ ||
720 | radeon_fence_unref(&vm->last_flush); | ||
721 | |||
722 | /* skip over VMID 0, since it is the system VM */ | ||
723 | for (i = 1; i < rdev->vm_manager.nvm; ++i) { | ||
724 | struct radeon_fence *fence = rdev->vm_manager.active[i]; | ||
725 | |||
726 | if (fence == NULL) { | ||
727 | /* found a free one */ | ||
728 | vm->id = i; | ||
729 | trace_radeon_vm_grab_id(vm->id, ring); | ||
730 | return NULL; | ||
731 | } | ||
732 | |||
733 | if (radeon_fence_is_earlier(fence, best[fence->ring])) { | ||
734 | best[fence->ring] = fence; | ||
735 | choices[fence->ring == ring ? 0 : 1] = i; | ||
736 | } | ||
737 | } | ||
738 | |||
739 | for (i = 0; i < 2; ++i) { | ||
740 | if (choices[i]) { | ||
741 | vm->id = choices[i]; | ||
742 | trace_radeon_vm_grab_id(vm->id, ring); | ||
743 | return rdev->vm_manager.active[choices[i]]; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | /* should never happen */ | ||
748 | BUG(); | ||
749 | return NULL; | ||
750 | } | ||
751 | |||
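Editor's note on the selection order in the removed code above: any VMID whose active fence is NULL is taken immediately; failing that, choices[0] (an id whose most recent fence was emitted on the submitting ring) is preferred over choices[1], presumably because syncing to a fence on the same ring is ordered implicitly and needs no semaphore wait.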
752 | /** | ||
753 | * radeon_vm_fence - remember fence for vm | ||
754 | * | ||
755 | * @rdev: radeon_device pointer | ||
756 | * @vm: vm we want to fence | ||
757 | * @fence: fence to remember | ||
758 | * | ||
759 | * Fence the vm (cayman+). | ||
760 | * Set the fence used to protect page table and id. | ||
761 | * | ||
762 | * Global and local mutex must be locked! | ||
763 | */ | ||
764 | void radeon_vm_fence(struct radeon_device *rdev, | ||
765 | struct radeon_vm *vm, | ||
766 | struct radeon_fence *fence) | ||
767 | { | ||
768 | radeon_fence_unref(&rdev->vm_manager.active[vm->id]); | ||
769 | rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); | ||
770 | |||
771 | radeon_fence_unref(&vm->fence); | ||
772 | vm->fence = radeon_fence_ref(fence); | ||
773 | |||
774 | radeon_fence_unref(&vm->last_id_use); | ||
775 | vm->last_id_use = radeon_fence_ref(fence); | ||
776 | } | ||
777 | |||
778 | /** | ||
779 | * radeon_vm_bo_find - find the bo_va for a specific vm & bo | ||
780 | * | ||
781 | * @vm: requested vm | ||
782 | * @bo: requested buffer object | ||
783 | * | ||
784 | * Find @bo inside the requested vm (cayman+). | ||
785 | * Search inside the @bo's vm list for the requested vm | ||
786 | * Returns the found bo_va or NULL if none is found | ||
787 | * | ||
788 | * Object has to be reserved! | ||
789 | */ | ||
790 | struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, | ||
791 | struct radeon_bo *bo) | ||
792 | { | ||
793 | struct radeon_bo_va *bo_va; | ||
794 | |||
795 | list_for_each_entry(bo_va, &bo->va, bo_list) { | ||
796 | if (bo_va->vm == vm) { | ||
797 | return bo_va; | ||
798 | } | ||
799 | } | ||
800 | return NULL; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * radeon_vm_bo_add - add a bo to a specific vm | ||
805 | * | ||
806 | * @rdev: radeon_device pointer | ||
807 | * @vm: requested vm | ||
808 | * @bo: radeon buffer object | ||
809 | * | ||
810 | * Add @bo into the requested vm (cayman+). | ||
811 | * Add @bo to the list of bos associated with the vm | ||
812 | * Returns newly added bo_va or NULL for failure | ||
813 | * | ||
814 | * Object has to be reserved! | ||
815 | */ | ||
816 | struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, | ||
817 | struct radeon_vm *vm, | ||
818 | struct radeon_bo *bo) | ||
819 | { | ||
820 | struct radeon_bo_va *bo_va; | ||
821 | |||
822 | bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); | ||
823 | if (bo_va == NULL) { | ||
824 | return NULL; | ||
825 | } | ||
826 | bo_va->vm = vm; | ||
827 | bo_va->bo = bo; | ||
828 | bo_va->soffset = 0; | ||
829 | bo_va->eoffset = 0; | ||
830 | bo_va->flags = 0; | ||
831 | bo_va->valid = false; | ||
832 | bo_va->ref_count = 1; | ||
833 | INIT_LIST_HEAD(&bo_va->bo_list); | ||
834 | INIT_LIST_HEAD(&bo_va->vm_list); | ||
835 | |||
836 | mutex_lock(&vm->mutex); | ||
837 | list_add(&bo_va->vm_list, &vm->va); | ||
838 | list_add_tail(&bo_va->bo_list, &bo->va); | ||
839 | mutex_unlock(&vm->mutex); | ||
840 | |||
841 | return bo_va; | ||
842 | } | ||
843 | |||
844 | /** | ||
845 | * radeon_vm_bo_set_addr - set bo's virtual address inside a vm | ||
846 | * | ||
847 | * @rdev: radeon_device pointer | ||
848 | * @bo_va: bo_va to store the address | ||
849 | * @soffset: requested offset of the buffer in the VM address space | ||
850 | * @flags: attributes of pages (read/write/valid/etc.) | ||
851 | * | ||
852 | * Set offset of @bo_va (cayman+). | ||
853 | * Validate and set the offset requested within the vm address space. | ||
854 | * Returns 0 for success, error for failure. | ||
855 | * | ||
856 | * Object has to be reserved! | ||
857 | */ | ||
858 | int radeon_vm_bo_set_addr(struct radeon_device *rdev, | ||
859 | struct radeon_bo_va *bo_va, | ||
860 | uint64_t soffset, | ||
861 | uint32_t flags) | ||
862 | { | ||
863 | uint64_t size = radeon_bo_size(bo_va->bo); | ||
864 | uint64_t eoffset, last_offset = 0; | ||
865 | struct radeon_vm *vm = bo_va->vm; | ||
866 | struct radeon_bo_va *tmp; | ||
867 | struct list_head *head; | ||
868 | unsigned last_pfn; | ||
869 | |||
870 | if (soffset) { | ||
871 | /* make sure the object fits at this offset */ | ||
872 | eoffset = soffset + size; | ||
873 | if (soffset >= eoffset) { | ||
874 | return -EINVAL; | ||
875 | } | ||
876 | |||
877 | last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; | ||
878 | if (last_pfn > rdev->vm_manager.max_pfn) { | ||
879 | dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", | ||
880 | last_pfn, rdev->vm_manager.max_pfn); | ||
881 | return -EINVAL; | ||
882 | } | ||
883 | |||
884 | } else { | ||
885 | eoffset = last_pfn = 0; | ||
886 | } | ||
887 | |||
888 | mutex_lock(&vm->mutex); | ||
889 | head = &vm->va; | ||
890 | last_offset = 0; | ||
891 | list_for_each_entry(tmp, &vm->va, vm_list) { | ||
892 | if (bo_va == tmp) { | ||
893 | /* skip over currently modified bo */ | ||
894 | continue; | ||
895 | } | ||
896 | |||
897 | if (soffset >= last_offset && eoffset <= tmp->soffset) { | ||
898 | /* bo can be added before this one */ | ||
899 | break; | ||
900 | } | ||
901 | if (eoffset > tmp->soffset && soffset < tmp->eoffset) { | ||
902 | /* bo and tmp overlap, invalid offset */ | ||
903 | dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", | ||
904 | bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, | ||
905 | (unsigned)tmp->soffset, (unsigned)tmp->eoffset); | ||
906 | mutex_unlock(&vm->mutex); | ||
907 | return -EINVAL; | ||
908 | } | ||
909 | last_offset = tmp->eoffset; | ||
910 | head = &tmp->vm_list; | ||
911 | } | ||
912 | |||
913 | bo_va->soffset = soffset; | ||
914 | bo_va->eoffset = eoffset; | ||
915 | bo_va->flags = flags; | ||
916 | bo_va->valid = false; | ||
917 | list_move(&bo_va->vm_list, head); | ||
918 | |||
919 | mutex_unlock(&vm->mutex); | ||
920 | return 0; | ||
921 | } | ||
922 | |||
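[Editor's note: radeon_vm_bo_set_addr() above walks the per-VM mapping list, which is kept sorted by start offset, and rejects any request whose half-open interval [soffset, eoffset) intersects an existing mapping; otherwise it remembers where to splice the new mapping in. A small standalone model of the same interval test over a sorted array -- all names hypothetical:]

#include <stdio.h>
#include <stdint.h>

struct mapping { uint64_t start, end; };        /* [start, end) */

/* Returns the insert position, or -1 on overlap.  Mirrors the test in
 * radeon_vm_bo_set_addr(): conflict iff new_end > start && new_start < end. */
static int find_slot(const struct mapping *m, int n,
		     uint64_t new_start, uint64_t new_end)
{
	for (int i = 0; i < n; ++i) {
		if (new_end <= m[i].start)
			return i;               /* fits before this mapping */
		if (new_end > m[i].start && new_start < m[i].end)
			return -1;              /* intervals overlap */
	}
	return n;                               /* fits after the last one */
}

int main(void)
{
	struct mapping va[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };
	printf("%d\n", find_slot(va, 2, 0x4000, 0x5000));  /* 1: fits between */
	printf("%d\n", find_slot(va, 2, 0x2000, 0x4000));  /* -1: overlaps #0 */
	return 0;
}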
923 | /** | ||
924 | * radeon_vm_map_gart - get the physical address of a gart page | ||
925 | * | ||
926 | * @rdev: radeon_device pointer | ||
927 | * @addr: the unmapped addr | ||
928 | * | ||
929 | * Look up the physical address of the page that the pte resolves | ||
930 | * to (cayman+). | ||
931 | * Returns the physical address of the page. | ||
932 | */ | ||
933 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) | ||
934 | { | ||
935 | uint64_t result; | ||
936 | |||
937 | /* page table offset */ | ||
938 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; | ||
939 | |||
940 | /* in case cpu page size != gpu page size */ | ||
941 | result |= addr & (~PAGE_MASK); | ||
942 | |||
943 | return result; | ||
944 | } | ||
945 | |||
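[Editor's note: a worked example of the lookup above: the GART offset is split into a CPU-page index, which selects the backing page's bus address from pages_addr[], and a sub-page offset, and the two halves are OR'ed back together -- that is what lets a 4 KiB GPU page size sit on top of a larger CPU page size. Standalone sketch with made-up table contents:]

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))

int main(void)
{
	/* stand-in for rdev->gart.pages_addr[] */
	uint64_t pages_addr[] = { 0xabc000, 0xdef000 };

	uint64_t addr   = 0x1234;                         /* offset to resolve */
	uint64_t result = pages_addr[addr >> PAGE_SHIFT]; /* page 1: 0xdef000 */
	result |= addr & ~PAGE_MASK;                      /* keep offset 0x234 */

	printf("0x%llx\n", (unsigned long long)result);   /* 0xdef234 */
	return 0;
}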
946 | /** | ||
947 | * radeon_vm_page_flags - translate page flags to what the hw uses | ||
948 | * | ||
949 | * @flags: flags coming from userspace | ||
950 | * | ||
951 | * Translate the flags the userspace ABI uses to hw flags. | ||
952 | */ | ||
953 | static uint32_t radeon_vm_page_flags(uint32_t flags) | ||
954 | { | ||
955 | uint32_t hw_flags = 0; | ||
956 | hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; | ||
957 | hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; | ||
958 | hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; | ||
959 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
960 | hw_flags |= R600_PTE_SYSTEM; | ||
961 | hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; | ||
962 | } | ||
963 | return hw_flags; | ||
964 | } | ||
965 | |||
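[Editor's note: the translation above is a straight bit-for-bit mapping, except that the snooped bit is honoured only for system pages. A tiny standalone model -- the flag values below are made up for illustration; the real ones live in the radeon headers:]

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-ins, not the real bit values */
enum { VM_VALID = 1, VM_SYSTEM = 2, VM_SNOOPED = 4, VM_READ = 8, VM_WRITE = 16 };
enum { PTE_VALID = 1, PTE_SYSTEM = 2, PTE_SNOOPED = 4, PTE_READ = 32, PTE_WRITE = 64 };

static uint32_t page_flags(uint32_t f)
{
	uint32_t hw = 0;
	hw |= (f & VM_VALID) ? PTE_VALID : 0;
	hw |= (f & VM_READ)  ? PTE_READ  : 0;
	hw |= (f & VM_WRITE) ? PTE_WRITE : 0;
	if (f & VM_SYSTEM) {            /* snooped only matters here */
		hw |= PTE_SYSTEM;
		hw |= (f & VM_SNOOPED) ? PTE_SNOOPED : 0;
	}
	return hw;
}

int main(void)
{
	/* the snooped bit is dropped for non-system (VRAM) pages: */
	printf("0x%x\n", page_flags(VM_VALID | VM_SNOOPED));            /* 0x1 */
	printf("0x%x\n", page_flags(VM_SYSTEM | VM_SNOOPED | VM_READ)); /* 0x26 */
	return 0;
}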
966 | /** | ||
967 | * radeon_vm_update_pdes - make sure that page directory is valid | ||
968 | * | ||
969 | * @rdev: radeon_device pointer | ||
970 | * @vm: requested vm | ||
971 | * @start: start of GPU address range | ||
972 | * @end: end of GPU address range | ||
973 | * | ||
974 | * Allocates new page tables if necessary | ||
975 | * and updates the page directory (cayman+). | ||
976 | * Returns 0 for success, error for failure. | ||
977 | * | ||
978 | * Global and local mutex must be locked! | ||
979 | */ | ||
980 | static int radeon_vm_update_pdes(struct radeon_device *rdev, | ||
981 | struct radeon_vm *vm, | ||
982 | struct radeon_ib *ib, | ||
983 | uint64_t start, uint64_t end) | ||
984 | { | ||
985 | static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; | ||
986 | |||
987 | uint64_t last_pde = ~0, last_pt = ~0; | ||
988 | unsigned count = 0; | ||
989 | uint64_t pt_idx; | ||
990 | int r; | ||
991 | |||
992 | start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; | ||
993 | end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; | ||
994 | |||
995 | /* walk over the address space and update the page directory */ | ||
996 | for (pt_idx = start; pt_idx <= end; ++pt_idx) { | ||
997 | uint64_t pde, pt; | ||
998 | |||
999 | if (vm->page_tables[pt_idx]) | ||
1000 | continue; | ||
1001 | |||
1002 | retry: | ||
1003 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | ||
1004 | &vm->page_tables[pt_idx], | ||
1005 | RADEON_VM_PTE_COUNT * 8, | ||
1006 | RADEON_GPU_PAGE_SIZE, false); | ||
1007 | |||
1008 | if (r == -ENOMEM) { | ||
1009 | r = radeon_vm_evict(rdev, vm); | ||
1010 | if (r) | ||
1011 | return r; | ||
1012 | goto retry; | ||
1013 | } else if (r) { | ||
1014 | return r; | ||
1015 | } | ||
1016 | |||
1017 | pde = vm->pd_gpu_addr + pt_idx * 8; | ||
1018 | |||
1019 | pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); | ||
1020 | |||
1021 | if (((last_pde + 8 * count) != pde) || | ||
1022 | ((last_pt + incr * count) != pt)) { | ||
1023 | |||
1024 | if (count) { | ||
1025 | radeon_asic_vm_set_page(rdev, ib, last_pde, | ||
1026 | last_pt, count, incr, | ||
1027 | R600_PTE_VALID); | ||
1028 | |||
1029 | count *= RADEON_VM_PTE_COUNT; | ||
1030 | radeon_asic_vm_set_page(rdev, ib, last_pt, 0, | ||
1031 | count, 0, 0); | ||
1032 | } | ||
1033 | |||
1034 | count = 1; | ||
1035 | last_pde = pde; | ||
1036 | last_pt = pt; | ||
1037 | } else { | ||
1038 | ++count; | ||
1039 | } | ||
1040 | } | ||
1041 | |||
1042 | if (count) { | ||
1043 | radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, | ||
1044 | incr, R600_PTE_VALID); | ||
1045 | |||
1046 | count *= RADEON_VM_PTE_COUNT; | ||
1047 | radeon_asic_vm_set_page(rdev, ib, last_pt, 0, | ||
1048 | count, 0, 0); | ||
1049 | } | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
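[Editor's note: the loop above coalesces page-directory writes: while each new PDE lands 8 bytes past the current run (last_pde + 8 * count) and each new page table lands incr bytes past it (last_pt + incr * count), count keeps growing; the run is flushed through one set_page call as soon as either stride breaks. A compact standalone model of that run detection, with made-up addresses:]

#include <stdio.h>
#include <stdint.h>

static void flush(uint64_t pde, uint64_t pt, unsigned count)
{
	printf("set_page(pde=0x%llx, pt=0x%llx, count=%u)\n",
	       (unsigned long long)pde, (unsigned long long)pt, count);
}

int main(void)
{
	const uint64_t incr = 512 * 8;   /* bytes per page table (512 ptes) */
	/* page-table addresses for pt_idx 0..4; entry 3 breaks the stride */
	uint64_t pt[] = { 0x10000, 0x11000, 0x12000, 0x40000, 0x41000 };
	uint64_t pd_base = 0x8000;
	uint64_t last_pde = ~0ull, last_pt = ~0ull;
	unsigned count = 0;

	for (unsigned i = 0; i < 5; ++i) {
		uint64_t pde = pd_base + i * 8;
		if (last_pde + 8 * count != pde ||
		    last_pt + incr * count != pt[i]) {
			if (count)
				flush(last_pde, last_pt, count);
			count = 1;
			last_pde = pde;
			last_pt = pt[i];
		} else {
			++count;
		}
	}
	if (count)
		flush(last_pde, last_pt, count);  /* prints two runs: 3 + 2 */
	return 0;
}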
1054 | /** | ||
1055 | * radeon_vm_update_ptes - make sure that page tables are valid | ||
1056 | * | ||
1057 | * @rdev: radeon_device pointer | ||
1058 | * @vm: requested vm | ||
1059 | * @start: start of GPU address range | ||
1060 | * @end: end of GPU address range | ||
1061 | * @dst: destination address to map to | ||
1062 | * @flags: mapping flags | ||
1063 | * | ||
1064 | * Update the page tables in the range @start - @end (cayman+). | ||
1065 | * | ||
1066 | * Global and local mutex must be locked! | ||
1067 | */ | ||
1068 | static void radeon_vm_update_ptes(struct radeon_device *rdev, | ||
1069 | struct radeon_vm *vm, | ||
1070 | struct radeon_ib *ib, | ||
1071 | uint64_t start, uint64_t end, | ||
1072 | uint64_t dst, uint32_t flags) | ||
1073 | { | ||
1074 | static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; | ||
1075 | |||
1076 | uint64_t last_pte = ~0, last_dst = ~0; | ||
1077 | unsigned count = 0; | ||
1078 | uint64_t addr; | ||
1079 | |||
1080 | start = start / RADEON_GPU_PAGE_SIZE; | ||
1081 | end = end / RADEON_GPU_PAGE_SIZE; | ||
1082 | |||
1083 | /* walk over the address space and update the page tables */ | ||
1084 | for (addr = start; addr < end; ) { | ||
1085 | uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; | ||
1086 | unsigned nptes; | ||
1087 | uint64_t pte; | ||
1088 | |||
1089 | if ((addr & ~mask) == (end & ~mask)) | ||
1090 | nptes = end - addr; | ||
1091 | else | ||
1092 | nptes = RADEON_VM_PTE_COUNT - (addr & mask); | ||
1093 | |||
1094 | pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); | ||
1095 | pte += (addr & mask) * 8; | ||
1096 | |||
1097 | if ((last_pte + 8 * count) != pte) { | ||
1098 | |||
1099 | if (count) { | ||
1100 | radeon_asic_vm_set_page(rdev, ib, last_pte, | ||
1101 | last_dst, count, | ||
1102 | RADEON_GPU_PAGE_SIZE, | ||
1103 | flags); | ||
1104 | } | ||
1105 | |||
1106 | count = nptes; | ||
1107 | last_pte = pte; | ||
1108 | last_dst = dst; | ||
1109 | } else { | ||
1110 | count += nptes; | ||
1111 | } | ||
1112 | |||
1113 | addr += nptes; | ||
1114 | dst += nptes * RADEON_GPU_PAGE_SIZE; | ||
1115 | } | ||
1116 | |||
1117 | if (count) { | ||
1118 | radeon_asic_vm_set_page(rdev, ib, last_pte, | ||
1119 | last_dst, count, | ||
1120 | RADEON_GPU_PAGE_SIZE, flags); | ||
1121 | } | ||
1122 | } | ||
1123 | |||
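[Editor's note: the walk above advances in chunks that never cross a page-table boundary: in the final table nptes runs only up to end, in every other table it runs to the end of the table. A standalone sketch of the chunking -- BLOCK_SIZE and the range are arbitrary example values:]

#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE 9                       /* log2(entries per table) */
#define PTE_COUNT  (1u << BLOCK_SIZE)      /* 512 entries */

int main(void)
{
	const uint64_t mask = PTE_COUNT - 1;
	uint64_t start = 500, end = 1040;  /* page numbers, spans 3 tables */

	for (uint64_t addr = start; addr < end; ) {
		unsigned nptes;
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;                /* last table */
		else
			nptes = PTE_COUNT - (addr & mask); /* fill the table */
		printf("table %llu: %u ptes from entry %llu\n",
		       (unsigned long long)(addr >> BLOCK_SIZE), nptes,
		       (unsigned long long)(addr & mask));
		addr += nptes;                 /* 12, then 512, then 16 */
	}
	return 0;
}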
1124 | /** | ||
1125 | * radeon_vm_bo_update - map a bo into the vm page table | ||
1126 | * | ||
1127 | * @rdev: radeon_device pointer | ||
1128 | * @vm: requested vm | ||
1129 | * @bo: radeon buffer object | ||
1130 | * @mem: ttm mem | ||
1131 | * | ||
1132 | * Fill in the page table entries for @bo (cayman+). | ||
1133 | * Returns 0 for success, -EINVAL for failure. | ||
1134 | * | ||
1135 | * Object has to be reserved & global and local mutex must be locked! | ||
1136 | */ | ||
1137 | int radeon_vm_bo_update(struct radeon_device *rdev, | ||
1138 | struct radeon_vm *vm, | ||
1139 | struct radeon_bo *bo, | ||
1140 | struct ttm_mem_reg *mem) | ||
1141 | { | ||
1142 | struct radeon_ib ib; | ||
1143 | struct radeon_bo_va *bo_va; | ||
1144 | unsigned nptes, npdes, ndw; | ||
1145 | uint64_t addr; | ||
1146 | int r; | ||
1147 | |||
1148 | /* nothing to do if vm isn't bound */ | ||
1149 | if (vm->page_directory == NULL) | ||
1150 | return 0; | ||
1151 | |||
1152 | bo_va = radeon_vm_bo_find(vm, bo); | ||
1153 | if (bo_va == NULL) { | ||
1154 | dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); | ||
1155 | return -EINVAL; | ||
1156 | } | ||
1157 | |||
1158 | if (!bo_va->soffset) { | ||
1159 | dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n", | ||
1160 | bo, vm); | ||
1161 | return -EINVAL; | ||
1162 | } | ||
1163 | |||
1164 | if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) | ||
1165 | return 0; | ||
1166 | |||
1167 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; | ||
1168 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; | ||
1169 | if (mem) { | ||
1170 | addr = mem->start << PAGE_SHIFT; | ||
1171 | if (mem->mem_type != TTM_PL_SYSTEM) { | ||
1172 | bo_va->flags |= RADEON_VM_PAGE_VALID; | ||
1173 | bo_va->valid = true; | ||
1174 | } | ||
1175 | if (mem->mem_type == TTM_PL_TT) { | ||
1176 | bo_va->flags |= RADEON_VM_PAGE_SYSTEM; | ||
1177 | } else { | ||
1178 | addr += rdev->vm_manager.vram_base_offset; | ||
1179 | } | ||
1180 | } else { | ||
1181 | addr = 0; | ||
1182 | bo_va->valid = false; | ||
1183 | } | ||
1184 | |||
1185 | trace_radeon_vm_bo_update(bo_va); | ||
1186 | |||
1187 | nptes = radeon_bo_ngpu_pages(bo); | ||
1188 | |||
1189 | /* assume two extra pdes in case the mapping overlaps the borders */ | ||
1190 | npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; | ||
1191 | |||
1192 | /* padding, etc. */ | ||
1193 | ndw = 64; | ||
1194 | |||
1195 | if (RADEON_VM_BLOCK_SIZE > 11) | ||
1196 | /* reserve space for one header for every 2k dwords */ | ||
1197 | ndw += (nptes >> 11) * 4; | ||
1198 | else | ||
1199 | /* reserve space for one header for | ||
1200 | every (1 << BLOCK_SIZE) entries */ | ||
1201 | ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; | ||
1202 | |||
1203 | /* reserve space for pte addresses */ | ||
1204 | ndw += nptes * 2; | ||
1205 | |||
1206 | /* reserve space for one header for every 2k dwords */ | ||
1207 | ndw += (npdes >> 11) * 4; | ||
1208 | |||
1209 | /* reserve space for pde addresses */ | ||
1210 | ndw += npdes * 2; | ||
1211 | |||
1212 | /* reserve space for clearing new page tables */ | ||
1213 | ndw += npdes * 2 * RADEON_VM_PTE_COUNT; | ||
1214 | |||
1215 | /* update too big for an IB */ | ||
1216 | if (ndw > 0xfffff) | ||
1217 | return -ENOMEM; | ||
1218 | |||
1219 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); | ||
1220 | if (r) | ||
1221 | return r; | ||
1222 | ib.length_dw = 0; | ||
1223 | |||
1224 | r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); | ||
1225 | if (r) { | ||
1226 | radeon_ib_free(rdev, &ib); | ||
1227 | return r; | ||
1228 | } | ||
1229 | |||
1230 | radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, | ||
1231 | addr, radeon_vm_page_flags(bo_va->flags)); | ||
1232 | |||
1233 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); | ||
1234 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
1235 | if (r) { | ||
1236 | radeon_ib_free(rdev, &ib); | ||
1237 | return r; | ||
1238 | } | ||
1239 | radeon_fence_unref(&vm->fence); | ||
1240 | vm->fence = radeon_fence_ref(ib.fence); | ||
1241 | radeon_ib_free(rdev, &ib); | ||
1242 | radeon_fence_unref(&vm->last_flush); | ||
1243 | |||
1244 | return 0; | ||
1245 | } | ||
1246 | |||
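[Editor's note: a worked example of the dword budget computed above, for a hypothetical 1 MiB buffer (256 GPU pages), assuming a block size of 9 so each page table holds 512 entries -- the driver's actual RADEON_VM_BLOCK_SIZE may differ. The point is only to show how the terms combine and stay far below the 0xfffff IB limit:]

#include <stdio.h>

#define BLOCK_SIZE 9
#define PTE_COUNT  (1u << BLOCK_SIZE)

int main(void)
{
	unsigned nptes = 256;                       /* 1 MiB / 4 KiB pages */
	unsigned npdes = (nptes >> BLOCK_SIZE) + 2; /* + 2 border pdes = 2 */
	unsigned ndw   = 64;                        /* padding etc. */

	ndw += (nptes >> BLOCK_SIZE) * 4; /* pte write headers:      +0    */
	ndw += nptes * 2;                 /* pte addresses:          +512  */
	ndw += (npdes >> 11) * 4;         /* pde write headers:      +0    */
	ndw += npdes * 2;                 /* pde addresses:          +4    */
	ndw += npdes * 2 * PTE_COUNT;     /* clearing new tables:    +2048 */

	printf("ndw = %u (limit 0xfffff)\n", ndw);  /* ndw = 2628 */
	return 0;
}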
1247 | /** | ||
1248 | * radeon_vm_bo_rmv - remove a bo from a specific vm | ||
1249 | * | ||
1250 | * @rdev: radeon_device pointer | ||
1251 | * @bo_va: requested bo_va | ||
1252 | * | ||
1253 | * Remove @bo_va->bo from the requested vm (cayman+). | ||
1254 | * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and | ||
1255 | * remove the ptes for @bo_va in the page table. | ||
1256 | * Returns 0 for success. | ||
1257 | * | ||
1258 | * Object has to be reserved! | ||
1259 | */ | ||
1260 | int radeon_vm_bo_rmv(struct radeon_device *rdev, | ||
1261 | struct radeon_bo_va *bo_va) | ||
1262 | { | ||
1263 | int r = 0; | ||
1264 | |||
1265 | mutex_lock(&rdev->vm_manager.lock); | ||
1266 | mutex_lock(&bo_va->vm->mutex); | ||
1267 | if (bo_va->soffset) { | ||
1268 | r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); | ||
1269 | } | ||
1270 | mutex_unlock(&rdev->vm_manager.lock); | ||
1271 | list_del(&bo_va->vm_list); | ||
1272 | mutex_unlock(&bo_va->vm->mutex); | ||
1273 | list_del(&bo_va->bo_list); | ||
1274 | |||
1275 | kfree(bo_va); | ||
1276 | return r; | ||
1277 | } | ||
1278 | |||
1279 | /** | ||
1280 | * radeon_vm_bo_invalidate - mark the bo as invalid | ||
1281 | * | ||
1282 | * @rdev: radeon_device pointer | ||
1283 | * @vm: requested vm | ||
1284 | * @bo: radeon buffer object | ||
1285 | * | ||
1286 | * Mark @bo as invalid (cayman+). | ||
1287 | */ | ||
1288 | void radeon_vm_bo_invalidate(struct radeon_device *rdev, | ||
1289 | struct radeon_bo *bo) | ||
1290 | { | ||
1291 | struct radeon_bo_va *bo_va; | ||
1292 | |||
1293 | list_for_each_entry(bo_va, &bo->va, bo_list) { | ||
1294 | bo_va->valid = false; | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | /** | ||
1299 | * radeon_vm_init - initialize a vm instance | ||
1300 | * | ||
1301 | * @rdev: radeon_device pointer | ||
1302 | * @vm: requested vm | ||
1303 | * | ||
1304 | * Init @vm fields (cayman+). | ||
1305 | */ | ||
1306 | void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) | ||
1307 | { | ||
1308 | vm->id = 0; | ||
1309 | vm->fence = NULL; | ||
1310 | vm->last_flush = NULL; | ||
1311 | vm->last_id_use = NULL; | ||
1312 | mutex_init(&vm->mutex); | ||
1313 | INIT_LIST_HEAD(&vm->list); | ||
1314 | INIT_LIST_HEAD(&vm->va); | ||
1315 | } | ||
1316 | |||
1317 | /** | ||
1318 | * radeon_vm_fini - tear down a vm instance | ||
1319 | * | ||
1320 | * @rdev: radeon_device pointer | ||
1321 | * @vm: requested vm | ||
1322 | * | ||
1323 | * Tear down @vm (cayman+). | ||
1324 | * Unbind the VM and remove all bos from the vm bo list | ||
1325 | */ | ||
1326 | void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) | ||
1327 | { | ||
1328 | struct radeon_bo_va *bo_va, *tmp; | ||
1329 | int r; | ||
1330 | |||
1331 | mutex_lock(&rdev->vm_manager.lock); | ||
1332 | mutex_lock(&vm->mutex); | ||
1333 | radeon_vm_free_pt(rdev, vm); | ||
1334 | mutex_unlock(&rdev->vm_manager.lock); | ||
1335 | |||
1336 | if (!list_empty(&vm->va)) { | ||
1337 | dev_err(rdev->dev, "still active bo inside vm\n"); | ||
1338 | } | ||
1339 | list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { | ||
1340 | list_del_init(&bo_va->vm_list); | ||
1341 | r = radeon_bo_reserve(bo_va->bo, false); | ||
1342 | if (!r) { | ||
1343 | list_del_init(&bo_va->bo_list); | ||
1344 | radeon_bo_unreserve(bo_va->bo); | ||
1345 | kfree(bo_va); | ||
1346 | } | ||
1347 | } | ||
1348 | radeon_fence_unref(&vm->fence); | ||
1349 | radeon_fence_unref(&vm->last_flush); | ||
1350 | radeon_fence_unref(&vm->last_id_use); | ||
1351 | mutex_unlock(&vm->mutex); | ||
1352 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index b96c819024b3..d09650c1d720 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -344,18 +344,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
344 | } | 344 | } |
345 | robj = gem_to_radeon_bo(gobj); | 345 | robj = gem_to_radeon_bo(gobj); |
346 | r = radeon_bo_wait(robj, &cur_placement, true); | 346 | r = radeon_bo_wait(robj, &cur_placement, true); |
347 | switch (cur_placement) { | 347 | args->domain = radeon_mem_type_to_domain(cur_placement); |
348 | case TTM_PL_VRAM: | ||
349 | args->domain = RADEON_GEM_DOMAIN_VRAM; | ||
350 | break; | ||
351 | case TTM_PL_TT: | ||
352 | args->domain = RADEON_GEM_DOMAIN_GTT; | ||
353 | break; | ||
354 | case TTM_PL_SYSTEM: | ||
355 | args->domain = RADEON_GEM_DOMAIN_CPU; | ||
356 | default: | ||
357 | break; | ||
358 | } | ||
359 | drm_gem_object_unreference_unlocked(gobj); | 348 | drm_gem_object_unreference_unlocked(gobj); |
360 | r = radeon_gem_handle_lockup(rdev, r); | 349 | r = radeon_gem_handle_lockup(rdev, r); |
361 | return r; | 350 | return r; |
@@ -533,6 +522,42 @@ out: | |||
533 | return r; | 522 | return r; |
534 | } | 523 | } |
535 | 524 | ||
525 | int radeon_gem_op_ioctl(struct drm_device *dev, void *data, | ||
526 | struct drm_file *filp) | ||
527 | { | ||
528 | struct drm_radeon_gem_op *args = data; | ||
529 | struct drm_gem_object *gobj; | ||
530 | struct radeon_bo *robj; | ||
531 | int r; | ||
532 | |||
533 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
534 | if (gobj == NULL) { | ||
535 | return -ENOENT; | ||
536 | } | ||
537 | robj = gem_to_radeon_bo(gobj); | ||
538 | r = radeon_bo_reserve(robj, false); | ||
539 | if (unlikely(r)) | ||
540 | goto out; | ||
541 | |||
542 | switch (args->op) { | ||
543 | case RADEON_GEM_OP_GET_INITIAL_DOMAIN: | ||
544 | args->value = robj->initial_domain; | ||
545 | break; | ||
546 | case RADEON_GEM_OP_SET_INITIAL_DOMAIN: | ||
547 | robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | | ||
548 | RADEON_GEM_DOMAIN_GTT | | ||
549 | RADEON_GEM_DOMAIN_CPU); | ||
550 | break; | ||
551 | default: | ||
552 | r = -EINVAL; | ||
553 | } | ||
554 | |||
555 | radeon_bo_unreserve(robj); | ||
556 | out: | ||
557 | drm_gem_object_unreference_unlocked(gobj); | ||
558 | return r; | ||
559 | } | ||
560 | |||
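[Editor's note: a hedged sketch of how userspace might drive the new ioctl above through libdrm. It assumes the matching uapi additions from this series -- struct drm_radeon_gem_op, DRM_RADEON_GEM_OP and RADEON_GEM_OP_GET_INITIAL_DOMAIN in radeon_drm.h -- and is not a verbatim copy of any real caller:]

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Query the initial placement domain of a GEM handle.
 * fd is an open radeon render node, handle a valid GEM handle. */
static int get_initial_domain(int fd, uint32_t handle, uint64_t *domain)
{
	struct drm_radeon_gem_op op;

	memset(&op, 0, sizeof(op));
	op.handle = handle;
	op.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;
	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &op, sizeof(op)))
		return -1;
	*domain = op.value;     /* RADEON_GEM_DOMAIN_VRAM/GTT/CPU bits */
	return 0;
}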
536 | int radeon_mode_dumb_create(struct drm_file *file_priv, | 561 | int radeon_mode_dumb_create(struct drm_file *file_priv, |
537 | struct drm_device *dev, | 562 | struct drm_device *dev, |
538 | struct drm_mode_create_dumb *args) | 563 | struct drm_mode_create_dumb *args) |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 2aecd6dc2610..6f1dfac17507 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -33,6 +33,13 @@ | |||
33 | #include <linux/vga_switcheroo.h> | 33 | #include <linux/vga_switcheroo.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
36 | |||
37 | #if defined(CONFIG_VGA_SWITCHEROO) | ||
38 | bool radeon_is_px(void); | ||
39 | #else | ||
40 | static inline bool radeon_is_px(void) { return false; } | ||
41 | #endif | ||
42 | |||
36 | /** | 43 | /** |
37 | * radeon_driver_unload_kms - Main unload function for KMS. | 44 | * radeon_driver_unload_kms - Main unload function for KMS. |
38 | * | 45 | * |
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
130 | "Error during ACPI methods call\n"); | 137 | "Error during ACPI methods call\n"); |
131 | } | 138 | } |
132 | 139 | ||
133 | if (radeon_runtime_pm != 0) { | 140 | if ((radeon_runtime_pm == 1) || |
141 | ((radeon_runtime_pm == -1) && radeon_is_px())) { | ||
134 | pm_runtime_use_autosuspend(dev->dev); | 142 | pm_runtime_use_autosuspend(dev->dev); |
135 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | 143 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
136 | pm_runtime_set_active(dev->dev); | 144 | pm_runtime_set_active(dev->dev); |
@@ -433,6 +441,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
433 | case RADEON_CS_RING_UVD: | 441 | case RADEON_CS_RING_UVD: |
434 | *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; | 442 | *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; |
435 | break; | 443 | break; |
444 | case RADEON_CS_RING_VCE: | ||
445 | *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; | ||
446 | break; | ||
436 | default: | 447 | default: |
437 | return -EINVAL; | 448 | return -EINVAL; |
438 | } | 449 | } |
@@ -477,6 +488,27 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
477 | else | 488 | else |
478 | *value = rdev->pm.default_sclk * 10; | 489 | *value = rdev->pm.default_sclk * 10; |
479 | break; | 490 | break; |
491 | case RADEON_INFO_VCE_FW_VERSION: | ||
492 | *value = rdev->vce.fw_version; | ||
493 | break; | ||
494 | case RADEON_INFO_VCE_FB_VERSION: | ||
495 | *value = rdev->vce.fb_version; | ||
496 | break; | ||
497 | case RADEON_INFO_NUM_BYTES_MOVED: | ||
498 | value = (uint32_t*)&value64; | ||
499 | value_size = sizeof(uint64_t); | ||
500 | value64 = atomic64_read(&rdev->num_bytes_moved); | ||
501 | break; | ||
502 | case RADEON_INFO_VRAM_USAGE: | ||
503 | value = (uint32_t*)&value64; | ||
504 | value_size = sizeof(uint64_t); | ||
505 | value64 = atomic64_read(&rdev->vram_usage); | ||
506 | break; | ||
507 | case RADEON_INFO_GTT_USAGE: | ||
508 | value = (uint32_t*)&value64; | ||
509 | value_size = sizeof(uint64_t); | ||
510 | value64 = atomic64_read(&rdev->gtt_usage); | ||
511 | break; | ||
480 | default: | 512 | default: |
481 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 513 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
482 | return -EINVAL; | 514 | return -EINVAL; |
@@ -535,7 +567,13 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
535 | return -ENOMEM; | 567 | return -ENOMEM; |
536 | } | 568 | } |
537 | 569 | ||
538 | radeon_vm_init(rdev, &fpriv->vm); | 570 | r = radeon_vm_init(rdev, &fpriv->vm); |
571 | if (r) | ||
572 | return r; | ||
573 | |||
574 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | ||
575 | if (r) | ||
576 | return r; | ||
539 | 577 | ||
540 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | 578 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
541 | if (r) | 579 | if (r) |
@@ -616,6 +654,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
616 | if (rdev->cmask_filp == file_priv) | 654 | if (rdev->cmask_filp == file_priv) |
617 | rdev->cmask_filp = NULL; | 655 | rdev->cmask_filp = NULL; |
618 | radeon_uvd_free_handles(rdev, file_priv); | 656 | radeon_uvd_free_handles(rdev, file_priv); |
657 | radeon_vce_free_handles(rdev, file_priv); | ||
619 | } | 658 | } |
620 | 659 | ||
621 | /* | 660 | /* |
@@ -810,5 +849,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { | |||
810 | DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | 849 | DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
811 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | 850 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
812 | DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | 851 | DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
852 | DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
813 | }; | 853 | }; |
814 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 854 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 08595cf90b01..1375ff85b08a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -56,11 +56,36 @@ static void radeon_bo_clear_va(struct radeon_bo *bo) | |||
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | static void radeon_update_memory_usage(struct radeon_bo *bo, | ||
60 | unsigned mem_type, int sign) | ||
61 | { | ||
62 | struct radeon_device *rdev = bo->rdev; | ||
63 | u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; | ||
64 | |||
65 | switch (mem_type) { | ||
66 | case TTM_PL_TT: | ||
67 | if (sign > 0) | ||
68 | atomic64_add(size, &rdev->gtt_usage); | ||
69 | else | ||
70 | atomic64_sub(size, &rdev->gtt_usage); | ||
71 | break; | ||
72 | case TTM_PL_VRAM: | ||
73 | if (sign > 0) | ||
74 | atomic64_add(size, &rdev->vram_usage); | ||
75 | else | ||
76 | atomic64_sub(size, &rdev->vram_usage); | ||
77 | break; | ||
78 | } | ||
79 | } | ||
80 | |||
59 | static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) | 81 | static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
60 | { | 82 | { |
61 | struct radeon_bo *bo; | 83 | struct radeon_bo *bo; |
62 | 84 | ||
63 | bo = container_of(tbo, struct radeon_bo, tbo); | 85 | bo = container_of(tbo, struct radeon_bo, tbo); |
86 | |||
87 | radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); | ||
88 | |||
64 | mutex_lock(&bo->rdev->gem.mutex); | 89 | mutex_lock(&bo->rdev->gem.mutex); |
65 | list_del_init(&bo->list); | 90 | list_del_init(&bo->list); |
66 | mutex_unlock(&bo->rdev->gem.mutex); | 91 | mutex_unlock(&bo->rdev->gem.mutex); |
@@ -120,7 +145,6 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
120 | 145 | ||
121 | size = ALIGN(size, PAGE_SIZE); | 146 | size = ALIGN(size, PAGE_SIZE); |
122 | 147 | ||
123 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; | ||
124 | if (kernel) { | 148 | if (kernel) { |
125 | type = ttm_bo_type_kernel; | 149 | type = ttm_bo_type_kernel; |
126 | } else if (sg) { | 150 | } else if (sg) { |
@@ -145,6 +169,9 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
145 | bo->surface_reg = -1; | 169 | bo->surface_reg = -1; |
146 | INIT_LIST_HEAD(&bo->list); | 170 | INIT_LIST_HEAD(&bo->list); |
147 | INIT_LIST_HEAD(&bo->va); | 171 | INIT_LIST_HEAD(&bo->va); |
172 | bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | | ||
173 | RADEON_GEM_DOMAIN_GTT | | ||
174 | RADEON_GEM_DOMAIN_CPU); | ||
148 | radeon_ttm_placement_from_domain(bo, domain); | 175 | radeon_ttm_placement_from_domain(bo, domain); |
149 | /* Kernel allocation are uninterruptible */ | 176 | /* Kernel allocation are uninterruptible */ |
150 | down_read(&rdev->pm.mclk_lock); | 177 | down_read(&rdev->pm.mclk_lock); |
@@ -338,39 +365,105 @@ void radeon_bo_fini(struct radeon_device *rdev) | |||
338 | arch_phys_wc_del(rdev->mc.vram_mtrr); | 365 | arch_phys_wc_del(rdev->mc.vram_mtrr); |
339 | } | 366 | } |
340 | 367 | ||
341 | void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | 368 | /* Returns how many bytes TTM can move per IB. |
342 | struct list_head *head) | 369 | */ |
370 | static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) | ||
343 | { | 371 | { |
344 | if (lobj->written) { | 372 | u64 real_vram_size = rdev->mc.real_vram_size; |
345 | list_add(&lobj->tv.head, head); | 373 | u64 vram_usage = atomic64_read(&rdev->vram_usage); |
346 | } else { | 374 | |
347 | list_add_tail(&lobj->tv.head, head); | 375 | /* This function is based on the current VRAM usage. |
348 | } | 376 | * |
377 | * - If all of VRAM is free, allow relocating the number of bytes that | ||
378 | * is equal to 1/4 of the size of VRAM for this IB. | ||
379 | * |||
380 | * - If more than one half of VRAM is occupied, only allow relocating | ||
381 | * 1 MB of data for this IB. | ||
382 | * | ||
383 | * - From 0 to one half of used VRAM, the threshold decreases | ||
384 | * linearly. | ||
385 | * __________________ | ||
386 | * 1/4 of -|\ | | ||
387 | * VRAM | \ | | ||
388 | * | \ | | ||
389 | * | \ | | ||
390 | * | \ | | ||
391 | * | \ | | ||
392 | * | \ | | ||
393 | * | \________|1 MB | ||
394 | * |----------------| | ||
395 | * VRAM 0 % 100 % | ||
396 | * used used | ||
397 | * | ||
398 | * Note: It's a threshold, not a limit. The threshold must be crossed | ||
399 | * for buffer relocations to stop, so any buffer of an arbitrary size | ||
400 | * can be moved as long as the threshold isn't crossed before | ||
401 | * the relocation takes place. We don't want to disable buffer | ||
402 | * relocations completely. | ||
403 | * | ||
404 | * The idea is that buffers should be placed in VRAM at creation time | ||
405 | * and TTM should only do a minimum number of relocations during | ||
406 | * command submission. In practice, you need to submit at least | ||
407 | * a dozen IBs to move all buffers to VRAM if they are in GTT. | ||
408 | * | ||
409 | * Also, things can get pretty crazy under memory pressure and actual | ||
410 | * VRAM usage can change a lot, so playing safe even at 50% does | ||
411 | * consistently increase performance. | ||
412 | */ | ||
413 | |||
414 | u64 half_vram = real_vram_size >> 1; | ||
415 | u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; | ||
416 | u64 bytes_moved_threshold = half_free_vram >> 1; | ||
417 | return max(bytes_moved_threshold, 1024*1024ull); | ||
349 | } | 418 | } |
350 | 419 | ||
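[Editor's note: plugging numbers into the function above: with 1 GiB of VRAM the threshold starts at 256 MiB when VRAM is empty (the 1/4 figure from the comment), falls to 128 MiB once 256 MiB is in use, and bottoms out at the 1 MiB floor as soon as half of VRAM is occupied. A standalone check of the same arithmetic:]

#include <stdio.h>
#include <stdint.h>

static uint64_t threshold(uint64_t vram_size, uint64_t vram_usage)
{
	uint64_t half_vram = vram_size >> 1;
	uint64_t half_free = vram_usage >= half_vram ?
			     0 : half_vram - vram_usage;
	uint64_t t = half_free >> 1;
	return t > 1024 * 1024 ? t : 1024 * 1024;  /* 1 MiB floor */
}

int main(void)
{
	uint64_t gib = 1024ull << 20;
	printf("%llu MiB\n", (unsigned long long)(threshold(gib, 0) >> 20));            /* 256 */
	printf("%llu MiB\n", (unsigned long long)(threshold(gib, 256ull << 20) >> 20)); /* 128 */
	printf("%llu MiB\n", (unsigned long long)(threshold(gib, 768ull << 20) >> 20)); /* 1 */
	return 0;
}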
351 | int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, | 420 | int radeon_bo_list_validate(struct radeon_device *rdev, |
421 | struct ww_acquire_ctx *ticket, | ||
352 | struct list_head *head, int ring) | 422 | struct list_head *head, int ring) |
353 | { | 423 | { |
354 | struct radeon_bo_list *lobj; | 424 | struct radeon_cs_reloc *lobj; |
355 | struct radeon_bo *bo; | 425 | struct radeon_bo *bo; |
356 | u32 domain; | ||
357 | int r; | 426 | int r; |
427 | u64 bytes_moved = 0, initial_bytes_moved; | ||
428 | u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); | ||
358 | 429 | ||
359 | r = ttm_eu_reserve_buffers(ticket, head); | 430 | r = ttm_eu_reserve_buffers(ticket, head); |
360 | if (unlikely(r != 0)) { | 431 | if (unlikely(r != 0)) { |
361 | return r; | 432 | return r; |
362 | } | 433 | } |
434 | |||
363 | list_for_each_entry(lobj, head, tv.head) { | 435 | list_for_each_entry(lobj, head, tv.head) { |
364 | bo = lobj->bo; | 436 | bo = lobj->robj; |
365 | if (!bo->pin_count) { | 437 | if (!bo->pin_count) { |
366 | domain = lobj->domain; | 438 | u32 domain = lobj->domain; |
367 | 439 | u32 current_domain = | |
440 | radeon_mem_type_to_domain(bo->tbo.mem.mem_type); | ||
441 | |||
442 | /* Check if this buffer will be moved and don't move it | ||
443 | * if we have moved too many buffers for this IB already. | ||
444 | * | ||
445 | * Note that this allows moving at least one buffer of | ||
446 | * any size, because it doesn't take the current "bo" | ||
447 | * into account. We don't want to disallow buffer moves | ||
448 | * completely. | ||
449 | */ | ||
450 | if (current_domain != RADEON_GEM_DOMAIN_CPU && | ||
451 | (domain & current_domain) == 0 && /* will be moved */ | ||
452 | bytes_moved > bytes_moved_threshold) { | ||
453 | /* don't move it */ | ||
454 | domain = current_domain; | ||
455 | } | ||
456 | |||
368 | retry: | 457 | retry: |
369 | radeon_ttm_placement_from_domain(bo, domain); | 458 | radeon_ttm_placement_from_domain(bo, domain); |
370 | if (ring == R600_RING_TYPE_UVD_INDEX) | 459 | if (ring == R600_RING_TYPE_UVD_INDEX) |
371 | radeon_uvd_force_into_uvd_segment(bo); | 460 | radeon_uvd_force_into_uvd_segment(bo); |
372 | r = ttm_bo_validate(&bo->tbo, &bo->placement, | 461 | |
373 | true, false); | 462 | initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); |
463 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
464 | bytes_moved += atomic64_read(&rdev->num_bytes_moved) - | ||
465 | initial_bytes_moved; | ||
466 | |||
374 | if (unlikely(r)) { | 467 | if (unlikely(r)) { |
375 | if (r != -ERESTARTSYS && domain != lobj->alt_domain) { | 468 | if (r != -ERESTARTSYS && domain != lobj->alt_domain) { |
376 | domain = lobj->alt_domain; | 469 | domain = lobj->alt_domain; |
@@ -564,14 +657,23 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, | |||
564 | } | 657 | } |
565 | 658 | ||
566 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | 659 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, |
567 | struct ttm_mem_reg *mem) | 660 | struct ttm_mem_reg *new_mem) |
568 | { | 661 | { |
569 | struct radeon_bo *rbo; | 662 | struct radeon_bo *rbo; |
663 | |||
570 | if (!radeon_ttm_bo_is_radeon_bo(bo)) | 664 | if (!radeon_ttm_bo_is_radeon_bo(bo)) |
571 | return; | 665 | return; |
666 | |||
572 | rbo = container_of(bo, struct radeon_bo, tbo); | 667 | rbo = container_of(bo, struct radeon_bo, tbo); |
573 | radeon_bo_check_tiling(rbo, 0, 1); | 668 | radeon_bo_check_tiling(rbo, 0, 1); |
574 | radeon_vm_bo_invalidate(rbo->rdev, rbo); | 669 | radeon_vm_bo_invalidate(rbo->rdev, rbo); |
670 | |||
671 | /* update statistics */ | ||
672 | if (!new_mem) | ||
673 | return; | ||
674 | |||
675 | radeon_update_memory_usage(rbo, bo->mem.mem_type, -1); | ||
676 | radeon_update_memory_usage(rbo, new_mem->mem_type, 1); | ||
575 | } | 677 | } |
576 | 678 | ||
577 | int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | 679 | int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 209b11150263..9e7b25a0629d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -138,9 +138,8 @@ extern int radeon_bo_evict_vram(struct radeon_device *rdev); | |||
138 | extern void radeon_bo_force_delete(struct radeon_device *rdev); | 138 | extern void radeon_bo_force_delete(struct radeon_device *rdev); |
139 | extern int radeon_bo_init(struct radeon_device *rdev); | 139 | extern int radeon_bo_init(struct radeon_device *rdev); |
140 | extern void radeon_bo_fini(struct radeon_device *rdev); | 140 | extern void radeon_bo_fini(struct radeon_device *rdev); |
141 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | 141 | extern int radeon_bo_list_validate(struct radeon_device *rdev, |
142 | struct list_head *head); | 142 | struct ww_acquire_ctx *ticket, |
143 | extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, | ||
144 | struct list_head *head, int ring); | 143 | struct list_head *head, int ring); |
145 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 144 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
146 | struct vm_area_struct *vma); | 145 | struct vm_area_struct *vma); |
@@ -151,7 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, | |||
151 | extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, | 150 | extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, |
152 | bool force_drop); | 151 | bool force_drop); |
153 | extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, | 152 | extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, |
154 | struct ttm_mem_reg *mem); | 153 | struct ttm_mem_reg *new_mem); |
155 | extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | 154 | extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); |
156 | extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); | 155 | extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); |
157 | 156 | ||
@@ -181,7 +180,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, | |||
181 | extern int radeon_sa_bo_new(struct radeon_device *rdev, | 180 | extern int radeon_sa_bo_new(struct radeon_device *rdev, |
182 | struct radeon_sa_manager *sa_manager, | 181 | struct radeon_sa_manager *sa_manager, |
183 | struct radeon_sa_bo **sa_bo, | 182 | struct radeon_sa_bo **sa_bo, |
184 | unsigned size, unsigned align, bool block); | 183 | unsigned size, unsigned align); |
185 | extern void radeon_sa_bo_free(struct radeon_device *rdev, | 184 | extern void radeon_sa_bo_free(struct radeon_device *rdev, |
186 | struct radeon_sa_bo **sa_bo, | 185 | struct radeon_sa_bo **sa_bo, |
187 | struct radeon_fence *fence); | 186 | struct radeon_fence *fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 8e8153e471c2..ee738a524639 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -260,7 +260,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
260 | if (!ring->ready) { | 260 | if (!ring->ready) { |
261 | continue; | 261 | continue; |
262 | } | 262 | } |
263 | r = radeon_fence_wait_empty_locked(rdev, i); | 263 | r = radeon_fence_wait_empty(rdev, i); |
264 | if (r) { | 264 | if (r) { |
265 | /* needs a GPU reset, don't reset here */ | 265 | /* needs a GPU reset, don't reset here */ |
266 | mutex_unlock(&rdev->ring_lock); | 266 | mutex_unlock(&rdev->ring_lock); |
@@ -826,6 +826,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
826 | 826 | ||
827 | /* no need to reprogram if nothing changed unless we are on BTC+ */ | 827 | /* no need to reprogram if nothing changed unless we are on BTC+ */ |
828 | if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { | 828 | if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { |
829 | /* vce just modifies an existing state so force a change */ | ||
830 | if (ps->vce_active != rdev->pm.dpm.vce_active) | ||
831 | goto force; | ||
829 | if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { | 832 | if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { |
830 | /* for pre-BTC and APUs if the num crtcs changed but state is the same, | 833 | /* for pre-BTC and APUs if the num crtcs changed but state is the same, |
831 | * all we need to do is update the display configuration. | 834 | * all we need to do is update the display configuration. |
@@ -862,16 +865,21 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
862 | } | 865 | } |
863 | } | 866 | } |
864 | 867 | ||
868 | force: | ||
865 | if (radeon_dpm == 1) { | 869 | if (radeon_dpm == 1) { |
866 | printk("switching from power state:\n"); | 870 | printk("switching from power state:\n"); |
867 | radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); | 871 | radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); |
868 | printk("switching to power state:\n"); | 872 | printk("switching to power state:\n"); |
869 | radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); | 873 | radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); |
870 | } | 874 | } |
875 | |||
871 | mutex_lock(&rdev->ddev->struct_mutex); | 876 | mutex_lock(&rdev->ddev->struct_mutex); |
872 | down_write(&rdev->pm.mclk_lock); | 877 | down_write(&rdev->pm.mclk_lock); |
873 | mutex_lock(&rdev->ring_lock); | 878 | mutex_lock(&rdev->ring_lock); |
874 | 879 | ||
880 | /* update whether vce is active */ | ||
881 | ps->vce_active = rdev->pm.dpm.vce_active; | ||
882 | |||
875 | ret = radeon_dpm_pre_set_power_state(rdev); | 883 | ret = radeon_dpm_pre_set_power_state(rdev); |
876 | if (ret) | 884 | if (ret) |
877 | goto done; | 885 | goto done; |
@@ -888,7 +896,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
888 | for (i = 0; i < RADEON_NUM_RINGS; i++) { | 896 | for (i = 0; i < RADEON_NUM_RINGS; i++) { |
889 | struct radeon_ring *ring = &rdev->ring[i]; | 897 | struct radeon_ring *ring = &rdev->ring[i]; |
890 | if (ring->ready) | 898 | if (ring->ready) |
891 | radeon_fence_wait_empty_locked(rdev, i); | 899 | radeon_fence_wait_empty(rdev, i); |
892 | } | 900 | } |
893 | 901 | ||
894 | /* program the new power state */ | 902 | /* program the new power state */ |
@@ -935,8 +943,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) | |||
935 | if (enable) { | 943 | if (enable) { |
936 | mutex_lock(&rdev->pm.mutex); | 944 | mutex_lock(&rdev->pm.mutex); |
937 | rdev->pm.dpm.uvd_active = true; | 945 | rdev->pm.dpm.uvd_active = true; |
938 | /* disable this for now */ | ||
939 | #if 0 | ||
940 | if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) | 946 | if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) |
941 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; | 947 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; |
942 | else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) | 948 | else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) |
@@ -946,7 +952,6 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) | |||
946 | else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) | 952 | else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) |
947 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; | 953 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; |
948 | else | 954 | else |
949 | #endif | ||
950 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; | 955 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; |
951 | rdev->pm.dpm.state = dpm_state; | 956 | rdev->pm.dpm.state = dpm_state; |
952 | mutex_unlock(&rdev->pm.mutex); | 957 | mutex_unlock(&rdev->pm.mutex); |
@@ -960,6 +965,23 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) | |||
960 | } | 965 | } |
961 | } | 966 | } |
962 | 967 | ||
968 | void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable) | ||
969 | { | ||
970 | if (enable) { | ||
971 | mutex_lock(&rdev->pm.mutex); | ||
972 | rdev->pm.dpm.vce_active = true; | ||
973 | /* XXX select vce level based on ring/task */ | ||
974 | rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL; | ||
975 | mutex_unlock(&rdev->pm.mutex); | ||
976 | } else { | ||
977 | mutex_lock(&rdev->pm.mutex); | ||
978 | rdev->pm.dpm.vce_active = false; | ||
979 | mutex_unlock(&rdev->pm.mutex); | ||
980 | } | ||
981 | |||
982 | radeon_pm_compute_clocks(rdev); | ||
983 | } | ||
984 | |||
963 | static void radeon_pm_suspend_old(struct radeon_device *rdev) | 985 | static void radeon_pm_suspend_old(struct radeon_device *rdev) |
964 | { | 986 | { |
965 | mutex_lock(&rdev->pm.mutex); | 987 | mutex_lock(&rdev->pm.mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 15e44a7281ab..8b0dfdd23793 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -63,7 +63,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, | |||
63 | { | 63 | { |
64 | int r; | 64 | int r; |
65 | 65 | ||
66 | r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); | 66 | r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256); |
67 | if (r) { | 67 | if (r) { |
68 | dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); | 68 | dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); |
69 | return r; | 69 | return r; |
@@ -145,6 +145,13 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | |||
145 | return r; | 145 | return r; |
146 | } | 146 | } |
147 | 147 | ||
148 | /* grab a vm id if necessary */ | ||
149 | if (ib->vm) { | ||
150 | struct radeon_fence *vm_id_fence; | ||
151 | vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); | ||
152 | radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); | ||
153 | } | ||
154 | |||
148 | /* sync with other rings */ | 155 | /* sync with other rings */ |
149 | r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); | 156 | r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); |
150 | if (r) { | 157 | if (r) { |
@@ -153,11 +160,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | |||
153 | return r; | 160 | return r; |
154 | } | 161 | } |
155 | 162 | ||
156 | /* if we can't remember our last VM flush then flush now! */ | 163 | if (ib->vm) |
157 | /* XXX figure out why we have to flush for every IB */ | 164 | radeon_vm_flush(rdev, ib->vm, ib->ring); |
158 | if (ib->vm /*&& !ib->vm->last_flush*/) { | 165 | |
159 | radeon_ring_vm_flush(rdev, ib->ring, ib->vm); | ||
160 | } | ||
161 | if (const_ib) { | 166 | if (const_ib) { |
162 | radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); | 167 | radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); |
163 | radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); | 168 | radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); |
@@ -172,10 +177,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | |||
172 | if (const_ib) { | 177 | if (const_ib) { |
173 | const_ib->fence = radeon_fence_ref(ib->fence); | 178 | const_ib->fence = radeon_fence_ref(ib->fence); |
174 | } | 179 | } |
175 | /* we just flushed the VM, remember that */ | 180 | |
176 | if (ib->vm && !ib->vm->last_flush) { | 181 | if (ib->vm) |
177 | ib->vm->last_flush = radeon_fence_ref(ib->fence); | 182 | radeon_vm_fence(rdev, ib->vm, ib->fence); |
178 | } | 183 | |
179 | radeon_ring_unlock_commit(rdev, ring); | 184 | radeon_ring_unlock_commit(rdev, ring); |
180 | return 0; | 185 | return 0; |
181 | } | 186 | } |
@@ -342,13 +347,17 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, | |||
342 | */ | 347 | */ |
343 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) | 348 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) |
344 | { | 349 | { |
345 | ring->rptr = radeon_ring_get_rptr(rdev, ring); | 350 | uint32_t rptr = radeon_ring_get_rptr(rdev, ring); |
351 | |||
346 | /* This works because ring_size is a power of 2 */ | 352 | /* This works because ring_size is a power of 2 */ |
347 | ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4)); | 353 | ring->ring_free_dw = rptr + (ring->ring_size / 4); |
348 | ring->ring_free_dw -= ring->wptr; | 354 | ring->ring_free_dw -= ring->wptr; |
349 | ring->ring_free_dw &= ring->ptr_mask; | 355 | ring->ring_free_dw &= ring->ptr_mask; |
350 | if (!ring->ring_free_dw) { | 356 | if (!ring->ring_free_dw) { |
357 | /* this is an empty ring */ | ||
351 | ring->ring_free_dw = ring->ring_size / 4; | 358 | ring->ring_free_dw = ring->ring_size / 4; |
359 | /* update lockup info to avoid false positive */ | ||
360 | radeon_ring_lockup_update(rdev, ring); | ||
352 | } | 361 | } |
353 | } | 362 | } |
354 | 363 | ||
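[Editor's note: since the ring size is a power of two, the computation above is simply (rptr - wptr) mod ring_dw on dword indices, with rptr == wptr explicitly treated as a completely empty ring (and, after this patch, also used to refresh the lockup state so an idle ring never trips the detector). Worked example:]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ring_dw  = 1024;        /* ring_size / 4, a power of two */
	uint32_t ptr_mask = ring_dw - 1;
	uint32_t rptr = 100, wptr = 900;

	uint32_t free_dw = (rptr + ring_dw - wptr) & ptr_mask;
	if (!free_dw)
		free_dw = ring_dw;       /* rptr == wptr: ring is empty */

	printf("%u dwords free\n", free_dw);  /* 224 */
	return 0;
}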
@@ -372,19 +381,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi | |||
372 | /* Align requested size with padding so unlock_commit can | 381 | /* Align requested size with padding so unlock_commit can |
373 | * pad safely */ | 382 | * pad safely */ |
374 | radeon_ring_free_size(rdev, ring); | 383 | radeon_ring_free_size(rdev, ring); |
375 | if (ring->ring_free_dw == (ring->ring_size / 4)) { | ||
376 | /* This is an empty ring update lockup info to avoid | ||
377 | * false positive. | ||
378 | */ | ||
379 | radeon_ring_lockup_update(ring); | ||
380 | } | ||
381 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; | 384 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; |
382 | while (ndw > (ring->ring_free_dw - 1)) { | 385 | while (ndw > (ring->ring_free_dw - 1)) { |
383 | radeon_ring_free_size(rdev, ring); | 386 | radeon_ring_free_size(rdev, ring); |
384 | if (ndw < ring->ring_free_dw) { | 387 | if (ndw < ring->ring_free_dw) { |
385 | break; | 388 | break; |
386 | } | 389 | } |
387 | r = radeon_fence_wait_next_locked(rdev, ring->idx); | 390 | r = radeon_fence_wait_next(rdev, ring->idx); |
388 | if (r) | 391 | if (r) |
389 | return r; | 392 | return r; |
390 | } | 393 | } |
@@ -478,39 +481,17 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin | |||
478 | } | 481 | } |
479 | 482 | ||
480 | /** | 483 | /** |
481 | * radeon_ring_force_activity - add some nop packets to the ring | ||
482 | * | ||
483 | * @rdev: radeon_device pointer | ||
484 | * @ring: radeon_ring structure holding ring information | ||
485 | * | ||
486 | * Add some nop packets to the ring to force activity (all asics). | ||
487 | * Used for lockup detection to see if the rptr is advancing. | ||
488 | */ | ||
489 | void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring) | ||
490 | { | ||
491 | int r; | ||
492 | |||
493 | radeon_ring_free_size(rdev, ring); | ||
494 | if (ring->rptr == ring->wptr) { | ||
495 | r = radeon_ring_alloc(rdev, ring, 1); | ||
496 | if (!r) { | ||
497 | radeon_ring_write(ring, ring->nop); | ||
498 | radeon_ring_commit(rdev, ring); | ||
499 | } | ||
500 | } | ||
501 | } | ||
502 | |||
503 | /** | ||
504 | * radeon_ring_lockup_update - update lockup variables | 484 | * radeon_ring_lockup_update - update lockup variables |
505 | * | 485 | * |
506 | * @ring: radeon_ring structure holding ring information | 486 | * @ring: radeon_ring structure holding ring information |
507 | * | 487 | * |
508 | * Update the last rptr value and timestamp (all asics). | 488 | * Update the last rptr value and timestamp (all asics). |
509 | */ | 489 | */ |
510 | void radeon_ring_lockup_update(struct radeon_ring *ring) | 490 | void radeon_ring_lockup_update(struct radeon_device *rdev, |
491 | struct radeon_ring *ring) | ||
511 | { | 492 | { |
512 | ring->last_rptr = ring->rptr; | 493 | atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring)); |
513 | ring->last_activity = jiffies; | 494 | atomic64_set(&ring->last_activity, jiffies_64); |
514 | } | 495 | } |
515 | 496 | ||
516 | /** | 497 | /** |
@@ -518,40 +499,23 @@ void radeon_ring_lockup_update(struct radeon_ring *ring) | |||
518 | * @rdev: radeon device structure | 499 | * @rdev: radeon device structure |
519 | * @ring: radeon_ring structure holding ring information | 500 | * @ring: radeon_ring structure holding ring information |
520 | * | 501 | * |
521 | * We don't need to initialize the lockup tracking information, as we will either | 502 | */ |
522 | * see the CP rptr at a different value or a jiffies wrap-around, which will force | ||
523 | * initialization of the lockup tracking information. | ||
524 | * | ||
525 | * A possible false positive is if we get called after a while and last_cp_rptr == | ||
526 | * the current CP rptr; even if it's unlikely, it might happen. To avoid this, | ||
527 | * if the elapsed time since the last call is bigger than 2 seconds we return | ||
528 | * false and update the tracking information. Due to this the caller must call | ||
529 | * radeon_ring_test_lockup several times in less than 2 sec for a lockup to be reported; | ||
530 | * the fencing code should be cautious about that. | ||
531 | * | ||
532 | * Caller should write to the ring to force CP to do something so we don't get | ||
533 | * a false positive when the CP is just given nothing to do. | ||
534 | * | ||
535 | **/ | ||
536 | bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | 503 | bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
537 | { | 504 | { |
538 | unsigned long cjiffies, elapsed; | 505 | uint32_t rptr = radeon_ring_get_rptr(rdev, ring); |
506 | uint64_t last = atomic64_read(&ring->last_activity); | ||
507 | uint64_t elapsed; | ||
539 | 508 | ||
540 | cjiffies = jiffies; | 509 | if (rptr != atomic_read(&ring->last_rptr)) { |
541 | if (!time_after(cjiffies, ring->last_activity)) { | 510 | /* ring is still working, no lockup */ |
542 | /* likely a wrap around */ | 511 | radeon_ring_lockup_update(rdev, ring); |
543 | radeon_ring_lockup_update(ring); | ||
544 | return false; | 512 | return false; |
545 | } | 513 | } |
546 | ring->rptr = radeon_ring_get_rptr(rdev, ring); | 514 | |
547 | if (ring->rptr != ring->last_rptr) { | 515 | elapsed = jiffies_to_msecs(jiffies_64 - last); |
548 | /* CP is still working no lockup */ | ||
549 | radeon_ring_lockup_update(ring); | ||
550 | return false; | ||
551 | } | ||
552 | elapsed = jiffies_to_msecs(cjiffies - ring->last_activity); | ||
553 | if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) { | 516 | if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) { |
554 | dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); | 517 | dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n", |
518 | ring->idx, elapsed); | ||
555 | return true; | 519 | return true; |
556 | } | 520 | } |
557 | /* give a chance to the GPU ... */ | 521 | /* give a chance to the GPU ... */ |
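[Editor's note: the reworked detector above keeps nothing but an atomic (last_rptr, last_activity) pair: any rptr movement refreshes both, and a lockup is reported only once the rptr has sat still for longer than radeon_lockup_timeout milliseconds. A minimal standalone model -- the 10 s value mirrors the module parameter's usual default, and all names are hypothetical:]

#include <stdio.h>
#include <stdint.h>

static uint32_t last_rptr;
static uint64_t last_activity_ms;
static const uint64_t timeout_ms = 10000;

static int test_lockup(uint32_t rptr, uint64_t now_ms)
{
	if (rptr != last_rptr) {         /* ring still advancing */
		last_rptr = rptr;
		last_activity_ms = now_ms;
		return 0;
	}
	return (now_ms - last_activity_ms) >= timeout_ms;
}

int main(void)
{
	printf("%d\n", test_lockup(10, 0));     /* 0: progress recorded */
	printf("%d\n", test_lockup(10, 5000));  /* 0: stalled, under timeout */
	printf("%d\n", test_lockup(10, 12000)); /* 1: stalled past timeout */
	return 0;
}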
@@ -709,7 +673,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig | |||
709 | if (radeon_debugfs_ring_init(rdev, ring)) { | 673 | if (radeon_debugfs_ring_init(rdev, ring)) { |
710 | DRM_ERROR("Failed to register debugfs file for rings !\n"); | 674 | DRM_ERROR("Failed to register debugfs file for rings !\n"); |
711 | } | 675 | } |
712 | radeon_ring_lockup_update(ring); | 676 | radeon_ring_lockup_update(rdev, ring); |
713 | return 0; | 677 | return 0; |
714 | } | 678 | } |
715 | 679 | ||
@@ -780,8 +744,6 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
780 | 744 | ||
781 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", | 745 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", |
782 | ring->wptr, ring->wptr); | 746 | ring->wptr, ring->wptr); |
783 | seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", | ||
784 | ring->rptr, ring->rptr); | ||
785 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", | 747 | seq_printf(m, "last semaphore signal addr : 0x%016llx\n", |
786 | ring->last_semaphore_signal_addr); | 748 | ring->last_semaphore_signal_addr); |
787 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", | 749 | seq_printf(m, "last semaphore wait addr : 0x%016llx\n", |
@@ -814,6 +776,8 @@ static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; | |||
814 | static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX; | 776 | static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX; |
815 | static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; | 777 | static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; |
816 | static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX; | 778 | static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX; |
779 | static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX; | ||
780 | static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX; | ||
817 | 781 | ||
818 | static struct drm_info_list radeon_debugfs_ring_info_list[] = { | 782 | static struct drm_info_list radeon_debugfs_ring_info_list[] = { |
819 | {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index}, | 783 | {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index}, |
@@ -822,6 +786,8 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = { | |||
822 | {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index}, | 786 | {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index}, |
823 | {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index}, | 787 | {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index}, |
824 | {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index}, | 788 | {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index}, |
789 | {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index}, | ||
790 | {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index}, | ||
825 | }; | 791 | }; |
826 | 792 | ||
827 | static int radeon_debugfs_sa_info(struct seq_file *m, void *data) | 793 | static int radeon_debugfs_sa_info(struct seq_file *m, void *data) |
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index c0625805cdd7..adcf3e2f07da 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c | |||
@@ -312,7 +312,7 @@ static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager, | |||
312 | int radeon_sa_bo_new(struct radeon_device *rdev, | 312 | int radeon_sa_bo_new(struct radeon_device *rdev, |
313 | struct radeon_sa_manager *sa_manager, | 313 | struct radeon_sa_manager *sa_manager, |
314 | struct radeon_sa_bo **sa_bo, | 314 | struct radeon_sa_bo **sa_bo, |
315 | unsigned size, unsigned align, bool block) | 315 | unsigned size, unsigned align) |
316 | { | 316 | { |
317 | struct radeon_fence *fences[RADEON_NUM_RINGS]; | 317 | struct radeon_fence *fences[RADEON_NUM_RINGS]; |
318 | unsigned tries[RADEON_NUM_RINGS]; | 318 | unsigned tries[RADEON_NUM_RINGS]; |
@@ -353,14 +353,11 @@ int radeon_sa_bo_new(struct radeon_device *rdev, | |||
353 | r = radeon_fence_wait_any(rdev, fences, false); | 353 | r = radeon_fence_wait_any(rdev, fences, false); |
354 | spin_lock(&sa_manager->wq.lock); | 354 | spin_lock(&sa_manager->wq.lock); |
355 | /* if we have nothing to wait for block */ | 355 | /* if we have nothing to wait for block */ |
356 | if (r == -ENOENT && block) { | 356 | if (r == -ENOENT) { |
357 | r = wait_event_interruptible_locked( | 357 | r = wait_event_interruptible_locked( |
358 | sa_manager->wq, | 358 | sa_manager->wq, |
359 | radeon_sa_event(sa_manager, size, align) | 359 | radeon_sa_event(sa_manager, size, align) |
360 | ); | 360 | ); |
361 | |||
362 | } else if (r == -ENOENT) { | ||
363 | r = -ENOMEM; | ||
364 | } | 361 | } |
365 | 362 | ||
366 | } while (!r); | 363 | } while (!r); |
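With the `block` parameter gone, radeon_sa_bo_new() has exactly one slow path: wait for the oldest in-flight fences, and when there is nothing left to wait for, sleep on the manager's wait queue until space frees up. A compilable sketch of that control flow follows; the stub hooks are made-up stand-ins for radeon_fence_wait_any() and wait_event_interruptible_locked(), not driver functions.

```c
#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

/* Trivial stubs modelling the suballocator hooks. */
static int attempts;
static bool try_alloc(unsigned size, unsigned align) { return ++attempts > 2; }
static int wait_any(void) { return -ENOENT; }     /* no fences in flight */
static int sleep_until_space(void) { return 0; }  /* space became free */

static int sa_bo_new(unsigned size, unsigned align)
{
	int r;

	do {
		if (try_alloc(size, align))
			return 0;                /* found a hole, allocation done */
		r = wait_any();                  /* wait for the oldest fences first */
		if (r == -ENOENT)
			r = sleep_until_space(); /* nothing to wait for: block */
	} while (!r);                            /* retry until a real error */

	return r;
}

int main(void)
{
	printf("sa_bo_new -> %d\n", sa_bo_new(64, 8)); /* succeeds on 3rd try */
	return 0;
}
```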
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 9006b32d5eed..dbd6bcde92de 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c | |||
@@ -42,7 +42,7 @@ int radeon_semaphore_create(struct radeon_device *rdev, | |||
42 | return -ENOMEM; | 42 | return -ENOMEM; |
43 | } | 43 | } |
44 | r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, | 44 | r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, |
45 | 8 * RADEON_NUM_SYNCS, 8, true); | 45 | 8 * RADEON_NUM_SYNCS, 8); |
46 | if (r) { | 46 | if (r) { |
47 | kfree(*semaphore); | 47 | kfree(*semaphore); |
48 | *semaphore = NULL; | 48 | *semaphore = NULL; |
@@ -147,7 +147,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
147 | 147 | ||
148 | if (++count > RADEON_NUM_SYNCS) { | 148 | if (++count > RADEON_NUM_SYNCS) { |
149 | /* not enough room, wait manually */ | 149 | /* not enough room, wait manually */ |
150 | radeon_fence_wait_locked(fence); | 150 | r = radeon_fence_wait(fence, false); |
151 | if (r) | ||
152 | return r; | ||
151 | continue; | 153 | continue; |
152 | } | 154 | } |
153 | 155 | ||
@@ -161,7 +163,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
161 | if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { | 163 | if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { |
162 | /* signaling wasn't successful wait manually */ | 164 | /* signaling wasn't successful wait manually */ |
163 | radeon_ring_undo(&rdev->ring[i]); | 165 | radeon_ring_undo(&rdev->ring[i]); |
164 | radeon_fence_wait_locked(fence); | 166 | r = radeon_fence_wait(fence, false); |
167 | if (r) | ||
168 | return r; | ||
165 | continue; | 169 | continue; |
166 | } | 170 | } |
167 | 171 | ||
@@ -169,7 +173,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
169 | if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { | 173 | if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { |
170 | /* waiting wasn't successful wait manually */ | 174 | /* waiting wasn't successful wait manually */ |
171 | radeon_ring_undo(&rdev->ring[i]); | 175 | radeon_ring_undo(&rdev->ring[i]); |
172 | radeon_fence_wait_locked(fence); | 176 | r = radeon_fence_wait(fence, false); |
177 | if (r) | ||
178 | return r; | ||
173 | continue; | 179 | continue; |
174 | } | 180 | } |
175 | 181 | ||
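All three fallback paths in radeon_semaphore_sync_rings() now use radeon_fence_wait() and surface its result to the caller instead of silently ignoring a failed wait. A user-space model of the loop is sketched below; emit_signal/emit_wait/fence_wait are assumed stand-ins for the ring hooks, not driver functions.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 8

/* Stubs: emitting a semaphore command can fail when a ring lacks
 * semaphore support, in which case the sync falls back to a blocking
 * CPU-side fence wait whose error is now propagated. */
static bool emit_signal(int ring) { return ring != 5; } /* ring 5: no sema */
static bool emit_wait(int ring)   { return true; }
static int  fence_wait(int ring)  { return 0; }         /* 0 on success */

static int sync_rings(const bool needs_sync[NUM_RINGS], int dst_ring)
{
	for (int i = 0; i < NUM_RINGS; ++i) {
		if (!needs_sync[i] || i == dst_ring)
			continue;

		if (!emit_signal(i) || !emit_wait(dst_ring)) {
			/* semaphore path unavailable: wait on the CPU and
			 * return any error instead of ignoring it */
			int r = fence_wait(i);
			if (r)
				return r;
		}
	}
	return 0;
}

int main(void)
{
	bool sync[NUM_RINGS] = { [2] = true, [5] = true };
	printf("sync_rings -> %d\n", sync_rings(sync, 0));
	return 0;
}
```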
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 12e8099a0823..3a13e0d1055c 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -257,20 +257,36 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev, | |||
257 | struct radeon_ring *ring, | 257 | struct radeon_ring *ring, |
258 | struct radeon_fence **fence) | 258 | struct radeon_fence **fence) |
259 | { | 259 | { |
260 | uint32_t handle = ring->idx ^ 0xdeafbeef; | ||
260 | int r; | 261 | int r; |
261 | 262 | ||
262 | if (ring->idx == R600_RING_TYPE_UVD_INDEX) { | 263 | if (ring->idx == R600_RING_TYPE_UVD_INDEX) { |
263 | r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); | 264 | r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL); |
264 | if (r) { | 265 | if (r) { |
265 | DRM_ERROR("Failed to get dummy create msg\n"); | 266 | DRM_ERROR("Failed to get dummy create msg\n"); |
266 | return r; | 267 | return r; |
267 | } | 268 | } |
268 | 269 | ||
269 | r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence); | 270 | r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence); |
270 | if (r) { | 271 | if (r) { |
271 | DRM_ERROR("Failed to get dummy destroy msg\n"); | 272 | DRM_ERROR("Failed to get dummy destroy msg\n"); |
272 | return r; | 273 | return r; |
273 | } | 274 | } |
275 | |||
276 | } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX || | ||
277 | ring->idx == TN_RING_TYPE_VCE2_INDEX) { | ||
278 | r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL); | ||
279 | if (r) { | ||
280 | DRM_ERROR("Failed to get dummy create msg\n"); | ||
281 | return r; | ||
282 | } | ||
283 | |||
284 | r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence); | ||
285 | if (r) { | ||
286 | DRM_ERROR("Failed to get dummy destroy msg\n"); | ||
287 | return r; | ||
288 | } | ||
289 | |||
274 | } else { | 290 | } else { |
275 | r = radeon_ring_lock(rdev, ring, 64); | 291 | r = radeon_ring_lock(rdev, ring, 64); |
276 | if (r) { | 292 | if (r) { |
@@ -486,6 +502,16 @@ out_cleanup: | |||
486 | printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); | 502 | printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); |
487 | } | 503 | } |
488 | 504 | ||
505 | static bool radeon_test_sync_possible(struct radeon_ring *ringA, | ||
506 | struct radeon_ring *ringB) | ||
507 | { | ||
508 | if (ringA->idx == TN_RING_TYPE_VCE2_INDEX && | ||
509 | ringB->idx == TN_RING_TYPE_VCE1_INDEX) | ||
510 | return false; | ||
511 | |||
512 | return true; | ||
513 | } | ||
514 | |||
489 | void radeon_test_syncing(struct radeon_device *rdev) | 515 | void radeon_test_syncing(struct radeon_device *rdev) |
490 | { | 516 | { |
491 | int i, j, k; | 517 | int i, j, k; |
@@ -500,6 +526,9 @@ void radeon_test_syncing(struct radeon_device *rdev) | |||
500 | if (!ringB->ready) | 526 | if (!ringB->ready) |
501 | continue; | 527 | continue; |
502 | 528 | ||
529 | if (!radeon_test_sync_possible(ringA, ringB)) | ||
530 | continue; | ||
531 | |||
503 | DRM_INFO("Testing syncing between rings %d and %d...\n", i, j); | 532 | DRM_INFO("Testing syncing between rings %d and %d...\n", i, j); |
504 | radeon_test_ring_sync(rdev, ringA, ringB); | 533 | radeon_test_ring_sync(rdev, ringA, ringB); |
505 | 534 | ||
@@ -511,6 +540,12 @@ void radeon_test_syncing(struct radeon_device *rdev) | |||
511 | if (!ringC->ready) | 540 | if (!ringC->ready) |
512 | continue; | 541 | continue; |
513 | 542 | ||
543 | if (!radeon_test_sync_possible(ringA, ringC)) | ||
544 | continue; | ||
545 | |||
546 | if (!radeon_test_sync_possible(ringB, ringC)) | ||
547 | continue; | ||
548 | |||
514 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k); | 549 | DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k); |
515 | radeon_test_ring_sync2(rdev, ringA, ringB, ringC); | 550 | radeon_test_ring_sync2(rdev, ringA, ringB, ringC); |
516 | 551 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 040a2a10ea17..c8a8a5144ec1 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -406,8 +406,14 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
406 | if (r) { | 406 | if (r) { |
407 | memcpy: | 407 | memcpy: |
408 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | 408 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
409 | if (r) { | ||
410 | return r; | ||
411 | } | ||
409 | } | 412 | } |
410 | return r; | 413 | |
414 | /* update statistics */ | ||
415 | atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); | ||
416 | return 0; | ||
411 | } | 417 | } |
412 | 418 | ||
413 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 419 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
@@ -701,7 +707,9 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
701 | /* No other users of the address space, so set it to 0 */ | 707 | /* No other users of the address space, so set it to 0 */
702 | r = ttm_bo_device_init(&rdev->mman.bdev, | 708 | r = ttm_bo_device_init(&rdev->mman.bdev, |
703 | rdev->mman.bo_global_ref.ref.object, | 709 | rdev->mman.bo_global_ref.ref.object, |
704 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, | 710 | &radeon_bo_driver, |
711 | rdev->ddev->anon_inode->i_mapping, | ||
712 | DRM_FILE_PAGE_OFFSET, | ||
705 | rdev->need_dma32); | 713 | rdev->need_dma32); |
706 | if (r) { | 714 | if (r) { |
707 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 715 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
@@ -742,7 +750,6 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
742 | } | 750 | } |
743 | DRM_INFO("radeon: %uM of GTT memory ready.\n", | 751 | DRM_INFO("radeon: %uM of GTT memory ready.\n", |
744 | (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); | 752 | (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); |
745 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; | ||
746 | 753 | ||
747 | r = radeon_ttm_debugfs_init(rdev); | 754 | r = radeon_ttm_debugfs_init(rdev); |
748 | if (r) { | 755 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 3e6804b2b2ef..5748bdaeacce 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -455,7 +455,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
455 | } | 455 | } |
456 | 456 | ||
457 | reloc = p->relocs_ptr[(idx / 4)]; | 457 | reloc = p->relocs_ptr[(idx / 4)]; |
458 | start = reloc->lobj.gpu_offset; | 458 | start = reloc->gpu_offset; |
459 | end = start + radeon_bo_size(reloc->robj); | 459 | end = start + radeon_bo_size(reloc->robj); |
460 | start += offset; | 460 | start += offset; |
461 | 461 | ||
@@ -807,8 +807,7 @@ void radeon_uvd_note_usage(struct radeon_device *rdev) | |||
807 | (rdev->pm.dpm.hd != hd)) { | 807 | (rdev->pm.dpm.hd != hd)) { |
808 | rdev->pm.dpm.sd = sd; | 808 | rdev->pm.dpm.sd = sd; |
809 | rdev->pm.dpm.hd = hd; | 809 | rdev->pm.dpm.hd = hd; |
810 | /* disable this for now */ | 810 | streams_changed = true; |
811 | /*streams_changed = true;*/ | ||
812 | } | 811 | } |
813 | } | 812 | } |
814 | 813 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c new file mode 100644 index 000000000000..76e9904bc537 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -0,0 +1,699 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Advanced Micro Devices, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | * Authors: Christian König <christian.koenig@amd.com> | ||
26 | */ | ||
27 | |||
28 | #include <linux/firmware.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <drm/drmP.h> | ||
31 | #include <drm/drm.h> | ||
32 | |||
33 | #include "radeon.h" | ||
34 | #include "radeon_asic.h" | ||
35 | #include "sid.h" | ||
36 | |||
37 | /* 1 second timeout */ | ||
38 | #define VCE_IDLE_TIMEOUT_MS 1000 | ||
39 | |||
40 | /* Firmware Names */ | ||
41 | #define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin" | ||
42 | |||
43 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | ||
44 | |||
45 | static void radeon_vce_idle_work_handler(struct work_struct *work); | ||
46 | |||
47 | /** | ||
48 | * radeon_vce_init - allocate memory, load vce firmware | ||
49 | * | ||
50 | * @rdev: radeon_device pointer | ||
51 | * | ||
52 | * First step to get VCE online, allocate memory and load the firmware | ||
53 | */ | ||
54 | int radeon_vce_init(struct radeon_device *rdev) | ||
55 | { | ||
56 | static const char *fw_version = "[ATI LIB=VCEFW,"; | ||
57 | static const char *fb_version = "[ATI LIB=VCEFWSTATS,"; | ||
58 | unsigned long size; | ||
59 | const char *fw_name, *c; | ||
60 | uint8_t start, mid, end; | ||
61 | int i, r; | ||
62 | |||
63 | INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler); | ||
64 | |||
65 | switch (rdev->family) { | ||
66 | case CHIP_BONAIRE: | ||
67 | case CHIP_KAVERI: | ||
68 | case CHIP_KABINI: | ||
69 | fw_name = FIRMWARE_BONAIRE; | ||
70 | break; | ||
71 | |||
72 | default: | ||
73 | return -EINVAL; | ||
74 | } | ||
75 | |||
76 | r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev); | ||
77 | if (r) { | ||
78 | dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n", | ||
79 | fw_name); | ||
80 | return r; | ||
81 | } | ||
82 | |||
83 | /* search for firmware version */ | ||
84 | |||
85 | size = rdev->vce_fw->size - strlen(fw_version) - 9; | ||
86 | c = rdev->vce_fw->data; | ||
87 | for (;size > 0; --size, ++c) | ||
88 | if (strncmp(c, fw_version, strlen(fw_version)) == 0) | ||
89 | break; | ||
90 | |||
91 | if (size == 0) | ||
92 | return -EINVAL; | ||
93 | |||
94 | c += strlen(fw_version); | ||
95 | if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3) | ||
96 | return -EINVAL; | ||
97 | |||
98 | /* search for feedback version */ | ||
99 | |||
100 | size = rdev->vce_fw->size - strlen(fb_version) - 3; | ||
101 | c = rdev->vce_fw->data; | ||
102 | for (;size > 0; --size, ++c) | ||
103 | if (strncmp(c, fb_version, strlen(fb_version)) == 0) | ||
104 | break; | ||
105 | |||
106 | if (size == 0) | ||
107 | return -EINVAL; | ||
108 | |||
109 | c += strlen(fb_version); | ||
110 | if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) | ||
111 | return -EINVAL; | ||
112 | |||
113 | DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n", | ||
114 | start, mid, end, rdev->vce.fb_version); | ||
115 | |||
116 | rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); | ||
117 | |||
118 | /* we can only work with this fw version for now */ | ||
119 | if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) | ||
120 | return -EINVAL; | ||
121 | |||
122 | /* allocate firmware, stack and heap BO */ | ||
123 | |||
124 | size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + | ||
125 | RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; | ||
126 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, | ||
127 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo); | ||
128 | if (r) { | ||
129 | dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); | ||
130 | return r; | ||
131 | } | ||
132 | |||
133 | r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); | ||
134 | if (r) { | ||
135 | radeon_bo_unref(&rdev->vce.vcpu_bo); | ||
136 | dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); | ||
137 | return r; | ||
138 | } | ||
139 | |||
140 | r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, | ||
141 | &rdev->vce.gpu_addr); | ||
142 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | ||
143 | if (r) { | ||
144 | radeon_bo_unref(&rdev->vce.vcpu_bo); | ||
145 | dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); | ||
146 | return r; | ||
147 | } | ||
148 | |||
149 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
150 | atomic_set(&rdev->vce.handles[i], 0); | ||
151 | rdev->vce.filp[i] = NULL; | ||
152 | } | ||
153 | |||
154 | return 0; | ||
155 | } | ||
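radeon_vce_init() locates the firmware version by scanning the raw image byte by byte for an ASCII marker and parsing the digits that follow it. A self-contained model of that scan is shown below, run against a made-up stand-in blob rather than the real BONAIRE_vce.bin.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk the firmware image looking for the version marker, then parse
 * the "start.mid.end]" numbers behind it, as radeon_vce_init() does. */
static int find_version(const char *data, size_t len,
			uint8_t *start, uint8_t *mid, uint8_t *end)
{
	static const char marker[] = "[ATI LIB=VCEFW,";
	size_t mlen = strlen(marker);

	for (size_t i = 0; i + mlen + 9 <= len; ++i) {
		if (memcmp(data + i, marker, mlen) != 0)
			continue;
		if (sscanf(data + i + mlen, "%2hhd.%2hhd.%2hhd]",
			   start, mid, end) == 3)
			return 0;
		return -1; /* marker found but the version field is malformed */
	}
	return -1;         /* marker not present in the image */
}

int main(void)
{
	const char blob[] = "\x00\x01[ATI LIB=VCEFW,40.2.2]\x00"; /* fake image */
	uint8_t s, m, e;

	if (!find_version(blob, sizeof(blob), &s, &m, &e))
		printf("VCE firmware %d.%d.%d\n", s, m, e);
	return 0;
}
```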
156 | |||
157 | /** | ||
158 | * radeon_vce_fini - free memory | ||
159 | * | ||
160 | * @rdev: radeon_device pointer | ||
161 | * | ||
162 | * Last step on VCE teardown, free firmware memory | ||
163 | */ | ||
164 | void radeon_vce_fini(struct radeon_device *rdev) | ||
165 | { | ||
166 | if (rdev->vce.vcpu_bo == NULL) | ||
167 | return; | ||
168 | |||
169 | radeon_bo_unref(&rdev->vce.vcpu_bo); | ||
170 | |||
171 | release_firmware(rdev->vce_fw); | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * radeon_vce_suspend - unpin VCE fw memory | ||
176 | * | ||
177 | * @rdev: radeon_device pointer | ||
178 | * | ||
179 | */ | ||
180 | int radeon_vce_suspend(struct radeon_device *rdev) | ||
181 | { | ||
182 | int i; | ||
183 | |||
184 | if (rdev->vce.vcpu_bo == NULL) | ||
185 | return 0; | ||
186 | |||
187 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) | ||
188 | if (atomic_read(&rdev->vce.handles[i])) | ||
189 | break; | ||
190 | |||
191 | if (i == RADEON_MAX_VCE_HANDLES) | ||
192 | return 0; | ||
193 | |||
194 | /* TODO: suspending running encoding sessions isn't supported */ | ||
195 | return -EINVAL; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * radeon_vce_resume - pin VCE fw memory | ||
200 | * | ||
201 | * @rdev: radeon_device pointer | ||
202 | * | ||
203 | */ | ||
204 | int radeon_vce_resume(struct radeon_device *rdev) | ||
205 | { | ||
206 | void *cpu_addr; | ||
207 | int r; | ||
208 | |||
209 | if (rdev->vce.vcpu_bo == NULL) | ||
210 | return -EINVAL; | ||
211 | |||
212 | r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); | ||
213 | if (r) { | ||
214 | dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); | ||
215 | return r; | ||
216 | } | ||
217 | |||
218 | r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr); | ||
219 | if (r) { | ||
220 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | ||
221 | dev_err(rdev->dev, "(%d) VCE map failed\n", r); | ||
222 | return r; | ||
223 | } | ||
224 | |||
225 | memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); | ||
226 | |||
227 | radeon_bo_kunmap(rdev->vce.vcpu_bo); | ||
228 | |||
229 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | /** | ||
235 | * radeon_vce_idle_work_handler - power off VCE | ||
236 | * | ||
237 | * @work: pointer to work structure | ||
238 | * | ||
239 | * power off VCE when it's not used anymore | ||
240 | */ | ||
241 | static void radeon_vce_idle_work_handler(struct work_struct *work) | ||
242 | { | ||
243 | struct radeon_device *rdev = | ||
244 | container_of(work, struct radeon_device, vce.idle_work.work); | ||
245 | |||
246 | if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) && | ||
247 | (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) { | ||
248 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | ||
249 | radeon_dpm_enable_vce(rdev, false); | ||
250 | } else { | ||
251 | radeon_set_vce_clocks(rdev, 0, 0); | ||
252 | } | ||
253 | } else { | ||
254 | schedule_delayed_work(&rdev->vce.idle_work, | ||
255 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | /** | ||
260 | * radeon_vce_note_usage - power up VCE | ||
261 | * | ||
262 | * @rdev: radeon_device pointer | ||
263 | * | ||
264 | * Make sure VCE is powered up when we want to use it | ||
265 | */ | ||
266 | void radeon_vce_note_usage(struct radeon_device *rdev) | ||
267 | { | ||
268 | bool streams_changed = false; | ||
269 | bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work); | ||
270 | set_clocks &= schedule_delayed_work(&rdev->vce.idle_work, | ||
271 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); | ||
272 | |||
273 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | ||
274 | /* XXX figure out if the streams changed */ | ||
275 | streams_changed = false; | ||
276 | } | ||
277 | |||
278 | if (set_clocks || streams_changed) { | ||
279 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | ||
280 | radeon_dpm_enable_vce(rdev, true); | ||
281 | } else { | ||
282 | radeon_set_vce_clocks(rdev, 53300, 40000); | ||
283 | } | ||
284 | } | ||
285 | } | ||
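Taken together, the idle handler and radeon_vce_note_usage() implement a simple timer-based power gate: each submission cancels and re-arms the delayed work, and the clocks are only touched when no idle work was pending (i.e. the block may have been powered down in the meantime). A boolean user-space model of that hand-off follows; the struct and printouts are illustrative, with the real driver using cancel_delayed_work_sync() and schedule_delayed_work().

```c
#include <stdbool.h>
#include <stdio.h>

struct vce_pm {
	bool idle_work_pending;
	bool powered;
};

static void note_usage(struct vce_pm *pm)
{
	bool was_pending = pm->idle_work_pending; /* cancel_delayed_work_sync() */
	pm->idle_work_pending = true;             /* schedule_delayed_work() */
	if (!was_pending) {
		pm->powered = true;               /* radeon_set_vce_clocks(...) */
		printf("VCE powered up\n");
	}
}

static void idle_work(struct vce_pm *pm, unsigned fences_outstanding)
{
	pm->idle_work_pending = false;
	if (fences_outstanding == 0) {
		pm->powered = false;              /* radeon_set_vce_clocks(0, 0) */
		printf("VCE powered down\n");
	} else {
		pm->idle_work_pending = true;     /* retry after the timeout */
	}
}

int main(void)
{
	struct vce_pm pm = { false, false };
	note_usage(&pm);   /* first submission powers the block up */
	idle_work(&pm, 0); /* timeout with no work left powers it down */
	return 0;
}
```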
286 | |||
287 | /** | ||
288 | * radeon_vce_free_handles - free still open VCE handles | ||
289 | * | ||
290 | * @rdev: radeon_device pointer | ||
291 | * @filp: drm file pointer | ||
292 | * | ||
293 | * Close all VCE handles still open by this file pointer | ||
294 | */ | ||
295 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp) | ||
296 | { | ||
297 | int i, r; | ||
298 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
299 | uint32_t handle = atomic_read(&rdev->vce.handles[i]); | ||
300 | if (!handle || rdev->vce.filp[i] != filp) | ||
301 | continue; | ||
302 | |||
303 | radeon_vce_note_usage(rdev); | ||
304 | |||
305 | r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX, | ||
306 | handle, NULL); | ||
307 | if (r) | ||
308 | DRM_ERROR("Error destroying VCE handle (%d)!\n", r); | ||
309 | |||
310 | rdev->vce.filp[i] = NULL; | ||
311 | atomic_set(&rdev->vce.handles[i], 0); | ||
312 | } | ||
313 | } | ||
314 | |||
315 | /** | ||
316 | * radeon_vce_get_create_msg - generate a VCE create msg | ||
317 | * | ||
318 | * @rdev: radeon_device pointer | ||
319 | * @ring: ring we should submit the msg to | ||
320 | * @handle: VCE session handle to use | ||
321 | * @fence: optional fence to return | ||
322 | * | ||
323 | * Open up a stream for HW test | ||
324 | */ | ||
325 | int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, | ||
326 | uint32_t handle, struct radeon_fence **fence) | ||
327 | { | ||
328 | const unsigned ib_size_dw = 1024; | ||
329 | struct radeon_ib ib; | ||
330 | uint64_t dummy; | ||
331 | int i, r; | ||
332 | |||
333 | r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4); | ||
334 | if (r) { | ||
335 | DRM_ERROR("radeon: failed to get ib (%d).\n", r); | ||
336 | return r; | ||
337 | } | ||
338 | |||
339 | dummy = ib.gpu_addr + 1024; | ||
340 | |||
341 | /* stitch together a VCE create msg */ | ||
342 | ib.length_dw = 0; | ||
343 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | ||
344 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | ||
345 | ib.ptr[ib.length_dw++] = handle; | ||
346 | |||
347 | ib.ptr[ib.length_dw++] = 0x00000030; /* len */ | ||
348 | ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ | ||
349 | ib.ptr[ib.length_dw++] = 0x00000000; | ||
350 | ib.ptr[ib.length_dw++] = 0x00000042; | ||
351 | ib.ptr[ib.length_dw++] = 0x0000000a; | ||
352 | ib.ptr[ib.length_dw++] = 0x00000001; | ||
353 | ib.ptr[ib.length_dw++] = 0x00000080; | ||
354 | ib.ptr[ib.length_dw++] = 0x00000060; | ||
355 | ib.ptr[ib.length_dw++] = 0x00000100; | ||
356 | ib.ptr[ib.length_dw++] = 0x00000100; | ||
357 | ib.ptr[ib.length_dw++] = 0x0000000c; | ||
358 | ib.ptr[ib.length_dw++] = 0x00000000; | ||
359 | |||
360 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | ||
361 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | ||
362 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | ||
363 | ib.ptr[ib.length_dw++] = dummy; | ||
364 | ib.ptr[ib.length_dw++] = 0x00000001; | ||
365 | |||
366 | for (i = ib.length_dw; i < ib_size_dw; ++i) | ||
367 | ib.ptr[i] = 0x0; | ||
368 | |||
369 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
370 | if (r) { | ||
371 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | ||
372 | } | ||
373 | |||
374 | if (fence) | ||
375 | *fence = radeon_fence_ref(ib.fence); | ||
376 | |||
377 | radeon_ib_free(rdev, &ib); | ||
378 | |||
379 | return r; | ||
380 | } | ||
381 | |||
382 | /** | ||
383 | * radeon_vce_get_destroy_msg - generate a VCE destroy msg | ||
384 | * | ||
385 | * @rdev: radeon_device pointer | ||
386 | * @ring: ring we should submit the msg to | ||
387 | * @handle: VCE session handle to use | ||
388 | * @fence: optional fence to return | ||
389 | * | ||
390 | * Close up a stream for HW test or if userspace failed to do so | ||
391 | */ | ||
392 | int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | ||
393 | uint32_t handle, struct radeon_fence **fence) | ||
394 | { | ||
395 | const unsigned ib_size_dw = 1024; | ||
396 | struct radeon_ib ib; | ||
397 | uint64_t dummy; | ||
398 | int i, r; | ||
399 | |||
400 | r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4); | ||
401 | if (r) { | ||
402 | DRM_ERROR("radeon: failed to get ib (%d).\n", r); | ||
403 | return r; | ||
404 | } | ||
405 | |||
406 | dummy = ib.gpu_addr + 1024; | ||
407 | |||
408 | /* stitch together a VCE destroy msg */ | ||
409 | ib.length_dw = 0; | ||
410 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | ||
411 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | ||
412 | ib.ptr[ib.length_dw++] = handle; | ||
413 | |||
414 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | ||
415 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | ||
416 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | ||
417 | ib.ptr[ib.length_dw++] = dummy; | ||
418 | ib.ptr[ib.length_dw++] = 0x00000001; | ||
419 | |||
420 | ib.ptr[ib.length_dw++] = 0x00000008; /* len */ | ||
421 | ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ | ||
422 | |||
423 | for (i = ib.length_dw; i < ib_size_dw; ++i) | ||
424 | ib.ptr[i] = 0x0; | ||
425 | |||
426 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
427 | if (r) { | ||
428 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); | ||
429 | } | ||
430 | |||
431 | if (fence) | ||
432 | *fence = radeon_fence_ref(ib.fence); | ||
433 | |||
434 | radeon_ib_free(rdev, &ib); | ||
435 | |||
436 | return r; | ||
437 | } | ||
438 | |||
439 | /** | ||
440 | * radeon_vce_cs_reloc - command submission relocation | ||
441 | * | ||
442 | * @p: parser context | ||
443 | * @lo: address of lower dword | ||
444 | * @hi: address of higher dword | ||
445 | * | ||
446 | * Patch relocation inside command stream with real buffer address | ||
447 | */ | ||
448 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | ||
449 | { | ||
450 | struct radeon_cs_chunk *relocs_chunk; | ||
451 | uint64_t offset; | ||
452 | unsigned idx; | ||
453 | |||
454 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | ||
455 | offset = radeon_get_ib_value(p, lo); | ||
456 | idx = radeon_get_ib_value(p, hi); | ||
457 | |||
458 | if (idx >= relocs_chunk->length_dw) { | ||
459 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | ||
460 | idx, relocs_chunk->length_dw); | ||
461 | return -EINVAL; | ||
462 | } | ||
463 | |||
464 | offset += p->relocs_ptr[(idx / 4)]->gpu_offset; | ||
465 | |||
466 | p->ib.ptr[lo] = offset & 0xFFFFFFFF; | ||
467 | p->ib.ptr[hi] = offset >> 32; | ||
468 | |||
469 | return 0; | ||
470 | } | ||
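The relocation patch is a straightforward 64-bit address fix-up: the dword at `lo` carries an offset inside the buffer, the real GPU offset of the relocated BO is added to it, and the result is written back as two dwords. A small model of that arithmetic is given below; the in-buffer offset and BO address are arbitrary test values.

```c
#include <stdint.h>
#include <stdio.h>

/* Model of the VCE reloc fix-up: add the BO's GPU offset and split the
 * resulting 64-bit address into the low and high command stream dwords. */
static void patch_reloc(uint32_t *ib, int lo, int hi, uint64_t bo_gpu_offset)
{
	uint64_t addr = (uint64_t)ib[lo] + bo_gpu_offset; /* offset within BO */

	ib[lo] = (uint32_t)(addr & 0xFFFFFFFF); /* lower dword */
	ib[hi] = (uint32_t)(addr >> 32);        /* upper dword */
}

int main(void)
{
	uint32_t ib[4] = { 0, 0x100, 0, 0 };    /* ib[1] holds the offset */

	patch_reloc(ib, 1, 2, 0x123456789000ull);
	printf("lo=0x%08x hi=0x%08x\n", ib[1], ib[2]);
	return 0;
}
```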
471 | |||
472 | /** | ||
473 | * radeon_vce_cs_parse - parse and validate the command stream | ||
474 | * | ||
475 | * @p: parser context | ||
476 | * | ||
477 | */ | ||
478 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) | ||
479 | { | ||
480 | uint32_t handle = 0; | ||
481 | bool destroy = false; | ||
482 | int i, r; | ||
483 | |||
484 | while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { | ||
485 | uint32_t len = radeon_get_ib_value(p, p->idx); | ||
486 | uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); | ||
487 | |||
488 | if ((len < 8) || (len & 3)) { | ||
489 | DRM_ERROR("invalid VCE command length (%d)!\n", len); | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | |||
493 | switch (cmd) { | ||
494 | case 0x00000001: // session | ||
495 | handle = radeon_get_ib_value(p, p->idx + 2); | ||
496 | break; | ||
497 | |||
498 | case 0x00000002: // task info | ||
499 | case 0x01000001: // create | ||
500 | case 0x04000001: // config extension | ||
501 | case 0x04000002: // pic control | ||
502 | case 0x04000005: // rate control | ||
503 | case 0x04000007: // motion estimation | ||
504 | case 0x04000008: // rdo | ||
505 | break; | ||
506 | |||
507 | case 0x03000001: // encode | ||
508 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); | ||
509 | if (r) | ||
510 | return r; | ||
511 | |||
512 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); | ||
513 | if (r) | ||
514 | return r; | ||
515 | break; | ||
516 | |||
517 | case 0x02000001: // destroy | ||
518 | destroy = true; | ||
519 | break; | ||
520 | |||
521 | case 0x05000001: // context buffer | ||
522 | case 0x05000004: // video bitstream buffer | ||
523 | case 0x05000005: // feedback buffer | ||
524 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); | ||
525 | if (r) | ||
526 | return r; | ||
527 | break; | ||
528 | |||
529 | default: | ||
530 | DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | |||
534 | p->idx += len / 4; | ||
535 | } | ||
536 | |||
537 | if (destroy) { | ||
538 | /* IB contains a destroy msg, free the handle */ | ||
539 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) | ||
540 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); | ||
541 | |||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | /* create or encode, validate the handle */ | ||
546 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
547 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) | ||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | /* handle not found, try to alloc a new one */ | ||
552 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
553 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { | ||
554 | p->rdev->vce.filp[i] = p->filp; | ||
555 | return 0; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | DRM_ERROR("No more free VCE handles!\n"); | ||
560 | return -EINVAL; | ||
561 | } | ||
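Session handles are managed lock-free: a table slot holding 0 is free, and both claiming and releasing a slot are single compare-and-swap operations. A C11 model of that scheme using <stdatomic.h> in place of the kernel's atomic_cmpxchg() is sketched below; the table size and test handle are arbitrary.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

static int validate_or_alloc(uint32_t handle)
{
	/* create or encode: accept a handle that is already registered */
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return 0;

	/* not found: try to claim a free slot (0 -> handle) atomically */
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return 0;
	}
	return -1; /* table full: no more free VCE handles */
}

static void destroy(uint32_t handle)
{
	/* destroy msg: release the slot by swapping the handle back to 0 */
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = handle;
		atomic_compare_exchange_strong(&handles[i], &expected, 0);
	}
}

int main(void)
{
	printf("alloc -> %d\n", validate_or_alloc(0xdeafbeef));
	destroy(0xdeafbeef);
	return 0;
}
```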
562 | |||
563 | /** | ||
564 | * radeon_vce_semaphore_emit - emit a semaphore command | ||
565 | * | ||
566 | * @rdev: radeon_device pointer | ||
567 | * @ring: engine to use | ||
568 | * @semaphore: address of semaphore | ||
569 | * @emit_wait: true=emit wait, false=emit signal | ||
570 | * | ||
571 | */ | ||
572 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | ||
573 | struct radeon_ring *ring, | ||
574 | struct radeon_semaphore *semaphore, | ||
575 | bool emit_wait) | ||
576 | { | ||
577 | uint64_t addr = semaphore->gpu_addr; | ||
578 | |||
579 | radeon_ring_write(ring, VCE_CMD_SEMAPHORE); | ||
580 | radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); | ||
581 | radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); | ||
582 | radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); | ||
583 | if (!emit_wait) | ||
584 | radeon_ring_write(ring, VCE_CMD_END); | ||
585 | |||
586 | return true; | ||
587 | } | ||
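The semaphore address is 8-byte aligned, so the command drops the low three bits and packs the remaining address into two 20-bit fields, low bits first, exactly as the two ring writes above do. A tiny check of that encoding, with an arbitrary sample address:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000001234567890ull & ~7ull; /* 8-byte aligned */
	uint32_t lo = (addr >> 3)  & 0x000FFFFF;       /* low 20-bit field */
	uint32_t hi = (addr >> 23) & 0x000FFFFF;       /* high 20-bit field */

	printf("VCE_CMD_SEMAPHORE lo=0x%05x hi=0x%05x\n", lo, hi);
	return 0;
}
```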
588 | |||
589 | /** | ||
590 | * radeon_vce_ib_execute - execute indirect buffer | ||
591 | * | ||
592 | * @rdev: radeon_device pointer | ||
593 | * @ib: the IB to execute | ||
594 | * | ||
595 | */ | ||
596 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | ||
597 | { | ||
598 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | ||
599 | radeon_ring_write(ring, VCE_CMD_IB); | ||
600 | radeon_ring_write(ring, ib->gpu_addr); | ||
601 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); | ||
602 | radeon_ring_write(ring, ib->length_dw); | ||
603 | } | ||
604 | |||
605 | /** | ||
606 | * radeon_vce_fence_emit - add a fence command to the ring | ||
607 | * | ||
608 | * @rdev: radeon_device pointer | ||
609 | * @fence: the fence | ||
610 | * | ||
611 | */ | ||
612 | void radeon_vce_fence_emit(struct radeon_device *rdev, | ||
613 | struct radeon_fence *fence) | ||
614 | { | ||
615 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | ||
616 | uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; | ||
617 | |||
618 | radeon_ring_write(ring, VCE_CMD_FENCE); | ||
619 | radeon_ring_write(ring, addr); | ||
620 | radeon_ring_write(ring, upper_32_bits(addr)); | ||
621 | radeon_ring_write(ring, fence->seq); | ||
622 | radeon_ring_write(ring, VCE_CMD_TRAP); | ||
623 | radeon_ring_write(ring, VCE_CMD_END); | ||
624 | } | ||
625 | |||
626 | /** | ||
627 | * radeon_vce_ring_test - test if VCE ring is working | ||
628 | * | ||
629 | * @rdev: radeon_device pointer | ||
630 | * @ring: the engine to test on | ||
631 | * | ||
632 | */ | ||
633 | int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | ||
634 | { | ||
635 | uint32_t rptr = vce_v1_0_get_rptr(rdev, ring); | ||
636 | unsigned i; | ||
637 | int r; | ||
638 | |||
639 | r = radeon_ring_lock(rdev, ring, 16); | ||
640 | if (r) { | ||
641 | DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n", | ||
642 | ring->idx, r); | ||
643 | return r; | ||
644 | } | ||
645 | radeon_ring_write(ring, VCE_CMD_END); | ||
646 | radeon_ring_unlock_commit(rdev, ring); | ||
647 | |||
648 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
649 | if (vce_v1_0_get_rptr(rdev, ring) != rptr) | ||
650 | break; | ||
651 | DRM_UDELAY(1); | ||
652 | } | ||
653 | |||
654 | if (i < rdev->usec_timeout) { | ||
655 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | ||
656 | ring->idx, i); | ||
657 | } else { | ||
658 | DRM_ERROR("radeon: ring %d test failed\n", | ||
659 | ring->idx); | ||
660 | r = -ETIMEDOUT; | ||
661 | } | ||
662 | |||
663 | return r; | ||
664 | } | ||
665 | |||
666 | /** | ||
667 | * radeon_vce_ib_test - test if VCE IBs are working | ||
668 | * | ||
669 | * @rdev: radeon_device pointer | ||
670 | * @ring: the engine to test on | ||
671 | * | ||
672 | */ | ||
673 | int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | ||
674 | { | ||
675 | struct radeon_fence *fence = NULL; | ||
676 | int r; | ||
677 | |||
678 | r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL); | ||
679 | if (r) { | ||
680 | DRM_ERROR("radeon: failed to get create msg (%d).\n", r); | ||
681 | goto error; | ||
682 | } | ||
683 | |||
684 | r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence); | ||
685 | if (r) { | ||
686 | DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); | ||
687 | goto error; | ||
688 | } | ||
689 | |||
690 | r = radeon_fence_wait(fence, false); | ||
691 | if (r) { | ||
692 | DRM_ERROR("radeon: fence wait failed (%d).\n", r); | ||
693 | } else { | ||
694 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx); | ||
695 | } | ||
696 | error: | ||
697 | radeon_fence_unref(&fence); | ||
698 | return r; | ||
699 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c new file mode 100644 index 000000000000..2aae6ce49d32 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -0,0 +1,966 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #include <drm/drmP.h> | ||
29 | #include <drm/radeon_drm.h> | ||
30 | #include "radeon.h" | ||
31 | #include "radeon_trace.h" | ||
32 | |||
33 | /* | ||
34 | * GPUVM | ||
35 | * GPUVM is similar to the legacy gart on older asics, however | ||
36 | * rather than there being a single global gart table | ||
37 | * for the entire GPU, there are multiple VM page tables active | ||
38 | * at any given time. The VM page tables can contain a mix of | ||
39 | * vram pages and system memory pages, and system memory pages | ||
40 | * can be mapped as snooped (cached system pages) or unsnooped | ||
41 | * (uncached system pages). | ||
42 | * Each VM has an ID associated with it and there is a page table | ||
43 | * associated with each VMID. When executing a command buffer, | ||
44 | * the kernel tells the ring what VMID to use for that command | ||
45 | * buffer. VMIDs are allocated dynamically as commands are submitted. | ||
46 | * The userspace drivers maintain their own address space and the kernel | ||
47 | * sets up their page tables accordingly when they submit their | ||
48 | * command buffers and a VMID is assigned. | ||
49 | * Cayman/Trinity support up to 8 active VMs at any given time; | ||
50 | * SI supports 16. | ||
51 | */ | ||
52 | |||
53 | /** | ||
54 | * radeon_vm_num_pdes - return the number of page directory entries | ||
55 | * | ||
56 | * @rdev: radeon_device pointer | ||
57 | * | ||
58 | * Calculate the number of page directory entries (cayman+). | ||
59 | */ | ||
60 | static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) | ||
61 | { | ||
62 | return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * radeon_vm_directory_size - returns the size of the page directory in bytes | ||
67 | * | ||
68 | * @rdev: radeon_device pointer | ||
69 | * | ||
70 | * Calculate the size of the page directory in bytes (cayman+). | ||
71 | */ | ||
72 | static unsigned radeon_vm_directory_size(struct radeon_device *rdev) | ||
73 | { | ||
74 | return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); | ||
75 | } | ||
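The two helpers above pin down the page directory geometry: every 8-byte PDE covers 2^RADEON_VM_BLOCK_SIZE GPU pages, and the directory is rounded up to a whole GPU page. A standalone model of that arithmetic follows; the block-size exponent and page size are assumed constants for illustration, since the real values live in radeon.h.

```c
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096u
#define VM_BLOCK_SIZE 9u  /* assumed PTEs-per-table exponent */

/* One PDE per 2^VM_BLOCK_SIZE GPU pages. */
static uint64_t num_pdes(uint64_t max_pfn)
{
	return max_pfn >> VM_BLOCK_SIZE;
}

/* 8 bytes per PDE, rounded up to a whole GPU page. */
static uint64_t directory_size(uint64_t max_pfn)
{
	uint64_t bytes = num_pdes(max_pfn) * 8;

	return (bytes + GPU_PAGE_SIZE - 1) & ~(uint64_t)(GPU_PAGE_SIZE - 1);
}

int main(void)
{
	uint64_t max_pfn = 1ull << 20; /* 4 GiB worth of 4 KiB GPU pages */

	printf("%llu PDEs, %llu byte directory\n",
	       (unsigned long long)num_pdes(max_pfn),
	       (unsigned long long)directory_size(max_pfn));
	return 0;
}
```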
76 | |||
77 | /** | ||
78 | * radeon_vm_manager_init - init the vm manager | ||
79 | * | ||
80 | * @rdev: radeon_device pointer | ||
81 | * | ||
82 | * Init the vm manager (cayman+). | ||
83 | * Returns 0 for success, error for failure. | ||
84 | */ | ||
85 | int radeon_vm_manager_init(struct radeon_device *rdev) | ||
86 | { | ||
87 | int r; | ||
88 | |||
89 | if (!rdev->vm_manager.enabled) { | ||
90 | r = radeon_asic_vm_init(rdev); | ||
91 | if (r) | ||
92 | return r; | ||
93 | |||
94 | rdev->vm_manager.enabled = true; | ||
95 | } | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * radeon_vm_manager_fini - tear down the vm manager | ||
101 | * | ||
102 | * @rdev: radeon_device pointer | ||
103 | * | ||
104 | * Tear down the VM manager (cayman+). | ||
105 | */ | ||
106 | void radeon_vm_manager_fini(struct radeon_device *rdev) | ||
107 | { | ||
108 | int i; | ||
109 | |||
110 | if (!rdev->vm_manager.enabled) | ||
111 | return; | ||
112 | |||
113 | for (i = 0; i < RADEON_NUM_VM; ++i) | ||
114 | radeon_fence_unref(&rdev->vm_manager.active[i]); | ||
115 | radeon_asic_vm_fini(rdev); | ||
116 | rdev->vm_manager.enabled = false; | ||
117 | } | ||
118 | |||
119 | /** | ||
120 | * radeon_vm_get_bos - add the vm BOs to a validation list | ||
121 | * | ||
122 | * @vm: vm providing the BOs | ||
123 | * @head: head of validation list | ||
124 | * | ||
125 | * Add the page directory to the list of BOs to | ||
126 | * validate for command submission (cayman+). | ||
127 | */ | ||
128 | struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, | ||
129 | struct radeon_vm *vm, | ||
130 | struct list_head *head) | ||
131 | { | ||
132 | struct radeon_cs_reloc *list; | ||
133 | unsigned i, idx, size; | ||
134 | |||
135 | size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); | ||
136 | list = kmalloc(size, GFP_KERNEL); | ||
137 | if (!list) | ||
138 | return NULL; | ||
139 | |||
140 | /* add the vm page table to the list */ | ||
141 | list[0].gobj = NULL; | ||
142 | list[0].robj = vm->page_directory; | ||
143 | list[0].domain = RADEON_GEM_DOMAIN_VRAM; | ||
144 | list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM; | ||
145 | list[0].tv.bo = &vm->page_directory->tbo; | ||
146 | list[0].tiling_flags = 0; | ||
147 | list[0].handle = 0; | ||
148 | list_add(&list[0].tv.head, head); | ||
149 | |||
150 | for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { | ||
151 | if (!vm->page_tables[i].bo) | ||
152 | continue; | ||
153 | |||
154 | list[idx].gobj = NULL; | ||
155 | list[idx].robj = vm->page_tables[i].bo; | ||
156 | list[idx].domain = RADEON_GEM_DOMAIN_VRAM; | ||
157 | list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM; | ||
158 | list[idx].tv.bo = &list[idx].robj->tbo; | ||
159 | list[idx].tiling_flags = 0; | ||
160 | list[idx].handle = 0; | ||
161 | list_add(&list[idx++].tv.head, head); | ||
162 | } | ||
163 | |||
164 | return list; | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * radeon_vm_grab_id - allocate the next free VMID | ||
169 | * | ||
170 | * @rdev: radeon_device pointer | ||
171 | * @vm: vm to allocate id for | ||
172 | * @ring: ring we want to submit job to | ||
173 | * | ||
174 | * Allocate an id for the vm (cayman+). | ||
175 | * Returns the fence we need to sync to (if any). | ||
176 | * | ||
177 | * Global and local mutex must be locked! | ||
178 | */ | ||
179 | struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, | ||
180 | struct radeon_vm *vm, int ring) | ||
181 | { | ||
182 | struct radeon_fence *best[RADEON_NUM_RINGS] = {}; | ||
183 | unsigned choices[2] = {}; | ||
184 | unsigned i; | ||
185 | |||
186 | /* check if the id is still valid */ | ||
187 | if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) | ||
188 | return NULL; | ||
189 | |||
190 | /* we definitely need to flush */ | ||
191 | radeon_fence_unref(&vm->last_flush); | ||
192 | |||
193 | /* skip over VMID 0, since it is the system VM */ | ||
194 | for (i = 1; i < rdev->vm_manager.nvm; ++i) { | ||
195 | struct radeon_fence *fence = rdev->vm_manager.active[i]; | ||
196 | |||
197 | if (fence == NULL) { | ||
198 | /* found a free one */ | ||
199 | vm->id = i; | ||
200 | trace_radeon_vm_grab_id(vm->id, ring); | ||
201 | return NULL; | ||
202 | } | ||
203 | |||
204 | if (radeon_fence_is_earlier(fence, best[fence->ring])) { | ||
205 | best[fence->ring] = fence; | ||
206 | choices[fence->ring == ring ? 0 : 1] = i; | ||
207 | } | ||
208 | } | ||
209 | |||
210 | for (i = 0; i < 2; ++i) { | ||
211 | if (choices[i]) { | ||
212 | vm->id = choices[i]; | ||
213 | trace_radeon_vm_grab_id(vm->id, ring); | ||
214 | return rdev->vm_manager.active[choices[i]]; | ||
215 | } | ||
216 | } | ||
217 | |||
218 | /* should never happen */ | ||
219 | BUG(); | ||
220 | return NULL; | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * radeon_vm_flush - hardware flush the vm | ||
225 | * | ||
226 | * @rdev: radeon_device pointer | ||
227 | * @vm: vm we want to flush | ||
228 | * @ring: ring to use for flush | ||
229 | * | ||
230 | * Flush the vm (cayman+). | ||
231 | * | ||
232 | * Global and local mutex must be locked! | ||
233 | */ | ||
234 | void radeon_vm_flush(struct radeon_device *rdev, | ||
235 | struct radeon_vm *vm, | ||
236 | int ring) | ||
237 | { | ||
238 | uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); | ||
239 | |||
240 | /* if we can't remember our last VM flush then flush now! */ | ||
241 | /* XXX figure out why we have to flush all the time */ | ||
242 | if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) { | ||
243 | vm->pd_gpu_addr = pd_addr; | ||
244 | radeon_ring_vm_flush(rdev, ring, vm); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * radeon_vm_fence - remember fence for vm | ||
250 | * | ||
251 | * @rdev: radeon_device pointer | ||
252 | * @vm: vm we want to fence | ||
253 | * @fence: fence to remember | ||
254 | * | ||
255 | * Fence the vm (cayman+). | ||
256 | * Set the fence used to protect page table and id. | ||
257 | * | ||
258 | * Global and local mutex must be locked! | ||
259 | */ | ||
260 | void radeon_vm_fence(struct radeon_device *rdev, | ||
261 | struct radeon_vm *vm, | ||
262 | struct radeon_fence *fence) | ||
263 | { | ||
264 | radeon_fence_unref(&vm->fence); | ||
265 | vm->fence = radeon_fence_ref(fence); | ||
266 | |||
267 | radeon_fence_unref(&rdev->vm_manager.active[vm->id]); | ||
268 | rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); | ||
269 | |||
270 | radeon_fence_unref(&vm->last_id_use); | ||
271 | vm->last_id_use = radeon_fence_ref(fence); | ||
272 | |||
273 | /* we just flushed the VM, remember that */ | ||
274 | if (!vm->last_flush) | ||
275 | vm->last_flush = radeon_fence_ref(fence); | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * radeon_vm_bo_find - find the bo_va for a specific vm & bo | ||
280 | * | ||
281 | * @vm: requested vm | ||
282 | * @bo: requested buffer object | ||
283 | * | ||
284 | * Find @bo inside the requested vm (cayman+). | ||
285 | * Search inside the @bo's vm list for the requested vm | ||
286 | * Returns the found bo_va or NULL if none is found | ||
287 | * | ||
288 | * Object has to be reserved! | ||
289 | */ | ||
290 | struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, | ||
291 | struct radeon_bo *bo) | ||
292 | { | ||
293 | struct radeon_bo_va *bo_va; | ||
294 | |||
295 | list_for_each_entry(bo_va, &bo->va, bo_list) { | ||
296 | if (bo_va->vm == vm) { | ||
297 | return bo_va; | ||
298 | } | ||
299 | } | ||
300 | return NULL; | ||
301 | } | ||
302 | |||
303 | /** | ||
304 | * radeon_vm_bo_add - add a bo to a specific vm | ||
305 | * | ||
306 | * @rdev: radeon_device pointer | ||
307 | * @vm: requested vm | ||
308 | * @bo: radeon buffer object | ||
309 | * | ||
310 | * Add @bo into the requested vm (cayman+). | ||
311 | * Add @bo to the list of bos associated with the vm | ||
312 | * Returns newly added bo_va or NULL for failure | ||
313 | * | ||
314 | * Object has to be reserved! | ||
315 | */ | ||
316 | struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, | ||
317 | struct radeon_vm *vm, | ||
318 | struct radeon_bo *bo) | ||
319 | { | ||
320 | struct radeon_bo_va *bo_va; | ||
321 | |||
322 | bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); | ||
323 | if (bo_va == NULL) { | ||
324 | return NULL; | ||
325 | } | ||
326 | bo_va->vm = vm; | ||
327 | bo_va->bo = bo; | ||
328 | bo_va->soffset = 0; | ||
329 | bo_va->eoffset = 0; | ||
330 | bo_va->flags = 0; | ||
331 | bo_va->valid = false; | ||
332 | bo_va->ref_count = 1; | ||
333 | INIT_LIST_HEAD(&bo_va->bo_list); | ||
334 | INIT_LIST_HEAD(&bo_va->vm_list); | ||
335 | |||
336 | mutex_lock(&vm->mutex); | ||
337 | list_add(&bo_va->vm_list, &vm->va); | ||
338 | list_add_tail(&bo_va->bo_list, &bo->va); | ||
339 | mutex_unlock(&vm->mutex); | ||
340 | |||
341 | return bo_va; | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * radeon_vm_clear_bo - initially clear the page dir/table | ||
346 | * | ||
347 | * @rdev: radeon_device pointer | ||
348 | * @bo: bo to clear | ||
349 | */ | ||
350 | static int radeon_vm_clear_bo(struct radeon_device *rdev, | ||
351 | struct radeon_bo *bo) | ||
352 | { | ||
353 | struct ttm_validate_buffer tv; | ||
354 | struct ww_acquire_ctx ticket; | ||
355 | struct list_head head; | ||
356 | struct radeon_ib ib; | ||
357 | unsigned entries; | ||
358 | uint64_t addr; | ||
359 | int r; | ||
360 | |||
361 | memset(&tv, 0, sizeof(tv)); | ||
362 | tv.bo = &bo->tbo; | ||
363 | |||
364 | INIT_LIST_HEAD(&head); | ||
365 | list_add(&tv.head, &head); | ||
366 | |||
367 | r = ttm_eu_reserve_buffers(&ticket, &head); | ||
368 | if (r) | ||
369 | return r; | ||
370 | |||
371 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
372 | if (r) | ||
373 | goto error; | ||
374 | |||
375 | addr = radeon_bo_gpu_offset(bo); | ||
376 | entries = radeon_bo_size(bo) / 8; | ||
377 | |||
378 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, | ||
379 | NULL, entries * 2 + 64); | ||
380 | if (r) | ||
381 | goto error; | ||
382 | |||
383 | ib.length_dw = 0; | ||
384 | |||
385 | radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0); | ||
386 | |||
387 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
388 | if (r) | ||
389 | goto error; | ||
390 | |||
391 | ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); | ||
392 | radeon_ib_free(rdev, &ib); | ||
393 | |||
394 | return 0; | ||
395 | |||
396 | error: | ||
397 | ttm_eu_backoff_reservation(&ticket, &head); | ||
398 | return r; | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * radeon_vm_bo_set_addr - set bos virtual address inside a vm | ||
403 | * | ||
404 | * @rdev: radeon_device pointer | ||
405 | * @bo_va: bo_va to store the address | ||
406 | * @soffset: requested offset of the buffer in the VM address space | ||
407 | * @flags: attributes of pages (read/write/valid/etc.) | ||
408 | * | ||
409 | * Set offset of @bo_va (cayman+). | ||
410 | * Validate and set the offset requested within the vm address space. | ||
411 | * Returns 0 for success, error for failure. | ||
412 | * | ||
413 | * Object has to be reserved! | ||
414 | */ | ||
415 | int radeon_vm_bo_set_addr(struct radeon_device *rdev, | ||
416 | struct radeon_bo_va *bo_va, | ||
417 | uint64_t soffset, | ||
418 | uint32_t flags) | ||
419 | { | ||
420 | uint64_t size = radeon_bo_size(bo_va->bo); | ||
421 | uint64_t eoffset, last_offset = 0; | ||
422 | struct radeon_vm *vm = bo_va->vm; | ||
423 | struct radeon_bo_va *tmp; | ||
424 | struct list_head *head; | ||
425 | unsigned last_pfn, pt_idx; | ||
426 | int r; | ||
427 | |||
428 | if (soffset) { | ||
429 | /* make sure the object fits at this offset */ | ||
430 | eoffset = soffset + size; | ||
431 | if (soffset >= eoffset) { | ||
432 | return -EINVAL; | ||
433 | } | ||
434 | |||
435 | last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; | ||
436 | if (last_pfn > rdev->vm_manager.max_pfn) { | ||
437 | dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", | ||
438 | last_pfn, rdev->vm_manager.max_pfn); | ||
439 | return -EINVAL; | ||
440 | } | ||
441 | |||
442 | } else { | ||
443 | eoffset = last_pfn = 0; | ||
444 | } | ||
445 | |||
446 | mutex_lock(&vm->mutex); | ||
447 | head = &vm->va; | ||
448 | last_offset = 0; | ||
449 | list_for_each_entry(tmp, &vm->va, vm_list) { | ||
450 | if (bo_va == tmp) { | ||
451 | /* skip over currently modified bo */ | ||
452 | continue; | ||
453 | } | ||
454 | |||
455 | if (soffset >= last_offset && eoffset <= tmp->soffset) { | ||
456 | /* bo can be added before this one */ | ||
457 | break; | ||
458 | } | ||
459 | if (eoffset > tmp->soffset && soffset < tmp->eoffset) { | ||
460 | /* bo and tmp overlap, invalid offset */ | ||
461 | dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", | ||
462 | bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, | ||
463 | (unsigned)tmp->soffset, (unsigned)tmp->eoffset); | ||
464 | mutex_unlock(&vm->mutex); | ||
465 | return -EINVAL; | ||
466 | } | ||
467 | last_offset = tmp->eoffset; | ||
468 | head = &tmp->vm_list; | ||
469 | } | ||
470 | |||
471 | bo_va->soffset = soffset; | ||
472 | bo_va->eoffset = eoffset; | ||
473 | bo_va->flags = flags; | ||
474 | bo_va->valid = false; | ||
475 | list_move(&bo_va->vm_list, head); | ||
476 | |||
477 | soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; | ||
478 | eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; | ||
479 | |||
480 | if (eoffset > vm->max_pde_used) | ||
481 | vm->max_pde_used = eoffset; | ||
482 | |||
483 | radeon_bo_unreserve(bo_va->bo); | ||
484 | |||
485 | /* walk over the address space and allocate the page tables */ | ||
486 | for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) { | ||
487 | struct radeon_bo *pt; | ||
488 | |||
489 | if (vm->page_tables[pt_idx].bo) | ||
490 | continue; | ||
491 | |||
492 | /* drop mutex to allocate and clear page table */ | ||
493 | mutex_unlock(&vm->mutex); | ||
494 | |||
495 | r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, | ||
496 | RADEON_GPU_PAGE_SIZE, false, | ||
497 | RADEON_GEM_DOMAIN_VRAM, NULL, &pt); | ||
498 | if (r) | ||
499 | return r; | ||
500 | |||
501 | r = radeon_vm_clear_bo(rdev, pt); | ||
502 | if (r) { | ||
503 | radeon_bo_unref(&pt); | ||
504 | radeon_bo_reserve(bo_va->bo, false); | ||
505 | return r; | ||
506 | } | ||
507 | |||
508 | /* acquire the mutex again */ | ||
509 | mutex_lock(&vm->mutex); | ||
510 | if (vm->page_tables[pt_idx].bo) { | ||
511 | /* someone else allocated the pt in the meantime */ | ||
512 | mutex_unlock(&vm->mutex); | ||
513 | radeon_bo_unref(&pt); | ||
514 | mutex_lock(&vm->mutex); | ||
515 | continue; | ||
516 | } | ||
517 | |||
518 | vm->page_tables[pt_idx].addr = 0; | ||
519 | vm->page_tables[pt_idx].bo = pt; | ||
520 | } | ||
521 | |||
522 | mutex_unlock(&vm->mutex); | ||
523 | return radeon_bo_reserve(bo_va->bo, false); | ||
524 | } | ||
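The heart of radeon_vm_bo_set_addr() is the walk over the sorted VA list: a new [soffset, eoffset) range is accepted only if it fits into a gap between existing mappings. A minimal model of the half-open interval overlap test is given below; the sample ranges are arbitrary.

```c
#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t soffset, eoffset; };

/* Two half-open ranges overlap iff each starts before the other ends,
 * which is the same test that triggers the dev_err() path above. */
static int range_ok(const struct mapping *maps, int n,
		    uint64_t soffset, uint64_t eoffset)
{
	for (int i = 0; i < n; ++i) {
		if (eoffset > maps[i].soffset && soffset < maps[i].eoffset)
			return 0; /* conflict with an existing mapping */
	}
	return 1;
}

int main(void)
{
	struct mapping maps[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };

	printf("%d\n", range_ok(maps, 2, 0x3000, 0x4000)); /* 1: fits in gap */
	printf("%d\n", range_ok(maps, 2, 0x2000, 0x2800)); /* 0: overlaps */
	return 0;
}
```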
525 | |||
526 | /** | ||
527 | * radeon_vm_map_gart - get the physical address of a gart page | ||
528 | * | ||
529 | * @rdev: radeon_device pointer | ||
530 | * @addr: the unmapped addr | ||
531 | * | ||
532 | * Look up the physical address of the page that the pte resolves | ||
533 | * to (cayman+). | ||
534 | * Returns the physical address of the page. | ||
535 | */ | ||
536 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) | ||
537 | { | ||
538 | uint64_t result; | ||
539 | |||
540 | /* page table offset */ | ||
541 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; | ||
542 | |||
543 | /* in case cpu page size != gpu page size */ | ||
544 | result |= addr & (~PAGE_MASK); | ||
545 | |||
546 | return result; | ||
547 | } | ||
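The composition performed here is simple enough to spell out: the high bits of the GPU address index the GART page array, and the low bits survive as an offset, so CPU and GPU page sizes need not match. A minimal sketch under assumed names (pages_addr standing in for rdev->gart.pages_addr, 12 for PAGE_SHIFT, i.e. 4 KiB CPU pages):

#include <stdint.h>

/* Hedged sketch of the lookup above, not driver code. */
static uint64_t map_gart_sketch(const uint64_t *pages_addr, uint64_t addr)
{
	uint64_t page = pages_addr[addr >> 12];    /* physical page base */
	uint64_t off  = addr & ((1ULL << 12) - 1); /* offset inside page */

	return page | off;
}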
548 | |||
549 | /** | ||
550 | * radeon_vm_page_flags - translate page flags to what the hw uses | ||
551 | * | ||
552 | * @flags: flags coming from userspace | ||
553 | * | ||
554 | * Translate the flags the userspace ABI uses to hw flags. | ||
555 | */ | ||
556 | static uint32_t radeon_vm_page_flags(uint32_t flags) | ||
557 | { | ||
558 | uint32_t hw_flags = 0; | ||
559 | hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; | ||
560 | hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; | ||
561 | hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; | ||
562 | if (flags & RADEON_VM_PAGE_SYSTEM) { | ||
563 | hw_flags |= R600_PTE_SYSTEM; | ||
564 | hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; | ||
565 | } | ||
566 | return hw_flags; | ||
567 | } | ||
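As a usage sketch, a snooped, read/write system page translates as follows; the flag names are the radeon UAPI values and R600 PTE bits already referenced by the function:

uint32_t flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
                 RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SYSTEM |
                 RADEON_VM_PAGE_SNOOPED;
uint32_t hw = radeon_vm_page_flags(flags);
/* hw == R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE |
 *       R600_PTE_SYSTEM | R600_PTE_SNOOPED */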
568 | |||
569 | /** | ||
570 | * radeon_vm_update_page_directory - make sure that page directory is valid | ||
571 | * | ||
572 | * @rdev: radeon_device pointer | ||
573 | * @vm: requested vm | ||
576 | * | ||
577 | * Allocates new page tables if necessary | ||
578 | * and updates the page directory (cayman+). | ||
579 | * Returns 0 for success, error for failure. | ||
580 | * | ||
581 | * Global and local mutexes must be locked! | ||
582 | */ | ||
583 | int radeon_vm_update_page_directory(struct radeon_device *rdev, | ||
584 | struct radeon_vm *vm) | ||
585 | { | ||
586 | static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; | ||
587 | |||
588 | uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); | ||
589 | uint64_t last_pde = ~0, last_pt = ~0; | ||
590 | unsigned count = 0, pt_idx, ndw; | ||
591 | struct radeon_ib ib; | ||
592 | int r; | ||
593 | |||
594 | /* padding, etc. */ | ||
595 | ndw = 64; | ||
596 | |||
597 | /* assume the worst case */ | ||
598 | ndw += vm->max_pde_used * 12; | ||
599 | |||
600 | /* update too big for an IB */ | ||
601 | if (ndw > 0xfffff) | ||
602 | return -ENOMEM; | ||
603 | |||
604 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); | ||
605 | if (r) | ||
606 | return r; | ||
607 | ib.length_dw = 0; | ||
608 | |||
609 | /* walk over the address space and update the page directory */ | ||
610 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | ||
611 | struct radeon_bo *bo = vm->page_tables[pt_idx].bo; | ||
612 | uint64_t pde, pt; | ||
613 | |||
614 | if (bo == NULL) | ||
615 | continue; | ||
616 | |||
617 | pt = radeon_bo_gpu_offset(bo); | ||
618 | if (vm->page_tables[pt_idx].addr == pt) | ||
619 | continue; | ||
620 | vm->page_tables[pt_idx].addr = pt; | ||
621 | |||
622 | pde = pd_addr + pt_idx * 8; | ||
623 | if (((last_pde + 8 * count) != pde) || | ||
624 | ((last_pt + incr * count) != pt)) { | ||
625 | |||
626 | if (count) { | ||
627 | radeon_asic_vm_set_page(rdev, &ib, last_pde, | ||
628 | last_pt, count, incr, | ||
629 | R600_PTE_VALID); | ||
630 | } | ||
631 | |||
632 | count = 1; | ||
633 | last_pde = pde; | ||
634 | last_pt = pt; | ||
635 | } else { | ||
636 | ++count; | ||
637 | } | ||
638 | } | ||
639 | |||
640 | if (count) | ||
641 | radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count, | ||
642 | incr, R600_PTE_VALID); | ||
643 | |||
644 | if (ib.length_dw != 0) { | ||
645 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); | ||
646 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
647 | if (r) { | ||
648 | radeon_ib_free(rdev, &ib); | ||
649 | return r; | ||
650 | } | ||
651 | radeon_fence_unref(&vm->fence); | ||
652 | vm->fence = radeon_fence_ref(ib.fence); | ||
653 | radeon_fence_unref(&vm->last_flush); | ||
654 | } | ||
655 | radeon_ib_free(rdev, &ib); | ||
656 | |||
657 | return 0; | ||
658 | } | ||
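The inner bookkeeping batches contiguous directory entries: a run continues while each new PDE sits 8 bytes after the previous one and its page table advances by incr, and is flushed with a single radeon_asic_vm_set_page() call otherwise. A hedged sketch of the same pattern in isolation, with a hypothetical emit() in place of the ASIC call and the page-table continuity check dropped for brevity:

#include <stdint.h>

static void emit(uint64_t start, unsigned len); /* hypothetical emitter */

/* Illustrative run-length coalescing over an array of PDE addresses. */
static void write_pdes(const uint64_t *pde, unsigned n)
{
	uint64_t run_start = ~0ULL;
	unsigned run_len = 0, i;

	for (i = 0; i < n; ++i) {
		if (run_start + 8ULL * run_len != pde[i]) {
			if (run_len)
				emit(run_start, run_len); /* flush run */
			run_start = pde[i];
			run_len = 1;
		} else {
			++run_len;
		}
	}
	if (run_len)
		emit(run_start, run_len); /* final run */
}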
659 | |||
660 | /** | ||
661 | * radeon_vm_update_ptes - make sure that page tables are valid | ||
662 | * | ||
663 | * @rdev: radeon_device pointer | ||
664 | * @vm: requested vm | ||
665 | * @start: start of GPU address range | ||
666 | * @end: end of GPU address range | ||
667 | * @dst: destination address to map to | ||
668 | * @flags: mapping flags | ||
669 | * | ||
670 | * Update the page tables in the range @start - @end (cayman+). | ||
671 | * | ||
672 | * Global and local mutexes must be locked! | ||
673 | */ | ||
674 | static void radeon_vm_update_ptes(struct radeon_device *rdev, | ||
675 | struct radeon_vm *vm, | ||
676 | struct radeon_ib *ib, | ||
677 | uint64_t start, uint64_t end, | ||
678 | uint64_t dst, uint32_t flags) | ||
679 | { | ||
680 | static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; | ||
681 | |||
682 | uint64_t last_pte = ~0, last_dst = ~0; | ||
683 | unsigned count = 0; | ||
684 | uint64_t addr; | ||
685 | |||
686 | start = start / RADEON_GPU_PAGE_SIZE; | ||
687 | end = end / RADEON_GPU_PAGE_SIZE; | ||
688 | |||
689 | /* walk over the address space and update the page tables */ | ||
690 | for (addr = start; addr < end; ) { | ||
691 | uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; | ||
692 | unsigned nptes; | ||
693 | uint64_t pte; | ||
694 | |||
695 | if ((addr & ~mask) == (end & ~mask)) | ||
696 | nptes = end - addr; | ||
697 | else | ||
698 | nptes = RADEON_VM_PTE_COUNT - (addr & mask); | ||
699 | |||
700 | pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo); | ||
701 | pte += (addr & mask) * 8; | ||
702 | |||
703 | if ((last_pte + 8 * count) != pte) { | ||
704 | |||
705 | if (count) { | ||
706 | radeon_asic_vm_set_page(rdev, ib, last_pte, | ||
707 | last_dst, count, | ||
708 | RADEON_GPU_PAGE_SIZE, | ||
709 | flags); | ||
710 | } | ||
711 | |||
712 | count = nptes; | ||
713 | last_pte = pte; | ||
714 | last_dst = dst; | ||
715 | } else { | ||
716 | count += nptes; | ||
717 | } | ||
718 | |||
719 | addr += nptes; | ||
720 | dst += nptes * RADEON_GPU_PAGE_SIZE; | ||
721 | } | ||
722 | |||
723 | if (count) { | ||
724 | radeon_asic_vm_set_page(rdev, ib, last_pte, | ||
725 | last_dst, count, | ||
726 | RADEON_GPU_PAGE_SIZE, flags); | ||
727 | } | ||
728 | } | ||
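The nptes clamp guarantees that a single set_page call never crosses a page-table boundary: `addr & mask` is the index inside the current table, and the run is cut at the table edge unless start and end already share a table. A worked example, assuming RADEON_VM_PTE_COUNT == 512:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumption for illustration: RADEON_VM_PTE_COUNT == 512. */
	const uint64_t mask = 512 - 1;
	uint64_t addr = 510, end = 514, nptes;

	while (addr < end) {
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;          /* same page table */
		else
			nptes = 512 - (addr & mask); /* run to table edge */
		printf("batch at %llu: %llu ptes\n",
		       (unsigned long long)addr, (unsigned long long)nptes);
		addr += nptes; /* prints 510: 2 ptes, then 512: 2 ptes */
	}
	return 0;
}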
729 | |||
730 | /** | ||
731 | * radeon_vm_bo_update - map a bo into the vm page table | ||
732 | * | ||
733 | * @rdev: radeon_device pointer | ||
734 | * @vm: requested vm | ||
735 | * @bo: radeon buffer object | ||
736 | * @mem: ttm memory placement, or NULL to clear the mapping | ||
737 | * | ||
738 | * Fill in the page table entries for @bo (cayman+). | ||
739 | * Returns 0 for success, -EINVAL for failure. | ||
740 | * | ||
741 | * Object has to be reserved and the mutex must be locked! | ||
742 | */ | ||
743 | int radeon_vm_bo_update(struct radeon_device *rdev, | ||
744 | struct radeon_vm *vm, | ||
745 | struct radeon_bo *bo, | ||
746 | struct ttm_mem_reg *mem) | ||
747 | { | ||
748 | struct radeon_ib ib; | ||
749 | struct radeon_bo_va *bo_va; | ||
750 | unsigned nptes, ndw; | ||
751 | uint64_t addr; | ||
752 | int r; | ||
753 | |||
754 | bo_va = radeon_vm_bo_find(vm, bo); | ||
755 | if (bo_va == NULL) { | ||
756 | dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); | ||
757 | return -EINVAL; | ||
758 | } | ||
759 | |||
760 | if (!bo_va->soffset) { | ||
761 | dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n", | ||
762 | bo, vm); | ||
763 | return -EINVAL; | ||
764 | } | ||
765 | |||
766 | if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) | ||
767 | return 0; | ||
768 | |||
769 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; | ||
770 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; | ||
771 | if (mem) { | ||
772 | addr = mem->start << PAGE_SHIFT; | ||
773 | if (mem->mem_type != TTM_PL_SYSTEM) { | ||
774 | bo_va->flags |= RADEON_VM_PAGE_VALID; | ||
775 | bo_va->valid = true; | ||
776 | } | ||
777 | if (mem->mem_type == TTM_PL_TT) { | ||
778 | bo_va->flags |= RADEON_VM_PAGE_SYSTEM; | ||
779 | } else { | ||
780 | addr += rdev->vm_manager.vram_base_offset; | ||
781 | } | ||
782 | } else { | ||
783 | addr = 0; | ||
784 | bo_va->valid = false; | ||
785 | } | ||
786 | |||
787 | trace_radeon_vm_bo_update(bo_va); | ||
788 | |||
789 | nptes = radeon_bo_ngpu_pages(bo); | ||
790 | |||
791 | /* padding, etc. */ | ||
792 | ndw = 64; | ||
793 | |||
794 | if (RADEON_VM_BLOCK_SIZE > 11) | ||
795 | /* reserve space for one header for every 2k dwords */ | ||
796 | ndw += (nptes >> 11) * 4; | ||
797 | else | ||
798 | /* reserve space for one header for | ||
799 | every (1 << BLOCK_SIZE) entries */ | ||
800 | ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; | ||
801 | |||
802 | /* reserve space for pte addresses */ | ||
803 | ndw += nptes * 2; | ||
804 | |||
805 | /* update too big for an IB */ | ||
806 | if (ndw > 0xfffff) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); | ||
810 | if (r) | ||
811 | return r; | ||
812 | ib.length_dw = 0; | ||
813 | |||
814 | radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, | ||
815 | addr, radeon_vm_page_flags(bo_va->flags)); | ||
816 | |||
817 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); | ||
818 | r = radeon_ib_schedule(rdev, &ib, NULL); | ||
819 | if (r) { | ||
820 | radeon_ib_free(rdev, &ib); | ||
821 | return r; | ||
822 | } | ||
823 | radeon_fence_unref(&vm->fence); | ||
824 | vm->fence = radeon_fence_ref(ib.fence); | ||
825 | radeon_ib_free(rdev, &ib); | ||
826 | radeon_fence_unref(&vm->last_flush); | ||
827 | |||
828 | return 0; | ||
829 | } | ||
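The command-stream sizing is deliberately worst case: 64 dwords of padding, one 4-dword header whenever a run crosses a table (or 2k-dword) boundary, and two dwords per PTE address. Worked through for an assumed 1 MiB buffer (256 pages of 4 KiB) with RADEON_VM_BLOCK_SIZE > 11:

#include <stdio.h>

int main(void)
{
	/* Assumptions: 4 KiB GPU pages, RADEON_VM_BLOCK_SIZE > 11,
	 * a 1 MiB buffer, i.e. nptes == 256. */
	unsigned nptes = 256;
	unsigned ndw = 64;            /* padding, etc. */

	ndw += (nptes >> 11) * 4;     /* per-2k-dword headers: 0 here */
	ndw += nptes * 2;             /* two dwords per PTE address */
	printf("%u dwords (%u bytes)\n", ndw, ndw * 4); /* 576, 2304 */
	return 0;
}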
830 | |||
831 | /** | ||
832 | * radeon_vm_bo_rmv - remove a bo from a specific vm | ||
833 | * | ||
834 | * @rdev: radeon_device pointer | ||
835 | * @bo_va: requested bo_va | ||
836 | * | ||
837 | * Remove @bo_va->bo from the requested vm (cayman+). | ||
838 | * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and | ||
839 | * remove the ptes for @bo_va in the page table. | ||
840 | * Returns 0 for success. | ||
841 | * | ||
842 | * Object has to be reserved! | ||
843 | */ | ||
844 | int radeon_vm_bo_rmv(struct radeon_device *rdev, | ||
845 | struct radeon_bo_va *bo_va) | ||
846 | { | ||
847 | int r = 0; | ||
848 | |||
849 | mutex_lock(&bo_va->vm->mutex); | ||
850 | if (bo_va->soffset) | ||
851 | r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); | ||
852 | |||
853 | list_del(&bo_va->vm_list); | ||
854 | mutex_unlock(&bo_va->vm->mutex); | ||
855 | list_del(&bo_va->bo_list); | ||
856 | |||
857 | kfree(bo_va); | ||
858 | return r; | ||
859 | } | ||
860 | |||
861 | /** | ||
862 | * radeon_vm_bo_invalidate - mark the bo as invalid | ||
863 | * | ||
864 | * @rdev: radeon_device pointer | ||
866 | * @bo: radeon buffer object | ||
867 | * | ||
868 | * Mark @bo as invalid (cayman+). | ||
869 | */ | ||
870 | void radeon_vm_bo_invalidate(struct radeon_device *rdev, | ||
871 | struct radeon_bo *bo) | ||
872 | { | ||
873 | struct radeon_bo_va *bo_va; | ||
874 | |||
875 | list_for_each_entry(bo_va, &bo->va, bo_list) { | ||
876 | bo_va->valid = false; | ||
877 | } | ||
878 | } | ||
879 | |||
880 | /** | ||
881 | * radeon_vm_init - initialize a vm instance | ||
882 | * | ||
883 | * @rdev: radeon_device pointer | ||
884 | * @vm: requested vm | ||
885 | * | ||
886 | * Init @vm fields (cayman+). | ||
887 | */ | ||
888 | int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) | ||
889 | { | ||
890 | unsigned pd_size, pd_entries, pts_size; | ||
891 | int r; | ||
892 | |||
893 | vm->id = 0; | ||
894 | vm->fence = NULL; | ||
895 | vm->last_flush = NULL; | ||
896 | vm->last_id_use = NULL; | ||
897 | mutex_init(&vm->mutex); | ||
898 | INIT_LIST_HEAD(&vm->va); | ||
899 | |||
900 | pd_size = radeon_vm_directory_size(rdev); | ||
901 | pd_entries = radeon_vm_num_pdes(rdev); | ||
902 | |||
903 | /* allocate page table array */ | ||
904 | pts_size = pd_entries * sizeof(struct radeon_vm_pt); | ||
905 | vm->page_tables = kzalloc(pts_size, GFP_KERNEL); | ||
906 | if (vm->page_tables == NULL) { | ||
907 | DRM_ERROR("Cannot allocate memory for page table array\n"); | ||
908 | return -ENOMEM; | ||
909 | } | ||
910 | |||
911 | r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false, | ||
912 | RADEON_GEM_DOMAIN_VRAM, NULL, | ||
913 | &vm->page_directory); | ||
914 | if (r) | ||
915 | return r; | ||
916 | |||
917 | r = radeon_vm_clear_bo(rdev, vm->page_directory); | ||
918 | if (r) { | ||
919 | radeon_bo_unref(&vm->page_directory); | ||
920 | vm->page_directory = NULL; | ||
921 | return r; | ||
922 | } | ||
923 | |||
924 | return 0; | ||
925 | } | ||
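Paired with radeon_vm_fini() below, the expected lifecycle is symmetric; a hedged usage sketch with error paths elided (the mapping helpers named in the comment are assumptions about the surrounding API):

/* Hedged lifecycle sketch, not driver code. */
static int vm_lifecycle_sketch(struct radeon_device *rdev)
{
	struct radeon_vm vm;
	int r;

	r = radeon_vm_init(rdev, &vm);
	if (r)
		return r;
	/* ... radeon_vm_bo_add() / radeon_vm_bo_set_addr(), submit work ... */
	radeon_vm_fini(rdev, &vm);
	return 0;
}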
926 | |||
927 | /** | ||
928 | * radeon_vm_fini - tear down a vm instance | ||
929 | * | ||
930 | * @rdev: radeon_device pointer | ||
931 | * @vm: requested vm | ||
932 | * | ||
933 | * Tear down @vm (cayman+). | ||
934 | * Unbind the VM and remove all bos from the vm bo list. | ||
935 | */ | ||
936 | void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) | ||
937 | { | ||
938 | struct radeon_bo_va *bo_va, *tmp; | ||
939 | int i, r; | ||
940 | |||
941 | if (!list_empty(&vm->va)) { | ||
942 | dev_err(rdev->dev, "still active bo inside vm\n"); | ||
943 | } | ||
944 | list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { | ||
945 | list_del_init(&bo_va->vm_list); | ||
946 | r = radeon_bo_reserve(bo_va->bo, false); | ||
947 | if (!r) { | ||
948 | list_del_init(&bo_va->bo_list); | ||
949 | radeon_bo_unreserve(bo_va->bo); | ||
950 | kfree(bo_va); | ||
951 | } | ||
952 | } | ||
953 | |||
954 | |||
955 | for (i = 0; i < radeon_vm_num_pdes(rdev); i++) | ||
956 | radeon_bo_unref(&vm->page_tables[i].bo); | ||
957 | kfree(vm->page_tables); | ||
958 | |||
959 | radeon_bo_unref(&vm->page_directory); | ||
960 | |||
961 | radeon_fence_unref(&vm->fence); | ||
962 | radeon_fence_unref(&vm->last_flush); | ||
963 | radeon_fence_unref(&vm->last_id_use); | ||
964 | |||
965 | mutex_destroy(&vm->mutex); | ||
966 | } | ||
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 8512085b0aef..02f7710de470 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c | |||
@@ -807,9 +807,6 @@ static int rs780_parse_power_table(struct radeon_device *rdev) | |||
807 | power_info->pplib.ucNumStates, GFP_KERNEL); | 807 | power_info->pplib.ucNumStates, GFP_KERNEL); |
808 | if (!rdev->pm.dpm.ps) | 808 | if (!rdev->pm.dpm.ps) |
809 | return -ENOMEM; | 809 | return -ENOMEM; |
810 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
811 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
812 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
813 | 810 | ||
814 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | 811 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
815 | power_state = (union pplib_power_state *) | 812 | power_state = (union pplib_power_state *) |
@@ -859,6 +856,10 @@ int rs780_dpm_init(struct radeon_device *rdev) | |||
859 | return -ENOMEM; | 856 | return -ENOMEM; |
860 | rdev->pm.dpm.priv = pi; | 857 | rdev->pm.dpm.priv = pi; |
861 | 858 | ||
859 | ret = r600_get_platform_caps(rdev); | ||
860 | if (ret) | ||
861 | return ret; | ||
862 | |||
862 | ret = rs780_parse_power_table(rdev); | 863 | ret = rs780_parse_power_table(rdev); |
863 | if (ret) | 864 | if (ret) |
864 | return ret; | 865 | return ret; |
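The same three-line PowerPlay parse is deleted from six DPM drivers in this series (rs780, rv6xx, rv770, si, sumo, trinity) and replaced by one early call. The helper's definition is not part of these hunks; a hedged sketch of what it presumably centralizes, reusing the assignments removed above:

/* Hedged sketch only; the real r600_get_platform_caps() lives
 * elsewhere in the series. It plausibly looks up the ATOM PowerPlay
 * table and performs the three assignments removed above. */
int r600_get_platform_caps(struct radeon_device *rdev)
{
	union power_info *power_info;

	/* ... atom table lookup and validation elided ... */
	rdev->pm.dpm.platform_caps =
		le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time =
		le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time =
		le16_to_cpu(power_info->pplib.usVoltageTime);
	return 0;
}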
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index bebf31c4d841..e7045b085715 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
@@ -1891,9 +1891,6 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) | |||
1891 | power_info->pplib.ucNumStates, GFP_KERNEL); | 1891 | power_info->pplib.ucNumStates, GFP_KERNEL); |
1892 | if (!rdev->pm.dpm.ps) | 1892 | if (!rdev->pm.dpm.ps) |
1893 | return -ENOMEM; | 1893 | return -ENOMEM; |
1894 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
1895 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
1896 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
1897 | 1894 | ||
1898 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | 1895 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
1899 | power_state = (union pplib_power_state *) | 1896 | power_state = (union pplib_power_state *) |
@@ -1943,6 +1940,10 @@ int rv6xx_dpm_init(struct radeon_device *rdev) | |||
1943 | return -ENOMEM; | 1940 | return -ENOMEM; |
1944 | rdev->pm.dpm.priv = pi; | 1941 | rdev->pm.dpm.priv = pi; |
1945 | 1942 | ||
1943 | ret = r600_get_platform_caps(rdev); | ||
1944 | if (ret) | ||
1945 | return ret; | ||
1946 | |||
1946 | ret = rv6xx_parse_power_table(rdev); | 1947 | ret = rv6xx_parse_power_table(rdev); |
1947 | if (ret) | 1948 | if (ret) |
1948 | return ret; | 1949 | return ret; |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b5f63f5e22a3..da041a43d82e 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2281,9 +2281,6 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) | |||
2281 | power_info->pplib.ucNumStates, GFP_KERNEL); | 2281 | power_info->pplib.ucNumStates, GFP_KERNEL); |
2282 | if (!rdev->pm.dpm.ps) | 2282 | if (!rdev->pm.dpm.ps) |
2283 | return -ENOMEM; | 2283 | return -ENOMEM; |
2284 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
2285 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
2286 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
2287 | 2284 | ||
2288 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | 2285 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
2289 | power_state = (union pplib_power_state *) | 2286 | power_state = (union pplib_power_state *) |
@@ -2361,6 +2358,10 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
2361 | pi->min_vddc_in_table = 0; | 2358 | pi->min_vddc_in_table = 0; |
2362 | pi->max_vddc_in_table = 0; | 2359 | pi->max_vddc_in_table = 0; |
2363 | 2360 | ||
2361 | ret = r600_get_platform_caps(rdev); | ||
2362 | if (ret) | ||
2363 | return ret; | ||
2364 | |||
2364 | ret = rv7xx_parse_power_table(rdev); | 2365 | ret = rv7xx_parse_power_table(rdev); |
2365 | if (ret) | 2366 | if (ret) |
2366 | return ret; | 2367 | return ret; |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 9a124d0608b3..d589475fe9e6 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -3434,8 +3434,6 @@ static int si_cp_resume(struct radeon_device *rdev) | |||
3434 | 3434 | ||
3435 | WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); | 3435 | WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); |
3436 | 3436 | ||
3437 | ring->rptr = RREG32(CP_RB0_RPTR); | ||
3438 | |||
3439 | /* ring1 - compute only */ | 3437 | /* ring1 - compute only */ |
3440 | /* Set ring buffer size */ | 3438 | /* Set ring buffer size */ |
3441 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | 3439 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; |
@@ -3460,8 +3458,6 @@ static int si_cp_resume(struct radeon_device *rdev) | |||
3460 | 3458 | ||
3461 | WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); | 3459 | WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); |
3462 | 3460 | ||
3463 | ring->rptr = RREG32(CP_RB1_RPTR); | ||
3464 | |||
3465 | /* ring2 - compute only */ | 3461 | /* ring2 - compute only */ |
3466 | /* Set ring buffer size */ | 3462 | /* Set ring buffer size */ |
3467 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | 3463 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; |
@@ -3486,8 +3482,6 @@ static int si_cp_resume(struct radeon_device *rdev) | |||
3486 | 3482 | ||
3487 | WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); | 3483 | WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); |
3488 | 3484 | ||
3489 | ring->rptr = RREG32(CP_RB2_RPTR); | ||
3490 | |||
3491 | /* start the rings */ | 3485 | /* start the rings */ |
3492 | si_cp_start(rdev); | 3486 | si_cp_start(rdev); |
3493 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; | 3487 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; |
@@ -3872,11 +3866,9 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
3872 | if (!(reset_mask & (RADEON_RESET_GFX | | 3866 | if (!(reset_mask & (RADEON_RESET_GFX | |
3873 | RADEON_RESET_COMPUTE | | 3867 | RADEON_RESET_COMPUTE | |
3874 | RADEON_RESET_CP))) { | 3868 | RADEON_RESET_CP))) { |
3875 | radeon_ring_lockup_update(ring); | 3869 | radeon_ring_lockup_update(rdev, ring); |
3876 | return false; | 3870 | return false; |
3877 | } | 3871 | } |
3878 | /* force CP activities */ | ||
3879 | radeon_ring_force_activity(rdev, ring); | ||
3880 | return radeon_ring_test_lockup(rdev, ring); | 3872 | return radeon_ring_test_lockup(rdev, ring); |
3881 | } | 3873 | } |
3882 | 3874 | ||
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 59be2cfcbb47..cf0fdad8c278 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -49,11 +49,9 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) | |||
49 | mask = RADEON_RESET_DMA1; | 49 | mask = RADEON_RESET_DMA1; |
50 | 50 | ||
51 | if (!(reset_mask & mask)) { | 51 | if (!(reset_mask & mask)) { |
52 | radeon_ring_lockup_update(ring); | 52 | radeon_ring_lockup_update(rdev, ring); |
53 | return false; | 53 | return false; |
54 | } | 54 | } |
55 | /* force ring activities */ | ||
56 | radeon_ring_force_activity(rdev, ring); | ||
57 | return radeon_ring_test_lockup(rdev, ring); | 55 | return radeon_ring_test_lockup(rdev, ring); |
58 | } | 56 | } |
59 | 57 | ||
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0a2f5b4bca43..9a3567bedaae 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -6271,9 +6271,6 @@ static int si_parse_power_table(struct radeon_device *rdev) | |||
6271 | if (!rdev->pm.dpm.ps) | 6271 | if (!rdev->pm.dpm.ps) |
6272 | return -ENOMEM; | 6272 | return -ENOMEM; |
6273 | power_state_offset = (u8 *)state_array->states; | 6273 | power_state_offset = (u8 *)state_array->states; |
6274 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
6275 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
6276 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
6277 | for (i = 0; i < state_array->ucNumEntries; i++) { | 6274 | for (i = 0; i < state_array->ucNumEntries; i++) { |
6278 | u8 *idx; | 6275 | u8 *idx; |
6279 | power_state = (union pplib_power_state *)power_state_offset; | 6276 | power_state = (union pplib_power_state *)power_state_offset; |
@@ -6350,6 +6347,10 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6350 | pi->min_vddc_in_table = 0; | 6347 | pi->min_vddc_in_table = 0; |
6351 | pi->max_vddc_in_table = 0; | 6348 | pi->max_vddc_in_table = 0; |
6352 | 6349 | ||
6350 | ret = r600_get_platform_caps(rdev); | ||
6351 | if (ret) | ||
6352 | return ret; | ||
6353 | |||
6353 | ret = si_parse_power_table(rdev); | 6354 | ret = si_parse_power_table(rdev); |
6354 | if (ret) | 6355 | if (ret) |
6355 | return ret; | 6356 | return ret; |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 9239a6d29128..683532f84931 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -1798,4 +1798,51 @@ | |||
1798 | #define DMA_PACKET_CONSTANT_FILL 0xd | 1798 | #define DMA_PACKET_CONSTANT_FILL 0xd |
1799 | #define DMA_PACKET_NOP 0xf | 1799 | #define DMA_PACKET_NOP 0xf |
1800 | 1800 | ||
1801 | #define VCE_STATUS 0x20004 | ||
1802 | #define VCE_VCPU_CNTL 0x20014 | ||
1803 | #define VCE_CLK_EN (1 << 0) | ||
1804 | #define VCE_VCPU_CACHE_OFFSET0 0x20024 | ||
1805 | #define VCE_VCPU_CACHE_SIZE0 0x20028 | ||
1806 | #define VCE_VCPU_CACHE_OFFSET1 0x2002c | ||
1807 | #define VCE_VCPU_CACHE_SIZE1 0x20030 | ||
1808 | #define VCE_VCPU_CACHE_OFFSET2 0x20034 | ||
1809 | #define VCE_VCPU_CACHE_SIZE2 0x20038 | ||
1810 | #define VCE_SOFT_RESET 0x20120 | ||
1811 | #define VCE_ECPU_SOFT_RESET (1 << 0) | ||
1812 | #define VCE_FME_SOFT_RESET (1 << 2) | ||
1813 | #define VCE_RB_BASE_LO2 0x2016c | ||
1814 | #define VCE_RB_BASE_HI2 0x20170 | ||
1815 | #define VCE_RB_SIZE2 0x20174 | ||
1816 | #define VCE_RB_RPTR2 0x20178 | ||
1817 | #define VCE_RB_WPTR2 0x2017c | ||
1818 | #define VCE_RB_BASE_LO 0x20180 | ||
1819 | #define VCE_RB_BASE_HI 0x20184 | ||
1820 | #define VCE_RB_SIZE 0x20188 | ||
1821 | #define VCE_RB_RPTR 0x2018c | ||
1822 | #define VCE_RB_WPTR 0x20190 | ||
1823 | #define VCE_CLOCK_GATING_A 0x202f8 | ||
1824 | #define VCE_CLOCK_GATING_B 0x202fc | ||
1825 | #define VCE_UENC_CLOCK_GATING 0x205bc | ||
1826 | #define VCE_UENC_REG_CLOCK_GATING 0x205c0 | ||
1827 | #define VCE_FW_REG_STATUS 0x20e10 | ||
1828 | # define VCE_FW_REG_STATUS_BUSY (1 << 0) | ||
1829 | # define VCE_FW_REG_STATUS_PASS (1 << 3) | ||
1830 | # define VCE_FW_REG_STATUS_DONE (1 << 11) | ||
1831 | #define VCE_LMI_FW_START_KEYSEL 0x20e18 | ||
1832 | #define VCE_LMI_FW_PERIODIC_CTRL 0x20e20 | ||
1833 | #define VCE_LMI_CTRL2 0x20e74 | ||
1834 | #define VCE_LMI_CTRL 0x20e98 | ||
1835 | #define VCE_LMI_VM_CTRL 0x20ea0 | ||
1836 | #define VCE_LMI_SWAP_CNTL 0x20eb4 | ||
1837 | #define VCE_LMI_SWAP_CNTL1 0x20eb8 | ||
1838 | #define VCE_LMI_CACHE_CTRL 0x20ef4 | ||
1839 | |||
1840 | #define VCE_CMD_NO_OP 0x00000000 | ||
1841 | #define VCE_CMD_END 0x00000001 | ||
1842 | #define VCE_CMD_IB 0x00000002 | ||
1843 | #define VCE_CMD_FENCE 0x00000003 | ||
1844 | #define VCE_CMD_TRAP 0x00000004 | ||
1845 | #define VCE_CMD_IB_AUTO 0x00000005 | ||
1846 | #define VCE_CMD_SEMAPHORE 0x00000006 | ||
1847 | |||
1801 | #endif | 1848 | #endif |
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 8b47b3cd0357..3f0e8d7b8dbe 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c | |||
@@ -1484,9 +1484,6 @@ static int sumo_parse_power_table(struct radeon_device *rdev) | |||
1484 | if (!rdev->pm.dpm.ps) | 1484 | if (!rdev->pm.dpm.ps) |
1485 | return -ENOMEM; | 1485 | return -ENOMEM; |
1486 | power_state_offset = (u8 *)state_array->states; | 1486 | power_state_offset = (u8 *)state_array->states; |
1487 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
1488 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
1489 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
1490 | for (i = 0; i < state_array->ucNumEntries; i++) { | 1487 | for (i = 0; i < state_array->ucNumEntries; i++) { |
1491 | u8 *idx; | 1488 | u8 *idx; |
1492 | power_state = (union pplib_power_state *)power_state_offset; | 1489 | power_state = (union pplib_power_state *)power_state_offset; |
@@ -1772,6 +1769,10 @@ int sumo_dpm_init(struct radeon_device *rdev) | |||
1772 | 1769 | ||
1773 | sumo_construct_boot_and_acpi_state(rdev); | 1770 | sumo_construct_boot_and_acpi_state(rdev); |
1774 | 1771 | ||
1772 | ret = r600_get_platform_caps(rdev); | ||
1773 | if (ret) | ||
1774 | return ret; | ||
1775 | |||
1775 | ret = sumo_parse_power_table(rdev); | 1776 | ret = sumo_parse_power_table(rdev); |
1776 | if (ret) | 1777 | if (ret) |
1777 | return ret; | 1778 | return ret; |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2da0e17eb960..2a2822c03329 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
@@ -1694,9 +1694,6 @@ static int trinity_parse_power_table(struct radeon_device *rdev) | |||
1694 | if (!rdev->pm.dpm.ps) | 1694 | if (!rdev->pm.dpm.ps) |
1695 | return -ENOMEM; | 1695 | return -ENOMEM; |
1696 | power_state_offset = (u8 *)state_array->states; | 1696 | power_state_offset = (u8 *)state_array->states; |
1697 | rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
1698 | rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
1699 | rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); | ||
1700 | for (i = 0; i < state_array->ucNumEntries; i++) { | 1697 | for (i = 0; i < state_array->ucNumEntries; i++) { |
1701 | u8 *idx; | 1698 | u8 *idx; |
1702 | power_state = (union pplib_power_state *)power_state_offset; | 1699 | power_state = (union pplib_power_state *)power_state_offset; |
@@ -1895,6 +1892,10 @@ int trinity_dpm_init(struct radeon_device *rdev) | |||
1895 | 1892 | ||
1896 | trinity_construct_boot_state(rdev); | 1893 | trinity_construct_boot_state(rdev); |
1897 | 1894 | ||
1895 | ret = r600_get_platform_caps(rdev); | ||
1896 | if (ret) | ||
1897 | return ret; | ||
1898 | |||
1898 | ret = trinity_parse_power_table(rdev); | 1899 | ret = trinity_parse_power_table(rdev); |
1899 | if (ret) | 1900 | if (ret) |
1900 | return ret; | 1901 | return ret; |
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index d4a68af1a279..0a243f0e5d68 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c | |||
@@ -262,7 +262,7 @@ int uvd_v1_0_start(struct radeon_device *rdev) | |||
262 | /* Initialize the ring buffer's read and write pointers */ | 262 | /* Initialize the ring buffer's read and write pointers */ |
263 | WREG32(UVD_RBC_RB_RPTR, 0x0); | 263 | WREG32(UVD_RBC_RB_RPTR, 0x0); |
264 | 264 | ||
265 | ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR); | 265 | ring->wptr = RREG32(UVD_RBC_RB_RPTR); |
266 | WREG32(UVD_RBC_RB_WPTR, ring->wptr); | 266 | WREG32(UVD_RBC_RB_WPTR, ring->wptr); |
267 | 267 | ||
268 | /* set the ring address */ | 268 | /* set the ring address */ |
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c new file mode 100644 index 000000000000..b44d9c842f7b --- /dev/null +++ b/drivers/gpu/drm/radeon/vce_v1_0.c | |||
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Advanced Micro Devices, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | * Authors: Christian König <christian.koenig@amd.com> | ||
26 | */ | ||
27 | |||
28 | #include <linux/firmware.h> | ||
29 | #include <drm/drmP.h> | ||
30 | #include "radeon.h" | ||
31 | #include "radeon_asic.h" | ||
32 | #include "sid.h" | ||
33 | |||
34 | /** | ||
35 | * vce_v1_0_get_rptr - get read pointer | ||
36 | * | ||
37 | * @rdev: radeon_device pointer | ||
38 | * @ring: radeon_ring pointer | ||
39 | * | ||
40 | * Returns the current hardware read pointer | ||
41 | */ | ||
42 | uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev, | ||
43 | struct radeon_ring *ring) | ||
44 | { | ||
45 | if (ring->idx == TN_RING_TYPE_VCE1_INDEX) | ||
46 | return RREG32(VCE_RB_RPTR); | ||
47 | else | ||
48 | return RREG32(VCE_RB_RPTR2); | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * vce_v1_0_get_wptr - get write pointer | ||
53 | * | ||
54 | * @rdev: radeon_device pointer | ||
55 | * @ring: radeon_ring pointer | ||
56 | * | ||
57 | * Returns the current hardware write pointer | ||
58 | */ | ||
59 | uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev, | ||
60 | struct radeon_ring *ring) | ||
61 | { | ||
62 | if (ring->idx == TN_RING_TYPE_VCE1_INDEX) | ||
63 | return RREG32(VCE_RB_WPTR); | ||
64 | else | ||
65 | return RREG32(VCE_RB_WPTR2); | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * vce_v1_0_set_wptr - set write pointer | ||
70 | * | ||
71 | * @rdev: radeon_device pointer | ||
72 | * @ring: radeon_ring pointer | ||
73 | * | ||
74 | * Commits the write pointer to the hardware | ||
75 | */ | ||
76 | void vce_v1_0_set_wptr(struct radeon_device *rdev, | ||
77 | struct radeon_ring *ring) | ||
78 | { | ||
79 | if (ring->idx == TN_RING_TYPE_VCE1_INDEX) | ||
80 | WREG32(VCE_RB_WPTR, ring->wptr); | ||
81 | else | ||
82 | WREG32(VCE_RB_WPTR2, ring->wptr); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * vce_v1_0_start - start VCE block | ||
87 | * | ||
88 | * @rdev: radeon_device pointer | ||
89 | * | ||
90 | * Setup and start the VCE block | ||
91 | */ | ||
92 | int vce_v1_0_start(struct radeon_device *rdev) | ||
93 | { | ||
94 | struct radeon_ring *ring; | ||
95 | int i, j, r; | ||
96 | |||
97 | /* set BUSY flag */ | ||
98 | WREG32_P(VCE_STATUS, 1, ~1); | ||
99 | |||
100 | ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; | ||
101 | WREG32(VCE_RB_RPTR, ring->wptr); | ||
102 | WREG32(VCE_RB_WPTR, ring->wptr); | ||
103 | WREG32(VCE_RB_BASE_LO, ring->gpu_addr); | ||
104 | WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | ||
105 | WREG32(VCE_RB_SIZE, ring->ring_size / 4); | ||
106 | |||
107 | ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; | ||
108 | WREG32(VCE_RB_RPTR2, ring->wptr); | ||
109 | WREG32(VCE_RB_WPTR2, ring->wptr); | ||
110 | WREG32(VCE_RB_BASE_LO2, ring->gpu_addr); | ||
111 | WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | ||
112 | WREG32(VCE_RB_SIZE2, ring->ring_size / 4); | ||
113 | |||
114 | WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN); | ||
115 | |||
116 | WREG32_P(VCE_SOFT_RESET, | ||
117 | VCE_ECPU_SOFT_RESET | | ||
118 | VCE_FME_SOFT_RESET, ~( | ||
119 | VCE_ECPU_SOFT_RESET | | ||
120 | VCE_FME_SOFT_RESET)); | ||
121 | |||
122 | mdelay(100); | ||
123 | |||
124 | WREG32_P(VCE_SOFT_RESET, 0, ~( | ||
125 | VCE_ECPU_SOFT_RESET | | ||
126 | VCE_FME_SOFT_RESET)); | ||
127 | |||
128 | for (i = 0; i < 10; ++i) { | ||
129 | uint32_t status; | ||
130 | for (j = 0; j < 100; ++j) { | ||
131 | status = RREG32(VCE_STATUS); | ||
132 | if (status & 2) | ||
133 | break; | ||
134 | mdelay(10); | ||
135 | } | ||
136 | r = 0; | ||
137 | if (status & 2) | ||
138 | break; | ||
139 | |||
140 | DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); | ||
141 | WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET); | ||
142 | mdelay(10); | ||
143 | WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET); | ||
144 | mdelay(10); | ||
145 | r = -1; | ||
146 | } | ||
147 | |||
148 | /* clear BUSY flag */ | ||
149 | WREG32_P(VCE_STATUS, 0, ~1); | ||
150 | |||
151 | if (r) { | ||
152 | DRM_ERROR("VCE not responding, giving up!!!\n"); | ||
153 | return r; | ||
154 | } | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | int vce_v1_0_init(struct radeon_device *rdev) | ||
160 | { | ||
161 | struct radeon_ring *ring; | ||
162 | int r; | ||
163 | |||
164 | r = vce_v1_0_start(rdev); | ||
165 | if (r) | ||
166 | return r; | ||
167 | |||
168 | ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; | ||
169 | ring->ready = true; | ||
170 | r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring); | ||
171 | if (r) { | ||
172 | ring->ready = false; | ||
173 | return r; | ||
174 | } | ||
175 | |||
176 | ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; | ||
177 | ring->ready = true; | ||
178 | r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring); | ||
179 | if (r) { | ||
180 | ring->ready = false; | ||
181 | return r; | ||
182 | } | ||
183 | |||
184 | DRM_INFO("VCE initialized successfully.\n"); | ||
185 | |||
186 | return 0; | ||
187 | } | ||
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c new file mode 100644 index 000000000000..1ac7bb825a1b --- /dev/null +++ b/drivers/gpu/drm/radeon/vce_v2_0.c | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Advanced Micro Devices, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | * Authors: Christian König <christian.koenig@amd.com> | ||
26 | */ | ||
27 | |||
28 | #include <linux/firmware.h> | ||
29 | #include <drm/drmP.h> | ||
30 | #include "radeon.h" | ||
31 | #include "radeon_asic.h" | ||
32 | #include "cikd.h" | ||
33 | |||
34 | static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated) | ||
35 | { | ||
36 | u32 tmp; | ||
37 | |||
38 | if (gated) { | ||
39 | tmp = RREG32(VCE_CLOCK_GATING_B); | ||
40 | tmp |= 0xe70000; | ||
41 | WREG32(VCE_CLOCK_GATING_B, tmp); | ||
42 | |||
43 | tmp = RREG32(VCE_UENC_CLOCK_GATING); | ||
44 | tmp |= 0xff000000; | ||
45 | WREG32(VCE_UENC_CLOCK_GATING, tmp); | ||
46 | |||
47 | tmp = RREG32(VCE_UENC_REG_CLOCK_GATING); | ||
48 | tmp &= ~0x3fc; | ||
49 | WREG32(VCE_UENC_REG_CLOCK_GATING, tmp); | ||
50 | |||
51 | WREG32(VCE_CGTT_CLK_OVERRIDE, 0); | ||
52 | } else { | ||
53 | tmp = RREG32(VCE_CLOCK_GATING_B); | ||
54 | tmp |= 0xe7; | ||
55 | tmp &= ~0xe70000; | ||
56 | WREG32(VCE_CLOCK_GATING_B, tmp); | ||
57 | |||
58 | tmp = RREG32(VCE_UENC_CLOCK_GATING); | ||
59 | tmp |= 0x1fe000; | ||
60 | tmp &= ~0xff000000; | ||
61 | WREG32(VCE_UENC_CLOCK_GATING, tmp); | ||
62 | |||
63 | tmp = RREG32(VCE_UENC_REG_CLOCK_GATING); | ||
64 | tmp |= 0x3fc; | ||
65 | WREG32(VCE_UENC_REG_CLOCK_GATING, tmp); | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static void vce_v2_0_set_dyn_cg(struct radeon_device *rdev, bool gated) | ||
70 | { | ||
71 | u32 orig, tmp; | ||
72 | |||
73 | tmp = RREG32(VCE_CLOCK_GATING_B); | ||
74 | tmp &= ~0x00060006; | ||
75 | if (gated) { | ||
76 | tmp |= 0xe10000; | ||
77 | } else { | ||
78 | tmp |= 0xe1; | ||
79 | tmp &= ~0xe10000; | ||
80 | } | ||
81 | WREG32(VCE_CLOCK_GATING_B, tmp); | ||
82 | |||
83 | orig = tmp = RREG32(VCE_UENC_CLOCK_GATING); | ||
84 | tmp &= ~0x1fe000; | ||
85 | tmp &= ~0xff000000; | ||
86 | if (tmp != orig) | ||
87 | WREG32(VCE_UENC_CLOCK_GATING, tmp); | ||
88 | |||
89 | orig = tmp = RREG32(VCE_UENC_REG_CLOCK_GATING); | ||
90 | tmp &= ~0x3fc; | ||
91 | if (tmp != orig) | ||
92 | WREG32(VCE_UENC_REG_CLOCK_GATING, tmp); | ||
93 | |||
94 | if (gated) | ||
95 | WREG32(VCE_CGTT_CLK_OVERRIDE, 0); | ||
96 | } | ||
97 | |||
98 | static void vce_v2_0_disable_cg(struct radeon_device *rdev) | ||
99 | { | ||
100 | WREG32(VCE_CGTT_CLK_OVERRIDE, 7); | ||
101 | } | ||
102 | |||
103 | void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable) | ||
104 | { | ||
105 | bool sw_cg = false; | ||
106 | |||
107 | if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) { | ||
108 | if (sw_cg) | ||
109 | vce_v2_0_set_sw_cg(rdev, true); | ||
110 | else | ||
111 | vce_v2_0_set_dyn_cg(rdev, true); | ||
112 | } else { | ||
113 | vce_v2_0_disable_cg(rdev); | ||
114 | |||
115 | if (sw_cg) | ||
116 | vce_v2_0_set_sw_cg(rdev, false); | ||
117 | else | ||
118 | vce_v2_0_set_dyn_cg(rdev, false); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static void vce_v2_0_init_cg(struct radeon_device *rdev) | ||
123 | { | ||
124 | u32 tmp; | ||
125 | |||
126 | tmp = RREG32(VCE_CLOCK_GATING_A); | ||
127 | tmp &= ~(CGC_CLK_GATE_DLY_TIMER_MASK | CGC_CLK_GATER_OFF_DLY_TIMER_MASK); | ||
128 | tmp |= (CGC_CLK_GATE_DLY_TIMER(0) | CGC_CLK_GATER_OFF_DLY_TIMER(4)); | ||
129 | tmp |= CGC_UENC_WAIT_AWAKE; | ||
130 | WREG32(VCE_CLOCK_GATING_A, tmp); | ||
131 | |||
132 | tmp = RREG32(VCE_UENC_CLOCK_GATING); | ||
133 | tmp &= ~(CLOCK_ON_DELAY_MASK | CLOCK_OFF_DELAY_MASK); | ||
134 | tmp |= (CLOCK_ON_DELAY(0) | CLOCK_OFF_DELAY(4)); | ||
135 | WREG32(VCE_UENC_CLOCK_GATING, tmp); | ||
136 | |||
137 | tmp = RREG32(VCE_CLOCK_GATING_B); | ||
138 | tmp |= 0x10; | ||
139 | tmp &= ~0x100000; | ||
140 | WREG32(VCE_CLOCK_GATING_B, tmp); | ||
141 | } | ||
142 | |||
143 | int vce_v2_0_resume(struct radeon_device *rdev) | ||
144 | { | ||
145 | uint64_t addr = rdev->vce.gpu_addr; | ||
146 | uint32_t size; | ||
147 | |||
148 | WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16)); | ||
149 | WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); | ||
150 | WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); | ||
151 | WREG32(VCE_CLOCK_GATING_B, 0xf7); | ||
152 | |||
153 | WREG32(VCE_LMI_CTRL, 0x00398000); | ||
154 | WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1); | ||
155 | WREG32(VCE_LMI_SWAP_CNTL, 0); | ||
156 | WREG32(VCE_LMI_SWAP_CNTL1, 0); | ||
157 | WREG32(VCE_LMI_VM_CTRL, 0); | ||
158 | |||
159 | size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); | ||
160 | WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); | ||
161 | WREG32(VCE_VCPU_CACHE_SIZE0, size); | ||
162 | |||
163 | addr += size; | ||
164 | size = RADEON_VCE_STACK_SIZE; | ||
165 | WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); | ||
166 | WREG32(VCE_VCPU_CACHE_SIZE1, size); | ||
167 | |||
168 | addr += size; | ||
169 | size = RADEON_VCE_HEAP_SIZE; | ||
170 | WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); | ||
171 | WREG32(VCE_VCPU_CACHE_SIZE2, size); | ||
172 | |||
173 | WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100); | ||
174 | |||
175 | WREG32_P(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, | ||
176 | ~VCE_SYS_INT_TRAP_INTERRUPT_EN); | ||
177 | |||
178 | vce_v2_0_init_cg(rdev); | ||
179 | |||
180 | return 0; | ||
181 | } | ||
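The three VCPU cache windows are carved sequentially out of a single BO: firmware image first, then stack, then heap, each OFFSET register taking the low 31 bits of the running address. A worked sketch with assumed sizes (the real constants are RADEON_VCE_STACK_SIZE and RADEON_VCE_HEAP_SIZE):

#include <stdint.h>

/* Illustrative layout only; sizes below are assumptions. */
static void vce_layout_sketch(uint64_t gpu_addr)
{
	uint64_t addr = gpu_addr;
	uint32_t fw_size = 0x40000, stack_size = 0x10000;

	/* OFFSET0/SIZE0: firmware image at the start of the BO */
	addr += fw_size;
	/* OFFSET1/SIZE1: stack, immediately after the firmware */
	addr += stack_size;
	/* OFFSET2/SIZE2: heap, immediately after the stack */
	(void)addr;
}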
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c index e38e5967d77b..71cef5c13dc8 100644 --- a/drivers/gpu/drm/tegra/bus.c +++ b/drivers/gpu/drm/tegra/bus.c | |||
@@ -63,7 +63,7 @@ int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device) | |||
63 | return 0; | 63 | return 0; |
64 | 64 | ||
65 | err_free: | 65 | err_free: |
66 | drm_dev_free(drm); | 66 | drm_dev_unref(drm); |
67 | return ret; | 67 | return ret; |
68 | } | 68 | } |
69 | 69 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a06651309388..9df79ac7b8f5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
351 | 351 | ||
352 | moved: | 352 | moved: |
353 | if (bo->evicted) { | 353 | if (bo->evicted) { |
354 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); | 354 | if (bdev->driver->invalidate_caches) { |
355 | if (ret) | 355 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); |
356 | pr_err("Can not flush read caches\n"); | 356 | if (ret) |
357 | pr_err("Can not flush read caches\n"); | ||
358 | } | ||
357 | bo->evicted = false; | 359 | bo->evicted = false; |
358 | } | 360 | } |
359 | 361 | ||
@@ -1449,6 +1451,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); | |||
1449 | int ttm_bo_device_init(struct ttm_bo_device *bdev, | 1451 | int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1450 | struct ttm_bo_global *glob, | 1452 | struct ttm_bo_global *glob, |
1451 | struct ttm_bo_driver *driver, | 1453 | struct ttm_bo_driver *driver, |
1454 | struct address_space *mapping, | ||
1452 | uint64_t file_page_offset, | 1455 | uint64_t file_page_offset, |
1453 | bool need_dma32) | 1456 | bool need_dma32) |
1454 | { | 1457 | { |
@@ -1470,7 +1473,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1470 | 0x10000000); | 1473 | 0x10000000); |
1471 | INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); | 1474 | INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); |
1472 | INIT_LIST_HEAD(&bdev->ddestroy); | 1475 | INIT_LIST_HEAD(&bdev->ddestroy); |
1473 | bdev->dev_mapping = NULL; | 1476 | bdev->dev_mapping = mapping; |
1474 | bdev->glob = glob; | 1477 | bdev->glob = glob; |
1475 | bdev->need_dma32 = need_dma32; | 1478 | bdev->need_dma32 = need_dma32; |
1476 | bdev->val_seq = 0; | 1479 | bdev->val_seq = 0; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 801231c9ae48..0ce48e5a9cb4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
339 | vma->vm_private_data = bo; | 339 | vma->vm_private_data = bo; |
340 | 340 | ||
341 | /* | 341 | /* |
342 | * PFNMAP is faster than MIXEDMAP due to reduced page | 342 | * We'd like to use VM_PFNMAP on shared mappings, where |
343 | * administration. So use MIXEDMAP only if private VMA, where | 343 | * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, |
344 | * we need to support COW. | 344 | * but for some reason VM_PFNMAP + x86 PAT + write-combine is very |
345 | * bad for performance. Until that has been sorted out, use | ||
346 | * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 | ||
345 | */ | 347 | */ |
346 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | 348 | vma->vm_flags |= VM_MIXEDMAP; |
347 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | 349 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
348 | return 0; | 350 | return 0; |
349 | out_unref: | 351 | out_unref: |
@@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
359 | 361 | ||
360 | vma->vm_ops = &ttm_bo_vm_ops; | 362 | vma->vm_ops = &ttm_bo_vm_ops; |
361 | vma->vm_private_data = ttm_bo_reference(bo); | 363 | vma->vm_private_data = ttm_bo_reference(bo); |
362 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | 364 | vma->vm_flags |= VM_MIXEDMAP; |
363 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | 365 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; |
364 | return 0; | 366 | return 0; |
365 | } | 367 | } |
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index dbadd49e4c4a..377176372da8 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -421,7 +421,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | |||
421 | clips[i].x2 - clips[i].x1, | 421 | clips[i].x2 - clips[i].x1, |
422 | clips[i].y2 - clips[i].y1); | 422 | clips[i].y2 - clips[i].y1); |
423 | if (ret) | 423 | if (ret) |
424 | goto unlock; | 424 | break; |
425 | } | 425 | } |
426 | 426 | ||
427 | if (ufb->obj->base.import_attach) { | 427 | if (ufb->obj->base.import_attach) { |
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 8d67b943ac05..be4fcd0f0e0f 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -60,7 +60,7 @@ int udl_dumb_create(struct drm_file *file, | |||
60 | struct drm_device *dev, | 60 | struct drm_device *dev, |
61 | struct drm_mode_create_dumb *args) | 61 | struct drm_mode_create_dumb *args) |
62 | { | 62 | { |
63 | args->pitch = args->width * ((args->bpp + 1) / 8); | 63 | args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8); |
64 | args->size = args->pitch * args->height; | 64 | args->size = args->pitch * args->height; |
65 | return udl_gem_create(file, dev, | 65 | return udl_gem_create(file, dev, |
66 | args->size, &args->handle); | 66 | args->size, &args->handle); |
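The pitch fix replaces truncating division with a round-up: `(bpp + 1) / 8` under-allocates for any depth more than one bit past a byte boundary, whereas `DIV_ROUND_UP(bpp, 8)` expands to `(bpp + 7) / 8`. A self-contained check (the macro is restated here for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned bpp = 12;                         /* e.g. a 12-bpp format */
	printf("old: %u\n", (bpp + 1) / 8);        /* 1 -- under-allocates */
	printf("new: %u\n", DIV_ROUND_UP(bpp, 8)); /* 2 -- correct */
	return 0;
}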
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0083cbf99edf..c35715f26f40 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -722,7 +722,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
722 | 722 | ||
723 | ret = ttm_bo_device_init(&dev_priv->bdev, | 723 | ret = ttm_bo_device_init(&dev_priv->bdev, |
724 | dev_priv->bo_global_ref.ref.object, | 724 | dev_priv->bo_global_ref.ref.object, |
725 | &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET, | 725 | &vmw_bo_driver, |
726 | dev->anon_inode->i_mapping, | ||
727 | VMWGFX_FILE_PAGE_OFFSET, | ||
726 | false); | 728 | false); |
727 | if (unlikely(ret != 0)) { | 729 | if (unlikely(ret != 0)) { |
728 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); | 730 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); |
@@ -969,7 +971,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |||
969 | goto out_no_shman; | 971 | goto out_no_shman; |
970 | 972 | ||
971 | file_priv->driver_priv = vmw_fp; | 973 | file_priv->driver_priv = vmw_fp; |
972 | dev_priv->bdev.dev_mapping = dev->dev_mapping; | ||
973 | 974 | ||
974 | return 0; | 975 | return 0; |
975 | 976 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 82468d902915..e7af580ab977 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
830 | if (unlikely(ret != 0)) | 830 | if (unlikely(ret != 0)) |
831 | goto out_unlock; | 831 | goto out_unlock; |
832 | 832 | ||
833 | /* | ||
834 | * A gb-aware client referencing a shared surface will | ||
835 | * expect a backup buffer to be present. | ||
836 | */ | ||
837 | if (dev_priv->has_mob && req->shareable) { | ||
838 | uint32_t backup_handle; | ||
839 | |||
840 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
841 | res->backup_size, | ||
842 | true, | ||
843 | &backup_handle, | ||
844 | &res->backup); | ||
845 | if (unlikely(ret != 0)) { | ||
846 | vmw_resource_unreference(&res); | ||
847 | goto out_unlock; | ||
848 | } | ||
849 | } | ||
850 | |||
833 | tmp = vmw_resource_reference(&srf->res); | 851 | tmp = vmw_resource_reference(&srf->res); |
834 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | 852 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
835 | req->shareable, VMW_RES_SURFACE, | 853 | req->shareable, VMW_RES_SURFACE, |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index f5ed03164d86..de17c5593d97 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -387,7 +387,7 @@ config I2C_CBUS_GPIO | |||
387 | 387 | ||
388 | config I2C_CPM | 388 | config I2C_CPM |
389 | tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" | 389 | tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" |
390 | depends on (CPM1 || CPM2) && OF_I2C | 390 | depends on CPM1 || CPM2 |
391 | help | 391 | help |
392 | This supports the use of the I2C interface on Freescale | 392 | This supports the use of the I2C interface on Freescale |
393 | processors with CPM1 or CPM2. | 393 | processors with CPM1 or CPM2. |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1af70145fab9..074b9c8e4cf0 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg) | |||
979 | int r; | 979 | int r; |
980 | struct dm_io_region o_region, c_region; | 980 | struct dm_io_region o_region, c_region; |
981 | struct cache *cache = mg->cache; | 981 | struct cache *cache = mg->cache; |
982 | sector_t cblock = from_cblock(mg->cblock); | ||
982 | 983 | ||
983 | o_region.bdev = cache->origin_dev->bdev; | 984 | o_region.bdev = cache->origin_dev->bdev; |
984 | o_region.count = cache->sectors_per_block; | 985 | o_region.count = cache->sectors_per_block; |
985 | 986 | ||
986 | c_region.bdev = cache->cache_dev->bdev; | 987 | c_region.bdev = cache->cache_dev->bdev; |
987 | c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; | 988 | c_region.sector = cblock * cache->sectors_per_block; |
988 | c_region.count = cache->sectors_per_block; | 989 | c_region.count = cache->sectors_per_block; |
989 | 990 | ||
990 | if (mg->writeback || mg->demote) { | 991 | if (mg->writeback || mg->demote) { |
@@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2464 | bool discarded_block; | 2465 | bool discarded_block; |
2465 | struct dm_bio_prison_cell *cell; | 2466 | struct dm_bio_prison_cell *cell; |
2466 | struct policy_result lookup_result; | 2467 | struct policy_result lookup_result; |
2467 | struct per_bio_data *pb; | 2468 | struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); |
2468 | 2469 | ||
2469 | if (from_oblock(block) > from_oblock(cache->origin_blocks)) { | 2470 | if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { |
2470 | /* | 2471 | /* |
2471 | * This can only occur if the io goes to a partial block at | 2472 | * This can only occur if the io goes to a partial block at |
2472 | * the end of the origin device. We don't cache these. | 2473 | * the end of the origin device. We don't cache these. |
2473 | * Just remap to the origin and carry on. | 2474 | * Just remap to the origin and carry on. |
2474 | */ | 2475 | */ |
2475 | remap_to_origin_clear_discard(cache, bio, block); | 2476 | remap_to_origin(cache, bio); |
2476 | return DM_MAPIO_REMAPPED; | 2477 | return DM_MAPIO_REMAPPED; |
2477 | } | 2478 | } |
2478 | 2479 | ||
2479 | pb = init_per_bio_data(bio, pb_data_size); | ||
2480 | |||
2481 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { | 2480 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { |
2482 | defer_bio(cache, bio); | 2481 | defer_bio(cache, bio); |
2483 | return DM_MAPIO_SUBMITTED; | 2482 | return DM_MAPIO_SUBMITTED; |
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index b9e2000969f0..95c894482fdd 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, | |||
240 | 240 | ||
241 | nid = cpu_to_node(cpu); | 241 | nid = cpu_to_node(cpu); |
242 | page = alloc_pages_exact_node(nid, | 242 | page = alloc_pages_exact_node(nid, |
243 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 243 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
244 | pg_order); | 244 | pg_order); |
245 | if (page == NULL) { | 245 | if (page == NULL) { |
246 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " | 246 | dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index a2c47476804d..e8f133e926aa 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -730,7 +730,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon | |||
730 | client_info->ntt = 0; | 730 | client_info->ntt = 0; |
731 | } | 731 | } |
732 | 732 | ||
733 | if (!vlan_get_tag(skb, &client_info->vlan_id)) | 733 | if (vlan_get_tag(skb, &client_info->vlan_id)) |
734 | client_info->vlan_id = 0; | 734 | client_info->vlan_id = 0; |
735 | 735 | ||
736 | if (!client_info->assigned) { | 736 | if (!client_info->assigned) { |
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index c37878432717..298c26509095 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c | |||
@@ -121,6 +121,7 @@ static struct bond_opt_value bond_resend_igmp_tbl[] = { | |||
121 | static struct bond_opt_value bond_lp_interval_tbl[] = { | 121 | static struct bond_opt_value bond_lp_interval_tbl[] = { |
122 | { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, | 122 | { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, |
123 | { "maxval", INT_MAX, BOND_VALFLAG_MAX}, | 123 | { "maxval", INT_MAX, BOND_VALFLAG_MAX}, |
124 | { NULL, -1, 0}, | ||
124 | }; | 125 | }; |
125 | 126 | ||
126 | static struct bond_option bond_opts[] = { | 127 | static struct bond_option bond_opts[] = { |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index cda25ac45b47..6c9e1c9bdeb8 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) | |||
2507 | 2507 | ||
2508 | bp->fw_wr_seq++; | 2508 | bp->fw_wr_seq++; |
2509 | msg_data |= bp->fw_wr_seq; | 2509 | msg_data |= bp->fw_wr_seq; |
2510 | bp->fw_last_msg = msg_data; | ||
2510 | 2511 | ||
2511 | bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); | 2512 | bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); |
2512 | 2513 | ||
@@ -4000,8 +4001,23 @@ bnx2_setup_wol(struct bnx2 *bp) | |||
4000 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; | 4001 | wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; |
4001 | } | 4002 | } |
4002 | 4003 | ||
4003 | if (!(bp->flags & BNX2_FLAG_NO_WOL)) | 4004 | if (!(bp->flags & BNX2_FLAG_NO_WOL)) { |
4004 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); | 4005 | u32 val; |
4006 | |||
4007 | wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; | ||
4008 | if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { | ||
4009 | bnx2_fw_sync(bp, wol_msg, 1, 0); | ||
4010 | return; | ||
4011 | } | ||
4012 | /* Tell firmware not to power down the PHY yet, otherwise | ||
4013 | * the chip will take a long time to respond to MMIO reads. | ||
4014 | */ | ||
4015 | val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); | ||
4016 | bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, | ||
4017 | val | BNX2_PORT_FEATURE_ASF_ENABLED); | ||
4018 | bnx2_fw_sync(bp, wol_msg, 1, 0); | ||
4019 | bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); | ||
4020 | } | ||
4005 | 4021 | ||
4006 | } | 4022 | } |
4007 | 4023 | ||
@@ -4033,9 +4049,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) | |||
4033 | 4049 | ||
4034 | if (bp->wol) | 4050 | if (bp->wol) |
4035 | pci_set_power_state(bp->pdev, PCI_D3hot); | 4051 | pci_set_power_state(bp->pdev, PCI_D3hot); |
4036 | } else { | 4052 | break; |
4037 | pci_set_power_state(bp->pdev, PCI_D3hot); | 4053 | |
4054 | } | ||
4055 | if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { | ||
4056 | u32 val; | ||
4057 | |||
4058 | /* Tell firmware not to power down the PHY yet, | ||
4059 | * otherwise the other port may not respond to | ||
4060 | * MMIO reads. | ||
4061 | */ | ||
4062 | val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); | ||
4063 | val &= ~BNX2_CONDITION_PM_STATE_MASK; | ||
4064 | val |= BNX2_CONDITION_PM_STATE_UNPREP; | ||
4065 | bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); | ||
4038 | } | 4066 | } |
4067 | pci_set_power_state(bp->pdev, PCI_D3hot); | ||
4039 | 4068 | ||
4040 | /* No more memory access after this point until | 4069 | /* No more memory access after this point until |
4041 | * device is brought back to D0. | 4070 | * device is brought back to D0. |
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index f1cf2c44e7ed..e341bc366fa5 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h | |||
@@ -6900,6 +6900,7 @@ struct bnx2 { | |||
6900 | 6900 | ||
6901 | u16 fw_wr_seq; | 6901 | u16 fw_wr_seq; |
6902 | u16 fw_drv_pulse_wr_seq; | 6902 | u16 fw_drv_pulse_wr_seq; |
6903 | u32 fw_last_msg; | ||
6903 | 6904 | ||
6904 | int rx_max_ring; | 6905 | int rx_max_ring; |
6905 | int rx_ring_size; | 6906 | int rx_ring_size; |
@@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file { | |||
7406 | #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 | 7407 | #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 |
7407 | #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 | 7408 | #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 |
7408 | #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 | 7409 | #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 |
7410 | #define BNX2_CONDITION_PM_STATE_MASK 0x00030000 | ||
7411 | #define BNX2_CONDITION_PM_STATE_FULL 0x00030000 | ||
7412 | #define BNX2_CONDITION_PM_STATE_PREP 0x00020000 | ||
7413 | #define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000 | ||
7409 | 7414 | ||
7410 | #define BNX2_BC_STATE_DEBUG_CMD 0x1dc | 7415 | #define BNX2_BC_STATE_DEBUG_CMD 0x1dc |
7411 | #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 | 7416 | #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 |
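The new PM_STATE definitions form a two-bit field inside BNX2_BC_STATE_CONDITION, and bnx2_set_power_state updates it with the usual read-modify-write: clear the field with the mask, then OR in the new state, leaving unrelated bits untouched. A standalone illustration of that pattern using the same constants (the starting value is invented for the demo):

    #include <stdio.h>
    #include <stdint.h>

    #define BNX2_CONDITION_PM_STATE_MASK   0x00030000
    #define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000

    int main(void)
    {
        /* Pretend shmem read: PM state = FULL plus unrelated low bits. */
        uint32_t val = 0x0003005a;

        val &= ~BNX2_CONDITION_PM_STATE_MASK;  /* clear only the PM field */
        val |= BNX2_CONDITION_PM_STATE_UNPREP; /* install the new state */

        printf("condition word: 0x%08x\n", (unsigned int)val); /* 0x0001005a */
        return 0;
    }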
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 1803c3959044..354ae9792bad 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
@@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar) | |||
1704 | while (!bfa_raw_sem_get(bar)) { | 1704 | while (!bfa_raw_sem_get(bar)) { |
1705 | if (--n <= 0) | 1705 | if (--n <= 0) |
1706 | return BFA_STATUS_BADFLASH; | 1706 | return BFA_STATUS_BADFLASH; |
1707 | udelay(10000); | 1707 | mdelay(10); |
1708 | } | 1708 | } |
1709 | return BFA_STATUS_OK; | 1709 | return BFA_STATUS_OK; |
1710 | } | 1710 | } |
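The bfa_ioc change above swaps udelay(10000) for mdelay(10): udelay() is meant for short busy-waits, and arguments in the thousands of microseconds risk overflowing the calibrated delay-loop arithmetic on some architectures, so millisecond waits belong in mdelay(). Roughly, the generic fallback in include/linux/delay.h turns a long delay into a loop of 1 ms busy-waits; the sketch below paraphrases that shape with a userspace udelay() mock so it actually runs:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's udelay(); here it only counts. */
    static unsigned long busy_us;
    static void udelay(unsigned long us) { busy_us += us; }

    /* Rough shape of the generic mdelay() fallback (paraphrased, not
     * verbatim): loop 1 ms at a time so each udelay() argument stays
     * small enough for the calibrated delay loop. */
    #define mdelay(n) do {                  \
            unsigned long __ms = (n);       \
            while (__ms--)                  \
                    udelay(1000);           \
    } while (0)

    int main(void)
    {
        mdelay(10);   /* what the fix uses instead of udelay(10000) */
        printf("busy-waited %lu us\n", busy_us);
        return 0;
    }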
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 3190d38e16fb..d0c38e01e99f 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp) | |||
632 | "Unable to allocate sk_buff\n"); | 632 | "Unable to allocate sk_buff\n"); |
633 | break; | 633 | break; |
634 | } | 634 | } |
635 | bp->rx_skbuff[entry] = skb; | ||
636 | 635 | ||
637 | /* now fill corresponding descriptor entry */ | 636 | /* now fill corresponding descriptor entry */ |
638 | paddr = dma_map_single(&bp->pdev->dev, skb->data, | 637 | paddr = dma_map_single(&bp->pdev->dev, skb->data, |
639 | bp->rx_buffer_size, DMA_FROM_DEVICE); | 638 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
639 | if (dma_mapping_error(&bp->pdev->dev, paddr)) { | ||
640 | dev_kfree_skb(skb); | ||
641 | break; | ||
642 | } | ||
643 | |||
644 | bp->rx_skbuff[entry] = skb; | ||
640 | 645 | ||
641 | if (entry == RX_RING_SIZE - 1) | 646 | if (entry == RX_RING_SIZE - 1) |
642 | paddr |= MACB_BIT(RX_WRAP); | 647 | paddr |= MACB_BIT(RX_WRAP); |
@@ -725,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget) | |||
725 | skb_put(skb, len); | 730 | skb_put(skb, len); |
726 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); | 731 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); |
727 | dma_unmap_single(&bp->pdev->dev, addr, | 732 | dma_unmap_single(&bp->pdev->dev, addr, |
728 | len, DMA_FROM_DEVICE); | 733 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
729 | 734 | ||
730 | skb->protocol = eth_type_trans(skb, bp->dev); | 735 | skb->protocol = eth_type_trans(skb, bp->dev); |
731 | skb_checksum_none_assert(skb); | 736 | skb_checksum_none_assert(skb); |
@@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1036 | } | 1041 | } |
1037 | 1042 | ||
1038 | entry = macb_tx_ring_wrap(bp->tx_head); | 1043 | entry = macb_tx_ring_wrap(bp->tx_head); |
1039 | bp->tx_head++; | ||
1040 | netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); | 1044 | netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); |
1041 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | 1045 | mapping = dma_map_single(&bp->pdev->dev, skb->data, |
1042 | len, DMA_TO_DEVICE); | 1046 | len, DMA_TO_DEVICE); |
1047 | if (dma_mapping_error(&bp->pdev->dev, mapping)) { | ||
1048 | kfree_skb(skb); | ||
1049 | goto unlock; | ||
1050 | } | ||
1043 | 1051 | ||
1052 | bp->tx_head++; | ||
1044 | tx_skb = &bp->tx_skb[entry]; | 1053 | tx_skb = &bp->tx_skb[entry]; |
1045 | tx_skb->skb = skb; | 1054 | tx_skb->skb = skb; |
1046 | tx_skb->mapping = mapping; | 1055 | tx_skb->mapping = mapping; |
@@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1066 | if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) | 1075 | if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) |
1067 | netif_stop_queue(dev); | 1076 | netif_stop_queue(dev); |
1068 | 1077 | ||
1078 | unlock: | ||
1069 | spin_unlock_irqrestore(&bp->lock, flags); | 1079 | spin_unlock_irqrestore(&bp->lock, flags); |
1070 | 1080 | ||
1071 | return NETDEV_TX_OK; | 1081 | return NETDEV_TX_OK; |
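Both macb hunks apply the same rule: a streaming DMA mapping can fail (notably behind an IOMMU or swiotlb), so every dma_map_single() must be checked with dma_mapping_error() before the address reaches a descriptor, and the skb freed on failure. A runnable userspace miniature of the control flow (the mocks stand in for the <linux/dma-mapping.h> calls):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;
    #define DMA_ERROR ((dma_addr_t)-1)

    /* Mocks standing in for dma_map_single()/dma_mapping_error(). */
    static dma_addr_t mock_map(const void *buf, int fail)
    {
        return fail ? DMA_ERROR : (dma_addr_t)(uintptr_t)buf;
    }
    static int mock_mapping_error(dma_addr_t addr) { return addr == DMA_ERROR; }

    int main(void)
    {
        char buf[64];
        dma_addr_t addr = mock_map(buf, 1 /* simulate a mapping failure */);

        if (mock_mapping_error(addr)) {
            /* macb's fix: free the skb / skip the entry instead of
             * handing a bogus bus address to the hardware. */
            printf("mapping failed, descriptor not published\n");
            return 1;
        }
        printf("descriptor gets bus address 0x%llx\n", (unsigned long long)addr);
        return 0;
    }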
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 479a7cba45c0..03a351300013 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex) | |||
528 | /* Clear any outstanding interrupt. */ | 528 | /* Clear any outstanding interrupt. */ |
529 | writel(0xffc00000, fep->hwp + FEC_IEVENT); | 529 | writel(0xffc00000, fep->hwp + FEC_IEVENT); |
530 | 530 | ||
531 | /* Setup multicast filter. */ | ||
532 | set_multicast_list(ndev); | ||
533 | #ifndef CONFIG_M5272 | ||
534 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | ||
535 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | ||
536 | #endif | ||
537 | |||
538 | /* Set maximum receive buffer size. */ | 531 | /* Set maximum receive buffer size. */ |
539 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | 532 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
540 | 533 | ||
@@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex) | |||
655 | 648 | ||
656 | writel(rcntl, fep->hwp + FEC_R_CNTRL); | 649 | writel(rcntl, fep->hwp + FEC_R_CNTRL); |
657 | 650 | ||
651 | /* Setup multicast filter. */ | ||
652 | set_multicast_list(ndev); | ||
653 | #ifndef CONFIG_M5272 | ||
654 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | ||
655 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | ||
656 | #endif | ||
657 | |||
658 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { | 658 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { |
659 | /* enable ENET endian swap */ | 659 | /* enable ENET endian swap */ |
660 | ecntl |= (1 << 8); | 660 | ecntl |= (1 << 8); |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 4be971590461..1fc8334fc181 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -522,10 +522,21 @@ retry: | |||
522 | return rc; | 522 | return rc; |
523 | } | 523 | } |
524 | 524 | ||
525 | static u64 ibmveth_encode_mac_addr(u8 *mac) | ||
526 | { | ||
527 | int i; | ||
528 | u64 encoded = 0; | ||
529 | |||
530 | for (i = 0; i < ETH_ALEN; i++) | ||
531 | encoded = (encoded << 8) | mac[i]; | ||
532 | |||
533 | return encoded; | ||
534 | } | ||
535 | |||
525 | static int ibmveth_open(struct net_device *netdev) | 536 | static int ibmveth_open(struct net_device *netdev) |
526 | { | 537 | { |
527 | struct ibmveth_adapter *adapter = netdev_priv(netdev); | 538 | struct ibmveth_adapter *adapter = netdev_priv(netdev); |
528 | u64 mac_address = 0; | 539 | u64 mac_address; |
529 | int rxq_entries = 1; | 540 | int rxq_entries = 1; |
530 | unsigned long lpar_rc; | 541 | unsigned long lpar_rc; |
531 | int rc; | 542 | int rc; |
@@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
579 | adapter->rx_queue.num_slots = rxq_entries; | 590 | adapter->rx_queue.num_slots = rxq_entries; |
580 | adapter->rx_queue.toggle = 1; | 591 | adapter->rx_queue.toggle = 1; |
581 | 592 | ||
582 | memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); | 593 | mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); |
583 | mac_address = mac_address >> 16; | ||
584 | 594 | ||
585 | rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | | 595 | rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | |
586 | adapter->rx_queue.queue_len; | 596 | adapter->rx_queue.queue_len; |
@@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) | |||
1183 | /* add the addresses to the filter table */ | 1193 | /* add the addresses to the filter table */ |
1184 | netdev_for_each_mc_addr(ha, netdev) { | 1194 | netdev_for_each_mc_addr(ha, netdev) { |
1185 | /* add the multicast address to the filter table */ | 1195 | /* add the multicast address to the filter table */ |
1186 | unsigned long mcast_addr = 0; | 1196 | u64 mcast_addr; |
1187 | memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); | 1197 | mcast_addr = ibmveth_encode_mac_addr(ha->addr); |
1188 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, | 1198 | lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, |
1189 | IbmVethMcastAddFilter, | 1199 | IbmVethMcastAddFilter, |
1190 | mcast_addr); | 1200 | mcast_addr); |
@@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
1372 | 1382 | ||
1373 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); | 1383 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); |
1374 | 1384 | ||
1375 | adapter->mac_addr = 0; | ||
1376 | memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN); | ||
1377 | |||
1378 | netdev->irq = dev->irq; | 1385 | netdev->irq = dev->irq; |
1379 | netdev->netdev_ops = &ibmveth_netdev_ops; | 1386 | netdev->netdev_ops = &ibmveth_netdev_ops; |
1380 | netdev->ethtool_ops = &netdev_ethtool_ops; | 1387 | netdev->ethtool_ops = &netdev_ethtool_ops; |
@@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
1383 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1390 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1384 | netdev->features |= netdev->hw_features; | 1391 | netdev->features |= netdev->hw_features; |
1385 | 1392 | ||
1386 | memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); | 1393 | memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); |
1387 | 1394 | ||
1388 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { | 1395 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1389 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; | 1396 | struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; |
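The ibmveth rework replaces a memcpy of six MAC bytes into a u64 followed by a shift, which only yields the value the hypervisor expects on big-endian hosts, with an explicit byte-packing helper that is endianness-independent. A compilable demonstration of the difference:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Same logic as the new ibmveth_encode_mac_addr(): endian-independent. */
    static uint64_t encode_mac(const uint8_t *mac)
    {
        uint64_t encoded = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++)
            encoded = (encoded << 8) | mac[i];
        return encoded;
    }

    int main(void)
    {
        const uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint64_t by_copy = 0;

        /* Old approach: the u64's byte layout depends on host endianness,
         * so the shifted result is only correct on big-endian machines. */
        memcpy(&by_copy, mac, ETH_ALEN);
        by_copy >>= 16;

        printf("explicit packing: 0x%012llx\n", (unsigned long long)encode_mac(mac));
        printf("memcpy + shift:   0x%012llx\n", (unsigned long long)by_copy);
        return 0;
    }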
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 451ba7949e15..1f37499d4398 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h | |||
@@ -138,7 +138,6 @@ struct ibmveth_adapter { | |||
138 | struct napi_struct napi; | 138 | struct napi_struct napi; |
139 | struct net_device_stats stats; | 139 | struct net_device_stats stats; |
140 | unsigned int mcastFilterSize; | 140 | unsigned int mcastFilterSize; |
141 | unsigned long mac_addr; | ||
142 | void * buffer_list_addr; | 141 | void * buffer_list_addr; |
143 | void * filter_list_addr; | 142 | void * filter_list_addr; |
144 | dma_addr_t buffer_list_dma; | 143 | dma_addr_t buffer_list_dma; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index fad45316200a..84a96f70dfb5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -742,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, | |||
742 | err = mlx4_en_uc_steer_add(priv, new_mac, | 742 | err = mlx4_en_uc_steer_add(priv, new_mac, |
743 | &qpn, | 743 | &qpn, |
744 | &entry->reg_id); | 744 | &entry->reg_id); |
745 | if (err) | ||
746 | return err; | ||
747 | if (priv->tunnel_reg_id) { | ||
748 | mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); | ||
749 | priv->tunnel_reg_id = 0; | ||
750 | } | ||
751 | err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, | ||
752 | &priv->tunnel_reg_id); | ||
745 | return err; | 753 | return err; |
746 | } | 754 | } |
747 | } | 755 | } |
@@ -1792,6 +1800,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) | |||
1792 | mc_list[5] = priv->port; | 1800 | mc_list[5] = priv->port; |
1793 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, | 1801 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, |
1794 | mc_list, MLX4_PROT_ETH, mclist->reg_id); | 1802 | mc_list, MLX4_PROT_ETH, mclist->reg_id); |
1803 | if (mclist->tunnel_reg_id) | ||
1804 | mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); | ||
1795 | } | 1805 | } |
1796 | mlx4_en_clear_list(dev); | 1806 | mlx4_en_clear_list(dev); |
1797 | list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { | 1807 | list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 91b69ff4b4a2..7e2995ecea6f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
129 | [0] = "RSS support", | 129 | [0] = "RSS support", |
130 | [1] = "RSS Toeplitz Hash Function support", | 130 | [1] = "RSS Toeplitz Hash Function support", |
131 | [2] = "RSS XOR Hash Function support", | 131 | [2] = "RSS XOR Hash Function support", |
132 | [3] = "Device manage flow steering support", | 132 | [3] = "Device managed flow steering support", |
133 | [4] = "Automatic MAC reassignment support", | 133 | [4] = "Automatic MAC reassignment support", |
134 | [5] = "Time stamping support", | 134 | [5] = "Time stamping support", |
135 | [6] = "VST (control vlan insertion/stripping) support", | 135 | [6] = "VST (control vlan insertion/stripping) support", |
136 | [7] = "FSM (MAC anti-spoofing) support", | 136 | [7] = "FSM (MAC anti-spoofing) support", |
137 | [8] = "Dynamic QP updates support", | 137 | [8] = "Dynamic QP updates support", |
138 | [9] = "TCP/IP offloads/flow-steering for VXLAN support" | 138 | [9] = "Device managed flow steering IPoIB support", |
139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support" | ||
139 | }; | 140 | }; |
140 | int i; | 141 | int i; |
141 | 142 | ||
@@ -859,7 +860,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
859 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); | 860 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); |
860 | 861 | ||
861 | /* For guests, disable vxlan tunneling */ | 862 | /* For guests, disable vxlan tunneling */ |
862 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); | 863 | MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); |
863 | field &= 0xf7; | 864 | field &= 0xf7; |
864 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); | 865 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); |
865 | 866 | ||
@@ -869,7 +870,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
869 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); | 870 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); |
870 | 871 | ||
871 | /* For guests, disable mw type 2 */ | 872 | /* For guests, disable mw type 2 */ |
872 | MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 873 | MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
873 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; | 874 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; |
874 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 875 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
875 | 876 | ||
@@ -883,7 +884,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
883 | } | 884 | } |
884 | 885 | ||
885 | /* turn off ipoib managed steering for guests */ | 886 | /* turn off ipoib managed steering for guests */ |
886 | MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); | 887 | MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); |
887 | field &= ~0x80; | 888 | field &= ~0x80; |
888 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); | 889 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); |
889 | 890 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d711158b0d4b..936c15364739 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -150,6 +150,8 @@ struct mlx4_port_config { | |||
150 | struct pci_dev *pdev; | 150 | struct pci_dev *pdev; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | static atomic_t pf_loading = ATOMIC_INIT(0); | ||
154 | |||
153 | int mlx4_check_port_params(struct mlx4_dev *dev, | 155 | int mlx4_check_port_params(struct mlx4_dev *dev, |
154 | enum mlx4_port_type *port_type) | 156 | enum mlx4_port_type *port_type) |
155 | { | 157 | { |
@@ -749,7 +751,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev) | |||
749 | has_eth_port = true; | 751 | has_eth_port = true; |
750 | } | 752 | } |
751 | 753 | ||
752 | if (has_ib_port) | 754 | if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) |
753 | request_module_nowait(IB_DRV_NAME); | 755 | request_module_nowait(IB_DRV_NAME); |
754 | if (has_eth_port) | 756 | if (has_eth_port) |
755 | request_module_nowait(EN_DRV_NAME); | 757 | request_module_nowait(EN_DRV_NAME); |
@@ -1407,6 +1409,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev) | |||
1407 | u32 slave_read; | 1409 | u32 slave_read; |
1408 | u32 cmd_channel_ver; | 1410 | u32 cmd_channel_ver; |
1409 | 1411 | ||
1412 | if (atomic_read(&pf_loading)) { | ||
1413 | mlx4_warn(dev, "PF is not ready. Deferring probe\n"); | ||
1414 | return -EPROBE_DEFER; | ||
1415 | } | ||
1416 | |||
1410 | mutex_lock(&priv->cmd.slave_cmd_mutex); | 1417 | mutex_lock(&priv->cmd.slave_cmd_mutex); |
1411 | priv->cmd.max_cmds = 1; | 1418 | priv->cmd.max_cmds = 1; |
1412 | mlx4_warn(dev, "Sending reset\n"); | 1419 | mlx4_warn(dev, "Sending reset\n"); |
@@ -2319,7 +2326,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) | |||
2319 | 2326 | ||
2320 | if (num_vfs) { | 2327 | if (num_vfs) { |
2321 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); | 2328 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); |
2329 | |||
2330 | atomic_inc(&pf_loading); | ||
2322 | err = pci_enable_sriov(pdev, num_vfs); | 2331 | err = pci_enable_sriov(pdev, num_vfs); |
2332 | atomic_dec(&pf_loading); | ||
2333 | |||
2323 | if (err) { | 2334 | if (err) { |
2324 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", | 2335 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", |
2325 | err); | 2336 | err); |
@@ -2684,6 +2695,7 @@ static struct pci_driver mlx4_driver = { | |||
2684 | .name = DRV_NAME, | 2695 | .name = DRV_NAME, |
2685 | .id_table = mlx4_pci_table, | 2696 | .id_table = mlx4_pci_table, |
2686 | .probe = mlx4_init_one, | 2697 | .probe = mlx4_init_one, |
2698 | .shutdown = mlx4_remove_one, | ||
2687 | .remove = mlx4_remove_one, | 2699 | .remove = mlx4_remove_one, |
2688 | .err_handler = &mlx4_err_handler, | 2700 | .err_handler = &mlx4_err_handler, |
2689 | }; | 2701 | }; |
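The pf_loading counter in the mlx4 hunks closes a probe-ordering race: while the PF sits inside pci_enable_sriov(), freshly created VFs can start probing before the PF's command channel is ready, so mlx4_init_slave() returns -EPROBE_DEFER and the driver core retries the VF later. A runnable miniature of the handshake using C11 atomics (the kernel defines EPROBE_DEFER as 517):

    #include <stdatomic.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517   /* same value as the kernel's errno */

    static atomic_int pf_loading;

    /* VF probe path: bail out with "try again later" while the PF is busy. */
    static int slave_init(void)
    {
        if (atomic_load(&pf_loading)) {
            printf("PF is not ready, deferring probe\n");
            return -EPROBE_DEFER;
        }
        printf("slave initialized\n");
        return 0;
    }

    int main(void)
    {
        atomic_fetch_add(&pf_loading, 1);   /* PF enters pci_enable_sriov() */
        slave_init();                       /* VF probes too early: deferred */
        atomic_fetch_sub(&pf_loading, 1);   /* SR-IOV enable finished */
        slave_init();                       /* retry succeeds */
        return 0;
    }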
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e9779653cd4c..3ff7bc3e7a23 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -209,7 +209,7 @@ static const struct { | |||
209 | [RTL_GIGA_MAC_VER_16] = | 209 | [RTL_GIGA_MAC_VER_16] = |
210 | _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), | 210 | _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), |
211 | [RTL_GIGA_MAC_VER_17] = | 211 | [RTL_GIGA_MAC_VER_17] = |
212 | _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), | 212 | _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), |
213 | [RTL_GIGA_MAC_VER_18] = | 213 | [RTL_GIGA_MAC_VER_18] = |
214 | _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), | 214 | _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), |
215 | [RTL_GIGA_MAC_VER_19] = | 215 | [RTL_GIGA_MAC_VER_19] = |
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 72d282bf33a5..c553f6b5a913 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c | |||
@@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) | |||
151 | sizeof(struct dma_desc))); | 151 | sizeof(struct dma_desc))); |
152 | } | 152 | } |
153 | 153 | ||
154 | const struct stmmac_chain_mode_ops chain_mode_ops = { | 154 | const struct stmmac_mode_ops chain_mode_ops = { |
155 | .init = stmmac_init_dma_chain, | 155 | .init = stmmac_init_dma_chain, |
156 | .is_jumbo_frm = stmmac_is_jumbo_frm, | 156 | .is_jumbo_frm = stmmac_is_jumbo_frm, |
157 | .jumbo_frm = stmmac_jumbo_frm, | 157 | .jumbo_frm = stmmac_jumbo_frm, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7834a3993946..74610f3aca9e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -419,20 +419,13 @@ struct mii_regs { | |||
419 | unsigned int data; /* MII Data */ | 419 | unsigned int data; /* MII Data */ |
420 | }; | 420 | }; |
421 | 421 | ||
422 | struct stmmac_ring_mode_ops { | 422 | struct stmmac_mode_ops { |
423 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | ||
424 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | ||
425 | void (*refill_desc3) (void *priv, struct dma_desc *p); | ||
426 | void (*init_desc3) (struct dma_desc *p); | ||
427 | void (*clean_desc3) (void *priv, struct dma_desc *p); | ||
428 | int (*set_16kib_bfsize) (int mtu); | ||
429 | }; | ||
430 | |||
431 | struct stmmac_chain_mode_ops { | ||
432 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, | 423 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, |
433 | unsigned int extend_desc); | 424 | unsigned int extend_desc); |
434 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | 425 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); |
435 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | 426 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); |
427 | int (*set_16kib_bfsize)(int mtu); | ||
428 | void (*init_desc3)(struct dma_desc *p); | ||
436 | void (*refill_desc3) (void *priv, struct dma_desc *p); | 429 | void (*refill_desc3) (void *priv, struct dma_desc *p); |
437 | void (*clean_desc3) (void *priv, struct dma_desc *p); | 430 | void (*clean_desc3) (void *priv, struct dma_desc *p); |
438 | }; | 431 | }; |
@@ -441,8 +434,7 @@ struct mac_device_info { | |||
441 | const struct stmmac_ops *mac; | 434 | const struct stmmac_ops *mac; |
442 | const struct stmmac_desc_ops *desc; | 435 | const struct stmmac_desc_ops *desc; |
443 | const struct stmmac_dma_ops *dma; | 436 | const struct stmmac_dma_ops *dma; |
444 | const struct stmmac_ring_mode_ops *ring; | 437 | const struct stmmac_mode_ops *mode; |
445 | const struct stmmac_chain_mode_ops *chain; | ||
446 | const struct stmmac_hwtimestamp *ptp; | 438 | const struct stmmac_hwtimestamp *ptp; |
447 | struct mii_regs mii; /* MII register Addresses */ | 439 | struct mii_regs mii; /* MII register Addresses */ |
448 | struct mac_link link; | 440 | struct mac_link link; |
@@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, | |||
460 | void stmmac_set_mac(void __iomem *ioaddr, bool enable); | 452 | void stmmac_set_mac(void __iomem *ioaddr, bool enable); |
461 | 453 | ||
462 | void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); | 454 | void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); |
463 | extern const struct stmmac_ring_mode_ops ring_mode_ops; | 455 | extern const struct stmmac_mode_ops ring_mode_ops; |
464 | extern const struct stmmac_chain_mode_ops chain_mode_ops; | 456 | extern const struct stmmac_mode_ops chain_mode_ops; |
465 | 457 | ||
466 | #endif /* __COMMON_H__ */ | 458 | #endif /* __COMMON_H__ */ |
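The stmmac refactor running through the next several files collapses stmmac_ring_mode_ops and stmmac_chain_mode_ops into a single stmmac_mode_ops vtable; hooks that only make sense in one mode (set_16kib_bfsize, init_desc3) are left NULL in the other and guarded by NULL checks at the call sites, replacing the old if (priv->mode == ...) branching. A compilable miniature of that design (names mirror the stmmac hooks, types simplified):

    #include <stdio.h>

    struct mode_ops {
        int (*set_16kib_bfsize)(int mtu);   /* optional: ring mode only */
        void (*refill_desc3)(void);         /* common to both modes */
    };

    static int ring_bfsize(int mtu) { return mtu >= 8192 ? 16384 : 0; } /* illustrative */
    static void ring_refill(void)  { puts("ring refill"); }
    static void chain_refill(void) { puts("chain refill"); }

    static const struct mode_ops ring_ops  = { ring_bfsize, ring_refill };
    static const struct mode_ops chain_ops = { NULL, chain_refill }; /* hook absent */

    static void init_rings(const struct mode_ops *mode, int mtu)
    {
        int bfsize = 0;

        if (mode->set_16kib_bfsize)        /* NULL check replaces mode branching */
            bfsize = mode->set_16kib_bfsize(mtu);
        printf("bfsize = %d\n", bfsize);
        mode->refill_desc3();
    }

    int main(void)
    {
        init_rings(&ring_ops, 9000);
        init_rings(&chain_ops, 9000);
        return 0;
    }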
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a96c7c2f5f3f..650a4be6bce5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) | |||
100 | { | 100 | { |
101 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; | 101 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; |
102 | 102 | ||
103 | if (unlikely(priv->plat->has_gmac)) | 103 | /* Fill DES3 in case of RING mode */ |
104 | /* Fill DES3 in case of RING mode */ | 104 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) |
105 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) | 105 | p->des3 = p->des2 + BUF_SIZE_8KiB; |
106 | p->des3 = p->des2 + BUF_SIZE_8KiB; | ||
107 | } | 106 | } |
108 | 107 | ||
109 | /* In ring mode we need to fill the desc3 because it is used as buffer */ | 108 | /* In ring mode we need to fill the desc3 because it is used as buffer */ |
@@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu) | |||
126 | return ret; | 125 | return ret; |
127 | } | 126 | } |
128 | 127 | ||
129 | const struct stmmac_ring_mode_ops ring_mode_ops = { | 128 | const struct stmmac_mode_ops ring_mode_ops = { |
130 | .is_jumbo_frm = stmmac_is_jumbo_frm, | 129 | .is_jumbo_frm = stmmac_is_jumbo_frm, |
131 | .jumbo_frm = stmmac_jumbo_frm, | 130 | .jumbo_frm = stmmac_jumbo_frm, |
132 | .refill_desc3 = stmmac_refill_desc3, | 131 | .refill_desc3 = stmmac_refill_desc3, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 078ad0ec8593..8543e1cfd55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -92,8 +92,8 @@ static int tc = TC_DEFAULT; | |||
92 | module_param(tc, int, S_IRUGO | S_IWUSR); | 92 | module_param(tc, int, S_IRUGO | S_IWUSR); |
93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); | 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); |
94 | 94 | ||
95 | #define DMA_BUFFER_SIZE BUF_SIZE_4KiB | 95 | #define DEFAULT_BUFSIZE 1536 |
96 | static int buf_sz = DMA_BUFFER_SIZE; | 96 | static int buf_sz = DEFAULT_BUFSIZE; |
97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); | 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); |
98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); | 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); |
99 | 99 | ||
@@ -136,8 +136,8 @@ static void stmmac_verify_args(void) | |||
136 | dma_rxsize = DMA_RX_SIZE; | 136 | dma_rxsize = DMA_RX_SIZE; |
137 | if (unlikely(dma_txsize < 0)) | 137 | if (unlikely(dma_txsize < 0)) |
138 | dma_txsize = DMA_TX_SIZE; | 138 | dma_txsize = DMA_TX_SIZE; |
139 | if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) | 139 | if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) |
140 | buf_sz = DMA_BUFFER_SIZE; | 140 | buf_sz = DEFAULT_BUFSIZE; |
141 | if (unlikely(flow_ctrl > 1)) | 141 | if (unlikely(flow_ctrl > 1)) |
142 | flow_ctrl = FLOW_AUTO; | 142 | flow_ctrl = FLOW_AUTO; |
143 | else if (likely(flow_ctrl < 0)) | 143 | else if (likely(flow_ctrl < 0)) |
@@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
286 | 286 | ||
287 | /* MAC core supports the EEE feature. */ | 287 | /* MAC core supports the EEE feature. */ |
288 | if (priv->dma_cap.eee) { | 288 | if (priv->dma_cap.eee) { |
289 | int tx_lpi_timer = priv->tx_lpi_timer; | ||
290 | |||
289 | /* Check if the PHY supports EEE */ | 291 | /* Check if the PHY supports EEE */ |
290 | if (phy_init_eee(priv->phydev, 1)) | 292 | if (phy_init_eee(priv->phydev, 1)) { |
293 | /* Handle the case where EEE can no longer be supported | ||
294 | * at run time (for example because the link partner's | ||
295 | * caps have changed). | ||
296 | * In that case the driver disables its own timers. | ||
297 | */ | ||
298 | if (priv->eee_active) { | ||
299 | pr_debug("stmmac: disable EEE\n"); | ||
300 | del_timer_sync(&priv->eee_ctrl_timer); | ||
301 | priv->hw->mac->set_eee_timer(priv->ioaddr, 0, | ||
302 | tx_lpi_timer); | ||
303 | } | ||
304 | priv->eee_active = 0; | ||
291 | goto out; | 305 | goto out; |
292 | 306 | } | |
307 | /* Activate the EEE and start timers */ | ||
293 | if (!priv->eee_active) { | 308 | if (!priv->eee_active) { |
294 | priv->eee_active = 1; | 309 | priv->eee_active = 1; |
295 | init_timer(&priv->eee_ctrl_timer); | 310 | init_timer(&priv->eee_ctrl_timer); |
@@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
300 | 315 | ||
301 | priv->hw->mac->set_eee_timer(priv->ioaddr, | 316 | priv->hw->mac->set_eee_timer(priv->ioaddr, |
302 | STMMAC_DEFAULT_LIT_LS, | 317 | STMMAC_DEFAULT_LIT_LS, |
303 | priv->tx_lpi_timer); | 318 | tx_lpi_timer); |
304 | } else | 319 | } else |
305 | /* Set HW EEE according to the speed */ | 320 | /* Set HW EEE according to the speed */ |
306 | priv->hw->mac->set_eee_pls(priv->ioaddr, | 321 | priv->hw->mac->set_eee_pls(priv->ioaddr, |
307 | priv->phydev->link); | 322 | priv->phydev->link); |
308 | 323 | ||
309 | pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); | 324 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); |
310 | 325 | ||
311 | ret = true; | 326 | ret = true; |
312 | } | 327 | } |
@@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize) | |||
886 | ret = BUF_SIZE_8KiB; | 901 | ret = BUF_SIZE_8KiB; |
887 | else if (mtu >= BUF_SIZE_2KiB) | 902 | else if (mtu >= BUF_SIZE_2KiB) |
888 | ret = BUF_SIZE_4KiB; | 903 | ret = BUF_SIZE_4KiB; |
889 | else if (mtu >= DMA_BUFFER_SIZE) | 904 | else if (mtu > DEFAULT_BUFSIZE) |
890 | ret = BUF_SIZE_2KiB; | 905 | ret = BUF_SIZE_2KiB; |
891 | else | 906 | else |
892 | ret = DMA_BUFFER_SIZE; | 907 | ret = DEFAULT_BUFSIZE; |
893 | 908 | ||
894 | return ret; | 909 | return ret; |
895 | } | 910 | } |
@@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
951 | 966 | ||
952 | p->des2 = priv->rx_skbuff_dma[i]; | 967 | p->des2 = priv->rx_skbuff_dma[i]; |
953 | 968 | ||
954 | if ((priv->mode == STMMAC_RING_MODE) && | 969 | if ((priv->hw->mode->init_desc3) && |
955 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) | 970 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) |
956 | priv->hw->ring->init_desc3(p); | 971 | priv->hw->mode->init_desc3(p); |
957 | 972 | ||
958 | return 0; | 973 | return 0; |
959 | } | 974 | } |
@@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
984 | unsigned int bfsize = 0; | 999 | unsigned int bfsize = 0; |
985 | int ret = -ENOMEM; | 1000 | int ret = -ENOMEM; |
986 | 1001 | ||
987 | /* Set the max buffer size according to the DESC mode | 1002 | if (priv->hw->mode->set_16kib_bfsize) |
988 | * and the MTU. Note that RING mode allows 16KiB bsize. | 1003 | bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); |
989 | */ | ||
990 | if (priv->mode == STMMAC_RING_MODE) | ||
991 | bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); | ||
992 | 1004 | ||
993 | if (bfsize < BUF_SIZE_16KiB) | 1005 | if (bfsize < BUF_SIZE_16KiB) |
994 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); | 1006 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
@@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
1029 | /* Setup the chained descriptor addresses */ | 1041 | /* Setup the chained descriptor addresses */ |
1030 | if (priv->mode == STMMAC_CHAIN_MODE) { | 1042 | if (priv->mode == STMMAC_CHAIN_MODE) { |
1031 | if (priv->extend_desc) { | 1043 | if (priv->extend_desc) { |
1032 | priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, | 1044 | priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, |
1033 | rxsize, 1); | 1045 | rxsize, 1); |
1034 | priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, | 1046 | priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, |
1035 | txsize, 1); | 1047 | txsize, 1); |
1036 | } else { | 1048 | } else { |
1037 | priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, | 1049 | priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, |
1038 | rxsize, 0); | 1050 | rxsize, 0); |
1039 | priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, | 1051 | priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, |
1040 | txsize, 0); | 1052 | txsize, 0); |
1041 | } | 1053 | } |
1042 | } | 1054 | } |
1043 | 1055 | ||
@@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
1288 | DMA_TO_DEVICE); | 1300 | DMA_TO_DEVICE); |
1289 | priv->tx_skbuff_dma[entry] = 0; | 1301 | priv->tx_skbuff_dma[entry] = 0; |
1290 | } | 1302 | } |
1291 | priv->hw->ring->clean_desc3(priv, p); | 1303 | priv->hw->mode->clean_desc3(priv, p); |
1292 | 1304 | ||
1293 | if (likely(skb != NULL)) { | 1305 | if (likely(skb != NULL)) { |
1294 | dev_kfree_skb(skb); | 1306 | dev_kfree_skb(skb); |
@@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1844 | int nfrags = skb_shinfo(skb)->nr_frags; | 1856 | int nfrags = skb_shinfo(skb)->nr_frags; |
1845 | struct dma_desc *desc, *first; | 1857 | struct dma_desc *desc, *first; |
1846 | unsigned int nopaged_len = skb_headlen(skb); | 1858 | unsigned int nopaged_len = skb_headlen(skb); |
1859 | unsigned int enh_desc = priv->plat->enh_desc; | ||
1847 | 1860 | ||
1848 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { | 1861 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { |
1849 | if (!netif_queue_stopped(dev)) { | 1862 | if (!netif_queue_stopped(dev)) { |
@@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1871 | first = desc; | 1884 | first = desc; |
1872 | 1885 | ||
1873 | /* To program the descriptors according to the size of the frame */ | 1886 | /* To program the descriptors according to the size of the frame */ |
1874 | if (priv->mode == STMMAC_RING_MODE) { | 1887 | if (enh_desc) |
1875 | is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, | 1888 | is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); |
1876 | priv->plat->enh_desc); | 1889 | |
1877 | if (unlikely(is_jumbo)) | ||
1878 | entry = priv->hw->ring->jumbo_frm(priv, skb, | ||
1879 | csum_insertion); | ||
1880 | } else { | ||
1881 | is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, | ||
1882 | priv->plat->enh_desc); | ||
1883 | if (unlikely(is_jumbo)) | ||
1884 | entry = priv->hw->chain->jumbo_frm(priv, skb, | ||
1885 | csum_insertion); | ||
1886 | } | ||
1887 | if (likely(!is_jumbo)) { | 1890 | if (likely(!is_jumbo)) { |
1888 | desc->des2 = dma_map_single(priv->device, skb->data, | 1891 | desc->des2 = dma_map_single(priv->device, skb->data, |
1889 | nopaged_len, DMA_TO_DEVICE); | 1892 | nopaged_len, DMA_TO_DEVICE); |
1890 | priv->tx_skbuff_dma[entry] = desc->des2; | 1893 | priv->tx_skbuff_dma[entry] = desc->des2; |
1891 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, | 1894 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, |
1892 | csum_insertion, priv->mode); | 1895 | csum_insertion, priv->mode); |
1893 | } else | 1896 | } else { |
1894 | desc = first; | 1897 | desc = first; |
1898 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); | ||
1899 | } | ||
1895 | 1900 | ||
1896 | for (i = 0; i < nfrags; i++) { | 1901 | for (i = 0; i < nfrags; i++) { |
1897 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1902 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
@@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) | |||
2029 | 2034 | ||
2030 | p->des2 = priv->rx_skbuff_dma[entry]; | 2035 | p->des2 = priv->rx_skbuff_dma[entry]; |
2031 | 2036 | ||
2032 | priv->hw->ring->refill_desc3(priv, p); | 2037 | priv->hw->mode->refill_desc3(priv, p); |
2033 | 2038 | ||
2034 | if (netif_msg_rx_status(priv)) | 2039 | if (netif_msg_rx_status(priv)) |
2035 | pr_debug("\trefill entry #%d\n", entry); | 2040 | pr_debug("\trefill entry #%d\n", entry); |
@@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
2633 | 2638 | ||
2634 | /* To use the chained or ring mode */ | 2639 | /* To use the chained or ring mode */ |
2635 | if (chain_mode) { | 2640 | if (chain_mode) { |
2636 | priv->hw->chain = &chain_mode_ops; | 2641 | priv->hw->mode = &chain_mode_ops; |
2637 | pr_info(" Chain mode enabled\n"); | 2642 | pr_info(" Chain mode enabled\n"); |
2638 | priv->mode = STMMAC_CHAIN_MODE; | 2643 | priv->mode = STMMAC_CHAIN_MODE; |
2639 | } else { | 2644 | } else { |
2640 | priv->hw->ring = &ring_mode_ops; | 2645 | priv->hw->mode = &ring_mode_ops; |
2641 | pr_info(" Ring mode enabled\n"); | 2646 | pr_info(" Ring mode enabled\n"); |
2642 | priv->mode = STMMAC_RING_MODE; | 2647 | priv->mode = STMMAC_RING_MODE; |
2643 | } | 2648 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index c61bc72b8e90..8fb32a80f1c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -36,7 +36,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
36 | #ifdef CONFIG_DWMAC_STI | 36 | #ifdef CONFIG_DWMAC_STI |
37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, | 37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, |
38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, | 38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, |
39 | { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, | 39 | { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, |
40 | #endif | 40 | #endif |
41 | /* SoC specific glue layers should come before generic bindings */ | 41 | /* SoC specific glue layers should come before generic bindings */ |
42 | { .compatible = "st,spear600-gmac"}, | 42 | { .compatible = "st,spear600-gmac"}, |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7141a1937360..d6fce9750b95 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -442,6 +442,8 @@ static int netvsc_probe(struct hv_device *dev, | |||
442 | if (!net) | 442 | if (!net) |
443 | return -ENOMEM; | 443 | return -ENOMEM; |
444 | 444 | ||
445 | netif_carrier_off(net); | ||
446 | |||
445 | net_device_ctx = netdev_priv(net); | 447 | net_device_ctx = netdev_priv(net); |
446 | net_device_ctx->device_ctx = dev; | 448 | net_device_ctx->device_ctx = dev; |
447 | hv_set_drvdata(dev, net); | 449 | hv_set_drvdata(dev, net); |
@@ -473,6 +475,8 @@ static int netvsc_probe(struct hv_device *dev, | |||
473 | pr_err("Unable to register netdev.\n"); | 475 | pr_err("Unable to register netdev.\n"); |
474 | rndis_filter_device_remove(dev); | 476 | rndis_filter_device_remove(dev); |
475 | free_netdev(net); | 477 | free_netdev(net); |
478 | } else { | ||
479 | schedule_delayed_work(&net_device_ctx->dwork, 0); | ||
476 | } | 480 | } |
477 | 481 | ||
478 | return ret; | 482 | return ret; |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 1084e5de3ceb..b54fd257652b 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -243,6 +243,22 @@ static int rndis_filter_send_request(struct rndis_device *dev, | |||
243 | return ret; | 243 | return ret; |
244 | } | 244 | } |
245 | 245 | ||
246 | static void rndis_set_link_state(struct rndis_device *rdev, | ||
247 | struct rndis_request *request) | ||
248 | { | ||
249 | u32 link_status; | ||
250 | struct rndis_query_complete *query_complete; | ||
251 | |||
252 | query_complete = &request->response_msg.msg.query_complete; | ||
253 | |||
254 | if (query_complete->status == RNDIS_STATUS_SUCCESS && | ||
255 | query_complete->info_buflen == sizeof(u32)) { | ||
256 | memcpy(&link_status, (void *)((unsigned long)query_complete + | ||
257 | query_complete->info_buf_offset), sizeof(u32)); | ||
258 | rdev->link_state = link_status != 0; | ||
259 | } | ||
260 | } | ||
261 | |||
246 | static void rndis_filter_receive_response(struct rndis_device *dev, | 262 | static void rndis_filter_receive_response(struct rndis_device *dev, |
247 | struct rndis_message *resp) | 263 | struct rndis_message *resp) |
248 | { | 264 | { |
@@ -272,6 +288,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev, | |||
272 | sizeof(struct rndis_message) + RNDIS_EXT_LEN) { | 288 | sizeof(struct rndis_message) + RNDIS_EXT_LEN) { |
273 | memcpy(&request->response_msg, resp, | 289 | memcpy(&request->response_msg, resp, |
274 | resp->msg_len); | 290 | resp->msg_len); |
291 | if (request->request_msg.ndis_msg_type == | ||
292 | RNDIS_MSG_QUERY && request->request_msg.msg. | ||
293 | query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) | ||
294 | rndis_set_link_state(dev, request); | ||
275 | } else { | 295 | } else { |
276 | netdev_err(ndev, | 296 | netdev_err(ndev, |
277 | "rndis response buffer overflow " | 297 | "rndis response buffer overflow " |
@@ -620,7 +640,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev) | |||
620 | ret = rndis_filter_query_device(dev, | 640 | ret = rndis_filter_query_device(dev, |
621 | RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, | 641 | RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, |
622 | &link_status, &size); | 642 | &link_status, &size); |
623 | dev->link_state = (link_status != 0) ? true : false; | ||
624 | 643 | ||
625 | return ret; | 644 | return ret; |
626 | } | 645 | } |
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index ab31544bc254..a30258aad139 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c | |||
@@ -546,12 +546,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) | |||
546 | int rc; | 546 | int rc; |
547 | unsigned long flags; | 547 | unsigned long flags; |
548 | 548 | ||
549 | spin_lock(&lp->lock); | 549 | spin_lock_irqsave(&lp->lock, flags); |
550 | if (lp->irq_busy) { | 550 | if (lp->irq_busy) { |
551 | spin_unlock(&lp->lock); | 551 | spin_unlock_irqrestore(&lp->lock, flags); |
552 | return -EBUSY; | 552 | return -EBUSY; |
553 | } | 553 | } |
554 | spin_unlock(&lp->lock); | 554 | spin_unlock_irqrestore(&lp->lock, flags); |
555 | 555 | ||
556 | might_sleep(); | 556 | might_sleep(); |
557 | 557 | ||
@@ -725,10 +725,11 @@ static void at86rf230_irqwork_level(struct work_struct *work) | |||
725 | static irqreturn_t at86rf230_isr(int irq, void *data) | 725 | static irqreturn_t at86rf230_isr(int irq, void *data) |
726 | { | 726 | { |
727 | struct at86rf230_local *lp = data; | 727 | struct at86rf230_local *lp = data; |
728 | unsigned long flags; | ||
728 | 729 | ||
729 | spin_lock(&lp->lock); | 730 | spin_lock_irqsave(&lp->lock, flags); |
730 | lp->irq_busy = 1; | 731 | lp->irq_busy = 1; |
731 | spin_unlock(&lp->lock); | 732 | spin_unlock_irqrestore(&lp->lock, flags); |
732 | 733 | ||
733 | schedule_work(&lp->irqwork); | 734 | schedule_work(&lp->irqwork); |
734 | 735 | ||
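Both at86rf230 hunks follow from one rule: lp->lock is also taken in at86rf230_isr(), i.e. in hard-IRQ context, so every process-context acquisition must disable local interrupts, otherwise the ISR can interrupt a holder on the same CPU and spin forever on the lock. A kernel-style fragment of the pattern (not standalone-compilable; the signatures are the real <linux/spinlock.h> ones):

    unsigned long flags;

    spin_lock_irqsave(&lp->lock, flags);     /* masks at86rf230_isr() on this CPU */
    lp->irq_busy = 1;
    spin_unlock_irqrestore(&lp->lock, flags);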
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 19c9eca0ef26..76d96b9ebcdb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -164,9 +164,9 @@ static const struct phy_setting settings[] = { | |||
164 | * of that setting. Returns the index of the last setting if | 164 | * of that setting. Returns the index of the last setting if |
165 | * none of the others match. | 165 | * none of the others match. |
166 | */ | 166 | */ |
167 | static inline int phy_find_setting(int speed, int duplex) | 167 | static inline unsigned int phy_find_setting(int speed, int duplex) |
168 | { | 168 | { |
169 | int idx = 0; | 169 | unsigned int idx = 0; |
170 | 170 | ||
171 | while (idx < ARRAY_SIZE(settings) && | 171 | while (idx < ARRAY_SIZE(settings) && |
172 | (settings[idx].speed != speed || settings[idx].duplex != duplex)) | 172 | (settings[idx].speed != speed || settings[idx].duplex != duplex)) |
@@ -185,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex) | |||
185 | * the mask in features. Returns the index of the last setting | 185 | * the mask in features. Returns the index of the last setting |
186 | * if nothing else matches. | 186 | * if nothing else matches. |
187 | */ | 187 | */ |
188 | static inline int phy_find_valid(int idx, u32 features) | 188 | static inline unsigned int phy_find_valid(unsigned int idx, u32 features) |
189 | { | 189 | { |
190 | while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) | 190 | while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) |
191 | idx++; | 191 | idx++; |
@@ -204,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features) | |||
204 | static void phy_sanitize_settings(struct phy_device *phydev) | 204 | static void phy_sanitize_settings(struct phy_device *phydev) |
205 | { | 205 | { |
206 | u32 features = phydev->supported; | 206 | u32 features = phydev->supported; |
207 | int idx; | 207 | unsigned int idx; |
208 | 208 | ||
209 | /* Sanitize settings based on PHY capabilities */ | 209 | /* Sanitize settings based on PHY capabilities */ |
210 | if ((features & SUPPORTED_Autoneg) == 0) | 210 | if ((features & SUPPORTED_Autoneg) == 0) |
@@ -954,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
954 | (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { | 954 | (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { |
955 | int eee_lp, eee_cap, eee_adv; | 955 | int eee_lp, eee_cap, eee_adv; |
956 | u32 lp, cap, adv; | 956 | u32 lp, cap, adv; |
957 | int idx, status; | 957 | int status; |
958 | unsigned int idx; | ||
958 | 959 | ||
959 | /* Read phy status to properly get the right settings */ | 960 | /* Read phy status to properly get the right settings */ |
960 | status = phy_read_status(phydev); | 961 | status = phy_read_status(phydev); |
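The phy.c type changes above matter because idx is compared against ARRAY_SIZE(settings), which has type size_t: in a signed/unsigned comparison the int is converted, so a negative value silently becomes huge and the bound check misfires. A compilable demonstration of the hazard (a strict compiler's -Wsign-compare would flag the first test):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int settings[8];
        int idx = -1;               /* a "not found" style sentinel */

        /* idx is converted to size_t for the comparison, so -1 becomes
         * SIZE_MAX and the check goes the wrong way. */
        if (idx < ARRAY_SIZE(settings))
            printf("signed idx: in bounds (never printed)\n");
        else
            printf("signed idx: treated as huge, bound check misfires\n");

        unsigned int uidx = 0;      /* the type the fix switches to */
        while (uidx < ARRAY_SIZE(settings))
            uidx++;
        printf("unsigned idx walked %u entries\n", uidx);
        return 0;
    }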
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 433f0a00c683..e2797f1e1b31 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
@@ -11,7 +11,7 @@ obj-$(CONFIG_USB_HSO) += hso.o | |||
11 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o | 11 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o |
12 | asix-y := asix_devices.o asix_common.o ax88172a.o | 12 | asix-y := asix_devices.o asix_common.o ax88172a.o |
13 | obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o | 13 | obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o |
14 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o | 14 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o |
15 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o | 15 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o |
16 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o | 16 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o |
17 | obj-$(CONFIG_USB_NET_SR9700) += sr9700.o | 17 | obj-$(CONFIG_USB_NET_SR9700) += sr9700.o |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 42e176912c8e..bd363b27e854 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -652,6 +652,13 @@ static const struct usb_device_id products[] = { | |||
652 | .driver_info = 0, | 652 | .driver_info = 0, |
653 | }, | 653 | }, |
654 | 654 | ||
655 | /* Samsung USB Ethernet Adapters */ | ||
656 | { | ||
657 | USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM, | ||
658 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
659 | .driver_info = 0, | ||
660 | }, | ||
661 | |||
655 | /* WHITELIST!!! | 662 | /* WHITELIST!!! |
656 | * | 663 | * |
657 | * CDC Ether uses two interfaces, not necessarily consecutive. | 664 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d89dbe395ad2..adb12f349a61 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -449,9 +449,6 @@ enum rtl8152_flags { | |||
449 | #define MCU_TYPE_PLA 0x0100 | 449 | #define MCU_TYPE_PLA 0x0100 |
450 | #define MCU_TYPE_USB 0x0000 | 450 | #define MCU_TYPE_USB 0x0000 |
451 | 451 | ||
452 | #define REALTEK_USB_DEVICE(vend, prod) \ | ||
453 | USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC) | ||
454 | |||
455 | struct rx_desc { | 452 | struct rx_desc { |
456 | __le32 opts1; | 453 | __le32 opts1; |
457 | #define RX_LEN_MASK 0x7fff | 454 | #define RX_LEN_MASK 0x7fff |
@@ -2739,6 +2736,12 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
2739 | struct net_device *netdev; | 2736 | struct net_device *netdev; |
2740 | int ret; | 2737 | int ret; |
2741 | 2738 | ||
2739 | if (udev->actconfig->desc.bConfigurationValue != 1) { | ||
2740 | usb_driver_set_configuration(udev, 1); | ||
2741 | return -ENODEV; | ||
2742 | } | ||
2743 | |||
2744 | usb_reset_device(udev); | ||
2742 | netdev = alloc_etherdev(sizeof(struct r8152)); | 2745 | netdev = alloc_etherdev(sizeof(struct r8152)); |
2743 | if (!netdev) { | 2746 | if (!netdev) { |
2744 | dev_err(&intf->dev, "Out of memory\n"); | 2747 | dev_err(&intf->dev, "Out of memory\n"); |
@@ -2819,9 +2822,9 @@ static void rtl8152_disconnect(struct usb_interface *intf) | |||
2819 | 2822 | ||
2820 | /* table of devices that work with this driver */ | 2823 | /* table of devices that work with this driver */ |
2821 | static struct usb_device_id rtl8152_table[] = { | 2824 | static struct usb_device_id rtl8152_table[] = { |
2822 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, | 2825 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, |
2823 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, | 2826 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, |
2824 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, | 2827 | {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, |
2825 | {} | 2828 | {} |
2826 | }; | 2829 | }; |
2827 | 2830 | ||
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c deleted file mode 100644 index f0a8791b7636..000000000000 --- a/drivers/net/usb/r815x.c +++ /dev/null | |||
@@ -1,248 +0,0 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/netdevice.h> | ||
3 | #include <linux/mii.h> | ||
4 | #include <linux/usb.h> | ||
5 | #include <linux/usb/cdc.h> | ||
6 | #include <linux/usb/usbnet.h> | ||
7 | |||
8 | #define RTL815x_REQT_READ 0xc0 | ||
9 | #define RTL815x_REQT_WRITE 0x40 | ||
10 | #define RTL815x_REQ_GET_REGS 0x05 | ||
11 | #define RTL815x_REQ_SET_REGS 0x05 | ||
12 | |||
13 | #define MCU_TYPE_PLA 0x0100 | ||
14 | #define OCP_BASE 0xe86c | ||
15 | #define BASE_MII 0xa400 | ||
16 | |||
17 | #define BYTE_EN_DWORD 0xff | ||
18 | #define BYTE_EN_WORD 0x33 | ||
19 | #define BYTE_EN_BYTE 0x11 | ||
20 | |||
21 | #define R815x_PHY_ID 32 | ||
22 | #define REALTEK_VENDOR_ID 0x0bda | ||
23 | |||
24 | |||
25 | static int pla_read_word(struct usb_device *udev, u16 index) | ||
26 | { | ||
27 | int ret; | ||
28 | u8 shift = index & 2; | ||
29 | __le32 *tmp; | ||
30 | |||
31 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
32 | if (!tmp) | ||
33 | return -ENOMEM; | ||
34 | |||
35 | index &= ~3; | ||
36 | |||
37 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
38 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | ||
39 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); | ||
40 | if (ret < 0) | ||
41 | goto out2; | ||
42 | |||
43 | ret = __le32_to_cpu(*tmp); | ||
44 | ret >>= (shift * 8); | ||
45 | ret &= 0xffff; | ||
46 | |||
47 | out2: | ||
48 | kfree(tmp); | ||
49 | return ret; | ||
50 | } | ||
51 | |||
52 | static int pla_write_word(struct usb_device *udev, u16 index, u32 data) | ||
53 | { | ||
54 | __le32 *tmp; | ||
55 | u32 mask = 0xffff; | ||
56 | u16 byen = BYTE_EN_WORD; | ||
57 | u8 shift = index & 2; | ||
58 | int ret; | ||
59 | |||
60 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
61 | if (!tmp) | ||
62 | return -ENOMEM; | ||
63 | |||
64 | data &= mask; | ||
65 | |||
66 | if (shift) { | ||
67 | byen <<= shift; | ||
68 | mask <<= (shift * 8); | ||
69 | data <<= (shift * 8); | ||
70 | index &= ~3; | ||
71 | } | ||
72 | |||
73 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
74 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | ||
75 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); | ||
76 | if (ret < 0) | ||
77 | goto out3; | ||
78 | |||
79 | data |= __le32_to_cpu(*tmp) & ~mask; | ||
80 | *tmp = __cpu_to_le32(data); | ||
81 | |||
82 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
83 | RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, | ||
84 | index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp), | ||
85 | 500); | ||
86 | |||
87 | out3: | ||
88 | kfree(tmp); | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | static int ocp_reg_read(struct usbnet *dev, u16 addr) | ||
93 | { | ||
94 | u16 ocp_base, ocp_index; | ||
95 | int ret; | ||
96 | |||
97 | ocp_base = addr & 0xf000; | ||
98 | ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); | ||
99 | if (ret < 0) | ||
100 | goto out; | ||
101 | |||
102 | ocp_index = (addr & 0x0fff) | 0xb000; | ||
103 | ret = pla_read_word(dev->udev, ocp_index); | ||
104 | |||
105 | out: | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data) | ||
110 | { | ||
111 | u16 ocp_base, ocp_index; | ||
112 | int ret; | ||
113 | |||
114 | ocp_base = addr & 0xf000; | ||
115 | ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); | ||
116 | if (ret < 0) | ||
117 | goto out1; | ||
118 | |||
119 | ocp_index = (addr & 0x0fff) | 0xb000; | ||
120 | ret = pla_write_word(dev->udev, ocp_index, data); | ||
121 | |||
122 | out1: | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) | ||
127 | { | ||
128 | struct usbnet *dev = netdev_priv(netdev); | ||
129 | int ret; | ||
130 | |||
131 | if (phy_id != R815x_PHY_ID) | ||
132 | return -EINVAL; | ||
133 | |||
134 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
135 | return -ENODEV; | ||
136 | |||
137 | ret = ocp_reg_read(dev, BASE_MII + reg * 2); | ||
138 | |||
139 | usb_autopm_put_interface(dev->intf); | ||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | static | ||
144 | void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) | ||
145 | { | ||
146 | struct usbnet *dev = netdev_priv(netdev); | ||
147 | |||
148 | if (phy_id != R815x_PHY_ID) | ||
149 | return; | ||
150 | |||
151 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
152 | return; | ||
153 | |||
154 | ocp_reg_write(dev, BASE_MII + reg * 2, val); | ||
155 | |||
156 | usb_autopm_put_interface(dev->intf); | ||
157 | } | ||
158 | |||
159 | static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) | ||
160 | { | ||
161 | int status; | ||
162 | |||
163 | status = usbnet_cdc_bind(dev, intf); | ||
164 | if (status < 0) | ||
165 | return status; | ||
166 | |||
167 | dev->mii.dev = dev->net; | ||
168 | dev->mii.mdio_read = r815x_mdio_read; | ||
169 | dev->mii.mdio_write = r815x_mdio_write; | ||
170 | dev->mii.phy_id_mask = 0x3f; | ||
171 | dev->mii.reg_num_mask = 0x1f; | ||
172 | dev->mii.phy_id = R815x_PHY_ID; | ||
173 | dev->mii.supports_gmii = 1; | ||
174 | |||
175 | return status; | ||
176 | } | ||
177 | |||
178 | static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) | ||
179 | { | ||
180 | int status; | ||
181 | |||
182 | status = usbnet_cdc_bind(dev, intf); | ||
183 | if (status < 0) | ||
184 | return status; | ||
185 | |||
186 | dev->mii.dev = dev->net; | ||
187 | dev->mii.mdio_read = r815x_mdio_read; | ||
188 | dev->mii.mdio_write = r815x_mdio_write; | ||
189 | dev->mii.phy_id_mask = 0x3f; | ||
190 | dev->mii.reg_num_mask = 0x1f; | ||
191 | dev->mii.phy_id = R815x_PHY_ID; | ||
192 | dev->mii.supports_gmii = 0; | ||
193 | |||
194 | return status; | ||
195 | } | ||
196 | |||
197 | static const struct driver_info r8152_info = { | ||
198 | .description = "RTL8152 ECM Device", | ||
199 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, | ||
200 | .bind = r8152_bind, | ||
201 | .unbind = usbnet_cdc_unbind, | ||
202 | .status = usbnet_cdc_status, | ||
203 | .manage_power = usbnet_manage_power, | ||
204 | }; | ||
205 | |||
206 | static const struct driver_info r8153_info = { | ||
207 | .description = "RTL8153 ECM Device", | ||
208 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, | ||
209 | .bind = r8153_bind, | ||
210 | .unbind = usbnet_cdc_unbind, | ||
211 | .status = usbnet_cdc_status, | ||
212 | .manage_power = usbnet_manage_power, | ||
213 | }; | ||
214 | |||
215 | static const struct usb_device_id products[] = { | ||
216 | { | ||
217 | USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM, | ||
218 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
219 | .driver_info = (unsigned long) &r8152_info, | ||
220 | }, | ||
221 | |||
222 | { | ||
223 | USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM, | ||
224 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
225 | .driver_info = (unsigned long) &r8153_info, | ||
226 | }, | ||
227 | |||
228 | { }, /* END */ | ||
229 | }; | ||
230 | MODULE_DEVICE_TABLE(usb, products); | ||
231 | |||
232 | static struct usb_driver r815x_driver = { | ||
233 | .name = "r815x", | ||
234 | .id_table = products, | ||
235 | .probe = usbnet_probe, | ||
236 | .disconnect = usbnet_disconnect, | ||
237 | .suspend = usbnet_suspend, | ||
238 | .resume = usbnet_resume, | ||
239 | .reset_resume = usbnet_resume, | ||
240 | .supports_autosuspend = 1, | ||
241 | .disable_hub_initiated_lpm = 1, | ||
242 | }; | ||
243 | |||
244 | module_usb_driver(r815x_driver); | ||
245 | |||
246 | MODULE_AUTHOR("Hayes Wang"); | ||
247 | MODULE_DESCRIPTION("Realtek USB ECM device"); | ||
248 | MODULE_LICENSE("GPL"); | ||
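The deleted driver above treats the RTL815x register space as windowed: ocp_reg_read()/ocp_reg_write() split a 16-bit OCP address into a 4 KiB-aligned base, programmed once through OCP_BASE, and a 12-bit offset rebased into the 0xb000 PLA window. A minimal sketch of that split, with a hypothetical helper name and the constants from the deleted code:

    /* Hypothetical helper showing the OCP address split used by the
     * deleted ocp_reg_read()/ocp_reg_write(). */
    static void ocp_addr_split(u16 addr, u16 *base, u16 *index)
    {
            *base  = addr & 0xf000;            /* window base, written via OCP_BASE (0xe86c) */
            *index = (addr & 0x0fff) | 0xb000; /* offset rebased into the PLA window */
    }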
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 3be786faaaec..0fa3b44f7342 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -1762,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev) | |||
1762 | { | 1762 | { |
1763 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1763 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1764 | 1764 | ||
1765 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | 1765 | switch (adapter->intr.type) { |
1766 | vmxnet3_disable_all_intrs(adapter); | 1766 | #ifdef CONFIG_PCI_MSI |
1767 | 1767 | case VMXNET3_IT_MSIX: { | |
1768 | vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); | 1768 | int i; |
1769 | vmxnet3_enable_all_intrs(adapter); | 1769 | for (i = 0; i < adapter->num_rx_queues; i++) |
1770 | vmxnet3_msix_rx(0, &adapter->rx_queue[i]); | ||
1771 | break; | ||
1772 | } | ||
1773 | #endif | ||
1774 | case VMXNET3_IT_MSI: | ||
1775 | default: | ||
1776 | vmxnet3_intr(0, adapter->netdev); | ||
1777 | break; | ||
1778 | } | ||
1770 | 1779 | ||
1771 | } | 1780 | } |
1772 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 1781 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 76cde6ce6551..18a895a949d4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c | |||
@@ -872,8 +872,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
872 | 872 | ||
873 | lockdep_assert_held(&mvm->mutex); | 873 | lockdep_assert_held(&mvm->mutex); |
874 | 874 | ||
875 | /* Rssi update while not associated ?! */ | 875 | /* |
876 | if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) | 876 | * Rssi update while not associated - can happen since the statistics |
877 | * are handled asynchronously | ||
878 | */ | ||
879 | if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) | ||
877 | return; | 880 | return; |
878 | 881 | ||
879 | /* No BT - reports should be disabled */ | 882 | /* No BT - reports should be disabled */ |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f47bcbe2945a..3872ead75488 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -359,13 +359,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
359 | /* 7265 Series */ | 359 | /* 7265 Series */ |
360 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
361 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | 361 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, |
362 | {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)}, | ||
363 | {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, | 362 | {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, |
364 | {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)}, | ||
365 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, | 363 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, |
366 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, | 364 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, |
367 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, | 365 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, |
368 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, | 366 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, |
367 | {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, | ||
369 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, |
370 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, |
371 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c index 5e0eec4d71c7..5d9a8084665d 100644 --- a/drivers/net/wireless/mwifiex/11ac.c +++ b/drivers/net/wireless/mwifiex/11ac.c | |||
@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, | |||
189 | vht_cap->header.len = | 189 | vht_cap->header.len = |
190 | cpu_to_le16(sizeof(struct ieee80211_vht_cap)); | 190 | cpu_to_le16(sizeof(struct ieee80211_vht_cap)); |
191 | memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), | 191 | memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), |
192 | (u8 *)bss_desc->bcn_vht_cap + | 192 | (u8 *)bss_desc->bcn_vht_cap, |
193 | sizeof(struct ieee_types_header), | ||
194 | le16_to_cpu(vht_cap->header.len)); | 193 | le16_to_cpu(vht_cap->header.len)); |
195 | 194 | ||
196 | mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); | 195 | mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); |
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 6261f8c53d44..7db1a89fdd95 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c | |||
@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, | |||
308 | ht_cap->header.len = | 308 | ht_cap->header.len = |
309 | cpu_to_le16(sizeof(struct ieee80211_ht_cap)); | 309 | cpu_to_le16(sizeof(struct ieee80211_ht_cap)); |
310 | memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), | 310 | memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), |
311 | (u8 *) bss_desc->bcn_ht_cap + | 311 | (u8 *)bss_desc->bcn_ht_cap, |
312 | sizeof(struct ieee_types_header), | ||
313 | le16_to_cpu(ht_cap->header.len)); | 312 | le16_to_cpu(ht_cap->header.len)); |
314 | 313 | ||
315 | mwifiex_fill_cap_info(priv, radio_type, ht_cap); | 314 | mwifiex_fill_cap_info(priv, radio_type, ht_cap); |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 0a8a26e10f01..668547c2de84 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) | |||
2101 | curr_bss->ht_info_offset); | 2101 | curr_bss->ht_info_offset); |
2102 | 2102 | ||
2103 | if (curr_bss->bcn_vht_cap) | 2103 | if (curr_bss->bcn_vht_cap) |
2104 | curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + | 2104 | curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf + |
2105 | curr_bss->vht_cap_offset); | 2105 | curr_bss->vht_cap_offset); |
2106 | 2106 | ||
2107 | if (curr_bss->bcn_vht_oper) | 2107 | if (curr_bss->bcn_vht_oper) |
2108 | curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + | 2108 | curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf + |
2109 | curr_bss->vht_info_offset); | 2109 | curr_bss->vht_info_offset); |
2110 | 2110 | ||
2111 | if (curr_bss->bcn_bss_co_2040) | 2111 | if (curr_bss->bcn_bss_co_2040) |
2112 | curr_bss->bcn_bss_co_2040 = | 2112 | curr_bss->bcn_bss_co_2040 = |
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index 123c4bb50e0a..cde0eaf99714 100644 --- a/drivers/net/wireless/ti/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c | |||
@@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl, | |||
180 | wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); | 180 | wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); |
181 | 181 | ||
182 | /* The actual length doesn't include the target's alignment */ | 182 | /* The actual length doesn't include the target's alignment */ |
183 | skb->len = desc->length - PLCP_HEADER_LENGTH; | 183 | skb_trim(skb, desc->length - PLCP_HEADER_LENGTH); |
184 | 184 | ||
185 | fc = (u16 *)skb->data; | 185 | fc = (u16 *)skb->data; |
186 | 186 | ||
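The fix replaces a bare assignment to skb->len with skb_trim(), which also moves skb->tail so the buffer bookkeeping stays consistent. For a linear skb the helper amounts to roughly this (a paraphrase, not the exact kernel source):

    /* Roughly what skb_trim() does for a linear skb: shrink ->len and
     * keep ->tail in step, instead of patching ->len by hand. */
    static inline void skb_trim_sketch(struct sk_buff *skb, unsigned int len)
    {
            if (skb->len > len) {
                    skb->len = len;
                    skb_set_tail_pointer(skb, len);
            }
    }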
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7669d49a67e2..301cc037fda8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -132,8 +132,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
132 | /* If the skb is GSO then we'll also need an extra slot for the | 132 | /* If the skb is GSO then we'll also need an extra slot for the |
133 | * metadata. | 133 | * metadata. |
134 | */ | 134 | */ |
135 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | 135 | if (skb_is_gso(skb)) |
136 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
137 | min_slots_needed++; | 136 | min_slots_needed++; |
138 | 137 | ||
139 | /* If the skb can't possibly fit in the remaining slots | 138 | /* If the skb can't possibly fit in the remaining slots |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e5284bca2d90..438d0c09b7e6 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
240 | struct gnttab_copy *copy_gop; | 240 | struct gnttab_copy *copy_gop; |
241 | struct xenvif_rx_meta *meta; | 241 | struct xenvif_rx_meta *meta; |
242 | unsigned long bytes; | 242 | unsigned long bytes; |
243 | int gso_type; | 243 | int gso_type = XEN_NETIF_GSO_TYPE_NONE; |
244 | 244 | ||
245 | /* Data must not cross a page boundary. */ | 245 | /* Data must not cross a page boundary. */ |
246 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); | 246 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); |
@@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
299 | } | 299 | } |
300 | 300 | ||
301 | /* Leave a gap for the GSO descriptor. */ | 301 | /* Leave a gap for the GSO descriptor. */ |
302 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | 302 | if (skb_is_gso(skb)) { |
303 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | 303 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
304 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 304 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; |
305 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 305 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
306 | else | 306 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
307 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | 307 | } |
308 | 308 | ||
309 | if (*head && ((1 << gso_type) & vif->gso_mask)) | 309 | if (*head && ((1 << gso_type) & vif->gso_mask)) |
310 | vif->rx.req_cons++; | 310 | vif->rx.req_cons++; |
@@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
338 | int head = 1; | 338 | int head = 1; |
339 | int old_meta_prod; | 339 | int old_meta_prod; |
340 | int gso_type; | 340 | int gso_type; |
341 | int gso_size; | ||
342 | 341 | ||
343 | old_meta_prod = npo->meta_prod; | 342 | old_meta_prod = npo->meta_prod; |
344 | 343 | ||
345 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { | 344 | gso_type = XEN_NETIF_GSO_TYPE_NONE; |
346 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | 345 | if (skb_is_gso(skb)) { |
347 | gso_size = skb_shinfo(skb)->gso_size; | 346 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
348 | } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { | 347 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; |
349 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 348 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
350 | gso_size = skb_shinfo(skb)->gso_size; | 349 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
351 | } else { | ||
352 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | ||
353 | gso_size = 0; | ||
354 | } | 350 | } |
355 | 351 | ||
356 | /* Set up a GSO prefix descriptor, if necessary */ | 352 | /* Set up a GSO prefix descriptor, if necessary */ |
@@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
358 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 354 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); |
359 | meta = npo->meta + npo->meta_prod++; | 355 | meta = npo->meta + npo->meta_prod++; |
360 | meta->gso_type = gso_type; | 356 | meta->gso_type = gso_type; |
361 | meta->gso_size = gso_size; | 357 | meta->gso_size = skb_shinfo(skb)->gso_size; |
362 | meta->size = 0; | 358 | meta->size = 0; |
363 | meta->id = req->id; | 359 | meta->id = req->id; |
364 | } | 360 | } |
@@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
368 | 364 | ||
369 | if ((1 << gso_type) & vif->gso_mask) { | 365 | if ((1 << gso_type) & vif->gso_mask) { |
370 | meta->gso_type = gso_type; | 366 | meta->gso_type = gso_type; |
371 | meta->gso_size = gso_size; | 367 | meta->gso_size = skb_shinfo(skb)->gso_size; |
372 | } else { | 368 | } else { |
373 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | 369 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; |
374 | meta->gso_size = 0; | 370 | meta->gso_size = 0; |
@@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
500 | size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 496 | size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
501 | max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); | 497 | max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); |
502 | } | 498 | } |
503 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | 499 | if (skb_is_gso(skb) && |
504 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 500 | (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || |
501 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) | ||
505 | max_slots_needed++; | 502 | max_slots_needed++; |
506 | 503 | ||
507 | /* If the skb may not fit then bail out now */ | 504 | /* If the skb may not fit then bail out now */ |
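Both netback hunks lean on skb_is_gso(), which besides being shorter guarantees that gso_size is non-zero before it is copied into the meta slots. The helper is essentially this (paraphrased from include/linux/skbuff.h):

    /* Paraphrase of skb_is_gso(): a non-zero gso_size marks a GSO skb,
     * so gso_size is only meaningful when this returns true. */
    static inline bool skb_is_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;
    }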
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 00660cc502c5..38901665c770 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, | |||
162 | 162 | ||
163 | avail = *r; | 163 | avail = *r; |
164 | pci_clip_resource_to_region(bus, &avail, region); | 164 | pci_clip_resource_to_region(bus, &avail, region); |
165 | if (!resource_size(&avail)) | ||
166 | continue; | ||
167 | 165 | ||
168 | /* | 166 | /* |
169 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to | 167 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6b05f6134b68..fdbc294821e6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1192,6 +1192,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) | |||
1192 | return err; | 1192 | return err; |
1193 | pci_fixup_device(pci_fixup_enable, dev); | 1193 | pci_fixup_device(pci_fixup_enable, dev); |
1194 | 1194 | ||
1195 | if (dev->msi_enabled || dev->msix_enabled) | ||
1196 | return 0; | ||
1197 | |||
1195 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 1198 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
1196 | if (pin) { | 1199 | if (pin) { |
1197 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 1200 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 167f3d00c916..66977ebf13b3 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
183 | struct resource r = {0}; | 183 | struct resource r = {0}; |
184 | int i, flags; | 184 | int i, flags; |
185 | 185 | ||
186 | if (acpi_dev_resource_memory(res, &r) | 186 | if (acpi_dev_resource_address_space(res, &r) |
187 | || acpi_dev_resource_io(res, &r) | ||
188 | || acpi_dev_resource_address_space(res, &r) | ||
189 | || acpi_dev_resource_ext_address_space(res, &r)) { | 187 | || acpi_dev_resource_ext_address_space(res, &r)) { |
190 | pnp_add_resource(dev, &r); | 188 | pnp_add_resource(dev, &r); |
191 | return AE_OK; | 189 | return AE_OK; |
@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
217 | } | 215 | } |
218 | 216 | ||
219 | switch (res->type) { | 217 | switch (res->type) { |
218 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
219 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
220 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
221 | if (acpi_dev_resource_memory(res, &r)) | ||
222 | pnp_add_resource(dev, &r); | ||
223 | break; | ||
224 | case ACPI_RESOURCE_TYPE_IO: | ||
225 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
226 | if (acpi_dev_resource_io(res, &r)) | ||
227 | pnp_add_resource(dev, &r); | ||
228 | break; | ||
220 | case ACPI_RESOURCE_TYPE_DMA: | 229 | case ACPI_RESOURCE_TYPE_DMA: |
221 | dma = &res->data.dma; | 230 | dma = &res->data.dma; |
222 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) | 231 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 1f375051483a..5642a9b250c2 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -325,7 +325,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
325 | if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) | 325 | if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) |
326 | continue; | 326 | continue; |
327 | 327 | ||
328 | if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) | 328 | if (sc->device->lun != abrt_task->sc->device->lun) |
329 | continue; | 329 | continue; |
330 | 330 | ||
331 | /* Invalidate WRB Posted for this Task */ | 331 | /* Invalidate WRB Posted for this Task */ |
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4911310a38f5..22a9bb1abae1 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | #define for_each_isci_host(id, ihost, pdev) \ | 313 | #define for_each_isci_host(id, ihost, pdev) \ |
314 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ | 314 | for (id = 0; id < SCI_MAX_CONTROLLERS && \ |
315 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ | 315 | (ihost = to_pci_info(pdev)->hosts[id]); id++) |
316 | ihost = to_pci_info(pdev)->hosts[++id]) | ||
317 | 316 | ||
318 | static inline void wait_for_start(struct isci_host *ihost) | 317 | static inline void wait_for_start(struct isci_host *ihost) |
319 | { | 318 | { |
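The old macro read to_pci_info(pdev)->hosts[id] before checking id against the array size, so iterating a fully populated hosts[] dereferenced one slot past the end on the final ++id. The rewrite tests the index first; the general shape of the safe form, with hypothetical names:

    /* Hypothetical illustration: the bound check must come before the
     * array access, otherwise the last increment reads out of bounds
     * before the loop condition can stop it. */
    #define for_each_entry(i, e, arr, max) \
            for ((i) = 0; (i) < (max) && ((e) = (arr)[(i)]); (i)++)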
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 85c77f6b802b..ac879745ef80 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost, | |||
615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); | 615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); |
616 | } else { | 616 | } else { |
617 | /* the phy is already the part of the port */ | 617 | /* the phy is already the part of the port */ |
618 | u32 port_state = iport->sm.current_state_id; | ||
619 | |||
620 | /* if the PORT'S state is resetting, then the link up is from | ||
621 | * a port hard reset; in this case, we need to tell the port | ||
622 | * that link up is received | ||
623 | */ | ||
624 | BUG_ON(port_state != SCI_PORT_RESETTING); | ||
625 | port_agent->phy_ready_mask |= 1 << phy_index; | 618 | port_agent->phy_ready_mask |= 1 << phy_index; |
626 | sci_port_link_up(iport, iphy); | 619 | sci_port_link_up(iport, iphy); |
627 | } | 620 | } |
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 0d30ca849e8f..5d6fda72d659 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) | |||
801 | /* XXX: need to cleanup any ireqs targeting this | 801 | /* XXX: need to cleanup any ireqs targeting this |
802 | * domain_device | 802 | * domain_device |
803 | */ | 803 | */ |
804 | ret = TMF_RESP_FUNC_COMPLETE; | 804 | ret = -ENODEV; |
805 | goto out; | 805 | goto out; |
806 | } | 806 | } |
807 | 807 | ||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1fe95ef23e1..266724b6b899 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2996,8 +2996,7 @@ struct qla_hw_data { | |||
2996 | IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ | 2996 | IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ |
2997 | IS_QLA8044(ha)) | 2997 | IS_QLA8044(ha)) |
2998 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 2998 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
2999 | #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ | 2999 | #define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) |
3000 | IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) | ||
3001 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 3000 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
3002 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 3001 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
3003 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) | 3002 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9bc86b9e86b1..0a1dcb43d18b 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -2880,6 +2880,7 @@ static int | |||
2880 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | 2880 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
2881 | { | 2881 | { |
2882 | #define MIN_MSIX_COUNT 2 | 2882 | #define MIN_MSIX_COUNT 2 |
2883 | #define ATIO_VECTOR 2 | ||
2883 | int i, ret; | 2884 | int i, ret; |
2884 | struct msix_entry *entries; | 2885 | struct msix_entry *entries; |
2885 | struct qla_msix_entry *qentry; | 2886 | struct qla_msix_entry *qentry; |
@@ -2936,34 +2937,47 @@ msix_failed: | |||
2936 | } | 2937 | } |
2937 | 2938 | ||
2938 | /* Enable MSI-X vectors for the base queue */ | 2939 | /* Enable MSI-X vectors for the base queue */ |
2939 | for (i = 0; i < ha->msix_count; i++) { | 2940 | for (i = 0; i < 2; i++) { |
2940 | qentry = &ha->msix_entries[i]; | 2941 | qentry = &ha->msix_entries[i]; |
2941 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { | 2942 | if (IS_P3P_TYPE(ha)) |
2942 | ret = request_irq(qentry->vector, | ||
2943 | qla83xx_msix_entries[i].handler, | ||
2944 | 0, qla83xx_msix_entries[i].name, rsp); | ||
2945 | } else if (IS_P3P_TYPE(ha)) { | ||
2946 | ret = request_irq(qentry->vector, | 2943 | ret = request_irq(qentry->vector, |
2947 | qla82xx_msix_entries[i].handler, | 2944 | qla82xx_msix_entries[i].handler, |
2948 | 0, qla82xx_msix_entries[i].name, rsp); | 2945 | 0, qla82xx_msix_entries[i].name, rsp); |
2949 | } else { | 2946 | else |
2950 | ret = request_irq(qentry->vector, | 2947 | ret = request_irq(qentry->vector, |
2951 | msix_entries[i].handler, | 2948 | msix_entries[i].handler, |
2952 | 0, msix_entries[i].name, rsp); | 2949 | 0, msix_entries[i].name, rsp); |
2953 | } | 2950 | if (ret) |
2954 | if (ret) { | 2951 | goto msix_register_fail; |
2955 | ql_log(ql_log_fatal, vha, 0x00cb, | ||
2956 | "MSI-X: unable to register handler -- %x/%d.\n", | ||
2957 | qentry->vector, ret); | ||
2958 | qla24xx_disable_msix(ha); | ||
2959 | ha->mqenable = 0; | ||
2960 | goto msix_out; | ||
2961 | } | ||
2962 | qentry->have_irq = 1; | 2952 | qentry->have_irq = 1; |
2963 | qentry->rsp = rsp; | 2953 | qentry->rsp = rsp; |
2964 | rsp->msix = qentry; | 2954 | rsp->msix = qentry; |
2965 | } | 2955 | } |
2966 | 2956 | ||
2957 | /* | ||
2958 | * If target mode is enabled, also request the vector for the ATIO | ||
2959 | * queue. | ||
2960 | */ | ||
2961 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { | ||
2962 | qentry = &ha->msix_entries[ATIO_VECTOR]; | ||
2963 | ret = request_irq(qentry->vector, | ||
2964 | qla83xx_msix_entries[ATIO_VECTOR].handler, | ||
2965 | 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); | ||
2966 | qentry->have_irq = 1; | ||
2967 | qentry->rsp = rsp; | ||
2968 | rsp->msix = qentry; | ||
2969 | } | ||
2970 | |||
2971 | msix_register_fail: | ||
2972 | if (ret) { | ||
2973 | ql_log(ql_log_fatal, vha, 0x00cb, | ||
2974 | "MSI-X: unable to register handler -- %x/%d.\n", | ||
2975 | qentry->vector, ret); | ||
2976 | qla24xx_disable_msix(ha); | ||
2977 | ha->mqenable = 0; | ||
2978 | goto msix_out; | ||
2979 | } | ||
2980 | |||
2967 | /* Enable MSI-X vector for response queue update for queue 0 */ | 2981 | /* Enable MSI-X vector for response queue update for queue 0 */ |
2968 | if (IS_QLA83XX(ha)) { | 2982 | if (IS_QLA83XX(ha)) { |
2969 | if (ha->msixbase && ha->mqiobase && | 2983 | if (ha->msixbase && ha->mqiobase && |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 17d740427240..9969fa1ef7c4 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice) | |||
1419 | { | 1419 | { |
1420 | struct stor_mem_pools *memp = sdevice->hostdata; | 1420 | struct stor_mem_pools *memp = sdevice->hostdata; |
1421 | 1421 | ||
1422 | if (!memp) | ||
1423 | return; | ||
1424 | |||
1422 | mempool_destroy(memp->request_mempool); | 1425 | mempool_destroy(memp->request_mempool); |
1423 | kmem_cache_destroy(memp->request_pool); | 1426 | kmem_cache_destroy(memp->request_pool); |
1424 | kfree(memp); | 1427 | kfree(memp); |
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c index 62ce0e86f14b..f996e082faf4 100644 --- a/drivers/staging/imx-drm/imx-hdmi.c +++ b/drivers/staging/imx-drm/imx-hdmi.c | |||
@@ -1883,7 +1883,6 @@ static int imx_hdmi_platform_remove(struct platform_device *pdev) | |||
1883 | struct drm_connector *connector = &hdmi->connector; | 1883 | struct drm_connector *connector = &hdmi->connector; |
1884 | struct drm_encoder *encoder = &hdmi->encoder; | 1884 | struct drm_encoder *encoder = &hdmi->encoder; |
1885 | 1885 | ||
1886 | drm_mode_connector_detach_encoder(connector, encoder); | ||
1887 | imx_drm_remove_connector(hdmi->imx_drm_connector); | 1886 | imx_drm_remove_connector(hdmi->imx_drm_connector); |
1888 | imx_drm_remove_encoder(hdmi->imx_drm_encoder); | 1887 | imx_drm_remove_encoder(hdmi->imx_drm_encoder); |
1889 | 1888 | ||
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c index 7e593296ac47..c703e986b44c 100644 --- a/drivers/staging/imx-drm/imx-ldb.c +++ b/drivers/staging/imx-drm/imx-ldb.c | |||
@@ -595,8 +595,6 @@ static int imx_ldb_remove(struct platform_device *pdev) | |||
595 | struct drm_connector *connector = &channel->connector; | 595 | struct drm_connector *connector = &channel->connector; |
596 | struct drm_encoder *encoder = &channel->encoder; | 596 | struct drm_encoder *encoder = &channel->encoder; |
597 | 597 | ||
598 | drm_mode_connector_detach_encoder(connector, encoder); | ||
599 | |||
600 | imx_drm_remove_connector(channel->imx_drm_connector); | 598 | imx_drm_remove_connector(channel->imx_drm_connector); |
601 | imx_drm_remove_encoder(channel->imx_drm_encoder); | 599 | imx_drm_remove_encoder(channel->imx_drm_encoder); |
602 | } | 600 | } |
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 9abc7ca8b6cf..64729fa4a3d4 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c | |||
@@ -709,8 +709,6 @@ static int imx_tve_remove(struct platform_device *pdev) | |||
709 | struct drm_connector *connector = &tve->connector; | 709 | struct drm_connector *connector = &tve->connector; |
710 | struct drm_encoder *encoder = &tve->encoder; | 710 | struct drm_encoder *encoder = &tve->encoder; |
711 | 711 | ||
712 | drm_mode_connector_detach_encoder(connector, encoder); | ||
713 | |||
714 | imx_drm_remove_connector(tve->imx_drm_connector); | 712 | imx_drm_remove_connector(tve->imx_drm_connector); |
715 | imx_drm_remove_encoder(tve->imx_drm_encoder); | 713 | imx_drm_remove_encoder(tve->imx_drm_encoder); |
716 | 714 | ||
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c index 351d61dede00..823d015d2140 100644 --- a/drivers/staging/imx-drm/parallel-display.c +++ b/drivers/staging/imx-drm/parallel-display.c | |||
@@ -244,8 +244,6 @@ static int imx_pd_remove(struct platform_device *pdev) | |||
244 | struct drm_connector *connector = &imxpd->connector; | 244 | struct drm_connector *connector = &imxpd->connector; |
245 | struct drm_encoder *encoder = &imxpd->encoder; | 245 | struct drm_encoder *encoder = &imxpd->encoder; |
246 | 246 | ||
247 | drm_mode_connector_detach_encoder(connector, encoder); | ||
248 | |||
249 | imx_drm_remove_connector(imxpd->imx_drm_connector); | 247 | imx_drm_remove_connector(imxpd->imx_drm_connector); |
250 | imx_drm_remove_encoder(imxpd->imx_drm_encoder); | 248 | imx_drm_remove_encoder(imxpd->imx_drm_encoder); |
251 | 249 | ||
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index cf32f0393369..c0f3718b77a8 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -513,7 +513,7 @@ struct cifs_mnt_data { | |||
513 | static inline unsigned int | 513 | static inline unsigned int |
514 | get_rfc1002_length(void *buf) | 514 | get_rfc1002_length(void *buf) |
515 | { | 515 | { |
516 | return be32_to_cpu(*((__be32 *)buf)); | 516 | return be32_to_cpu(*((__be32 *)buf)) & 0xffffff; |
517 | } | 517 | } |
518 | 518 | ||
519 | static inline void | 519 | static inline void |
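SMB traffic is framed by a 4-byte RFC 1002 session header: one message-type byte followed by a 3-byte big-endian length, which is why the helper now masks with 0xffffff instead of trusting all 32 bits. A sketch of the assumed framing:

    /* Assumed RFC 1002 framing behind get_rfc1002_length(): byte 0 is
     * the message type, bytes 1-3 the big-endian payload length, hence
     * the 24-bit mask. */
    static unsigned int rfc1002_len_sketch(const void *buf)
    {
            return be32_to_cpu(*(const __be32 *)buf) & 0xffffff;
    }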
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 53c15074bb36..834fce759d80 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -2579,31 +2579,19 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov, | |||
2579 | struct cifsInodeInfo *cinode = CIFS_I(inode); | 2579 | struct cifsInodeInfo *cinode = CIFS_I(inode); |
2580 | struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; | 2580 | struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; |
2581 | ssize_t rc = -EACCES; | 2581 | ssize_t rc = -EACCES; |
2582 | loff_t lock_pos = pos; | ||
2582 | 2583 | ||
2583 | BUG_ON(iocb->ki_pos != pos); | 2584 | if (file->f_flags & O_APPEND) |
2584 | 2585 | lock_pos = i_size_read(inode); | |
2585 | /* | 2586 | /* |
2586 | * We need to hold the sem to be sure nobody modifies lock list | 2587 | * We need to hold the sem to be sure nobody modifies lock list |
2587 | * with a brlock that prevents writing. | 2588 | * with a brlock that prevents writing. |
2588 | */ | 2589 | */ |
2589 | down_read(&cinode->lock_sem); | 2590 | down_read(&cinode->lock_sem); |
2590 | if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs), | 2591 | if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs), |
2591 | server->vals->exclusive_lock_type, NULL, | 2592 | server->vals->exclusive_lock_type, NULL, |
2592 | CIFS_WRITE_OP)) { | 2593 | CIFS_WRITE_OP)) |
2593 | mutex_lock(&inode->i_mutex); | 2594 | rc = generic_file_aio_write(iocb, iov, nr_segs, pos); |
2594 | rc = __generic_file_aio_write(iocb, iov, nr_segs, | ||
2595 | &iocb->ki_pos); | ||
2596 | mutex_unlock(&inode->i_mutex); | ||
2597 | } | ||
2598 | |||
2599 | if (rc > 0) { | ||
2600 | ssize_t err; | ||
2601 | |||
2602 | err = generic_write_sync(file, iocb->ki_pos - rc, rc); | ||
2603 | if (err < 0) | ||
2604 | rc = err; | ||
2605 | } | ||
2606 | |||
2607 | up_read(&cinode->lock_sem); | 2595 | up_read(&cinode->lock_sem); |
2608 | return rc; | 2596 | return rc; |
2609 | } | 2597 | } |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b37570952846..18cd5650a5fc 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -270,6 +270,26 @@ cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, | |||
270 | iov->iov_len = rqst->rq_pagesz; | 270 | iov->iov_len = rqst->rq_pagesz; |
271 | } | 271 | } |
272 | 272 | ||
273 | static unsigned long | ||
274 | rqst_len(struct smb_rqst *rqst) | ||
275 | { | ||
276 | unsigned int i; | ||
277 | struct kvec *iov = rqst->rq_iov; | ||
278 | unsigned long buflen = 0; | ||
279 | |||
280 | /* total up iov array first */ | ||
281 | for (i = 0; i < rqst->rq_nvec; i++) | ||
282 | buflen += iov[i].iov_len; | ||
283 | |||
284 | /* add in the page array if there is one */ | ||
285 | if (rqst->rq_npages) { | ||
286 | buflen += rqst->rq_pagesz * (rqst->rq_npages - 1); | ||
287 | buflen += rqst->rq_tailsz; | ||
288 | } | ||
289 | |||
290 | return buflen; | ||
291 | } | ||
292 | |||
273 | static int | 293 | static int |
274 | smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | 294 | smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
275 | { | 295 | { |
@@ -277,6 +297,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
277 | struct kvec *iov = rqst->rq_iov; | 297 | struct kvec *iov = rqst->rq_iov; |
278 | int n_vec = rqst->rq_nvec; | 298 | int n_vec = rqst->rq_nvec; |
279 | unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); | 299 | unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); |
300 | unsigned long send_length; | ||
280 | unsigned int i; | 301 | unsigned int i; |
281 | size_t total_len = 0, sent; | 302 | size_t total_len = 0, sent; |
282 | struct socket *ssocket = server->ssocket; | 303 | struct socket *ssocket = server->ssocket; |
@@ -285,6 +306,14 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
285 | if (ssocket == NULL) | 306 | if (ssocket == NULL) |
286 | return -ENOTSOCK; | 307 | return -ENOTSOCK; |
287 | 308 | ||
309 | /* sanity check send length */ | ||
310 | send_length = rqst_len(rqst); | ||
311 | if (send_length != smb_buf_length + 4) { | ||
312 | WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n", | ||
313 | send_length, smb_buf_length); | ||
314 | return -EIO; | ||
315 | } | ||
316 | |||
288 | cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); | 317 | cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); |
289 | dump_smb(iov[0].iov_base, iov[0].iov_len); | 318 | dump_smb(iov[0].iov_base, iov[0].iov_len); |
290 | 319 | ||
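rqst_len() totals every kvec plus the page array, where all pages but the last are full (rq_pagesz) and the last contributes rq_tailsz; smb_send_rqst() then refuses to transmit unless that total matches the RFC 1002 length plus its 4-byte header. A worked example under assumed values:

    /* Assumed example: 2 kvecs of 24 and 100 bytes plus 3 pages with
     * rq_pagesz = 4096 and rq_tailsz = 512 give
     *   24 + 100 + 4096 * (3 - 1) + 512 = 8828 bytes,
     * which must equal get_rfc1002_length(iov[0].iov_base) + 4 or the
     * send is rejected with -EIO. */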
diff --git a/fs/dcache.c b/fs/dcache.c index 265e0ce9769c..66dc62cb766d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -3112,6 +3112,7 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen) | |||
3112 | end = ERR_PTR(-ENAMETOOLONG); | 3112 | end = ERR_PTR(-ENAMETOOLONG); |
3113 | return end; | 3113 | return end; |
3114 | } | 3114 | } |
3115 | EXPORT_SYMBOL(simple_dname); | ||
3115 | 3116 | ||
3116 | /* | 3117 | /* |
3117 | * Write full pathname from the root of the filesystem into the buffer. | 3118 | * Write full pathname from the root of the filesystem into the buffer. |
diff --git a/fs/file.c b/fs/file.c --- a/fs/file.c +++ b/fs/file.c | |||
@@ -683,35 +683,65 @@ EXPORT_SYMBOL(fget_raw); | |||
683 | * The fput_needed flag returned by fget_light should be passed to the | 683 | * The fput_needed flag returned by fget_light should be passed to the |
684 | * corresponding fput_light. | 684 | * corresponding fput_light. |
685 | */ | 685 | */ |
686 | struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed) | 686 | static unsigned long __fget_light(unsigned int fd, fmode_t mask) |
687 | { | 687 | { |
688 | struct files_struct *files = current->files; | 688 | struct files_struct *files = current->files; |
689 | struct file *file; | 689 | struct file *file; |
690 | 690 | ||
691 | *fput_needed = 0; | ||
692 | if (atomic_read(&files->count) == 1) { | 691 | if (atomic_read(&files->count) == 1) { |
693 | file = __fcheck_files(files, fd); | 692 | file = __fcheck_files(files, fd); |
694 | if (file && (file->f_mode & mask)) | 693 | if (!file || unlikely(file->f_mode & mask)) |
695 | file = NULL; | 694 | return 0; |
695 | return (unsigned long)file; | ||
696 | } else { | 696 | } else { |
697 | file = __fget(fd, mask); | 697 | file = __fget(fd, mask); |
698 | if (file) | 698 | if (!file) |
699 | *fput_needed = 1; | 699 | return 0; |
700 | return FDPUT_FPUT | (unsigned long)file; | ||
700 | } | 701 | } |
701 | |||
702 | return file; | ||
703 | } | 702 | } |
704 | struct file *fget_light(unsigned int fd, int *fput_needed) | 703 | unsigned long __fdget(unsigned int fd) |
705 | { | 704 | { |
706 | return __fget_light(fd, FMODE_PATH, fput_needed); | 705 | return __fget_light(fd, FMODE_PATH); |
707 | } | 706 | } |
708 | EXPORT_SYMBOL(fget_light); | 707 | EXPORT_SYMBOL(__fdget); |
709 | 708 | ||
710 | struct file *fget_raw_light(unsigned int fd, int *fput_needed) | 709 | unsigned long __fdget_raw(unsigned int fd) |
711 | { | 710 | { |
712 | return __fget_light(fd, 0, fput_needed); | 711 | return __fget_light(fd, 0); |
712 | } | ||
713 | |||
714 | unsigned long __fdget_pos(unsigned int fd) | ||
715 | { | ||
716 | struct files_struct *files = current->files; | ||
717 | struct file *file; | ||
718 | unsigned long v; | ||
719 | |||
720 | if (atomic_read(&files->count) == 1) { | ||
721 | file = __fcheck_files(files, fd); | ||
722 | v = 0; | ||
723 | } else { | ||
724 | file = __fget(fd, 0); | ||
725 | v = FDPUT_FPUT; | ||
726 | } | ||
727 | if (!file) | ||
728 | return 0; | ||
729 | |||
730 | if (file->f_mode & FMODE_ATOMIC_POS) { | ||
731 | if (file_count(file) > 1) { | ||
732 | v |= FDPUT_POS_UNLOCK; | ||
733 | mutex_lock(&file->f_pos_lock); | ||
734 | } | ||
735 | } | ||
736 | return v | (unsigned long)file; | ||
713 | } | 737 | } |
714 | 738 | ||
739 | /* | ||
740 | * We only lock f_pos if we have threads or if the file might be | ||
741 | * shared with another process. In both cases we'll have an elevated | ||
742 | * file count (done either by fdget() or by fork()). | ||
743 | */ | ||
744 | |||
715 | void set_close_on_exec(unsigned int fd, int flag) | 745 | void set_close_on_exec(unsigned int fd, int flag) |
716 | { | 746 | { |
717 | struct files_struct *files = current->files; | 747 | struct files_struct *files = current->files; |
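The unsigned long returned by the new __fdget*() helpers is a tagged pointer: struct file is word-aligned, so the two low bits are free to carry FDPUT_FPUT and FDPUT_POS_UNLOCK. The caller-side decode is essentially this (paraphrased from include/linux/file.h):

    /* Paraphrase of the tagged-pointer decode: the low two bits are
     * flags, the rest is the struct file pointer. */
    struct fd {
            struct file *file;
            unsigned int flags;
    };

    static inline struct fd __to_fd(unsigned long v)
    {
            return (struct fd){ (struct file *)(v & ~3), v & 3 };
    }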
diff --git a/fs/file_table.c b/fs/file_table.c index 5fff9030be34..5b24008ea4f6 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -135,6 +135,7 @@ struct file *get_empty_filp(void) | |||
135 | atomic_long_set(&f->f_count, 1); | 135 | atomic_long_set(&f->f_count, 1); |
136 | rwlock_init(&f->f_owner.lock); | 136 | rwlock_init(&f->f_owner.lock); |
137 | spin_lock_init(&f->f_lock); | 137 | spin_lock_init(&f->f_lock); |
138 | mutex_init(&f->f_pos_lock); | ||
138 | eventpoll_init_file(f); | 139 | eventpoll_init_file(f); |
139 | /* f->f_version: 0 */ | 140 | /* f->f_version: 0 */ |
140 | return f; | 141 | return f; |
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index 968ce411db53..32602c667b4a 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c | |||
@@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, | |||
103 | folder = &entry->folder; | 103 | folder = &entry->folder; |
104 | memset(folder, 0, sizeof(*folder)); | 104 | memset(folder, 0, sizeof(*folder)); |
105 | folder->type = cpu_to_be16(HFSPLUS_FOLDER); | 105 | folder->type = cpu_to_be16(HFSPLUS_FOLDER); |
106 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) | ||
107 | folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT); | ||
106 | folder->id = cpu_to_be32(inode->i_ino); | 108 | folder->id = cpu_to_be32(inode->i_ino); |
107 | HFSPLUS_I(inode)->create_date = | 109 | HFSPLUS_I(inode)->create_date = |
108 | folder->create_date = | 110 | folder->create_date = |
@@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid, | |||
203 | return hfs_brec_find(fd, hfs_find_rec_by_key); | 205 | return hfs_brec_find(fd, hfs_find_rec_by_key); |
204 | } | 206 | } |
205 | 207 | ||
208 | static void hfsplus_subfolders_inc(struct inode *dir) | ||
209 | { | ||
210 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); | ||
211 | |||
212 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { | ||
213 | /* | ||
214 | * Increment subfolder count. Note, the value is only meaningful | ||
215 | * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set. | ||
216 | */ | ||
217 | HFSPLUS_I(dir)->subfolders++; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | static void hfsplus_subfolders_dec(struct inode *dir) | ||
222 | { | ||
223 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); | ||
224 | |||
225 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { | ||
226 | /* | ||
227 | * Decrement subfolder count. Note, the value is only meaningful | ||
228 | * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set. | ||
229 | * | ||
230 | * Check for zero. Some subfolders may have been created | ||
231 | * by an implementation ignorant of this counter. | ||
232 | */ | ||
233 | if (HFSPLUS_I(dir)->subfolders) | ||
234 | HFSPLUS_I(dir)->subfolders--; | ||
235 | } | ||
236 | } | ||
237 | |||
206 | int hfsplus_create_cat(u32 cnid, struct inode *dir, | 238 | int hfsplus_create_cat(u32 cnid, struct inode *dir, |
207 | struct qstr *str, struct inode *inode) | 239 | struct qstr *str, struct inode *inode) |
208 | { | 240 | { |
@@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, | |||
247 | goto err1; | 279 | goto err1; |
248 | 280 | ||
249 | dir->i_size++; | 281 | dir->i_size++; |
282 | if (S_ISDIR(inode->i_mode)) | ||
283 | hfsplus_subfolders_inc(dir); | ||
250 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; | 284 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; |
251 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); | 285 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); |
252 | 286 | ||
@@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) | |||
336 | goto out; | 370 | goto out; |
337 | 371 | ||
338 | dir->i_size--; | 372 | dir->i_size--; |
373 | if (type == HFSPLUS_FOLDER) | ||
374 | hfsplus_subfolders_dec(dir); | ||
339 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; | 375 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; |
340 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); | 376 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); |
341 | 377 | ||
@@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid, | |||
380 | 416 | ||
381 | hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, | 417 | hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, |
382 | src_fd.entrylength); | 418 | src_fd.entrylength); |
419 | type = be16_to_cpu(entry.type); | ||
383 | 420 | ||
384 | /* create new dir entry with the data from the old entry */ | 421 | /* create new dir entry with the data from the old entry */ |
385 | hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); | 422 | hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); |
@@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid, | |||
394 | if (err) | 431 | if (err) |
395 | goto out; | 432 | goto out; |
396 | dst_dir->i_size++; | 433 | dst_dir->i_size++; |
434 | if (type == HFSPLUS_FOLDER) | ||
435 | hfsplus_subfolders_inc(dst_dir); | ||
397 | dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; | 436 | dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; |
398 | 437 | ||
399 | /* finally remove the old entry */ | 438 | /* finally remove the old entry */ |
@@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid, | |||
405 | if (err) | 444 | if (err) |
406 | goto out; | 445 | goto out; |
407 | src_dir->i_size--; | 446 | src_dir->i_size--; |
447 | if (type == HFSPLUS_FOLDER) | ||
448 | hfsplus_subfolders_dec(src_dir); | ||
408 | src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; | 449 | src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; |
409 | 450 | ||
410 | /* remove old thread entry */ | 451 | /* remove old thread entry */ |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 08846425b67f..62d571eb69ba 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -242,6 +242,7 @@ struct hfsplus_inode_info { | |||
242 | */ | 242 | */ |
243 | sector_t fs_blocks; | 243 | sector_t fs_blocks; |
244 | u8 userflags; /* BSD user file flags */ | 244 | u8 userflags; /* BSD user file flags */ |
245 | u32 subfolders; /* Subfolder count (HFSX only) */ | ||
245 | struct list_head open_dir_list; | 246 | struct list_head open_dir_list; |
246 | loff_t phys_size; | 247 | loff_t phys_size; |
247 | 248 | ||
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h index 8ffb3a8ffe75..5a126828d85e 100644 --- a/fs/hfsplus/hfsplus_raw.h +++ b/fs/hfsplus/hfsplus_raw.h | |||
@@ -261,7 +261,7 @@ struct hfsplus_cat_folder { | |||
261 | struct DInfo user_info; | 261 | struct DInfo user_info; |
262 | struct DXInfo finder_info; | 262 | struct DXInfo finder_info; |
263 | __be32 text_encoding; | 263 | __be32 text_encoding; |
264 | u32 reserved; | 264 | __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */ |
265 | } __packed; | 265 | } __packed; |
266 | 266 | ||
267 | /* HFS file info (stolen from hfs.h) */ | 267 | /* HFS file info (stolen from hfs.h) */ |
@@ -301,11 +301,13 @@ struct hfsplus_cat_file { | |||
301 | struct hfsplus_fork_raw rsrc_fork; | 301 | struct hfsplus_fork_raw rsrc_fork; |
302 | } __packed; | 302 | } __packed; |
303 | 303 | ||
304 | /* File attribute bits */ | 304 | /* File and folder flag bits */ |
305 | #define HFSPLUS_FILE_LOCKED 0x0001 | 305 | #define HFSPLUS_FILE_LOCKED 0x0001 |
306 | #define HFSPLUS_FILE_THREAD_EXISTS 0x0002 | 306 | #define HFSPLUS_FILE_THREAD_EXISTS 0x0002 |
307 | #define HFSPLUS_XATTR_EXISTS 0x0004 | 307 | #define HFSPLUS_XATTR_EXISTS 0x0004 |
308 | #define HFSPLUS_ACL_EXISTS 0x0008 | 308 | #define HFSPLUS_ACL_EXISTS 0x0008 |
309 | #define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count | ||
310 | * (HFSX only) */ | ||
309 | 311 | ||
310 | /* HFS+ catalog thread (part of a cat_entry) */ | 312 | /* HFS+ catalog thread (part of a cat_entry) */ |
311 | struct hfsplus_cat_thread { | 313 | struct hfsplus_cat_thread { |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index fa929f325f87..a4f45bd88a63 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) | |||
375 | hip->extent_state = 0; | 375 | hip->extent_state = 0; |
376 | hip->flags = 0; | 376 | hip->flags = 0; |
377 | hip->userflags = 0; | 377 | hip->userflags = 0; |
378 | hip->subfolders = 0; | ||
378 | memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); | 379 | memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); |
379 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); | 380 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); |
380 | hip->alloc_blocks = 0; | 381 | hip->alloc_blocks = 0; |
@@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | |||
494 | inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); | 495 | inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); |
495 | HFSPLUS_I(inode)->create_date = folder->create_date; | 496 | HFSPLUS_I(inode)->create_date = folder->create_date; |
496 | HFSPLUS_I(inode)->fs_blocks = 0; | 497 | HFSPLUS_I(inode)->fs_blocks = 0; |
498 | if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) { | ||
499 | HFSPLUS_I(inode)->subfolders = | ||
500 | be32_to_cpu(folder->subfolders); | ||
501 | } | ||
497 | inode->i_op = &hfsplus_dir_inode_operations; | 502 | inode->i_op = &hfsplus_dir_inode_operations; |
498 | inode->i_fop = &hfsplus_dir_operations; | 503 | inode->i_fop = &hfsplus_dir_operations; |
499 | } else if (type == HFSPLUS_FILE) { | 504 | } else if (type == HFSPLUS_FILE) { |
@@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode) | |||
566 | folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); | 571 | folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); |
567 | folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); | 572 | folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); |
568 | folder->valence = cpu_to_be32(inode->i_size - 2); | 573 | folder->valence = cpu_to_be32(inode->i_size - 2); |
574 | if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) { | ||
575 | folder->subfolders = | ||
576 | cpu_to_be32(HFSPLUS_I(inode)->subfolders); | ||
577 | } | ||
569 | hfs_bnode_write(fd.bnode, &entry, fd.entryoffset, | 578 | hfs_bnode_write(fd.bnode, &entry, fd.entryoffset, |
570 | sizeof(struct hfsplus_cat_folder)); | 579 | sizeof(struct hfsplus_cat_folder)); |
571 | } else if (HFSPLUS_IS_RSRC(inode)) { | 580 | } else if (HFSPLUS_IS_RSRC(inode)) { |
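Throughout these hfsplus hunks a single flag is tested against the big-endian on-disk field as folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT): swapping the constant, which the compiler folds at build time, is cheaper than byte-swapping the datum. The same pattern as a standalone sketch, with a hypothetical helper name:

    /* Hypothetical helper: test one bit of a big-endian on-disk field
     * by converting the constant, not the field - cpu_to_be16() on a
     * constant folds away at compile time. */
    static inline bool has_subfolder_count(const struct hfsplus_cat_folder *f)
    {
            return f->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
    }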
diff --git a/fs/namei.c b/fs/namei.c index 385f7817bfcc..2f730ef9b4b3 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1884,7 +1884,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, | |||
1884 | 1884 | ||
1885 | nd->path = f.file->f_path; | 1885 | nd->path = f.file->f_path; |
1886 | if (flags & LOOKUP_RCU) { | 1886 | if (flags & LOOKUP_RCU) { |
1887 | if (f.need_put) | 1887 | if (f.flags & FDPUT_FPUT) |
1888 | *fp = f.file; | 1888 | *fp = f.file; |
1889 | nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); | 1889 | nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); |
1890 | rcu_read_lock(); | 1890 | rcu_read_lock(); |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8450262bcf2a..51632c40e896 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -2393,8 +2393,8 @@ out_dio: | |||
2393 | 2393 | ||
2394 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || | 2394 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || |
2395 | ((file->f_flags & O_DIRECT) && !direct_io)) { | 2395 | ((file->f_flags & O_DIRECT) && !direct_io)) { |
2396 | ret = filemap_fdatawrite_range(file->f_mapping, pos, | 2396 | ret = filemap_fdatawrite_range(file->f_mapping, *ppos, |
2397 | pos + count - 1); | 2397 | *ppos + count - 1); |
2398 | if (ret < 0) | 2398 | if (ret < 0) |
2399 | written = ret; | 2399 | written = ret; |
2400 | 2400 | ||
@@ -2407,8 +2407,8 @@ out_dio: | |||
2407 | } | 2407 | } |
2408 | 2408 | ||
2409 | if (!ret) | 2409 | if (!ret) |
2410 | ret = filemap_fdatawait_range(file->f_mapping, pos, | 2410 | ret = filemap_fdatawait_range(file->f_mapping, *ppos, |
2411 | pos + count - 1); | 2411 | *ppos + count - 1); |
2412 | } | 2412 | } |
2413 | 2413 | ||
2414 | /* | 2414 | /* |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -705,6 +705,10 @@ static int do_dentry_open(struct file *f, | |||
705 | return 0; | 705 | return 0; |
706 | } | 706 | } |
707 | 707 | ||
708 | /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ | ||
709 | if (S_ISREG(inode->i_mode)) | ||
710 | f->f_mode |= FMODE_ATOMIC_POS; | ||
711 | |||
708 | f->f_op = fops_get(inode->i_fop); | 712 | f->f_op = fops_get(inode->i_fop); |
709 | if (unlikely(WARN_ON(!f->f_op))) { | 713 | if (unlikely(WARN_ON(!f->f_op))) { |
710 | error = -ENODEV; | 714 | error = -ENODEV; |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 51507065263b..b9760628e1fd 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) | |||
1824 | if (rc) | 1824 | if (rc) |
1825 | goto out_mmput; | 1825 | goto out_mmput; |
1826 | 1826 | ||
1827 | rc = -ENOENT; | ||
1827 | down_read(&mm->mmap_sem); | 1828 | down_read(&mm->mmap_sem); |
1828 | vma = find_exact_vma(mm, vm_start, vm_end); | 1829 | vma = find_exact_vma(mm, vm_start, vm_end); |
1829 | if (vma && vma->vm_file) { | 1830 | if (vma && vma->vm_file) { |
diff --git a/fs/read_write.c b/fs/read_write.c index edc5746a902a..54e19b9392dc 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -264,10 +264,22 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence) | |||
264 | } | 264 | } |
265 | EXPORT_SYMBOL(vfs_llseek); | 265 | EXPORT_SYMBOL(vfs_llseek); |
266 | 266 | ||
267 | static inline struct fd fdget_pos(int fd) | ||
268 | { | ||
269 | return __to_fd(__fdget_pos(fd)); | ||
270 | } | ||
271 | |||
272 | static inline void fdput_pos(struct fd f) | ||
273 | { | ||
274 | if (f.flags & FDPUT_POS_UNLOCK) | ||
275 | mutex_unlock(&f.file->f_pos_lock); | ||
276 | fdput(f); | ||
277 | } | ||
278 | |||
267 | SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) | 279 | SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) |
268 | { | 280 | { |
269 | off_t retval; | 281 | off_t retval; |
270 | struct fd f = fdget(fd); | 282 | struct fd f = fdget_pos(fd); |
271 | if (!f.file) | 283 | if (!f.file) |
272 | return -EBADF; | 284 | return -EBADF; |
273 | 285 | ||
@@ -278,7 +290,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) | |||
278 | if (res != (loff_t)retval) | 290 | if (res != (loff_t)retval) |
279 | retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ | 291 | retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ |
280 | } | 292 | } |
281 | fdput(f); | 293 | fdput_pos(f); |
282 | return retval; | 294 | return retval; |
283 | } | 295 | } |
284 | 296 | ||
@@ -498,7 +510,7 @@ static inline void file_pos_write(struct file *file, loff_t pos) | |||
498 | 510 | ||
499 | SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) | 511 | SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) |
500 | { | 512 | { |
501 | struct fd f = fdget(fd); | 513 | struct fd f = fdget_pos(fd); |
502 | ssize_t ret = -EBADF; | 514 | ssize_t ret = -EBADF; |
503 | 515 | ||
504 | if (f.file) { | 516 | if (f.file) { |
@@ -506,7 +518,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) | |||
506 | ret = vfs_read(f.file, buf, count, &pos); | 518 | ret = vfs_read(f.file, buf, count, &pos); |
507 | if (ret >= 0) | 519 | if (ret >= 0) |
508 | file_pos_write(f.file, pos); | 520 | file_pos_write(f.file, pos); |
509 | fdput(f); | 521 | fdput_pos(f); |
510 | } | 522 | } |
511 | return ret; | 523 | return ret; |
512 | } | 524 | } |
@@ -514,7 +526,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) | |||
514 | SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, | 526 | SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, |
515 | size_t, count) | 527 | size_t, count) |
516 | { | 528 | { |
517 | struct fd f = fdget(fd); | 529 | struct fd f = fdget_pos(fd); |
518 | ssize_t ret = -EBADF; | 530 | ssize_t ret = -EBADF; |
519 | 531 | ||
520 | if (f.file) { | 532 | if (f.file) { |
@@ -522,7 +534,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, | |||
522 | ret = vfs_write(f.file, buf, count, &pos); | 534 | ret = vfs_write(f.file, buf, count, &pos); |
523 | if (ret >= 0) | 535 | if (ret >= 0) |
524 | file_pos_write(f.file, pos); | 536 | file_pos_write(f.file, pos); |
525 | fdput(f); | 537 | fdput_pos(f); |
526 | } | 538 | } |
527 | 539 | ||
528 | return ret; | 540 | return ret; |
@@ -797,7 +809,7 @@ EXPORT_SYMBOL(vfs_writev); | |||
797 | SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, | 809 | SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, |
798 | unsigned long, vlen) | 810 | unsigned long, vlen) |
799 | { | 811 | { |
800 | struct fd f = fdget(fd); | 812 | struct fd f = fdget_pos(fd); |
801 | ssize_t ret = -EBADF; | 813 | ssize_t ret = -EBADF; |
802 | 814 | ||
803 | if (f.file) { | 815 | if (f.file) { |
@@ -805,7 +817,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, | |||
805 | ret = vfs_readv(f.file, vec, vlen, &pos); | 817 | ret = vfs_readv(f.file, vec, vlen, &pos); |
806 | if (ret >= 0) | 818 | if (ret >= 0) |
807 | file_pos_write(f.file, pos); | 819 | file_pos_write(f.file, pos); |
808 | fdput(f); | 820 | fdput_pos(f); |
809 | } | 821 | } |
810 | 822 | ||
811 | if (ret > 0) | 823 | if (ret > 0) |
@@ -817,7 +829,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, | |||
817 | SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, | 829 | SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, |
818 | unsigned long, vlen) | 830 | unsigned long, vlen) |
819 | { | 831 | { |
820 | struct fd f = fdget(fd); | 832 | struct fd f = fdget_pos(fd); |
821 | ssize_t ret = -EBADF; | 833 | ssize_t ret = -EBADF; |
822 | 834 | ||
823 | if (f.file) { | 835 | if (f.file) { |
@@ -825,7 +837,7 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, | |||
825 | ret = vfs_writev(f.file, vec, vlen, &pos); | 837 | ret = vfs_writev(f.file, vec, vlen, &pos); |
826 | if (ret >= 0) | 838 | if (ret >= 0) |
827 | file_pos_write(f.file, pos); | 839 | file_pos_write(f.file, pos); |
828 | fdput(f); | 840 | fdput_pos(f); |
829 | } | 841 | } |
830 | 842 | ||
831 | if (ret > 0) | 843 | if (ret > 0) |
@@ -968,7 +980,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd, | |||
968 | const struct compat_iovec __user *,vec, | 980 | const struct compat_iovec __user *,vec, |
969 | compat_ulong_t, vlen) | 981 | compat_ulong_t, vlen) |
970 | { | 982 | { |
971 | struct fd f = fdget(fd); | 983 | struct fd f = fdget_pos(fd); |
972 | ssize_t ret; | 984 | ssize_t ret; |
973 | loff_t pos; | 985 | loff_t pos; |
974 | 986 | ||
@@ -978,7 +990,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd, | |||
978 | ret = compat_readv(f.file, vec, vlen, &pos); | 990 | ret = compat_readv(f.file, vec, vlen, &pos); |
979 | if (ret >= 0) | 991 | if (ret >= 0) |
980 | f.file->f_pos = pos; | 992 | f.file->f_pos = pos; |
981 | fdput(f); | 993 | fdput_pos(f); |
982 | return ret; | 994 | return ret; |
983 | } | 995 | } |
984 | 996 | ||
@@ -1035,7 +1047,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd, | |||
1035 | const struct compat_iovec __user *, vec, | 1047 | const struct compat_iovec __user *, vec, |
1036 | compat_ulong_t, vlen) | 1048 | compat_ulong_t, vlen) |
1037 | { | 1049 | { |
1038 | struct fd f = fdget(fd); | 1050 | struct fd f = fdget_pos(fd); |
1039 | ssize_t ret; | 1051 | ssize_t ret; |
1040 | loff_t pos; | 1052 | loff_t pos; |
1041 | 1053 | ||
@@ -1045,7 +1057,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd, | |||
1045 | ret = compat_writev(f.file, vec, vlen, &pos); | 1057 | ret = compat_writev(f.file, vec, vlen, &pos); |
1046 | if (ret >= 0) | 1058 | if (ret >= 0) |
1047 | f.file->f_pos = pos; | 1059 | f.file->f_pos = pos; |
1048 | fdput(f); | 1060 | fdput_pos(f); |
1049 | return ret; | 1061 | return ret; |
1050 | } | 1062 | } |
1051 | 1063 | ||
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 04a7f31301f8..daac00a93126 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <asm/current.h> | 43 | #include <asm/current.h> |
44 | #endif /* __alpha__ */ | 44 | #endif /* __alpha__ */ |
45 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
46 | #include <linux/kref.h> | ||
46 | #include <linux/miscdevice.h> | 47 | #include <linux/miscdevice.h> |
47 | #include <linux/fs.h> | 48 | #include <linux/fs.h> |
48 | #include <linux/init.h> | 49 | #include <linux/init.h> |
@@ -1008,10 +1009,12 @@ struct drm_driver { | |||
1008 | struct list_head legacy_dev_list; | 1009 | struct list_head legacy_dev_list; |
1009 | }; | 1010 | }; |
1010 | 1011 | ||
1011 | #define DRM_MINOR_UNASSIGNED 0 | 1012 | enum drm_minor_type { |
1012 | #define DRM_MINOR_LEGACY 1 | 1013 | DRM_MINOR_LEGACY, |
1013 | #define DRM_MINOR_CONTROL 2 | 1014 | DRM_MINOR_CONTROL, |
1014 | #define DRM_MINOR_RENDER 3 | 1015 | DRM_MINOR_RENDER, |
1016 | DRM_MINOR_CNT, | ||
1017 | }; | ||
1015 | 1018 | ||
1016 | /** | 1019 | /** |
1017 | * Info file list entry. This structure represents a debugfs or proc file to | 1020 | * Info file list entry. This structure represents a debugfs or proc file to |
@@ -1040,7 +1043,6 @@ struct drm_info_node { | |||
1040 | struct drm_minor { | 1043 | struct drm_minor { |
1041 | int index; /**< Minor device number */ | 1044 | int index; /**< Minor device number */ |
1042 | int type; /**< Control or render */ | 1045 | int type; /**< Control or render */ |
1043 | dev_t device; /**< Device number for mknod */ | ||
1044 | struct device *kdev; /**< Linux device */ | 1046 | struct device *kdev; /**< Linux device */ |
1045 | struct drm_device *dev; | 1047 | struct drm_device *dev; |
1046 | 1048 | ||
@@ -1054,21 +1056,6 @@ struct drm_minor { | |||
1054 | struct drm_mode_group mode_group; | 1056 | struct drm_mode_group mode_group; |
1055 | }; | 1057 | }; |
1056 | 1058 | ||
1057 | /* mode specified on the command line */ | ||
1058 | struct drm_cmdline_mode { | ||
1059 | bool specified; | ||
1060 | bool refresh_specified; | ||
1061 | bool bpp_specified; | ||
1062 | int xres, yres; | ||
1063 | int bpp; | ||
1064 | int refresh; | ||
1065 | bool rb; | ||
1066 | bool interlace; | ||
1067 | bool cvt; | ||
1068 | bool margins; | ||
1069 | enum drm_connector_force force; | ||
1070 | }; | ||
1071 | |||
1072 | 1059 | ||
1073 | struct drm_pending_vblank_event { | 1060 | struct drm_pending_vblank_event { |
1074 | struct drm_pending_event base; | 1061 | struct drm_pending_event base; |
@@ -1098,6 +1085,19 @@ struct drm_device { | |||
1098 | char *devname; /**< For /proc/interrupts */ | 1085 | char *devname; /**< For /proc/interrupts */ |
1099 | int if_version; /**< Highest interface version set */ | 1086 | int if_version; /**< Highest interface version set */ |
1100 | 1087 | ||
1088 | /** \name Lifetime Management */ | ||
1089 | /*@{ */ | ||
1090 | struct kref ref; /**< Object ref-count */ | ||
1091 | struct device *dev; /**< Device structure of bus-device */ | ||
1092 | struct drm_driver *driver; /**< DRM driver managing the device */ | ||
1093 | void *dev_private; /**< DRM driver private data */ | ||
1094 | struct drm_minor *control; /**< Control node */ | ||
1095 | struct drm_minor *primary; /**< Primary node */ | ||
1096 | struct drm_minor *render; /**< Render node */ | ||
1097 | atomic_t unplugged; /**< Flag whether dev is dead */ | ||
1098 | struct inode *anon_inode; /**< inode for private address-space */ | ||
1099 | /*@} */ | ||
1100 | |||
1101 | /** \name Locks */ | 1101 | /** \name Locks */ |
1102 | /*@{ */ | 1102 | /*@{ */ |
1103 | spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ | 1103 | spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ |
@@ -1171,7 +1171,6 @@ struct drm_device { | |||
1171 | 1171 | ||
1172 | struct drm_agp_head *agp; /**< AGP data */ | 1172 | struct drm_agp_head *agp; /**< AGP data */ |
1173 | 1173 | ||
1174 | struct device *dev; /**< Device structure */ | ||
1175 | struct pci_dev *pdev; /**< PCI device structure */ | 1174 | struct pci_dev *pdev; /**< PCI device structure */ |
1176 | #ifdef __alpha__ | 1175 | #ifdef __alpha__ |
1177 | struct pci_controller *hose; | 1176 | struct pci_controller *hose; |
@@ -1182,17 +1181,11 @@ struct drm_device { | |||
1182 | 1181 | ||
1183 | struct drm_sg_mem *sg; /**< Scatter gather memory */ | 1182 | struct drm_sg_mem *sg; /**< Scatter gather memory */ |
1184 | unsigned int num_crtcs; /**< Number of CRTCs on this device */ | 1183 | unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
1185 | void *dev_private; /**< device private data */ | ||
1186 | struct address_space *dev_mapping; | ||
1187 | struct drm_sigdata sigdata; /**< For block_all_signals */ | 1184 | struct drm_sigdata sigdata; /**< For block_all_signals */ |
1188 | sigset_t sigmask; | 1185 | sigset_t sigmask; |
1189 | 1186 | ||
1190 | struct drm_driver *driver; | ||
1191 | struct drm_local_map *agp_buffer_map; | 1187 | struct drm_local_map *agp_buffer_map; |
1192 | unsigned int agp_buffer_token; | 1188 | unsigned int agp_buffer_token; |
1193 | struct drm_minor *control; /**< Control node for card */ | ||
1194 | struct drm_minor *primary; /**< render type primary screen head */ | ||
1195 | struct drm_minor *render; /**< render node for card */ | ||
1196 | 1189 | ||
1197 | struct drm_mode_config mode_config; /**< Current mode config */ | 1190 | struct drm_mode_config mode_config; /**< Current mode config */ |
1198 | 1191 | ||
@@ -1203,8 +1196,6 @@ struct drm_device { | |||
1203 | struct drm_vma_offset_manager *vma_offset_manager; | 1196 | struct drm_vma_offset_manager *vma_offset_manager; |
1204 | /*@} */ | 1197 | /*@} */ |
1205 | int switch_power_state; | 1198 | int switch_power_state; |
1206 | |||
1207 | atomic_t unplugged; /* device has been unplugged or gone away */ | ||
1208 | }; | 1199 | }; |
1209 | 1200 | ||
1210 | #define DRM_SWITCH_POWER_ON 0 | 1201 | #define DRM_SWITCH_POWER_ON 0 |
@@ -1411,20 +1402,6 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, | |||
1411 | extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, | 1402 | extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, |
1412 | const struct drm_display_mode *mode); | 1403 | const struct drm_display_mode *mode); |
1413 | 1404 | ||
1414 | extern bool | ||
1415 | drm_mode_parse_command_line_for_connector(const char *mode_option, | ||
1416 | struct drm_connector *connector, | ||
1417 | struct drm_cmdline_mode *mode); | ||
1418 | |||
1419 | extern struct drm_display_mode * | ||
1420 | drm_mode_create_from_cmdline_mode(struct drm_device *dev, | ||
1421 | struct drm_cmdline_mode *cmd); | ||
1422 | |||
1423 | extern int drm_display_mode_from_videomode(const struct videomode *vm, | ||
1424 | struct drm_display_mode *dmode); | ||
1425 | extern int of_get_drm_display_mode(struct device_node *np, | ||
1426 | struct drm_display_mode *dmode, | ||
1427 | int index); | ||
1428 | 1405 | ||
1429 | /* Modesetting support */ | 1406 | /* Modesetting support */ |
1430 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); | 1407 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
@@ -1661,9 +1638,14 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map) | |||
1661 | 1638 | ||
1662 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, | 1639 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, |
1663 | struct device *parent); | 1640 | struct device *parent); |
1664 | void drm_dev_free(struct drm_device *dev); | 1641 | void drm_dev_ref(struct drm_device *dev); |
1642 | void drm_dev_unref(struct drm_device *dev); | ||
1665 | int drm_dev_register(struct drm_device *dev, unsigned long flags); | 1643 | int drm_dev_register(struct drm_device *dev, unsigned long flags); |
1666 | void drm_dev_unregister(struct drm_device *dev); | 1644 | void drm_dev_unregister(struct drm_device *dev); |
1645 | |||
1646 | struct drm_minor *drm_minor_acquire(unsigned int minor_id); | ||
1647 | void drm_minor_release(struct drm_minor *minor); | ||
1648 | |||
1667 | /*@}*/ | 1649 | /*@}*/ |
1668 | 1650 | ||
1669 | /* PCI section */ | 1651 | /* PCI section */ |
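With drm_dev_free() replaced by the kref-based drm_dev_ref()/drm_dev_unref() pair declared above, a bus driver's probe/remove path would look roughly like the following sketch (foo_driver, the foo_ function names and the error handling are illustrative assumptions, not part of this patch):

        static int foo_probe(struct platform_device *pdev)
        {
                struct drm_device *ddev;
                int ret;

                ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
                if (!ddev)
                        return -ENOMEM;

                ret = drm_dev_register(ddev, 0);
                if (ret) {
                        drm_dev_unref(ddev); /* drops the initial reference */
                        return ret;
                }
                platform_set_drvdata(pdev, ddev);
                return 0;
        }

        static int foo_remove(struct platform_device *pdev)
        {
                struct drm_device *ddev = platform_get_drvdata(pdev);

                drm_dev_unregister(ddev);
                drm_dev_unref(ddev); /* frees the device once the last ref is gone */
                return 0;
        }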
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f7646548660d..27f828c9d7f2 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/fb.h> | 32 | #include <linux/fb.h> |
33 | #include <linux/hdmi.h> | 33 | #include <linux/hdmi.h> |
34 | #include <drm/drm_mode.h> | 34 | #include <drm/drm_mode.h> |
35 | |||
36 | #include <drm/drm_fourcc.h> | 35 | #include <drm/drm_fourcc.h> |
37 | 36 | ||
38 | struct drm_device; | 37 | struct drm_device; |
@@ -65,130 +64,14 @@ struct drm_object_properties { | |||
65 | uint64_t values[DRM_OBJECT_MAX_PROPERTY]; | 64 | uint64_t values[DRM_OBJECT_MAX_PROPERTY]; |
66 | }; | 65 | }; |
67 | 66 | ||
68 | /* | 67 | enum drm_connector_force { |
69 | * Note on terminology: here, for brevity and convenience, we refer to connector | 68 | DRM_FORCE_UNSPECIFIED, |
70 | * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, | 69 | DRM_FORCE_OFF, |
71 | * DVI, etc. And 'screen' refers to the whole of the visible display, which | 70 | DRM_FORCE_ON, /* force on analog part normally */ |
72 | * may span multiple monitors (and therefore multiple CRTC and connector | 71 | DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ |
73 | * structures). | ||
74 | */ | ||
75 | |||
76 | enum drm_mode_status { | ||
77 | MODE_OK = 0, /* Mode OK */ | ||
78 | MODE_HSYNC, /* hsync out of range */ | ||
79 | MODE_VSYNC, /* vsync out of range */ | ||
80 | MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ | ||
81 | MODE_V_ILLEGAL, /* mode has illegal vertical timings */ | ||
82 | MODE_BAD_WIDTH, /* requires an unsupported linepitch */ | ||
83 | MODE_NOMODE, /* no mode with a matching name */ | ||
84 | MODE_NO_INTERLACE, /* interlaced mode not supported */ | ||
85 | MODE_NO_DBLESCAN, /* doublescan mode not supported */ | ||
86 | MODE_NO_VSCAN, /* multiscan mode not supported */ | ||
87 | MODE_MEM, /* insufficient video memory */ | ||
88 | MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ | ||
89 | MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ | ||
90 | MODE_MEM_VIRT, /* insufficient video memory given virtual size */ | ||
91 | MODE_NOCLOCK, /* no fixed clock available */ | ||
92 | MODE_CLOCK_HIGH, /* clock required is too high */ | ||
93 | MODE_CLOCK_LOW, /* clock required is too low */ | ||
94 | MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ | ||
95 | MODE_BAD_HVALUE, /* horizontal timing was out of range */ | ||
96 | MODE_BAD_VVALUE, /* vertical timing was out of range */ | ||
97 | MODE_BAD_VSCAN, /* VScan value out of range */ | ||
98 | MODE_HSYNC_NARROW, /* horizontal sync too narrow */ | ||
99 | MODE_HSYNC_WIDE, /* horizontal sync too wide */ | ||
100 | MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ | ||
101 | MODE_HBLANK_WIDE, /* horizontal blanking too wide */ | ||
102 | MODE_VSYNC_NARROW, /* vertical sync too narrow */ | ||
103 | MODE_VSYNC_WIDE, /* vertical sync too wide */ | ||
104 | MODE_VBLANK_NARROW, /* vertical blanking too narrow */ | ||
105 | MODE_VBLANK_WIDE, /* vertical blanking too wide */ | ||
106 | MODE_PANEL, /* exceeds panel dimensions */ | ||
107 | MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ | ||
108 | MODE_ONE_WIDTH, /* only one width is supported */ | ||
109 | MODE_ONE_HEIGHT, /* only one height is supported */ | ||
110 | MODE_ONE_SIZE, /* only one resolution is supported */ | ||
111 | MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ | ||
112 | MODE_NO_STEREO, /* stereo modes not supported */ | ||
113 | MODE_UNVERIFIED = -3, /* mode needs to be reverified */ | ||
114 | MODE_BAD = -2, /* unspecified reason */ | ||
115 | MODE_ERROR = -1 /* error condition */ | ||
116 | }; | ||
117 | |||
118 | #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ | ||
119 | DRM_MODE_TYPE_CRTC_C) | ||
120 | |||
121 | #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ | ||
122 | .name = nm, .status = 0, .type = (t), .clock = (c), \ | ||
123 | .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ | ||
124 | .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ | ||
125 | .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ | ||
126 | .vscan = (vs), .flags = (f), \ | ||
127 | .base.type = DRM_MODE_OBJECT_MODE | ||
128 | |||
129 | #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ | ||
130 | #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ | ||
131 | |||
132 | #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF | ||
133 | |||
134 | struct drm_display_mode { | ||
135 | /* Header */ | ||
136 | struct list_head head; | ||
137 | struct drm_mode_object base; | ||
138 | |||
139 | char name[DRM_DISPLAY_MODE_LEN]; | ||
140 | |||
141 | enum drm_mode_status status; | ||
142 | unsigned int type; | ||
143 | |||
144 | /* Proposed mode values */ | ||
145 | int clock; /* in kHz */ | ||
146 | int hdisplay; | ||
147 | int hsync_start; | ||
148 | int hsync_end; | ||
149 | int htotal; | ||
150 | int hskew; | ||
151 | int vdisplay; | ||
152 | int vsync_start; | ||
153 | int vsync_end; | ||
154 | int vtotal; | ||
155 | int vscan; | ||
156 | unsigned int flags; | ||
157 | |||
158 | /* Addressable image size (may be 0 for projectors, etc.) */ | ||
159 | int width_mm; | ||
160 | int height_mm; | ||
161 | |||
162 | /* Actual mode we give to hw */ | ||
163 | int crtc_clock; /* in kHz */ | ||
164 | int crtc_hdisplay; | ||
165 | int crtc_hblank_start; | ||
166 | int crtc_hblank_end; | ||
167 | int crtc_hsync_start; | ||
168 | int crtc_hsync_end; | ||
169 | int crtc_htotal; | ||
170 | int crtc_hskew; | ||
171 | int crtc_vdisplay; | ||
172 | int crtc_vblank_start; | ||
173 | int crtc_vblank_end; | ||
174 | int crtc_vsync_start; | ||
175 | int crtc_vsync_end; | ||
176 | int crtc_vtotal; | ||
177 | |||
178 | /* Driver private mode info */ | ||
179 | int private_size; | ||
180 | int *private; | ||
181 | int private_flags; | ||
182 | |||
183 | int vrefresh; /* in Hz */ | ||
184 | int hsync; /* in kHz */ | ||
185 | enum hdmi_picture_aspect picture_aspect_ratio; | ||
186 | }; | 72 | }; |
187 | 73 | ||
188 | static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) | 74 | #include <drm/drm_modes.h> |
189 | { | ||
190 | return mode->flags & DRM_MODE_FLAG_3D_MASK; | ||
191 | } | ||
192 | 75 | ||
193 | enum drm_connector_status { | 76 | enum drm_connector_status { |
194 | connector_status_connected = 1, | 77 | connector_status_connected = 1, |
@@ -540,13 +423,6 @@ struct drm_encoder { | |||
540 | void *helper_private; | 423 | void *helper_private; |
541 | }; | 424 | }; |
542 | 425 | ||
543 | enum drm_connector_force { | ||
544 | DRM_FORCE_UNSPECIFIED, | ||
545 | DRM_FORCE_OFF, | ||
546 | DRM_FORCE_ON, /* force on analog part normally */ | ||
547 | DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ | ||
548 | }; | ||
549 | |||
550 | /* should we poll this connector for connects and disconnects */ | 426 | /* should we poll this connector for connects and disconnects */ |
551 | /* hot plug detectable */ | 427 | /* hot plug detectable */ |
552 | #define DRM_CONNECTOR_POLL_HPD (1 << 0) | 428 | #define DRM_CONNECTOR_POLL_HPD (1 << 0) |
@@ -1007,34 +883,10 @@ extern struct edid *drm_get_edid(struct drm_connector *connector, | |||
1007 | struct i2c_adapter *adapter); | 883 | struct i2c_adapter *adapter); |
1008 | extern struct edid *drm_edid_duplicate(const struct edid *edid); | 884 | extern struct edid *drm_edid_duplicate(const struct edid *edid); |
1009 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); | 885 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
1010 | extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); | ||
1011 | extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); | ||
1012 | extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | ||
1013 | const struct drm_display_mode *mode); | ||
1014 | extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); | ||
1015 | extern void drm_mode_config_init(struct drm_device *dev); | 886 | extern void drm_mode_config_init(struct drm_device *dev); |
1016 | extern void drm_mode_config_reset(struct drm_device *dev); | 887 | extern void drm_mode_config_reset(struct drm_device *dev); |
1017 | extern void drm_mode_config_cleanup(struct drm_device *dev); | 888 | extern void drm_mode_config_cleanup(struct drm_device *dev); |
1018 | extern void drm_mode_set_name(struct drm_display_mode *mode); | 889 | |
1019 | extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); | ||
1020 | extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); | ||
1021 | extern int drm_mode_width(const struct drm_display_mode *mode); | ||
1022 | extern int drm_mode_height(const struct drm_display_mode *mode); | ||
1023 | |||
1024 | /* for use by fb module */ | ||
1025 | extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); | ||
1026 | extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); | ||
1027 | extern void drm_mode_validate_size(struct drm_device *dev, | ||
1028 | struct list_head *mode_list, | ||
1029 | int maxX, int maxY, int maxPitch); | ||
1030 | extern void drm_mode_prune_invalid(struct drm_device *dev, | ||
1031 | struct list_head *mode_list, bool verbose); | ||
1032 | extern void drm_mode_sort(struct list_head *mode_list); | ||
1033 | extern int drm_mode_hsync(const struct drm_display_mode *mode); | ||
1034 | extern int drm_mode_vrefresh(const struct drm_display_mode *mode); | ||
1035 | extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, | ||
1036 | int adjust_flags); | ||
1037 | extern void drm_mode_connector_list_update(struct drm_connector *connector); | ||
1038 | extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, | 890 | extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
1039 | struct edid *edid); | 891 | struct edid *edid); |
1040 | extern int drm_object_property_set_value(struct drm_mode_object *obj, | 892 | extern int drm_object_property_set_value(struct drm_mode_object *obj, |
@@ -1082,8 +934,6 @@ extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); | |||
1082 | 934 | ||
1083 | extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, | 935 | extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
1084 | struct drm_encoder *encoder); | 936 | struct drm_encoder *encoder); |
1085 | extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, | ||
1086 | struct drm_encoder *encoder); | ||
1087 | extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | 937 | extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
1088 | int gamma_size); | 938 | int gamma_size); |
1089 | extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, | 939 | extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
@@ -1138,16 +988,6 @@ extern bool drm_detect_monitor_audio(struct edid *edid); | |||
1138 | extern bool drm_rgb_quant_range_selectable(struct edid *edid); | 988 | extern bool drm_rgb_quant_range_selectable(struct edid *edid); |
1139 | extern int drm_mode_page_flip_ioctl(struct drm_device *dev, | 989 | extern int drm_mode_page_flip_ioctl(struct drm_device *dev, |
1140 | void *data, struct drm_file *file_priv); | 990 | void *data, struct drm_file *file_priv); |
1141 | extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, | ||
1142 | int hdisplay, int vdisplay, int vrefresh, | ||
1143 | bool reduced, bool interlaced, bool margins); | ||
1144 | extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, | ||
1145 | int hdisplay, int vdisplay, int vrefresh, | ||
1146 | bool interlaced, int margins); | ||
1147 | extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, | ||
1148 | int hdisplay, int vdisplay, int vrefresh, | ||
1149 | bool interlaced, int margins, int GTF_M, | ||
1150 | int GTF_2C, int GTF_K, int GTF_2J); | ||
1151 | extern int drm_add_modes_noedid(struct drm_connector *connector, | 991 | extern int drm_add_modes_noedid(struct drm_connector *connector, |
1152 | int hdisplay, int vdisplay); | 992 | int hdisplay, int vdisplay); |
1153 | extern void drm_set_preferred_mode(struct drm_connector *connector, | 993 | extern void drm_set_preferred_mode(struct drm_connector *connector, |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index b1388b5fe7ac..0bb34ca2ad2b 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
@@ -139,8 +139,8 @@ extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode) | |||
139 | 139 | ||
140 | extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); | 140 | extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); |
141 | 141 | ||
142 | extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, | 142 | extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, |
143 | struct drm_mode_fb_cmd2 *mode_cmd); | 143 | struct drm_mode_fb_cmd2 *mode_cmd); |
144 | 144 | ||
145 | static inline void drm_crtc_helper_add(struct drm_crtc *crtc, | 145 | static inline void drm_crtc_helper_add(struct drm_crtc *crtc, |
146 | const struct drm_crtc_helper_funcs *funcs) | 146 | const struct drm_crtc_helper_funcs *funcs) |
@@ -160,7 +160,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, | |||
160 | connector->helper_private = (void *)funcs; | 160 | connector->helper_private = (void *)funcs; |
161 | } | 161 | } |
162 | 162 | ||
163 | extern int drm_helper_resume_force_mode(struct drm_device *dev); | 163 | extern void drm_helper_resume_force_mode(struct drm_device *dev); |
164 | extern void drm_kms_helper_poll_init(struct drm_device *dev); | 164 | extern void drm_kms_helper_poll_init(struct drm_device *dev); |
165 | extern void drm_kms_helper_poll_fini(struct drm_device *dev); | 165 | extern void drm_kms_helper_poll_fini(struct drm_device *dev); |
166 | extern bool drm_helper_hpd_irq_event(struct drm_device *dev); | 166 | extern bool drm_helper_hpd_irq_event(struct drm_device *dev); |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 73c3d1f2b20d..b4f58914bf7d 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -301,6 +301,7 @@ | |||
301 | #define DP_SET_POWER 0x600 | 301 | #define DP_SET_POWER 0x600 |
302 | # define DP_SET_POWER_D0 0x1 | 302 | # define DP_SET_POWER_D0 0x1 |
303 | # define DP_SET_POWER_D3 0x2 | 303 | # define DP_SET_POWER_D3 0x2 |
304 | # define DP_SET_POWER_MASK 0x3 | ||
304 | 305 | ||
305 | #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ | 306 | #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ |
306 | # define DP_PSR_LINK_CRC_ERROR (1 << 0) | 307 | # define DP_PSR_LINK_CRC_ERROR (1 << 0) |
@@ -408,4 +409,118 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) | |||
408 | (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); | 409 | (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); |
409 | } | 410 | } |
410 | 411 | ||
412 | /* | ||
413 | * DisplayPort AUX channel | ||
414 | */ | ||
415 | |||
416 | /** | ||
417 | * struct drm_dp_aux_msg - DisplayPort AUX channel transaction | ||
418 | * @address: address of the (first) register to access | ||
419 | * @request: contains the type of transaction (see DP_AUX_* macros) | ||
420 | * @reply: upon completion, contains the reply type of the transaction | ||
421 | * @buffer: pointer to a transmission or reception buffer | ||
422 | * @size: size of @buffer | ||
423 | */ | ||
424 | struct drm_dp_aux_msg { | ||
425 | unsigned int address; | ||
426 | u8 request; | ||
427 | u8 reply; | ||
428 | void *buffer; | ||
429 | size_t size; | ||
430 | }; | ||
431 | |||
432 | /** | ||
433 | * struct drm_dp_aux - DisplayPort AUX channel | ||
434 | * @ddc: I2C adapter that can be used for I2C-over-AUX communication | ||
435 | * @dev: pointer to struct device that is the parent for this AUX channel | ||
436 | * @transfer: transfers a message representing a single AUX transaction | ||
437 | * | ||
438 | * The .dev field should be set to a pointer to the device that implements | ||
439 | * the AUX channel. | ||
440 | * | ||
441 | * The .name field may be used to specify the name of the I2C adapter. If set to | ||
442 | * NULL, dev_name() of .dev will be used. | ||
443 | * | ||
444 | * Drivers provide a hardware-specific implementation of how transactions | ||
445 | * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg | ||
446 | * structure describing the transaction is passed into this function. Upon | ||
447 | * success, the implementation should return the number of payload bytes | ||
448 | * that were transferred, or a negative error-code on failure. Helpers | ||
449 | * propagate errors from the .transfer() function, with the exception of | ||
450 | * the -EBUSY error, which causes a transaction to be retried. On a short, | ||
451 | * helpers will return -EPROTO to make it simpler to check for failure. | ||
452 | * | ||
453 | * An AUX channel can also be used to transport I2C messages to a sink. A | ||
454 | * typical application of that is to access an EDID that's present in the | ||
455 | * sink device. The .transfer() function can also be used to execute such | ||
456 | * transactions. The drm_dp_aux_register_i2c_bus() function registers an | ||
457 | * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers | ||
458 | * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter. | ||
459 | */ | ||
460 | struct drm_dp_aux { | ||
461 | const char *name; | ||
462 | struct i2c_adapter ddc; | ||
463 | struct device *dev; | ||
464 | |||
465 | ssize_t (*transfer)(struct drm_dp_aux *aux, | ||
466 | struct drm_dp_aux_msg *msg); | ||
467 | }; | ||
468 | |||
469 | ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, | ||
470 | void *buffer, size_t size); | ||
471 | ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, | ||
472 | void *buffer, size_t size); | ||
473 | |||
474 | /** | ||
475 | * drm_dp_dpcd_readb() - read a single byte from the DPCD | ||
476 | * @aux: DisplayPort AUX channel | ||
477 | * @offset: address of the register to read | ||
478 | * @valuep: location where the value of the register will be stored | ||
479 | * | ||
480 | * Returns the number of bytes transferred (1) on success, or a negative | ||
481 | * error code on failure. | ||
482 | */ | ||
483 | static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, | ||
484 | unsigned int offset, u8 *valuep) | ||
485 | { | ||
486 | return drm_dp_dpcd_read(aux, offset, valuep, 1); | ||
487 | } | ||
488 | |||
489 | /** | ||
490 | * drm_dp_dpcd_writeb() - write a single byte to the DPCD | ||
491 | * @aux: DisplayPort AUX channel | ||
492 | * @offset: address of the register to write | ||
493 | * @value: value to write to the register | ||
494 | * | ||
495 | * Returns the number of bytes transferred (1) on success, or a negative | ||
496 | * error code on failure. | ||
497 | */ | ||
498 | static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, | ||
499 | unsigned int offset, u8 value) | ||
500 | { | ||
501 | return drm_dp_dpcd_write(aux, offset, &value, 1); | ||
502 | } | ||
503 | |||
504 | int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, | ||
505 | u8 status[DP_LINK_STATUS_SIZE]); | ||
506 | |||
507 | /* | ||
508 | * DisplayPort link | ||
509 | */ | ||
510 | #define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) | ||
511 | |||
512 | struct drm_dp_link { | ||
513 | unsigned char revision; | ||
514 | unsigned int rate; | ||
515 | unsigned int num_lanes; | ||
516 | unsigned long capabilities; | ||
517 | }; | ||
518 | |||
519 | int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); | ||
520 | int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); | ||
521 | int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); | ||
522 | |||
523 | int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux); | ||
524 | void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux); | ||
525 | |||
411 | #endif /* _DRM_DP_HELPER_H_ */ | 526 | #endif /* _DRM_DP_HELPER_H_ */ |
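Putting the AUX-channel documentation above into practice, a hedged sketch of how a driver might wire up struct drm_dp_aux and perform a first DPCD access (foo_aux_transfer(), foo_hw_aux_xfer() and the foo_device fields are assumptions for illustration):

        static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
                                        struct drm_dp_aux_msg *msg)
        {
                /* Drive the hardware: issue msg->request at msg->address,
                 * move msg->size bytes through msg->buffer, set msg->reply.
                 * Return payload bytes transferred or a negative error;
                 * -EBUSY is retried by the helpers. */
                return foo_hw_aux_xfer(aux, msg); /* assumed hardware helper */
        }

        static int foo_dp_init(struct foo_device *foo)
        {
                u8 rev;
                int ret;

                foo->aux.name = "foo DP AUX";
                foo->aux.dev = foo->dev;
                foo->aux.transfer = foo_aux_transfer;

                ret = drm_dp_aux_register_i2c_bus(&foo->aux);
                if (ret < 0)
                        return ret;

                /* Single-byte DPCD read via the drm_dp_dpcd_readb() helper. */
                if (drm_dp_dpcd_readb(&foo->aux, DP_DPCD_REV, &rev) == 1)
                        DRM_DEBUG_KMS("DPCD revision %x.%x\n",
                                      rev >> 4, rev & 0xf);
                return 0;
        }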
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index cba67865d18f..8b6981ab3fcf 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h | |||
@@ -85,11 +85,31 @@ struct drm_mm { | |||
85 | unsigned long *start, unsigned long *end); | 85 | unsigned long *start, unsigned long *end); |
86 | }; | 86 | }; |
87 | 87 | ||
88 | /** | ||
89 | * drm_mm_node_allocated - checks whether a node is allocated | ||
90 | * @node: drm_mm_node to check | ||
91 | * | ||
92 | * Drivers should use this helper for proper encapsulation of drm_mm | ||
93 | * internals. | ||
94 | * | ||
95 | * Returns: | ||
96 | * True if the @node is allocated. | ||
97 | */ | ||
88 | static inline bool drm_mm_node_allocated(struct drm_mm_node *node) | 98 | static inline bool drm_mm_node_allocated(struct drm_mm_node *node) |
89 | { | 99 | { |
90 | return node->allocated; | 100 | return node->allocated; |
91 | } | 101 | } |
92 | 102 | ||
103 | /** | ||
104 | * drm_mm_initialized - checks whether an allocator is initialized | ||
105 | * @mm: drm_mm to check | ||
106 | * | ||
107 | * Drivers should use this helper for proper encapsulation of drm_mm | ||
108 | * internals. | ||
109 | * | ||
110 | * Returns: | ||
111 | * True if the @mm is initialized. | ||
112 | */ | ||
93 | static inline bool drm_mm_initialized(struct drm_mm *mm) | 113 | static inline bool drm_mm_initialized(struct drm_mm *mm) |
94 | { | 114 | { |
95 | return mm->hole_stack.next; | 115 | return mm->hole_stack.next; |
@@ -100,6 +120,17 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no | |||
100 | return hole_node->start + hole_node->size; | 120 | return hole_node->start + hole_node->size; |
101 | } | 121 | } |
102 | 122 | ||
123 | /** | ||
124 | * drm_mm_hole_node_start - computes the start of the hole following @node | ||
125 | * @hole_node: drm_mm_node which implicitly tracks the following hole | ||
126 | * | ||
127 | * This is useful for driver-specific debug dumpers. Otherwise drivers should not | ||
128 | * inspect holes themselves. Drivers must check first whether a hole indeed | ||
129 | * follows by looking at node->hole_follows. | ||
130 | * | ||
131 | * Returns: | ||
132 | * Start of the subsequent hole. | ||
133 | */ | ||
103 | static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) | 134 | static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
104 | { | 135 | { |
105 | BUG_ON(!hole_node->hole_follows); | 136 | BUG_ON(!hole_node->hole_follows); |
@@ -112,18 +143,49 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node | |||
112 | struct drm_mm_node, node_list)->start; | 143 | struct drm_mm_node, node_list)->start; |
113 | } | 144 | } |
114 | 145 | ||
146 | /** | ||
147 | * drm_mm_hole_node_end - computes the end of the hole following @node | ||
148 | * @hole_node: drm_mm_node which implicitly tracks the following hole | ||
149 | * | ||
150 | * This is useful for driver-specific debug dumpers. Otherwise drivers should not | ||
151 | * inspect holes themselves. Drivers must check first whether a hole indeed | ||
152 | * follows by looking at node->hole_follows. | ||
153 | * | ||
154 | * Returns: | ||
155 | * End of the subsequent hole. | ||
156 | */ | ||
115 | static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) | 157 | static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
116 | { | 158 | { |
117 | return __drm_mm_hole_node_end(hole_node); | 159 | return __drm_mm_hole_node_end(hole_node); |
118 | } | 160 | } |
119 | 161 | ||
162 | /** | ||
163 | * drm_mm_for_each_node - iterator to walk over all allocated nodes | ||
164 | * @entry: drm_mm_node structure to assign to in each iteration step | ||
165 | * @mm: drm_mm allocator to walk | ||
166 | * | ||
167 | * This iterator walks over all nodes in the range allocator. It is implemented | ||
168 | * with list_for_each, so it is not safe against removal of elements. | ||
169 | */ | ||
120 | #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ | 170 | #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ |
121 | &(mm)->head_node.node_list, \ | 171 | &(mm)->head_node.node_list, \ |
122 | node_list) | 172 | node_list) |
123 | 173 | ||
124 | /* Note that we need to unroll list_for_each_entry in order to inline | 174 | /** |
125 | * setting hole_start and hole_end on each iteration and keep the | 175 | * drm_mm_for_each_hole - iterator to walk over all holes |
126 | * macro sane. | 176 | * @entry: drm_mm_node used internally to track progress |
177 | * @mm: drm_mm allocator to walk | ||
178 | * @hole_start: ulong variable to assign the hole start to on each iteration | ||
179 | * @hole_end: ulong variable to assign the hole end to on each iteration | ||
180 | * | ||
181 | * This iterator walks over all holes in the range allocator. It is implemented | ||
182 | * with list_for_each, so it is not safe against removal of elements. @entry is | ||
183 | * used internally and will not reflect a real drm_mm_node for the very first | ||
184 | * hole. Hence users of this iterator must not access it. | ||
185 | * | ||
186 | * Implementation Note: | ||
187 | * We need to inline list_for_each_entry in order to be able to set hole_start | ||
188 | * and hole_end on each iteration while keeping the macro sane. | ||
127 | */ | 189 | */ |
128 | #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ | 190 | #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ |
129 | for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ | 191 | for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ |
@@ -136,14 +198,30 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) | |||
136 | /* | 198 | /* |
137 | * Basic range manager support (drm_mm.c) | 199 | * Basic range manager support (drm_mm.c) |
138 | */ | 200 | */ |
139 | extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); | 201 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); |
140 | 202 | ||
141 | extern int drm_mm_insert_node_generic(struct drm_mm *mm, | 203 | int drm_mm_insert_node_generic(struct drm_mm *mm, |
142 | struct drm_mm_node *node, | 204 | struct drm_mm_node *node, |
143 | unsigned long size, | 205 | unsigned long size, |
144 | unsigned alignment, | 206 | unsigned alignment, |
145 | unsigned long color, | 207 | unsigned long color, |
146 | enum drm_mm_search_flags flags); | 208 | enum drm_mm_search_flags flags); |
209 | /** | ||
210 | * drm_mm_insert_node - search for space and insert @node | ||
211 | * @mm: drm_mm to allocate from | ||
212 | * @node: preallocated node to insert | ||
213 | * @size: size of the allocation | ||
214 | * @alignment: alignment of the allocation | ||
215 | * @flags: flags to fine-tune the allocation | ||
216 | * | ||
217 | * This is a simplified version of drm_mm_insert_node_generic() with @color set | ||
218 | * to 0. | ||
219 | * | ||
220 | * The preallocated node must be cleared to 0. | ||
221 | * | ||
222 | * Returns: | ||
223 | * 0 on success, -ENOSPC if there's no suitable hole. | ||
224 | */ | ||
147 | static inline int drm_mm_insert_node(struct drm_mm *mm, | 225 | static inline int drm_mm_insert_node(struct drm_mm *mm, |
148 | struct drm_mm_node *node, | 226 | struct drm_mm_node *node, |
149 | unsigned long size, | 227 | unsigned long size, |
@@ -153,14 +231,32 @@ static inline int drm_mm_insert_node(struct drm_mm *mm, | |||
153 | return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags); | 231 | return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags); |
154 | } | 232 | } |
155 | 233 | ||
156 | extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, | 234 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, |
157 | struct drm_mm_node *node, | 235 | struct drm_mm_node *node, |
158 | unsigned long size, | 236 | unsigned long size, |
159 | unsigned alignment, | 237 | unsigned alignment, |
160 | unsigned long color, | 238 | unsigned long color, |
161 | unsigned long start, | 239 | unsigned long start, |
162 | unsigned long end, | 240 | unsigned long end, |
163 | enum drm_mm_search_flags flags); | 241 | enum drm_mm_search_flags flags); |
242 | /** | ||
243 | * drm_mm_insert_node_in_range - ranged search for space and insert @node | ||
244 | * @mm: drm_mm to allocate from | ||
245 | * @node: preallocated node to insert | ||
246 | * @size: size of the allocation | ||
247 | * @alignment: alignment of the allocation | ||
248 | * @start: start of the allowed range for this node | ||
249 | * @end: end of the allowed range for this node | ||
250 | * @flags: flags to fine-tune the allocation | ||
251 | * | ||
252 | * This is a simplified version of drm_mm_insert_node_in_range_generic() with | ||
253 | * @color set to 0. | ||
254 | * | ||
255 | * The preallocated node must be cleared to 0. | ||
256 | * | ||
257 | * Returns: | ||
258 | * 0 on success, -ENOSPC if there's no suitable hole. | ||
259 | */ | ||
164 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, | 260 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, |
165 | struct drm_mm_node *node, | 261 | struct drm_mm_node *node, |
166 | unsigned long size, | 262 | unsigned long size, |
@@ -173,13 +269,13 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, | |||
173 | 0, start, end, flags); | 269 | 0, start, end, flags); |
174 | } | 270 | } |
175 | 271 | ||
176 | extern void drm_mm_remove_node(struct drm_mm_node *node); | 272 | void drm_mm_remove_node(struct drm_mm_node *node); |
177 | extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); | 273 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
178 | extern void drm_mm_init(struct drm_mm *mm, | 274 | void drm_mm_init(struct drm_mm *mm, |
179 | unsigned long start, | 275 | unsigned long start, |
180 | unsigned long size); | 276 | unsigned long size); |
181 | extern void drm_mm_takedown(struct drm_mm *mm); | 277 | void drm_mm_takedown(struct drm_mm *mm); |
182 | extern int drm_mm_clean(struct drm_mm *mm); | 278 | bool drm_mm_clean(struct drm_mm *mm); |
183 | 279 | ||
184 | void drm_mm_init_scan(struct drm_mm *mm, | 280 | void drm_mm_init_scan(struct drm_mm *mm, |
185 | unsigned long size, | 281 | unsigned long size, |
@@ -191,10 +287,10 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, | |||
191 | unsigned long color, | 287 | unsigned long color, |
192 | unsigned long start, | 288 | unsigned long start, |
193 | unsigned long end); | 289 | unsigned long end); |
194 | int drm_mm_scan_add_block(struct drm_mm_node *node); | 290 | bool drm_mm_scan_add_block(struct drm_mm_node *node); |
195 | int drm_mm_scan_remove_block(struct drm_mm_node *node); | 291 | bool drm_mm_scan_remove_block(struct drm_mm_node *node); |
196 | 292 | ||
197 | extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); | 293 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); |
198 | #ifdef CONFIG_DEBUG_FS | 294 | #ifdef CONFIG_DEBUG_FS |
199 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); | 295 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); |
200 | #endif | 296 | #endif |
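A hedged usage sketch tying the drm_mm kerneldoc above together — init, insert, hole walk, remove, takedown. The managed range size and the pr_info() reporting are illustrative only; note the node is cleared to 0 before insertion, as the kerneldoc requires.

        static int foo_mm_demo(void)
        {
                struct drm_mm mm;
                struct drm_mm_node node = {}; /* preallocated, cleared to 0 */
                struct drm_mm_node *entry;
                unsigned long hole_start, hole_end;
                int ret;

                drm_mm_init(&mm, 0, 1024 * 1024); /* manage a 1 MiB range */

                ret = drm_mm_insert_node(&mm, &node, 4096, 0,
                                         DRM_MM_SEARCH_DEFAULT);
                if (ret) /* -ENOSPC if no suitable hole */
                        goto out;

                /* Walk the remaining holes; @entry may not be dereferenced. */
                drm_mm_for_each_hole(entry, &mm, hole_start, hole_end)
                        pr_info("hole: %lx-%lx\n", hole_start, hole_end);

                drm_mm_remove_node(&node);
        out:
                drm_mm_takedown(&mm);
                return ret;
        }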
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h new file mode 100644 index 000000000000..2dbbf9976669 --- /dev/null +++ b/include/drm/drm_modes.h | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Copyright © 2006 Keith Packard | ||
3 | * Copyright © 2007-2008 Dave Airlie | ||
4 | * Copyright © 2007-2008 Intel Corporation | ||
5 | * Jesse Barnes <jesse.barnes@intel.com> | ||
6 | * Copyright © 2014 Intel Corporation | ||
7 | * Daniel Vetter <daniel.vetter@ffwll.ch> | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice shall be included in | ||
17 | * all copies or substantial portions of the Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | #ifndef __DRM_MODES_H__ | ||
28 | #define __DRM_MODES_H__ | ||
29 | |||
30 | /* | ||
31 | * Note on terminology: here, for brevity and convenience, we refer to connector | ||
32 | * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, | ||
33 | * DVI, etc. And 'screen' refers to the whole of the visible display, which | ||
34 | * may span multiple monitors (and therefore multiple CRTC and connector | ||
35 | * structures). | ||
36 | */ | ||
37 | |||
38 | enum drm_mode_status { | ||
39 | MODE_OK = 0, /* Mode OK */ | ||
40 | MODE_HSYNC, /* hsync out of range */ | ||
41 | MODE_VSYNC, /* vsync out of range */ | ||
42 | MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ | ||
43 | MODE_V_ILLEGAL, /* mode has illegal vertical timings */ | ||
44 | MODE_BAD_WIDTH, /* requires an unsupported linepitch */ | ||
45 | MODE_NOMODE, /* no mode with a matching name */ | ||
46 | MODE_NO_INTERLACE, /* interlaced mode not supported */ | ||
47 | MODE_NO_DBLESCAN, /* doublescan mode not supported */ | ||
48 | MODE_NO_VSCAN, /* multiscan mode not supported */ | ||
49 | MODE_MEM, /* insufficient video memory */ | ||
50 | MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ | ||
51 | MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ | ||
52 | MODE_MEM_VIRT, /* insufficient video memory given virtual size */ | ||
53 | MODE_NOCLOCK, /* no fixed clock available */ | ||
54 | MODE_CLOCK_HIGH, /* clock required is too high */ | ||
55 | MODE_CLOCK_LOW, /* clock required is too low */ | ||
56 | MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ | ||
57 | MODE_BAD_HVALUE, /* horizontal timing was out of range */ | ||
58 | MODE_BAD_VVALUE, /* vertical timing was out of range */ | ||
59 | MODE_BAD_VSCAN, /* VScan value out of range */ | ||
60 | MODE_HSYNC_NARROW, /* horizontal sync too narrow */ | ||
61 | MODE_HSYNC_WIDE, /* horizontal sync too wide */ | ||
62 | MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ | ||
63 | MODE_HBLANK_WIDE, /* horizontal blanking too wide */ | ||
64 | MODE_VSYNC_NARROW, /* vertical sync too narrow */ | ||
65 | MODE_VSYNC_WIDE, /* vertical sync too wide */ | ||
66 | MODE_VBLANK_NARROW, /* vertical blanking too narrow */ | ||
67 | MODE_VBLANK_WIDE, /* vertical blanking too wide */ | ||
68 | MODE_PANEL, /* exceeds panel dimensions */ | ||
69 | MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ | ||
70 | MODE_ONE_WIDTH, /* only one width is supported */ | ||
71 | MODE_ONE_HEIGHT, /* only one height is supported */ | ||
72 | MODE_ONE_SIZE, /* only one resolution is supported */ | ||
73 | MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ | ||
74 | MODE_NO_STEREO, /* stereo modes not supported */ | ||
75 | MODE_UNVERIFIED = -3, /* mode needs to be reverified */ | ||
76 | MODE_BAD = -2, /* unspecified reason */ | ||
77 | MODE_ERROR = -1 /* error condition */ | ||
78 | }; | ||
79 | |||
80 | #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ | ||
81 | DRM_MODE_TYPE_CRTC_C) | ||
82 | |||
83 | #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ | ||
84 | .name = nm, .status = 0, .type = (t), .clock = (c), \ | ||
85 | .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ | ||
86 | .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ | ||
87 | .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ | ||
88 | .vscan = (vs), .flags = (f), \ | ||
89 | .base.type = DRM_MODE_OBJECT_MODE | ||
90 | |||
91 | #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ | ||
92 | #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ | ||
93 | |||
94 | #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF | ||
95 | |||
96 | struct drm_display_mode { | ||
97 | /* Header */ | ||
98 | struct list_head head; | ||
99 | struct drm_mode_object base; | ||
100 | |||
101 | char name[DRM_DISPLAY_MODE_LEN]; | ||
102 | |||
103 | enum drm_mode_status status; | ||
104 | unsigned int type; | ||
105 | |||
106 | /* Proposed mode values */ | ||
107 | int clock; /* in kHz */ | ||
108 | int hdisplay; | ||
109 | int hsync_start; | ||
110 | int hsync_end; | ||
111 | int htotal; | ||
112 | int hskew; | ||
113 | int vdisplay; | ||
114 | int vsync_start; | ||
115 | int vsync_end; | ||
116 | int vtotal; | ||
117 | int vscan; | ||
118 | unsigned int flags; | ||
119 | |||
120 | /* Addressable image size (may be 0 for projectors, etc.) */ | ||
121 | int width_mm; | ||
122 | int height_mm; | ||
123 | |||
124 | /* Actual mode we give to hw */ | ||
125 | int crtc_clock; /* in kHz */ | ||
126 | int crtc_hdisplay; | ||
127 | int crtc_hblank_start; | ||
128 | int crtc_hblank_end; | ||
129 | int crtc_hsync_start; | ||
130 | int crtc_hsync_end; | ||
131 | int crtc_htotal; | ||
132 | int crtc_hskew; | ||
133 | int crtc_vdisplay; | ||
134 | int crtc_vblank_start; | ||
135 | int crtc_vblank_end; | ||
136 | int crtc_vsync_start; | ||
137 | int crtc_vsync_end; | ||
138 | int crtc_vtotal; | ||
139 | |||
140 | /* Driver private mode info */ | ||
141 | int *private; | ||
142 | int private_flags; | ||
143 | |||
144 | int vrefresh; /* in Hz */ | ||
145 | int hsync; /* in kHz */ | ||
146 | enum hdmi_picture_aspect picture_aspect_ratio; | ||
147 | }; | ||
148 | |||
149 | /* mode specified on the command line */ | ||
150 | struct drm_cmdline_mode { | ||
151 | bool specified; | ||
152 | bool refresh_specified; | ||
153 | bool bpp_specified; | ||
154 | int xres, yres; | ||
155 | int bpp; | ||
156 | int refresh; | ||
157 | bool rb; | ||
158 | bool interlace; | ||
159 | bool cvt; | ||
160 | bool margins; | ||
161 | enum drm_connector_force force; | ||
162 | }; | ||
163 | |||
164 | /** | ||
165 | * drm_mode_is_stereo - check for stereo mode flags | ||
166 | * @mode: drm_display_mode to check | ||
167 | * | ||
168 | * Returns: | ||
169 | * True if the mode is one of the stereo modes (like side-by-side), false if | ||
170 | * not. | ||
171 | */ | ||
172 | static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) | ||
173 | { | ||
174 | return mode->flags & DRM_MODE_FLAG_3D_MASK; | ||
175 | } | ||
176 | |||
177 | struct drm_connector; | ||
178 | struct drm_cmdline_mode; | ||
179 | |||
180 | struct drm_display_mode *drm_mode_create(struct drm_device *dev); | ||
181 | void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); | ||
182 | void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); | ||
183 | void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); | ||
184 | |||
185 | struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, | ||
186 | int hdisplay, int vdisplay, int vrefresh, | ||
187 | bool reduced, bool interlaced, | ||
188 | bool margins); | ||
189 | struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, | ||
190 | int hdisplay, int vdisplay, int vrefresh, | ||
191 | bool interlaced, int margins); | ||
192 | struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, | ||
193 | int hdisplay, int vdisplay, | ||
194 | int vrefresh, bool interlaced, | ||
195 | int margins, | ||
196 | int GTF_M, int GTF_2C, | ||
197 | int GTF_K, int GTF_2J); | ||
198 | void drm_display_mode_from_videomode(const struct videomode *vm, | ||
199 | struct drm_display_mode *dmode); | ||
200 | int of_get_drm_display_mode(struct device_node *np, | ||
201 | struct drm_display_mode *dmode, | ||
202 | int index); | ||
203 | |||
204 | void drm_mode_set_name(struct drm_display_mode *mode); | ||
205 | int drm_mode_hsync(const struct drm_display_mode *mode); | ||
206 | int drm_mode_vrefresh(const struct drm_display_mode *mode); | ||
207 | |||
208 | void drm_mode_set_crtcinfo(struct drm_display_mode *p, | ||
209 | int adjust_flags); | ||
210 | void drm_mode_copy(struct drm_display_mode *dst, | ||
211 | const struct drm_display_mode *src); | ||
212 | struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | ||
213 | const struct drm_display_mode *mode); | ||
214 | bool drm_mode_equal(const struct drm_display_mode *mode1, | ||
215 | const struct drm_display_mode *mode2); | ||
216 | bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, | ||
217 | const struct drm_display_mode *mode2); | ||
218 | |||
219 | /* for use by the crtc helper probe functions */ | ||
220 | void drm_mode_validate_size(struct drm_device *dev, | ||
221 | struct list_head *mode_list, | ||
222 | int maxX, int maxY); | ||
223 | void drm_mode_prune_invalid(struct drm_device *dev, | ||
224 | struct list_head *mode_list, bool verbose); | ||
225 | void drm_mode_sort(struct list_head *mode_list); | ||
226 | void drm_mode_connector_list_update(struct drm_connector *connector); | ||
227 | |||
228 | /* parsing cmdline modes */ | ||
229 | bool | ||
230 | drm_mode_parse_command_line_for_connector(const char *mode_option, | ||
231 | struct drm_connector *connector, | ||
232 | struct drm_cmdline_mode *mode); | ||
233 | struct drm_display_mode * | ||
234 | drm_mode_create_from_cmdline_mode(struct drm_device *dev, | ||
235 | struct drm_cmdline_mode *cmd); | ||
236 | |||
237 | #endif /* __DRM_MODES_H__ */ | ||
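A hedged sketch of the command-line mode path now collected in this header: parse a video=-style option string for a connector, then expand it into a full display mode (the literal mode string and the foo_pick_cmdline_mode() wrapper are illustrative assumptions):

        static struct drm_display_mode *
        foo_pick_cmdline_mode(struct drm_device *dev,
                              struct drm_connector *connector)
        {
                struct drm_cmdline_mode cmdline = {};

                if (!drm_mode_parse_command_line_for_connector("1920x1080@60",
                                                               connector,
                                                               &cmdline))
                        return NULL; /* string did not parse */

                /* Builds a CVT/GTF mode matching the parsed parameters. */
                return drm_mode_create_from_cmdline_mode(dev, &cmdline);
        }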
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index c18a593d1744..8cd402c73a5f 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h | |||
@@ -221,8 +221,8 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) | |||
221 | * @file_mapping: Address space to unmap @node from | 221 | * @file_mapping: Address space to unmap @node from |
222 | * | 222 | * |
223 | * Unmap all userspace mappings for a given offset node. The mappings must be | 223 | * Unmap all userspace mappings for a given offset node. The mappings must be |
224 | * associated with the @file_mapping address-space. If no offset exists or | 224 | * associated with the @file_mapping address-space. If no offset exists |
225 | * the address-space is invalid, nothing is done. | 225 | * nothing is done. |
226 | * | 226 | * |
227 | * This call is unlocked. The caller must guarantee that drm_vma_offset_remove() | 227 | * This call is unlocked. The caller must guarantee that drm_vma_offset_remove() |
228 | * is not called on this node concurrently. | 228 | * is not called on this node concurrently. |
@@ -230,7 +230,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) | |||
230 | static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, | 230 | static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, |
231 | struct address_space *file_mapping) | 231 | struct address_space *file_mapping) |
232 | { | 232 | { |
233 | if (file_mapping && drm_vma_node_has_offset(node)) | 233 | if (drm_vma_node_has_offset(node)) |
234 | unmap_mapping_range(file_mapping, | 234 | unmap_mapping_range(file_mapping, |
235 | drm_vma_node_offset_addr(node), | 235 | drm_vma_node_offset_addr(node), |
236 | drm_vma_node_size(node) << PAGE_SHIFT, 1); | 236 | drm_vma_node_size(node) << PAGE_SHIFT, 1); |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 32d34ebf0706..5d8aabe68f6c 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -747,6 +747,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); | |||
747 | * @bdev: A pointer to a struct ttm_bo_device to initialize. | 747 | * @bdev: A pointer to a struct ttm_bo_device to initialize. |
748 | * @glob: A pointer to an initialized struct ttm_bo_global. | 748 | * @glob: A pointer to an initialized struct ttm_bo_global. |
749 | * @driver: A pointer to a struct ttm_bo_driver set up by the caller. | 749 | * @driver: A pointer to a struct ttm_bo_driver set up by the caller. |
750 | * @mapping: The address space to use for this bo. | ||
750 | * @file_page_offset: Offset into the device address space that is available | 751 | * @file_page_offset: Offset into the device address space that is available |
751 | * for buffer data. This ensures compatibility with other users of the | 752 | * for buffer data. This ensures compatibility with other users of the |
752 | * address space. | 753 | * address space. |
@@ -758,6 +759,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); | |||
758 | extern int ttm_bo_device_init(struct ttm_bo_device *bdev, | 759 | extern int ttm_bo_device_init(struct ttm_bo_device *bdev, |
759 | struct ttm_bo_global *glob, | 760 | struct ttm_bo_global *glob, |
760 | struct ttm_bo_driver *driver, | 761 | struct ttm_bo_driver *driver, |
762 | struct address_space *mapping, | ||
761 | uint64_t file_page_offset, bool need_dma32); | 763 | uint64_t file_page_offset, bool need_dma32); |
762 | 764 | ||
763 | /** | 765 | /** |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index be85127bfed3..f27000f55a83 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add | |||
171 | return 0; | 171 | return 0; |
172 | } | 172 | } |
173 | 173 | ||
174 | static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | ||
175 | { | ||
176 | return -ENXIO; | ||
177 | } | ||
178 | |||
174 | static inline int kvm_vgic_init(struct kvm *kvm) | 179 | static inline int kvm_vgic_init(struct kvm *kvm) |
175 | { | 180 | { |
176 | return 0; | 181 | return 0; |
diff --git a/include/linux/audit.h b/include/linux/audit.h index aa865a9a4c4f..ec1464df4c60 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -43,6 +43,7 @@ struct mq_attr; | |||
43 | struct mqstat; | 43 | struct mqstat; |
44 | struct audit_watch; | 44 | struct audit_watch; |
45 | struct audit_tree; | 45 | struct audit_tree; |
46 | struct sk_buff; | ||
46 | 47 | ||
47 | struct audit_krule { | 48 | struct audit_krule { |
48 | int vers_ops; | 49 | int vers_ops; |
@@ -463,7 +464,7 @@ extern int audit_filter_user(int type); | |||
463 | extern int audit_filter_type(int type); | 464 | extern int audit_filter_type(int type); |
464 | extern int audit_rule_change(int type, __u32 portid, int seq, | 465 | extern int audit_rule_change(int type, __u32 portid, int seq, |
465 | void *data, size_t datasz); | 466 | void *data, size_t datasz); |
466 | extern int audit_list_rules_send(__u32 portid, int seq); | 467 | extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); |
467 | 468 | ||
468 | extern u32 audit_enabled; | 469 | extern u32 audit_enabled; |
469 | #else /* CONFIG_AUDIT */ | 470 | #else /* CONFIG_AUDIT */ |
diff --git a/include/linux/file.h b/include/linux/file.h index cbacf4faf447..4d69123377a2 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
@@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed) | |||
28 | 28 | ||
29 | struct fd { | 29 | struct fd { |
30 | struct file *file; | 30 | struct file *file; |
31 | int need_put; | 31 | unsigned int flags; |
32 | }; | 32 | }; |
33 | #define FDPUT_FPUT 1 | ||
34 | #define FDPUT_POS_UNLOCK 2 | ||
33 | 35 | ||
34 | static inline void fdput(struct fd fd) | 36 | static inline void fdput(struct fd fd) |
35 | { | 37 | { |
36 | if (fd.need_put) | 38 | if (fd.flags & FDPUT_FPUT) |
37 | fput(fd.file); | 39 | fput(fd.file); |
38 | } | 40 | } |
39 | 41 | ||
40 | extern struct file *fget(unsigned int fd); | 42 | extern struct file *fget(unsigned int fd); |
41 | extern struct file *fget_light(unsigned int fd, int *fput_needed); | 43 | extern struct file *fget_raw(unsigned int fd); |
44 | extern unsigned long __fdget(unsigned int fd); | ||
45 | extern unsigned long __fdget_raw(unsigned int fd); | ||
46 | extern unsigned long __fdget_pos(unsigned int fd); | ||
42 | 47 | ||
43 | static inline struct fd fdget(unsigned int fd) | 48 | static inline struct fd __to_fd(unsigned long v) |
44 | { | 49 | { |
45 | int b; | 50 | return (struct fd){(struct file *)(v & ~3),v & 3}; |
46 | struct file *f = fget_light(fd, &b); | ||
47 | return (struct fd){f,b}; | ||
48 | } | 51 | } |
49 | 52 | ||
50 | extern struct file *fget_raw(unsigned int fd); | 53 | static inline struct fd fdget(unsigned int fd) |
51 | extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); | 54 | { |
55 | return __to_fd(__fdget(fd)); | ||
56 | } | ||
52 | 57 | ||
53 | static inline struct fd fdget_raw(unsigned int fd) | 58 | static inline struct fd fdget_raw(unsigned int fd) |
54 | { | 59 | { |
55 | int b; | 60 | return __to_fd(__fdget_raw(fd)); |
56 | struct file *f = fget_raw_light(fd, &b); | ||
57 | return (struct fd){f,b}; | ||
58 | } | 61 | } |
59 | 62 | ||
60 | extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); | 63 | extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); |
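The rework above replaces the fget_light()/need_put pair with a single unsigned long return from __fdget(): the struct file pointer rides in the high bits and the FDPUT_* flags in the low two bits, which __to_fd() splits apart. A self-contained userspace sketch of the same packing trick (the struct definitions are stand-ins, not the kernel's):

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the kernel types; the packing trick is the point. */
struct file { int dummy; } __attribute__((aligned(4)));
struct fd { struct file *file; unsigned int flags; };

#define FDPUT_FPUT       1
#define FDPUT_POS_UNLOCK 2

static unsigned long pack_fd(struct file *f, unsigned int flags)
{
        /* alignment >= 4 guarantees the low two pointer bits are zero */
        return (unsigned long)f | (flags & 3);
}

static struct fd __to_fd(unsigned long v)
{
        return (struct fd){ (struct file *)(v & ~3UL), (unsigned int)(v & 3) };
}

int main(void)
{
        static struct file f;
        struct fd fd = __to_fd(pack_fd(&f, FDPUT_FPUT));

        assert(fd.file == &f && fd.flags == FDPUT_FPUT);
        printf("packed/unpacked ok: flags=%u\n", fd.flags);
        return 0;
}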
diff --git a/include/linux/fs.h b/include/linux/fs.h index 60829565e552..23b2a35d712e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
123 | /* File is opened with O_PATH; almost nothing can be done with it */ | 123 | /* File is opened with O_PATH; almost nothing can be done with it */ |
124 | #define FMODE_PATH ((__force fmode_t)0x4000) | 124 | #define FMODE_PATH ((__force fmode_t)0x4000) |
125 | 125 | ||
126 | /* File needs atomic accesses to f_pos */ | ||
127 | #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) | ||
128 | |||
126 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 129 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
127 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) | 130 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) |
128 | 131 | ||
@@ -780,13 +783,14 @@ struct file { | |||
780 | const struct file_operations *f_op; | 783 | const struct file_operations *f_op; |
781 | 784 | ||
782 | /* | 785 | /* |
783 | * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR. | 786 | * Protects f_ep_links, f_flags. |
784 | * Must not be taken from IRQ context. | 787 | * Must not be taken from IRQ context. |
785 | */ | 788 | */ |
786 | spinlock_t f_lock; | 789 | spinlock_t f_lock; |
787 | atomic_long_t f_count; | 790 | atomic_long_t f_count; |
788 | unsigned int f_flags; | 791 | unsigned int f_flags; |
789 | fmode_t f_mode; | 792 | fmode_t f_mode; |
793 | struct mutex f_pos_lock; | ||
790 | loff_t f_pos; | 794 | loff_t f_pos; |
791 | struct fown_struct f_owner; | 795 | struct fown_struct f_owner; |
792 | const struct cred *f_cred; | 796 | const struct cred *f_cred; |
@@ -808,7 +812,7 @@ struct file { | |||
808 | #ifdef CONFIG_DEBUG_WRITECOUNT | 812 | #ifdef CONFIG_DEBUG_WRITECOUNT |
809 | unsigned long f_mnt_write_state; | 813 | unsigned long f_mnt_write_state; |
810 | #endif | 814 | #endif |
811 | }; | 815 | } __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ |
812 | 816 | ||
813 | struct file_handle { | 817 | struct file_handle { |
814 | __u32 handle_bytes; | 818 | __u32 handle_bytes; |
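FMODE_ATOMIC_POS marks files whose position must be read and updated under the new f_pos_lock mutex (f_lock no longer covers f_pos), and the aligned(4) attribute guarantees the two low pointer bits that fdget() now borrows for its flags. A hedged sketch of the intended locking protocol around the __fdget_pos() declaration above; this is a simplification of the idea, not the literal fs/file.c implementation:

#include <linux/file.h>
#include <linux/fs.h>

/* Sketch only: take f_pos_lock when the file demands atomic f_pos
 * access, and record FDPUT_POS_UNLOCK so the put side drops it. */
static struct fd fdget_pos_sketch(unsigned int fd)
{
        struct fd f = fdget(fd);

        if (f.file && (f.file->f_mode & FMODE_ATOMIC_POS)) {
                mutex_lock(&f.file->f_pos_lock);
                f.flags |= FDPUT_POS_UNLOCK;
        }
        return f;
}

static void fdput_pos_sketch(struct fd f)
{
        if (f.flags & FDPUT_POS_UNLOCK)
                mutex_unlock(&f.file->f_pos_lock);
        fdput(f);
}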
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0437439bc047..39b81dc7d01a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -123,6 +123,10 @@ struct vm_area_struct; | |||
123 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ | 123 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
124 | __GFP_NO_KSWAPD) | 124 | __GFP_NO_KSWAPD) |
125 | 125 | ||
126 | /* | ||
127 | * GFP_THISNODE does not perform any reclaim, you most likely want to | ||
128 | * use __GFP_THISNODE to allocate from a given node without fallback! | ||
129 | */ | ||
126 | #ifdef CONFIG_NUMA | 130 | #ifdef CONFIG_NUMA |
127 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) | 131 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
128 | #else | 132 | #else |
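The new comment draws a distinction that several later hunks in this merge act on: __GFP_THISNODE alone pins the allocation to one node but still allows reclaim, while the GFP_THISNODE composite also sets __GFP_NOWARN | __GFP_NORETRY, so the allocator gives up quietly. A small sketch of the two behaviors (the helper name is illustrative):

#include <linux/gfp.h>
#include <linux/types.h>

/* Illustrative helper contrasting the two flags described above. */
static struct page *alloc_on_node(int nid, bool allow_reclaim)
{
        if (allow_reclaim)
                /* node-bound, but reclaim/compaction may still run */
                return alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);

        /* composite: no retry, no warning, effectively no reclaim effort */
        return alloc_pages_node(nid, GFP_THISNODE, 0);
}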
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 9231be9e90a2..11c0182a153b 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h | |||
@@ -262,6 +262,18 @@ union hdmi_vendor_any_infoframe { | |||
262 | struct hdmi_vendor_infoframe hdmi; | 262 | struct hdmi_vendor_infoframe hdmi; |
263 | }; | 263 | }; |
264 | 264 | ||
265 | /** | ||
266 | * union hdmi_infoframe - overall union of all abstract infoframe representations | ||
267 | * @any: generic infoframe | ||
268 | * @avi: avi infoframe | ||
269 | * @spd: spd infoframe | ||
270 | * @vendor: union of all vendor infoframes | ||
271 | * @audio: audio infoframe | ||
272 | * | ||
273 | * This is used by the generic pack function. This works since all infoframes | ||
274 | * have the same header which also indicates which type of infoframe should be | ||
275 | * packed. | ||
276 | */ | ||
265 | union hdmi_infoframe { | 277 | union hdmi_infoframe { |
266 | struct hdmi_any_infoframe any; | 278 | struct hdmi_any_infoframe any; |
267 | struct hdmi_avi_infoframe avi; | 279 | struct hdmi_avi_infoframe avi; |
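The kernel-doc explains why a single union works: every member starts with the same struct hdmi_any_infoframe header, so a generic packer can dispatch on its type field. A hedged usage sketch; it assumes the generic hdmi_infoframe_pack(union hdmi_infoframe *, void *, size_t) entry point this doc refers to, and the AVI settings are chosen purely for illustration:

#include <linux/hdmi.h>

/* Sketch: fill the AVI member, then let the generic packer dispatch
 * on the shared header (frame.any.type). */
static ssize_t pack_avi_example(void *buf, size_t size)
{
        union hdmi_infoframe frame;
        int ret;

        ret = hdmi_avi_infoframe_init(&frame.avi); /* sets type/version/length */
        if (ret < 0)
                return ret;
        frame.avi.colorspace = HDMI_COLORSPACE_RGB;

        return hdmi_infoframe_pack(&frame, buf, size);
}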
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5f2052c83154..9b61b9bf81ac 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone) | |||
590 | 590 | ||
591 | /* | 591 | /* |
592 | * The NUMA zonelists are doubled because we need zonelists that restrict the | 592 | * The NUMA zonelists are doubled because we need zonelists that restrict the |
593 | * allocations to a single node for GFP_THISNODE. | 593 | * allocations to a single node for __GFP_THISNODE. |
594 | * | 594 | * |
595 | * [0] : Zonelist with fallback | 595 | * [0] : Zonelist with fallback |
596 | * [1] : No fallback (GFP_THISNODE) | 596 | * [1] : No fallback (__GFP_THISNODE) |
597 | */ | 597 | */ |
598 | #define MAX_ZONELISTS 2 | 598 | #define MAX_ZONELISTS 2 |
599 | 599 | ||
diff --git a/include/linux/slab.h b/include/linux/slab.h index 9260abdd67df..b5b2df60299e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | |||
410 | * | 410 | * |
411 | * %GFP_NOWAIT - Allocation will not sleep. | 411 | * %GFP_NOWAIT - Allocation will not sleep. |
412 | * | 412 | * |
413 | * %GFP_THISNODE - Allocate node-local memory only. | 413 | * %__GFP_THISNODE - Allocate node-local memory only. |
414 | * | 414 | * |
415 | * %GFP_DMA - Allocation suitable for DMA. | 415 | * %GFP_DMA - Allocation suitable for DMA. |
416 | * Should only be used for kmalloc() caches. Otherwise, use a | 416 | * Should only be used for kmalloc() caches. Otherwise, use a |
diff --git a/include/net/sock.h b/include/net/sock.h index 5c3f7c3624aa..b9586a137cad 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1488,6 +1488,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) | |||
1488 | */ | 1488 | */ |
1489 | #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) | 1489 | #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) |
1490 | 1490 | ||
1491 | static inline void sock_release_ownership(struct sock *sk) | ||
1492 | { | ||
1493 | sk->sk_lock.owned = 0; | ||
1494 | } | ||
1495 | |||
1491 | /* | 1496 | /* |
1492 | * Macro so as to not evaluate some arguments when | 1497 | * Macro so as to not evaluate some arguments when |
1493 | * lockdep is not enabled. | 1498 | * lockdep is not enabled. |
@@ -2186,7 +2191,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
2186 | { | 2191 | { |
2187 | #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ | 2192 | #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ |
2188 | (1UL << SOCK_RCVTSTAMP) | \ | 2193 | (1UL << SOCK_RCVTSTAMP) | \ |
2189 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ | ||
2190 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ | 2194 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ |
2191 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ | 2195 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ |
2192 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) | 2196 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) |
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index d9ea3a73afe2..aefa2f6afa3b 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
@@ -510,6 +510,7 @@ typedef struct { | |||
510 | #define DRM_RADEON_GEM_GET_TILING 0x29 | 510 | #define DRM_RADEON_GEM_GET_TILING 0x29 |
511 | #define DRM_RADEON_GEM_BUSY 0x2a | 511 | #define DRM_RADEON_GEM_BUSY 0x2a |
512 | #define DRM_RADEON_GEM_VA 0x2b | 512 | #define DRM_RADEON_GEM_VA 0x2b |
513 | #define DRM_RADEON_GEM_OP 0x2c | ||
513 | 514 | ||
514 | #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) | 515 | #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) |
515 | #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) | 516 | #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) |
@@ -552,6 +553,7 @@ typedef struct { | |||
552 | #define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) | 553 | #define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) |
553 | #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) | 554 | #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) |
554 | #define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) | 555 | #define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) |
556 | #define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) | ||
555 | 557 | ||
556 | typedef struct drm_radeon_init { | 558 | typedef struct drm_radeon_init { |
557 | enum { | 559 | enum { |
@@ -884,6 +886,16 @@ struct drm_radeon_gem_pwrite { | |||
884 | uint64_t data_ptr; | 886 | uint64_t data_ptr; |
885 | }; | 887 | }; |
886 | 888 | ||
889 | /* Sets or returns a value associated with a buffer. */ | ||
890 | struct drm_radeon_gem_op { | ||
891 | uint32_t handle; /* buffer */ | ||
892 | uint32_t op; /* RADEON_GEM_OP_* */ | ||
893 | uint64_t value; /* input or return value */ | ||
894 | }; | ||
895 | |||
896 | #define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 | ||
897 | #define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1 | ||
898 | |||
887 | #define RADEON_VA_MAP 1 | 899 | #define RADEON_VA_MAP 1 |
888 | #define RADEON_VA_UNMAP 2 | 900 | #define RADEON_VA_UNMAP 2 |
889 | 901 | ||
@@ -919,6 +931,7 @@ struct drm_radeon_gem_va { | |||
919 | #define RADEON_CS_RING_COMPUTE 1 | 931 | #define RADEON_CS_RING_COMPUTE 1 |
920 | #define RADEON_CS_RING_DMA 2 | 932 | #define RADEON_CS_RING_DMA 2 |
921 | #define RADEON_CS_RING_UVD 3 | 933 | #define RADEON_CS_RING_UVD 3 |
934 | #define RADEON_CS_RING_VCE 4 | ||
922 | /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */ | 935 | /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */ |
923 | /* 0 = normal, + = higher priority, - = lower priority */ | 936 | /* 0 = normal, + = higher priority, - = lower priority */ |
924 | 937 | ||
@@ -987,6 +1000,13 @@ struct drm_radeon_cs { | |||
987 | #define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 | 1000 | #define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 |
988 | /* max engine clock - needed for OpenCL */ | 1001 | /* max engine clock - needed for OpenCL */ |
989 | #define RADEON_INFO_MAX_SCLK 0x1a | 1002 | #define RADEON_INFO_MAX_SCLK 0x1a |
1003 | /* version of VCE firmware */ | ||
1004 | #define RADEON_INFO_VCE_FW_VERSION 0x1b | ||
1005 | /* version of VCE feedback */ | ||
1006 | #define RADEON_INFO_VCE_FB_VERSION 0x1c | ||
1007 | #define RADEON_INFO_NUM_BYTES_MOVED 0x1d | ||
1008 | #define RADEON_INFO_VRAM_USAGE 0x1e | ||
1009 | #define RADEON_INFO_GTT_USAGE 0x1f | ||
990 | 1010 | ||
991 | 1011 | ||
992 | struct drm_radeon_info { | 1012 | struct drm_radeon_info { |
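The new DRM_RADEON_GEM_OP ioctl reads or writes a per-buffer value, currently the initial placement domain. A minimal userspace sketch of the GET side, built from the definitions above; fd is assumed to be an open radeon DRM device and handle a valid GEM handle:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Query which memory domain (e.g. RADEON_GEM_DOMAIN_VRAM) a buffer
 * was initially placed in, via the new GEM_OP ioctl. */
static int gem_get_initial_domain(int fd, uint32_t handle, uint64_t *domain)
{
        struct drm_radeon_gem_op op = {
                .handle = handle,
                .op     = RADEON_GEM_OP_GET_INITIAL_DOMAIN,
        };

        if (ioctl(fd, DRM_IOCTL_RADEON_GEM_OP, &op) < 0)
                return -1;

        *domain = op.value; /* returned in the value field */
        return 0;
}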
diff --git a/init/main.c b/init/main.c index eb03090cdced..9c7fd4c9249f 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -561,7 +561,6 @@ asmlinkage void __init start_kernel(void) | |||
561 | init_timers(); | 561 | init_timers(); |
562 | hrtimers_init(); | 562 | hrtimers_init(); |
563 | softirq_init(); | 563 | softirq_init(); |
564 | acpi_early_init(); | ||
565 | timekeeping_init(); | 564 | timekeeping_init(); |
566 | time_init(); | 565 | time_init(); |
567 | sched_clock_postinit(); | 566 | sched_clock_postinit(); |
@@ -613,6 +612,7 @@ asmlinkage void __init start_kernel(void) | |||
613 | calibrate_delay(); | 612 | calibrate_delay(); |
614 | pidmap_init(); | 613 | pidmap_init(); |
615 | anon_vma_init(); | 614 | anon_vma_init(); |
615 | acpi_early_init(); | ||
616 | #ifdef CONFIG_X86 | 616 | #ifdef CONFIG_X86 |
617 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | 617 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
618 | efi_enter_virtual_mode(); | 618 | efi_enter_virtual_mode(); |
diff --git a/ipc/msg.c b/ipc/msg.c --- a/ipc/msg.c +++ b/ipc/msg.c | |||
@@ -901,6 +901,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl | |||
901 | return -EINVAL; | 901 | return -EINVAL; |
902 | 902 | ||
903 | if (msgflg & MSG_COPY) { | 903 | if (msgflg & MSG_COPY) { |
904 | if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) | ||
905 | return -EINVAL; | ||
904 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); | 906 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); |
905 | if (IS_ERR(copy)) | 907 | if (IS_ERR(copy)) |
906 | return PTR_ERR(copy); | 908 | return PTR_ERR(copy); |
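The added check tightens the MSG_COPY contract: it may not be combined with MSG_EXCEPT and must be paired with IPC_NOWAIT, otherwise msgrcv() now returns -EINVAL. A userspace sketch of a conforming call; the MSG_COPY fallback define matches the uapi value, and the feature still requires CONFIG_CHECKPOINT_RESTORE:

#include <sys/msg.h>
#include <sys/types.h>

#ifndef MSG_COPY
#define MSG_COPY 040000 /* uapi value; glibc headers may not expose it */
#endif

struct msg_buf { long mtype; char mtext[64]; };

/* With MSG_COPY, msgtyp is an index into the queue and the message is
 * copied out rather than removed; IPC_NOWAIT is now mandatory. */
static ssize_t peek_message(int msqid, long index, struct msg_buf *out)
{
        return msgrcv(msqid, out, sizeof(out->mtext), index,
                      MSG_COPY | IPC_NOWAIT);
}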
diff --git a/kernel/audit.c b/kernel/audit.c index 34c5a2310fbf..3392d3e0254a 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
@@ -182,7 +182,7 @@ struct audit_buffer { | |||
182 | 182 | ||
183 | struct audit_reply { | 183 | struct audit_reply { |
184 | __u32 portid; | 184 | __u32 portid; |
185 | pid_t pid; | 185 | struct net *net; |
186 | struct sk_buff *skb; | 186 | struct sk_buff *skb; |
187 | }; | 187 | }; |
188 | 188 | ||
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest) | |||
500 | { | 500 | { |
501 | struct audit_netlink_list *dest = _dest; | 501 | struct audit_netlink_list *dest = _dest; |
502 | struct sk_buff *skb; | 502 | struct sk_buff *skb; |
503 | struct net *net = get_net_ns_by_pid(dest->pid); | 503 | struct net *net = dest->net; |
504 | struct audit_net *aunet = net_generic(net, audit_net_id); | 504 | struct audit_net *aunet = net_generic(net, audit_net_id); |
505 | 505 | ||
506 | /* wait for parent to finish and send an ACK */ | 506 | /* wait for parent to finish and send an ACK */ |
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest) | |||
510 | while ((skb = __skb_dequeue(&dest->q)) != NULL) | 510 | while ((skb = __skb_dequeue(&dest->q)) != NULL) |
511 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); | 511 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); |
512 | 512 | ||
513 | put_net(net); | ||
513 | kfree(dest); | 514 | kfree(dest); |
514 | 515 | ||
515 | return 0; | 516 | return 0; |
@@ -543,7 +544,7 @@ out_kfree_skb: | |||
543 | static int audit_send_reply_thread(void *arg) | 544 | static int audit_send_reply_thread(void *arg) |
544 | { | 545 | { |
545 | struct audit_reply *reply = (struct audit_reply *)arg; | 546 | struct audit_reply *reply = (struct audit_reply *)arg; |
546 | struct net *net = get_net_ns_by_pid(reply->pid); | 547 | struct net *net = reply->net; |
547 | struct audit_net *aunet = net_generic(net, audit_net_id); | 548 | struct audit_net *aunet = net_generic(net, audit_net_id); |
548 | 549 | ||
549 | mutex_lock(&audit_cmd_mutex); | 550 | mutex_lock(&audit_cmd_mutex); |
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg) | |||
552 | /* Ignore failure. It'll only happen if the sender goes away, | 553 | /* Ignore failure. It'll only happen if the sender goes away, |
553 | because our timeout is set to infinite. */ | 554 | because our timeout is set to infinite. */ |
554 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); | 555 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); |
556 | put_net(net); | ||
555 | kfree(reply); | 557 | kfree(reply); |
556 | return 0; | 558 | return 0; |
557 | } | 559 | } |
558 | /** | 560 | /** |
559 | * audit_send_reply - send an audit reply message via netlink | 561 | * audit_send_reply - send an audit reply message via netlink |
560 | * @portid: netlink port to which to send reply | 562 | * @request_skb: skb of request we are replying to (used to target the reply) |
561 | * @seq: sequence number | 563 | * @seq: sequence number |
562 | * @type: audit message type | 564 | * @type: audit message type |
563 | * @done: done (last) flag | 565 | * @done: done (last) flag |
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg) | |||
568 | * Allocates an skb, builds the netlink message, and sends it to the port id. | 570 | * Allocates an skb, builds the netlink message, and sends it to the port id. |
569 | * No failure notifications. | 571 | * No failure notifications. |
570 | */ | 572 | */ |
571 | static void audit_send_reply(__u32 portid, int seq, int type, int done, | 573 | static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, |
572 | int multi, const void *payload, int size) | 574 | int multi, const void *payload, int size) |
573 | { | 575 | { |
576 | u32 portid = NETLINK_CB(request_skb).portid; | ||
577 | struct net *net = sock_net(NETLINK_CB(request_skb).sk); | ||
574 | struct sk_buff *skb; | 578 | struct sk_buff *skb; |
575 | struct task_struct *tsk; | 579 | struct task_struct *tsk; |
576 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), | 580 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), |
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done, | |||
583 | if (!skb) | 587 | if (!skb) |
584 | goto out; | 588 | goto out; |
585 | 589 | ||
590 | reply->net = get_net(net); | ||
586 | reply->portid = portid; | 591 | reply->portid = portid; |
587 | reply->pid = task_pid_vnr(current); | ||
588 | reply->skb = skb; | 592 | reply->skb = skb; |
589 | 593 | ||
590 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); | 594 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); |
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb) | |||
673 | 677 | ||
674 | seq = nlmsg_hdr(skb)->nlmsg_seq; | 678 | seq = nlmsg_hdr(skb)->nlmsg_seq; |
675 | 679 | ||
676 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, | 680 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af)); |
677 | &af, sizeof(af)); | ||
678 | 681 | ||
679 | return 0; | 682 | return 0; |
680 | } | 683 | } |
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
794 | s.backlog = skb_queue_len(&audit_skb_queue); | 797 | s.backlog = skb_queue_len(&audit_skb_queue); |
795 | s.version = AUDIT_VERSION_LATEST; | 798 | s.version = AUDIT_VERSION_LATEST; |
796 | s.backlog_wait_time = audit_backlog_wait_time; | 799 | s.backlog_wait_time = audit_backlog_wait_time; |
797 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, | 800 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); |
798 | &s, sizeof(s)); | ||
799 | break; | 801 | break; |
800 | } | 802 | } |
801 | case AUDIT_SET: { | 803 | case AUDIT_SET: { |
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
905 | seq, data, nlmsg_len(nlh)); | 907 | seq, data, nlmsg_len(nlh)); |
906 | break; | 908 | break; |
907 | case AUDIT_LIST_RULES: | 909 | case AUDIT_LIST_RULES: |
908 | err = audit_list_rules_send(NETLINK_CB(skb).portid, seq); | 910 | err = audit_list_rules_send(skb, seq); |
909 | break; | 911 | break; |
910 | case AUDIT_TRIM: | 912 | case AUDIT_TRIM: |
911 | audit_trim_trees(); | 913 | audit_trim_trees(); |
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
970 | memcpy(sig_data->ctx, ctx, len); | 972 | memcpy(sig_data->ctx, ctx, len); |
971 | security_release_secctx(ctx, len); | 973 | security_release_secctx(ctx, len); |
972 | } | 974 | } |
973 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO, | 975 | audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0, |
974 | 0, 0, sig_data, sizeof(*sig_data) + len); | 976 | sig_data, sizeof(*sig_data) + len); |
975 | kfree(sig_data); | 977 | kfree(sig_data); |
976 | break; | 978 | break; |
977 | case AUDIT_TTY_GET: { | 979 | case AUDIT_TTY_GET: { |
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
983 | s.log_passwd = tsk->signal->audit_tty_log_passwd; | 985 | s.log_passwd = tsk->signal->audit_tty_log_passwd; |
984 | spin_unlock(&tsk->sighand->siglock); | 986 | spin_unlock(&tsk->sighand->siglock); |
985 | 987 | ||
986 | audit_send_reply(NETLINK_CB(skb).portid, seq, | 988 | audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); |
987 | AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); | ||
988 | break; | 989 | break; |
989 | } | 990 | } |
990 | case AUDIT_TTY_SET: { | 991 | case AUDIT_TTY_SET: { |
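The audit changes replace get_net_ns_by_pid() lookups in the reply threads with a network namespace captured from the request skb while it is still in hand, held with get_net() across the handoff and dropped with put_net() once the reply is sent. A hedged sketch of that reference pattern, with hypothetical names:

#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Sketch: pin the requester's netns at enqueue time so the deferred
 * sender never has to resolve it from a pid, which can race with the
 * sender exiting or changing namespaces. */
struct deferred_reply {
        struct net *net;
        struct sk_buff *skb;
};

static void reply_prepare(struct deferred_reply *r, struct sk_buff *request)
{
        r->net = get_net(sock_net(NETLINK_CB(request).sk)); /* +1 ref */
        /* ... build r->skb ... */
}

static void reply_send(struct deferred_reply *r)
{
        /* ... netlink_unicast() into r->net ... */
        put_net(r->net); /* -1 ref; the namespace may now be freed */
}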
diff --git a/kernel/audit.h b/kernel/audit.h index 57cc64d67718..8df132214606 100644 --- a/kernel/audit.h +++ b/kernel/audit.h | |||
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message); | |||
247 | 247 | ||
248 | struct audit_netlink_list { | 248 | struct audit_netlink_list { |
249 | __u32 portid; | 249 | __u32 portid; |
250 | pid_t pid; | 250 | struct net *net; |
251 | struct sk_buff_head q; | 251 | struct sk_buff_head q; |
252 | }; | 252 | }; |
253 | 253 | ||
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 14a78cca384e..92062fd6cc8c 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
32 | #include <net/net_namespace.h> | ||
33 | #include <net/sock.h> | ||
32 | #include "audit.h" | 34 | #include "audit.h" |
33 | 35 | ||
34 | /* | 36 | /* |
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data, | |||
1065 | 1067 | ||
1066 | /** | 1068 | /** |
1067 | * audit_list_rules_send - list the audit rules | 1069 | * audit_list_rules_send - list the audit rules |
1068 | * @portid: target portid for netlink audit messages | 1070 | * @request_skb: skb of request we are replying to (used to target the reply) |
1069 | * @seq: netlink audit message sequence (serial) number | 1071 | * @seq: netlink audit message sequence (serial) number |
1070 | */ | 1072 | */ |
1071 | int audit_list_rules_send(__u32 portid, int seq) | 1073 | int audit_list_rules_send(struct sk_buff *request_skb, int seq) |
1072 | { | 1074 | { |
1075 | u32 portid = NETLINK_CB(request_skb).portid; | ||
1076 | struct net *net = sock_net(NETLINK_CB(request_skb).sk); | ||
1073 | struct task_struct *tsk; | 1077 | struct task_struct *tsk; |
1074 | struct audit_netlink_list *dest; | 1078 | struct audit_netlink_list *dest; |
1075 | int err = 0; | 1079 | int err = 0; |
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq) | |||
1083 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); | 1087 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); |
1084 | if (!dest) | 1088 | if (!dest) |
1085 | return -ENOMEM; | 1089 | return -ENOMEM; |
1090 | dest->net = get_net(net); | ||
1086 | dest->portid = portid; | 1091 | dest->portid = portid; |
1087 | dest->pid = task_pid_vnr(current); | ||
1088 | skb_queue_head_init(&dest->q); | 1092 | skb_queue_head_init(&dest->q); |
1089 | 1093 | ||
1090 | mutex_lock(&audit_filter_mutex); | 1094 | mutex_lock(&audit_filter_mutex); |
diff --git a/kernel/profile.c b/kernel/profile.c index 6631e1ef55ab..ebdd9c1a86b4 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -549,14 +549,14 @@ static int create_hash_tables(void) | |||
549 | struct page *page; | 549 | struct page *page; |
550 | 550 | ||
551 | page = alloc_pages_exact_node(node, | 551 | page = alloc_pages_exact_node(node, |
552 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 552 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
553 | 0); | 553 | 0); |
554 | if (!page) | 554 | if (!page) |
555 | goto out_cleanup; | 555 | goto out_cleanup; |
556 | per_cpu(cpu_profile_hits, cpu)[1] | 556 | per_cpu(cpu_profile_hits, cpu)[1] |
557 | = (struct profile_hit *)page_address(page); | 557 | = (struct profile_hit *)page_address(page); |
558 | page = alloc_pages_exact_node(node, | 558 | page = alloc_pages_exact_node(node, |
559 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 559 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
560 | 0); | 560 | 0); |
561 | if (!page) | 561 | if (!page) |
562 | goto out_cleanup; | 562 | goto out_cleanup; |
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 43c2bcc35761..b30a2924ef14 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu) | |||
301 | if (unlikely(!sched_clock_running)) | 301 | if (unlikely(!sched_clock_running)) |
302 | return 0ull; | 302 | return 0ull; |
303 | 303 | ||
304 | preempt_disable(); | 304 | preempt_disable_notrace(); |
305 | scd = cpu_sdc(cpu); | 305 | scd = cpu_sdc(cpu); |
306 | 306 | ||
307 | if (cpu != smp_processor_id()) | 307 | if (cpu != smp_processor_id()) |
308 | clock = sched_clock_remote(scd); | 308 | clock = sched_clock_remote(scd); |
309 | else | 309 | else |
310 | clock = sched_clock_local(scd); | 310 | clock = sched_clock_local(scd); |
311 | preempt_enable(); | 311 | preempt_enable_notrace(); |
312 | 312 | ||
313 | return clock; | 313 | return clock; |
314 | } | 314 | } |
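sched_clock_cpu() can be called from tracer callbacks, and plain preempt_disable() is itself instrumented, so the clock read could re-enter the tracer. The _notrace variants only touch the preempt count. A short sketch of the same rule applied to any tracer-visible helper (the names are illustrative):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(u64, last_stamp);

/* Helpers reachable from tracing hooks must not call the instrumented
 * preempt_disable(), or the tracer recurses into itself. */
static u64 read_local_stamp(void)
{
        u64 v;

        preempt_disable_notrace();
        v = __this_cpu_read(last_stamp);
        preempt_enable_notrace();
        return v;
}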
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6edbef296ece..f5c6635b806c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3338,6 +3338,15 @@ recheck: | |||
3338 | return -EPERM; | 3338 | return -EPERM; |
3339 | } | 3339 | } |
3340 | 3340 | ||
3341 | /* | ||
3342 | * Can't set/change SCHED_DEADLINE policy at all for now | ||
3343 | * (safest behavior); in the future we would like to allow | ||
3344 | * unprivileged DL tasks to increase their relative deadline | ||
3345 | * or reduce their runtime (both ways reducing utilization) | ||
3346 | */ | ||
3347 | if (dl_policy(policy)) | ||
3348 | return -EPERM; | ||
3349 | |||
3341 | /* | 3350 | /* |
3342 | * Treat SCHED_IDLE as nice 20. Only allow a switch to | 3351 | * Treat SCHED_IDLE as nice 20. Only allow a switch to |
3343 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. | 3352 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 84571e09c907..01fbae5b97b7 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * | |||
293 | */ | 293 | */ |
294 | smp_call_function_single(min(cpu1, cpu2), | 294 | smp_call_function_single(min(cpu1, cpu2), |
295 | &irq_cpu_stop_queue_work, | 295 | &irq_cpu_stop_queue_work, |
296 | &call_args, 0); | 296 | &call_args, 1); |
297 | lg_local_unlock(&stop_cpus_lock); | 297 | lg_local_unlock(&stop_cpus_lock); |
298 | preempt_enable(); | 298 | preempt_enable(); |
299 | 299 | ||
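Flipping the last argument of smp_call_function_single() from 0 to 1 makes the call synchronous: it does not return until the IPI handler has run on the remote CPU, which matters here because call_args lives on the caller's stack. A minimal sketch of why wait=1 makes an on-stack argument safe:

#include <linux/smp.h>

static void remote_fn(void *info)
{
        *(int *)info = 1;
}

/* With wait=1 the on-stack 'done' cannot go out of scope while the
 * remote CPU is still dereferencing it. */
static int poke_cpu(int cpu)
{
        int done = 0; /* on-stack, like call_args in stop_two_cpus() */

        smp_call_function_single(cpu, remote_fn, &done, 1 /* wait */);
        return done; /* guaranteed to be 1 here */
}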
diff --git a/mm/Kconfig b/mm/Kconfig index 2d9f1504d75e..2888024e0b0a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -575,5 +575,5 @@ config PGTABLE_MAPPING | |||
575 | then you should select this. This causes zsmalloc to use page table | 575 | then you should select this. This causes zsmalloc to use page table |
576 | mapping rather than copying for object mapping. | 576 | mapping rather than copying for object mapping. |
577 | 577 | ||
578 | You can check speed with zsmalloc benchmark[1]. | 578 | You can check speed with zsmalloc benchmark: |
579 | [1] https://github.com/spartacus06/zsmalloc | 579 | https://github.com/spartacus06/zsmapbench |
diff --git a/mm/compaction.c b/mm/compaction.c index b48c5259ea33..918577595ea8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
251 | { | 251 | { |
252 | int nr_scanned = 0, total_isolated = 0; | 252 | int nr_scanned = 0, total_isolated = 0; |
253 | struct page *cursor, *valid_page = NULL; | 253 | struct page *cursor, *valid_page = NULL; |
254 | unsigned long nr_strict_required = end_pfn - blockpfn; | ||
255 | unsigned long flags; | 254 | unsigned long flags; |
256 | bool locked = false; | 255 | bool locked = false; |
257 | 256 | ||
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
264 | 263 | ||
265 | nr_scanned++; | 264 | nr_scanned++; |
266 | if (!pfn_valid_within(blockpfn)) | 265 | if (!pfn_valid_within(blockpfn)) |
267 | continue; | 266 | goto isolate_fail; |
267 | |||
268 | if (!valid_page) | 268 | if (!valid_page) |
269 | valid_page = page; | 269 | valid_page = page; |
270 | if (!PageBuddy(page)) | 270 | if (!PageBuddy(page)) |
271 | continue; | 271 | goto isolate_fail; |
272 | 272 | ||
273 | /* | 273 | /* |
274 | * The zone lock must be held to isolate freepages. | 274 | * The zone lock must be held to isolate freepages. |
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
289 | 289 | ||
290 | /* Recheck this is a buddy page under lock */ | 290 | /* Recheck this is a buddy page under lock */ |
291 | if (!PageBuddy(page)) | 291 | if (!PageBuddy(page)) |
292 | continue; | 292 | goto isolate_fail; |
293 | 293 | ||
294 | /* Found a free page, break it into order-0 pages */ | 294 | /* Found a free page, break it into order-0 pages */ |
295 | isolated = split_free_page(page); | 295 | isolated = split_free_page(page); |
296 | if (!isolated && strict) | ||
297 | break; | ||
298 | total_isolated += isolated; | 296 | total_isolated += isolated; |
299 | for (i = 0; i < isolated; i++) { | 297 | for (i = 0; i < isolated; i++) { |
300 | list_add(&page->lru, freelist); | 298 | list_add(&page->lru, freelist); |
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
305 | if (isolated) { | 303 | if (isolated) { |
306 | blockpfn += isolated - 1; | 304 | blockpfn += isolated - 1; |
307 | cursor += isolated - 1; | 305 | cursor += isolated - 1; |
306 | continue; | ||
308 | } | 307 | } |
308 | |||
309 | isolate_fail: | ||
310 | if (strict) | ||
311 | break; | ||
312 | else | ||
313 | continue; | ||
314 | |||
309 | } | 315 | } |
310 | 316 | ||
311 | trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); | 317 | trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); |
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
315 | * pages requested were isolated. If there were any failures, 0 is | 321 | * pages requested were isolated. If there were any failures, 0 is |
316 | * returned and CMA will fail. | 322 | * returned and CMA will fail. |
317 | */ | 323 | */ |
318 | if (strict && nr_strict_required > total_isolated) | 324 | if (strict && blockpfn < end_pfn) |
319 | total_isolated = 0; | 325 | total_isolated = 0; |
320 | 326 | ||
321 | if (locked) | 327 | if (locked) |
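After the rework every failure path funnels through the new isolate_fail label: strict (CMA) callers abort on the first bad page, others skip it, and strict success is judged by position (blockpfn < end_pfn) rather than the removed nr_strict_required counter. A self-contained model of that control-flow shape, with no kernel dependencies:

#include <stdbool.h>
#include <stddef.h>

/* Toy model of the loop above: ok[pos] stands in for the page checks. */
static size_t scan_range(const bool *ok, size_t start, size_t end, bool strict)
{
        size_t pos, taken = 0;

        for (pos = start; pos < end; pos++) {
                if (!ok[pos])
                        goto isolate_fail;
                taken++;
                continue;

isolate_fail:
                if (strict)
                        break;  /* one failure invalidates the whole block */
                /* non-strict: skip this entry and keep scanning */
        }

        if (strict && pos < end)
                return 0;       /* partial strict scans count as zero */
        return taken;
}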
diff --git a/mm/migrate.c b/mm/migrate.c index 482a33d89134..b494fdb9a636 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private, | |||
1158 | pm->node); | 1158 | pm->node); |
1159 | else | 1159 | else |
1160 | return alloc_pages_exact_node(pm->node, | 1160 | return alloc_pages_exact_node(pm->node, |
1161 | GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); | 1161 | GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | /* | 1164 | /* |
@@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page, | |||
1544 | struct page *newpage; | 1544 | struct page *newpage; |
1545 | 1545 | ||
1546 | newpage = alloc_pages_exact_node(nid, | 1546 | newpage = alloc_pages_exact_node(nid, |
1547 | (GFP_HIGHUSER_MOVABLE | GFP_THISNODE | | 1547 | (GFP_HIGHUSER_MOVABLE | |
1548 | __GFP_NOMEMALLOC | __GFP_NORETRY | | 1548 | __GFP_THISNODE | __GFP_NOMEMALLOC | |
1549 | __GFP_NOWARN) & | 1549 | __GFP_NORETRY | __GFP_NOWARN) & |
1550 | ~GFP_IOFS, 0); | 1550 | ~GFP_IOFS, 0); |
1551 | 1551 | ||
1552 | return newpage; | 1552 | return newpage; |
@@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
1747 | goto out_dropref; | 1747 | goto out_dropref; |
1748 | 1748 | ||
1749 | new_page = alloc_pages_node(node, | 1749 | new_page = alloc_pages_node(node, |
1750 | (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER); | 1750 | (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT, |
1751 | HPAGE_PMD_ORDER); | ||
1751 | if (!new_page) | 1752 | if (!new_page) |
1752 | goto out_fail; | 1753 | goto out_fail; |
1753 | 1754 | ||
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index de51c48c4393..4b65aa492fb6 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev | |||
538 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | 538 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); |
539 | struct net_device *real_dev = vlan->real_dev; | 539 | struct net_device *real_dev = vlan->real_dev; |
540 | 540 | ||
541 | if (saddr == NULL) | ||
542 | saddr = dev->dev_addr; | ||
543 | |||
541 | return dev_hard_header(skb, real_dev, type, daddr, saddr, len); | 544 | return dev_hard_header(skb, real_dev, type, daddr, saddr, len); |
542 | } | 545 | } |
543 | 546 | ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index ef66365b7354..93067ecdb9a2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1127,9 +1127,10 @@ static void br_multicast_query_received(struct net_bridge *br, | |||
1127 | struct net_bridge_port *port, | 1127 | struct net_bridge_port *port, |
1128 | struct bridge_mcast_querier *querier, | 1128 | struct bridge_mcast_querier *querier, |
1129 | int saddr, | 1129 | int saddr, |
1130 | bool is_general_query, | ||
1130 | unsigned long max_delay) | 1131 | unsigned long max_delay) |
1131 | { | 1132 | { |
1132 | if (saddr) | 1133 | if (saddr && is_general_query) |
1133 | br_multicast_update_querier_timer(br, querier, max_delay); | 1134 | br_multicast_update_querier_timer(br, querier, max_delay); |
1134 | else if (timer_pending(&querier->timer)) | 1135 | else if (timer_pending(&querier->timer)) |
1135 | return; | 1136 | return; |
@@ -1181,8 +1182,16 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1181 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; | 1182 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; |
1182 | } | 1183 | } |
1183 | 1184 | ||
1185 | /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer | ||
1186 | * all-systems destination addresses (224.0.0.1) for general queries | ||
1187 | */ | ||
1188 | if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) { | ||
1189 | err = -EINVAL; | ||
1190 | goto out; | ||
1191 | } | ||
1192 | |||
1184 | br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, | 1193 | br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, |
1185 | max_delay); | 1194 | !group, max_delay); |
1186 | 1195 | ||
1187 | if (!group) | 1196 | if (!group) |
1188 | goto out; | 1197 | goto out; |
@@ -1228,6 +1237,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1228 | unsigned long max_delay; | 1237 | unsigned long max_delay; |
1229 | unsigned long now = jiffies; | 1238 | unsigned long now = jiffies; |
1230 | const struct in6_addr *group = NULL; | 1239 | const struct in6_addr *group = NULL; |
1240 | bool is_general_query; | ||
1231 | int err = 0; | 1241 | int err = 0; |
1232 | 1242 | ||
1233 | spin_lock(&br->multicast_lock); | 1243 | spin_lock(&br->multicast_lock); |
@@ -1235,6 +1245,12 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1235 | (port && port->state == BR_STATE_DISABLED)) | 1245 | (port && port->state == BR_STATE_DISABLED)) |
1236 | goto out; | 1246 | goto out; |
1237 | 1247 | ||
1248 | /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */ | ||
1249 | if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) { | ||
1250 | err = -EINVAL; | ||
1251 | goto out; | ||
1252 | } | ||
1253 | |||
1238 | if (skb->len == sizeof(*mld)) { | 1254 | if (skb->len == sizeof(*mld)) { |
1239 | if (!pskb_may_pull(skb, sizeof(*mld))) { | 1255 | if (!pskb_may_pull(skb, sizeof(*mld))) { |
1240 | err = -EINVAL; | 1256 | err = -EINVAL; |
@@ -1256,8 +1272,19 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1256 | max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); | 1272 | max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); |
1257 | } | 1273 | } |
1258 | 1274 | ||
1275 | is_general_query = group && ipv6_addr_any(group); | ||
1276 | |||
1277 | /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer | ||
1278 | * all-nodes destination address (ff02::1) for general queries | ||
1279 | */ | ||
1280 | if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) { | ||
1281 | err = -EINVAL; | ||
1282 | goto out; | ||
1283 | } | ||
1284 | |||
1259 | br_multicast_query_received(br, port, &br->ip6_querier, | 1285 | br_multicast_query_received(br, port, &br->ip6_querier, |
1260 | !ipv6_addr_any(&ip6h->saddr), max_delay); | 1286 | !ipv6_addr_any(&ip6h->saddr), |
1287 | is_general_query, max_delay); | ||
1261 | 1288 | ||
1262 | if (!group) | 1289 | if (!group) |
1263 | goto out; | 1290 | goto out; |
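The bridge now validates queries per the RFCs before letting them influence snooping state: IGMP general queries must be addressed to 224.0.0.1, every MLD query must have a link-local source, MLD general queries must target ff02::1, and only general queries feed the querier election. A small userspace restatement of the two destination checks (the helper names are illustrative):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <string.h>

/* IGMP: a general query (group 0.0.0.0) must go to all-systems. */
static bool igmp_general_query_dst_ok(struct in_addr daddr)
{
        return daddr.s_addr == htonl(INADDR_ALLHOSTS_GROUP); /* 224.0.0.1 */
}

/* MLD: a general query (unspecified group) must go to all-nodes. */
static bool mld_general_query_dst_ok(const struct in6_addr *daddr)
{
        struct in6_addr all_nodes;

        inet_pton(AF_INET6, "ff02::1", &all_nodes);
        return memcmp(daddr, &all_nodes, sizeof(all_nodes)) == 0;
}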
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5d6236d9fdce..869c7afe3b07 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -2838,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum); | |||
2838 | 2838 | ||
2839 | /** | 2839 | /** |
2840 | * skb_segment - Perform protocol segmentation on skb. | 2840 | * skb_segment - Perform protocol segmentation on skb. |
2841 | * @skb: buffer to segment | 2841 | * @head_skb: buffer to segment |
2842 | * @features: features for the output path (see dev->features) | 2842 | * @features: features for the output path (see dev->features) |
2843 | * | 2843 | * |
2844 | * This function performs segmentation on the given skb. It returns | 2844 | * This function performs segmentation on the given skb. It returns |
2845 | * a pointer to the first in a list of new skbs for the segments. | 2845 | * a pointer to the first in a list of new skbs for the segments. |
2846 | * In case of error it returns ERR_PTR(err). | 2846 | * In case of error it returns ERR_PTR(err). |
2847 | */ | 2847 | */ |
2848 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | 2848 | struct sk_buff *skb_segment(struct sk_buff *head_skb, |
2849 | netdev_features_t features) | ||
2849 | { | 2850 | { |
2850 | struct sk_buff *segs = NULL; | 2851 | struct sk_buff *segs = NULL; |
2851 | struct sk_buff *tail = NULL; | 2852 | struct sk_buff *tail = NULL; |
2852 | struct sk_buff *fskb = skb_shinfo(skb)->frag_list; | 2853 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; |
2853 | skb_frag_t *skb_frag = skb_shinfo(skb)->frags; | 2854 | skb_frag_t *frag = skb_shinfo(head_skb)->frags; |
2854 | unsigned int mss = skb_shinfo(skb)->gso_size; | 2855 | unsigned int mss = skb_shinfo(head_skb)->gso_size; |
2855 | unsigned int doffset = skb->data - skb_mac_header(skb); | 2856 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); |
2857 | struct sk_buff *frag_skb = head_skb; | ||
2856 | unsigned int offset = doffset; | 2858 | unsigned int offset = doffset; |
2857 | unsigned int tnl_hlen = skb_tnl_header_len(skb); | 2859 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); |
2858 | unsigned int headroom; | 2860 | unsigned int headroom; |
2859 | unsigned int len; | 2861 | unsigned int len; |
2860 | __be16 proto; | 2862 | __be16 proto; |
2861 | bool csum; | 2863 | bool csum; |
2862 | int sg = !!(features & NETIF_F_SG); | 2864 | int sg = !!(features & NETIF_F_SG); |
2863 | int nfrags = skb_shinfo(skb)->nr_frags; | 2865 | int nfrags = skb_shinfo(head_skb)->nr_frags; |
2864 | int err = -ENOMEM; | 2866 | int err = -ENOMEM; |
2865 | int i = 0; | 2867 | int i = 0; |
2866 | int pos; | 2868 | int pos; |
2867 | 2869 | ||
2868 | proto = skb_network_protocol(skb); | 2870 | proto = skb_network_protocol(head_skb); |
2869 | if (unlikely(!proto)) | 2871 | if (unlikely(!proto)) |
2870 | return ERR_PTR(-EINVAL); | 2872 | return ERR_PTR(-EINVAL); |
2871 | 2873 | ||
2872 | csum = !!can_checksum_protocol(features, proto); | 2874 | csum = !!can_checksum_protocol(features, proto); |
2873 | __skb_push(skb, doffset); | 2875 | __skb_push(head_skb, doffset); |
2874 | headroom = skb_headroom(skb); | 2876 | headroom = skb_headroom(head_skb); |
2875 | pos = skb_headlen(skb); | 2877 | pos = skb_headlen(head_skb); |
2876 | 2878 | ||
2877 | do { | 2879 | do { |
2878 | struct sk_buff *nskb; | 2880 | struct sk_buff *nskb; |
2879 | skb_frag_t *frag; | 2881 | skb_frag_t *nskb_frag; |
2880 | int hsize; | 2882 | int hsize; |
2881 | int size; | 2883 | int size; |
2882 | 2884 | ||
2883 | len = skb->len - offset; | 2885 | len = head_skb->len - offset; |
2884 | if (len > mss) | 2886 | if (len > mss) |
2885 | len = mss; | 2887 | len = mss; |
2886 | 2888 | ||
2887 | hsize = skb_headlen(skb) - offset; | 2889 | hsize = skb_headlen(head_skb) - offset; |
2888 | if (hsize < 0) | 2890 | if (hsize < 0) |
2889 | hsize = 0; | 2891 | hsize = 0; |
2890 | if (hsize > len || !sg) | 2892 | if (hsize > len || !sg) |
2891 | hsize = len; | 2893 | hsize = len; |
2892 | 2894 | ||
2893 | if (!hsize && i >= nfrags && skb_headlen(fskb) && | 2895 | if (!hsize && i >= nfrags && skb_headlen(list_skb) && |
2894 | (skb_headlen(fskb) == len || sg)) { | 2896 | (skb_headlen(list_skb) == len || sg)) { |
2895 | BUG_ON(skb_headlen(fskb) > len); | 2897 | BUG_ON(skb_headlen(list_skb) > len); |
2896 | 2898 | ||
2897 | i = 0; | 2899 | i = 0; |
2898 | nfrags = skb_shinfo(fskb)->nr_frags; | 2900 | nfrags = skb_shinfo(list_skb)->nr_frags; |
2899 | skb_frag = skb_shinfo(fskb)->frags; | 2901 | frag = skb_shinfo(list_skb)->frags; |
2900 | pos += skb_headlen(fskb); | 2902 | frag_skb = list_skb; |
2903 | pos += skb_headlen(list_skb); | ||
2901 | 2904 | ||
2902 | while (pos < offset + len) { | 2905 | while (pos < offset + len) { |
2903 | BUG_ON(i >= nfrags); | 2906 | BUG_ON(i >= nfrags); |
2904 | 2907 | ||
2905 | size = skb_frag_size(skb_frag); | 2908 | size = skb_frag_size(frag); |
2906 | if (pos + size > offset + len) | 2909 | if (pos + size > offset + len) |
2907 | break; | 2910 | break; |
2908 | 2911 | ||
2909 | i++; | 2912 | i++; |
2910 | pos += size; | 2913 | pos += size; |
2911 | skb_frag++; | 2914 | frag++; |
2912 | } | 2915 | } |
2913 | 2916 | ||
2914 | nskb = skb_clone(fskb, GFP_ATOMIC); | 2917 | nskb = skb_clone(list_skb, GFP_ATOMIC); |
2915 | fskb = fskb->next; | 2918 | list_skb = list_skb->next; |
2916 | 2919 | ||
2917 | if (unlikely(!nskb)) | 2920 | if (unlikely(!nskb)) |
2918 | goto err; | 2921 | goto err; |
@@ -2933,7 +2936,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
2933 | __skb_push(nskb, doffset); | 2936 | __skb_push(nskb, doffset); |
2934 | } else { | 2937 | } else { |
2935 | nskb = __alloc_skb(hsize + doffset + headroom, | 2938 | nskb = __alloc_skb(hsize + doffset + headroom, |
2936 | GFP_ATOMIC, skb_alloc_rx_flag(skb), | 2939 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), |
2937 | NUMA_NO_NODE); | 2940 | NUMA_NO_NODE); |
2938 | 2941 | ||
2939 | if (unlikely(!nskb)) | 2942 | if (unlikely(!nskb)) |
@@ -2949,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
2949 | segs = nskb; | 2952 | segs = nskb; |
2950 | tail = nskb; | 2953 | tail = nskb; |
2951 | 2954 | ||
2952 | __copy_skb_header(nskb, skb); | 2955 | __copy_skb_header(nskb, head_skb); |
2953 | nskb->mac_len = skb->mac_len; | 2956 | nskb->mac_len = head_skb->mac_len; |
2954 | 2957 | ||
2955 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); | 2958 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); |
2956 | 2959 | ||
2957 | skb_copy_from_linear_data_offset(skb, -tnl_hlen, | 2960 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, |
2958 | nskb->data - tnl_hlen, | 2961 | nskb->data - tnl_hlen, |
2959 | doffset + tnl_hlen); | 2962 | doffset + tnl_hlen); |
2960 | 2963 | ||
@@ -2963,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
2963 | 2966 | ||
2964 | if (!sg) { | 2967 | if (!sg) { |
2965 | nskb->ip_summed = CHECKSUM_NONE; | 2968 | nskb->ip_summed = CHECKSUM_NONE; |
2966 | nskb->csum = skb_copy_and_csum_bits(skb, offset, | 2969 | nskb->csum = skb_copy_and_csum_bits(head_skb, offset, |
2967 | skb_put(nskb, len), | 2970 | skb_put(nskb, len), |
2968 | len, 0); | 2971 | len, 0); |
2969 | continue; | 2972 | continue; |
2970 | } | 2973 | } |
2971 | 2974 | ||
2972 | frag = skb_shinfo(nskb)->frags; | 2975 | nskb_frag = skb_shinfo(nskb)->frags; |
2973 | 2976 | ||
2974 | skb_copy_from_linear_data_offset(skb, offset, | 2977 | skb_copy_from_linear_data_offset(head_skb, offset, |
2975 | skb_put(nskb, hsize), hsize); | 2978 | skb_put(nskb, hsize), hsize); |
2976 | 2979 | ||
2977 | skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; | 2980 | skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & |
2981 | SKBTX_SHARED_FRAG; | ||
2978 | 2982 | ||
2979 | while (pos < offset + len) { | 2983 | while (pos < offset + len) { |
2980 | if (i >= nfrags) { | 2984 | if (i >= nfrags) { |
2981 | BUG_ON(skb_headlen(fskb)); | 2985 | BUG_ON(skb_headlen(list_skb)); |
2982 | 2986 | ||
2983 | i = 0; | 2987 | i = 0; |
2984 | nfrags = skb_shinfo(fskb)->nr_frags; | 2988 | nfrags = skb_shinfo(list_skb)->nr_frags; |
2985 | skb_frag = skb_shinfo(fskb)->frags; | 2989 | frag = skb_shinfo(list_skb)->frags; |
2990 | frag_skb = list_skb; | ||
2986 | 2991 | ||
2987 | BUG_ON(!nfrags); | 2992 | BUG_ON(!nfrags); |
2988 | 2993 | ||
2989 | fskb = fskb->next; | 2994 | list_skb = list_skb->next; |
2990 | } | 2995 | } |
2991 | 2996 | ||
2992 | if (unlikely(skb_shinfo(nskb)->nr_frags >= | 2997 | if (unlikely(skb_shinfo(nskb)->nr_frags >= |
@@ -2997,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
2997 | goto err; | 3002 | goto err; |
2998 | } | 3003 | } |
2999 | 3004 | ||
3000 | *frag = *skb_frag; | 3005 | if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) |
3001 | __skb_frag_ref(frag); | 3006 | goto err; |
3002 | size = skb_frag_size(frag); | 3007 | |
3008 | *nskb_frag = *frag; | ||
3009 | __skb_frag_ref(nskb_frag); | ||
3010 | size = skb_frag_size(nskb_frag); | ||
3003 | 3011 | ||
3004 | if (pos < offset) { | 3012 | if (pos < offset) { |
3005 | frag->page_offset += offset - pos; | 3013 | nskb_frag->page_offset += offset - pos; |
3006 | skb_frag_size_sub(frag, offset - pos); | 3014 | skb_frag_size_sub(nskb_frag, offset - pos); |
3007 | } | 3015 | } |
3008 | 3016 | ||
3009 | skb_shinfo(nskb)->nr_frags++; | 3017 | skb_shinfo(nskb)->nr_frags++; |
3010 | 3018 | ||
3011 | if (pos + size <= offset + len) { | 3019 | if (pos + size <= offset + len) { |
3012 | i++; | 3020 | i++; |
3013 | skb_frag++; | 3021 | frag++; |
3014 | pos += size; | 3022 | pos += size; |
3015 | } else { | 3023 | } else { |
3016 | skb_frag_size_sub(frag, pos + size - (offset + len)); | 3024 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); |
3017 | goto skip_fraglist; | 3025 | goto skip_fraglist; |
3018 | } | 3026 | } |
3019 | 3027 | ||
3020 | frag++; | 3028 | nskb_frag++; |
3021 | } | 3029 | } |
3022 | 3030 | ||
3023 | skip_fraglist: | 3031 | skip_fraglist: |
@@ -3031,7 +3039,7 @@ perform_csum_check: | |||
3031 | nskb->len - doffset, 0); | 3039 | nskb->len - doffset, 0); |
3032 | nskb->ip_summed = CHECKSUM_NONE; | 3040 | nskb->ip_summed = CHECKSUM_NONE; |
3033 | } | 3041 | } |
3034 | } while ((offset += len) < skb->len); | 3042 | } while ((offset += len) < head_skb->len); |
3035 | 3043 | ||
3036 | return segs; | 3044 | return segs; |
3037 | 3045 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 5b6a9431b017..c0fc6bdad1e3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -2357,10 +2357,13 @@ void release_sock(struct sock *sk) | |||
2357 | if (sk->sk_backlog.tail) | 2357 | if (sk->sk_backlog.tail) |
2358 | __release_sock(sk); | 2358 | __release_sock(sk); |
2359 | 2359 | ||
2360 | /* Warning : release_cb() might need to release sk ownership, | ||
2361 | * ie call sock_release_ownership(sk) before us. | ||
2362 | */ | ||
2360 | if (sk->sk_prot->release_cb) | 2363 | if (sk->sk_prot->release_cb) |
2361 | sk->sk_prot->release_cb(sk); | 2364 | sk->sk_prot->release_cb(sk); |
2362 | 2365 | ||
2363 | sk->sk_lock.owned = 0; | 2366 | sock_release_ownership(sk); |
2364 | if (waitqueue_active(&sk->sk_lock.wq)) | 2367 | if (waitqueue_active(&sk->sk_lock.wq)) |
2365 | wake_up(&sk->sk_lock.wq); | 2368 | wake_up(&sk->sk_lock.wq); |
2366 | spin_unlock_bh(&sk->sk_lock.slock); | 2369 | spin_unlock_bh(&sk->sk_lock.slock); |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index bb075fc9a14f..3b01959bf4bb 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force) | |||
208 | } | 208 | } |
209 | 209 | ||
210 | work = frag_mem_limit(nf) - nf->low_thresh; | 210 | work = frag_mem_limit(nf) - nf->low_thresh; |
211 | while (work > 0) { | 211 | while (work > 0 || force) { |
212 | spin_lock(&nf->lru_lock); | 212 | spin_lock(&nf->lru_lock); |
213 | 213 | ||
214 | if (list_empty(&nf->lru_list)) { | 214 | if (list_empty(&nf->lru_list)) { |
@@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, | |||
278 | 278 | ||
279 | atomic_inc(&qp->refcnt); | 279 | atomic_inc(&qp->refcnt); |
280 | hlist_add_head(&qp->list, &hb->chain); | 280 | hlist_add_head(&qp->list, &hb->chain); |
281 | inet_frag_lru_add(nf, qp); | ||
281 | spin_unlock(&hb->chain_lock); | 282 | spin_unlock(&hb->chain_lock); |
282 | read_unlock(&f->lock); | 283 | read_unlock(&f->lock); |
283 | inet_frag_lru_add(nf, qp); | 284 | |
284 | return qp; | 285 | return qp; |
285 | } | 286 | } |
286 | 287 | ||
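[Editor's note] Two halves of one fix: the evictor now keeps looping when force is set, and inet_frag_intern() puts the new queue on the LRU before dropping the chain lock, so a forced eviction pass cannot find a queue that is reachable from the hash but absent from the LRU. A sketch of the publish-before-unlock ordering (collapsed to a single lock here; the kernel uses a separate lru_lock):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *hash_next, *lru_next; };

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *hash_head, *lru_head;

/* Insert on BOTH lists before unlocking: once the node is reachable
 * via the hash chain, an evictor may expect to find it on the LRU. */
static void intern(struct node *q)
{
    pthread_mutex_lock(&chain_lock);
    q->hash_next = hash_head;
    hash_head = q;
    q->lru_next = lru_head;    /* previously done after the unlock */
    lru_head = q;
    pthread_mutex_unlock(&chain_lock);
}

int main(void)
{
    struct node n = {0};
    intern(&n);
    printf("on hash: %d, on lru: %d\n", hash_head == &n, lru_head == &n);
    return 0;
}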
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f0eb4e337ec8..17a11e65e57f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -767,6 +767,17 @@ void tcp_release_cb(struct sock *sk) | |||
767 | if (flags & (1UL << TCP_TSQ_DEFERRED)) | 767 | if (flags & (1UL << TCP_TSQ_DEFERRED)) |
768 | tcp_tsq_handler(sk); | 768 | tcp_tsq_handler(sk); |
769 | 769 | ||
770 | /* Here begins the tricky part : | ||
771 | * We are called from release_sock() with : | ||
772 | * 1) BH disabled | ||
773 | * 2) sk_lock.slock spinlock held | ||
774 | * 3) socket owned by us (sk->sk_lock.owned == 1) | ||
775 | * | ||
776 | * But following code is meant to be called from BH handlers, | ||
777 | * so we should keep BH disabled, but early release socket ownership | ||
778 | */ | ||
779 | sock_release_ownership(sk); | ||
780 | |||
770 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { | 781 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { |
771 | tcp_write_timer_handler(sk); | 782 | tcp_write_timer_handler(sk); |
772 | __sock_put(sk); | 783 | __sock_put(sk); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index fdbfeca36d63..344e972426df 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1103,8 +1103,11 @@ retry: | |||
1103 | * Lifetime is greater than REGEN_ADVANCE time units. In particular, | 1103 | * Lifetime is greater than REGEN_ADVANCE time units. In particular, |
1104 | * an implementation must not create a temporary address with a zero | 1104 | * an implementation must not create a temporary address with a zero |
1105 | * Preferred Lifetime. | 1105 | * Preferred Lifetime. |
1106 | * Use age calculation as in addrconf_verify to avoid unnecessary | ||
1107 | * temporary addresses being generated. | ||
1106 | */ | 1108 | */ |
1107 | if (tmp_prefered_lft <= regen_advance) { | 1109 | age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; |
1110 | if (tmp_prefered_lft <= regen_advance + age) { | ||
1108 | in6_ifa_put(ifp); | 1111 | in6_ifa_put(ifp); |
1109 | in6_dev_put(idev); | 1112 | in6_dev_put(idev); |
1110 | ret = -1; | 1113 | ret = -1; |
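[Editor's note] The new check subtracts the address's current age from its remaining usefulness: regenerating is pointless if the preferred lifetime left is no more than the regeneration lead time. A worked sketch of the arithmetic (HZ and the fuzz constant are assumed values here, not necessarily the kernel's):

#include <stdio.h>

#define HZ 100                              /* ticks per second (assumed) */
#define ADDRCONF_TIMER_FUZZ_MINUS (HZ / 2)  /* assumed for illustration */

/* Should we refuse to create a temporary address? */
static int too_short(unsigned long now, unsigned long tstamp,
                     unsigned long prefered_lft, unsigned long regen_advance)
{
    /* same age formula as addrconf_verify(): ticks -> seconds */
    unsigned long age = (now - tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

    /* old check: prefered_lft <= regen_advance (age was ignored) */
    return prefered_lft <= regen_advance + age;
}

int main(void)
{
    /* created 60s ago, 70s preferred lifetime, 15s lead time: only
     * ~10s of preferred life remains, so regenerating now would just
     * spawn another short-lived address -- the new check says no. */
    printf("refuse=%d\n", too_short(60 * HZ, 0, 70, 15));
    return 0;
}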
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c index cf77f3abfd06..447a7fbd1bb6 100644 --- a/net/ipv6/exthdrs_offload.c +++ b/net/ipv6/exthdrs_offload.c | |||
@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void) | |||
25 | int ret; | 25 | int ret; |
26 | 26 | ||
27 | ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); | 27 | ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); |
28 | if (!ret) | 28 | if (ret) |
29 | goto out; | 29 | goto out; |
30 | 30 | ||
31 | ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); | 31 | ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); |
32 | if (!ret) | 32 | if (ret) |
33 | goto out_rt; | 33 | goto out_rt; |
34 | 34 | ||
35 | out: | 35 | out: |
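[Editor's note] Both hunks fix the same inverted test: inet6_add_offload() returns 0 on success, so `if (!ret)` bailed out on the happy path and pressed on after a failure. The corrected init/unwind ladder, sketched standalone (function names hypothetical):

#include <stdio.h>

static int add_offload_a(void) { return 0; }   /* 0 == success */
static int add_offload_b(void) { return 0; }
static void del_offload_a(void) { }

static int offload_init(void)
{
    int ret;

    ret = add_offload_a();
    if (ret)                  /* non-zero means failure: bail out */
        goto out;

    ret = add_offload_b();
    if (ret)                  /* undo step A before returning */
        goto out_a;

out:
    return ret;
out_a:
    del_offload_a();
    goto out;
}

int main(void)
{
    printf("init: %d\n", offload_init());
    return 0;
}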
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 11dac21e6586..fba54a407bb2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1513,7 +1513,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1513 | if (!table) | 1513 | if (!table) |
1514 | goto out; | 1514 | goto out; |
1515 | 1515 | ||
1516 | rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table); | 1516 | rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); |
1517 | 1517 | ||
1518 | if (!rt) { | 1518 | if (!rt) { |
1519 | err = -ENOMEM; | 1519 | err = -ENOMEM; |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 735d0f60c83a..85d9d94c0a3c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -112,7 +112,6 @@ struct l2tp_net { | |||
112 | spinlock_t l2tp_session_hlist_lock; | 112 | spinlock_t l2tp_session_hlist_lock; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 115 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
117 | 116 | ||
118 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) | 117 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) |
@@ -1863,7 +1862,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete); | |||
1863 | /* We come here whenever a session's send_seq, cookie_len or | 1862 | /* We come here whenever a session's send_seq, cookie_len or |
1864 | * l2specific_len parameters are set. | 1863 | * l2specific_len parameters are set. |
1865 | */ | 1864 | */ |
1866 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version) | 1865 | void l2tp_session_set_header_len(struct l2tp_session *session, int version) |
1867 | { | 1866 | { |
1868 | if (version == L2TP_HDR_VER_2) { | 1867 | if (version == L2TP_HDR_VER_2) { |
1869 | session->hdr_len = 6; | 1868 | session->hdr_len = 6; |
@@ -1876,6 +1875,7 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int version) | ||
1876 | } | 1875 | } |
1877 | 1876 | ||
1878 | } | 1877 | } |
1878 | EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); | ||
1879 | 1879 | ||
1880 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | 1880 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) |
1881 | { | 1881 | { |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 1f01ba3435bc..3f93ccd6ba97 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -263,6 +263,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
263 | int length, int (*payload_hook)(struct sk_buff *skb)); | 263 | int length, int (*payload_hook)(struct sk_buff *skb)); |
264 | int l2tp_session_queue_purge(struct l2tp_session *session); | 264 | int l2tp_session_queue_purge(struct l2tp_session *session); |
265 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | 265 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); |
266 | void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
266 | 267 | ||
267 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, | 268 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, |
268 | int hdr_len); | 269 | int hdr_len); |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 4cfd722e9153..bd7387adea9e 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -578,8 +578,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info) | ||
578 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | 578 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) |
579 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | 579 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); |
580 | 580 | ||
581 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | 581 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) { |
582 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | 582 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); |
583 | l2tp_session_set_header_len(session, session->tunnel->version); | ||
584 | } | ||
583 | 585 | ||
584 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | 586 | if (info->attrs[L2TP_ATTR_LNS_MODE]) |
585 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | 587 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index be5fadf34739..5990919356a5 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -254,12 +254,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int | |||
254 | po = pppox_sk(sk); | 254 | po = pppox_sk(sk); |
255 | ppp_input(&po->chan, skb); | 255 | ppp_input(&po->chan, skb); |
256 | } else { | 256 | } else { |
257 | l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", | 257 | l2tp_dbg(session, PPPOL2TP_MSG_DATA, |
258 | session->name); | 258 | "%s: recv %d byte data frame, passing to L2TP socket\n", |
259 | session->name, data_len); | ||
259 | 260 | ||
260 | /* Not bound. Nothing we can do, so discard. */ | 261 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
261 | atomic_long_inc(&session->stats.rx_errors); | 262 | atomic_long_inc(&session->stats.rx_errors); |
262 | kfree_skb(skb); | 263 | kfree_skb(skb); |
264 | } | ||
263 | } | 265 | } |
264 | 266 | ||
265 | return; | 267 | return; |
@@ -1312,6 +1314,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, | |||
1312 | po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : | 1314 | po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : |
1313 | PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; | 1315 | PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; |
1314 | } | 1316 | } |
1317 | l2tp_session_set_header_len(session, session->tunnel->version); | ||
1315 | l2tp_info(session, PPPOL2TP_MSG_CONTROL, | 1318 | l2tp_info(session, PPPOL2TP_MSG_CONTROL, |
1316 | "%s: set send_seq=%d\n", | 1319 | "%s: set send_seq=%d\n", |
1317 | session->name, session->send_seq); | 1320 | session->name, session->send_seq); |
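[Editor's note] The l2tp changes enforce one invariant: session->hdr_len is derived from send_seq and the tunnel version, so every path that flips send_seq (the netlink modify above, the pppol2tp setsockopt here) must call the now-exported l2tp_session_set_header_len(). A sketch of that cache-refresh discipline (the v2 sizes happen to match L2TPv2, but treat them as illustrative):

#include <stdio.h>
#include <stdbool.h>

struct session {
    bool send_seq;
    int version;
    int hdr_len;    /* derived: must track send_seq and version */
};

/* Recompute the cached header length from its inputs -- the analogue
 * of l2tp_session_set_header_len(); sizes here are made up. */
static void session_set_header_len(struct session *s)
{
    s->hdr_len = (s->version == 2) ? 6 : 4;
    if (s->send_seq)
        s->hdr_len += 4;
}

/* Every writer of send_seq must refresh the derived value. */
static void session_set_send_seq(struct session *s, bool on)
{
    s->send_seq = on;
    session_set_header_len(s);
}

int main(void)
{
    struct session s = { .version = 2 };

    session_set_header_len(&s);
    printf("hdr_len=%d\n", s.hdr_len);   /* 6 */
    session_set_send_seq(&s, true);
    printf("hdr_len=%d\n", s.hdr_len);   /* 10 */
    return 0;
}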
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index f43613a97dd6..0c1ecfdf9a12 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -100,6 +100,12 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, | |||
100 | } | 100 | } |
101 | max_bw = max(max_bw, width); | 101 | max_bw = max(max_bw, width); |
102 | } | 102 | } |
103 | |||
104 | /* use the configured bandwidth in case of monitor interface */ | ||
105 | sdata = rcu_dereference(local->monitor_sdata); | ||
106 | if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf) | ||
107 | max_bw = max(max_bw, conf->def.width); | ||
108 | |||
103 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
104 | 110 | ||
105 | return max_bw; | 111 | return max_bw; |
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c index 2802f9d9279d..ad8b377b4b9f 100644 --- a/net/mac80211/mesh_ps.c +++ b/net/mac80211/mesh_ps.c | |||
@@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta) | |||
36 | sdata->vif.addr); | 36 | sdata->vif.addr); |
37 | nullfunc->frame_control = fc; | 37 | nullfunc->frame_control = fc; |
38 | nullfunc->duration_id = 0; | 38 | nullfunc->duration_id = 0; |
39 | nullfunc->seq_ctrl = 0; | ||
39 | /* no address resolution for this frame -> set addr 1 immediately */ | 40 | /* no address resolution for this frame -> set addr 1 immediately */ |
40 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); | 41 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); |
41 | memset(skb_put(skb, 2), 0, 2); /* append QoS control field */ | 42 | memset(skb_put(skb, 2), 0, 2); /* append QoS control field */ |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a023b432143b..137a192e64bc 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -1206,6 +1206,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, | |||
1206 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); | 1206 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); |
1207 | memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); | 1207 | memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); |
1208 | memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); | 1208 | memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); |
1209 | nullfunc->seq_ctrl = 0; | ||
1209 | 1210 | ||
1210 | skb->priority = tid; | 1211 | skb->priority = tid; |
1211 | skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); | 1212 | skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 1313145e3b86..a07d55e75698 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -273,11 +273,12 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | |||
273 | 273 | ||
274 | void qdisc_list_add(struct Qdisc *q) | 274 | void qdisc_list_add(struct Qdisc *q) |
275 | { | 275 | { |
276 | struct Qdisc *root = qdisc_dev(q)->qdisc; | 276 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { |
277 | struct Qdisc *root = qdisc_dev(q)->qdisc; | ||
277 | 278 | ||
278 | WARN_ON_ONCE(root == &noop_qdisc); | 279 | WARN_ON_ONCE(root == &noop_qdisc); |
279 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) | ||
280 | list_add_tail(&q->list, &root->list); | 280 | list_add_tail(&q->list, &root->list); |
281 | } | ||
281 | } | 282 | } |
282 | EXPORT_SYMBOL(qdisc_list_add); | 283 | EXPORT_SYMBOL(qdisc_list_add); |
283 | 284 | ||
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 08ef7a42c0e4..21e251766eb1 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c | |||
@@ -601,6 +601,7 @@ static int fq_resize(struct Qdisc *sch, u32 log) | |||
601 | { | 601 | { |
602 | struct fq_sched_data *q = qdisc_priv(sch); | 602 | struct fq_sched_data *q = qdisc_priv(sch); |
603 | struct rb_root *array; | 603 | struct rb_root *array; |
604 | void *old_fq_root; | ||
604 | u32 idx; | 605 | u32 idx; |
605 | 606 | ||
606 | if (q->fq_root && log == q->fq_trees_log) | 607 | if (q->fq_root && log == q->fq_trees_log) |
@@ -615,13 +616,19 @@ static int fq_resize(struct Qdisc *sch, u32 log) | |||
615 | for (idx = 0; idx < (1U << log); idx++) | 616 | for (idx = 0; idx < (1U << log); idx++) |
616 | array[idx] = RB_ROOT; | 617 | array[idx] = RB_ROOT; |
617 | 618 | ||
618 | if (q->fq_root) { | 619 | sch_tree_lock(sch); |
619 | fq_rehash(q, q->fq_root, q->fq_trees_log, array, log); | 620 | |
620 | fq_free(q->fq_root); | 621 | old_fq_root = q->fq_root; |
621 | } | 622 | if (old_fq_root) |
623 | fq_rehash(q, old_fq_root, q->fq_trees_log, array, log); | ||
624 | |||
622 | q->fq_root = array; | 625 | q->fq_root = array; |
623 | q->fq_trees_log = log; | 626 | q->fq_trees_log = log; |
624 | 627 | ||
628 | sch_tree_unlock(sch); | ||
629 | |||
630 | fq_free(old_fq_root); | ||
631 | |||
625 | return 0; | 632 | return 0; |
626 | } | 633 | } |
627 | 634 | ||
@@ -697,9 +704,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) | |||
697 | q->flow_refill_delay = usecs_to_jiffies(usecs_delay); | 704 | q->flow_refill_delay = usecs_to_jiffies(usecs_delay); |
698 | } | 705 | } |
699 | 706 | ||
700 | if (!err) | 707 | if (!err) { |
708 | sch_tree_unlock(sch); | ||
701 | err = fq_resize(sch, fq_log); | 709 | err = fq_resize(sch, fq_log); |
702 | 710 | sch_tree_lock(sch); | |
711 | } | ||
703 | while (sch->q.qlen > sch->limit) { | 712 | while (sch->q.qlen > sch->limit) { |
704 | struct sk_buff *skb = fq_dequeue(sch); | 713 | struct sk_buff *skb = fq_dequeue(sch); |
705 | 714 | ||
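[Editor's note] fq_resize() now follows the allocate / lock / swap / unlock / free sequence: the new tree array is allocated with no lock held, the qdisc tree lock covers only the rehash and pointer swap, and the old array is freed after unlocking; fq_change() correspondingly drops the tree lock around the call. A generic sketch of the pattern with a pthread mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;
static size_t table_len;

static int resize(size_t new_len)
{
    /* 1. allocate outside the lock (may be slow, may fail) */
    int *new_table = calloc(new_len, sizeof(*new_table));
    int *old_table;

    if (!new_table)
        return -1;

    /* 2. short critical section: migrate entries, swap the pointer */
    pthread_mutex_lock(&tree_lock);
    old_table = table;
    if (old_table)
        memcpy(new_table, old_table,
               (table_len < new_len ? table_len : new_len) * sizeof(int));
    table = new_table;
    table_len = new_len;
    pthread_mutex_unlock(&tree_lock);

    /* 3. free the old table outside the lock */
    free(old_table);
    return 0;
}

int main(void)
{
    resize(8);
    resize(16);
    printf("table_len=%zu\n", table_len);
    free(table);
    return 0;
}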
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 632090b961c3..3a1767ef3201 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1421,8 +1421,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk) | |||
1421 | BUG_ON(!list_empty(&chunk->list)); | 1421 | BUG_ON(!list_empty(&chunk->list)); |
1422 | list_del_init(&chunk->transmitted_list); | 1422 | list_del_init(&chunk->transmitted_list); |
1423 | 1423 | ||
1424 | /* Free the chunk skb data and the SCTP_chunk stub itself. */ | 1424 | consume_skb(chunk->skb); |
1425 | dev_kfree_skb(chunk->skb); | 1425 | consume_skb(chunk->auth_chunk); |
1426 | 1426 | ||
1427 | SCTP_DBG_OBJCNT_DEC(chunk); | 1427 | SCTP_DBG_OBJCNT_DEC(chunk); |
1428 | kmem_cache_free(sctp_chunk_cachep, chunk); | 1428 | kmem_cache_free(sctp_chunk_cachep, chunk); |
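[Editor's note] Switching to consume_skb() changes two things: the buffers are released as normally consumed rather than dropped (kfree_skb() feeds the drop monitor), and consume_skb(NULL) is a no-op, which lets chunk->auth_chunk be freed unconditionally here instead of in the sm_statefuns.c paths just below. A sketch of the NULL-tolerant release idiom:

#include <stdio.h>
#include <stdlib.h>

struct buf { char *data; };

/* Like consume_skb(): normal release, NULL-safe, so callers can free
 * optional members unconditionally. */
static void consume_buf(struct buf *b)
{
    if (!b)
        return;
    free(b->data);
    free(b);
}

struct chunk { struct buf *skb, *auth_chunk; };

static void chunk_destroy(struct chunk *c)
{
    consume_buf(c->skb);
    consume_buf(c->auth_chunk);   /* may be NULL: no conditional needed */
}

int main(void)
{
    struct chunk c = { .skb = calloc(1, sizeof(struct buf)), .auth_chunk = NULL };
    chunk_destroy(&c);
    printf("freed without a NULL check at the call site\n");
    return 0;
}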
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ae65b6b5973a..01e002430c85 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -760,7 +760,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, | |||
760 | 760 | ||
761 | /* Make sure that we and the peer are AUTH capable */ | 761 | /* Make sure that we and the peer are AUTH capable */ |
762 | if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { | 762 | if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { |
763 | kfree_skb(chunk->auth_chunk); | ||
764 | sctp_association_free(new_asoc); | 763 | sctp_association_free(new_asoc); |
765 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | 764 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); |
766 | } | 765 | } |
@@ -775,10 +774,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, | |||
775 | auth.transport = chunk->transport; | 774 | auth.transport = chunk->transport; |
776 | 775 | ||
777 | ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); | 776 | ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); |
778 | |||
779 | /* We can now safely free the auth_chunk clone */ | ||
780 | kfree_skb(chunk->auth_chunk); | ||
781 | |||
782 | if (ret != SCTP_IERROR_NO_ERROR) { | 777 | if (ret != SCTP_IERROR_NO_ERROR) { |
783 | sctp_association_free(new_asoc); | 778 | sctp_association_free(new_asoc); |
784 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | 779 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); |
diff --git a/net/socket.c b/net/socket.c index 879933aaed4c..a19ae1968d37 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -450,16 +450,17 @@ EXPORT_SYMBOL(sockfd_lookup); | |||
450 | 450 | ||
451 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) | 451 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) |
452 | { | 452 | { |
453 | struct file *file; | 453 | struct fd f = fdget(fd); |
454 | struct socket *sock; | 454 | struct socket *sock; |
455 | 455 | ||
456 | *err = -EBADF; | 456 | *err = -EBADF; |
457 | file = fget_light(fd, fput_needed); | 457 | if (f.file) { |
458 | if (file) { | 458 | sock = sock_from_file(f.file, err); |
459 | sock = sock_from_file(file, err); | 459 | if (likely(sock)) { |
460 | if (sock) | 460 | *fput_needed = f.flags; |
461 | return sock; | 461 | return sock; |
462 | fput_light(file, *fput_needed); | 462 | } |
463 | fdput(f); | ||
463 | } | 464 | } |
464 | return NULL; | 465 | return NULL; |
465 | } | 466 | } |
@@ -1985,6 +1986,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
1985 | { | 1986 | { |
1986 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | 1987 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
1987 | return -EFAULT; | 1988 | return -EFAULT; |
1989 | |||
1990 | if (kmsg->msg_namelen < 0) | ||
1991 | return -EINVAL; | ||
1992 | |||
1988 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 1993 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
1989 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); | 1994 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
1990 | return 0; | 1995 | return 0; |
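[Editor's note] msg_namelen arrives from userspace as a signed int; clamping only the upper bound leaves negative values to be reinterpreted as huge sizes by later copies, so the sign check must come first. The check order, sketched standalone (the storage size is illustrative):

#include <stdio.h>

#define SOCKADDR_STORAGE_SIZE 128   /* stands in for sizeof(struct sockaddr_storage) */

static int sanitize_namelen(int *namelen)
{
    if (*namelen < 0)               /* reject before any use as a size */
        return -1;                  /* -EINVAL in the kernel */
    if (*namelen > SOCKADDR_STORAGE_SIZE)
        *namelen = SOCKADDR_STORAGE_SIZE;
    return 0;
}

int main(void)
{
    int a = -4, b = 1000, c = 16;

    printf("%d ", sanitize_namelen(&a));         /* -1: rejected */
    printf("%d(%d) ", sanitize_namelen(&b), b);  /* 0(128): clamped */
    printf("%d(%d)\n", sanitize_namelen(&c), c); /* 0(16): untouched */
    return 0;
}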
diff --git a/net/tipc/config.c b/net/tipc/config.c index e74eef2e7490..e6d721692ae0 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr, | |||
376 | struct tipc_cfg_msg_hdr *req_hdr; | 376 | struct tipc_cfg_msg_hdr *req_hdr; |
377 | struct tipc_cfg_msg_hdr *rep_hdr; | 377 | struct tipc_cfg_msg_hdr *rep_hdr; |
378 | struct sk_buff *rep_buf; | 378 | struct sk_buff *rep_buf; |
379 | int ret; | ||
380 | 379 | ||
381 | /* Validate configuration message header (ignore invalid message) */ | 380 | /* Validate configuration message header (ignore invalid message) */ |
382 | req_hdr = (struct tipc_cfg_msg_hdr *)buf; | 381 | req_hdr = (struct tipc_cfg_msg_hdr *)buf; |
@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr, | |||
398 | memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr)); | 397 | memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr)); |
399 | rep_hdr->tcm_len = htonl(rep_buf->len); | 398 | rep_hdr->tcm_len = htonl(rep_buf->len); |
400 | rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST); | 399 | rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST); |
401 | 400 | tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, | |
402 | ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, | 401 | rep_buf->len); |
403 | rep_buf->len); | ||
404 | if (ret < 0) | ||
405 | pr_err("Sending cfg reply message failed, no memory\n"); | ||
406 | |||
407 | kfree_skb(rep_buf); | 402 | kfree_skb(rep_buf); |
408 | } | 403 | } |
409 | } | 404 | } |
diff --git a/net/tipc/handler.c b/net/tipc/handler.c index e4bc8a296744..1fabf160501f 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c | |||
@@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument) | |||
58 | 58 | ||
59 | spin_lock_bh(&qitem_lock); | 59 | spin_lock_bh(&qitem_lock); |
60 | if (!handler_enabled) { | 60 | if (!handler_enabled) { |
61 | pr_err("Signal request ignored by handler\n"); | ||
62 | spin_unlock_bh(&qitem_lock); | 61 | spin_unlock_bh(&qitem_lock); |
63 | return -ENOPROTOOPT; | 62 | return -ENOPROTOOPT; |
64 | } | 63 | } |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 48302be175ce..042e8e3cabc0 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -941,17 +941,48 @@ int tipc_nametbl_init(void) | |||
941 | return 0; | 941 | return 0; |
942 | } | 942 | } |
943 | 943 | ||
944 | /** | ||
945 | * tipc_purge_publications - remove all publications for a given type | ||
946 | * | ||
947 | * tipc_nametbl_lock must be held when calling this function | ||
948 | */ | ||
949 | static void tipc_purge_publications(struct name_seq *seq) | ||
950 | { | ||
951 | struct publication *publ, *safe; | ||
952 | struct sub_seq *sseq; | ||
953 | struct name_info *info; | ||
954 | |||
955 | if (!seq->sseqs) { | ||
956 | nameseq_delete_empty(seq); | ||
957 | return; | ||
958 | } | ||
959 | sseq = seq->sseqs; | ||
960 | info = sseq->info; | ||
961 | list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { | ||
962 | tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, | ||
963 | publ->ref, publ->key); | ||
964 | } | ||
965 | } | ||
966 | |||
944 | void tipc_nametbl_stop(void) | 967 | void tipc_nametbl_stop(void) |
945 | { | 968 | { |
946 | u32 i; | 969 | u32 i; |
970 | struct name_seq *seq; | ||
971 | struct hlist_head *seq_head; | ||
972 | struct hlist_node *safe; | ||
947 | 973 | ||
948 | /* Verify name table is empty, then release it */ | 974 | /* Verify name table is empty and purge any lingering |
975 | * publications, then release the name table | ||
976 | */ | ||
949 | write_lock_bh(&tipc_nametbl_lock); | 977 | write_lock_bh(&tipc_nametbl_lock); |
950 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | 978 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { |
951 | if (hlist_empty(&table.types[i])) | 979 | if (hlist_empty(&table.types[i])) |
952 | continue; | 980 | continue; |
953 | pr_err("nametbl_stop(): orphaned hash chain detected\n"); | 981 | seq_head = &table.types[i]; |
954 | break; | 982 | hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { |
983 | tipc_purge_publications(seq); | ||
984 | } | ||
985 | continue; | ||
955 | } | 986 | } |
956 | kfree(table.types); | 987 | kfree(table.types); |
957 | table.types = NULL; | 988 | table.types = NULL; |
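[Editor's note] tipc_nametbl_stop() now purges lingering publications instead of just warning about orphaned chains, and both it and tipc_purge_publications() use the _safe iterator variants because the loop body frees the entry it stands on. The same purge shape in plain C:

#include <stdio.h>
#include <stdlib.h>

struct publ { int key; struct publ *next; };

/* Remove and free every entry: cache the next pointer before freeing
 * the current node, as list_for_each_entry_safe() does. */
static void purge(struct publ **head)
{
    struct publ *p = *head, *safe;

    while (p) {
        safe = p->next;            /* grab before p is freed */
        printf("removing publication key=%d\n", p->key);
        free(p);
        p = safe;
    }
    *head = NULL;
}

int main(void)
{
    struct publ *head = NULL;

    for (int i = 0; i < 3; i++) {
        struct publ *p = malloc(sizeof(*p));
        p->key = i;
        p->next = head;
        head = p;
    }
    purge(&head);
    return 0;
}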
diff --git a/net/tipc/server.c b/net/tipc/server.c index 373979789a73..646a930eefbf 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con); | |||
87 | static void tipc_conn_kref_release(struct kref *kref) | 87 | static void tipc_conn_kref_release(struct kref *kref) |
88 | { | 88 | { |
89 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 89 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); |
90 | struct tipc_server *s = con->server; | ||
91 | 90 | ||
92 | if (con->sock) { | 91 | if (con->sock) { |
93 | tipc_sock_release_local(con->sock); | 92 | tipc_sock_release_local(con->sock); |
@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref) | |||
95 | } | 94 | } |
96 | 95 | ||
97 | tipc_clean_outqueues(con); | 96 | tipc_clean_outqueues(con); |
98 | |||
99 | if (con->conid) | ||
100 | s->tipc_conn_shutdown(con->conid, con->usr_data); | ||
101 | |||
102 | kfree(con); | 97 | kfree(con); |
103 | } | 98 | } |
104 | 99 | ||
@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con) | |||
181 | struct tipc_server *s = con->server; | 176 | struct tipc_server *s = con->server; |
182 | 177 | ||
183 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { | 178 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { |
179 | if (con->conid) | ||
180 | s->tipc_conn_shutdown(con->conid, con->usr_data); | ||
181 | |||
184 | spin_lock_bh(&s->idr_lock); | 182 | spin_lock_bh(&s->idr_lock); |
185 | idr_remove(&s->conn_idr, con->conid); | 183 | idr_remove(&s->conn_idr, con->conid); |
186 | s->idr_in_use--; | 184 | s->idr_in_use--; |
@@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, | |||
429 | list_add_tail(&e->list, &con->outqueue); | 427 | list_add_tail(&e->list, &con->outqueue); |
430 | spin_unlock_bh(&con->outqueue_lock); | 428 | spin_unlock_bh(&con->outqueue_lock); |
431 | 429 | ||
432 | if (test_bit(CF_CONNECTED, &con->flags)) | 430 | if (test_bit(CF_CONNECTED, &con->flags)) { |
433 | if (!queue_work(s->send_wq, &con->swork)) | 431 | if (!queue_work(s->send_wq, &con->swork)) |
434 | conn_put(con); | 432 | conn_put(con); |
435 | 433 | } else { | |
434 | conn_put(con); | ||
435 | } | ||
436 | return 0; | 436 | return 0; |
437 | } | 437 | } |
438 | 438 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a4cf274455aa..0ed0eaa62f29 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -997,7 +997,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo) | |||
997 | 997 | ||
998 | for (;;) { | 998 | for (;;) { |
999 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 999 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1000 | if (skb_queue_empty(&sk->sk_receive_queue)) { | 1000 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { |
1001 | if (sock->state == SS_DISCONNECTING) { | 1001 | if (sock->state == SS_DISCONNECTING) { |
1002 | err = -ENOTCONN; | 1002 | err = -ENOTCONN; |
1003 | break; | 1003 | break; |
@@ -1623,7 +1623,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) | |||
1623 | for (;;) { | 1623 | for (;;) { |
1624 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, | 1624 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
1625 | TASK_INTERRUPTIBLE); | 1625 | TASK_INTERRUPTIBLE); |
1626 | if (skb_queue_empty(&sk->sk_receive_queue)) { | 1626 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { |
1627 | release_sock(sk); | 1627 | release_sock(sk); |
1628 | timeo = schedule_timeout(timeo); | 1628 | timeo = schedule_timeout(timeo); |
1629 | lock_sock(sk); | 1629 | lock_sock(sk); |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 7cb0bd5b1176..11c9ae00837d 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, | |||
96 | { | 96 | { |
97 | struct tipc_subscriber *subscriber = sub->subscriber; | 97 | struct tipc_subscriber *subscriber = sub->subscriber; |
98 | struct kvec msg_sect; | 98 | struct kvec msg_sect; |
99 | int ret; | ||
100 | 99 | ||
101 | msg_sect.iov_base = (void *)&sub->evt; | 100 | msg_sect.iov_base = (void *)&sub->evt; |
102 | msg_sect.iov_len = sizeof(struct tipc_event); | 101 | msg_sect.iov_len = sizeof(struct tipc_event); |
103 | |||
104 | sub->evt.event = htohl(event, sub->swap); | 102 | sub->evt.event = htohl(event, sub->swap); |
105 | sub->evt.found_lower = htohl(found_lower, sub->swap); | 103 | sub->evt.found_lower = htohl(found_lower, sub->swap); |
106 | sub->evt.found_upper = htohl(found_upper, sub->swap); | 104 | sub->evt.found_upper = htohl(found_upper, sub->swap); |
107 | sub->evt.port.ref = htohl(port_ref, sub->swap); | 105 | sub->evt.port.ref = htohl(port_ref, sub->swap); |
108 | sub->evt.port.node = htohl(node, sub->swap); | 106 | sub->evt.port.node = htohl(node, sub->swap); |
109 | ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, | 107 | tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base, |
110 | msg_sect.iov_base, msg_sect.iov_len); | 108 | msg_sect.iov_len); |
111 | if (ret < 0) | ||
112 | pr_err("Sending subscription event failed, no memory\n"); | ||
113 | } | 109 | } |
114 | 110 | ||
115 | /** | 111 | /** |
@@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub) | |||
153 | /* The spin lock per subscriber is used to protect its members */ | 149 | /* The spin lock per subscriber is used to protect its members */ |
154 | spin_lock_bh(&subscriber->lock); | 150 | spin_lock_bh(&subscriber->lock); |
155 | 151 | ||
156 | /* Validate if the connection related to the subscriber is | ||
157 | * closed (in case subscriber is terminating) | ||
158 | */ | ||
159 | if (subscriber->conid == 0) { | ||
160 | spin_unlock_bh(&subscriber->lock); | ||
161 | return; | ||
162 | } | ||
163 | |||
164 | /* Validate timeout (in case subscription is being cancelled) */ | 152 | /* Validate timeout (in case subscription is being cancelled) */ |
165 | if (sub->timeout == TIPC_WAIT_FOREVER) { | 153 | if (sub->timeout == TIPC_WAIT_FOREVER) { |
166 | spin_unlock_bh(&subscriber->lock); | 154 | spin_unlock_bh(&subscriber->lock); |
@@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber) | |||
215 | 203 | ||
216 | spin_lock_bh(&subscriber->lock); | 204 | spin_lock_bh(&subscriber->lock); |
217 | 205 | ||
218 | /* Invalidate subscriber reference */ | ||
219 | subscriber->conid = 0; | ||
220 | |||
221 | /* Destroy any existing subscriptions for subscriber */ | 206 | /* Destroy any existing subscriptions for subscriber */ |
222 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 207 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
223 | subscription_list) { | 208 | subscription_list) { |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 29fc8bee9702..ce6ec6c2f4de 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -163,9 +163,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | |||
163 | 163 | ||
164 | static inline unsigned int unix_hash_fold(__wsum n) | 164 | static inline unsigned int unix_hash_fold(__wsum n) |
165 | { | 165 | { |
166 | unsigned int hash = (__force unsigned int)n; | 166 | unsigned int hash = (__force unsigned int)csum_fold(n); |
167 | 167 | ||
168 | hash ^= hash>>16; | ||
169 | hash ^= hash>>8; | 168 | hash ^= hash>>8; |
170 | return hash&(UNIX_HASH_SIZE-1); | 169 | return hash&(UNIX_HASH_SIZE-1); |
171 | } | 170 | } |
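[Editor's note] unix_hash_fold() previously xor-folded the raw 32-bit partial checksum; running it through csum_fold() first collapses the ones-complement carries, so two partial sums describing the same bytes now hash identically. A standalone sketch (csum_fold() reimplemented here for illustration):

#include <stdio.h>
#include <stdint.h>

#define UNIX_HASH_SIZE 256

/* ones-complement fold of a 32-bit partial checksum to 16 bits,
 * the same operation as the kernel's csum_fold() */
static uint16_t csum_fold(uint32_t csum)
{
    csum = (csum & 0xffff) + (csum >> 16);
    csum = (csum & 0xffff) + (csum >> 16);
    return (uint16_t)~csum;
}

static unsigned int unix_hash_fold(uint32_t n)
{
    unsigned int hash = csum_fold(n);   /* 32 -> 16 bits first */

    hash ^= hash >> 8;                  /* then fold into the table range */
    return hash & (UNIX_HASH_SIZE - 1);
}

int main(void)
{
    /* 0x0001fffe and 0x0000ffff are the same ones-complement sum with
     * the carry in different places; csum_fold() makes them agree */
    printf("%u %u\n", unix_hash_fold(0x0001fffe), unix_hash_fold(0x0000ffff));
    return 0;
}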
diff --git a/net/wireless/core.c b/net/wireless/core.c index 010892b81a06..a3bf18d11609 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -788,8 +788,6 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, | |||
788 | default: | 788 | default: |
789 | break; | 789 | break; |
790 | } | 790 | } |
791 | |||
792 | wdev->beacon_interval = 0; | ||
793 | } | 791 | } |
794 | 792 | ||
795 | static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | 793 | static int cfg80211_netdev_notifier_call(struct notifier_block *nb, |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 276e84b8a8e5..10085de886fe 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
@@ -330,7 +330,8 @@ static void write_src(void) | |||
330 | printf("\tPTR\t_text + %#llx\n", | 330 | printf("\tPTR\t_text + %#llx\n", |
331 | table[i].addr - _text); | 331 | table[i].addr - _text); |
332 | else | 332 | else |
333 | printf("\tPTR\t%#llx\n", table[i].addr); | 333 | printf("\tPTR\t_text - %#llx\n", |
334 | _text - table[i].addr); | ||
334 | } else { | 335 | } else { |
335 | printf("\tPTR\t%#llx\n", table[i].addr); | 336 | printf("\tPTR\t%#llx\n", table[i].addr); |
336 | } | 337 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 850296a1e0ff..8d0a84436674 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -3616,6 +3616,19 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec, | |||
3616 | } | 3616 | } |
3617 | } | 3617 | } |
3618 | 3618 | ||
3619 | static void alc_no_shutup(struct hda_codec *codec) | ||
3620 | { | ||
3621 | } | ||
3622 | |||
3623 | static void alc_fixup_no_shutup(struct hda_codec *codec, | ||
3624 | const struct hda_fixup *fix, int action) | ||
3625 | { | ||
3626 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | ||
3627 | struct alc_spec *spec = codec->spec; | ||
3628 | spec->shutup = alc_no_shutup; | ||
3629 | } | ||
3630 | } | ||
3631 | |||
3619 | static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, | 3632 | static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, |
3620 | const struct hda_fixup *fix, int action) | 3633 | const struct hda_fixup *fix, int action) |
3621 | { | 3634 | { |
@@ -3844,6 +3857,7 @@ enum { | |||
3844 | ALC269_FIXUP_HP_GPIO_LED, | 3857 | ALC269_FIXUP_HP_GPIO_LED, |
3845 | ALC269_FIXUP_INV_DMIC, | 3858 | ALC269_FIXUP_INV_DMIC, |
3846 | ALC269_FIXUP_LENOVO_DOCK, | 3859 | ALC269_FIXUP_LENOVO_DOCK, |
3860 | ALC269_FIXUP_NO_SHUTUP, | ||
3847 | ALC286_FIXUP_SONY_MIC_NO_PRESENCE, | 3861 | ALC286_FIXUP_SONY_MIC_NO_PRESENCE, |
3848 | ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, | 3862 | ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, |
3849 | ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, | 3863 | ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, |
@@ -4020,6 +4034,10 @@ static const struct hda_fixup alc269_fixups[] = { | |||
4020 | .type = HDA_FIXUP_FUNC, | 4034 | .type = HDA_FIXUP_FUNC, |
4021 | .v.func = alc_fixup_inv_dmic_0x12, | 4035 | .v.func = alc_fixup_inv_dmic_0x12, |
4022 | }, | 4036 | }, |
4037 | [ALC269_FIXUP_NO_SHUTUP] = { | ||
4038 | .type = HDA_FIXUP_FUNC, | ||
4039 | .v.func = alc_fixup_no_shutup, | ||
4040 | }, | ||
4023 | [ALC269_FIXUP_LENOVO_DOCK] = { | 4041 | [ALC269_FIXUP_LENOVO_DOCK] = { |
4024 | .type = HDA_FIXUP_PINS, | 4042 | .type = HDA_FIXUP_PINS, |
4025 | .v.pins = (const struct hda_pintbl[]) { | 4043 | .v.pins = (const struct hda_pintbl[]) { |
@@ -4405,6 +4423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
4405 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4423 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
4406 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4424 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
4407 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4425 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
4426 | SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), | ||
4408 | SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4427 | SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
4409 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), | 4428 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
4410 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4429 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c index 75d0ad5d2dcb..647a72cda005 100644 --- a/sound/soc/codecs/88pm860x-codec.c +++ b/sound/soc/codecs/88pm860x-codec.c | |||
@@ -1328,6 +1328,9 @@ static int pm860x_probe(struct snd_soc_codec *codec) | |||
1328 | pm860x->codec = codec; | 1328 | pm860x->codec = codec; |
1329 | 1329 | ||
1330 | codec->control_data = pm860x->regmap; | 1330 | codec->control_data = pm860x->regmap; |
1331 | ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); | ||
1332 | if (ret) | ||
1333 | return ret; | ||
1331 | 1334 | ||
1332 | for (i = 0; i < 4; i++) { | 1335 | for (i = 0; i < 4; i++) { |
1333 | ret = request_threaded_irq(pm860x->irq[i], NULL, | 1336 | ret = request_threaded_irq(pm860x->irq[i], NULL, |
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index 52e7cb08434b..fa2b8e07f420 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c | |||
@@ -210,7 +210,7 @@ out: | |||
210 | static int si476x_codec_probe(struct snd_soc_codec *codec) | 210 | static int si476x_codec_probe(struct snd_soc_codec *codec) |
211 | { | 211 | { |
212 | codec->control_data = dev_get_regmap(codec->dev->parent, NULL); | 212 | codec->control_data = dev_get_regmap(codec->dev->parent, NULL); |
213 | return 0; | 213 | return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); |
214 | } | 214 | } |
215 | 215 | ||
216 | static struct snd_soc_dai_ops si476x_dai_ops = { | 216 | static struct snd_soc_dai_ops si476x_dai_ops = { |
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c index 3fde9e402710..d163e18d85d4 100644 --- a/sound/soc/omap/n810.c +++ b/sound/soc/omap/n810.c | |||
@@ -305,7 +305,9 @@ static int __init n810_soc_init(void) | |||
305 | int err; | 305 | int err; |
306 | struct device *dev; | 306 | struct device *dev; |
307 | 307 | ||
308 | if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax())) | 308 | if (!of_have_populated_dt() || |
309 | (!of_machine_is_compatible("nokia,n810") && | ||
310 | !of_machine_is_compatible("nokia,n810-wimax"))) | ||
309 | return -ENODEV; | 311 | return -ENODEV; |
310 | 312 | ||
311 | n810_snd_device = platform_device_alloc("soc-audio", -1); | 313 | n810_snd_device = platform_device_alloc("soc-audio", -1); |
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 47e1ce771e65..28522bd03b8e 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
@@ -1989,6 +1989,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card) | |||
1989 | 1989 | ||
1990 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list); | 1990 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list); |
1991 | if (paths < 0) { | 1991 | if (paths < 0) { |
1992 | dpcm_path_put(&list); | ||
1992 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", | 1993 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", |
1993 | fe->dai_link->name, "playback"); | 1994 | fe->dai_link->name, "playback"); |
1994 | mutex_unlock(&card->mutex); | 1995 | mutex_unlock(&card->mutex); |
@@ -2018,6 +2019,7 @@ capture: | |||
2018 | 2019 | ||
2019 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list); | 2020 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list); |
2020 | if (paths < 0) { | 2021 | if (paths < 0) { |
2022 | dpcm_path_put(&list); | ||
2021 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", | 2023 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", |
2022 | fe->dai_link->name, "capture"); | 2024 | fe->dai_link->name, "capture"); |
2023 | mutex_unlock(&card->mutex); | 2025 | mutex_unlock(&card->mutex); |
@@ -2082,6 +2084,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream) | |||
2082 | fe->dpcm[stream].runtime = fe_substream->runtime; | 2084 | fe->dpcm[stream].runtime = fe_substream->runtime; |
2083 | 2085 | ||
2084 | if (dpcm_path_get(fe, stream, &list) <= 0) { | 2086 | if (dpcm_path_get(fe, stream, &list) <= 0) { |
2087 | dpcm_path_put(&list); | ||
2085 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", | 2088 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", |
2086 | fe->dai_link->name, stream ? "capture" : "playback"); | 2089 | fe->dai_link->name, stream ? "capture" : "playback"); |
2087 | } | 2090 | } |
diff --git a/tools/net/Makefile b/tools/net/Makefile index 004cd74734b6..ee577ea03ba5 100644 --- a/tools/net/Makefile +++ b/tools/net/Makefile | |||
@@ -12,7 +12,7 @@ YACC = bison | |||
12 | 12 | ||
13 | all : bpf_jit_disasm bpf_dbg bpf_asm | 13 | all : bpf_jit_disasm bpf_dbg bpf_asm |
14 | 14 | ||
15 | bpf_jit_disasm : CFLAGS = -Wall -O2 | 15 | bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' |
16 | bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl | 16 | bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl |
17 | bpf_jit_disasm : bpf_jit_disasm.o | 17 | bpf_jit_disasm : bpf_jit_disasm.o |
18 | 18 | ||
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 6aa6fb6f7bd9..f954c26de231 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -825,7 +825,6 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg) | |||
825 | P_SIGNUM(PIPE); | 825 | P_SIGNUM(PIPE); |
826 | P_SIGNUM(ALRM); | 826 | P_SIGNUM(ALRM); |
827 | P_SIGNUM(TERM); | 827 | P_SIGNUM(TERM); |
828 | P_SIGNUM(STKFLT); | ||
829 | P_SIGNUM(CHLD); | 828 | P_SIGNUM(CHLD); |
830 | P_SIGNUM(CONT); | 829 | P_SIGNUM(CONT); |
831 | P_SIGNUM(STOP); | 830 | P_SIGNUM(STOP); |
@@ -841,6 +840,15 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg) | |||
841 | P_SIGNUM(IO); | 840 | P_SIGNUM(IO); |
842 | P_SIGNUM(PWR); | 841 | P_SIGNUM(PWR); |
843 | P_SIGNUM(SYS); | 842 | P_SIGNUM(SYS); |
843 | #ifdef SIGEMT | ||
844 | P_SIGNUM(EMT); | ||
845 | #endif | ||
846 | #ifdef SIGSTKFLT | ||
847 | P_SIGNUM(STKFLT); | ||
848 | #endif | ||
849 | #ifdef SIGSWI | ||
850 | P_SIGNUM(SWI); | ||
851 | #endif | ||
844 | default: break; | 852 | default: break; |
845 | } | 853 | } |
846 | 854 | ||
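[Editor's note] SIGSTKFLT was printed unconditionally but does not exist on every architecture, which broke the build there; the signal table now compiles each non-universal signal behind an #ifdef. The same guard, standalone:

#include <signal.h>
#include <stdio.h>

/* Map a signal number to a name, guarding non-portable signals the
 * same way the builtin-trace hunk does. */
static const char *signum_name(int sig)
{
    switch (sig) {
    case SIGTERM: return "TERM";
    case SIGCHLD: return "CHLD";
#ifdef SIGSTKFLT                 /* Linux-specific, absent on some arches */
    case SIGSTKFLT: return "STKFLT";
#endif
#ifdef SIGEMT                    /* only on some architectures */
    case SIGEMT: return "EMT";
#endif
    default: return "?";
    }
}

int main(void)
{
    printf("%s %s\n", signum_name(SIGTERM), signum_name(SIGCHLD));
    return 0;
}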
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index c872991e0f65..620a1983b76b 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
@@ -1213,7 +1213,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread, | |||
1213 | */ | 1213 | */ |
1214 | thread__find_addr_location(thread, machine, m, MAP__FUNCTION, | 1214 | thread__find_addr_location(thread, machine, m, MAP__FUNCTION, |
1215 | ip, &al); | 1215 | ip, &al); |
1216 | if (al.sym) | 1216 | if (al.map) |
1217 | goto found; | 1217 | goto found; |
1218 | } | 1218 | } |
1219 | found: | 1219 | found: |
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 3e9f336740fa..516d19fb999b 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -151,15 +151,15 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | |||
151 | 151 | ||
152 | gelf_getshdr(sec, shp); | 152 | gelf_getshdr(sec, shp); |
153 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); | 153 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); |
154 | if (!strcmp(name, str)) { | 154 | if (str && !strcmp(name, str)) { |
155 | if (idx) | 155 | if (idx) |
156 | *idx = cnt; | 156 | *idx = cnt; |
157 | break; | 157 | return sec; |
158 | } | 158 | } |
159 | ++cnt; | 159 | ++cnt; |
160 | } | 160 | } |
161 | 161 | ||
162 | return sec; | 162 | return NULL; |
163 | } | 163 | } |
164 | 164 | ||
165 | #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ | 165 | #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ |
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c index d66418237d21..aa290c0de6f5 100644 --- a/tools/testing/selftests/ipc/msgque.c +++ b/tools/testing/selftests/ipc/msgque.c | |||
@@ -201,6 +201,7 @@ int main(int argc, char **argv) | |||
201 | 201 | ||
202 | msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); | 202 | msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); |
203 | if (msgque.msq_id == -1) { | 203 | if (msgque.msq_id == -1) { |
204 | err = -errno; | ||
204 | printf("Can't create queue\n"); | 205 | printf("Can't create queue\n"); |
205 | goto err_out; | 206 | goto err_out; |
206 | } | 207 | } |
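[Editor's note] The selftest fix captures errno into err immediately: printf() and other libc calls may overwrite errno, so deferring the read to err_out could report a stale or zero error. The idiom, standalone (the failing msgget() call is just an example expected to fail on most systems):

#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
    int err = 0;
    int id = msgget((key_t)-1, 0);   /* no such queue: likely ENOENT */

    if (id == -1) {
        err = -errno;   /* capture BEFORE any call that may clobber errno */
        printf("Can't open queue\n");
    }
    printf("err=%d\n", err);
    return err ? 1 : 0;
}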