271 files changed, 22732 insertions, 6456 deletions
diff --git a/Documentation/EDID/1024x768.S b/Documentation/EDID/1024x768.S
new file mode 100644
index 000000000000..4b486fe31b32
--- /dev/null
+++ b/Documentation/EDID/1024x768.S
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | 1024x768.S: EDID data set for standard 1024x768 60 Hz monitor | ||
3 | |||
4 | Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org> | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License | ||
8 | as published by the Free Software Foundation; either version 2 | ||
9 | of the License, or (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | /* EDID */ | ||
22 | #define VERSION 1 | ||
23 | #define REVISION 3 | ||
24 | |||
25 | /* Display */ | ||
26 | #define CLOCK 65000 /* kHz */ | ||
27 | #define XPIX 1024 | ||
28 | #define YPIX 768 | ||
29 | #define XY_RATIO XY_RATIO_4_3 | ||
30 | #define XBLANK 320 | ||
31 | #define YBLANK 38 | ||
32 | #define XOFFSET 8 | ||
33 | #define XPULSE 144 | ||
34 | #define YOFFSET (63+3) | ||
35 | #define YPULSE (63+6) | ||
36 | #define DPI 72 | ||
37 | #define VFREQ 60 /* Hz */ | ||
38 | #define TIMING_NAME "Linux XGA" | ||
39 | #define ESTABLISHED_TIMINGS_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */ | ||
40 | #define HSYNC_POL 0 | ||
41 | #define VSYNC_POL 0 | ||
42 | #define CRC 0x55 | ||
43 | |||
44 | #include "edid.S" | ||
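As a quick plausibility check, the refresh rate follows directly from the values above (plain arithmetic, not part of the patch): the total frame is (XPIX + XBLANK) x (YPIX + YBLANK) = (1024 + 320) x (768 + 38) = 1344 x 806 = 1,083,264 pixels, and CLOCK / total = 65,000,000 Hz / 1,083,264 = 60.0 Hz, matching VFREQ. The same check can be applied to the other data sets below.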
diff --git a/Documentation/EDID/1280x1024.S b/Documentation/EDID/1280x1024.S
new file mode 100644
index 000000000000..a2799fe33a4d
--- /dev/null
+++ b/Documentation/EDID/1280x1024.S
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | 1280x1024.S: EDID data set for standard 1280x1024 60 Hz monitor | ||
3 | |||
4 | Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org> | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License | ||
8 | as published by the Free Software Foundation; either version 2 | ||
9 | of the License, or (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | /* EDID */ | ||
22 | #define VERSION 1 | ||
23 | #define REVISION 3 | ||
24 | |||
25 | /* Display */ | ||
26 | #define CLOCK 108000 /* kHz */ | ||
27 | #define XPIX 1280 | ||
28 | #define YPIX 1024 | ||
29 | #define XY_RATIO XY_RATIO_5_4 | ||
30 | #define XBLANK 408 | ||
31 | #define YBLANK 42 | ||
32 | #define XOFFSET 48 | ||
33 | #define XPULSE 112 | ||
34 | #define YOFFSET (63+1) | ||
35 | #define YPULSE (63+3) | ||
36 | #define DPI 72 | ||
37 | #define VFREQ 60 /* Hz */ | ||
38 | #define TIMING_NAME "Linux SXGA" | ||
39 | #define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ | ||
40 | #define HSYNC_POL 1 | ||
41 | #define VSYNC_POL 1 | ||
42 | #define CRC 0xa0 | ||
43 | |||
44 | #include "edid.S" | ||
diff --git a/Documentation/EDID/1680x1050.S b/Documentation/EDID/1680x1050.S
new file mode 100644
index 000000000000..96f67cafcf2e
--- /dev/null
+++ b/Documentation/EDID/1680x1050.S
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | 1680x1050.S: EDID data set for standard 1680x1050 60 Hz monitor | ||
3 | |||
4 | Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org> | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License | ||
8 | as published by the Free Software Foundation; either version 2 | ||
9 | of the License, or (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | /* EDID */ | ||
22 | #define VERSION 1 | ||
23 | #define REVISION 3 | ||
24 | |||
25 | /* Display */ | ||
26 | #define CLOCK 146250 /* kHz */ | ||
27 | #define XPIX 1680 | ||
28 | #define YPIX 1050 | ||
29 | #define XY_RATIO XY_RATIO_16_10 | ||
30 | #define XBLANK 560 | ||
31 | #define YBLANK 39 | ||
32 | #define XOFFSET 104 | ||
33 | #define XPULSE 176 | ||
34 | #define YOFFSET (63+3) | ||
35 | #define YPULSE (63+6) | ||
36 | #define DPI 96 | ||
37 | #define VFREQ 60 /* Hz */ | ||
38 | #define TIMING_NAME "Linux WSXGA" | ||
39 | #define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ | ||
40 | #define HSYNC_POL 1 | ||
41 | #define VSYNC_POL 1 | ||
42 | #define CRC 0x26 | ||
43 | |||
44 | #include "edid.S" | ||
diff --git a/Documentation/EDID/1920x1080.S b/Documentation/EDID/1920x1080.S
new file mode 100644
index 000000000000..36ed5d571d0a
--- /dev/null
+++ b/Documentation/EDID/1920x1080.S
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | 1920x1080.S: EDID data set for standard 1920x1080 60 Hz monitor | ||
3 | |||
4 | Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org> | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License | ||
8 | as published by the Free Software Foundation; either version 2 | ||
9 | of the License, or (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | /* EDID */ | ||
22 | #define VERSION 1 | ||
23 | #define REVISION 3 | ||
24 | |||
25 | /* Display */ | ||
26 | #define CLOCK 148500 /* kHz */ | ||
27 | #define XPIX 1920 | ||
28 | #define YPIX 1080 | ||
29 | #define XY_RATIO XY_RATIO_16_9 | ||
30 | #define XBLANK 280 | ||
31 | #define YBLANK 45 | ||
32 | #define XOFFSET 88 | ||
33 | #define XPULSE 44 | ||
34 | #define YOFFSET (63+4) | ||
35 | #define YPULSE (63+5) | ||
36 | #define DPI 96 | ||
37 | #define VFREQ 60 /* Hz */ | ||
38 | #define TIMING_NAME "Linux FHD" | ||
39 | #define ESTABLISHED_TIMINGS_BITS 0x00 /* none */ | ||
40 | #define HSYNC_POL 1 | ||
41 | #define VSYNC_POL 1 | ||
42 | #define CRC 0x05 | ||
43 | |||
44 | #include "edid.S" | ||
diff --git a/Documentation/EDID/HOWTO.txt b/Documentation/EDID/HOWTO.txt
new file mode 100644
index 000000000000..75a9f2a0c43d
--- /dev/null
+++ b/Documentation/EDID/HOWTO.txt
@@ -0,0 +1,39 @@ | |||
1 | In the good old days when graphics parameters were configured explicitly | ||
2 | in a file called xorg.conf, even broken hardware could be managed. | ||
3 | |||
4 | Today, with the advent of Kernel Mode Setting, a graphics board | ||
5 | either works correctly because all components follow the standards - | ||
6 | or the computer is unusable because the screen remains dark after | ||
7 | booting or displays the wrong area. Typical cases are: | ||
8 | - The graphics board does not recognize the monitor. | ||
9 | - The graphics board is unable to detect any EDID data. | ||
10 | - The graphics board incorrectly forwards EDID data to the driver. | ||
11 | - The monitor sends no or bogus EDID data. | ||
12 | - A KVM sends its own EDID data instead of querying the connected monitor. | ||
13 | Adding the kernel parameter "nomodeset" helps in most cases, but causes | ||
14 | restrictions later on. | ||
15 | |||
16 | As a remedy for such situations, the kernel configuration item | ||
17 | CONFIG_DRM_LOAD_EDID_FIRMWARE was introduced. It allows providing an | ||
18 | individually prepared or corrected EDID data set in the /lib/firmware | ||
19 | directory from where it is loaded via the firmware interface. The code | ||
20 | (see drivers/gpu/drm/drm_edid_load.c) contains built-in data sets for | ||
21 | commonly used screen resolutions (1024x768, 1280x1024, 1680x1050, | ||
22 | 1920x1080) as binary blobs, but the kernel source tree does not contain | ||
23 | code to create these data. In order to elucidate the origin of the | ||
24 | built-in binary EDID blobs and to facilitate the creation of individual | ||
25 | data for a specific misbehaving monitor, commented sources and a | ||
26 | Makefile environment are given here. | ||
27 | |||
28 | To create binary EDID and C source code files from the existing data | ||
29 | material, simply type "make". | ||
30 | |||
31 | If you want to create your own EDID file, copy the file 1024x768.S and | ||
32 | replace the settings with your own data. The CRC value in the last line | ||
33 | #define CRC 0x55 | ||
34 | is a bit tricky. After a first version of the binary data set is | ||
35 | created, it must be checked with the "edid-decode" utility, which will | ||
36 | most probably complain about a wrong CRC. Fortunately, the utility also | ||
37 | displays the correct CRC which must then be inserted into the source | ||
38 | file. After the make procedure is repeated, the EDID data set is ready | ||
39 | to be used. | ||
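The checksum rule behind that CRC byte is simple: the 128 bytes of an EDID block must sum to zero modulo 256, so the last byte is the two's complement of the sum of the first 127. Instead of reading the value off edid-decode's error message, it can also be computed directly from the generated binary; the helper below is only an illustrative sketch (the program and file names are examples, not part of the patch):

    /* edid-crc.c: print the CRC byte for a 128-byte EDID block */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
            unsigned char blk[128];
            unsigned int sum = 0;
            int i;
            FILE *f = fopen(argc > 1 ? argv[1] : "1024x768.bin", "rb");

            if (!f || fread(blk, 1, sizeof(blk), f) != sizeof(blk)) {
                    perror("EDID binary");
                    return 1;
            }
            for (i = 0; i < 127; i++)       /* all bytes except the CRC itself */
                    sum += blk[i];
            printf("#define CRC 0x%02x\n", (0x100 - (sum & 0xff)) & 0xff);
            fclose(f);
            return 0;
    }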
diff --git a/Documentation/EDID/Makefile b/Documentation/EDID/Makefile
new file mode 100644
index 000000000000..17763ca3f12b
--- /dev/null
+++ b/Documentation/EDID/Makefile
@@ -0,0 +1,26 @@ | |||
1 | |||
2 | SOURCES := $(wildcard [0-9]*x[0-9]*.S) | ||
3 | |||
4 | BIN := $(patsubst %.S, %.bin, $(SOURCES)) | ||
5 | |||
6 | IHEX := $(patsubst %.S, %.bin.ihex, $(SOURCES)) | ||
7 | |||
8 | CODE := $(patsubst %.S, %.c, $(SOURCES)) | ||
9 | |||
10 | all: $(BIN) $(IHEX) $(CODE) | ||
11 | |||
12 | clean: | ||
13 | @rm -f *.o *.bin.ihex *.bin *.c | ||
14 | |||
15 | %.o: %.S | ||
16 | @cc -c $^ | ||
17 | |||
18 | %.bin: %.o | ||
19 | @objcopy -Obinary $^ $@ | ||
20 | |||
21 | %.bin.ihex: %.o | ||
22 | @objcopy -Oihex $^ $@ | ||
23 | @dos2unix $@ 2>/dev/null | ||
24 | |||
25 | %.c: %.bin | ||
26 | @echo "{" >$@; hexdump -f hex $^ >>$@; echo "};" >>$@ | ||
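The pipeline above assembles each .S file (cc runs the C preprocessor over it), extracts the raw 128-byte .data section with objcopy, and wraps a hexdump of the result in braces so it can be used as a C initializer. The first rows of every generated .c file are fixed by the header and manufacturer fields in edid.S; the fragment below is shown for illustration only, with the remaining rows abbreviated:

    {
            0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
            0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            ...
    };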
diff --git a/Documentation/EDID/edid.S b/Documentation/EDID/edid.S
new file mode 100644
index 000000000000..ea97ae275fca
--- /dev/null
+++ b/Documentation/EDID/edid.S
@@ -0,0 +1,261 @@ | |||
1 | /* | ||
2 | edid.S: EDID data template | ||
3 | |||
4 | Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org> | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License | ||
8 | as published by the Free Software Foundation; either version 2 | ||
9 | of the License, or (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | |||
22 | /* Manufacturer */ | ||
23 | #define MFG_LNX1 'L' | ||
24 | #define MFG_LNX2 'N' | ||
25 | #define MFG_LNX3 'X' | ||
26 | #define SERIAL 0 | ||
27 | #define YEAR 2012 | ||
28 | #define WEEK 5 | ||
29 | |||
30 | /* EDID 1.3 standard definitions */ | ||
31 | #define XY_RATIO_16_10 0b00 | ||
32 | #define XY_RATIO_4_3 0b01 | ||
33 | #define XY_RATIO_5_4 0b10 | ||
34 | #define XY_RATIO_16_9 0b11 | ||
35 | |||
36 | #define mfgname2id(v1,v2,v3) \ | ||
37 | ((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f)) | ||
38 | #define swap16(v1) ((v1>>8)+((v1&0xff)<<8)) | ||
39 | #define msbs2(v1,v2) ((((v1>>8)&0x0f)<<4)+((v2>>8)&0x0f)) | ||
40 | #define msbs4(v1,v2,v3,v4) \ | ||
41 | (((v1&0x03)>>2)+((v2&0x03)>>4)+((v3&0x03)>>6)+((v4&0x03)>>8)) | ||
42 | #define pixdpi2mm(pix,dpi) ((pix*25)/dpi) | ||
43 | #define xsize pixdpi2mm(XPIX,DPI) | ||
44 | #define ysize pixdpi2mm(YPIX,DPI) | ||
45 | |||
46 | .data | ||
47 | |||
48 | /* Fixed header pattern */ | ||
49 | header: .byte 0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x00 | ||
50 | |||
51 | mfg_id: .word swap16(mfgname2id(MFG_LNX1, MFG_LNX2, MFG_LNX3)) | ||
52 | |||
53 | prod_code: .word 0 | ||
54 | |||
55 | /* Serial number. 32 bits, little endian. */ | ||
56 | serial_number: .long SERIAL | ||
57 | |||
58 | /* Week of manufacture */ | ||
59 | week: .byte WEEK | ||
60 | |||
61 | /* Year of manufacture, less 1990. (1990-2245) | ||
62 | If week=255, it is the model year instead */ | ||
63 | year: .byte YEAR-1990 | ||
64 | |||
65 | version: .byte VERSION /* EDID version, usually 1 (for 1.3) */ | ||
66 | revision: .byte REVISION /* EDID revision, usually 3 (for 1.3) */ | ||
67 | |||
68 | /* If Bit 7=1 Digital input. If set, the following bit definitions apply: | ||
69 | Bits 6-1 Reserved, must be 0 | ||
70 | Bit 0 Signal is compatible with VESA DFP 1.x TMDS CRGB, | ||
71 | 1 pixel per clock, up to 8 bits per color, MSB aligned, | ||
72 | If Bit 7=0 Analog input. If clear, the following bit definitions apply: | ||
73 | Bits 6-5 Video white and sync levels, relative to blank | ||
74 | 00=+0.7/-0.3 V; 01=+0.714/-0.286 V; | ||
75 | 10=+1.0/-0.4 V; 11=+0.7/0 V | ||
76 | Bit 4 Blank-to-black setup (pedestal) expected | ||
77 | Bit 3 Separate sync supported | ||
78 | Bit 2 Composite sync (on HSync) supported | ||
79 | Bit 1 Sync on green supported | ||
80 | Bit 0 VSync pulse must be serrated when composite or | ||
81 | sync-on-green is used. */ | ||
82 | video_parms: .byte 0x6d | ||
83 | |||
84 | /* Maximum horizontal image size, in centimetres | ||
85 | (max 292 cm/115 in at 16:9 aspect ratio) */ | ||
86 | max_hor_size: .byte xsize/10 | ||
87 | |||
88 | /* Maximum vertical image size, in centimetres. | ||
89 | If either byte is 0, undefined (e.g. projector) */ | ||
90 | max_vert_size: .byte ysize/10 | ||
91 | |||
92 | /* Display gamma, minus 1, times 100 (range 1.00-3.54) */ | ||
93 | gamma: .byte 120 | ||
94 | |||
95 | /* Bit 7 DPMS standby supported | ||
96 | Bit 6 DPMS suspend supported | ||
97 | Bit 5 DPMS active-off supported | ||
98 | Bits 4-3 Display type: 00=monochrome; 01=RGB colour; | ||
99 | 10=non-RGB multicolour; 11=undefined | ||
100 | Bit 2 Standard sRGB colour space. Bytes 25-34 must contain | ||
101 | sRGB standard values. | ||
102 | Bit 1 Preferred timing mode specified in descriptor block 1. | ||
103 | Bit 0 GTF supported with default parameter values. */ | ||
104 | dsp_features: .byte 0xea | ||
105 | |||
106 | /* Chromaticity coordinates. */ | ||
107 | /* Red and green least-significant bits | ||
108 | Bits 7-6 Red x value least-significant 2 bits | ||
109 | Bits 5-4 Red y value least-significant 2 bits | ||
110 | Bits 3-2 Green x value least-significant 2 bits | ||
111 | Bits 1-0 Green y value least-significant 2 bits */ | ||
112 | red_green_lsb: .byte 0x5e | ||
113 | |||
114 | /* Blue and white least-significant 2 bits */ | ||
115 | blue_white_lsb: .byte 0xc0 | ||
116 | |||
117 | /* Red x value most significant 8 bits. | ||
118 | 0-255 encodes 0-0.996 (255/256); 0-0.999 (1023/1024) with lsbits */ | ||
119 | red_x_msb: .byte 0xa4 | ||
120 | |||
121 | /* Red y value most significant 8 bits */ | ||
122 | red_y_msb: .byte 0x59 | ||
123 | |||
124 | /* Green x and y value most significant 8 bits */ | ||
125 | green_x_y_msb: .byte 0x4a,0x98 | ||
126 | |||
127 | /* Blue x and y value most significant 8 bits */ | ||
128 | blue_x_y_msb: .byte 0x25,0x20 | ||
129 | |||
130 | /* Default white point x and y value most significant 8 bits */ | ||
131 | white_x_y_msb: .byte 0x50,0x54 | ||
132 | |||
133 | /* Established timings */ | ||
134 | /* Bit 7 720x400 @ 70 Hz | ||
135 | Bit 6 720x400 @ 88 Hz | ||
136 | Bit 5 640x480 @ 60 Hz | ||
137 | Bit 4 640x480 @ 67 Hz | ||
138 | Bit 3 640x480 @ 72 Hz | ||
139 | Bit 2 640x480 @ 75 Hz | ||
140 | Bit 1 800x600 @ 56 Hz | ||
141 | Bit 0 800x600 @ 60 Hz */ | ||
142 | estbl_timing1: .byte 0x00 | ||
143 | |||
144 | /* Bit 7 800x600 @ 72 Hz | ||
145 | Bit 6 800x600 @ 75 Hz | ||
146 | Bit 5 832x624 @ 75 Hz | ||
147 | Bit 4 1024x768 @ 87 Hz, interlaced (1024x768) | ||
148 | Bit 3 1024x768 @ 60 Hz | ||
149 | Bit 2 1024x768 @ 72 Hz | ||
150 | Bit 1 1024x768 @ 75 Hz | ||
151 | Bit 0 1280x1024 @ 75 Hz */ | ||
152 | estbl_timing2: .byte ESTABLISHED_TIMINGS_BITS | ||
153 | |||
154 | /* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II) | ||
155 | Bits 6-0 Other manufacturer-specific display modes */ | ||
156 | estbl_timing3: .byte 0x00 | ||
157 | |||
158 | /* Standard timing */ | ||
159 | /* X resolution, less 31, divided by 8 (256-2288 pixels) */ | ||
160 | std_xres: .byte (XPIX/8)-31 | ||
161 | /* Y resolution, X:Y pixel ratio | ||
162 | Bits 7-6 X:Y pixel ratio: 00=16:10; 01=4:3; 10=5:4; 11=16:9. | ||
163 | Bits 5-0 Vertical frequency, less 60 (60-123 Hz) */ | ||
164 | std_vres: .byte (XY_RATIO<<6)+VFREQ-60 | ||
165 | .fill 7,2,0x0101 /* Unused */ | ||
166 | |||
167 | descriptor1: | ||
168 | /* Pixel clock in 10 kHz units. (0-655.35 MHz, little-endian) */ | ||
169 | clock: .word CLOCK/10 | ||
170 | |||
171 | /* Horizontal active pixels 8 lsbits (0-4095) */ | ||
172 | x_act_lsb: .byte XPIX&0xff | ||
173 | /* Horizontal blanking pixels 8 lsbits (0-4095) | ||
174 | End of active to start of next active. */ | ||
175 | x_blk_lsb: .byte XBLANK&0xff | ||
176 | /* Bits 7-4 Horizontal active pixels 4 msbits | ||
177 | Bits 3-0 Horizontal blanking pixels 4 msbits */ | ||
178 | x_msbs: .byte msbs2(XPIX,XBLANK) | ||
179 | |||
180 | /* Vertical active lines 8 lsbits (0-4095) */ | ||
181 | y_act_lsb: .byte YPIX&0xff | ||
182 | /* Vertical blanking lines 8 lsbits (0-4095) */ | ||
183 | y_blk_lsb: .byte YBLANK&0xff | ||
184 | /* Bits 7-4 Vertical active lines 4 msbits | ||
185 | Bits 3-0 Vertical blanking lines 4 msbits */ | ||
186 | y_msbs: .byte msbs2(YPIX,YBLANK) | ||
187 | |||
188 | /* Horizontal sync offset pixels 8 lsbits (0-1023) From blanking start */ | ||
189 | x_snc_off_lsb: .byte XOFFSET&0xff | ||
190 | /* Horizontal sync pulse width pixels 8 lsbits (0-1023) */ | ||
191 | x_snc_pls_lsb: .byte XPULSE&0xff | ||
192 | /* Bits 7-4 Vertical sync offset lines 4 lsbits (0-63) | ||
193 | Bits 3-0 Vertical sync pulse width lines 4 lsbits (0-63) | ||
194 | y_snc_lsb: .byte ((YOFFSET-63)<<4)+(YPULSE-63) | ||
195 | /* Bits 7-6 Horizontal sync offset pixels 2 msbits | ||
196 | Bits 5-4 Horizontal sync pulse width pixels 2 msbits | ||
197 | Bits 3-2 Vertical sync offset lines 2 msbits | ||
198 | Bits 1-0 Vertical sync pulse width lines 2 msbits */ | ||
199 | xy_snc_msbs: .byte msbs4(XOFFSET,XPULSE,YOFFSET,YPULSE) | ||
200 | |||
201 | /* Horizontal display size, mm, 8 lsbits (0-4095 mm, 161 in) */ | ||
202 | x_dsp_size: .byte xsize&0xff | ||
203 | |||
204 | /* Vertical display size, mm, 8 lsbits (0-4095 mm, 161 in) */ | ||
205 | y_dsp_size: .byte ysize&0xff | ||
206 | |||
207 | /* Bits 7-4 Horizontal display size, mm, 4 msbits | ||
208 | Bits 3-0 Vertical display size, mm, 4 msbits */ | ||
209 | dsp_size_mbsb: .byte msbs2(xsize,ysize) | ||
210 | |||
211 | /* Horizontal border pixels (each side; total is twice this) */ | ||
212 | x_border: .byte 0 | ||
213 | /* Vertical border lines (each side; total is twice this) */ | ||
214 | y_border: .byte 0 | ||
215 | |||
216 | /* Bit 7 Interlaced | ||
217 | Bits 6-5 Stereo mode: 00=No stereo; other values depend on bit 0: | ||
218 | Bit 0=0: 01=Field sequential, sync=1 during right; 10=similar, | ||
219 | sync=1 during left; 11=4-way interleaved stereo | ||
220 | Bit 0=1 2-way interleaved stereo: 01=Right image on even lines; | ||
221 | 10=Left image on even lines; 11=side-by-side | ||
222 | Bits 4-3 Sync type: 00=Analog composite; 01=Bipolar analog composite; | ||
223 | 10=Digital composite (on HSync); 11=Digital separate | ||
224 | Bit 2 If digital separate: Vertical sync polarity (1=positive) | ||
225 | Other types: VSync serrated (HSync during VSync) | ||
226 | Bit 1 If analog sync: Sync on all 3 RGB lines (else green only) | ||
227 | Digital: HSync polarity (1=positive) | ||
228 | Bit 0 2-way line-interleaved stereo, if bits 4-3 are not 00. */ | ||
229 | features: .byte 0x18+(VSYNC_POL<<2)+(HSYNC_POL<<1) | ||
230 | |||
231 | descriptor2: .byte 0,0 /* Not a detailed timing descriptor */ | ||
232 | .byte 0 /* Must be zero */ | ||
233 | .byte 0xff /* Descriptor is monitor serial number (text) */ | ||
234 | .byte 0 /* Must be zero */ | ||
235 | start1: .ascii "Linux #0" | ||
236 | end1: .byte 0x0a /* End marker */ | ||
237 | .fill 12-(end1-start1), 1, 0x20 /* Padded spaces */ | ||
238 | descriptor3: .byte 0,0 /* Not a detailed timing descriptor */ | ||
239 | .byte 0 /* Must be zero */ | ||
240 | .byte 0xfd /* Descriptor is monitor range limits */ | ||
241 | .byte 0 /* Must be zero */ | ||
242 | start2: .byte VFREQ-1 /* Minimum vertical field rate (1-255 Hz) */ | ||
243 | .byte VFREQ+1 /* Maximum vertical field rate (1-255 Hz) */ | ||
244 | .byte (CLOCK/(XPIX+XBLANK))-1 /* Minimum horizontal line rate | ||
245 | (1-255 kHz) */ | ||
246 | .byte (CLOCK/(XPIX+XBLANK))+1 /* Maximum horizontal line rate | ||
247 | (1-255 kHz) */ | ||
248 | .byte (CLOCK/10000)+1 /* Maximum pixel clock rate, rounded up | ||
249 | to 10 MHz multiple (10-2550 MHz) */ | ||
250 | .byte 0 /* No extended timing information type */ | ||
251 | end2: .byte 0x0a /* End marker */ | ||
252 | .fill 12-(end2-start2), 1, 0x20 /* Padded spaces */ | ||
253 | descriptor4: .byte 0,0 /* Not a detailed timing descriptor */ | ||
254 | .byte 0 /* Must be zero */ | ||
255 | .byte 0xfc /* Descriptor is text */ | ||
256 | .byte 0 /* Must be zero */ | ||
257 | start3: .ascii TIMING_NAME | ||
258 | end3: .byte 0x0a /* End marker */ | ||
259 | .fill 12-(end3-start3), 1, 0x20 /* Padded spaces */ | ||
260 | extensions: .byte 0 /* Number of extensions to follow */ | ||
261 | checksum: .byte CRC /* Sum of all bytes must be 0 */ | ||
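When the block is read back, the split lsb/msb fields written by msbs2() in descriptor 1 are undone by shifting the nibble byte the other way. The decoder below is only a reference sketch, not code from the patch; d is assumed to point at the 18-byte detailed timing descriptor that starts at EDID byte 54:

    #include <stdio.h>

    static void decode_detailed_timing(const unsigned char *d)
    {
            unsigned int clock_khz = (d[0] | (d[1] << 8)) * 10;   /* stored in 10 kHz units */
            unsigned int hactive   = d[2] | ((d[4] & 0xf0) << 4); /* 8 lsbits + 4 msbits */
            unsigned int hblank    = d[3] | ((d[4] & 0x0f) << 8);
            unsigned int vactive   = d[5] | ((d[7] & 0xf0) << 4);
            unsigned int vblank    = d[6] | ((d[7] & 0x0f) << 8);

            printf("%ux%u, %u kHz pixel clock, %ux%u total\n",
                   hactive, vactive, clock_khz,
                   hactive + hblank, vactive + vblank);
    }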
diff --git a/Documentation/EDID/hex b/Documentation/EDID/hex
new file mode 100644
index 000000000000..8873ebb618af
--- /dev/null
+++ b/Documentation/EDID/hex
@@ -0,0 +1 @@ | |||
"\t" 8/1 "0x%02x, " "\n" | |||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7986d79d9d17..247dcfd62034 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -713,6 +713,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
713 | The filter can be disabled or changed to another | 713 | The filter can be disabled or changed to another |
714 | driver later using sysfs. | 714 | driver later using sysfs. |
715 | 715 | ||
716 | drm_kms_helper.edid_firmware=[<connector>:]<file> | ||
717 | Broken monitors, graphic adapters and KVMs may | ||
718 | send no or incorrect EDID data sets. This parameter | ||
719 | allows specifying an EDID data set in the | ||
720 | /lib/firmware directory that is used instead. | ||
721 | Generic built-in EDID data sets are used if one of | ||
722 | edid/1024x768.bin, edid/1280x1024.bin, | ||
723 | edid/1680x1050.bin, or edid/1920x1080.bin is given | ||
724 | and no file with the same name exists. Details and | ||
725 | instructions on how to build your own EDID data are | ||
726 | available in Documentation/EDID/HOWTO.txt. An EDID | ||
727 | data set will only be used for a particular connector | ||
728 | if the connector name and a colon are prepended to the EDID | ||
729 | name. | ||
730 | |||
716 | dscc4.setup= [NET] | 731 | dscc4.setup= [NET] |
717 | 732 | ||
718 | earlycon= [KNL] Output early console device and options. | 733 | earlycon= [KNL] Output early console device and options. |
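For example, to feed the built-in 1280x1024 data set to one specific output while leaving all other connectors alone, the boot command line would carry something like the line below; the connector name DVI-I-1 is only an example and depends on the driver and board:

    drm_kms_helper.edid_firmware=DVI-I-1:edid/1280x1024.bin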
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 721e65285dce..e0a37233c0af 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/notifier.h> | 29 | #include <linux/notifier.h> |
30 | #include <linux/mfd/intel_msic.h> | 30 | #include <linux/mfd/intel_msic.h> |
31 | #include <linux/gpio.h> | ||
32 | #include <linux/i2c/tc35876x.h> | ||
31 | 33 | ||
32 | #include <asm/setup.h> | 34 | #include <asm/setup.h> |
33 | #include <asm/mpspec_def.h> | 35 | #include <asm/mpspec_def.h> |
@@ -675,6 +677,19 @@ static void *msic_thermal_platform_data(void *info) | |||
675 | return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL); | 677 | return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL); |
676 | } | 678 | } |
677 | 679 | ||
680 | /* tc35876x DSI-LVDS bridge chip and panel platform data */ | ||
681 | static void *tc35876x_platform_data(void *data) | ||
682 | { | ||
683 | static struct tc35876x_platform_data pdata; | ||
684 | |||
685 | /* gpio pins set to -1 will not be used by the driver */ | ||
686 | pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN"); | ||
687 | pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN"); | ||
688 | pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3"); | ||
689 | |||
690 | return &pdata; | ||
691 | } | ||
692 | |||
678 | static const struct devs_id __initconst device_ids[] = { | 693 | static const struct devs_id __initconst device_ids[] = { |
679 | {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data}, | 694 | {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data}, |
680 | {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, | 695 | {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, |
@@ -687,6 +702,7 @@ static const struct devs_id __initconst device_ids[] = { | |||
687 | {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data}, | 702 | {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data}, |
688 | {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data}, | 703 | {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data}, |
689 | {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data}, | 704 | {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data}, |
705 | {"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data}, | ||
690 | 706 | ||
691 | /* MSIC subdevices */ | 707 | /* MSIC subdevices */ |
692 | {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, | 708 | {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b427711be4be..962e75dc4781 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -850,6 +850,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
850 | .subvendor = PCI_ANY_ID, \ | 850 | .subvendor = PCI_ANY_ID, \ |
851 | .subdevice = PCI_ANY_ID, \ | 851 | .subdevice = PCI_ANY_ID, \ |
852 | } | 852 | } |
853 | ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */ | ||
853 | ID(PCI_DEVICE_ID_INTEL_82443LX_0), | 854 | ID(PCI_DEVICE_ID_INTEL_82443LX_0), |
854 | ID(PCI_DEVICE_ID_INTEL_82443BX_0), | 855 | ID(PCI_DEVICE_ID_INTEL_82443BX_0), |
855 | ID(PCI_DEVICE_ID_INTEL_82443GX_0), | 856 | ID(PCI_DEVICE_ID_INTEL_82443GX_0), |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c92424ca1a55..5cf47ac2d401 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -76,7 +76,6 @@ static struct _intel_private { | |||
76 | struct resource ifp_resource; | 76 | struct resource ifp_resource; |
77 | int resource_valid; | 77 | int resource_valid; |
78 | struct page *scratch_page; | 78 | struct page *scratch_page; |
79 | dma_addr_t scratch_page_dma; | ||
80 | } intel_private; | 79 | } intel_private; |
81 | 80 | ||
82 | #define INTEL_GTT_GEN intel_private.driver->gen | 81 | #define INTEL_GTT_GEN intel_private.driver->gen |
@@ -306,9 +305,9 @@ static int intel_gtt_setup_scratch_page(void) | |||
306 | if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) | 305 | if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) |
307 | return -EINVAL; | 306 | return -EINVAL; |
308 | 307 | ||
309 | intel_private.scratch_page_dma = dma_addr; | 308 | intel_private.base.scratch_page_dma = dma_addr; |
310 | } else | 309 | } else |
311 | intel_private.scratch_page_dma = page_to_phys(page); | 310 | intel_private.base.scratch_page_dma = page_to_phys(page); |
312 | 311 | ||
313 | intel_private.scratch_page = page; | 312 | intel_private.scratch_page = page; |
314 | 313 | ||
@@ -631,7 +630,7 @@ static unsigned int intel_gtt_mappable_entries(void) | |||
631 | static void intel_gtt_teardown_scratch_page(void) | 630 | static void intel_gtt_teardown_scratch_page(void) |
632 | { | 631 | { |
633 | set_pages_wb(intel_private.scratch_page, 1); | 632 | set_pages_wb(intel_private.scratch_page, 1); |
634 | pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma, | 633 | pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma, |
635 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 634 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
636 | put_page(intel_private.scratch_page); | 635 | put_page(intel_private.scratch_page); |
637 | __free_page(intel_private.scratch_page); | 636 | __free_page(intel_private.scratch_page); |
@@ -681,6 +680,7 @@ static int intel_gtt_init(void) | |||
681 | iounmap(intel_private.registers); | 680 | iounmap(intel_private.registers); |
682 | return -ENOMEM; | 681 | return -ENOMEM; |
683 | } | 682 | } |
683 | intel_private.base.gtt = intel_private.gtt; | ||
684 | 684 | ||
685 | global_cache_flush(); /* FIXME: ? */ | 685 | global_cache_flush(); /* FIXME: ? */ |
686 | 686 | ||
@@ -975,7 +975,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) | |||
975 | unsigned int i; | 975 | unsigned int i; |
976 | 976 | ||
977 | for (i = first_entry; i < (first_entry + num_entries); i++) { | 977 | for (i = first_entry; i < (first_entry + num_entries); i++) { |
978 | intel_private.driver->write_entry(intel_private.scratch_page_dma, | 978 | intel_private.driver->write_entry(intel_private.base.scratch_page_dma, |
979 | i, 0); | 979 | i, 0); |
980 | } | 980 | } |
981 | readl(intel_private.gtt+i-1); | 981 | readl(intel_private.gtt+i-1); |
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2418429a9836..87ca18b82e15 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,11 @@ menuconfig DRM | |||
18 | details. You should also select and configure AGP | 18 | details. You should also select and configure AGP |
19 | (/dev/agpgart) support if it is available for your platform. | 19 | (/dev/agpgart) support if it is available for your platform. |
20 | 20 | ||
21 | config DRM_USB | ||
22 | tristate | ||
23 | depends on DRM | ||
24 | select USB | ||
25 | |||
21 | config DRM_KMS_HELPER | 26 | config DRM_KMS_HELPER |
22 | tristate | 27 | tristate |
23 | depends on DRM | 28 | depends on DRM |
@@ -27,6 +32,18 @@ config DRM_KMS_HELPER | |||
27 | help | 32 | help |
28 | FB and CRTC helpers for KMS drivers. | 33 | FB and CRTC helpers for KMS drivers. |
29 | 34 | ||
35 | config DRM_LOAD_EDID_FIRMWARE | ||
36 | bool "Allow to specify an EDID data set instead of probing for it" | ||
37 | depends on DRM_KMS_HELPER | ||
38 | help | ||
39 | Say Y here if you want EDID data to be loaded from the | ||
40 | /lib/firmware directory or one of the provided built-in | ||
41 | data sets. This may be necessary if the graphics adapter or | ||
42 | monitor is unable to provide appropriate EDID data. Since this | ||
43 | feature is provided as a workaround for broken hardware, the | ||
44 | default is N. Details and instructions on how to build your own | ||
45 | EDID data are given in Documentation/EDID/HOWTO.txt. | ||
46 | |||
30 | config DRM_TTM | 47 | config DRM_TTM |
31 | tristate | 48 | tristate |
32 | depends on DRM | 49 | depends on DRM |
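Beyond the kernel parameter documented above, deploying this option only requires enabling it at build time (it depends on DRM_KMS_HELPER, which the KMS drivers select) and placing the prepared blob under /lib/firmware; the file name below is an arbitrary example:

    CONFIG_DRM_LOAD_EDID_FIRMWARE=y
    # cp my-monitor.bin /lib/firmware/edid/my-monitor.bin

and then booting with drm_kms_helper.edid_firmware=edid/my-monitor.bin as described in the kernel-parameters.txt hunk above.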
@@ -165,3 +182,4 @@ source "drivers/gpu/drm/vmwgfx/Kconfig" | |||
165 | 182 | ||
166 | source "drivers/gpu/drm/gma500/Kconfig" | 183 | source "drivers/gpu/drm/gma500/Kconfig" |
167 | 184 | ||
185 | source "drivers/gpu/drm/udl/Kconfig" | ||
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0cde1b80fdb1..a858532806ae 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,17 +12,21 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ | |||
12 | drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ | 12 | drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ |
13 | drm_crtc.o drm_modes.o drm_edid.o \ | 13 | drm_crtc.o drm_modes.o drm_edid.o \ |
14 | drm_info.o drm_debugfs.o drm_encoder_slave.o \ | 14 | drm_info.o drm_debugfs.o drm_encoder_slave.o \ |
15 | drm_trace_points.o drm_global.o drm_usb.o | 15 | drm_trace_points.o drm_global.o |
16 | 16 | ||
17 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 17 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
18 | 18 | ||
19 | drm-usb-y := drm_usb.o | ||
20 | |||
19 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o | 21 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o |
22 | drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o | ||
20 | 23 | ||
21 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o | 24 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o |
22 | 25 | ||
23 | CFLAGS_drm_trace_points.o := -I$(src) | 26 | CFLAGS_drm_trace_points.o := -I$(src) |
24 | 27 | ||
25 | obj-$(CONFIG_DRM) += drm.o | 28 | obj-$(CONFIG_DRM) += drm.o |
29 | obj-$(CONFIG_DRM_USB) += drm_usb.o | ||
26 | obj-$(CONFIG_DRM_TTM) += ttm/ | 30 | obj-$(CONFIG_DRM_TTM) += ttm/ |
27 | obj-$(CONFIG_DRM_TDFX) += tdfx/ | 31 | obj-$(CONFIG_DRM_TDFX) += tdfx/ |
28 | obj-$(CONFIG_DRM_R128) += r128/ | 32 | obj-$(CONFIG_DRM_R128) += r128/ |
@@ -37,4 +41,5 @@ obj-$(CONFIG_DRM_VIA) +=via/ | |||
37 | obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ | 41 | obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ |
38 | obj-$(CONFIG_DRM_EXYNOS) +=exynos/ | 42 | obj-$(CONFIG_DRM_EXYNOS) +=exynos/ |
39 | obj-$(CONFIG_DRM_GMA500) += gma500/ | 43 | obj-$(CONFIG_DRM_GMA500) += gma500/ |
44 | obj-$(CONFIG_DRM_UDL) += udl/ | ||
40 | obj-y += i2c/ | 45 | obj-y += i2c/ |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5e818a808ace..d3aaeb6ae236 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,11 +38,6 @@ | |||
38 | #include "drm_edid.h" | 38 | #include "drm_edid.h" |
39 | #include "drm_fourcc.h" | 39 | #include "drm_fourcc.h" |
40 | 40 | ||
41 | struct drm_prop_enum_list { | ||
42 | int type; | ||
43 | char *name; | ||
44 | }; | ||
45 | |||
46 | /* Avoid boilerplate. I'm tired of typing. */ | 41 | /* Avoid boilerplate. I'm tired of typing. */ |
47 | #define DRM_ENUM_NAME_FN(fnname, list) \ | 42 | #define DRM_ENUM_NAME_FN(fnname, list) \ |
48 | char *fnname(int val) \ | 43 | char *fnname(int val) \ |
@@ -298,9 +293,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, | |||
298 | int ret; | 293 | int ret; |
299 | 294 | ||
300 | ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); | 295 | ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); |
301 | if (ret) { | 296 | if (ret) |
302 | return ret; | 297 | return ret; |
303 | } | ||
304 | 298 | ||
305 | fb->dev = dev; | 299 | fb->dev = dev; |
306 | fb->funcs = funcs; | 300 | fb->funcs = funcs; |
@@ -370,19 +364,31 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup); | |||
370 | * Caller must hold mode config lock. | 364 | * Caller must hold mode config lock. |
371 | * | 365 | * |
372 | * Inits a new object created as the base part of a driver crtc object. | 366
367 | * | ||
368 | * RETURNS: | ||
369 | * Zero on success, error code on failure. | ||
373 | */ | 370 | */ |
374 | void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, | 371 | int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, |
375 | const struct drm_crtc_funcs *funcs) | 372 | const struct drm_crtc_funcs *funcs) |
376 | { | 373 | { |
374 | int ret; | ||
375 | |||
377 | crtc->dev = dev; | 376 | crtc->dev = dev; |
378 | crtc->funcs = funcs; | 377 | crtc->funcs = funcs; |
379 | 378 | ||
380 | mutex_lock(&dev->mode_config.mutex); | 379 | mutex_lock(&dev->mode_config.mutex); |
381 | drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); | 380 | |
381 | ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); | ||
382 | if (ret) | ||
383 | goto out; | ||
382 | 384 | ||
383 | list_add_tail(&crtc->head, &dev->mode_config.crtc_list); | 385 | list_add_tail(&crtc->head, &dev->mode_config.crtc_list); |
384 | dev->mode_config.num_crtc++; | 386 | dev->mode_config.num_crtc++; |
387 | |||
388 | out: | ||
385 | mutex_unlock(&dev->mode_config.mutex); | 389 | mutex_unlock(&dev->mode_config.mutex); |
390 | |||
391 | return ret; | ||
386 | } | 392 | } |
387 | EXPORT_SYMBOL(drm_crtc_init); | 393 | EXPORT_SYMBOL(drm_crtc_init); |
388 | 394 | ||
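Because drm_crtc_init() (and, in the hunks below, drm_connector_init(), drm_encoder_init() and drm_plane_init()) now returns the drm_mode_object_get() result instead of void, drivers are expected to check it and unwind. The caller below is purely hypothetical (my_crtc and my_crtc_funcs are placeholder names); it only illustrates the new calling convention:

    struct my_crtc *mc = kzalloc(sizeof(*mc), GFP_KERNEL);
    int ret;

    if (!mc)
            return -ENOMEM;

    ret = drm_crtc_init(dev, &mc->base, &my_crtc_funcs);
    if (ret) {
            kfree(mc);      /* no CRTC ID could be allocated */
            return ret;
    }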
@@ -442,7 +448,7 @@ void drm_mode_remove(struct drm_connector *connector, | |||
442 | struct drm_display_mode *mode) | 448 | struct drm_display_mode *mode) |
443 | { | 449 | { |
444 | list_del(&mode->head); | 450 | list_del(&mode->head); |
445 | kfree(mode); | 451 | drm_mode_destroy(connector->dev, mode); |
446 | } | 452 | } |
447 | EXPORT_SYMBOL(drm_mode_remove); | 453 | EXPORT_SYMBOL(drm_mode_remove); |
448 | 454 | ||
@@ -454,21 +460,29 @@ EXPORT_SYMBOL(drm_mode_remove); | |||
454 | * @name: user visible name of the connector | 460 | * @name: user visible name of the connector |
455 | * | 461 | * |
456 | * LOCKING: | 462 | * LOCKING: |
457 | * Caller must hold @dev's mode_config lock. | 463 | * Takes mode config lock. |
458 | * | 464 | * |
459 | * Initialises a preallocated connector. Connectors should be | 465 | * Initialises a preallocated connector. Connectors should be |
460 | * subclassed as part of driver connector objects. | 466 | * subclassed as part of driver connector objects. |
467 | * | ||
468 | * RETURNS: | ||
469 | * Zero on success, error code on failure. | ||
461 | */ | 470 | */ |
462 | void drm_connector_init(struct drm_device *dev, | 471 | int drm_connector_init(struct drm_device *dev, |
463 | struct drm_connector *connector, | 472 | struct drm_connector *connector, |
464 | const struct drm_connector_funcs *funcs, | 473 | const struct drm_connector_funcs *funcs, |
465 | int connector_type) | 474 | int connector_type) |
466 | { | 475 | { |
476 | int ret; | ||
477 | |||
467 | mutex_lock(&dev->mode_config.mutex); | 478 | mutex_lock(&dev->mode_config.mutex); |
468 | 479 | ||
480 | ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); | ||
481 | if (ret) | ||
482 | goto out; | ||
483 | |||
469 | connector->dev = dev; | 484 | connector->dev = dev; |
470 | connector->funcs = funcs; | 485 | connector->funcs = funcs; |
471 | drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); | ||
472 | connector->connector_type = connector_type; | 486 | connector->connector_type = connector_type; |
473 | connector->connector_type_id = | 487 | connector->connector_type_id = |
474 | ++drm_connector_enum_list[connector_type].count; /* TODO */ | 488 | ++drm_connector_enum_list[connector_type].count; /* TODO */ |
@@ -488,7 +502,10 @@ void drm_connector_init(struct drm_device *dev, | |||
488 | drm_connector_attach_property(connector, | 502 | drm_connector_attach_property(connector, |
489 | dev->mode_config.dpms_property, 0); | 503 | dev->mode_config.dpms_property, 0); |
490 | 504 | ||
505 | out: | ||
491 | mutex_unlock(&dev->mode_config.mutex); | 506 | mutex_unlock(&dev->mode_config.mutex); |
507 | |||
508 | return ret; | ||
492 | } | 509 | } |
493 | EXPORT_SYMBOL(drm_connector_init); | 510 | EXPORT_SYMBOL(drm_connector_init); |
494 | 511 | ||
@@ -497,7 +514,7 @@ EXPORT_SYMBOL(drm_connector_init); | |||
497 | * @connector: connector to cleanup | 514 | * @connector: connector to cleanup |
498 | * | 515 | * |
499 | * LOCKING: | 516 | * LOCKING: |
500 | * Caller must hold @dev's mode_config lock. | 517 | * Takes mode config lock. |
501 | * | 518 | * |
502 | * Cleans up the connector but doesn't free the object. | 519 | * Cleans up the connector but doesn't free the object. |
503 | */ | 520 | */ |
@@ -523,23 +540,41 @@ void drm_connector_cleanup(struct drm_connector *connector) | |||
523 | } | 540 | } |
524 | EXPORT_SYMBOL(drm_connector_cleanup); | 541 | EXPORT_SYMBOL(drm_connector_cleanup); |
525 | 542 | ||
526 | void drm_encoder_init(struct drm_device *dev, | 543 | void drm_connector_unplug_all(struct drm_device *dev) |
544 | { | ||
545 | struct drm_connector *connector; | ||
546 | |||
547 | /* taking the mode config mutex ends up in a clash with sysfs */ | ||
548 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
549 | drm_sysfs_connector_remove(connector); | ||
550 | |||
551 | } | ||
552 | EXPORT_SYMBOL(drm_connector_unplug_all); | ||
553 | |||
554 | int drm_encoder_init(struct drm_device *dev, | ||
527 | struct drm_encoder *encoder, | 555 | struct drm_encoder *encoder, |
528 | const struct drm_encoder_funcs *funcs, | 556 | const struct drm_encoder_funcs *funcs, |
529 | int encoder_type) | 557 | int encoder_type) |
530 | { | 558 | { |
559 | int ret; | ||
560 | |||
531 | mutex_lock(&dev->mode_config.mutex); | 561 | mutex_lock(&dev->mode_config.mutex); |
532 | 562 | ||
533 | encoder->dev = dev; | 563 | ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); |
564 | if (ret) | ||
565 | goto out; | ||
534 | 566 | ||
535 | drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); | 567 | encoder->dev = dev; |
536 | encoder->encoder_type = encoder_type; | 568 | encoder->encoder_type = encoder_type; |
537 | encoder->funcs = funcs; | 569 | encoder->funcs = funcs; |
538 | 570 | ||
539 | list_add_tail(&encoder->head, &dev->mode_config.encoder_list); | 571 | list_add_tail(&encoder->head, &dev->mode_config.encoder_list); |
540 | dev->mode_config.num_encoder++; | 572 | dev->mode_config.num_encoder++; |
541 | 573 | ||
574 | out: | ||
542 | mutex_unlock(&dev->mode_config.mutex); | 575 | mutex_unlock(&dev->mode_config.mutex); |
576 | |||
577 | return ret; | ||
543 | } | 578 | } |
544 | EXPORT_SYMBOL(drm_encoder_init); | 579 | EXPORT_SYMBOL(drm_encoder_init); |
545 | 580 | ||
@@ -560,18 +595,23 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, | |||
560 | const uint32_t *formats, uint32_t format_count, | 595 | const uint32_t *formats, uint32_t format_count, |
561 | bool priv) | 596 | bool priv) |
562 | { | 597 | { |
598 | int ret; | ||
599 | |||
563 | mutex_lock(&dev->mode_config.mutex); | 600 | mutex_lock(&dev->mode_config.mutex); |
564 | 601 | ||
602 | ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); | ||
603 | if (ret) | ||
604 | goto out; | ||
605 | |||
565 | plane->dev = dev; | 606 | plane->dev = dev; |
566 | drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); | ||
567 | plane->funcs = funcs; | 607 | plane->funcs = funcs; |
568 | plane->format_types = kmalloc(sizeof(uint32_t) * format_count, | 608 | plane->format_types = kmalloc(sizeof(uint32_t) * format_count, |
569 | GFP_KERNEL); | 609 | GFP_KERNEL); |
570 | if (!plane->format_types) { | 610 | if (!plane->format_types) { |
571 | DRM_DEBUG_KMS("out of memory when allocating plane\n"); | 611 | DRM_DEBUG_KMS("out of memory when allocating plane\n"); |
572 | drm_mode_object_put(dev, &plane->base); | 612 | drm_mode_object_put(dev, &plane->base); |
573 | mutex_unlock(&dev->mode_config.mutex); | 613 | ret = -ENOMEM; |
574 | return -ENOMEM; | 614 | goto out; |
575 | } | 615 | } |
576 | 616 | ||
577 | memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); | 617 | memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); |
@@ -589,9 +629,10 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, | |||
589 | INIT_LIST_HEAD(&plane->head); | 629 | INIT_LIST_HEAD(&plane->head); |
590 | } | 630 | } |
591 | 631 | ||
632 | out: | ||
592 | mutex_unlock(&dev->mode_config.mutex); | 633 | mutex_unlock(&dev->mode_config.mutex); |
593 | 634 | ||
594 | return 0; | 635 | return ret; |
595 | } | 636 | } |
596 | EXPORT_SYMBOL(drm_plane_init); | 637 | EXPORT_SYMBOL(drm_plane_init); |
597 | 638 | ||
@@ -631,7 +672,11 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev) | |||
631 | if (!nmode) | 672 | if (!nmode) |
632 | return NULL; | 673 | return NULL; |
633 | 674 | ||
634 | drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE); | 675 | if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) { |
676 | kfree(nmode); | ||
677 | return NULL; | ||
678 | } | ||
679 | |||
635 | return nmode; | 680 | return nmode; |
636 | } | 681 | } |
637 | EXPORT_SYMBOL(drm_mode_create); | 682 | EXPORT_SYMBOL(drm_mode_create); |
@@ -648,6 +693,9 @@ EXPORT_SYMBOL(drm_mode_create); | |||
648 | */ | 693 | */ |
649 | void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) | 694 | void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) |
650 | { | 695 | { |
696 | if (!mode) | ||
697 | return; | ||
698 | |||
651 | drm_mode_object_put(dev, &mode->base); | 699 | drm_mode_object_put(dev, &mode->base); |
652 | 700 | ||
653 | kfree(mode); | 701 | kfree(mode); |
@@ -658,7 +706,6 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev) | |||
658 | { | 706 | { |
659 | struct drm_property *edid; | 707 | struct drm_property *edid; |
660 | struct drm_property *dpms; | 708 | struct drm_property *dpms; |
661 | int i; | ||
662 | 709 | ||
663 | /* | 710 | /* |
664 | * Standard properties (apply to all connectors) | 711 | * Standard properties (apply to all connectors) |
@@ -668,11 +715,9 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev) | |||
668 | "EDID", 0); | 715 | "EDID", 0); |
669 | dev->mode_config.edid_property = edid; | 716 | dev->mode_config.edid_property = edid; |
670 | 717 | ||
671 | dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM, | 718 | dpms = drm_property_create_enum(dev, 0, |
672 | "DPMS", ARRAY_SIZE(drm_dpms_enum_list)); | 719 | "DPMS", drm_dpms_enum_list, |
673 | for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) | 720 | ARRAY_SIZE(drm_dpms_enum_list)); |
674 | drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type, | ||
675 | drm_dpms_enum_list[i].name); | ||
676 | dev->mode_config.dpms_property = dpms; | 721 | dev->mode_config.dpms_property = dpms; |
677 | 722 | ||
678 | return 0; | 723 | return 0; |
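This hunk and the ones below replace the open-coded pattern "drm_property_create() followed by a drm_property_add_enum() loop" with drm_property_create_enum(). Judging only from the removed lines, the helper presumably boils down to something like the sketch below; the real implementation lives in drm_crtc.c and may differ in detail:

    struct drm_property *drm_property_create_enum(struct drm_device *dev,
                    int flags, const char *name,
                    const struct drm_prop_enum_list *props, int num_values)
    {
            struct drm_property *property;
            int i;

            property = drm_property_create(dev, flags | DRM_MODE_PROP_ENUM,
                                           name, num_values);
            if (!property)
                    return NULL;

            for (i = 0; i < num_values; i++)
                    drm_property_add_enum(property, i,
                                          props[i].type, props[i].name);

            return property;
    }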
@@ -688,30 +733,21 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev) | |||
688 | { | 733 | { |
689 | struct drm_property *dvi_i_selector; | 734 | struct drm_property *dvi_i_selector; |
690 | struct drm_property *dvi_i_subconnector; | 735 | struct drm_property *dvi_i_subconnector; |
691 | int i; | ||
692 | 736 | ||
693 | if (dev->mode_config.dvi_i_select_subconnector_property) | 737 | if (dev->mode_config.dvi_i_select_subconnector_property) |
694 | return 0; | 738 | return 0; |
695 | 739 | ||
696 | dvi_i_selector = | 740 | dvi_i_selector = |
697 | drm_property_create(dev, DRM_MODE_PROP_ENUM, | 741 | drm_property_create_enum(dev, 0, |
698 | "select subconnector", | 742 | "select subconnector", |
743 | drm_dvi_i_select_enum_list, | ||
699 | ARRAY_SIZE(drm_dvi_i_select_enum_list)); | 744 | ARRAY_SIZE(drm_dvi_i_select_enum_list)); |
700 | for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++) | ||
701 | drm_property_add_enum(dvi_i_selector, i, | ||
702 | drm_dvi_i_select_enum_list[i].type, | ||
703 | drm_dvi_i_select_enum_list[i].name); | ||
704 | dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; | 745 | dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; |
705 | 746 | ||
706 | dvi_i_subconnector = | 747 | dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, |
707 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | ||
708 | DRM_MODE_PROP_IMMUTABLE, | ||
709 | "subconnector", | 748 | "subconnector", |
749 | drm_dvi_i_subconnector_enum_list, | ||
710 | ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); | 750 | ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); |
711 | for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++) | ||
712 | drm_property_add_enum(dvi_i_subconnector, i, | ||
713 | drm_dvi_i_subconnector_enum_list[i].type, | ||
714 | drm_dvi_i_subconnector_enum_list[i].name); | ||
715 | dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; | 751 | dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; |
716 | 752 | ||
717 | return 0; | 753 | return 0; |
@@ -742,51 +778,33 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, | |||
742 | /* | 778 | /* |
743 | * Basic connector properties | 779 | * Basic connector properties |
744 | */ | 780 | */ |
745 | tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM, | 781 | tv_selector = drm_property_create_enum(dev, 0, |
746 | "select subconnector", | 782 | "select subconnector", |
783 | drm_tv_select_enum_list, | ||
747 | ARRAY_SIZE(drm_tv_select_enum_list)); | 784 | ARRAY_SIZE(drm_tv_select_enum_list)); |
748 | for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++) | ||
749 | drm_property_add_enum(tv_selector, i, | ||
750 | drm_tv_select_enum_list[i].type, | ||
751 | drm_tv_select_enum_list[i].name); | ||
752 | dev->mode_config.tv_select_subconnector_property = tv_selector; | 785 | dev->mode_config.tv_select_subconnector_property = tv_selector; |
753 | 786 | ||
754 | tv_subconnector = | 787 | tv_subconnector = |
755 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | 788 | drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, |
756 | DRM_MODE_PROP_IMMUTABLE, "subconnector", | 789 | "subconnector", |
790 | drm_tv_subconnector_enum_list, | ||
757 | ARRAY_SIZE(drm_tv_subconnector_enum_list)); | 791 | ARRAY_SIZE(drm_tv_subconnector_enum_list)); |
758 | for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++) | ||
759 | drm_property_add_enum(tv_subconnector, i, | ||
760 | drm_tv_subconnector_enum_list[i].type, | ||
761 | drm_tv_subconnector_enum_list[i].name); | ||
762 | dev->mode_config.tv_subconnector_property = tv_subconnector; | 792 | dev->mode_config.tv_subconnector_property = tv_subconnector; |
763 | 793 | ||
764 | /* | 794 | /* |
765 | * Other, TV specific properties: margins & TV modes. | 795 | * Other, TV specific properties: margins & TV modes. |
766 | */ | 796 | */ |
767 | dev->mode_config.tv_left_margin_property = | 797 | dev->mode_config.tv_left_margin_property = |
768 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 798 | drm_property_create_range(dev, 0, "left margin", 0, 100); |
769 | "left margin", 2); | ||
770 | dev->mode_config.tv_left_margin_property->values[0] = 0; | ||
771 | dev->mode_config.tv_left_margin_property->values[1] = 100; | ||
772 | 799 | ||
773 | dev->mode_config.tv_right_margin_property = | 800 | dev->mode_config.tv_right_margin_property = |
774 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 801 | drm_property_create_range(dev, 0, "right margin", 0, 100); |
775 | "right margin", 2); | ||
776 | dev->mode_config.tv_right_margin_property->values[0] = 0; | ||
777 | dev->mode_config.tv_right_margin_property->values[1] = 100; | ||
778 | 802 | ||
779 | dev->mode_config.tv_top_margin_property = | 803 | dev->mode_config.tv_top_margin_property = |
780 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 804 | drm_property_create_range(dev, 0, "top margin", 0, 100); |
781 | "top margin", 2); | ||
782 | dev->mode_config.tv_top_margin_property->values[0] = 0; | ||
783 | dev->mode_config.tv_top_margin_property->values[1] = 100; | ||
784 | 805 | ||
785 | dev->mode_config.tv_bottom_margin_property = | 806 | dev->mode_config.tv_bottom_margin_property = |
786 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 807 | drm_property_create_range(dev, 0, "bottom margin", 0, 100); |
787 | "bottom margin", 2); | ||
788 | dev->mode_config.tv_bottom_margin_property->values[0] = 0; | ||
789 | dev->mode_config.tv_bottom_margin_property->values[1] = 100; | ||
790 | 808 | ||
791 | dev->mode_config.tv_mode_property = | 809 | dev->mode_config.tv_mode_property = |
792 | drm_property_create(dev, DRM_MODE_PROP_ENUM, | 810 | drm_property_create(dev, DRM_MODE_PROP_ENUM, |
@@ -796,40 +814,22 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, | |||
796 | i, modes[i]); | 814 | i, modes[i]); |
797 | 815 | ||
798 | dev->mode_config.tv_brightness_property = | 816 | dev->mode_config.tv_brightness_property = |
799 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 817 | drm_property_create_range(dev, 0, "brightness", 0, 100); |
800 | "brightness", 2); | ||
801 | dev->mode_config.tv_brightness_property->values[0] = 0; | ||
802 | dev->mode_config.tv_brightness_property->values[1] = 100; | ||
803 | 818 | ||
804 | dev->mode_config.tv_contrast_property = | 819 | dev->mode_config.tv_contrast_property = |
805 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 820 | drm_property_create_range(dev, 0, "contrast", 0, 100); |
806 | "contrast", 2); | ||
807 | dev->mode_config.tv_contrast_property->values[0] = 0; | ||
808 | dev->mode_config.tv_contrast_property->values[1] = 100; | ||
809 | 821 | ||
810 | dev->mode_config.tv_flicker_reduction_property = | 822 | dev->mode_config.tv_flicker_reduction_property = |
811 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 823 | drm_property_create_range(dev, 0, "flicker reduction", 0, 100); |
812 | "flicker reduction", 2); | ||
813 | dev->mode_config.tv_flicker_reduction_property->values[0] = 0; | ||
814 | dev->mode_config.tv_flicker_reduction_property->values[1] = 100; | ||
815 | 824 | ||
816 | dev->mode_config.tv_overscan_property = | 825 | dev->mode_config.tv_overscan_property = |
817 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 826 | drm_property_create_range(dev, 0, "overscan", 0, 100); |
818 | "overscan", 2); | ||
819 | dev->mode_config.tv_overscan_property->values[0] = 0; | ||
820 | dev->mode_config.tv_overscan_property->values[1] = 100; | ||
821 | 827 | ||
822 | dev->mode_config.tv_saturation_property = | 828 | dev->mode_config.tv_saturation_property = |
823 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 829 | drm_property_create_range(dev, 0, "saturation", 0, 100); |
824 | "saturation", 2); | ||
825 | dev->mode_config.tv_saturation_property->values[0] = 0; | ||
826 | dev->mode_config.tv_saturation_property->values[1] = 100; | ||
827 | 830 | ||
828 | dev->mode_config.tv_hue_property = | 831 | dev->mode_config.tv_hue_property = |
829 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 832 | drm_property_create_range(dev, 0, "hue", 0, 100); |
830 | "hue", 2); | ||
831 | dev->mode_config.tv_hue_property->values[0] = 0; | ||
832 | dev->mode_config.tv_hue_property->values[1] = 100; | ||
833 | 833 | ||
834 | return 0; | 834 | return 0; |
835 | } | 835 | } |
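Likewise, the removed assignments to values[0] and values[1] show what drm_property_create_range() has to do: create a two-value DRM_MODE_PROP_RANGE property and store the minimum and maximum. Again this is only a sketch inferred from the removed code, not the actual drm_crtc.c implementation:

    struct drm_property *drm_property_create_range(struct drm_device *dev,
                    int flags, const char *name,
                    uint64_t min, uint64_t max)
    {
            struct drm_property *property;

            property = drm_property_create(dev, flags | DRM_MODE_PROP_RANGE,
                                           name, 2);
            if (!property)
                    return NULL;

            property->values[0] = min;
            property->values[1] = max;

            return property;
    }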
@@ -845,18 +845,14 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties); | |||
845 | int drm_mode_create_scaling_mode_property(struct drm_device *dev) | 845 | int drm_mode_create_scaling_mode_property(struct drm_device *dev) |
846 | { | 846 | { |
847 | struct drm_property *scaling_mode; | 847 | struct drm_property *scaling_mode; |
848 | int i; | ||
849 | 848 | ||
850 | if (dev->mode_config.scaling_mode_property) | 849 | if (dev->mode_config.scaling_mode_property) |
851 | return 0; | 850 | return 0; |
852 | 851 | ||
853 | scaling_mode = | 852 | scaling_mode = |
854 | drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode", | 853 | drm_property_create_enum(dev, 0, "scaling mode", |
854 | drm_scaling_mode_enum_list, | ||
855 | ARRAY_SIZE(drm_scaling_mode_enum_list)); | 855 | ARRAY_SIZE(drm_scaling_mode_enum_list)); |
856 | for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++) | ||
857 | drm_property_add_enum(scaling_mode, i, | ||
858 | drm_scaling_mode_enum_list[i].type, | ||
859 | drm_scaling_mode_enum_list[i].name); | ||
860 | 856 | ||
861 | dev->mode_config.scaling_mode_property = scaling_mode; | 857 | dev->mode_config.scaling_mode_property = scaling_mode; |
862 | 858 | ||
@@ -874,18 +870,14 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); | |||
874 | int drm_mode_create_dithering_property(struct drm_device *dev) | 870 | int drm_mode_create_dithering_property(struct drm_device *dev) |
875 | { | 871 | { |
876 | struct drm_property *dithering_mode; | 872 | struct drm_property *dithering_mode; |
877 | int i; | ||
878 | 873 | ||
879 | if (dev->mode_config.dithering_mode_property) | 874 | if (dev->mode_config.dithering_mode_property) |
880 | return 0; | 875 | return 0; |
881 | 876 | ||
882 | dithering_mode = | 877 | dithering_mode = |
883 | drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering", | 878 | drm_property_create_enum(dev, 0, "dithering", |
879 | drm_dithering_mode_enum_list, | ||
884 | ARRAY_SIZE(drm_dithering_mode_enum_list)); | 880 | ARRAY_SIZE(drm_dithering_mode_enum_list)); |
885 | for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++) | ||
886 | drm_property_add_enum(dithering_mode, i, | ||
887 | drm_dithering_mode_enum_list[i].type, | ||
888 | drm_dithering_mode_enum_list[i].name); | ||
889 | dev->mode_config.dithering_mode_property = dithering_mode; | 881 | dev->mode_config.dithering_mode_property = dithering_mode; |
890 | 882 | ||
891 | return 0; | 883 | return 0; |
@@ -902,20 +894,15 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property); | |||
902 | int drm_mode_create_dirty_info_property(struct drm_device *dev) | 894 | int drm_mode_create_dirty_info_property(struct drm_device *dev) |
903 | { | 895 | { |
904 | struct drm_property *dirty_info; | 896 | struct drm_property *dirty_info; |
905 | int i; | ||
906 | 897 | ||
907 | if (dev->mode_config.dirty_info_property) | 898 | if (dev->mode_config.dirty_info_property) |
908 | return 0; | 899 | return 0; |
909 | 900 | ||
910 | dirty_info = | 901 | dirty_info = |
911 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | 902 | drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, |
912 | DRM_MODE_PROP_IMMUTABLE, | ||
913 | "dirty", | 903 | "dirty", |
904 | drm_dirty_info_enum_list, | ||
914 | ARRAY_SIZE(drm_dirty_info_enum_list)); | 905 | ARRAY_SIZE(drm_dirty_info_enum_list)); |
915 | for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++) | ||
916 | drm_property_add_enum(dirty_info, i, | ||
917 | drm_dirty_info_enum_list[i].type, | ||
918 | drm_dirty_info_enum_list[i].name); | ||
919 | dev->mode_config.dirty_info_property = dirty_info; | 906 | dev->mode_config.dirty_info_property = dirty_info; |
920 | 907 | ||
921 | return 0; | 908 | return 0; |
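drm_property_create_enum() absorbs the drm_property_add_enum() loop in the same way, and extra flags such as DRM_MODE_PROP_IMMUTABLE are simply OR'ed into the flags argument, as the dirty-info conversion above shows. A hedged sketch of driver-side usage (the "panel mode" property and its entries are invented for illustration):

    /* Sketch only: the property name and entries are hypothetical. */
    static const struct drm_prop_enum_list panel_mode_list[] = {
            { 0, "automatic" },
            { 1, "forced-on" },
            { 2, "forced-off" },
    };

    struct drm_property *prop;

    prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
                                    "panel mode", panel_mode_list,
                                    ARRAY_SIZE(panel_mode_list));
    if (!prop)
            return -ENOMEM;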
@@ -999,6 +986,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, | |||
999 | 986 | ||
1000 | return 0; | 987 | return 0; |
1001 | } | 988 | } |
989 | EXPORT_SYMBOL(drm_mode_group_init_legacy_group); | ||
1002 | 990 | ||
1003 | /** | 991 | /** |
1004 | * drm_mode_config_cleanup - free up DRM mode_config info | 992 | * drm_mode_config_cleanup - free up DRM mode_config info |
@@ -1048,6 +1036,9 @@ void drm_mode_config_cleanup(struct drm_device *dev) | |||
1048 | head) { | 1036 | head) { |
1049 | plane->funcs->destroy(plane); | 1037 | plane->funcs->destroy(plane); |
1050 | } | 1038 | } |
1039 | |||
1040 | idr_remove_all(&dev->mode_config.crtc_idr); | ||
1041 | idr_destroy(&dev->mode_config.crtc_idr); | ||
1051 | } | 1042 | } |
1052 | EXPORT_SYMBOL(drm_mode_config_cleanup); | 1043 | EXPORT_SYMBOL(drm_mode_config_cleanup); |
1053 | 1044 | ||
@@ -1062,9 +1053,16 @@ EXPORT_SYMBOL(drm_mode_config_cleanup); | |||
1062 | * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to | 1053 | * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to |
1063 | * the user. | 1054 | * the user. |
1064 | */ | 1055 | */ |
1065 | void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, | 1056 | static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, |
1066 | struct drm_display_mode *in) | 1057 | const struct drm_display_mode *in) |
1067 | { | 1058 | { |
1059 | WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX || | ||
1060 | in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX || | ||
1061 | in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX || | ||
1062 | in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX || | ||
1063 | in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX, | ||
1064 | "timing values too large for mode info\n"); | ||
1065 | |||
1068 | out->clock = in->clock; | 1066 | out->clock = in->clock; |
1069 | out->hdisplay = in->hdisplay; | 1067 | out->hdisplay = in->hdisplay; |
1070 | out->hsync_start = in->hsync_start; | 1068 | out->hsync_start = in->hsync_start; |
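The new WARN documents a long-standing truncation hazard rather than a new one: the ioctl structure carries the timings as 16-bit fields while the kernel-internal mode uses int, so anything above USHRT_MAX would be silently chopped on the way out to userspace. Abbreviated layouts for reference (the real definitions in drm_mode.h and drm_crtc.h have more fields):

    /* Abbreviated for illustration; not the full definitions. */
    struct drm_mode_modeinfo {      /* UAPI: 16-bit timings */
            __u32 clock;
            __u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
            __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
            /* ... */
    };

    struct drm_display_mode {       /* kernel: int timings */
            int clock;
            int hdisplay, hsync_start, hsync_end, htotal, hskew;
            int vdisplay, vsync_start, vsync_end, vtotal, vscan;
            /* ... */
    };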
@@ -1093,10 +1091,16 @@ void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, | |||
1093 | * | 1091 | * |
1094 | * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to | 1092 | * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to |
1095 | * the caller. | 1093 | * the caller. |
1094 | * | ||
1095 | * RETURNS: | ||
1096 | * Zero on success, errno on failure. | ||
1096 | */ | 1097 | */ |
1097 | void drm_crtc_convert_umode(struct drm_display_mode *out, | 1098 | static int drm_crtc_convert_umode(struct drm_display_mode *out, |
1098 | struct drm_mode_modeinfo *in) | 1099 | const struct drm_mode_modeinfo *in) |
1099 | { | 1100 | { |
1101 | if (in->clock > INT_MAX || in->vrefresh > INT_MAX) | ||
1102 | return -ERANGE; | ||
1103 | |||
1100 | out->clock = in->clock; | 1104 | out->clock = in->clock; |
1101 | out->hdisplay = in->hdisplay; | 1105 | out->hdisplay = in->hdisplay; |
1102 | out->hsync_start = in->hsync_start; | 1106 | out->hsync_start = in->hsync_start; |
@@ -1113,6 +1117,8 @@ void drm_crtc_convert_umode(struct drm_display_mode *out, | |||
1113 | out->type = in->type; | 1117 | out->type = in->type; |
1114 | strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); | 1118 | strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); |
1115 | out->name[DRM_DISPLAY_MODE_LEN-1] = 0; | 1119 | out->name[DRM_DISPLAY_MODE_LEN-1] = 0; |
1120 | |||
1121 | return 0; | ||
1116 | } | 1122 | } |
1117 | 1123 | ||
1118 | /** | 1124 | /** |
@@ -1311,7 +1317,7 @@ out: | |||
1311 | * @arg: arg from ioctl | 1317 | * @arg: arg from ioctl |
1312 | * | 1318 | * |
1313 | * LOCKING: | 1319 | * LOCKING: |
1314 | * Caller? (FIXME) | 1320 | * Takes mode config lock. |
1315 | * | 1321 | * |
1316 | * Construct a CRTC configuration structure to return to the user. | 1322 | * Construct a CRTC configuration structure to return to the user. |
1317 | * | 1323 | * |
@@ -1371,7 +1377,7 @@ out: | |||
1371 | * @arg: arg from ioctl | 1377 | * @arg: arg from ioctl |
1372 | * | 1378 | * |
1373 | * LOCKING: | 1379 | * LOCKING: |
1374 | * Caller? (FIXME) | 1380 | * Takes mode config lock. |
1375 | * | 1381 | * |
1376 | * Construct a connector configuration structure to return to the user. | 1382 | * Construct a connector configuration structure to return to the user. |
1377 | * | 1383 | * |
@@ -1553,6 +1559,9 @@ out: | |||
1553 | * @data: ioctl data | 1559 | * @data: ioctl data |
1554 | * @file_priv: DRM file info | 1560 | * @file_priv: DRM file info |
1555 | * | 1561 | * |
1562 | * LOCKING: | ||
1563 | * Takes mode config lock. | ||
1564 | * | ||
1556 | * Return a plane count and set of IDs. | 1565 | * Return a plane count and set of IDs. |
1557 | */ | 1566 | */ |
1558 | int drm_mode_getplane_res(struct drm_device *dev, void *data, | 1567 | int drm_mode_getplane_res(struct drm_device *dev, void *data, |
@@ -1599,6 +1608,9 @@ out: | |||
1599 | * @data: ioctl data | 1608 | * @data: ioctl data |
1600 | * @file_priv: DRM file info | 1609 | * @file_priv: DRM file info |
1601 | * | 1610 | * |
1611 | * LOCKING: | ||
1612 | * Takes mode config lock. | ||
1613 | * | ||
1602 | * Return plane info, including formats supported, gamma size, any | 1614 | * Return plane info, including formats supported, gamma size, any |
1603 | * current fb, etc. | 1615 | * current fb, etc. |
1604 | */ | 1616 | */ |
@@ -1664,6 +1676,9 @@ out: | |||
1664 | * @data: ioctl data | 1676 | * @data: ioctl data |
1665 | * @file_priv: DRM file info | 1677 | * @file_priv: DRM file info |
1666 | * | 1678 | * |
1679 | * LOCKING: | ||
1680 | * Takes mode config lock. | ||
1681 | * | ||
1667 | * Set plane info, including placement, fb, scaling, and other factors. | 1682 | * Set plane info, including placement, fb, scaling, and other factors. |
1668 | * Or pass a NULL fb to disable. | 1683 | * Or pass a NULL fb to disable. |
1669 | */ | 1684 | */ |
@@ -1794,7 +1809,7 @@ out: | |||
1794 | * @arg: arg from ioctl | 1809 | * @arg: arg from ioctl |
1795 | * | 1810 | * |
1796 | * LOCKING: | 1811 | * LOCKING: |
1797 | * Caller? (FIXME) | 1812 | * Takes mode config lock. |
1798 | * | 1813 | * |
1799 | * Build a new CRTC configuration based on user request. | 1814 | * Build a new CRTC configuration based on user request. |
1800 | * | 1815 | * |
@@ -1809,7 +1824,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1809 | struct drm_mode_config *config = &dev->mode_config; | 1824 | struct drm_mode_config *config = &dev->mode_config; |
1810 | struct drm_mode_crtc *crtc_req = data; | 1825 | struct drm_mode_crtc *crtc_req = data; |
1811 | struct drm_mode_object *obj; | 1826 | struct drm_mode_object *obj; |
1812 | struct drm_crtc *crtc, *crtcfb; | 1827 | struct drm_crtc *crtc; |
1813 | struct drm_connector **connector_set = NULL, *connector; | 1828 | struct drm_connector **connector_set = NULL, *connector; |
1814 | struct drm_framebuffer *fb = NULL; | 1829 | struct drm_framebuffer *fb = NULL; |
1815 | struct drm_display_mode *mode = NULL; | 1830 | struct drm_display_mode *mode = NULL; |
@@ -1821,6 +1836,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1821 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 1836 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
1822 | return -EINVAL; | 1837 | return -EINVAL; |
1823 | 1838 | ||
1839 | /* For some reason crtc x/y offsets are signed internally. */ | ||
1840 | if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) | ||
1841 | return -ERANGE; | ||
1842 | |||
1824 | mutex_lock(&dev->mode_config.mutex); | 1843 | mutex_lock(&dev->mode_config.mutex); |
1825 | obj = drm_mode_object_find(dev, crtc_req->crtc_id, | 1844 | obj = drm_mode_object_find(dev, crtc_req->crtc_id, |
1826 | DRM_MODE_OBJECT_CRTC); | 1845 | DRM_MODE_OBJECT_CRTC); |
@@ -1836,14 +1855,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1836 | /* If we have a mode we need a framebuffer. */ | 1855 | /* If we have a mode we need a framebuffer. */ |
1837 | /* If we pass -1, set the mode with the currently bound fb */ | 1856 | /* If we pass -1, set the mode with the currently bound fb */ |
1838 | if (crtc_req->fb_id == -1) { | 1857 | if (crtc_req->fb_id == -1) { |
1839 | list_for_each_entry(crtcfb, | 1858 | if (!crtc->fb) { |
1840 | &dev->mode_config.crtc_list, head) { | 1859 | DRM_DEBUG_KMS("CRTC doesn't have current FB\n"); |
1841 | if (crtcfb == crtc) { | 1860 | ret = -EINVAL; |
1842 | DRM_DEBUG_KMS("Using current fb for " | 1861 | goto out; |
1843 | "setmode\n"); | ||
1844 | fb = crtc->fb; | ||
1845 | } | ||
1846 | } | 1862 | } |
1863 | fb = crtc->fb; | ||
1847 | } else { | 1864 | } else { |
1848 | obj = drm_mode_object_find(dev, crtc_req->fb_id, | 1865 | obj = drm_mode_object_find(dev, crtc_req->fb_id, |
1849 | DRM_MODE_OBJECT_FB); | 1866 | DRM_MODE_OBJECT_FB); |
@@ -1857,8 +1874,30 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1857 | } | 1874 | } |
1858 | 1875 | ||
1859 | mode = drm_mode_create(dev); | 1876 | mode = drm_mode_create(dev); |
1860 | drm_crtc_convert_umode(mode, &crtc_req->mode); | 1877 | if (!mode) { |
1878 | ret = -ENOMEM; | ||
1879 | goto out; | ||
1880 | } | ||
1881 | |||
1882 | ret = drm_crtc_convert_umode(mode, &crtc_req->mode); | ||
1883 | if (ret) { | ||
1884 | DRM_DEBUG_KMS("Invalid mode\n"); | ||
1885 | goto out; | ||
1886 | } | ||
1887 | |||
1861 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 1888 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
1889 | |||
1890 | if (mode->hdisplay > fb->width || | ||
1891 | mode->vdisplay > fb->height || | ||
1892 | crtc_req->x > fb->width - mode->hdisplay || | ||
1893 | crtc_req->y > fb->height - mode->vdisplay) { | ||
1894 | DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n", | ||
1895 | mode->hdisplay, mode->vdisplay, | ||
1896 | crtc_req->x, crtc_req->y, | ||
1897 | fb->width, fb->height); | ||
1898 | ret = -ENOSPC; | ||
1899 | goto out; | ||
1900 | } | ||
1862 | } | 1901 | } |
1863 | 1902 | ||
1864 | if (crtc_req->count_connectors == 0 && mode) { | 1903 | if (crtc_req->count_connectors == 0 && mode) { |
@@ -1926,6 +1965,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1926 | 1965 | ||
1927 | out: | 1966 | out: |
1928 | kfree(connector_set); | 1967 | kfree(connector_set); |
1968 | drm_mode_destroy(dev, mode); | ||
1929 | mutex_unlock(&dev->mode_config.mutex); | 1969 | mutex_unlock(&dev->mode_config.mutex); |
1930 | return ret; | 1970 | return ret; |
1931 | } | 1971 | } |
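The viewport check added to drm_mode_setcrtc() rejects a configuration whose visible area, shifted by the requested x/y offset, would scan out past the end of the framebuffer, returning -ENOSPC before the driver ever sees it. The test restated as standalone arithmetic (a sketch, not kernel code; the helper name is invented), with the comparisons ordered so the unsigned subtraction cannot wrap:

    #include <stdbool.h>
    #include <stdint.h>

    /* The hdisplay x vdisplay viewport at offset (x, y) must lie fully
     * inside a width x height framebuffer.  "x > width - hdisplay" is
     * only evaluated once "hdisplay > width" has been ruled out, so the
     * unsigned subtraction cannot wrap around. */
    static bool viewport_fits_fb(uint32_t hdisplay, uint32_t vdisplay,
                                 uint32_t x, uint32_t y,
                                 uint32_t width, uint32_t height)
    {
            if (hdisplay > width || vdisplay > height)
                    return false;
            if (x > width - hdisplay || y > height - vdisplay)
                    return false;
            return true;
    }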
@@ -2275,7 +2315,7 @@ out: | |||
2275 | * @arg: arg from ioctl | 2315 | * @arg: arg from ioctl |
2276 | * | 2316 | * |
2277 | * LOCKING: | 2317 | * LOCKING: |
2278 | * Caller? (FIXME) | 2318 | * Takes mode config lock. |
2279 | * | 2319 | * |
2280 | * Lookup the FB given its ID and return info about it. | 2320 | * Lookup the FB given its ID and return info about it. |
2281 | * | 2321 | * |
@@ -2424,38 +2464,48 @@ void drm_fb_release(struct drm_file *priv) | |||
2424 | * | 2464 | * |
2425 | * Add @mode to @connector's user mode list. | 2465 | * Add @mode to @connector's user mode list. |
2426 | */ | 2466 | */ |
2427 | static int drm_mode_attachmode(struct drm_device *dev, | 2467 | static void drm_mode_attachmode(struct drm_device *dev, |
2428 | struct drm_connector *connector, | 2468 | struct drm_connector *connector, |
2429 | struct drm_display_mode *mode) | 2469 | struct drm_display_mode *mode) |
2430 | { | 2470 | { |
2431 | int ret = 0; | ||
2432 | |||
2433 | list_add_tail(&mode->head, &connector->user_modes); | 2471 | list_add_tail(&mode->head, &connector->user_modes); |
2434 | return ret; | ||
2435 | } | 2472 | } |
2436 | 2473 | ||
2437 | int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, | 2474 | int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc, |
2438 | struct drm_display_mode *mode) | 2475 | const struct drm_display_mode *mode) |
2439 | { | 2476 | { |
2440 | struct drm_connector *connector; | 2477 | struct drm_connector *connector; |
2441 | int ret = 0; | 2478 | int ret = 0; |
2442 | struct drm_display_mode *dup_mode; | 2479 | struct drm_display_mode *dup_mode, *next; |
2443 | int need_dup = 0; | 2480 | LIST_HEAD(list); |
2481 | |||
2444 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 2482 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
2445 | if (!connector->encoder) | 2483 | if (!connector->encoder) |
2446 | break; | 2484 | continue; |
2447 | if (connector->encoder->crtc == crtc) { | 2485 | if (connector->encoder->crtc == crtc) { |
2448 | if (need_dup) | 2486 | dup_mode = drm_mode_duplicate(dev, mode); |
2449 | dup_mode = drm_mode_duplicate(dev, mode); | 2487 | if (!dup_mode) { |
2450 | else | 2488 | ret = -ENOMEM; |
2451 | dup_mode = mode; | 2489 | goto out; |
2452 | ret = drm_mode_attachmode(dev, connector, dup_mode); | 2490 | } |
2453 | if (ret) | 2491 | list_add_tail(&dup_mode->head, &list); |
2454 | return ret; | ||
2455 | need_dup = 1; | ||
2456 | } | 2492 | } |
2457 | } | 2493 | } |
2458 | return 0; | 2494 | |
2495 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
2496 | if (!connector->encoder) | ||
2497 | continue; | ||
2498 | if (connector->encoder->crtc == crtc) | ||
2499 | list_move_tail(list.next, &connector->user_modes); | ||
2500 | } | ||
2501 | |||
2502 | WARN_ON(!list_empty(&list)); | ||
2503 | |||
2504 | out: | ||
2505 | list_for_each_entry_safe(dup_mode, next, &list, head) | ||
2506 | drm_mode_destroy(dev, dup_mode); | ||
2507 | |||
2508 | return ret; | ||
2459 | } | 2509 | } |
2460 | EXPORT_SYMBOL(drm_mode_attachmode_crtc); | 2510 | EXPORT_SYMBOL(drm_mode_attachmode_crtc); |
2461 | 2511 | ||
@@ -2534,9 +2584,14 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, | |||
2534 | goto out; | 2584 | goto out; |
2535 | } | 2585 | } |
2536 | 2586 | ||
2537 | drm_crtc_convert_umode(mode, umode); | 2587 | ret = drm_crtc_convert_umode(mode, umode); |
2588 | if (ret) { | ||
2589 | DRM_DEBUG_KMS("Invalid mode\n"); | ||
2590 | drm_mode_destroy(dev, mode); | ||
2591 | goto out; | ||
2592 | } | ||
2538 | 2593 | ||
2539 | ret = drm_mode_attachmode(dev, connector, mode); | 2594 | drm_mode_attachmode(dev, connector, mode); |
2540 | out: | 2595 | out: |
2541 | mutex_unlock(&dev->mode_config.mutex); | 2596 | mutex_unlock(&dev->mode_config.mutex); |
2542 | return ret; | 2597 | return ret; |
@@ -2577,7 +2632,12 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, | |||
2577 | } | 2632 | } |
2578 | connector = obj_to_connector(obj); | 2633 | connector = obj_to_connector(obj); |
2579 | 2634 | ||
2580 | drm_crtc_convert_umode(&mode, umode); | 2635 | ret = drm_crtc_convert_umode(&mode, umode); |
2636 | if (ret) { | ||
2637 | DRM_DEBUG_KMS("Invalid mode\n"); | ||
2638 | goto out; | ||
2639 | } | ||
2640 | |||
2581 | ret = drm_mode_detachmode(dev, connector, &mode); | 2641 | ret = drm_mode_detachmode(dev, connector, &mode); |
2582 | out: | 2642 | out: |
2583 | mutex_unlock(&dev->mode_config.mutex); | 2643 | mutex_unlock(&dev->mode_config.mutex); |
@@ -2588,6 +2648,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, | |||
2588 | const char *name, int num_values) | 2648 | const char *name, int num_values) |
2589 | { | 2649 | { |
2590 | struct drm_property *property = NULL; | 2650 | struct drm_property *property = NULL; |
2651 | int ret; | ||
2591 | 2652 | ||
2592 | property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); | 2653 | property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); |
2593 | if (!property) | 2654 | if (!property) |
@@ -2599,7 +2660,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, | |||
2599 | goto fail; | 2660 | goto fail; |
2600 | } | 2661 | } |
2601 | 2662 | ||
2602 | drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); | 2663 | ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); |
2664 | if (ret) | ||
2665 | goto fail; | ||
2666 | |||
2603 | property->flags = flags; | 2667 | property->flags = flags; |
2604 | property->num_values = num_values; | 2668 | property->num_values = num_values; |
2605 | INIT_LIST_HEAD(&property->enum_blob_list); | 2669 | INIT_LIST_HEAD(&property->enum_blob_list); |
@@ -2612,11 +2676,59 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, | |||
2612 | list_add_tail(&property->head, &dev->mode_config.property_list); | 2676 | list_add_tail(&property->head, &dev->mode_config.property_list); |
2613 | return property; | 2677 | return property; |
2614 | fail: | 2678 | fail: |
2679 | kfree(property->values); | ||
2615 | kfree(property); | 2680 | kfree(property); |
2616 | return NULL; | 2681 | return NULL; |
2617 | } | 2682 | } |
2618 | EXPORT_SYMBOL(drm_property_create); | 2683 | EXPORT_SYMBOL(drm_property_create); |
2619 | 2684 | ||
2685 | struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, | ||
2686 | const char *name, | ||
2687 | const struct drm_prop_enum_list *props, | ||
2688 | int num_values) | ||
2689 | { | ||
2690 | struct drm_property *property; | ||
2691 | int i, ret; | ||
2692 | |||
2693 | flags |= DRM_MODE_PROP_ENUM; | ||
2694 | |||
2695 | property = drm_property_create(dev, flags, name, num_values); | ||
2696 | if (!property) | ||
2697 | return NULL; | ||
2698 | |||
2699 | for (i = 0; i < num_values; i++) { | ||
2700 | ret = drm_property_add_enum(property, i, | ||
2701 | props[i].type, | ||
2702 | props[i].name); | ||
2703 | if (ret) { | ||
2704 | drm_property_destroy(dev, property); | ||
2705 | return NULL; | ||
2706 | } | ||
2707 | } | ||
2708 | |||
2709 | return property; | ||
2710 | } | ||
2711 | EXPORT_SYMBOL(drm_property_create_enum); | ||
2712 | |||
2713 | struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, | ||
2714 | const char *name, | ||
2715 | uint64_t min, uint64_t max) | ||
2716 | { | ||
2717 | struct drm_property *property; | ||
2718 | |||
2719 | flags |= DRM_MODE_PROP_RANGE; | ||
2720 | |||
2721 | property = drm_property_create(dev, flags, name, 2); | ||
2722 | if (!property) | ||
2723 | return NULL; | ||
2724 | |||
2725 | property->values[0] = min; | ||
2726 | property->values[1] = max; | ||
2727 | |||
2728 | return property; | ||
2729 | } | ||
2730 | EXPORT_SYMBOL(drm_property_create_range); | ||
2731 | |||
2620 | int drm_property_add_enum(struct drm_property *property, int index, | 2732 | int drm_property_add_enum(struct drm_property *property, int index, |
2621 | uint64_t value, const char *name) | 2733 | uint64_t value, const char *name) |
2622 | { | 2734 | { |
@@ -2828,6 +2940,7 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev | |||
2828 | void *data) | 2940 | void *data) |
2829 | { | 2941 | { |
2830 | struct drm_property_blob *blob; | 2942 | struct drm_property_blob *blob; |
2943 | int ret; | ||
2831 | 2944 | ||
2832 | if (!length || !data) | 2945 | if (!length || !data) |
2833 | return NULL; | 2946 | return NULL; |
@@ -2836,13 +2949,16 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev | |||
2836 | if (!blob) | 2949 | if (!blob) |
2837 | return NULL; | 2950 | return NULL; |
2838 | 2951 | ||
2839 | blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob)); | 2952 | ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB); |
2953 | if (ret) { | ||
2954 | kfree(blob); | ||
2955 | return NULL; | ||
2956 | } | ||
2957 | |||
2840 | blob->length = length; | 2958 | blob->length = length; |
2841 | 2959 | ||
2842 | memcpy(blob->data, data, length); | 2960 | memcpy(blob->data, data, length); |
2843 | 2961 | ||
2844 | drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB); | ||
2845 | |||
2846 | list_add_tail(&blob->head, &dev->mode_config.property_blob_list); | 2962 | list_add_tail(&blob->head, &dev->mode_config.property_blob_list); |
2847 | return blob; | 2963 | return blob; |
2848 | } | 2964 | } |
@@ -3021,7 +3137,7 @@ void drm_mode_connector_detach_encoder(struct drm_connector *connector, | |||
3021 | } | 3137 | } |
3022 | EXPORT_SYMBOL(drm_mode_connector_detach_encoder); | 3138 | EXPORT_SYMBOL(drm_mode_connector_detach_encoder); |
3023 | 3139 | ||
3024 | bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | 3140 | int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
3025 | int gamma_size) | 3141 | int gamma_size) |
3026 | { | 3142 | { |
3027 | crtc->gamma_size = gamma_size; | 3143 | crtc->gamma_size = gamma_size; |
@@ -3029,10 +3145,10 @@ bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | |||
3029 | crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL); | 3145 | crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL); |
3030 | if (!crtc->gamma_store) { | 3146 | if (!crtc->gamma_store) { |
3031 | crtc->gamma_size = 0; | 3147 | crtc->gamma_size = 0; |
3032 | return false; | 3148 | return -ENOMEM; |
3033 | } | 3149 | } |
3034 | 3150 | ||
3035 | return true; | 3151 | return 0; |
3036 | } | 3152 | } |
3037 | EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); | 3153 | EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); |
3038 | 3154 | ||
@@ -3178,6 +3294,18 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, | |||
3178 | goto out; | 3294 | goto out; |
3179 | fb = obj_to_fb(obj); | 3295 | fb = obj_to_fb(obj); |
3180 | 3296 | ||
3297 | if (crtc->mode.hdisplay > fb->width || | ||
3298 | crtc->mode.vdisplay > fb->height || | ||
3299 | crtc->x > fb->width - crtc->mode.hdisplay || | ||
3300 | crtc->y > fb->height - crtc->mode.vdisplay) { | ||
3301 | DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n", | ||
3302 | fb->width, fb->height, | ||
3303 | crtc->mode.hdisplay, crtc->mode.vdisplay, | ||
3304 | crtc->x, crtc->y); | ||
3305 | ret = -ENOSPC; | ||
3306 | goto out; | ||
3307 | } | ||
3308 | |||
3181 | if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { | 3309 | if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { |
3182 | ret = -ENOMEM; | 3310 | ret = -ENOMEM; |
3183 | spin_lock_irqsave(&dev->event_lock, flags); | 3311 | spin_lock_irqsave(&dev->event_lock, flags); |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 84a4a809793f..81118893264c 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "drm_fourcc.h" | 37 | #include "drm_fourcc.h" |
38 | #include "drm_crtc_helper.h" | 38 | #include "drm_crtc_helper.h" |
39 | #include "drm_fb_helper.h" | 39 | #include "drm_fb_helper.h" |
40 | #include "drm_edid.h" | ||
40 | 41 | ||
41 | static bool drm_kms_helper_poll = true; | 42 | static bool drm_kms_helper_poll = true; |
42 | module_param_named(poll, drm_kms_helper_poll, bool, 0600); | 43 | module_param_named(poll, drm_kms_helper_poll, bool, 0600); |
@@ -44,12 +45,12 @@ module_param_named(poll, drm_kms_helper_poll, bool, 0600); | |||
44 | static void drm_mode_validate_flag(struct drm_connector *connector, | 45 | static void drm_mode_validate_flag(struct drm_connector *connector, |
45 | int flags) | 46 | int flags) |
46 | { | 47 | { |
47 | struct drm_display_mode *mode, *t; | 48 | struct drm_display_mode *mode; |
48 | 49 | ||
49 | if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE)) | 50 | if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE)) |
50 | return; | 51 | return; |
51 | 52 | ||
52 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | 53 | list_for_each_entry(mode, &connector->modes, head) { |
53 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && | 54 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && |
54 | !(flags & DRM_MODE_FLAG_INTERLACE)) | 55 | !(flags & DRM_MODE_FLAG_INTERLACE)) |
55 | mode->status = MODE_NO_INTERLACE; | 56 | mode->status = MODE_NO_INTERLACE; |
@@ -87,7 +88,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
87 | uint32_t maxX, uint32_t maxY) | 88 | uint32_t maxX, uint32_t maxY) |
88 | { | 89 | { |
89 | struct drm_device *dev = connector->dev; | 90 | struct drm_device *dev = connector->dev; |
90 | struct drm_display_mode *mode, *t; | 91 | struct drm_display_mode *mode; |
91 | struct drm_connector_helper_funcs *connector_funcs = | 92 | struct drm_connector_helper_funcs *connector_funcs = |
92 | connector->helper_private; | 93 | connector->helper_private; |
93 | int count = 0; | 94 | int count = 0; |
@@ -96,7 +97,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
96 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, | 97 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, |
97 | drm_get_connector_name(connector)); | 98 | drm_get_connector_name(connector)); |
98 | /* set all modes to the unverified state */ | 99 | /* set all modes to the unverified state */ |
99 | list_for_each_entry_safe(mode, t, &connector->modes, head) | 100 | list_for_each_entry(mode, &connector->modes, head) |
100 | mode->status = MODE_UNVERIFIED; | 101 | mode->status = MODE_UNVERIFIED; |
101 | 102 | ||
102 | if (connector->force) { | 103 | if (connector->force) { |
@@ -118,7 +119,12 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
118 | goto prune; | 119 | goto prune; |
119 | } | 120 | } |
120 | 121 | ||
121 | count = (*connector_funcs->get_modes)(connector); | 122 | #ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE |
123 | count = drm_load_edid_firmware(connector); | ||
124 | if (count == 0) | ||
125 | #endif | ||
126 | count = (*connector_funcs->get_modes)(connector); | ||
127 | |||
122 | if (count == 0 && connector->status == connector_status_connected) | 128 | if (count == 0 && connector->status == connector_status_connected) |
123 | count = drm_add_modes_noedid(connector, 1024, 768); | 129 | count = drm_add_modes_noedid(connector, 1024, 768); |
124 | if (count == 0) | 130 | if (count == 0) |
@@ -136,7 +142,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
136 | mode_flags |= DRM_MODE_FLAG_DBLSCAN; | 142 | mode_flags |= DRM_MODE_FLAG_DBLSCAN; |
137 | drm_mode_validate_flag(connector, mode_flags); | 143 | drm_mode_validate_flag(connector, mode_flags); |
138 | 144 | ||
139 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | 145 | list_for_each_entry(mode, &connector->modes, head) { |
140 | if (mode->status == MODE_OK) | 146 | if (mode->status == MODE_OK) |
141 | mode->status = connector_funcs->mode_valid(connector, | 147 | mode->status = connector_funcs->mode_valid(connector, |
142 | mode); | 148 | mode); |
@@ -152,7 +158,7 @@ prune: | |||
152 | 158 | ||
153 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, | 159 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, |
154 | drm_get_connector_name(connector)); | 160 | drm_get_connector_name(connector)); |
155 | list_for_each_entry_safe(mode, t, &connector->modes, head) { | 161 | list_for_each_entry(mode, &connector->modes, head) { |
156 | mode->vrefresh = drm_mode_vrefresh(mode); | 162 | mode->vrefresh = drm_mode_vrefresh(mode); |
157 | 163 | ||
158 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 164 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
@@ -352,6 +358,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
352 | return true; | 358 | return true; |
353 | 359 | ||
354 | adjusted_mode = drm_mode_duplicate(dev, mode); | 360 | adjusted_mode = drm_mode_duplicate(dev, mode); |
361 | if (!adjusted_mode) | ||
362 | return false; | ||
355 | 363 | ||
356 | saved_hwmode = crtc->hwmode; | 364 | saved_hwmode = crtc->hwmode; |
357 | saved_mode = crtc->mode; | 365 | saved_mode = crtc->mode; |
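Two small robustness changes sit alongside the EDID-firmware hook: drm_crtc_helper_set_mode() now fails cleanly when drm_mode_duplicate() returns NULL, and the probe loops drop list_for_each_entry_safe() because they only mark or validate modes; the safe variant is only needed when the current entry may be unlinked during the walk. For contrast, a deletion-style loop that does need it (del_me() is a placeholder predicate):

    /* list_for_each_entry_safe() caches the next pointer, so the
     * current entry may be unlinked and freed inside the loop body. */
    list_for_each_entry_safe(mode, t, &connector->modes, head) {
            if (del_me(mode)) {
                    list_del(&mode->head);
                    drm_mode_destroy(dev, mode);
            }
    }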
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ebf7d3f68fc4..0b65fbc8a630 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -135,23 +135,23 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
135 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), | 135 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), |
136 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), | 136 | DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), |
137 | 137 | ||
138 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 138 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
139 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 139 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
140 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 140 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
141 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 141 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
142 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 142 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
143 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 143 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
144 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 144 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED), | 145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), |
146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), | 146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), |
147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
148 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 148 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
149 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 149 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
150 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 150 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
151 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 151 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
152 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 152 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
153 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 153 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
154 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 154 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
155 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 155 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
156 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 156 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
157 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 157 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
@@ -390,6 +390,10 @@ long drm_ioctl(struct file *filp, | |||
390 | unsigned int usize, asize; | 390 | unsigned int usize, asize; |
391 | 391 | ||
392 | dev = file_priv->minor->dev; | 392 | dev = file_priv->minor->dev; |
393 | |||
394 | if (drm_device_is_unplugged(dev)) | ||
395 | return -ENODEV; | ||
396 | |||
393 | atomic_inc(&dev->ioctl_count); | 397 | atomic_inc(&dev->ioctl_count); |
394 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); | 398 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); |
395 | ++file_priv->ioctl_count; | 399 | ++file_priv->ioctl_count; |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ece03fc2d386..5a18b0df8285 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -149,8 +149,7 @@ EXPORT_SYMBOL(drm_edid_header_is_valid); | |||
149 | * Sanity check the EDID block (base or extension). Return 0 if the block | 149 | * Sanity check the EDID block (base or extension). Return 0 if the block |
150 | * doesn't check out, or 1 if it's valid. | 150 | * doesn't check out, or 1 if it's valid. |
151 | */ | 151 | */ |
152 | static bool | 152 | bool drm_edid_block_valid(u8 *raw_edid) |
153 | drm_edid_block_valid(u8 *raw_edid) | ||
154 | { | 153 | { |
155 | int i; | 154 | int i; |
156 | u8 csum = 0; | 155 | u8 csum = 0; |
@@ -203,6 +202,7 @@ bad: | |||
203 | } | 202 | } |
204 | return 0; | 203 | return 0; |
205 | } | 204 | } |
205 | EXPORT_SYMBOL(drm_edid_block_valid); | ||
206 | 206 | ||
207 | /** | 207 | /** |
208 | * drm_edid_is_valid - sanity check EDID data | 208 | * drm_edid_is_valid - sanity check EDID data |
@@ -226,7 +226,6 @@ bool drm_edid_is_valid(struct edid *edid) | |||
226 | } | 226 | } |
227 | EXPORT_SYMBOL(drm_edid_is_valid); | 227 | EXPORT_SYMBOL(drm_edid_is_valid); |
228 | 228 | ||
229 | #define DDC_ADDR 0x50 | ||
230 | #define DDC_SEGMENT_ADDR 0x30 | 229 | #define DDC_SEGMENT_ADDR 0x30 |
231 | /** | 230 | /** |
232 | * Get EDID information via I2C. | 231 | * Get EDID information via I2C. |
@@ -266,6 +265,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, | |||
266 | } | 265 | } |
267 | }; | 266 | }; |
268 | ret = i2c_transfer(adapter, msgs, 2); | 267 | ret = i2c_transfer(adapter, msgs, 2); |
268 | if (ret == -ENXIO) { | ||
269 | DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", | ||
270 | adapter->name); | ||
271 | break; | ||
272 | } | ||
269 | } while (ret != 2 && --retries); | 273 | } while (ret != 2 && --retries); |
270 | 274 | ||
271 | return ret == 2 ? 0 : -1; | 275 | return ret == 2 ? 0 : -1; |
@@ -745,7 +749,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid, | |||
745 | */ | 749 | */ |
746 | mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); | 750 | mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); |
747 | if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { | 751 | if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { |
748 | kfree(mode); | 752 | drm_mode_destroy(dev, mode); |
749 | mode = drm_gtf_mode_complex(dev, hsize, vsize, | 753 | mode = drm_gtf_mode_complex(dev, hsize, vsize, |
750 | vrefresh_rate, 0, 0, | 754 | vrefresh_rate, 0, 0, |
751 | drm_gtf2_m(edid), | 755 | drm_gtf2_m(edid), |
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c new file mode 100644 index 000000000000..da9acba2dd6c --- /dev/null +++ b/drivers/gpu/drm/drm_edid_load.c | |||
@@ -0,0 +1,250 @@ | |||
1 | /* | ||
2 | drm_edid_load.c: use a built-in EDID data set or load it via the firmware | ||
3 | interface | ||
4 | |||
5 | Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org> | ||
6 | |||
7 | This program is free software; you can redistribute it and/or | ||
8 | modify it under the terms of the GNU General Public License | ||
9 | as published by the Free Software Foundation; either version 2 | ||
10 | of the License, or (at your option) any later version. | ||
11 | |||
12 | This program is distributed in the hope that it will be useful, | ||
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | GNU General Public License for more details. | ||
16 | |||
17 | You should have received a copy of the GNU General Public License | ||
18 | along with this program; if not, write to the Free Software | ||
19 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/firmware.h> | ||
24 | #include "drmP.h" | ||
25 | #include "drm_crtc.h" | ||
26 | #include "drm_crtc_helper.h" | ||
27 | #include "drm_edid.h" | ||
28 | |||
29 | static char edid_firmware[PATH_MAX]; | ||
30 | module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644); | ||
31 | MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " | ||
32 | "from built-in data or /lib/firmware instead. "); | ||
33 | |||
34 | #define GENERIC_EDIDS 4 | ||
35 | static char *generic_edid_name[GENERIC_EDIDS] = { | ||
36 | "edid/1024x768.bin", | ||
37 | "edid/1280x1024.bin", | ||
38 | "edid/1680x1050.bin", | ||
39 | "edid/1920x1080.bin", | ||
40 | }; | ||
41 | |||
42 | static u8 generic_edid[GENERIC_EDIDS][128] = { | ||
43 | { | ||
44 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | ||
45 | 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
46 | 0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78, | ||
47 | 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, | ||
48 | 0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40, | ||
49 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | ||
50 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19, | ||
51 | 0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90, | ||
52 | 0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18, | ||
53 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, | ||
54 | 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, | ||
55 | 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, | ||
56 | 0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20, | ||
57 | 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, | ||
58 | 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58, | ||
59 | 0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55, | ||
60 | }, | ||
61 | { | ||
62 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | ||
63 | 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
64 | 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78, | ||
65 | 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, | ||
66 | 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80, | ||
67 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | ||
68 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a, | ||
69 | 0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70, | ||
70 | 0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e, | ||
71 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, | ||
72 | 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, | ||
73 | 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, | ||
74 | 0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20, | ||
75 | 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, | ||
76 | 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53, | ||
77 | 0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0, | ||
78 | }, | ||
79 | { | ||
80 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | ||
81 | 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
82 | 0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78, | ||
83 | 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, | ||
84 | 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00, | ||
85 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | ||
86 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39, | ||
87 | 0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0, | ||
88 | 0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e, | ||
89 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, | ||
90 | 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, | ||
91 | 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, | ||
92 | 0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20, | ||
93 | 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, | ||
94 | 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57, | ||
95 | 0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26, | ||
96 | }, | ||
97 | { | ||
98 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | ||
99 | 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
100 | 0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78, | ||
101 | 0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25, | ||
102 | 0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0, | ||
103 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | ||
104 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, | ||
105 | 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, | ||
106 | 0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e, | ||
107 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e, | ||
108 | 0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20, | ||
109 | 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b, | ||
110 | 0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20, | ||
111 | 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, | ||
112 | 0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46, | ||
113 | 0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05, | ||
114 | }, | ||
115 | }; | ||
116 | |||
117 | static int edid_load(struct drm_connector *connector, char *name, | ||
118 | char *connector_name) | ||
119 | { | ||
120 | const struct firmware *fw; | ||
121 | struct platform_device *pdev; | ||
122 | u8 *fwdata = NULL, *edid; | ||
123 | int fwsize, expected; | ||
124 | int builtin = 0, err = 0; | ||
125 | int i, valid_extensions = 0; | ||
126 | |||
127 | pdev = platform_device_register_simple(connector_name, -1, NULL, 0); | ||
128 | if (IS_ERR(pdev)) { | ||
129 | DRM_ERROR("Failed to register EDID firmware platform device " | ||
130 | "for connector \"%s\"\n", connector_name); | ||
131 | err = -EINVAL; | ||
132 | goto out; | ||
133 | } | ||
134 | |||
135 | err = request_firmware(&fw, name, &pdev->dev); | ||
136 | platform_device_unregister(pdev); | ||
137 | |||
138 | if (err) { | ||
139 | i = 0; | ||
140 | while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i])) | ||
141 | i++; | ||
142 | if (i < GENERIC_EDIDS) { | ||
143 | err = 0; | ||
144 | builtin = 1; | ||
145 | fwdata = generic_edid[i]; | ||
146 | fwsize = sizeof(generic_edid[i]); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | if (err) { | ||
151 | DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n", | ||
152 | name, err); | ||
153 | goto out; | ||
154 | } | ||
155 | |||
156 | if (fwdata == NULL) { | ||
157 | fwdata = (u8 *) fw->data; | ||
158 | fwsize = fw->size; | ||
159 | } | ||
160 | |||
161 | expected = (fwdata[0x7e] + 1) * EDID_LENGTH; | ||
162 | if (expected != fwsize) { | ||
163 | DRM_ERROR("Size of EDID firmware \"%s\" is invalid " | ||
164 | "(expected %d, got %d)\n", name, expected, (int) fwsize); | ||
165 | err = -EINVAL; | ||
166 | goto relfw_out; | ||
167 | } | ||
168 | |||
169 | edid = kmalloc(fwsize, GFP_KERNEL); | ||
170 | if (edid == NULL) { | ||
171 | err = -ENOMEM; | ||
172 | goto relfw_out; | ||
173 | } | ||
174 | memcpy(edid, fwdata, fwsize); | ||
175 | |||
176 | if (!drm_edid_block_valid(edid)) { | ||
177 | DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", | ||
178 | name); | ||
179 | kfree(edid); | ||
180 | err = -EINVAL; | ||
181 | goto relfw_out; | ||
182 | } | ||
183 | |||
184 | for (i = 1; i <= edid[0x7e]; i++) { | ||
185 | if (i != valid_extensions + 1) | ||
186 | memcpy(edid + (valid_extensions + 1) * EDID_LENGTH, | ||
187 | edid + i * EDID_LENGTH, EDID_LENGTH); | ||
188 | if (drm_edid_block_valid(edid + i * EDID_LENGTH)) | ||
189 | valid_extensions++; | ||
190 | } | ||
191 | |||
192 | if (valid_extensions != edid[0x7e]) { | ||
193 | edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; | ||
194 | DRM_INFO("Found %d valid extensions instead of %d in EDID data " | ||
195 | "\"%s\" for connector \"%s\"\n", valid_extensions, | ||
196 | edid[0x7e], name, connector_name); | ||
197 | edid[0x7e] = valid_extensions; | ||
198 | edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, | ||
199 | GFP_KERNEL); | ||
200 | if (edid == NULL) { | ||
201 | err = -ENOMEM; | ||
202 | goto relfw_out; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | connector->display_info.raw_edid = edid; | ||
207 | DRM_INFO("Got %s EDID base block and %d extension%s from " | ||
208 | "\"%s\" for connector \"%s\"\n", builtin ? "built-in" : | ||
209 | "external", valid_extensions, valid_extensions == 1 ? "" : "s", | ||
210 | name, connector_name); | ||
211 | |||
212 | relfw_out: | ||
213 | release_firmware(fw); | ||
214 | |||
215 | out: | ||
216 | return err; | ||
217 | } | ||
218 | |||
219 | int drm_load_edid_firmware(struct drm_connector *connector) | ||
220 | { | ||
221 | char *connector_name = drm_get_connector_name(connector); | ||
222 | char *edidname = edid_firmware, *last, *colon; | ||
223 | int ret = 0; | ||
224 | |||
225 | if (*edidname == '\0') | ||
226 | return ret; | ||
227 | |||
228 | colon = strchr(edidname, ':'); | ||
229 | if (colon != NULL) { | ||
230 | if (strncmp(connector_name, edidname, colon - edidname)) | ||
231 | return ret; | ||
232 | edidname = colon + 1; | ||
233 | if (*edidname == '\0') | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | last = edidname + strlen(edidname) - 1; | ||
238 | if (*last == '\n') | ||
239 | *last = '\0'; | ||
240 | |||
241 | ret = edid_load(connector, edidname, connector_name); | ||
242 | if (ret) | ||
243 | return 0; | ||
244 | |||
245 | drm_mode_connector_update_edid_property(connector, | ||
246 | (struct edid *) connector->display_info.raw_edid); | ||
247 | |||
248 | return drm_add_edid_modes(connector, (struct edid *) | ||
249 | connector->display_info.raw_edid); | ||
250 | } | ||
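The loader accepts either one of the four built-in 128-byte blobs (selected by the generic names above) or an arbitrary file under /lib/firmware, and insists that the file size match (extension count + 1) * 128 bytes; the edid_firmware parameter may be prefixed with a connector name and a colon to limit the override to one output, e.g. something like drm.edid_firmware=VGA-1:edid/1280x1024.bin (the module prefix and connector name depend on the build and the driver). For reference, a minimal user-space checker for the same base-block layout that drm_edid_block_valid() enforces: a fixed 8-byte header, 128-byte blocks whose bytes sum to 0 modulo 256, and the extension count in byte 0x7e (a sketch, independent of the kernel code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define EDID_BLOCK 128

    static const uint8_t edid_header[8] =
            { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

    /* Every 128-byte block must sum to 0 (mod 256); the base block must
     * also start with the fixed header.  Byte 0x7e of the base block is
     * the number of extension blocks that follow. */
    static int block_valid(const uint8_t *b, int is_base)
    {
            uint8_t sum = 0;
            int i;

            if (is_base && memcmp(b, edid_header, sizeof(edid_header)))
                    return 0;
            for (i = 0; i < EDID_BLOCK; i++)
                    sum += b[i];
            return sum == 0;
    }

    int main(int argc, char **argv)
    {
            static uint8_t buf[EDID_BLOCK * 256];
            size_t n;
            FILE *f;
            int i, ext;

            if (argc != 2 || !(f = fopen(argv[1], "rb")))
                    return 1;
            n = fread(buf, 1, sizeof(buf), f);
            fclose(f);

            if (n < EDID_BLOCK || !block_valid(buf, 1)) {
                    fprintf(stderr, "invalid base block\n");
                    return 1;
            }
            ext = buf[0x7e];
            if (n != (size_t)(ext + 1) * EDID_BLOCK)
                    fprintf(stderr, "size does not match extension count\n");
            for (i = 1; i <= ext && (size_t)(i + 1) * EDID_BLOCK <= n; i++)
                    if (!block_valid(buf + i * EDID_BLOCK, 0))
                            fprintf(stderr, "extension block %d invalid\n", i);
            return 0;
    }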
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index aada26f63dec..7740dd26f007 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -306,91 +306,31 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { | |||
306 | static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; | 306 | static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; |
307 | #endif | 307 | #endif |
308 | 308 | ||
309 | static void drm_fb_helper_on(struct fb_info *info) | 309 | static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) |
310 | { | 310 | { |
311 | struct drm_fb_helper *fb_helper = info->par; | 311 | struct drm_fb_helper *fb_helper = info->par; |
312 | struct drm_device *dev = fb_helper->dev; | 312 | struct drm_device *dev = fb_helper->dev; |
313 | struct drm_crtc *crtc; | 313 | struct drm_crtc *crtc; |
314 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
315 | struct drm_connector *connector; | ||
316 | struct drm_encoder *encoder; | ||
317 | int i, j; | ||
318 | |||
319 | /* | ||
320 | * For each CRTC in this fb, turn the crtc on then, | ||
321 | * find all associated encoders and turn them on. | ||
322 | */ | ||
323 | mutex_lock(&dev->mode_config.mutex); | ||
324 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
325 | crtc = fb_helper->crtc_info[i].mode_set.crtc; | ||
326 | crtc_funcs = crtc->helper_private; | ||
327 | |||
328 | if (!crtc->enabled) | ||
329 | continue; | ||
330 | |||
331 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
332 | |||
333 | /* Walk the connectors & encoders on this fb turning them on */ | ||
334 | for (j = 0; j < fb_helper->connector_count; j++) { | ||
335 | connector = fb_helper->connector_info[j]->connector; | ||
336 | connector->dpms = DRM_MODE_DPMS_ON; | ||
337 | drm_connector_property_set_value(connector, | ||
338 | dev->mode_config.dpms_property, | ||
339 | DRM_MODE_DPMS_ON); | ||
340 | } | ||
341 | /* Found a CRTC on this fb, now find encoders */ | ||
342 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
343 | if (encoder->crtc == crtc) { | ||
344 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
345 | |||
346 | encoder_funcs = encoder->helper_private; | ||
347 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | ||
348 | } | ||
349 | } | ||
350 | } | ||
351 | mutex_unlock(&dev->mode_config.mutex); | ||
352 | } | ||
353 | |||
354 | static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | ||
355 | { | ||
356 | struct drm_fb_helper *fb_helper = info->par; | ||
357 | struct drm_device *dev = fb_helper->dev; | ||
358 | struct drm_crtc *crtc; | ||
359 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
360 | struct drm_connector *connector; | 314 | struct drm_connector *connector; |
361 | struct drm_encoder *encoder; | ||
362 | int i, j; | 315 | int i, j; |
363 | 316 | ||
364 | /* | 317 | /* |
365 | * For each CRTC in this fb, find all associated encoders | 318 | * For each CRTC in this fb, turn the connectors on/off. |
366 | * and turn them off, then turn off the CRTC. | ||
367 | */ | 319 | */ |
368 | mutex_lock(&dev->mode_config.mutex); | 320 | mutex_lock(&dev->mode_config.mutex); |
369 | for (i = 0; i < fb_helper->crtc_count; i++) { | 321 | for (i = 0; i < fb_helper->crtc_count; i++) { |
370 | crtc = fb_helper->crtc_info[i].mode_set.crtc; | 322 | crtc = fb_helper->crtc_info[i].mode_set.crtc; |
371 | crtc_funcs = crtc->helper_private; | ||
372 | 323 | ||
373 | if (!crtc->enabled) | 324 | if (!crtc->enabled) |
374 | continue; | 325 | continue; |
375 | 326 | ||
376 | /* Walk the connectors on this fb and mark them off */ | 327 | /* Walk the connectors & encoders on this fb turning them on/off */ |
377 | for (j = 0; j < fb_helper->connector_count; j++) { | 328 | for (j = 0; j < fb_helper->connector_count; j++) { |
378 | connector = fb_helper->connector_info[j]->connector; | 329 | connector = fb_helper->connector_info[j]->connector; |
379 | connector->dpms = dpms_mode; | 330 | drm_helper_connector_dpms(connector, dpms_mode); |
380 | drm_connector_property_set_value(connector, | 331 | drm_connector_property_set_value(connector, |
381 | dev->mode_config.dpms_property, | 332 | dev->mode_config.dpms_property, dpms_mode); |
382 | dpms_mode); | ||
383 | } | ||
384 | /* Found a CRTC on this fb, now find encoders */ | ||
385 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
386 | if (encoder->crtc == crtc) { | ||
387 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
388 | |||
389 | encoder_funcs = encoder->helper_private; | ||
390 | encoder_funcs->dpms(encoder, dpms_mode); | ||
391 | } | ||
392 | } | 333 | } |
393 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
394 | } | 334 | } |
395 | mutex_unlock(&dev->mode_config.mutex); | 335 | mutex_unlock(&dev->mode_config.mutex); |
396 | } | 336 | } |
@@ -400,23 +340,23 @@ int drm_fb_helper_blank(int blank, struct fb_info *info) | |||
400 | switch (blank) { | 340 | switch (blank) { |
401 | /* Display: On; HSync: On, VSync: On */ | 341 | /* Display: On; HSync: On, VSync: On */ |
402 | case FB_BLANK_UNBLANK: | 342 | case FB_BLANK_UNBLANK: |
403 | drm_fb_helper_on(info); | 343 | drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON); |
404 | break; | 344 | break; |
405 | /* Display: Off; HSync: On, VSync: On */ | 345 | /* Display: Off; HSync: On, VSync: On */ |
406 | case FB_BLANK_NORMAL: | 346 | case FB_BLANK_NORMAL: |
407 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 347 | drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); |
408 | break; | 348 | break; |
409 | /* Display: Off; HSync: Off, VSync: On */ | 349 | /* Display: Off; HSync: Off, VSync: On */ |
410 | case FB_BLANK_HSYNC_SUSPEND: | 350 | case FB_BLANK_HSYNC_SUSPEND: |
411 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 351 | drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); |
412 | break; | 352 | break; |
413 | /* Display: Off; HSync: On, VSync: Off */ | 353 | /* Display: Off; HSync: On, VSync: Off */ |
414 | case FB_BLANK_VSYNC_SUSPEND: | 354 | case FB_BLANK_VSYNC_SUSPEND: |
415 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); | 355 | drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND); |
416 | break; | 356 | break; |
417 | /* Display: Off; HSync: Off, VSync: Off */ | 357 | /* Display: Off; HSync: Off, VSync: Off */ |
418 | case FB_BLANK_POWERDOWN: | 358 | case FB_BLANK_POWERDOWN: |
419 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); | 359 | drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF); |
420 | break; | 360 | break; |
421 | } | 361 | } |
422 | return 0; | 362 | return 0; |
@@ -430,8 +370,11 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) | |||
430 | for (i = 0; i < helper->connector_count; i++) | 370 | for (i = 0; i < helper->connector_count; i++) |
431 | kfree(helper->connector_info[i]); | 371 | kfree(helper->connector_info[i]); |
432 | kfree(helper->connector_info); | 372 | kfree(helper->connector_info); |
433 | for (i = 0; i < helper->crtc_count; i++) | 373 | for (i = 0; i < helper->crtc_count; i++) { |
434 | kfree(helper->crtc_info[i].mode_set.connectors); | 374 | kfree(helper->crtc_info[i].mode_set.connectors); |
375 | if (helper->crtc_info[i].mode_set.mode) | ||
376 | drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); | ||
377 | } | ||
435 | kfree(helper->crtc_info); | 378 | kfree(helper->crtc_info); |
436 | } | 379 | } |
437 | 380 | ||
@@ -474,11 +417,10 @@ int drm_fb_helper_init(struct drm_device *dev, | |||
474 | 417 | ||
475 | i = 0; | 418 | i = 0; |
476 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 419 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
477 | fb_helper->crtc_info[i].crtc_id = crtc->base.id; | ||
478 | fb_helper->crtc_info[i].mode_set.crtc = crtc; | 420 | fb_helper->crtc_info[i].mode_set.crtc = crtc; |
479 | i++; | 421 | i++; |
480 | } | 422 | } |
481 | fb_helper->conn_limit = max_conn_count; | 423 | |
482 | return 0; | 424 | return 0; |
483 | out_free: | 425 | out_free: |
484 | drm_fb_helper_crtc_free(fb_helper); | 426 | drm_fb_helper_crtc_free(fb_helper); |
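The drm_fb_helper hunks above collapse the old on/off helpers into a single drm_fb_helper_dpms() call and route every FB_BLANK_* level through it. The sketch below is not part of the patch; it only illustrates how a KMS driver typically wires its fbdev emulation so that console blanking ends up in drm_fb_helper_blank(); the `example_fb_ops` name is illustrative.

```c
/* Hedged sketch: typical fbdev wiring that funnels console blanking through
 * drm_fb_helper_blank(); "example_fb_ops" is an illustrative name, not code
 * taken from this patch. */
#include <linux/fb.h>
#include "drmP.h"
#include "drm_fb_helper.h"

static struct fb_ops example_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,	/* FB_BLANK_* -> DRM_MODE_DPMS_* */
	.fb_setcmap	= drm_fb_helper_setcmap,
};
```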
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 6263b0147598..7348a3dab250 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -133,6 +133,9 @@ int drm_open(struct inode *inode, struct file *filp) | |||
133 | if (!(dev = minor->dev)) | 133 | if (!(dev = minor->dev)) |
134 | return -ENODEV; | 134 | return -ENODEV; |
135 | 135 | ||
136 | if (drm_device_is_unplugged(dev)) | ||
137 | return -ENODEV; | ||
138 | |||
136 | retcode = drm_open_helper(inode, filp, dev); | 139 | retcode = drm_open_helper(inode, filp, dev); |
137 | if (!retcode) { | 140 | if (!retcode) { |
138 | atomic_inc(&dev->counts[_DRM_STAT_OPENS]); | 141 | atomic_inc(&dev->counts[_DRM_STAT_OPENS]); |
@@ -181,6 +184,9 @@ int drm_stub_open(struct inode *inode, struct file *filp) | |||
181 | if (!(dev = minor->dev)) | 184 | if (!(dev = minor->dev)) |
182 | goto out; | 185 | goto out; |
183 | 186 | ||
187 | if (drm_device_is_unplugged(dev)) | ||
188 | goto out; | ||
189 | |||
184 | old_fops = filp->f_op; | 190 | old_fops = filp->f_op; |
185 | filp->f_op = fops_get(dev->driver->fops); | 191 | filp->f_op = fops_get(dev->driver->fops); |
186 | if (filp->f_op == NULL) { | 192 | if (filp->f_op == NULL) { |
@@ -579,6 +585,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
579 | retcode = -EBUSY; | 585 | retcode = -EBUSY; |
580 | } else | 586 | } else |
581 | retcode = drm_lastclose(dev); | 587 | retcode = drm_lastclose(dev); |
588 | if (drm_device_is_unplugged(dev)) | ||
589 | drm_put_dev(dev); | ||
582 | } | 590 | } |
583 | mutex_unlock(&drm_global_mutex); | 591 | mutex_unlock(&drm_global_mutex); |
584 | 592 | ||
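The new drm_device_is_unplugged() checks in drm_open(), drm_stub_open() and drm_release() follow one pattern: fail fast once the device has been marked unplugged, and drop the final reference on last close. A driver entry point wanting the same behaviour would presumably look like the hedged sketch below; the ioctl itself is hypothetical.

```c
/* Hedged sketch of the unplug guard used by the entry points above; the
 * ioctl is hypothetical and only shows where the check belongs. */
static int example_driver_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	if (drm_device_is_unplugged(dev))
		return -ENODEV;		/* device already hot-unplugged */

	/* ... normal ioctl work against live hardware ... */
	return 0;
}
```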
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f8625e290728..0ef358e53245 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -661,6 +661,9 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
661 | struct drm_hash_item *hash; | 661 | struct drm_hash_item *hash; |
662 | int ret = 0; | 662 | int ret = 0; |
663 | 663 | ||
664 | if (drm_device_is_unplugged(dev)) | ||
665 | return -ENODEV; | ||
666 | |||
664 | mutex_lock(&dev->struct_mutex); | 667 | mutex_lock(&dev->struct_mutex); |
665 | 668 | ||
666 | if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { | 669 | if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { |
@@ -700,7 +703,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
700 | */ | 703 | */ |
701 | drm_gem_object_reference(obj); | 704 | drm_gem_object_reference(obj); |
702 | 705 | ||
703 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | ||
704 | drm_vm_open_locked(vma); | 706 | drm_vm_open_locked(vma); |
705 | 707 | ||
706 | out_unlock: | 708 | out_unlock: |
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 956fd38d7c9e..cf85155da2a0 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "drm_core.h" | 37 | #include "drm_core.h" |
38 | 38 | ||
39 | #include "linux/pci.h" | 39 | #include "linux/pci.h" |
40 | #include "linux/export.h" | ||
40 | 41 | ||
41 | /** | 42 | /** |
42 | * Get the bus id. | 43 | * Get the bus id. |
@@ -276,6 +277,12 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
276 | case DRM_CAP_VBLANK_HIGH_CRTC: | 277 | case DRM_CAP_VBLANK_HIGH_CRTC: |
277 | req->value = 1; | 278 | req->value = 1; |
278 | break; | 279 | break; |
280 | case DRM_CAP_DUMB_PREFERRED_DEPTH: | ||
281 | req->value = dev->mode_config.preferred_depth; | ||
282 | break; | ||
283 | case DRM_CAP_DUMB_PREFER_SHADOW: | ||
284 | req->value = dev->mode_config.prefer_shadow; | ||
285 | break; | ||
279 | default: | 286 | default: |
280 | return -EINVAL; | 287 | return -EINVAL; |
281 | } | 288 | } |
@@ -346,3 +353,4 @@ int drm_noop(struct drm_device *dev, void *data, | |||
346 | DRM_DEBUG("\n"); | 353 | DRM_DEBUG("\n"); |
347 | return 0; | 354 | return 0; |
348 | } | 355 | } |
356 | EXPORT_SYMBOL(drm_noop); | ||
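DRM_CAP_DUMB_PREFERRED_DEPTH and DRM_CAP_DUMB_PREFER_SHADOW are reported straight from dev->mode_config, so userspace can query them through the existing GET_CAP ioctl. A rough userspace sketch follows (not part of this patch; it assumes the DRM uapi header is on the include path and trims error handling).

```c
/* Hedged userspace sketch: querying the two new dumb-buffer caps. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* assumed include path for the DRM uapi header */

static uint64_t example_get_cap(int drm_fd, uint64_t cap)
{
	struct drm_get_cap req = { .capability = cap };

	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &req) < 0)
		return 0;	/* old kernel or unsupported cap */
	return req.value;
}

/* depth  = example_get_cap(fd, DRM_CAP_DUMB_PREFERRED_DEPTH); */
/* shadow = example_get_cap(fd, DRM_CAP_DUMB_PREFER_SHADOW);   */
```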
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 44a5d0ad8b7c..c869436e238a 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -305,7 +305,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state) | |||
305 | * \param dev DRM device. | 305 | * \param dev DRM device. |
306 | * | 306 | * |
307 | * Initializes the IRQ related data. Installs the handler, calling the driver | 307 | * Initializes the IRQ related data. Installs the handler, calling the driver |
308 | * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions | 308 | * \c irq_preinstall() and \c irq_postinstall() functions |
309 | * before and after the installation. | 309 | * before and after the installation. |
310 | */ | 310 | */ |
311 | int drm_irq_install(struct drm_device *dev) | 311 | int drm_irq_install(struct drm_device *dev) |
@@ -385,7 +385,7 @@ EXPORT_SYMBOL(drm_irq_install); | |||
385 | * | 385 | * |
386 | * \param dev DRM device. | 386 | * \param dev DRM device. |
387 | * | 387 | * |
388 | * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. | 388 | * Calls the driver's \c irq_uninstall() function, and stops the irq. |
389 | */ | 389 | */ |
390 | int drm_irq_uninstall(struct drm_device *dev) | 390 | int drm_irq_uninstall(struct drm_device *dev) |
391 | { | 391 | { |
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index c8b6b66d428d..c86a0f1a435c 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c | |||
@@ -37,25 +37,6 @@ | |||
37 | #include <linux/export.h> | 37 | #include <linux/export.h> |
38 | #include "drmP.h" | 38 | #include "drmP.h" |
39 | 39 | ||
40 | /** | ||
41 | * Called when "/proc/dri/%dev%/mem" is read. | ||
42 | * | ||
43 | * \param buf output buffer. | ||
44 | * \param start start of output data. | ||
45 | * \param offset requested start offset. | ||
46 | * \param len requested number of bytes. | ||
47 | * \param eof whether there is no more data to return. | ||
48 | * \param data private data. | ||
49 | * \return number of written bytes. | ||
50 | * | ||
51 | * No-op. | ||
52 | */ | ||
53 | int drm_mem_info(char *buf, char **start, off_t offset, | ||
54 | int len, int *eof, void *data) | ||
55 | { | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | #if __OS_HAS_AGP | 40 | #if __OS_HAS_AGP |
60 | static void *agp_remap(unsigned long offset, unsigned long size, | 41 | static void *agp_remap(unsigned long offset, unsigned long size, |
61 | struct drm_device * dev) | 42 | struct drm_device * dev) |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index fb8e46b4e8bc..b7adb4a967fd 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -686,8 +686,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) | |||
686 | p->crtc_vsync_end /= 2; | 686 | p->crtc_vsync_end /= 2; |
687 | p->crtc_vtotal /= 2; | 687 | p->crtc_vtotal /= 2; |
688 | } | 688 | } |
689 | |||
690 | p->crtc_vtotal |= 1; | ||
691 | } | 689 | } |
692 | 690 | ||
693 | if (p->flags & DRM_MODE_FLAG_DBLSCAN) { | 691 | if (p->flags & DRM_MODE_FLAG_DBLSCAN) { |
@@ -716,6 +714,27 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo); | |||
716 | 714 | ||
717 | 715 | ||
718 | /** | 716 | /** |
717 | * drm_mode_copy - copy the mode | ||
718 | * @dst: mode to overwrite | ||
719 | * @src: mode to copy | ||
720 | * | ||
721 | * LOCKING: | ||
722 | * None. | ||
723 | * | ||
724 | * Copy an existing mode into another mode, preserving the object id | ||
725 | * of the destination mode. | ||
726 | */ | ||
727 | void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src) | ||
728 | { | ||
729 | int id = dst->base.id; | ||
730 | |||
731 | *dst = *src; | ||
732 | dst->base.id = id; | ||
733 | INIT_LIST_HEAD(&dst->head); | ||
734 | } | ||
735 | EXPORT_SYMBOL(drm_mode_copy); | ||
736 | |||
737 | /** | ||
719 | * drm_mode_duplicate - allocate and duplicate an existing mode | 738 | * drm_mode_duplicate - allocate and duplicate an existing mode |
720 | * @m: mode to duplicate | 739 | * @m: mode to duplicate |
721 | * | 740 | * |
@@ -729,16 +748,13 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | |||
729 | const struct drm_display_mode *mode) | 748 | const struct drm_display_mode *mode) |
730 | { | 749 | { |
731 | struct drm_display_mode *nmode; | 750 | struct drm_display_mode *nmode; |
732 | int new_id; | ||
733 | 751 | ||
734 | nmode = drm_mode_create(dev); | 752 | nmode = drm_mode_create(dev); |
735 | if (!nmode) | 753 | if (!nmode) |
736 | return NULL; | 754 | return NULL; |
737 | 755 | ||
738 | new_id = nmode->base.id; | 756 | drm_mode_copy(nmode, mode); |
739 | *nmode = *mode; | 757 | |
740 | nmode->base.id = new_id; | ||
741 | INIT_LIST_HEAD(&nmode->head); | ||
742 | return nmode; | 758 | return nmode; |
743 | } | 759 | } |
744 | EXPORT_SYMBOL(drm_mode_duplicate); | 760 | EXPORT_SYMBOL(drm_mode_duplicate); |
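drm_mode_copy() exists so a mode embedded in a long-lived object (for example crtc->mode) can be overwritten without clobbering its registered object id or list linkage; drm_mode_duplicate() is now just create-plus-copy. A driver-side use might look like the hedged sketch below; the crtc callback shown is illustrative, not from this patch.

```c
/* Hedged sketch: copying the adjusted mode into crtc->mode from a driver's
 * crtc mode_set callback, preserving the embedded mode's base.id. */
static int example_crtc_mode_set(struct drm_crtc *crtc,
				 struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode,
				 int x, int y, struct drm_framebuffer *old_fb)
{
	drm_mode_copy(&crtc->mode, adjusted_mode);

	/* ... program the hardware from crtc->mode ... */
	return 0;
}
```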
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index d4d10b7880cf..13f3d936472f 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -324,8 +324,6 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | |||
324 | if (ret) | 324 | if (ret) |
325 | goto err_g1; | 325 | goto err_g1; |
326 | 326 | ||
327 | pci_set_master(pdev); | ||
328 | |||
329 | dev->pdev = pdev; | 327 | dev->pdev = pdev; |
330 | dev->dev = &pdev->dev; | 328 | dev->dev = &pdev->dev; |
331 | 329 | ||
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index ae9db5e2b27c..82431dcae37b 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c | |||
@@ -122,7 +122,7 @@ static const char *drm_platform_get_name(struct drm_device *dev) | |||
122 | 122 | ||
123 | static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) | 123 | static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) |
124 | { | 124 | { |
125 | int len, ret; | 125 | int len, ret, id; |
126 | 126 | ||
127 | master->unique_len = 13 + strlen(dev->platformdev->name); | 127 | master->unique_len = 13 + strlen(dev->platformdev->name); |
128 | master->unique_size = master->unique_len; | 128 | master->unique_size = master->unique_len; |
@@ -131,8 +131,16 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas | |||
131 | if (master->unique == NULL) | 131 | if (master->unique == NULL) |
132 | return -ENOMEM; | 132 | return -ENOMEM; |
133 | 133 | ||
134 | id = dev->platformdev->id; | ||
135 | |||
136 | /* if only a single instance of the platform device, id will be | ||
137 | * set to -1; use 0 instead to avoid an odd-looking bus-id: | ||
138 | */ | ||
139 | if (id == -1) | ||
140 | id = 0; | ||
141 | |||
134 | len = snprintf(master->unique, master->unique_len, | 142 | len = snprintf(master->unique, master->unique_len, |
135 | "platform:%s:%02d", dev->platformdev->name, dev->platformdev->id); | 143 | "platform:%s:%02d", dev->platformdev->name, id); |
136 | 144 | ||
137 | if (len > master->unique_len) { | 145 | if (len > master->unique_len) { |
138 | DRM_ERROR("Unique buffer overflowed\n"); | 146 | DRM_ERROR("Unique buffer overflowed\n"); |
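With "%02d" in the format string, a platform device registered with id -1 used to yield a bus id ending in "-1"; mapping -1 to 0 produces a zero-padded id instead. The fragment below is illustration only, with a made-up device name.

```c
/* Illustration only: the generated bus id before and after the id fixup,
 * for a made-up platform device name "foo-drm". */
#include <linux/kernel.h>

static void example_show_busid(char *buf, size_t len)
{
	snprintf(buf, len, "platform:%s:%02d", "foo-drm", -1);	/* "platform:foo-drm:-1" */
	snprintf(buf, len, "platform:%s:%02d", "foo-drm", 0);	/* "platform:foo-drm:00" */
}
```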
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 6d7b083c5b77..aa454f80e109 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -319,6 +319,7 @@ int drm_fill_in_dev(struct drm_device *dev, | |||
319 | drm_lastclose(dev); | 319 | drm_lastclose(dev); |
320 | return retcode; | 320 | return retcode; |
321 | } | 321 | } |
322 | EXPORT_SYMBOL(drm_fill_in_dev); | ||
322 | 323 | ||
323 | 324 | ||
324 | /** | 325 | /** |
@@ -397,6 +398,7 @@ err_idr: | |||
397 | *minor = NULL; | 398 | *minor = NULL; |
398 | return ret; | 399 | return ret; |
399 | } | 400 | } |
401 | EXPORT_SYMBOL(drm_get_minor); | ||
400 | 402 | ||
401 | /** | 403 | /** |
402 | * Put a secondary minor number. | 404 | * Put a secondary minor number. |
@@ -428,6 +430,12 @@ int drm_put_minor(struct drm_minor **minor_p) | |||
428 | *minor_p = NULL; | 430 | *minor_p = NULL; |
429 | return 0; | 431 | return 0; |
430 | } | 432 | } |
433 | EXPORT_SYMBOL(drm_put_minor); | ||
434 | |||
435 | static void drm_unplug_minor(struct drm_minor *minor) | ||
436 | { | ||
437 | drm_sysfs_device_remove(minor); | ||
438 | } | ||
431 | 439 | ||
432 | /** | 440 | /** |
433 | * Called via drm_exit() at module unload time or when pci device is | 441 | * Called via drm_exit() at module unload time or when pci device is |
@@ -492,3 +500,21 @@ void drm_put_dev(struct drm_device *dev) | |||
492 | kfree(dev); | 500 | kfree(dev); |
493 | } | 501 | } |
494 | EXPORT_SYMBOL(drm_put_dev); | 502 | EXPORT_SYMBOL(drm_put_dev); |
503 | |||
504 | void drm_unplug_dev(struct drm_device *dev) | ||
505 | { | ||
506 | /* for a USB device */ | ||
507 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
508 | drm_unplug_minor(dev->control); | ||
509 | drm_unplug_minor(dev->primary); | ||
510 | |||
511 | mutex_lock(&drm_global_mutex); | ||
512 | |||
513 | drm_device_set_unplugged(dev); | ||
514 | |||
515 | if (dev->open_count == 0) { | ||
516 | drm_put_dev(dev); | ||
517 | } | ||
518 | mutex_unlock(&drm_global_mutex); | ||
519 | } | ||
520 | EXPORT_SYMBOL(drm_unplug_dev); | ||
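drm_unplug_dev() is meant to be called from a hotpluggable bus's disconnect path: it removes the sysfs minors immediately, marks the device unplugged, and either frees it right away (no open files) or defers that to the last drm_release(). A disconnect handler might use it roughly as sketched below; the USB driver and its identifiers are hypothetical, and real drivers may do extra teardown first.

```c
/* Hedged sketch: a hypothetical USB DRM driver's disconnect handler using
 * the new drm_unplug_dev() helper. */
#include <linux/usb.h>
#include "drmP.h"
#include "drm_crtc_helper.h"

static void example_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_kms_helper_poll_disable(dev);	/* stop connector polling */
	drm_unplug_dev(dev);			/* free now, or at last close */
}
```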
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 62c3675045ac..5a7bd51fc3d8 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c | |||
@@ -454,6 +454,8 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) | |||
454 | { | 454 | { |
455 | int i; | 455 | int i; |
456 | 456 | ||
457 | if (!connector->kdev.parent) | ||
458 | return; | ||
457 | DRM_DEBUG("removing \"%s\" from sysfs\n", | 459 | DRM_DEBUG("removing \"%s\" from sysfs\n", |
458 | drm_get_connector_name(connector)); | 460 | drm_get_connector_name(connector)); |
459 | 461 | ||
@@ -461,6 +463,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) | |||
461 | device_remove_file(&connector->kdev, &connector_attrs[i]); | 463 | device_remove_file(&connector->kdev, &connector_attrs[i]); |
462 | sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr); | 464 | sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr); |
463 | device_unregister(&connector->kdev); | 465 | device_unregister(&connector->kdev); |
466 | connector->kdev.parent = NULL; | ||
464 | } | 467 | } |
465 | EXPORT_SYMBOL(drm_sysfs_connector_remove); | 468 | EXPORT_SYMBOL(drm_sysfs_connector_remove); |
466 | 469 | ||
@@ -533,7 +536,9 @@ err_out: | |||
533 | */ | 536 | */ |
534 | void drm_sysfs_device_remove(struct drm_minor *minor) | 537 | void drm_sysfs_device_remove(struct drm_minor *minor) |
535 | { | 538 | { |
536 | device_unregister(&minor->kdev); | 539 | if (minor->kdev.parent) |
540 | device_unregister(&minor->kdev); | ||
541 | minor->kdev.parent = NULL; | ||
537 | } | 542 | } |
538 | 543 | ||
539 | 544 | ||
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 445003f4dc93..c8c83dad2ce1 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c | |||
@@ -2,7 +2,6 @@ | |||
2 | #include <linux/usb.h> | 2 | #include <linux/usb.h> |
3 | #include <linux/export.h> | 3 | #include <linux/export.h> |
4 | 4 | ||
5 | #ifdef CONFIG_USB | ||
6 | int drm_get_usb_dev(struct usb_interface *interface, | 5 | int drm_get_usb_dev(struct usb_interface *interface, |
7 | const struct usb_device_id *id, | 6 | const struct usb_device_id *id, |
8 | struct drm_driver *driver) | 7 | struct drm_driver *driver) |
@@ -115,4 +114,3 @@ void drm_usb_exit(struct drm_driver *driver, | |||
115 | usb_deregister(udriver); | 114 | usb_deregister(udriver); |
116 | } | 115 | } |
117 | EXPORT_SYMBOL(drm_usb_exit); | 116 | EXPORT_SYMBOL(drm_usb_exit); |
118 | #endif | ||
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 8c03eaf41448..149561818349 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
@@ -519,7 +519,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) | |||
519 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ | 519 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
520 | vma->vm_flags |= VM_DONTEXPAND; | 520 | vma->vm_flags |= VM_DONTEXPAND; |
521 | 521 | ||
522 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | ||
523 | drm_vm_open_locked(vma); | 522 | drm_vm_open_locked(vma); |
524 | return 0; | 523 | return 0; |
525 | } | 524 | } |
@@ -671,7 +670,6 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) | |||
671 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ | 670 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
672 | vma->vm_flags |= VM_DONTEXPAND; | 671 | vma->vm_flags |= VM_DONTEXPAND; |
673 | 672 | ||
674 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | ||
675 | drm_vm_open_locked(vma); | 673 | drm_vm_open_locked(vma); |
676 | return 0; | 674 | return 0; |
677 | } | 675 | } |
@@ -682,6 +680,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) | |||
682 | struct drm_device *dev = priv->minor->dev; | 680 | struct drm_device *dev = priv->minor->dev; |
683 | int ret; | 681 | int ret; |
684 | 682 | ||
683 | if (drm_device_is_unplugged(dev)) | ||
684 | return -ENODEV; | ||
685 | |||
685 | mutex_lock(&dev->struct_mutex); | 686 | mutex_lock(&dev->struct_mutex); |
686 | ret = drm_mmap_locked(filp, vma); | 687 | ret = drm_mmap_locked(filp, vma); |
687 | mutex_unlock(&dev->struct_mutex); | 688 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index b9e5266c341b..3343ac437fe5 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
@@ -1,7 +1,6 @@ | |||
1 | config DRM_EXYNOS | 1 | config DRM_EXYNOS |
2 | tristate "DRM Support for Samsung SoC EXYNOS Series" | 2 | tristate "DRM Support for Samsung SoC EXYNOS Series" |
3 | depends on DRM && PLAT_SAMSUNG | 3 | depends on DRM && PLAT_SAMSUNG |
4 | default n | ||
5 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
6 | select FB_CFB_FILLRECT | 5 | select FB_CFB_FILLRECT |
7 | select FB_CFB_COPYAREA | 6 | select FB_CFB_COPYAREA |
@@ -12,16 +11,19 @@ config DRM_EXYNOS | |||
12 | If M is selected the module will be called exynosdrm. | 11 | If M is selected the module will be called exynosdrm. |
13 | 12 | ||
14 | config DRM_EXYNOS_FIMD | 13 | config DRM_EXYNOS_FIMD |
15 | tristate "Exynos DRM FIMD" | 14 | bool "Exynos DRM FIMD" |
16 | depends on DRM_EXYNOS && !FB_S3C | 15 | depends on DRM_EXYNOS && !FB_S3C |
17 | default n | ||
18 | help | 16 | help |
19 | Choose this option if you want to use Exynos FIMD for DRM. | 17 | Choose this option if you want to use Exynos FIMD for DRM. |
20 | If M is selected, the module will be called exynos_drm_fimd | ||
21 | 18 | ||
22 | config DRM_EXYNOS_HDMI | 19 | config DRM_EXYNOS_HDMI |
23 | tristate "Exynos DRM HDMI" | 20 | bool "Exynos DRM HDMI" |
24 | depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV | 21 | depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV |
25 | help | 22 | help |
26 | Choose this option if you want to use Exynos HDMI for DRM. | 23 | Choose this option if you want to use Exynos HDMI for DRM. |
27 | If M is selected, the module will be called exynos_drm_hdmi | 24 | |
25 | config DRM_EXYNOS_VIDI | ||
26 | bool "Exynos DRM Virtual Display" | ||
27 | depends on DRM_EXYNOS | ||
28 | help | ||
29 | Choose this option if you want to use Exynos VIDI for DRM. | ||
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 395e69c9a96e..9e0bff8badf9 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile | |||
@@ -8,7 +8,10 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \ | |||
8 | exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ | 8 | exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ |
9 | exynos_drm_plane.o | 9 | exynos_drm_plane.o |
10 | 10 | ||
11 | obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o | 11 | exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o |
12 | obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o | 12 | exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ |
13 | obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \ | 13 | exynos_ddc.o exynos_hdmiphy.o \ |
14 | exynos_hdmiphy.o exynos_drm_hdmi.o | 14 | exynos_drm_hdmi.o |
15 | exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o | ||
16 | |||
17 | obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o | ||
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c index 84b614fe26fd..7e1051d07f1f 100644 --- a/drivers/gpu/drm/exynos/exynos_ddc.c +++ b/drivers/gpu/drm/exynos/exynos_ddc.c | |||
@@ -55,4 +55,3 @@ struct i2c_driver ddc_driver = { | |||
55 | .remove = __devexit_p(s5p_ddc_remove), | 55 | .remove = __devexit_p(s5p_ddc_remove), |
56 | .command = NULL, | 56 | .command = NULL, |
57 | }; | 57 | }; |
58 | EXPORT_SYMBOL(ddc_driver); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 3cf785c58186..4a3a5f72ed4a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c | |||
@@ -25,45 +25,161 @@ | |||
25 | 25 | ||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "drm.h" | 27 | #include "drm.h" |
28 | #include "exynos_drm.h" | ||
28 | 29 | ||
29 | #include "exynos_drm_drv.h" | 30 | #include "exynos_drm_drv.h" |
30 | #include "exynos_drm_gem.h" | 31 | #include "exynos_drm_gem.h" |
31 | #include "exynos_drm_buf.h" | 32 | #include "exynos_drm_buf.h" |
32 | 33 | ||
33 | static int lowlevel_buffer_allocate(struct drm_device *dev, | 34 | static int lowlevel_buffer_allocate(struct drm_device *dev, |
34 | struct exynos_drm_gem_buf *buffer) | 35 | unsigned int flags, struct exynos_drm_gem_buf *buf) |
35 | { | 36 | { |
37 | dma_addr_t start_addr, end_addr; | ||
38 | unsigned int npages, page_size, i = 0; | ||
39 | struct scatterlist *sgl; | ||
40 | int ret = 0; | ||
41 | |||
36 | DRM_DEBUG_KMS("%s\n", __FILE__); | 42 | DRM_DEBUG_KMS("%s\n", __FILE__); |
37 | 43 | ||
38 | buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size, | 44 | if (flags & EXYNOS_BO_NONCONTIG) { |
39 | &buffer->dma_addr, GFP_KERNEL); | 45 | DRM_DEBUG_KMS("not support allocation type.\n"); |
40 | if (!buffer->kvaddr) { | 46 | return -EINVAL; |
41 | DRM_ERROR("failed to allocate buffer.\n"); | 47 | } |
48 | |||
49 | if (buf->dma_addr) { | ||
50 | DRM_DEBUG_KMS("already allocated.\n"); | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | if (buf->size >= SZ_1M) { | ||
55 | npages = (buf->size >> SECTION_SHIFT) + 1; | ||
56 | page_size = SECTION_SIZE; | ||
57 | } else if (buf->size >= SZ_64K) { | ||
58 | npages = (buf->size >> 16) + 1; | ||
59 | page_size = SZ_64K; | ||
60 | } else { | ||
61 | npages = (buf->size >> PAGE_SHIFT) + 1; | ||
62 | page_size = PAGE_SIZE; | ||
63 | } | ||
64 | |||
65 | buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); | ||
66 | if (!buf->sgt) { | ||
67 | DRM_ERROR("failed to allocate sg table.\n"); | ||
42 | return -ENOMEM; | 68 | return -ENOMEM; |
43 | } | 69 | } |
44 | 70 | ||
45 | DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", | 71 | ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); |
46 | (unsigned long)buffer->kvaddr, | 72 | if (ret < 0) { |
47 | (unsigned long)buffer->dma_addr, | 73 | DRM_ERROR("failed to initialize sg table.\n"); |
48 | buffer->size); | 74 | kfree(buf->sgt); |
75 | buf->sgt = NULL; | ||
76 | return -ENOMEM; | ||
77 | } | ||
49 | 78 | ||
50 | return 0; | 79 | buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, |
80 | &buf->dma_addr, GFP_KERNEL); | ||
81 | if (!buf->kvaddr) { | ||
82 | DRM_ERROR("failed to allocate buffer.\n"); | ||
83 | ret = -ENOMEM; | ||
84 | goto err1; | ||
85 | } | ||
86 | |||
87 | start_addr = buf->dma_addr; | ||
88 | end_addr = buf->dma_addr + buf->size; | ||
89 | |||
90 | buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); | ||
91 | if (!buf->pages) { | ||
92 | DRM_ERROR("failed to allocate pages.\n"); | ||
93 | ret = -ENOMEM; | ||
94 | goto err2; | ||
95 | } | ||
96 | |||
107 | sgl = buf->sgt->sgl; | ||
108 | |||
109 | while (i < npages) { | ||
110 | buf->pages[i] = phys_to_page(start_addr); | ||
111 | sg_set_page(sgl, buf->pages[i], page_size, 0); | ||
112 | sg_dma_address(sgl) = start_addr; | ||
113 | start_addr += page_size; | ||
114 | if (end_addr - start_addr < page_size) | ||
115 | break; | ||
116 | sgl = sg_next(sgl); | ||
117 | i++; | ||
118 | } | ||
119 | |||
120 | buf->pages[i] = phys_to_page(start_addr); | ||
121 | |||
122 | sgl = sg_next(sgl); | ||
123 | sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0); | ||
124 | |||
125 | DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", | ||
126 | (unsigned long)buf->kvaddr, | ||
127 | (unsigned long)buf->dma_addr, | ||
128 | buf->size); | ||
129 | |||
130 | return ret; | ||
131 | err2: | ||
132 | dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, | ||
133 | (dma_addr_t)buf->dma_addr); | ||
134 | buf->dma_addr = (dma_addr_t)NULL; | ||
135 | err1: | ||
136 | sg_free_table(buf->sgt); | ||
137 | kfree(buf->sgt); | ||
138 | buf->sgt = NULL; | ||
139 | |||
140 | return ret; | ||
51 | } | 141 | } |
52 | 142 | ||
53 | static void lowlevel_buffer_deallocate(struct drm_device *dev, | 143 | static void lowlevel_buffer_deallocate(struct drm_device *dev, |
54 | struct exynos_drm_gem_buf *buffer) | 144 | unsigned int flags, struct exynos_drm_gem_buf *buf) |
55 | { | 145 | { |
56 | DRM_DEBUG_KMS("%s.\n", __FILE__); | 146 | DRM_DEBUG_KMS("%s.\n", __FILE__); |
57 | 147 | ||
58 | if (buffer->dma_addr && buffer->size) | 148 | /* |
59 | dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr, | 149 | * release only physically contiguous memory; |
60 | (dma_addr_t)buffer->dma_addr); | 150 | * non-contiguous memory is released by the exynos |
61 | else | 151 | * gem framework. |
62 | DRM_DEBUG_KMS("buffer data are invalid.\n"); | 152 | */ |
153 | if (flags & EXYNOS_BO_NONCONTIG) { | ||
154 | DRM_DEBUG_KMS("not support allocation type.\n"); | ||
155 | return; | ||
156 | } | ||
157 | |||
158 | if (!buf->dma_addr) { | ||
159 | DRM_DEBUG_KMS("dma_addr is invalid.\n"); | ||
160 | return; | ||
161 | } | ||
162 | |||
163 | DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", | ||
164 | (unsigned long)buf->kvaddr, | ||
165 | (unsigned long)buf->dma_addr, | ||
166 | buf->size); | ||
167 | |||
168 | sg_free_table(buf->sgt); | ||
169 | |||
170 | kfree(buf->sgt); | ||
171 | buf->sgt = NULL; | ||
172 | |||
173 | kfree(buf->pages); | ||
174 | buf->pages = NULL; | ||
175 | |||
176 | dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, | ||
177 | (dma_addr_t)buf->dma_addr); | ||
178 | buf->dma_addr = (dma_addr_t)NULL; | ||
63 | } | 179 | } |
64 | 180 | ||
65 | struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, | 181 | struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, |
66 | unsigned int size) | 182 | unsigned int size) |
67 | { | 183 | { |
68 | struct exynos_drm_gem_buf *buffer; | 184 | struct exynos_drm_gem_buf *buffer; |
69 | 185 | ||
@@ -77,21 +193,11 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, | |||
77 | } | 193 | } |
78 | 194 | ||
79 | buffer->size = size; | 195 | buffer->size = size; |
80 | |||
81 | /* | ||
82 | * allocate memory region with size and set the memory information | ||
83 | * to vaddr and dma_addr of a buffer object. | ||
84 | */ | ||
85 | if (lowlevel_buffer_allocate(dev, buffer) < 0) { | ||
86 | kfree(buffer); | ||
87 | return NULL; | ||
88 | } | ||
89 | |||
90 | return buffer; | 196 | return buffer; |
91 | } | 197 | } |
92 | 198 | ||
93 | void exynos_drm_buf_destroy(struct drm_device *dev, | 199 | void exynos_drm_fini_buf(struct drm_device *dev, |
94 | struct exynos_drm_gem_buf *buffer) | 200 | struct exynos_drm_gem_buf *buffer) |
95 | { | 201 | { |
96 | DRM_DEBUG_KMS("%s.\n", __FILE__); | 202 | DRM_DEBUG_KMS("%s.\n", __FILE__); |
97 | 203 | ||
@@ -100,12 +206,27 @@ void exynos_drm_buf_destroy(struct drm_device *dev, | |||
100 | return; | 206 | return; |
101 | } | 207 | } |
102 | 208 | ||
103 | lowlevel_buffer_deallocate(dev, buffer); | ||
104 | |||
105 | kfree(buffer); | 209 | kfree(buffer); |
106 | buffer = NULL; | 210 | buffer = NULL; |
107 | } | 211 | } |
108 | 212 | ||
109 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | 213 | int exynos_drm_alloc_buf(struct drm_device *dev, |
110 | MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module"); | 214 | struct exynos_drm_gem_buf *buf, unsigned int flags) |
111 | MODULE_LICENSE("GPL"); | 215 | { |
216 | |||
217 | /* | ||
218 | * allocate memory region and set the memory information | ||
219 | * to vaddr and dma_addr of a buffer object. | ||
220 | */ | ||
221 | if (lowlevel_buffer_allocate(dev, flags, buf) < 0) | ||
222 | return -ENOMEM; | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | void exynos_drm_free_buf(struct drm_device *dev, | ||
228 | unsigned int flags, struct exynos_drm_gem_buf *buffer) | ||
229 | { | ||
230 | |||
231 | lowlevel_buffer_deallocate(dev, flags, buffer); | ||
232 | } | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h index c913f2bad760..3388e4eb4ba2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h | |||
@@ -26,12 +26,22 @@ | |||
26 | #ifndef _EXYNOS_DRM_BUF_H_ | 26 | #ifndef _EXYNOS_DRM_BUF_H_ |
27 | #define _EXYNOS_DRM_BUF_H_ | 27 | #define _EXYNOS_DRM_BUF_H_ |
28 | 28 | ||
29 | /* allocate physical memory. */ | 29 | /* create and initialize buffer object. */ |
30 | struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, | 30 | struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, |
31 | unsigned int size); | 31 | unsigned int size); |
32 | 32 | ||
33 | /* remove allocated physical memory. */ | 33 | /* destroy buffer object. */ |
34 | void exynos_drm_buf_destroy(struct drm_device *dev, | 34 | void exynos_drm_fini_buf(struct drm_device *dev, |
35 | struct exynos_drm_gem_buf *buffer); | 35 | struct exynos_drm_gem_buf *buffer); |
36 | |||
37 | /* allocate a physical memory region and set up sgt and pages. */ | ||
38 | int exynos_drm_alloc_buf(struct drm_device *dev, | ||
39 | struct exynos_drm_gem_buf *buf, | ||
40 | unsigned int flags); | ||
41 | |||
42 | /* release physical memory region, sgt and pages. */ | ||
43 | void exynos_drm_free_buf(struct drm_device *dev, | ||
44 | unsigned int flags, | ||
45 | struct exynos_drm_gem_buf *buffer); | ||
36 | 46 | ||
37 | #endif | 47 | #endif |
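The buffer interface is now split into lifetime management (init/fini) and backing-storage management (alloc/free), with the allocation flags threaded through so only contiguous buffers are handled here. The hedged sketch below shows the intended call order; identifiers other than the exynos_drm_* functions are illustrative and error paths are trimmed.

```c
/* Hedged sketch of the intended call order after the split; not code from
 * the patch. Release order is exynos_drm_free_buf() then exynos_drm_fini_buf(). */
static int example_create_contig_buffer(struct drm_device *dev,
					unsigned int size, unsigned int flags,
					struct exynos_drm_gem_buf **out)
{
	struct exynos_drm_gem_buf *buf;

	buf = exynos_drm_init_buf(dev, size);	/* metadata only, no memory yet */
	if (!buf)
		return -ENOMEM;

	if (exynos_drm_alloc_buf(dev, buf, flags) < 0) {	/* pages + sg table */
		exynos_drm_fini_buf(dev, buf);
		return -ENOMEM;
	}

	*out = buf;	/* use buf->dma_addr, buf->pages, buf->sgt from here on */
	return 0;
}
```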
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 99d5527b2ca6..bf791fa0e50d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c | |||
@@ -225,6 +225,29 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { | |||
225 | .best_encoder = exynos_drm_best_encoder, | 225 | .best_encoder = exynos_drm_best_encoder, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | static int exynos_drm_connector_fill_modes(struct drm_connector *connector, | ||
229 | unsigned int max_width, unsigned int max_height) | ||
230 | { | ||
231 | struct exynos_drm_connector *exynos_connector = | ||
232 | to_exynos_connector(connector); | ||
233 | struct exynos_drm_manager *manager = exynos_connector->manager; | ||
234 | struct exynos_drm_manager_ops *ops = manager->ops; | ||
235 | unsigned int width, height; | ||
236 | |||
237 | width = max_width; | ||
238 | height = max_height; | ||
239 | |||
240 | /* | ||
241 | * if a specific driver wants to find desired_mode using the maximum | ||
242 | * resolution, get the max width and height from that driver. | ||
243 | */ | ||
244 | if (ops && ops->get_max_resol) | ||
245 | ops->get_max_resol(manager->dev, &width, &height); | ||
246 | |||
247 | return drm_helper_probe_single_connector_modes(connector, width, | ||
248 | height); | ||
249 | } | ||
250 | |||
228 | /* get detection status of display device. */ | 251 | /* get detection status of display device. */ |
229 | static enum drm_connector_status | 252 | static enum drm_connector_status |
230 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) | 253 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) |
@@ -262,7 +285,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector) | |||
262 | 285 | ||
263 | static struct drm_connector_funcs exynos_connector_funcs = { | 286 | static struct drm_connector_funcs exynos_connector_funcs = { |
264 | .dpms = drm_helper_connector_dpms, | 287 | .dpms = drm_helper_connector_dpms, |
265 | .fill_modes = drm_helper_probe_single_connector_modes, | 288 | .fill_modes = exynos_drm_connector_fill_modes, |
266 | .detect = exynos_drm_connector_detect, | 289 | .detect = exynos_drm_connector_detect, |
267 | .destroy = exynos_drm_connector_destroy, | 290 | .destroy = exynos_drm_connector_destroy, |
268 | }; | 291 | }; |
@@ -292,6 +315,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | |||
292 | connector->interlace_allowed = true; | 315 | connector->interlace_allowed = true; |
293 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 316 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
294 | break; | 317 | break; |
318 | case EXYNOS_DISPLAY_TYPE_VIDI: | ||
319 | type = DRM_MODE_CONNECTOR_VIRTUAL; | ||
320 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
321 | break; | ||
295 | default: | 322 | default: |
296 | type = DRM_MODE_CONNECTOR_Unknown; | 323 | type = DRM_MODE_CONNECTOR_Unknown; |
297 | break; | 324 | break; |
@@ -325,9 +352,3 @@ err_connector: | |||
325 | kfree(exynos_connector); | 352 | kfree(exynos_connector); |
326 | return NULL; | 353 | return NULL; |
327 | } | 354 | } |
328 | |||
329 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
330 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
331 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
332 | MODULE_DESCRIPTION("Samsung SoC DRM Connector Driver"); | ||
333 | MODULE_LICENSE("GPL"); | ||
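exynos_drm_connector_fill_modes() lets a sub driver clamp mode probing to its real hardware limit through the new get_max_resol hook; without the hook, the limits passed in by the core are used unchanged. A sub driver would presumably provide something like the following; the names and the 1920x1080 limit are assumptions for illustration.

```c
/* Hedged sketch: a sub driver advertising its maximum resolution through the
 * new get_max_resol callback; all identifiers and values are illustrative. */
static void example_get_max_resol(struct device *subdrv_dev,
				  unsigned int *width, unsigned int *height)
{
	*width = 1920;
	*height = 1080;
}

static struct exynos_drm_manager_ops example_manager_ops = {
	.get_max_resol	= example_get_max_resol,
	/* .dpms, .commit, .enable_vblank, ... */
};
```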
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index d08a55896d50..411832e8e17a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include "exynos_drm_connector.h" | 32 | #include "exynos_drm_connector.h" |
33 | #include "exynos_drm_fbdev.h" | 33 | #include "exynos_drm_fbdev.h" |
34 | 34 | ||
35 | static DEFINE_MUTEX(exynos_drm_mutex); | ||
36 | static LIST_HEAD(exynos_drm_subdrv_list); | 35 | static LIST_HEAD(exynos_drm_subdrv_list); |
37 | static struct drm_device *drm_dev; | 36 | static struct drm_device *drm_dev; |
38 | 37 | ||
@@ -60,6 +59,9 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev, | |||
60 | return ret; | 59 | return ret; |
61 | } | 60 | } |
62 | 61 | ||
62 | if (subdrv->is_local) | ||
63 | return 0; | ||
64 | |||
63 | /* create and initialize a encoder for this sub driver. */ | 65 | /* create and initialize a encoder for this sub driver. */ |
64 | encoder = exynos_drm_encoder_create(dev, &subdrv->manager, | 66 | encoder = exynos_drm_encoder_create(dev, &subdrv->manager, |
65 | (1 << MAX_CRTC) - 1); | 67 | (1 << MAX_CRTC) - 1); |
@@ -116,13 +118,10 @@ int exynos_drm_device_register(struct drm_device *dev) | |||
116 | if (!dev) | 118 | if (!dev) |
117 | return -EINVAL; | 119 | return -EINVAL; |
118 | 120 | ||
119 | if (drm_dev) { | 121 | drm_dev = dev; |
120 | DRM_ERROR("Already drm device were registered\n"); | ||
121 | return -EBUSY; | ||
122 | } | ||
123 | 122 | ||
124 | mutex_lock(&exynos_drm_mutex); | ||
125 | list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { | 123 | list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { |
124 | subdrv->drm_dev = dev; | ||
126 | err = exynos_drm_subdrv_probe(dev, subdrv); | 125 | err = exynos_drm_subdrv_probe(dev, subdrv); |
127 | if (err) { | 126 | if (err) { |
128 | DRM_DEBUG("exynos drm subdrv probe failed.\n"); | 127 | DRM_DEBUG("exynos drm subdrv probe failed.\n"); |
@@ -130,9 +129,6 @@ int exynos_drm_device_register(struct drm_device *dev) | |||
130 | } | 129 | } |
131 | } | 130 | } |
132 | 131 | ||
133 | drm_dev = dev; | ||
134 | mutex_unlock(&exynos_drm_mutex); | ||
135 | |||
136 | return 0; | 132 | return 0; |
137 | } | 133 | } |
138 | EXPORT_SYMBOL_GPL(exynos_drm_device_register); | 134 | EXPORT_SYMBOL_GPL(exynos_drm_device_register); |
@@ -143,86 +139,28 @@ int exynos_drm_device_unregister(struct drm_device *dev) | |||
143 | 139 | ||
144 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | 140 | DRM_DEBUG_DRIVER("%s\n", __FILE__); |
145 | 141 | ||
146 | if (!dev || dev != drm_dev) { | 142 | if (!dev) { |
147 | WARN(1, "Unexpected drm device unregister!\n"); | 143 | WARN(1, "Unexpected drm device unregister!\n"); |
148 | return -EINVAL; | 144 | return -EINVAL; |
149 | } | 145 | } |
150 | 146 | ||
151 | mutex_lock(&exynos_drm_mutex); | ||
152 | list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) | 147 | list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) |
153 | exynos_drm_subdrv_remove(dev, subdrv); | 148 | exynos_drm_subdrv_remove(dev, subdrv); |
154 | 149 | ||
155 | drm_dev = NULL; | 150 | drm_dev = NULL; |
156 | mutex_unlock(&exynos_drm_mutex); | ||
157 | 151 | ||
158 | return 0; | 152 | return 0; |
159 | } | 153 | } |
160 | EXPORT_SYMBOL_GPL(exynos_drm_device_unregister); | 154 | EXPORT_SYMBOL_GPL(exynos_drm_device_unregister); |
161 | 155 | ||
162 | static int exynos_drm_mode_group_reinit(struct drm_device *dev) | ||
163 | { | ||
164 | struct drm_mode_group *group = &dev->primary->mode_group; | ||
165 | uint32_t *id_list = group->id_list; | ||
166 | int ret; | ||
167 | |||
168 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
169 | |||
170 | ret = drm_mode_group_init_legacy_group(dev, group); | ||
171 | if (ret < 0) | ||
172 | return ret; | ||
173 | |||
174 | kfree(id_list); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) | 156 | int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) |
179 | { | 157 | { |
180 | int err; | ||
181 | |||
182 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | 158 | DRM_DEBUG_DRIVER("%s\n", __FILE__); |
183 | 159 | ||
184 | if (!subdrv) | 160 | if (!subdrv) |
185 | return -EINVAL; | 161 | return -EINVAL; |
186 | 162 | ||
187 | mutex_lock(&exynos_drm_mutex); | ||
188 | if (drm_dev) { | ||
189 | err = exynos_drm_subdrv_probe(drm_dev, subdrv); | ||
190 | if (err) { | ||
191 | DRM_ERROR("failed to probe exynos drm subdrv\n"); | ||
192 | mutex_unlock(&exynos_drm_mutex); | ||
193 | return err; | ||
194 | } | ||
195 | |||
196 | /* setup possible_clones. */ | ||
197 | exynos_drm_encoder_setup(drm_dev); | ||
198 | |||
199 | /* | ||
200 | * if any specific driver such as fimd or hdmi driver called | ||
201 | * exynos_drm_subdrv_register() later than drm_load(), | ||
202 | * the fb helper should be re-initialized and re-configured. | ||
203 | */ | ||
204 | err = exynos_drm_fbdev_reinit(drm_dev); | ||
205 | if (err) { | ||
206 | DRM_ERROR("failed to reinitialize exynos drm fbdev\n"); | ||
207 | exynos_drm_subdrv_remove(drm_dev, subdrv); | ||
208 | mutex_unlock(&exynos_drm_mutex); | ||
209 | return err; | ||
210 | } | ||
211 | |||
212 | err = exynos_drm_mode_group_reinit(drm_dev); | ||
213 | if (err) { | ||
214 | DRM_ERROR("failed to reinitialize mode group\n"); | ||
215 | exynos_drm_fbdev_fini(drm_dev); | ||
216 | exynos_drm_subdrv_remove(drm_dev, subdrv); | ||
217 | mutex_unlock(&exynos_drm_mutex); | ||
218 | return err; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | subdrv->drm_dev = drm_dev; | ||
223 | |||
224 | list_add_tail(&subdrv->list, &exynos_drm_subdrv_list); | 163 | list_add_tail(&subdrv->list, &exynos_drm_subdrv_list); |
225 | mutex_unlock(&exynos_drm_mutex); | ||
226 | 164 | ||
227 | return 0; | 165 | return 0; |
228 | } | 166 | } |
@@ -230,46 +168,48 @@ EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); | |||
230 | 168 | ||
231 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) | 169 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) |
232 | { | 170 | { |
233 | int ret = -EFAULT; | ||
234 | |||
235 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | 171 | DRM_DEBUG_DRIVER("%s\n", __FILE__); |
236 | 172 | ||
237 | if (!subdrv) { | 173 | if (!subdrv) |
238 | DRM_DEBUG("Unexpected exynos drm subdrv unregister!\n"); | 174 | return -EINVAL; |
239 | return ret; | ||
240 | } | ||
241 | 175 | ||
242 | mutex_lock(&exynos_drm_mutex); | 176 | list_del(&subdrv->list); |
243 | if (drm_dev) { | ||
244 | exynos_drm_subdrv_remove(drm_dev, subdrv); | ||
245 | list_del(&subdrv->list); | ||
246 | 177 | ||
247 | /* | 178 | return 0; |
248 | * fb helper should be updated once a sub driver is released | 179 | } |
249 | * to re-configure crtc and connector and also to re-setup | 180 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); |
250 | * drm framebuffer. | ||
251 | */ | ||
252 | ret = exynos_drm_fbdev_reinit(drm_dev); | ||
253 | if (ret < 0) { | ||
254 | DRM_ERROR("failed fb helper reinit.\n"); | ||
255 | goto fail; | ||
256 | } | ||
257 | 181 | ||
258 | ret = exynos_drm_mode_group_reinit(drm_dev); | 182 | int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) |
259 | if (ret < 0) { | 183 | { |
260 | DRM_ERROR("failed drm mode group reinit.\n"); | 184 | struct exynos_drm_subdrv *subdrv; |
261 | goto fail; | 185 | int ret; |
186 | |||
187 | list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { | ||
188 | if (subdrv->open) { | ||
189 | ret = subdrv->open(dev, subdrv->manager.dev, file); | ||
190 | if (ret) | ||
191 | goto err; | ||
262 | } | 192 | } |
263 | } | 193 | } |
264 | 194 | ||
265 | fail: | 195 | return 0; |
266 | mutex_unlock(&exynos_drm_mutex); | 196 | |
197 | err: | ||
198 | list_for_each_entry_reverse(subdrv, &subdrv->list, list) { | ||
199 | if (subdrv->close) | ||
200 | subdrv->close(dev, subdrv->manager.dev, file); | ||
201 | } | ||
267 | return ret; | 202 | return ret; |
268 | } | 203 | } |
269 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); | 204 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); |
205 | |||
206 | void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) | ||
207 | { | ||
208 | struct exynos_drm_subdrv *subdrv; | ||
270 | 209 | ||
271 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | 210 | list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { |
272 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | 211 | if (subdrv->close) |
273 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | 212 | subdrv->close(dev, subdrv->manager.dev, file); |
274 | MODULE_DESCRIPTION("Samsung SoC DRM Core Driver"); | 213 | } |
275 | MODULE_LICENSE("GPL"); | 214 | } |
215 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index de818831a511..3486ffed0bf0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -249,7 +249,11 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
249 | { | 249 | { |
250 | DRM_DEBUG_KMS("%s\n", __FILE__); | 250 | DRM_DEBUG_KMS("%s\n", __FILE__); |
251 | 251 | ||
252 | mode = adjusted_mode; | 252 | /* |
253 | * copy the mode data adjusted by mode_fixup() into crtc->mode | ||
254 | * so that the hardware can be set to the proper mode. | ||
255 | */ | ||
256 | memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); | ||
253 | 257 | ||
254 | return exynos_drm_crtc_update(crtc); | 258 | return exynos_drm_crtc_update(crtc); |
255 | } | 259 | } |
@@ -426,9 +430,3 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) | |||
426 | exynos_drm_fn_encoder(private->crtc[crtc], &crtc, | 430 | exynos_drm_fn_encoder(private->crtc[crtc], &crtc, |
427 | exynos_drm_disable_vblank); | 431 | exynos_drm_disable_vblank); |
428 | } | 432 | } |
429 | |||
430 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
431 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
432 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
433 | MODULE_DESCRIPTION("Samsung SoC DRM CRTC Driver"); | ||
434 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 09cc13f791b3..a6819b5f8428 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include "exynos_drm_fb.h" | 38 | #include "exynos_drm_fb.h" |
39 | #include "exynos_drm_gem.h" | 39 | #include "exynos_drm_gem.h" |
40 | #include "exynos_drm_plane.h" | 40 | #include "exynos_drm_plane.h" |
41 | #include "exynos_drm_vidi.h" | ||
41 | 42 | ||
42 | #define DRIVER_NAME "exynos" | 43 | #define DRIVER_NAME "exynos" |
43 | #define DRIVER_DESC "Samsung SoC DRM" | 44 | #define DRIVER_DESC "Samsung SoC DRM" |
@@ -144,11 +145,34 @@ static int exynos_drm_unload(struct drm_device *dev) | |||
144 | return 0; | 145 | return 0; |
145 | } | 146 | } |
146 | 147 | ||
148 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | ||
149 | { | ||
150 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | ||
151 | |||
152 | return exynos_drm_subdrv_open(dev, file); | ||
153 | } | ||
154 | |||
147 | static void exynos_drm_preclose(struct drm_device *dev, | 155 | static void exynos_drm_preclose(struct drm_device *dev, |
148 | struct drm_file *file) | 156 | struct drm_file *file) |
149 | { | 157 | { |
158 | struct exynos_drm_private *private = dev->dev_private; | ||
159 | struct drm_pending_vblank_event *e, *t; | ||
160 | unsigned long flags; | ||
161 | |||
150 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | 162 | DRM_DEBUG_DRIVER("%s\n", __FILE__); |
151 | 163 | ||
164 | /* release events of current file */ | ||
165 | spin_lock_irqsave(&dev->event_lock, flags); | ||
166 | list_for_each_entry_safe(e, t, &private->pageflip_event_list, | ||
167 | base.link) { | ||
168 | if (e->base.file_priv == file) { | ||
169 | list_del(&e->base.link); | ||
170 | e->base.destroy(&e->base); | ||
171 | } | ||
172 | } | ||
173 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
174 | |||
175 | exynos_drm_subdrv_close(dev, file); | ||
152 | } | 176 | } |
153 | 177 | ||
154 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | 178 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) |
@@ -185,6 +209,8 @@ static struct drm_ioctl_desc exynos_ioctls[] = { | |||
185 | exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), | 209 | exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), |
186 | DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl, | 210 | DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl, |
187 | DRM_UNLOCKED | DRM_AUTH), | 211 | DRM_UNLOCKED | DRM_AUTH), |
212 | DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, | ||
213 | vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH), | ||
188 | }; | 214 | }; |
189 | 215 | ||
190 | static const struct file_operations exynos_drm_driver_fops = { | 216 | static const struct file_operations exynos_drm_driver_fops = { |
@@ -202,6 +228,7 @@ static struct drm_driver exynos_drm_driver = { | |||
202 | DRIVER_MODESET | DRIVER_GEM, | 228 | DRIVER_MODESET | DRIVER_GEM, |
203 | .load = exynos_drm_load, | 229 | .load = exynos_drm_load, |
204 | .unload = exynos_drm_unload, | 230 | .unload = exynos_drm_unload, |
231 | .open = exynos_drm_open, | ||
205 | .preclose = exynos_drm_preclose, | 232 | .preclose = exynos_drm_preclose, |
206 | .lastclose = exynos_drm_lastclose, | 233 | .lastclose = exynos_drm_lastclose, |
207 | .postclose = exynos_drm_postclose, | 234 | .postclose = exynos_drm_postclose, |
@@ -252,9 +279,60 @@ static struct platform_driver exynos_drm_platform_driver = { | |||
252 | 279 | ||
253 | static int __init exynos_drm_init(void) | 280 | static int __init exynos_drm_init(void) |
254 | { | 281 | { |
282 | int ret; | ||
283 | |||
255 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | 284 | DRM_DEBUG_DRIVER("%s\n", __FILE__); |
256 | 285 | ||
257 | return platform_driver_register(&exynos_drm_platform_driver); | 286 | #ifdef CONFIG_DRM_EXYNOS_FIMD |
287 | ret = platform_driver_register(&fimd_driver); | ||
288 | if (ret < 0) | ||
289 | goto out_fimd; | ||
290 | #endif | ||
291 | |||
292 | #ifdef CONFIG_DRM_EXYNOS_HDMI | ||
293 | ret = platform_driver_register(&hdmi_driver); | ||
294 | if (ret < 0) | ||
295 | goto out_hdmi; | ||
296 | ret = platform_driver_register(&mixer_driver); | ||
297 | if (ret < 0) | ||
298 | goto out_mixer; | ||
299 | ret = platform_driver_register(&exynos_drm_common_hdmi_driver); | ||
300 | if (ret < 0) | ||
301 | goto out_common_hdmi; | ||
302 | #endif | ||
303 | |||
304 | #ifdef CONFIG_DRM_EXYNOS_VIDI | ||
305 | ret = platform_driver_register(&vidi_driver); | ||
306 | if (ret < 0) | ||
307 | goto out_vidi; | ||
308 | #endif | ||
309 | |||
310 | ret = platform_driver_register(&exynos_drm_platform_driver); | ||
311 | if (ret < 0) | ||
312 | goto out; | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | out: | ||
317 | #ifdef CONFIG_DRM_EXYNOS_VIDI | ||
318 | out_vidi: | ||
319 | platform_driver_unregister(&vidi_driver); | ||
320 | #endif | ||
321 | |||
322 | #ifdef CONFIG_DRM_EXYNOS_HDMI | ||
323 | platform_driver_unregister(&exynos_drm_common_hdmi_driver); | ||
324 | out_common_hdmi: | ||
325 | platform_driver_unregister(&mixer_driver); | ||
326 | out_mixer: | ||
327 | platform_driver_unregister(&hdmi_driver); | ||
328 | out_hdmi: | ||
329 | #endif | ||
330 | |||
331 | #ifdef CONFIG_DRM_EXYNOS_FIMD | ||
332 | platform_driver_unregister(&fimd_driver); | ||
333 | out_fimd: | ||
334 | #endif | ||
335 | return ret; | ||
258 | } | 336 | } |
259 | 337 | ||
260 | static void __exit exynos_drm_exit(void) | 338 | static void __exit exynos_drm_exit(void) |
@@ -262,6 +340,20 @@ static void __exit exynos_drm_exit(void) | |||
262 | DRM_DEBUG_DRIVER("%s\n", __FILE__); | 340 | DRM_DEBUG_DRIVER("%s\n", __FILE__); |
263 | 341 | ||
264 | platform_driver_unregister(&exynos_drm_platform_driver); | 342 | platform_driver_unregister(&exynos_drm_platform_driver); |
343 | |||
344 | #ifdef CONFIG_DRM_EXYNOS_HDMI | ||
345 | platform_driver_unregister(&exynos_drm_common_hdmi_driver); | ||
346 | platform_driver_unregister(&mixer_driver); | ||
347 | platform_driver_unregister(&hdmi_driver); | ||
348 | #endif | ||
349 | |||
350 | #ifdef CONFIG_DRM_EXYNOS_VIDI | ||
351 | platform_driver_unregister(&vidi_driver); | ||
352 | #endif | ||
353 | |||
354 | #ifdef CONFIG_DRM_EXYNOS_FIMD | ||
355 | platform_driver_unregister(&fimd_driver); | ||
356 | #endif | ||
265 | } | 357 | } |
266 | 358 | ||
267 | module_init(exynos_drm_init); | 359 | module_init(exynos_drm_init); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 13540de90bfc..fbd0a232c93d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -32,9 +32,9 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include "drm.h" | 33 | #include "drm.h" |
34 | 34 | ||
35 | #define MAX_CRTC 2 | 35 | #define MAX_CRTC 3 |
36 | #define MAX_PLANE 5 | 36 | #define MAX_PLANE 5 |
37 | #define MAX_FB_BUFFER 3 | 37 | #define MAX_FB_BUFFER 4 |
38 | #define DEFAULT_ZPOS -1 | 38 | #define DEFAULT_ZPOS -1 |
39 | 39 | ||
40 | struct drm_device; | 40 | struct drm_device; |
@@ -50,6 +50,8 @@ enum exynos_drm_output_type { | |||
50 | EXYNOS_DISPLAY_TYPE_LCD, | 50 | EXYNOS_DISPLAY_TYPE_LCD, |
51 | /* HDMI Interface. */ | 51 | /* HDMI Interface. */ |
52 | EXYNOS_DISPLAY_TYPE_HDMI, | 52 | EXYNOS_DISPLAY_TYPE_HDMI, |
53 | /* Virtual Display Interface. */ | ||
54 | EXYNOS_DISPLAY_TYPE_VIDI, | ||
53 | }; | 55 | }; |
54 | 56 | ||
55 | /* | 57 | /* |
@@ -155,8 +157,10 @@ struct exynos_drm_display_ops { | |||
155 | * | 157 | * |
156 | * @dpms: control device power. | 158 | * @dpms: control device power. |
157 | * @apply: set timing, vblank and overlay data to registers. | 159 | * @apply: set timing, vblank and overlay data to registers. |
160 | * @mode_fixup: fix up the mode data against the hw specific display mode. | ||
158 | * @mode_set: convert drm_display_mode to hw specific display mode and | 161 | * @mode_set: convert drm_display_mode to hw specific display mode and |
159 | * would be called by encoder->mode_set(). | 162 | * would be called by encoder->mode_set(). |
163 | * @get_max_resol: get the maximum resolution of the specific hardware. | ||
160 | * @commit: set current hw specific display mode to hw. | 164 | * @commit: set current hw specific display mode to hw. |
161 | * @enable_vblank: specific driver callback for enabling vblank interrupt. | 165 | * @enable_vblank: specific driver callback for enabling vblank interrupt. |
162 | * @disable_vblank: specific driver callback for disabling vblank interrupt. | 166 | * @disable_vblank: specific driver callback for disabling vblank interrupt. |
@@ -164,7 +168,13 @@ struct exynos_drm_display_ops { | |||
164 | struct exynos_drm_manager_ops { | 168 | struct exynos_drm_manager_ops { |
165 | void (*dpms)(struct device *subdrv_dev, int mode); | 169 | void (*dpms)(struct device *subdrv_dev, int mode); |
166 | void (*apply)(struct device *subdrv_dev); | 170 | void (*apply)(struct device *subdrv_dev); |
171 | void (*mode_fixup)(struct device *subdrv_dev, | ||
172 | struct drm_connector *connector, | ||
173 | struct drm_display_mode *mode, | ||
174 | struct drm_display_mode *adjusted_mode); | ||
167 | void (*mode_set)(struct device *subdrv_dev, void *mode); | 175 | void (*mode_set)(struct device *subdrv_dev, void *mode); |
176 | void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width, | ||
177 | unsigned int *height); | ||
168 | void (*commit)(struct device *subdrv_dev); | 178 | void (*commit)(struct device *subdrv_dev); |
169 | int (*enable_vblank)(struct device *subdrv_dev); | 179 | int (*enable_vblank)(struct device *subdrv_dev); |
170 | void (*disable_vblank)(struct device *subdrv_dev); | 180 | void (*disable_vblank)(struct device *subdrv_dev); |
@@ -217,10 +227,13 @@ struct exynos_drm_private { | |||
217 | * @list: sub driver has its own list object to register to exynos drm driver. | 227 | * @list: sub driver has its own list object to register to exynos drm driver. |
218 | * @drm_dev: pointer to drm_device and this pointer would be set | 228 | * @drm_dev: pointer to drm_device and this pointer would be set |
219 | * when sub driver calls exynos_drm_subdrv_register(). | 229 | * when sub driver calls exynos_drm_subdrv_register(). |
230 | * @is_local: true if the sub driver has no encoder and connector of its own. | ||
220 | * @probe: this callback would be called by exynos drm driver after | 231 | * @probe: this callback would be called by exynos drm driver after |
221 | * subdrv is registered to it. | 232 | * subdrv is registered to it. |
222 | * @remove: this callback is used to release resources created | 233 | * @remove: this callback is used to release resources created |
223 | * by probe callback. | 234 | * by probe callback. |
235 | * @open: this is called when the drm device file is opened. | ||
236 | * @close: this is called when the drm device file is closed. | ||
224 | * @manager: subdrv has its own manager to control a hardware appropriately | 237 | * @manager: subdrv has its own manager to control a hardware appropriately |
225 | * and we can access a hardware drawing on this manager. | 238 | * and we can access a hardware drawing on this manager. |
226 | * @encoder: encoder object owned by this sub driver. | 239 | * @encoder: encoder object owned by this sub driver. |
@@ -229,9 +242,14 @@ struct exynos_drm_private { | |||
229 | struct exynos_drm_subdrv { | 242 | struct exynos_drm_subdrv { |
230 | struct list_head list; | 243 | struct list_head list; |
231 | struct drm_device *drm_dev; | 244 | struct drm_device *drm_dev; |
245 | bool is_local; | ||
232 | 246 | ||
233 | int (*probe)(struct drm_device *drm_dev, struct device *dev); | 247 | int (*probe)(struct drm_device *drm_dev, struct device *dev); |
234 | void (*remove)(struct drm_device *dev); | 248 | void (*remove)(struct drm_device *dev); |
249 | int (*open)(struct drm_device *drm_dev, struct device *dev, | ||
250 | struct drm_file *file); | ||
251 | void (*close)(struct drm_device *drm_dev, struct device *dev, | ||
252 | struct drm_file *file); | ||
235 | 253 | ||
236 | struct exynos_drm_manager manager; | 254 | struct exynos_drm_manager manager; |
237 | struct drm_encoder *encoder; | 255 | struct drm_encoder *encoder; |
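A hedged sketch (not part of this patch) of how a sub driver might wire up the new is_local flag and the open/close hooks; everything named foo_* below is hypothetical and only the fields added by this hunk are set explicitly:

/* Hypothetical sub driver wiring; foo_* names are made up for illustration. */
#include "drmP.h"
#include "exynos_drm_drv.h"

struct foo_context {
	struct exynos_drm_subdrv subdrv;
};

static int foo_subdrv_open(struct drm_device *drm_dev, struct device *dev,
			   struct drm_file *file)
{
	/* set up per-file state here if the hardware needs any */
	return 0;
}

static void foo_subdrv_close(struct drm_device *drm_dev, struct device *dev,
			     struct drm_file *file)
{
	/* tear down whatever foo_subdrv_open() created */
}

static void foo_subdrv_setup(struct foo_context *ctx)
{
	ctx->subdrv.is_local = true;		/* see @is_local above */
	ctx->subdrv.open = foo_subdrv_open;
	ctx->subdrv.close = foo_subdrv_close;
	exynos_drm_subdrv_register(&ctx->subdrv);
}

The core then walks the subdrv list and invokes these per-file hooks through exynos_drm_subdrv_open()/exynos_drm_subdrv_close(), which are declared further down in this header.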
@@ -254,15 +272,19 @@ int exynos_drm_device_unregister(struct drm_device *dev); | |||
254 | * this function would be called by sub drivers such as display controller | 272 | * this function would be called by sub drivers such as display controller |
255 | * or hdmi driver to register this sub driver object to exynos drm driver | 273 | * or hdmi driver to register this sub driver object to exynos drm driver |
256 | * and when a sub driver is registered to exynos drm driver a probe callback | 274 | * and when a sub driver is registered to exynos drm driver a probe callback |
257 | * of the sub driver is called and creates its own encoder and connector | 275 | * of the sub driver is called and creates its own encoder and connector. |
258 | * and then fb helper and drm mode group would be re-initialized. | ||
259 | */ | 276 | */ |
260 | int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv); | 277 | int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv); |
261 | 278 | ||
262 | /* | 279 | /* this function removes subdrv list from exynos drm driver */ |
263 | * this function removes subdrv list from exynos drm driver and fb helper | ||
264 | * and drm mode group would be re-initialized. | ||
265 | */ | ||
266 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv); | 280 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv); |
267 | 281 | ||
282 | int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); | ||
283 | void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); | ||
284 | |||
285 | extern struct platform_driver fimd_driver; | ||
286 | extern struct platform_driver hdmi_driver; | ||
287 | extern struct platform_driver mixer_driver; | ||
288 | extern struct platform_driver exynos_drm_common_hdmi_driver; | ||
289 | extern struct platform_driver vidi_driver; | ||
268 | #endif | 290 | #endif |
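Exporting fimd_driver, hdmi_driver, mixer_driver, exynos_drm_common_hdmi_driver and vidi_driver here goes hand in hand with the module_init()/module_exit() removals further down in exynos_drm_fimd.c and exynos_drm_hdmi.c: presumably a single core init now registers them in a fixed order. A minimal sketch of that idea (the real core change is not shown in this section):

/* Sketch only: single-point registration of the exported platform drivers. */
static int __init exynos_drm_init(void)
{
	int ret;

	ret = platform_driver_register(&fimd_driver);
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&hdmi_driver);
	if (ret < 0)
		goto out_fimd;

	ret = platform_driver_register(&mixer_driver);
	if (ret < 0)
		goto out_hdmi;

	/* exynos_drm_common_hdmi_driver, vidi_driver and the top-level
	 * exynos drm platform driver would follow the same pattern. */
	return 0;

out_hdmi:
	platform_driver_unregister(&hdmi_driver);
out_fimd:
	platform_driver_unregister(&fimd_driver);
	return ret;
}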
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index ef4754f1519b..6e9ac7bd1dcf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c | |||
@@ -111,9 +111,19 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, | |||
111 | struct drm_display_mode *mode, | 111 | struct drm_display_mode *mode, |
112 | struct drm_display_mode *adjusted_mode) | 112 | struct drm_display_mode *adjusted_mode) |
113 | { | 113 | { |
114 | struct drm_device *dev = encoder->dev; | ||
115 | struct drm_connector *connector; | ||
116 | struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); | ||
117 | struct exynos_drm_manager_ops *manager_ops = manager->ops; | ||
118 | |||
114 | DRM_DEBUG_KMS("%s\n", __FILE__); | 119 | DRM_DEBUG_KMS("%s\n", __FILE__); |
115 | 120 | ||
116 | /* drm framework doesn't check NULL. */ | 121 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
122 | if (connector->encoder == encoder) | ||
123 | if (manager_ops && manager_ops->mode_fixup) | ||
124 | manager_ops->mode_fixup(manager->dev, connector, | ||
125 | mode, adjusted_mode); | ||
126 | } | ||
117 | 127 | ||
118 | return true; | 128 | return true; |
119 | } | 129 | } |
@@ -132,12 +142,11 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, | |||
132 | 142 | ||
133 | DRM_DEBUG_KMS("%s\n", __FILE__); | 143 | DRM_DEBUG_KMS("%s\n", __FILE__); |
134 | 144 | ||
135 | mode = adjusted_mode; | ||
136 | |||
137 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 145 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
138 | if (connector->encoder == encoder) { | 146 | if (connector->encoder == encoder) { |
139 | if (manager_ops && manager_ops->mode_set) | 147 | if (manager_ops && manager_ops->mode_set) |
140 | manager_ops->mode_set(manager->dev, mode); | 148 | manager_ops->mode_set(manager->dev, |
149 | adjusted_mode); | ||
141 | 150 | ||
142 | if (overlay_ops && overlay_ops->mode_set) | 151 | if (overlay_ops && overlay_ops->mode_set) |
143 | overlay_ops->mode_set(manager->dev, overlay); | 152 | overlay_ops->mode_set(manager->dev, overlay); |
@@ -209,6 +218,7 @@ static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder) | |||
209 | switch (display_ops->type) { | 218 | switch (display_ops->type) { |
210 | case EXYNOS_DISPLAY_TYPE_LCD: | 219 | case EXYNOS_DISPLAY_TYPE_LCD: |
211 | case EXYNOS_DISPLAY_TYPE_HDMI: | 220 | case EXYNOS_DISPLAY_TYPE_HDMI: |
221 | case EXYNOS_DISPLAY_TYPE_VIDI: | ||
212 | clone_mask |= (1 << (cnt++)); | 222 | clone_mask |= (1 << (cnt++)); |
213 | break; | 223 | break; |
214 | default: | 224 | default: |
@@ -433,9 +443,3 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) | |||
433 | if (overlay_ops && overlay_ops->disable) | 443 | if (overlay_ops && overlay_ops->disable) |
434 | overlay_ops->disable(manager->dev, zpos); | 444 | overlay_ops->disable(manager->dev, zpos); |
435 | } | 445 | } |
436 | |||
437 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
438 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
439 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
440 | MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver"); | ||
441 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 3733fe6723d3..c38c8f468fa3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -211,9 +211,3 @@ void exynos_drm_mode_config_init(struct drm_device *dev) | |||
211 | 211 | ||
212 | dev->mode_config.funcs = &exynos_drm_mode_config_funcs; | 212 | dev->mode_config.funcs = &exynos_drm_mode_config_funcs; |
213 | } | 213 | } |
214 | |||
215 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
216 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
217 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
218 | MODULE_DESCRIPTION("Samsung SoC DRM FB Driver"); | ||
219 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 54f8f074822f..d5586cc75163 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
@@ -125,7 +125,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, | |||
125 | } | 125 | } |
126 | 126 | ||
127 | size = mode_cmd.pitches[0] * mode_cmd.height; | 127 | size = mode_cmd.pitches[0] * mode_cmd.height; |
128 | exynos_gem_obj = exynos_drm_gem_create(dev, size); | 128 | |
129 | /* 0 means to allocate physically contiguous memory */ | ||
130 | exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); | ||
129 | if (IS_ERR(exynos_gem_obj)) { | 131 | if (IS_ERR(exynos_gem_obj)) { |
130 | ret = PTR_ERR(exynos_gem_obj); | 132 | ret = PTR_ERR(exynos_gem_obj); |
131 | goto out; | 133 | goto out; |
@@ -314,89 +316,3 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev) | |||
314 | 316 | ||
315 | drm_fb_helper_restore_fbdev_mode(private->fb_helper); | 317 | drm_fb_helper_restore_fbdev_mode(private->fb_helper); |
316 | } | 318 | } |
317 | |||
318 | int exynos_drm_fbdev_reinit(struct drm_device *dev) | ||
319 | { | ||
320 | struct exynos_drm_private *private = dev->dev_private; | ||
321 | struct drm_fb_helper *fb_helper; | ||
322 | int ret; | ||
323 | |||
324 | if (!private) | ||
325 | return -EINVAL; | ||
326 | |||
327 | /* | ||
328 | * if all sub drivers were unloaded then num_connector is 0 | ||
329 | * so at this time, the framebuffers also should be destroyed. | ||
330 | */ | ||
331 | if (!dev->mode_config.num_connector) { | ||
332 | exynos_drm_fbdev_fini(dev); | ||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | fb_helper = private->fb_helper; | ||
337 | |||
338 | if (fb_helper) { | ||
339 | struct list_head temp_list; | ||
340 | |||
341 | INIT_LIST_HEAD(&temp_list); | ||
342 | |||
343 | /* | ||
344 | * fb_helper is reinitialized but the kernel fb is reused | ||
345 | * so kernel_fb_list needs to be backed up and restored | ||
346 | */ | ||
347 | if (!list_empty(&fb_helper->kernel_fb_list)) | ||
348 | list_replace_init(&fb_helper->kernel_fb_list, | ||
349 | &temp_list); | ||
350 | |||
351 | drm_fb_helper_fini(fb_helper); | ||
352 | |||
353 | ret = drm_fb_helper_init(dev, fb_helper, | ||
354 | dev->mode_config.num_crtc, MAX_CONNECTOR); | ||
355 | if (ret < 0) { | ||
356 | DRM_ERROR("failed to initialize drm fb helper\n"); | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | if (!list_empty(&temp_list)) | ||
361 | list_replace(&temp_list, &fb_helper->kernel_fb_list); | ||
362 | |||
363 | ret = drm_fb_helper_single_add_all_connectors(fb_helper); | ||
364 | if (ret < 0) { | ||
365 | DRM_ERROR("failed to add fb helper to connectors\n"); | ||
366 | goto err; | ||
367 | } | ||
368 | |||
369 | ret = drm_fb_helper_initial_config(fb_helper, PREFERRED_BPP); | ||
370 | if (ret < 0) { | ||
371 | DRM_ERROR("failed to set up hw configuration.\n"); | ||
372 | goto err; | ||
373 | } | ||
374 | } else { | ||
375 | /* | ||
376 | * if drm_load() failed when drm_load() was called prior | ||
377 | * to specific drivers, fb_helper must be NULL and so | ||
378 | * this function should be called again to re-initialize and | ||
379 | * re-configure the fb helper. It means that this function | ||
380 | * has been called by the specific drivers. | ||
381 | */ | ||
382 | ret = exynos_drm_fbdev_init(dev); | ||
383 | } | ||
384 | |||
385 | return ret; | ||
386 | |||
387 | err: | ||
388 | /* | ||
389 | * if drm_load() failed when drm load() was called prior | ||
390 | * to specific drivers, the fb_helper must be NULL and so check it. | ||
391 | */ | ||
392 | if (fb_helper) | ||
393 | drm_fb_helper_fini(fb_helper); | ||
394 | |||
395 | return ret; | ||
396 | } | ||
397 | |||
398 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
399 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
400 | MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); | ||
401 | MODULE_DESCRIPTION("Samsung SoC DRM FBDEV Driver"); | ||
402 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 56458eea0501..ecb6db229700 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -1007,7 +1007,7 @@ static const struct dev_pm_ops fimd_pm_ops = { | |||
1007 | SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL) | 1007 | SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL) |
1008 | }; | 1008 | }; |
1009 | 1009 | ||
1010 | static struct platform_driver fimd_driver = { | 1010 | struct platform_driver fimd_driver = { |
1011 | .probe = fimd_probe, | 1011 | .probe = fimd_probe, |
1012 | .remove = __devexit_p(fimd_remove), | 1012 | .remove = __devexit_p(fimd_remove), |
1013 | .driver = { | 1013 | .driver = { |
@@ -1016,21 +1016,3 @@ static struct platform_driver fimd_driver = { | |||
1016 | .pm = &fimd_pm_ops, | 1016 | .pm = &fimd_pm_ops, |
1017 | }, | 1017 | }, |
1018 | }; | 1018 | }; |
1019 | |||
1020 | static int __init fimd_init(void) | ||
1021 | { | ||
1022 | return platform_driver_register(&fimd_driver); | ||
1023 | } | ||
1024 | |||
1025 | static void __exit fimd_exit(void) | ||
1026 | { | ||
1027 | platform_driver_unregister(&fimd_driver); | ||
1028 | } | ||
1029 | |||
1030 | module_init(fimd_init); | ||
1031 | module_exit(fimd_exit); | ||
1032 | |||
1033 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
1034 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
1035 | MODULE_DESCRIPTION("Samsung DRM FIMD Driver"); | ||
1036 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 025abb3e3b67..fa1aa94a3d8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "drm.h" | 27 | #include "drm.h" |
28 | 28 | ||
29 | #include <linux/shmem_fs.h> | ||
29 | #include <drm/exynos_drm.h> | 30 | #include <drm/exynos_drm.h> |
30 | 31 | ||
31 | #include "exynos_drm_drv.h" | 32 | #include "exynos_drm_drv.h" |
@@ -55,6 +56,178 @@ static unsigned int convert_to_vm_err_msg(int msg) | |||
55 | return out_msg; | 56 | return out_msg; |
56 | } | 57 | } |
57 | 58 | ||
59 | static unsigned int mask_gem_flags(unsigned int flags) | ||
60 | { | ||
61 | return flags &= EXYNOS_BO_NONCONTIG; | ||
62 | } | ||
63 | |||
64 | static struct page **exynos_gem_get_pages(struct drm_gem_object *obj, | ||
65 | gfp_t gfpmask) | ||
66 | { | ||
67 | struct inode *inode; | ||
68 | struct address_space *mapping; | ||
69 | struct page *p, **pages; | ||
70 | int i, npages; | ||
71 | |||
72 | /* This is the shared memory object that backs the GEM resource */ | ||
73 | inode = obj->filp->f_path.dentry->d_inode; | ||
74 | mapping = inode->i_mapping; | ||
75 | |||
76 | npages = obj->size >> PAGE_SHIFT; | ||
77 | |||
78 | pages = drm_malloc_ab(npages, sizeof(struct page *)); | ||
79 | if (pages == NULL) | ||
80 | return ERR_PTR(-ENOMEM); | ||
81 | |||
82 | gfpmask |= mapping_gfp_mask(mapping); | ||
83 | |||
84 | for (i = 0; i < npages; i++) { | ||
85 | p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); | ||
86 | if (IS_ERR(p)) | ||
87 | goto fail; | ||
88 | pages[i] = p; | ||
89 | } | ||
90 | |||
91 | return pages; | ||
92 | |||
93 | fail: | ||
94 | while (i--) | ||
95 | page_cache_release(pages[i]); | ||
96 | |||
97 | drm_free_large(pages); | ||
98 | return ERR_PTR(PTR_ERR(p)); | ||
99 | } | ||
100 | |||
101 | static void exynos_gem_put_pages(struct drm_gem_object *obj, | ||
102 | struct page **pages, | ||
103 | bool dirty, bool accessed) | ||
104 | { | ||
105 | int i, npages; | ||
106 | |||
107 | npages = obj->size >> PAGE_SHIFT; | ||
108 | |||
109 | for (i = 0; i < npages; i++) { | ||
110 | if (dirty) | ||
111 | set_page_dirty(pages[i]); | ||
112 | |||
113 | if (accessed) | ||
114 | mark_page_accessed(pages[i]); | ||
115 | |||
116 | /* Undo the reference we took when populating the table */ | ||
117 | page_cache_release(pages[i]); | ||
118 | } | ||
119 | |||
120 | drm_free_large(pages); | ||
121 | } | ||
122 | |||
123 | static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, | ||
124 | struct vm_area_struct *vma, | ||
125 | unsigned long f_vaddr, | ||
126 | pgoff_t page_offset) | ||
127 | { | ||
128 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | ||
129 | struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; | ||
130 | unsigned long pfn; | ||
131 | |||
132 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { | ||
133 | unsigned long usize = buf->size; | ||
134 | |||
135 | if (!buf->pages) | ||
136 | return -EINTR; | ||
137 | |||
138 | while (usize > 0) { | ||
139 | pfn = page_to_pfn(buf->pages[page_offset++]); | ||
140 | vm_insert_mixed(vma, f_vaddr, pfn); | ||
141 | f_vaddr += PAGE_SIZE; | ||
142 | usize -= PAGE_SIZE; | ||
143 | } | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; | ||
149 | |||
150 | return vm_insert_mixed(vma, f_vaddr, pfn); | ||
151 | } | ||
152 | |||
153 | static int exynos_drm_gem_get_pages(struct drm_gem_object *obj) | ||
154 | { | ||
155 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | ||
156 | struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; | ||
157 | struct scatterlist *sgl; | ||
158 | struct page **pages; | ||
159 | unsigned int npages, i = 0; | ||
160 | int ret; | ||
161 | |||
162 | if (buf->pages) { | ||
163 | DRM_DEBUG_KMS("already allocated.\n"); | ||
164 | return -EINVAL; | ||
165 | } | ||
166 | |||
167 | pages = exynos_gem_get_pages(obj, GFP_KERNEL); | ||
168 | if (IS_ERR(pages)) { | ||
169 | DRM_ERROR("failed to get pages.\n"); | ||
170 | return PTR_ERR(pages); | ||
171 | } | ||
172 | |||
173 | npages = obj->size >> PAGE_SHIFT; | ||
174 | |||
175 | buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); | ||
176 | if (!buf->sgt) { | ||
177 | DRM_ERROR("failed to allocate sg table.\n"); | ||
178 | ret = -ENOMEM; | ||
179 | goto err; | ||
180 | } | ||
181 | |||
182 | ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); | ||
183 | if (ret < 0) { | ||
184 | DRM_ERROR("failed to initialize sg table.\n"); | ||
185 | ret = -EFAULT; | ||
186 | goto err1; | ||
187 | } | ||
188 | |||
189 | sgl = buf->sgt->sgl; | ||
190 | |||
191 | /* set all pages to sg list. */ | ||
192 | while (i < npages) { | ||
193 | sg_set_page(sgl, pages[i], PAGE_SIZE, 0); | ||
194 | sg_dma_address(sgl) = page_to_phys(pages[i]); | ||
195 | i++; | ||
196 | sgl = sg_next(sgl); | ||
197 | } | ||
198 | |||
199 | /* add code for the UNCACHED type here. TODO */ | ||
200 | |||
201 | buf->pages = pages; | ||
202 | return ret; | ||
203 | err1: | ||
204 | kfree(buf->sgt); | ||
205 | buf->sgt = NULL; | ||
206 | err: | ||
207 | exynos_gem_put_pages(obj, pages, true, false); | ||
208 | return ret; | ||
209 | |||
210 | } | ||
211 | |||
212 | static void exynos_drm_gem_put_pages(struct drm_gem_object *obj) | ||
213 | { | ||
214 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | ||
215 | struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; | ||
216 | |||
217 | /* | ||
218 | * if the buffer type is EXYNOS_BO_NONCONTIG then release all pages | ||
219 | * allocated at gem fault handler. | ||
220 | */ | ||
221 | sg_free_table(buf->sgt); | ||
222 | kfree(buf->sgt); | ||
223 | buf->sgt = NULL; | ||
224 | |||
225 | exynos_gem_put_pages(obj, buf->pages, true, false); | ||
226 | buf->pages = NULL; | ||
227 | |||
228 | /* add code for the UNCACHED type here. TODO */ | ||
229 | } | ||
230 | |||
58 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, | 231 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, |
59 | struct drm_file *file_priv, | 232 | struct drm_file *file_priv, |
60 | unsigned int *handle) | 233 | unsigned int *handle) |
@@ -90,7 +263,15 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) | |||
90 | 263 | ||
91 | DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); | 264 | DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); |
92 | 265 | ||
93 | exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer); | 266 | if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) && |
267 | exynos_gem_obj->buffer->pages) | ||
268 | exynos_drm_gem_put_pages(obj); | ||
269 | else | ||
270 | exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, | ||
271 | exynos_gem_obj->buffer); | ||
272 | |||
273 | exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer); | ||
274 | exynos_gem_obj->buffer = NULL; | ||
94 | 275 | ||
95 | if (obj->map_list.map) | 276 | if (obj->map_list.map) |
96 | drm_gem_free_mmap_offset(obj); | 277 | drm_gem_free_mmap_offset(obj); |
@@ -99,6 +280,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) | |||
99 | drm_gem_object_release(obj); | 280 | drm_gem_object_release(obj); |
100 | 281 | ||
101 | kfree(exynos_gem_obj); | 282 | kfree(exynos_gem_obj); |
283 | exynos_gem_obj = NULL; | ||
102 | } | 284 | } |
103 | 285 | ||
104 | static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | 286 | static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, |
@@ -114,6 +296,7 @@ static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | |||
114 | return NULL; | 296 | return NULL; |
115 | } | 297 | } |
116 | 298 | ||
299 | exynos_gem_obj->size = size; | ||
117 | obj = &exynos_gem_obj->base; | 300 | obj = &exynos_gem_obj->base; |
118 | 301 | ||
119 | ret = drm_gem_object_init(dev, obj, size); | 302 | ret = drm_gem_object_init(dev, obj, size); |
@@ -129,27 +312,55 @@ static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | |||
129 | } | 312 | } |
130 | 313 | ||
131 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | 314 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, |
132 | unsigned long size) | 315 | unsigned int flags, |
316 | unsigned long size) | ||
133 | { | 317 | { |
134 | struct exynos_drm_gem_buf *buffer; | ||
135 | struct exynos_drm_gem_obj *exynos_gem_obj; | 318 | struct exynos_drm_gem_obj *exynos_gem_obj; |
319 | struct exynos_drm_gem_buf *buf; | ||
320 | int ret; | ||
136 | 321 | ||
137 | size = roundup(size, PAGE_SIZE); | 322 | size = roundup(size, PAGE_SIZE); |
138 | DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); | 323 | DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); |
139 | 324 | ||
140 | buffer = exynos_drm_buf_create(dev, size); | 325 | flags = mask_gem_flags(flags); |
141 | if (!buffer) | 326 | |
327 | buf = exynos_drm_init_buf(dev, size); | ||
328 | if (!buf) | ||
142 | return ERR_PTR(-ENOMEM); | 329 | return ERR_PTR(-ENOMEM); |
143 | 330 | ||
144 | exynos_gem_obj = exynos_drm_gem_init(dev, size); | 331 | exynos_gem_obj = exynos_drm_gem_init(dev, size); |
145 | if (!exynos_gem_obj) { | 332 | if (!exynos_gem_obj) { |
146 | exynos_drm_buf_destroy(dev, buffer); | 333 | ret = -ENOMEM; |
147 | return ERR_PTR(-ENOMEM); | 334 | goto err; |
148 | } | 335 | } |
149 | 336 | ||
150 | exynos_gem_obj->buffer = buffer; | 337 | exynos_gem_obj->buffer = buf; |
338 | |||
339 | /* set memory type and cache attribute from user side. */ | ||
340 | exynos_gem_obj->flags = flags; | ||
341 | |||
342 | /* | ||
343 | * allocate all pages of the requested size if the user wants | ||
344 | * physically non-contiguous memory. | ||
345 | */ | ||
346 | if (flags & EXYNOS_BO_NONCONTIG) { | ||
347 | ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base); | ||
348 | if (ret < 0) { | ||
349 | drm_gem_object_release(&exynos_gem_obj->base); | ||
350 | goto err; | ||
351 | } | ||
352 | } else { | ||
353 | ret = exynos_drm_alloc_buf(dev, buf, flags); | ||
354 | if (ret < 0) { | ||
355 | drm_gem_object_release(&exynos_gem_obj->base); | ||
356 | goto err; | ||
357 | } | ||
358 | } | ||
151 | 359 | ||
152 | return exynos_gem_obj; | 360 | return exynos_gem_obj; |
361 | err: | ||
362 | exynos_drm_fini_buf(dev, buf); | ||
363 | return ERR_PTR(ret); | ||
153 | } | 364 | } |
154 | 365 | ||
155 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | 366 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, |
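A hedged usage sketch of the widened exynos_drm_gem_create() signature changed just above; it assumes the usual exynos drm headers and an existing dev/size in the caller, and the comments only restate what the hunks above establish:

/* Sketch: choosing the buffer type through the new flags argument. */
static int sketch_alloc(struct drm_device *dev, unsigned long size)
{
	struct exynos_drm_gem_obj *gem_obj;

	/* flags == 0: physically contiguous, as the fbdev path above uses */
	gem_obj = exynos_drm_gem_create(dev, 0, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);
	exynos_drm_gem_destroy(gem_obj);

	/* EXYNOS_BO_NONCONTIG: shmem-backed pages collected by
	 * exynos_drm_gem_get_pages() into buf->pages and buf->sgt */
	gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);
	exynos_drm_gem_destroy(gem_obj);

	return 0;
}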
@@ -161,7 +372,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | |||
161 | 372 | ||
162 | DRM_DEBUG_KMS("%s\n", __FILE__); | 373 | DRM_DEBUG_KMS("%s\n", __FILE__); |
163 | 374 | ||
164 | exynos_gem_obj = exynos_drm_gem_create(dev, args->size); | 375 | exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); |
165 | if (IS_ERR(exynos_gem_obj)) | 376 | if (IS_ERR(exynos_gem_obj)) |
166 | return PTR_ERR(exynos_gem_obj); | 377 | return PTR_ERR(exynos_gem_obj); |
167 | 378 | ||
@@ -175,6 +386,64 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | |||
175 | return 0; | 386 | return 0; |
176 | } | 387 | } |
177 | 388 | ||
389 | void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, | ||
390 | unsigned int gem_handle, | ||
391 | struct drm_file *file_priv) | ||
392 | { | ||
393 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
394 | struct drm_gem_object *obj; | ||
395 | |||
396 | obj = drm_gem_object_lookup(dev, file_priv, gem_handle); | ||
397 | if (!obj) { | ||
398 | DRM_ERROR("failed to lookup gem object.\n"); | ||
399 | return ERR_PTR(-EINVAL); | ||
400 | } | ||
401 | |||
402 | exynos_gem_obj = to_exynos_gem_obj(obj); | ||
403 | |||
404 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { | ||
405 | DRM_DEBUG_KMS("not support NONCONTIG type.\n"); | ||
406 | drm_gem_object_unreference_unlocked(obj); | ||
407 | |||
408 | /* TODO */ | ||
409 | return ERR_PTR(-EINVAL); | ||
410 | } | ||
411 | |||
412 | return &exynos_gem_obj->buffer->dma_addr; | ||
413 | } | ||
414 | |||
415 | void exynos_drm_gem_put_dma_addr(struct drm_device *dev, | ||
416 | unsigned int gem_handle, | ||
417 | struct drm_file *file_priv) | ||
418 | { | ||
419 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
420 | struct drm_gem_object *obj; | ||
421 | |||
422 | obj = drm_gem_object_lookup(dev, file_priv, gem_handle); | ||
423 | if (!obj) { | ||
424 | DRM_ERROR("failed to lookup gem object.\n"); | ||
425 | return; | ||
426 | } | ||
427 | |||
428 | exynos_gem_obj = to_exynos_gem_obj(obj); | ||
429 | |||
430 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { | ||
431 | DRM_DEBUG_KMS("not support NONCONTIG type.\n"); | ||
432 | drm_gem_object_unreference_unlocked(obj); | ||
433 | |||
434 | /* TODO */ | ||
435 | return; | ||
436 | } | ||
437 | |||
438 | drm_gem_object_unreference_unlocked(obj); | ||
439 | |||
440 | /* | ||
441 | * decrease obj->refcount one more time because it has already been | ||
442 | * increased at exynos_drm_gem_get_dma_addr(). | ||
443 | */ | ||
444 | drm_gem_object_unreference_unlocked(obj); | ||
445 | } | ||
446 | |||
178 | int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | 447 | int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, |
179 | struct drm_file *file_priv) | 448 | struct drm_file *file_priv) |
180 | { | 449 | { |
@@ -200,7 +469,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
200 | struct drm_gem_object *obj = filp->private_data; | 469 | struct drm_gem_object *obj = filp->private_data; |
201 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 470 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
202 | struct exynos_drm_gem_buf *buffer; | 471 | struct exynos_drm_gem_buf *buffer; |
203 | unsigned long pfn, vm_size; | 472 | unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; |
473 | int ret; | ||
204 | 474 | ||
205 | DRM_DEBUG_KMS("%s\n", __FILE__); | 475 | DRM_DEBUG_KMS("%s\n", __FILE__); |
206 | 476 | ||
@@ -208,9 +478,9 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
208 | 478 | ||
209 | /* in case of direct mapping, always having non-cachable attribute */ | 479 | /* in case of direct mapping, always having non-cachable attribute */ |
210 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 480 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
211 | vma->vm_file = filp; | ||
212 | 481 | ||
213 | vm_size = vma->vm_end - vma->vm_start; | 482 | vm_size = usize = vma->vm_end - vma->vm_start; |
483 | |||
214 | /* | 484 | /* |
215 | * a buffer contains information to physically continuous memory | 485 | * a buffer contains information to physically continuous memory |
216 | * allocated by user request or at framebuffer creation. | 486 | * allocated by user request or at framebuffer creation. |
@@ -221,18 +491,37 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, | |||
221 | if (vm_size > buffer->size) | 491 | if (vm_size > buffer->size) |
222 | return -EINVAL; | 492 | return -EINVAL; |
223 | 493 | ||
224 | /* | 494 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { |
225 | * get page frame number to physical memory to be mapped | 495 | int i = 0; |
226 | * to user space. | 496 | |
227 | */ | 497 | if (!buffer->pages) |
228 | pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT; | 498 | return -EINVAL; |
229 | 499 | ||
230 | DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); | 500 | do { |
231 | 501 | ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); | |
232 | if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, | 502 | if (ret) { |
233 | vma->vm_page_prot)) { | 503 | DRM_ERROR("failed to remap user space.\n"); |
234 | DRM_ERROR("failed to remap pfn range.\n"); | 504 | return ret; |
235 | return -EAGAIN; | 505 | } |
506 | |||
507 | uaddr += PAGE_SIZE; | ||
508 | usize -= PAGE_SIZE; | ||
509 | } while (usize > 0); | ||
510 | } else { | ||
511 | /* | ||
512 | * get the page frame number of the physical memory to be mapped | ||
513 | * to user space. | ||
514 | */ | ||
515 | pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> | ||
516 | PAGE_SHIFT; | ||
517 | |||
518 | DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); | ||
519 | |||
520 | if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, | ||
521 | vma->vm_page_prot)) { | ||
522 | DRM_ERROR("failed to remap pfn range.\n"); | ||
523 | return -EAGAIN; | ||
524 | } | ||
236 | } | 525 | } |
237 | 526 | ||
238 | return 0; | 527 | return 0; |
@@ -312,9 +601,9 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | |||
312 | */ | 601 | */ |
313 | 602 | ||
314 | args->pitch = args->width * args->bpp >> 3; | 603 | args->pitch = args->width * args->bpp >> 3; |
315 | args->size = args->pitch * args->height; | 604 | args->size = PAGE_ALIGN(args->pitch * args->height); |
316 | 605 | ||
317 | exynos_gem_obj = exynos_drm_gem_create(dev, args->size); | 606 | exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); |
318 | if (IS_ERR(exynos_gem_obj)) | 607 | if (IS_ERR(exynos_gem_obj)) |
319 | return PTR_ERR(exynos_gem_obj); | 608 | return PTR_ERR(exynos_gem_obj); |
320 | 609 | ||
@@ -398,20 +687,31 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
398 | struct drm_gem_object *obj = vma->vm_private_data; | 687 | struct drm_gem_object *obj = vma->vm_private_data; |
399 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 688 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
400 | struct drm_device *dev = obj->dev; | 689 | struct drm_device *dev = obj->dev; |
401 | unsigned long pfn; | 690 | unsigned long f_vaddr; |
402 | pgoff_t page_offset; | 691 | pgoff_t page_offset; |
403 | int ret; | 692 | int ret; |
404 | 693 | ||
405 | page_offset = ((unsigned long)vmf->virtual_address - | 694 | page_offset = ((unsigned long)vmf->virtual_address - |
406 | vma->vm_start) >> PAGE_SHIFT; | 695 | vma->vm_start) >> PAGE_SHIFT; |
696 | f_vaddr = (unsigned long)vmf->virtual_address; | ||
407 | 697 | ||
408 | mutex_lock(&dev->struct_mutex); | 698 | mutex_lock(&dev->struct_mutex); |
409 | 699 | ||
410 | pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >> | 700 | /* |
411 | PAGE_SHIFT) + page_offset; | 701 | * allocate all pages as desired size if user wants to allocate |
702 | * physically non-continuous memory. | ||
703 | */ | ||
704 | if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { | ||
705 | ret = exynos_drm_gem_get_pages(obj); | ||
706 | if (ret < 0) | ||
707 | goto err; | ||
708 | } | ||
412 | 709 | ||
413 | ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); | 710 | ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); |
711 | if (ret < 0) | ||
712 | DRM_ERROR("failed to map pages.\n"); | ||
414 | 713 | ||
714 | err: | ||
415 | mutex_unlock(&dev->struct_mutex); | 715 | mutex_unlock(&dev->struct_mutex); |
416 | 716 | ||
417 | return convert_to_vm_err_msg(ret); | 717 | return convert_to_vm_err_msg(ret); |
@@ -435,7 +735,3 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
435 | 735 | ||
436 | return ret; | 736 | return ret; |
437 | } | 737 | } |
438 | |||
439 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
440 | MODULE_DESCRIPTION("Samsung SoC DRM GEM Module"); | ||
441 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 67cdc9168708..e40fbad8b705 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -36,11 +36,15 @@ | |||
36 | * @dma_addr: bus address(accessed by dma) to allocated memory region. | 36 | * @dma_addr: bus address(accessed by dma) to allocated memory region. |
37 | * - this address could be physical address without IOMMU and | 37 | * - this address could be physical address without IOMMU and |
38 | * device address with IOMMU. | 38 | * device address with IOMMU. |
39 | * @sgt: sg table to transfer page data. | ||
40 | * @pages: contains all pages of the allocated memory region. | ||
39 | * @size: size of allocated memory region. | 41 | * @size: size of allocated memory region. |
40 | */ | 42 | */ |
41 | struct exynos_drm_gem_buf { | 43 | struct exynos_drm_gem_buf { |
42 | void __iomem *kvaddr; | 44 | void __iomem *kvaddr; |
43 | dma_addr_t dma_addr; | 45 | dma_addr_t dma_addr; |
46 | struct sg_table *sgt; | ||
47 | struct page **pages; | ||
44 | unsigned long size; | 48 | unsigned long size; |
45 | }; | 49 | }; |
46 | 50 | ||
@@ -55,6 +59,8 @@ struct exynos_drm_gem_buf { | |||
55 | * by user request or at framebuffer creation. | 59 | * by user request or at framebuffer creation. |
56 | * continuous memory region allocated by user request | 60 | * continuous memory region allocated by user request |
57 | * or at framebuffer creation. | 61 | * or at framebuffer creation. |
62 | * @size: total size of the physically non-contiguous memory region. | ||
63 | * @flags: indicates the memory type and cache attribute of the allocated buffer. | ||
58 | * | 64 | * |
59 | * P.S. this object would be transfered to user as kms_bo.handle so | 65 | * P.S. this object would be transfered to user as kms_bo.handle so |
60 | * user can access the buffer through kms_bo.handle. | 66 | * user can access the buffer through kms_bo.handle. |
@@ -62,6 +68,8 @@ struct exynos_drm_gem_buf { | |||
62 | struct exynos_drm_gem_obj { | 68 | struct exynos_drm_gem_obj { |
63 | struct drm_gem_object base; | 69 | struct drm_gem_object base; |
64 | struct exynos_drm_gem_buf *buffer; | 70 | struct exynos_drm_gem_buf *buffer; |
71 | unsigned long size; | ||
72 | unsigned int flags; | ||
65 | }; | 73 | }; |
66 | 74 | ||
67 | /* destroy a buffer with gem object */ | 75 | /* destroy a buffer with gem object */ |
@@ -69,7 +77,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); | |||
69 | 77 | ||
70 | /* create a new buffer with gem object */ | 78 | /* create a new buffer with gem object */ |
71 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | 79 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, |
72 | unsigned long size); | 80 | unsigned int flags, |
81 | unsigned long size); | ||
73 | 82 | ||
74 | /* | 83 | /* |
75 | * request gem object creation and buffer allocation as the size | 84 | * request gem object creation and buffer allocation as the size |
@@ -79,6 +88,24 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | |||
79 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, | 88 | int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, |
80 | struct drm_file *file_priv); | 89 | struct drm_file *file_priv); |
81 | 90 | ||
91 | /* | ||
92 | * get the dma address from a gem handle. this function can be used by | ||
93 | * other drivers such as 2d/3d acceleration drivers. | ||
94 | * with this function call, the gem object reference count is increased. | ||
95 | */ | ||
96 | void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, | ||
97 | unsigned int gem_handle, | ||
98 | struct drm_file *file_priv); | ||
99 | |||
100 | /* | ||
101 | * put the dma address obtained from a gem handle. this function can be | ||
102 | * used by other drivers such as 2d/3d acceleration drivers. | ||
103 | * with this function call, the gem object reference count is decreased. | ||
104 | */ | ||
105 | void exynos_drm_gem_put_dma_addr(struct drm_device *dev, | ||
106 | unsigned int gem_handle, | ||
107 | struct drm_file *file_priv); | ||
108 | |||
82 | /* get buffer offset to map to user space. */ | 109 | /* get buffer offset to map to user space. */ |
83 | int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, | 110 | int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, |
84 | struct drm_file *file_priv); | 111 | struct drm_file *file_priv); |
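The two helpers above are meant for other in-kernel users (the comments mention 2d/3d acceleration drivers); a hedged sketch of the expected get/put pairing, where everything other than the two helpers is hypothetical:

/* Hypothetical acceleration-driver usage of the dma address helpers. */
static int sketch_map_for_engine(struct drm_device *drm_dev,
				 unsigned int gem_handle,
				 struct drm_file *file_priv)
{
	dma_addr_t *dma_addr;

	dma_addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle, file_priv);
	if (IS_ERR(dma_addr))
		return PTR_ERR(dma_addr);

	/* ... program the engine with *dma_addr ... */

	/* drops the reference taken by exynos_drm_gem_get_dma_addr() */
	exynos_drm_gem_put_dma_addr(drm_dev, gem_handle, file_priv);

	return 0;
}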
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index ed8a319ed84b..14eb26b0ba1c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c | |||
@@ -38,7 +38,6 @@ struct drm_hdmi_context { | |||
38 | struct exynos_drm_subdrv subdrv; | 38 | struct exynos_drm_subdrv subdrv; |
39 | struct exynos_drm_hdmi_context *hdmi_ctx; | 39 | struct exynos_drm_hdmi_context *hdmi_ctx; |
40 | struct exynos_drm_hdmi_context *mixer_ctx; | 40 | struct exynos_drm_hdmi_context *mixer_ctx; |
41 | struct work_struct work; | ||
42 | }; | 41 | }; |
43 | 42 | ||
44 | void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops | 43 | void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops |
@@ -49,7 +48,6 @@ void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops | |||
49 | if (display_ops) | 48 | if (display_ops) |
50 | hdmi_display_ops = display_ops; | 49 | hdmi_display_ops = display_ops; |
51 | } | 50 | } |
52 | EXPORT_SYMBOL(exynos_drm_display_ops_register); | ||
53 | 51 | ||
54 | void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops | 52 | void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops |
55 | *manager_ops) | 53 | *manager_ops) |
@@ -59,7 +57,6 @@ void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops | |||
59 | if (manager_ops) | 57 | if (manager_ops) |
60 | hdmi_manager_ops = manager_ops; | 58 | hdmi_manager_ops = manager_ops; |
61 | } | 59 | } |
62 | EXPORT_SYMBOL(exynos_drm_manager_ops_register); | ||
63 | 60 | ||
64 | void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops | 61 | void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops |
65 | *overlay_ops) | 62 | *overlay_ops) |
@@ -69,7 +66,6 @@ void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops | |||
69 | if (overlay_ops) | 66 | if (overlay_ops) |
70 | hdmi_overlay_ops = overlay_ops; | 67 | hdmi_overlay_ops = overlay_ops; |
71 | } | 68 | } |
72 | EXPORT_SYMBOL(exynos_drm_overlay_ops_register); | ||
73 | 69 | ||
74 | static bool drm_hdmi_is_connected(struct device *dev) | 70 | static bool drm_hdmi_is_connected(struct device *dev) |
75 | { | 71 | { |
@@ -155,6 +151,20 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev) | |||
155 | return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx); | 151 | return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx); |
156 | } | 152 | } |
157 | 153 | ||
154 | static void drm_hdmi_mode_fixup(struct device *subdrv_dev, | ||
155 | struct drm_connector *connector, | ||
156 | struct drm_display_mode *mode, | ||
157 | struct drm_display_mode *adjusted_mode) | ||
158 | { | ||
159 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); | ||
160 | |||
161 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
162 | |||
163 | if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup) | ||
164 | hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, | ||
165 | mode, adjusted_mode); | ||
166 | } | ||
167 | |||
158 | static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) | 168 | static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) |
159 | { | 169 | { |
160 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); | 170 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); |
@@ -165,6 +175,18 @@ static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) | |||
165 | hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode); | 175 | hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode); |
166 | } | 176 | } |
167 | 177 | ||
178 | static void drm_hdmi_get_max_resol(struct device *subdrv_dev, | ||
179 | unsigned int *width, unsigned int *height) | ||
180 | { | ||
181 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); | ||
182 | |||
183 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
184 | |||
185 | if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol) | ||
186 | hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, | ||
187 | height); | ||
188 | } | ||
189 | |||
168 | static void drm_hdmi_commit(struct device *subdrv_dev) | 190 | static void drm_hdmi_commit(struct device *subdrv_dev) |
169 | { | 191 | { |
170 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); | 192 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); |
@@ -200,7 +222,9 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = { | |||
200 | .dpms = drm_hdmi_dpms, | 222 | .dpms = drm_hdmi_dpms, |
201 | .enable_vblank = drm_hdmi_enable_vblank, | 223 | .enable_vblank = drm_hdmi_enable_vblank, |
202 | .disable_vblank = drm_hdmi_disable_vblank, | 224 | .disable_vblank = drm_hdmi_disable_vblank, |
225 | .mode_fixup = drm_hdmi_mode_fixup, | ||
203 | .mode_set = drm_hdmi_mode_set, | 226 | .mode_set = drm_hdmi_mode_set, |
227 | .get_max_resol = drm_hdmi_get_max_resol, | ||
204 | .commit = drm_hdmi_commit, | 228 | .commit = drm_hdmi_commit, |
205 | }; | 229 | }; |
206 | 230 | ||
@@ -249,7 +273,6 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev, | |||
249 | struct drm_hdmi_context *ctx; | 273 | struct drm_hdmi_context *ctx; |
250 | struct platform_device *pdev = to_platform_device(dev); | 274 | struct platform_device *pdev = to_platform_device(dev); |
251 | struct exynos_drm_common_hdmi_pd *pd; | 275 | struct exynos_drm_common_hdmi_pd *pd; |
252 | int ret; | ||
253 | 276 | ||
254 | DRM_DEBUG_KMS("%s\n", __FILE__); | 277 | DRM_DEBUG_KMS("%s\n", __FILE__); |
255 | 278 | ||
@@ -270,26 +293,13 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev, | |||
270 | return -EFAULT; | 293 | return -EFAULT; |
271 | } | 294 | } |
272 | 295 | ||
273 | ret = platform_driver_register(&hdmi_driver); | ||
274 | if (ret) { | ||
275 | DRM_DEBUG_KMS("failed to register hdmi driver.\n"); | ||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | ret = platform_driver_register(&mixer_driver); | ||
280 | if (ret) { | ||
281 | DRM_DEBUG_KMS("failed to register mixer driver.\n"); | ||
282 | goto err_hdmidrv; | ||
283 | } | ||
284 | |||
285 | ctx = get_ctx_from_subdrv(subdrv); | 296 | ctx = get_ctx_from_subdrv(subdrv); |
286 | 297 | ||
287 | ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *) | 298 | ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *) |
288 | to_context(pd->hdmi_dev); | 299 | to_context(pd->hdmi_dev); |
289 | if (!ctx->hdmi_ctx) { | 300 | if (!ctx->hdmi_ctx) { |
290 | DRM_DEBUG_KMS("hdmi context is null.\n"); | 301 | DRM_DEBUG_KMS("hdmi context is null.\n"); |
291 | ret = -EFAULT; | 302 | return -EFAULT; |
292 | goto err_mixerdrv; | ||
293 | } | 303 | } |
294 | 304 | ||
295 | ctx->hdmi_ctx->drm_dev = drm_dev; | 305 | ctx->hdmi_ctx->drm_dev = drm_dev; |
@@ -298,42 +308,12 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev, | |||
298 | to_context(pd->mixer_dev); | 308 | to_context(pd->mixer_dev); |
299 | if (!ctx->mixer_ctx) { | 309 | if (!ctx->mixer_ctx) { |
300 | DRM_DEBUG_KMS("mixer context is null.\n"); | 310 | DRM_DEBUG_KMS("mixer context is null.\n"); |
301 | ret = -EFAULT; | 311 | return -EFAULT; |
302 | goto err_mixerdrv; | ||
303 | } | 312 | } |
304 | 313 | ||
305 | ctx->mixer_ctx->drm_dev = drm_dev; | 314 | ctx->mixer_ctx->drm_dev = drm_dev; |
306 | 315 | ||
307 | return 0; | 316 | return 0; |
308 | |||
309 | err_mixerdrv: | ||
310 | platform_driver_unregister(&mixer_driver); | ||
311 | err_hdmidrv: | ||
312 | platform_driver_unregister(&hdmi_driver); | ||
313 | return ret; | ||
314 | } | ||
315 | |||
316 | static void hdmi_subdrv_remove(struct drm_device *drm_dev) | ||
317 | { | ||
318 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
319 | |||
320 | platform_driver_unregister(&hdmi_driver); | ||
321 | platform_driver_unregister(&mixer_driver); | ||
322 | } | ||
323 | |||
324 | static void exynos_drm_hdmi_late_probe(struct work_struct *work) | ||
325 | { | ||
326 | struct drm_hdmi_context *ctx = container_of(work, | ||
327 | struct drm_hdmi_context, work); | ||
328 | |||
329 | /* | ||
330 | * this function calls subdrv->probe() so this must be called | ||
331 | * after probe context. | ||
332 | * | ||
333 | * PS. subdrv->probe() will call platform_driver_register() to probe | ||
334 | * hdmi and mixer driver. | ||
335 | */ | ||
336 | exynos_drm_subdrv_register(&ctx->subdrv); | ||
337 | } | 317 | } |
338 | 318 | ||
339 | static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) | 319 | static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) |
@@ -353,7 +333,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) | |||
353 | subdrv = &ctx->subdrv; | 333 | subdrv = &ctx->subdrv; |
354 | 334 | ||
355 | subdrv->probe = hdmi_subdrv_probe; | 335 | subdrv->probe = hdmi_subdrv_probe; |
356 | subdrv->remove = hdmi_subdrv_remove; | ||
357 | subdrv->manager.pipe = -1; | 336 | subdrv->manager.pipe = -1; |
358 | subdrv->manager.ops = &drm_hdmi_manager_ops; | 337 | subdrv->manager.ops = &drm_hdmi_manager_ops; |
359 | subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops; | 338 | subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops; |
@@ -362,9 +341,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) | |||
362 | 341 | ||
363 | platform_set_drvdata(pdev, subdrv); | 342 | platform_set_drvdata(pdev, subdrv); |
364 | 343 | ||
365 | INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe); | 344 | exynos_drm_subdrv_register(subdrv); |
366 | |||
367 | schedule_work(&ctx->work); | ||
368 | 345 | ||
369 | return 0; | 346 | return 0; |
370 | } | 347 | } |
@@ -400,7 +377,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev) | |||
400 | return 0; | 377 | return 0; |
401 | } | 378 | } |
402 | 379 | ||
403 | static struct platform_driver exynos_drm_common_hdmi_driver = { | 380 | struct platform_driver exynos_drm_common_hdmi_driver = { |
404 | .probe = exynos_drm_hdmi_probe, | 381 | .probe = exynos_drm_hdmi_probe, |
405 | .remove = __devexit_p(exynos_drm_hdmi_remove), | 382 | .remove = __devexit_p(exynos_drm_hdmi_remove), |
406 | .driver = { | 383 | .driver = { |
@@ -409,31 +386,3 @@ static struct platform_driver exynos_drm_common_hdmi_driver = { | |||
409 | .pm = &hdmi_pm_ops, | 386 | .pm = &hdmi_pm_ops, |
410 | }, | 387 | }, |
411 | }; | 388 | }; |
412 | |||
413 | static int __init exynos_drm_hdmi_init(void) | ||
414 | { | ||
415 | int ret; | ||
416 | |||
417 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
418 | |||
419 | ret = platform_driver_register(&exynos_drm_common_hdmi_driver); | ||
420 | if (ret) { | ||
421 | DRM_DEBUG_KMS("failed to register hdmi common driver.\n"); | ||
422 | return ret; | ||
423 | } | ||
424 | |||
425 | return ret; | ||
426 | } | ||
427 | |||
428 | static void __exit exynos_drm_hdmi_exit(void) | ||
429 | { | ||
430 | platform_driver_unregister(&exynos_drm_common_hdmi_driver); | ||
431 | } | ||
432 | |||
433 | module_init(exynos_drm_hdmi_init); | ||
434 | module_exit(exynos_drm_hdmi_exit); | ||
435 | |||
436 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
437 | MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>"); | ||
438 | MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver"); | ||
439 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index 3c29f790ee45..44497cfb6c74 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h | |||
@@ -47,7 +47,12 @@ struct exynos_hdmi_display_ops { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct exynos_hdmi_manager_ops { | 49 | struct exynos_hdmi_manager_ops { |
50 | void (*mode_fixup)(void *ctx, struct drm_connector *connector, | ||
51 | struct drm_display_mode *mode, | ||
52 | struct drm_display_mode *adjusted_mode); | ||
50 | void (*mode_set)(void *ctx, void *mode); | 53 | void (*mode_set)(void *ctx, void *mode); |
54 | void (*get_max_resol)(void *ctx, unsigned int *width, | ||
55 | unsigned int *height); | ||
51 | void (*commit)(void *ctx); | 56 | void (*commit)(void *ctx); |
52 | void (*disable)(void *ctx); | 57 | void (*disable)(void *ctx); |
53 | }; | 58 | }; |
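The hdmi/mixer side that fills in these new callbacks is not part of this section; as a hedged sketch, an implementation registered through exynos_drm_manager_ops_register() might look roughly like this, with the sketch_* names and the 1920x1080 limit purely illustrative:

/* Illustrative hdmi-side callbacks; names and limits are assumptions. */
static void sketch_get_max_resol(void *ctx, unsigned int *width,
				 unsigned int *height)
{
	*width = 1920;
	*height = 1080;
}

static void sketch_mode_fixup(void *ctx, struct drm_connector *connector,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode)
{
	/* constrain adjusted_mode to a timing the hardware can drive;
	 * left as a stub in this sketch */
}

static struct exynos_hdmi_manager_ops sketch_manager_ops = {
	.mode_fixup	= sketch_mode_fixup,
	.get_max_resol	= sketch_get_max_resol,
	/* .mode_set, .commit and .disable as before */
};

drm_hdmi_mode_fixup() and drm_hdmi_get_max_resol() in exynos_drm_hdmi.c above then forward into such callbacks through hdmi_manager_ops.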
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index bdcf770aa22e..c277a3a445f5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
@@ -22,6 +22,10 @@ struct exynos_plane { | |||
22 | bool enabled; | 22 | bool enabled; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | static const uint32_t formats[] = { | ||
26 | DRM_FORMAT_XRGB8888, | ||
27 | }; | ||
28 | |||
25 | static int | 29 | static int |
26 | exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | 30 | exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, |
27 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | 31 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, |
@@ -115,9 +119,9 @@ int exynos_plane_init(struct drm_device *dev, unsigned int nr) | |||
115 | 119 | ||
116 | exynos_plane->overlay.zpos = DEFAULT_ZPOS; | 120 | exynos_plane->overlay.zpos = DEFAULT_ZPOS; |
117 | 121 | ||
118 | /* TODO: format */ | ||
119 | return drm_plane_init(dev, &exynos_plane->base, possible_crtcs, | 122 | return drm_plane_init(dev, &exynos_plane->base, possible_crtcs, |
120 | &exynos_plane_funcs, NULL, 0, false); | 123 | &exynos_plane_funcs, formats, ARRAY_SIZE(formats), |
124 | false); | ||
121 | } | 125 | } |
122 | 126 | ||
123 | int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, | 127 | int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c new file mode 100644 index 000000000000..8e1339f9fe1f --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
@@ -0,0 +1,676 @@ | |||
1 | /* exynos_drm_vidi.c | ||
2 | * | ||
3 | * Copyright (C) 2012 Samsung Electronics Co.Ltd | ||
4 | * Authors: | ||
5 | * Inki Dae <inki.dae@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | * | ||
12 | */ | ||
13 | #include "drmP.h" | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | |||
19 | #include <drm/exynos_drm.h> | ||
20 | |||
21 | #include "drm_edid.h" | ||
22 | #include "drm_crtc_helper.h" | ||
23 | |||
24 | #include "exynos_drm_drv.h" | ||
25 | #include "exynos_drm_crtc.h" | ||
26 | #include "exynos_drm_encoder.h" | ||
27 | |||
28 | /* vidi has three virtual windows in total. */ | ||
29 | #define WINDOWS_NR 3 | ||
30 | |||
31 | #define get_vidi_context(dev) platform_get_drvdata(to_platform_device(dev)) | ||
32 | |||
33 | struct vidi_win_data { | ||
34 | unsigned int offset_x; | ||
35 | unsigned int offset_y; | ||
36 | unsigned int ovl_width; | ||
37 | unsigned int ovl_height; | ||
38 | unsigned int fb_width; | ||
39 | unsigned int fb_height; | ||
40 | unsigned int bpp; | ||
41 | dma_addr_t dma_addr; | ||
42 | void __iomem *vaddr; | ||
43 | unsigned int buf_offsize; | ||
44 | unsigned int line_size; /* bytes */ | ||
45 | bool enabled; | ||
46 | }; | ||
47 | |||
48 | struct vidi_context { | ||
49 | struct exynos_drm_subdrv subdrv; | ||
50 | struct drm_crtc *crtc; | ||
51 | struct vidi_win_data win_data[WINDOWS_NR]; | ||
52 | struct edid *raw_edid; | ||
53 | unsigned int clkdiv; | ||
54 | unsigned int default_win; | ||
55 | unsigned long irq_flags; | ||
56 | unsigned int connected; | ||
57 | bool vblank_on; | ||
58 | bool suspended; | ||
59 | struct work_struct work; | ||
60 | struct mutex lock; | ||
61 | }; | ||
62 | |||
63 | static const char fake_edid_info[] = { | ||
64 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05, | ||
65 | 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78, | ||
66 | 0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd, | ||
67 | 0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, | ||
68 | 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00, | ||
69 | 0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, | ||
70 | 0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, | ||
71 | 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, | ||
72 | 0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | ||
73 | 0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47, | ||
74 | 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1, | ||
75 | 0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83, | ||
76 | 0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00, | ||
77 | 0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c, | ||
78 | 0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a, | ||
79 | 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00, | ||
80 | 0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, | ||
81 | 0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, | ||
82 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
83 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
84 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
85 | 0x00, 0x00, 0x00, 0x06 | ||
86 | }; | ||
87 | |||
88 | static void vidi_fake_vblank_handler(struct work_struct *work); | ||
89 | |||
90 | static bool vidi_display_is_connected(struct device *dev) | ||
91 | { | ||
92 | struct vidi_context *ctx = get_vidi_context(dev); | ||
93 | |||
94 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
95 | |||
96 | /* | ||
97 | * a connection request comes from user space, | ||
98 | * which performs hotplug through a specific ioctl. | ||
99 | */ | ||
100 | return ctx->connected ? true : false; | ||
101 | } | ||
102 | |||
103 | static int vidi_get_edid(struct device *dev, struct drm_connector *connector, | ||
104 | u8 *edid, int len) | ||
105 | { | ||
106 | struct vidi_context *ctx = get_vidi_context(dev); | ||
107 | struct edid *raw_edid; | ||
108 | |||
109 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
110 | |||
111 | /* | ||
112 | * the edid data comes from user space and is set | ||
113 | * to ctx->raw_edid through a specific ioctl. | ||
114 | */ | ||
115 | if (!ctx->raw_edid) { | ||
116 | DRM_DEBUG_KMS("raw_edid is null.\n"); | ||
117 | return -EFAULT; | ||
118 | } | ||
119 | |||
120 | raw_edid = kzalloc(len, GFP_KERNEL); | ||
121 | if (!raw_edid) { | ||
122 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); | ||
123 | return -ENOMEM; | ||
124 | } | ||
125 | |||
126 | memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) | ||
127 | * EDID_LENGTH, len)); | ||
128 | |||
129 | /* attach the edid data to connector. */ | ||
130 | connector->display_info.raw_edid = (char *)raw_edid; | ||
131 | |||
132 | memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) | ||
133 | * EDID_LENGTH, len)); | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static void *vidi_get_panel(struct device *dev) | ||
139 | { | ||
140 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
141 | |||
142 | /* TODO. */ | ||
143 | |||
144 | return NULL; | ||
145 | } | ||
146 | |||
147 | static int vidi_check_timing(struct device *dev, void *timing) | ||
148 | { | ||
149 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
150 | |||
151 | /* TODO. */ | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static int vidi_display_power_on(struct device *dev, int mode) | ||
157 | { | ||
158 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
159 | |||
160 | /* TODO */ | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static struct exynos_drm_display_ops vidi_display_ops = { | ||
166 | .type = EXYNOS_DISPLAY_TYPE_VIDI, | ||
167 | .is_connected = vidi_display_is_connected, | ||
168 | .get_edid = vidi_get_edid, | ||
169 | .get_panel = vidi_get_panel, | ||
170 | .check_timing = vidi_check_timing, | ||
171 | .power_on = vidi_display_power_on, | ||
172 | }; | ||
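The shared connector code (exynos_drm_connector.c, not shown in this section) is what ends up calling these ops; a simplified, hedged sketch of that consumption, where get_display_ops() and get_subdrv_dev() are hypothetical helpers standing in for the real lookup:

/* Simplified sketch of how a detect() hook could consume display_ops. */
static enum drm_connector_status
sketch_detect(struct drm_connector *connector, bool force)
{
	struct exynos_drm_display_ops *ops = get_display_ops(connector);	/* hypothetical */
	struct device *dev = get_subdrv_dev(connector);				/* hypothetical */

	if (ops && ops->is_connected && ops->is_connected(dev))
		return connector_status_connected;

	return connector_status_disconnected;
}

For vidi this means the connector only reports connected after userspace has set ctx->connected through the hotplug ioctl mentioned in the comment above.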
173 | |||
174 | static void vidi_dpms(struct device *subdrv_dev, int mode) | ||
175 | { | ||
176 | struct vidi_context *ctx = get_vidi_context(subdrv_dev); | ||
177 | |||
178 | DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode); | ||
179 | |||
180 | mutex_lock(&ctx->lock); | ||
181 | |||
182 | switch (mode) { | ||
183 | case DRM_MODE_DPMS_ON: | ||
184 | /* TODO. */ | ||
185 | break; | ||
186 | case DRM_MODE_DPMS_STANDBY: | ||
187 | case DRM_MODE_DPMS_SUSPEND: | ||
188 | case DRM_MODE_DPMS_OFF: | ||
189 | /* TODO. */ | ||
190 | break; | ||
191 | default: | ||
192 | DRM_DEBUG_KMS("unspecified mode %d\n", mode); | ||
193 | break; | ||
194 | } | ||
195 | |||
196 | mutex_unlock(&ctx->lock); | ||
197 | } | ||
198 | |||
199 | static void vidi_apply(struct device *subdrv_dev) | ||
200 | { | ||
201 | struct vidi_context *ctx = get_vidi_context(subdrv_dev); | ||
202 | struct exynos_drm_manager *mgr = &ctx->subdrv.manager; | ||
203 | struct exynos_drm_manager_ops *mgr_ops = mgr->ops; | ||
204 | struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops; | ||
205 | struct vidi_win_data *win_data; | ||
206 | int i; | ||
207 | |||
208 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
209 | |||
210 | for (i = 0; i < WINDOWS_NR; i++) { | ||
211 | win_data = &ctx->win_data[i]; | ||
212 | if (win_data->enabled && (ovl_ops && ovl_ops->commit)) | ||
213 | ovl_ops->commit(subdrv_dev, i); | ||
214 | } | ||
215 | |||
216 | if (mgr_ops && mgr_ops->commit) | ||
217 | mgr_ops->commit(subdrv_dev); | ||
218 | } | ||
219 | |||
220 | static void vidi_commit(struct device *dev) | ||
221 | { | ||
222 | struct vidi_context *ctx = get_vidi_context(dev); | ||
223 | |||
224 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
225 | |||
226 | if (ctx->suspended) | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | static int vidi_enable_vblank(struct device *dev) | ||
231 | { | ||
232 | struct vidi_context *ctx = get_vidi_context(dev); | ||
233 | |||
234 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
235 | |||
236 | if (ctx->suspended) | ||
237 | return -EPERM; | ||
238 | |||
239 | if (!test_and_set_bit(0, &ctx->irq_flags)) | ||
240 | ctx->vblank_on = true; | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static void vidi_disable_vblank(struct device *dev) | ||
246 | { | ||
247 | struct vidi_context *ctx = get_vidi_context(dev); | ||
248 | |||
249 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
250 | |||
251 | if (ctx->suspended) | ||
252 | return; | ||
253 | |||
254 | if (test_and_clear_bit(0, &ctx->irq_flags)) | ||
255 | ctx->vblank_on = false; | ||
256 | } | ||
257 | |||
258 | static struct exynos_drm_manager_ops vidi_manager_ops = { | ||
259 | .dpms = vidi_dpms, | ||
260 | .apply = vidi_apply, | ||
261 | .commit = vidi_commit, | ||
262 | .enable_vblank = vidi_enable_vblank, | ||
263 | .disable_vblank = vidi_disable_vblank, | ||
264 | }; | ||
265 | |||
266 | static void vidi_win_mode_set(struct device *dev, | ||
267 | struct exynos_drm_overlay *overlay) | ||
268 | { | ||
269 | struct vidi_context *ctx = get_vidi_context(dev); | ||
270 | struct vidi_win_data *win_data; | ||
271 | int win; | ||
272 | unsigned long offset; | ||
273 | |||
274 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
275 | |||
276 | if (!overlay) { | ||
277 | dev_err(dev, "overlay is NULL\n"); | ||
278 | return; | ||
279 | } | ||
280 | |||
281 | win = overlay->zpos; | ||
282 | if (win == DEFAULT_ZPOS) | ||
283 | win = ctx->default_win; | ||
284 | |||
285 | if (win < 0 || win >= WINDOWS_NR) | ||
286 | return; | ||
287 | |||
288 | offset = overlay->fb_x * (overlay->bpp >> 3); | ||
289 | offset += overlay->fb_y * overlay->pitch; | ||
290 | |||
291 | DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch); | ||
292 | |||
293 | win_data = &ctx->win_data[win]; | ||
294 | |||
295 | win_data->offset_x = overlay->crtc_x; | ||
296 | win_data->offset_y = overlay->crtc_y; | ||
297 | win_data->ovl_width = overlay->crtc_width; | ||
298 | win_data->ovl_height = overlay->crtc_height; | ||
299 | win_data->fb_width = overlay->fb_width; | ||
300 | win_data->fb_height = overlay->fb_height; | ||
301 | win_data->dma_addr = overlay->dma_addr[0] + offset; | ||
302 | win_data->vaddr = overlay->vaddr[0] + offset; | ||
303 | win_data->bpp = overlay->bpp; | ||
304 | win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * | ||
305 | (overlay->bpp >> 3); | ||
306 | win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); | ||
307 | |||
308 | /* | ||
309 | * Some fields of win_data would need to be exposed to user | ||
310 | * space through a driver-specific ioctl. | ||
311 | */ | ||
312 | |||
313 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", | ||
314 | win_data->offset_x, win_data->offset_y); | ||
315 | DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", | ||
316 | win_data->ovl_width, win_data->ovl_height); | ||
317 | DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", | ||
318 | (unsigned long)win_data->dma_addr, | ||
319 | (unsigned long)win_data->vaddr); | ||
320 | DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", | ||
321 | overlay->fb_width, overlay->crtc_width); | ||
322 | } | ||
323 | |||
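The offset and size arithmetic in vidi_win_mode_set() converts the framebuffer crop origin (fb_x, fb_y) into a byte offset and derives the per-scanline payload and padding. A worked example with hypothetical numbers, assuming a 1920x1080 XRGB8888 framebuffer (32 bpp, pitch = fb_width * 4) cropped to a 1280-pixel-wide region starting at (320, 180):

	/* pitch       = 1920 * 4                      = 7680 bytes per line
	 * offset      = 320 * (32 >> 3) + 180 * 7680  = 1383680 bytes
	 * line_size   = 1280 * (32 >> 3)              = 5120 bytes shown per line
	 * buf_offsize = (1920 - 1280) * (32 >> 3)     = 2560 bytes skipped per line
	 */

None of these figures come from the patch; they only illustrate the formulas above.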
324 | static void vidi_win_commit(struct device *dev, int zpos) | ||
325 | { | ||
326 | struct vidi_context *ctx = get_vidi_context(dev); | ||
327 | struct vidi_win_data *win_data; | ||
328 | int win = zpos; | ||
329 | |||
330 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
331 | |||
332 | if (ctx->suspended) | ||
333 | return; | ||
334 | |||
335 | if (win == DEFAULT_ZPOS) | ||
336 | win = ctx->default_win; | ||
337 | |||
338 | if (win < 0 || win >= WINDOWS_NR) | ||
339 | return; | ||
340 | |||
341 | win_data = &ctx->win_data[win]; | ||
342 | |||
343 | win_data->enabled = true; | ||
344 | |||
345 | DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); | ||
346 | |||
347 | if (ctx->vblank_on) | ||
348 | schedule_work(&ctx->work); | ||
349 | } | ||
350 | |||
351 | static void vidi_win_disable(struct device *dev, int zpos) | ||
352 | { | ||
353 | struct vidi_context *ctx = get_vidi_context(dev); | ||
354 | struct vidi_win_data *win_data; | ||
355 | int win = zpos; | ||
356 | |||
357 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
358 | |||
359 | if (win == DEFAULT_ZPOS) | ||
360 | win = ctx->default_win; | ||
361 | |||
362 | if (win < 0 || win >= WINDOWS_NR) | ||
363 | return; | ||
364 | |||
365 | win_data = &ctx->win_data[win]; | ||
366 | win_data->enabled = false; | ||
367 | |||
368 | /* TODO. */ | ||
369 | } | ||
370 | |||
371 | static struct exynos_drm_overlay_ops vidi_overlay_ops = { | ||
372 | .mode_set = vidi_win_mode_set, | ||
373 | .commit = vidi_win_commit, | ||
374 | .disable = vidi_win_disable, | ||
375 | }; | ||
376 | |||
377 | static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) | ||
378 | { | ||
379 | struct exynos_drm_private *dev_priv = drm_dev->dev_private; | ||
380 | struct drm_pending_vblank_event *e, *t; | ||
381 | struct timeval now; | ||
382 | unsigned long flags; | ||
383 | bool is_checked = false; | ||
384 | |||
385 | spin_lock_irqsave(&drm_dev->event_lock, flags); | ||
386 | |||
387 | list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list, | ||
388 | base.link) { | ||
389 | /* if the event's pipe doesn't match this crtc, ignore it. */ | ||
390 | if (crtc != e->pipe) | ||
391 | continue; | ||
392 | |||
393 | is_checked = true; | ||
394 | |||
395 | do_gettimeofday(&now); | ||
396 | e->event.sequence = 0; | ||
397 | e->event.tv_sec = now.tv_sec; | ||
398 | e->event.tv_usec = now.tv_usec; | ||
399 | |||
400 | list_move_tail(&e->base.link, &e->base.file_priv->event_list); | ||
401 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
402 | } | ||
403 | |||
404 | if (is_checked) { | ||
405 | /* | ||
406 | * Call drm_vblank_put() only if drm_vblank_get() was | ||
407 | * actually called. | ||
408 | */ | ||
409 | if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) | ||
410 | drm_vblank_put(drm_dev, crtc); | ||
411 | |||
412 | /* | ||
413 | * Don't turn vblank off here if vblank_disable_allowed is 1; | ||
414 | * the timer handler will disable it instead. | ||
415 | */ | ||
416 | if (!drm_dev->vblank_disable_allowed) | ||
417 | drm_vblank_off(drm_dev, crtc); | ||
418 | } | ||
419 | |||
420 | spin_unlock_irqrestore(&drm_dev->event_lock, flags); | ||
421 | } | ||
422 | |||
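vidi_finish_pageflip() timestamps each pending flip event, queues it on the DRM file's event list and wakes any waiting reader; user space then consumes it by reading the DRM fd. A rough user-space sketch of that consumption using the generic DRM event ABI (not part of the patch; the drm.h include path depends on the libdrm installation):

	#include <stdio.h>
	#include <unistd.h>
	#include <drm.h>	/* struct drm_event, struct drm_event_vblank */

	static void drain_flip_events(int drm_fd)
	{
		char buf[256];
		ssize_t len = read(drm_fd, buf, sizeof(buf));
		ssize_t pos = 0;

		while (pos + (ssize_t)sizeof(struct drm_event) <= len) {
			struct drm_event *e = (struct drm_event *)&buf[pos];

			if (e->type == DRM_EVENT_FLIP_COMPLETE) {
				struct drm_event_vblank *vb =
						(struct drm_event_vblank *)e;
				/* tv_sec/tv_usec were filled in by
				 * vidi_finish_pageflip() above */
				printf("flip done at %u.%06u\n",
				       vb->tv_sec, vb->tv_usec);
			}
			pos += e->length;
		}
	}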
423 | static void vidi_fake_vblank_handler(struct work_struct *work) | ||
424 | { | ||
425 | struct vidi_context *ctx = container_of(work, struct vidi_context, | ||
426 | work); | ||
427 | struct exynos_drm_subdrv *subdrv = &ctx->subdrv; | ||
428 | struct exynos_drm_manager *manager = &subdrv->manager; | ||
429 | |||
430 | if (manager->pipe < 0) | ||
431 | return; | ||
432 | |||
433 | /* emulate a refresh rate of roughly 50-62 Hz. */ | ||
434 | usleep_range(16000, 20000); | ||
435 | |||
436 | drm_handle_vblank(subdrv->drm_dev, manager->pipe); | ||
437 | vidi_finish_pageflip(subdrv->drm_dev, manager->pipe); | ||
438 | } | ||
439 | |||
440 | static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | ||
441 | { | ||
442 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
443 | |||
444 | /* | ||
445 | * Enable DRM irq mode. | ||
446 | * - with irq_enabled = 1, the vblank feature can be used. | ||
447 | * | ||
448 | * Note that the generic DRM irq handler is not used here; the | ||
449 | * driver installs its own handler instead, because the DRM | ||
450 | * framework supports only one irq handler. | ||
451 | */ | ||
452 | drm_dev->irq_enabled = 1; | ||
453 | |||
454 | /* | ||
455 | * With vblank_disable_allowed = 1, the vblank interrupt is disabled | ||
456 | * by the DRM timer once the current process gives up ownership of | ||
457 | * the vblank event (i.e. after drm_vblank_put() has been called). | ||
458 | */ | ||
459 | drm_dev->vblank_disable_allowed = 1; | ||
460 | |||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | static void vidi_subdrv_remove(struct drm_device *drm_dev) | ||
465 | { | ||
466 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
467 | |||
468 | /* TODO. */ | ||
469 | } | ||
470 | |||
471 | static int vidi_power_on(struct vidi_context *ctx, bool enable) | ||
472 | { | ||
473 | struct exynos_drm_subdrv *subdrv = &ctx->subdrv; | ||
474 | struct device *dev = subdrv->manager.dev; | ||
475 | |||
476 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
477 | |||
478 | if (enable != false && enable != true) | ||
479 | return -EINVAL; | ||
480 | |||
481 | if (enable) { | ||
482 | ctx->suspended = false; | ||
483 | |||
484 | /* if vblank was previously enabled, enable it again. */ | ||
485 | if (test_and_clear_bit(0, &ctx->irq_flags)) | ||
486 | vidi_enable_vblank(dev); | ||
487 | |||
488 | vidi_apply(dev); | ||
489 | } else { | ||
490 | ctx->suspended = true; | ||
491 | } | ||
492 | |||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | static int vidi_show_connection(struct device *dev, | ||
497 | struct device_attribute *attr, char *buf) | ||
498 | { | ||
499 | int rc; | ||
500 | struct vidi_context *ctx = get_vidi_context(dev); | ||
501 | |||
502 | mutex_lock(&ctx->lock); | ||
503 | |||
504 | rc = sprintf(buf, "%d\n", ctx->connected); | ||
505 | |||
506 | mutex_unlock(&ctx->lock); | ||
507 | |||
508 | return rc; | ||
509 | } | ||
510 | |||
511 | static int vidi_store_connection(struct device *dev, | ||
512 | struct device_attribute *attr, | ||
513 | const char *buf, size_t len) | ||
514 | { | ||
515 | struct vidi_context *ctx = get_vidi_context(dev); | ||
516 | int ret; | ||
517 | |||
518 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
519 | |||
520 | ret = kstrtoint(buf, 0, &ctx->connected); | ||
521 | if (ret) | ||
522 | return ret; | ||
523 | |||
524 | if (ctx->connected > 1) | ||
525 | return -EINVAL; | ||
526 | |||
527 | DRM_DEBUG_KMS("requested connection.\n"); | ||
528 | |||
529 | drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); | ||
530 | |||
531 | return len; | ||
532 | } | ||
533 | |||
534 | static DEVICE_ATTR(connection, 0644, vidi_show_connection, | ||
535 | vidi_store_connection); | ||
536 | |||
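The connection attribute registered above provides a sysfs path for toggling the fake connection; the ioctl below additionally carries EDID data. A small user-space sketch; the sysfs path shown is an assumption derived from the "exynos-drm-vidi" platform device name and may differ on a given system:

	#include <fcntl.h>
	#include <unistd.h>

	static int vidi_set_connected(int connected)
	{
		/* hypothetical path; locate the real node under
		 * /sys/devices/platform/ on the target */
		int fd = open("/sys/devices/platform/exynos-drm-vidi/connection",
			      O_WRONLY);

		if (fd < 0)
			return -1;
		/* vidi_store_connection() expects "0" or "1" */
		if (write(fd, connected ? "1" : "0", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}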
537 | int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, | ||
538 | struct drm_file *file_priv) | ||
539 | { | ||
540 | struct vidi_context *ctx = NULL; | ||
541 | struct drm_encoder *encoder; | ||
542 | struct exynos_drm_manager *manager; | ||
543 | struct exynos_drm_display_ops *display_ops; | ||
544 | struct drm_exynos_vidi_connection *vidi = data; | ||
545 | |||
546 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
547 | |||
548 | if (!vidi) { | ||
549 | DRM_DEBUG_KMS("user data for vidi is null.\n"); | ||
550 | return -EINVAL; | ||
551 | } | ||
552 | |||
553 | if (!vidi->edid) { | ||
554 | DRM_DEBUG_KMS("edid data is null.\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | |||
558 | if (vidi->connection > 1) { | ||
559 | DRM_DEBUG_KMS("connection should be 0 or 1.\n"); | ||
560 | return -EINVAL; | ||
561 | } | ||
562 | |||
563 | list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list, | ||
564 | head) { | ||
565 | manager = exynos_drm_get_manager(encoder); | ||
566 | display_ops = manager->display_ops; | ||
567 | |||
568 | if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) { | ||
569 | ctx = get_vidi_context(manager->dev); | ||
570 | break; | ||
571 | } | ||
572 | } | ||
573 | |||
574 | if (!ctx) { | ||
575 | DRM_DEBUG_KMS("not found virtual device type encoder.\n"); | ||
576 | return -EINVAL; | ||
577 | } | ||
578 | |||
579 | if (ctx->connected == vidi->connection) { | ||
580 | DRM_DEBUG_KMS("same connection request.\n"); | ||
581 | return -EINVAL; | ||
582 | } | ||
583 | |||
584 | if (vidi->connection) | ||
585 | ctx->raw_edid = (struct edid *)vidi->edid; | ||
586 | |||
587 | ctx->connected = vidi->connection; | ||
588 | drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); | ||
589 | |||
590 | return 0; | ||
591 | } | ||
592 | |||
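For completeness, a rough user-space counterpart to vidi_connection_ioctl(). The request macro and the layout of struct drm_exynos_vidi_connection come from the exynos_drm uapi header; the field names match the handler above, but the field types (the edid member is shown as a 64-bit value holding a user pointer) are assumptions, not taken from the patch:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <exynos_drm.h>	/* uapi header; include path varies */

	static int vidi_fake_hotplug(int drm_fd, const void *edid_blob)
	{
		struct drm_exynos_vidi_connection conn;

		memset(&conn, 0, sizeof(conn));
		conn.connection = 1;	/* 1 = connect, 0 = disconnect */
		/* consumed later by vidi_get_edid(); assumed to be a
		 * u64 carrying a user-space pointer */
		conn.edid = (uint64_t)(uintptr_t)edid_blob;

		return ioctl(drm_fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &conn);
	}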
593 | static int __devinit vidi_probe(struct platform_device *pdev) | ||
594 | { | ||
595 | struct device *dev = &pdev->dev; | ||
596 | struct vidi_context *ctx; | ||
597 | struct exynos_drm_subdrv *subdrv; | ||
598 | int ret; | ||
599 | |||
600 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
601 | |||
602 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
603 | if (!ctx) | ||
604 | return -ENOMEM; | ||
605 | |||
606 | ctx->default_win = 0; | ||
607 | |||
608 | INIT_WORK(&ctx->work, vidi_fake_vblank_handler); | ||
609 | |||
610 | /* for test */ | ||
611 | ctx->raw_edid = (struct edid *)fake_edid_info; | ||
612 | |||
613 | subdrv = &ctx->subdrv; | ||
614 | subdrv->probe = vidi_subdrv_probe; | ||
615 | subdrv->remove = vidi_subdrv_remove; | ||
616 | subdrv->manager.pipe = -1; | ||
617 | subdrv->manager.ops = &vidi_manager_ops; | ||
618 | subdrv->manager.overlay_ops = &vidi_overlay_ops; | ||
619 | subdrv->manager.display_ops = &vidi_display_ops; | ||
620 | subdrv->manager.dev = dev; | ||
621 | |||
622 | mutex_init(&ctx->lock); | ||
623 | |||
624 | platform_set_drvdata(pdev, ctx); | ||
625 | |||
626 | ret = device_create_file(&pdev->dev, &dev_attr_connection); | ||
627 | if (ret < 0) | ||
628 | DRM_INFO("failed to create connection sysfs.\n"); | ||
629 | |||
630 | exynos_drm_subdrv_register(subdrv); | ||
631 | |||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | static int __devexit vidi_remove(struct platform_device *pdev) | ||
636 | { | ||
637 | struct vidi_context *ctx = platform_get_drvdata(pdev); | ||
638 | |||
639 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
640 | |||
641 | exynos_drm_subdrv_unregister(&ctx->subdrv); | ||
642 | |||
643 | kfree(ctx); | ||
644 | |||
645 | return 0; | ||
646 | } | ||
647 | |||
648 | #ifdef CONFIG_PM_SLEEP | ||
649 | static int vidi_suspend(struct device *dev) | ||
650 | { | ||
651 | struct vidi_context *ctx = get_vidi_context(dev); | ||
652 | |||
653 | return vidi_power_on(ctx, false); | ||
654 | } | ||
655 | |||
656 | static int vidi_resume(struct device *dev) | ||
657 | { | ||
658 | struct vidi_context *ctx = get_vidi_context(dev); | ||
659 | |||
660 | return vidi_power_on(ctx, true); | ||
661 | } | ||
662 | #endif | ||
663 | |||
664 | static const struct dev_pm_ops vidi_pm_ops = { | ||
665 | SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume) | ||
666 | }; | ||
667 | |||
668 | struct platform_driver vidi_driver = { | ||
669 | .probe = vidi_probe, | ||
670 | .remove = __devexit_p(vidi_remove), | ||
671 | .driver = { | ||
672 | .name = "exynos-drm-vidi", | ||
673 | .owner = THIS_MODULE, | ||
674 | .pm = &vidi_pm_ops, | ||
675 | }, | ||
676 | }; | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h new file mode 100644 index 000000000000..a4babe4e65d7 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* exynos_drm_vidi.h | ||
2 | * | ||
3 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
4 | * Author: Inki Dae <inki.dae@samsung.com> | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
23 | * OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #ifndef _EXYNOS_DRM_VIDI_H_ | ||
27 | #define _EXYNOS_DRM_VIDI_H_ | ||
28 | |||
29 | #ifdef CONFIG_DRM_EXYNOS_VIDI | ||
30 | int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, | ||
31 | struct drm_file *file_priv); | ||
32 | #else | ||
33 | #define vidi_connection_ioctl NULL | ||
34 | #endif | ||
35 | |||
36 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 3429d3fd93f3..575a8cbd3533 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -41,44 +41,83 @@ | |||
41 | #include "exynos_hdmi.h" | 41 | #include "exynos_hdmi.h" |
42 | 42 | ||
43 | #define HDMI_OVERLAY_NUMBER 3 | 43 | #define HDMI_OVERLAY_NUMBER 3 |
44 | #define MAX_WIDTH 1920 | ||
45 | #define MAX_HEIGHT 1080 | ||
44 | #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) | 46 | #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) |
45 | 47 | ||
46 | static const u8 hdmiphy_conf27[32] = { | 48 | struct hdmi_resources { |
49 | struct clk *hdmi; | ||
50 | struct clk *sclk_hdmi; | ||
51 | struct clk *sclk_pixel; | ||
52 | struct clk *sclk_hdmiphy; | ||
53 | struct clk *hdmiphy; | ||
54 | struct regulator_bulk_data *regul_bulk; | ||
55 | int regul_count; | ||
56 | }; | ||
57 | |||
58 | struct hdmi_context { | ||
59 | struct device *dev; | ||
60 | struct drm_device *drm_dev; | ||
61 | struct fb_videomode *default_timing; | ||
62 | unsigned int is_v13:1; | ||
63 | unsigned int default_win; | ||
64 | unsigned int default_bpp; | ||
65 | bool hpd_handle; | ||
66 | bool enabled; | ||
67 | |||
68 | struct resource *regs_res; | ||
69 | void __iomem *regs; | ||
70 | unsigned int irq; | ||
71 | struct workqueue_struct *wq; | ||
72 | struct work_struct hotplug_work; | ||
73 | |||
74 | struct i2c_client *ddc_port; | ||
75 | struct i2c_client *hdmiphy_port; | ||
76 | |||
77 | /* current hdmiphy conf index */ | ||
78 | int cur_conf; | ||
79 | |||
80 | struct hdmi_resources res; | ||
81 | void *parent_ctx; | ||
82 | }; | ||
83 | |||
84 | /* HDMI Version 1.3 */ | ||
85 | static const u8 hdmiphy_v13_conf27[32] = { | ||
47 | 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, | 86 | 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, |
48 | 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, | 87 | 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, |
49 | 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, | 88 | 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, |
50 | 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, | 89 | 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, |
51 | }; | 90 | }; |
52 | 91 | ||
53 | static const u8 hdmiphy_conf27_027[32] = { | 92 | static const u8 hdmiphy_v13_conf27_027[32] = { |
54 | 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, | 93 | 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, |
55 | 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, | 94 | 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, |
56 | 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, | 95 | 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, |
57 | 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, | 96 | 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, |
58 | }; | 97 | }; |
59 | 98 | ||
60 | static const u8 hdmiphy_conf74_175[32] = { | 99 | static const u8 hdmiphy_v13_conf74_175[32] = { |
61 | 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, | 100 | 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, |
62 | 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, | 101 | 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, |
63 | 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, | 102 | 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, |
64 | 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, | 103 | 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, |
65 | }; | 104 | }; |
66 | 105 | ||
67 | static const u8 hdmiphy_conf74_25[32] = { | 106 | static const u8 hdmiphy_v13_conf74_25[32] = { |
68 | 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, | 107 | 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, |
69 | 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, | 108 | 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, |
70 | 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, | 109 | 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, |
71 | 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, | 110 | 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, |
72 | }; | 111 | }; |
73 | 112 | ||
74 | static const u8 hdmiphy_conf148_5[32] = { | 113 | static const u8 hdmiphy_v13_conf148_5[32] = { |
75 | 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, | 114 | 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, |
76 | 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, | 115 | 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, |
77 | 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, | 116 | 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, |
78 | 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, | 117 | 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, |
79 | }; | 118 | }; |
80 | 119 | ||
81 | struct hdmi_tg_regs { | 120 | struct hdmi_v13_tg_regs { |
82 | u8 cmd; | 121 | u8 cmd; |
83 | u8 h_fsz_l; | 122 | u8 h_fsz_l; |
84 | u8 h_fsz_h; | 123 | u8 h_fsz_h; |
@@ -110,7 +149,7 @@ struct hdmi_tg_regs { | |||
110 | u8 field_bot_hdmi_h; | 149 | u8 field_bot_hdmi_h; |
111 | }; | 150 | }; |
112 | 151 | ||
113 | struct hdmi_core_regs { | 152 | struct hdmi_v13_core_regs { |
114 | u8 h_blank[2]; | 153 | u8 h_blank[2]; |
115 | u8 v_blank[3]; | 154 | u8 v_blank[3]; |
116 | u8 h_v_line[3]; | 155 | u8 h_v_line[3]; |
@@ -123,12 +162,21 @@ struct hdmi_core_regs { | |||
123 | u8 v_sync_gen3[3]; | 162 | u8 v_sync_gen3[3]; |
124 | }; | 163 | }; |
125 | 164 | ||
126 | struct hdmi_preset_conf { | 165 | struct hdmi_v13_preset_conf { |
127 | struct hdmi_core_regs core; | 166 | struct hdmi_v13_core_regs core; |
128 | struct hdmi_tg_regs tg; | 167 | struct hdmi_v13_tg_regs tg; |
129 | }; | 168 | }; |
130 | 169 | ||
131 | static const struct hdmi_preset_conf hdmi_conf_480p = { | 170 | struct hdmi_v13_conf { |
171 | int width; | ||
172 | int height; | ||
173 | int vrefresh; | ||
174 | bool interlace; | ||
175 | const u8 *hdmiphy_data; | ||
176 | const struct hdmi_v13_preset_conf *conf; | ||
177 | }; | ||
178 | |||
179 | static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = { | ||
132 | .core = { | 180 | .core = { |
133 | .h_blank = {0x8a, 0x00}, | 181 | .h_blank = {0x8a, 0x00}, |
134 | .v_blank = {0x0d, 0x6a, 0x01}, | 182 | .v_blank = {0x0d, 0x6a, 0x01}, |
@@ -154,7 +202,7 @@ static const struct hdmi_preset_conf hdmi_conf_480p = { | |||
154 | }, | 202 | }, |
155 | }; | 203 | }; |
156 | 204 | ||
157 | static const struct hdmi_preset_conf hdmi_conf_720p60 = { | 205 | static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = { |
158 | .core = { | 206 | .core = { |
159 | .h_blank = {0x72, 0x01}, | 207 | .h_blank = {0x72, 0x01}, |
160 | .v_blank = {0xee, 0xf2, 0x00}, | 208 | .v_blank = {0xee, 0xf2, 0x00}, |
@@ -182,7 +230,7 @@ static const struct hdmi_preset_conf hdmi_conf_720p60 = { | |||
182 | }, | 230 | }, |
183 | }; | 231 | }; |
184 | 232 | ||
185 | static const struct hdmi_preset_conf hdmi_conf_1080i50 = { | 233 | static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = { |
186 | .core = { | 234 | .core = { |
187 | .h_blank = {0xd0, 0x02}, | 235 | .h_blank = {0xd0, 0x02}, |
188 | .v_blank = {0x32, 0xB2, 0x00}, | 236 | .v_blank = {0x32, 0xB2, 0x00}, |
@@ -210,7 +258,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080i50 = { | |||
210 | }, | 258 | }, |
211 | }; | 259 | }; |
212 | 260 | ||
213 | static const struct hdmi_preset_conf hdmi_conf_1080p50 = { | 261 | static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = { |
214 | .core = { | 262 | .core = { |
215 | .h_blank = {0xd0, 0x02}, | 263 | .h_blank = {0xd0, 0x02}, |
216 | .v_blank = {0x65, 0x6c, 0x01}, | 264 | .v_blank = {0x65, 0x6c, 0x01}, |
@@ -238,7 +286,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080p50 = { | |||
238 | }, | 286 | }, |
239 | }; | 287 | }; |
240 | 288 | ||
241 | static const struct hdmi_preset_conf hdmi_conf_1080i60 = { | 289 | static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = { |
242 | .core = { | 290 | .core = { |
243 | .h_blank = {0x18, 0x01}, | 291 | .h_blank = {0x18, 0x01}, |
244 | .v_blank = {0x32, 0xB2, 0x00}, | 292 | .v_blank = {0x32, 0xB2, 0x00}, |
@@ -266,7 +314,7 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = { | |||
266 | }, | 314 | }, |
267 | }; | 315 | }; |
268 | 316 | ||
269 | static const struct hdmi_preset_conf hdmi_conf_1080p60 = { | 317 | static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = { |
270 | .core = { | 318 | .core = { |
271 | .h_blank = {0x18, 0x01}, | 319 | .h_blank = {0x18, 0x01}, |
272 | .v_blank = {0x65, 0x6c, 0x01}, | 320 | .v_blank = {0x65, 0x6c, 0x01}, |
@@ -294,13 +342,530 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = { | |||
294 | }, | 342 | }, |
295 | }; | 343 | }; |
296 | 344 | ||
345 | static const struct hdmi_v13_conf hdmi_v13_confs[] = { | ||
346 | { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, | ||
347 | { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, | ||
348 | { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, | ||
349 | { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, | ||
350 | { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, | ||
351 | &hdmi_v13_conf_1080p50 }, | ||
352 | { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, | ||
353 | { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, | ||
354 | &hdmi_v13_conf_1080p60 }, | ||
355 | }; | ||
356 | |||
357 | /* HDMI Version 1.4 */ | ||
358 | static const u8 hdmiphy_conf27_027[32] = { | ||
359 | 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, | ||
360 | 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, | ||
361 | 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, | ||
362 | 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, | ||
363 | }; | ||
364 | |||
365 | static const u8 hdmiphy_conf74_25[32] = { | ||
366 | 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, | ||
367 | 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, | ||
368 | 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, | ||
369 | 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, | ||
370 | }; | ||
371 | |||
372 | static const u8 hdmiphy_conf148_5[32] = { | ||
373 | 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08, | ||
374 | 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, | ||
375 | 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, | ||
376 | 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, | ||
377 | }; | ||
378 | |||
379 | struct hdmi_tg_regs { | ||
380 | u8 cmd; | ||
381 | u8 h_fsz_l; | ||
382 | u8 h_fsz_h; | ||
383 | u8 hact_st_l; | ||
384 | u8 hact_st_h; | ||
385 | u8 hact_sz_l; | ||
386 | u8 hact_sz_h; | ||
387 | u8 v_fsz_l; | ||
388 | u8 v_fsz_h; | ||
389 | u8 vsync_l; | ||
390 | u8 vsync_h; | ||
391 | u8 vsync2_l; | ||
392 | u8 vsync2_h; | ||
393 | u8 vact_st_l; | ||
394 | u8 vact_st_h; | ||
395 | u8 vact_sz_l; | ||
396 | u8 vact_sz_h; | ||
397 | u8 field_chg_l; | ||
398 | u8 field_chg_h; | ||
399 | u8 vact_st2_l; | ||
400 | u8 vact_st2_h; | ||
401 | u8 vact_st3_l; | ||
402 | u8 vact_st3_h; | ||
403 | u8 vact_st4_l; | ||
404 | u8 vact_st4_h; | ||
405 | u8 vsync_top_hdmi_l; | ||
406 | u8 vsync_top_hdmi_h; | ||
407 | u8 vsync_bot_hdmi_l; | ||
408 | u8 vsync_bot_hdmi_h; | ||
409 | u8 field_top_hdmi_l; | ||
410 | u8 field_top_hdmi_h; | ||
411 | u8 field_bot_hdmi_l; | ||
412 | u8 field_bot_hdmi_h; | ||
413 | u8 tg_3d; | ||
414 | }; | ||
415 | |||
416 | struct hdmi_core_regs { | ||
417 | u8 h_blank[2]; | ||
418 | u8 v2_blank[2]; | ||
419 | u8 v1_blank[2]; | ||
420 | u8 v_line[2]; | ||
421 | u8 h_line[2]; | ||
422 | u8 hsync_pol[1]; | ||
423 | u8 vsync_pol[1]; | ||
424 | u8 int_pro_mode[1]; | ||
425 | u8 v_blank_f0[2]; | ||
426 | u8 v_blank_f1[2]; | ||
427 | u8 h_sync_start[2]; | ||
428 | u8 h_sync_end[2]; | ||
429 | u8 v_sync_line_bef_2[2]; | ||
430 | u8 v_sync_line_bef_1[2]; | ||
431 | u8 v_sync_line_aft_2[2]; | ||
432 | u8 v_sync_line_aft_1[2]; | ||
433 | u8 v_sync_line_aft_pxl_2[2]; | ||
434 | u8 v_sync_line_aft_pxl_1[2]; | ||
435 | u8 v_blank_f2[2]; /* for 3D mode */ | ||
436 | u8 v_blank_f3[2]; /* for 3D mode */ | ||
437 | u8 v_blank_f4[2]; /* for 3D mode */ | ||
438 | u8 v_blank_f5[2]; /* for 3D mode */ | ||
439 | u8 v_sync_line_aft_3[2]; | ||
440 | u8 v_sync_line_aft_4[2]; | ||
441 | u8 v_sync_line_aft_5[2]; | ||
442 | u8 v_sync_line_aft_6[2]; | ||
443 | u8 v_sync_line_aft_pxl_3[2]; | ||
444 | u8 v_sync_line_aft_pxl_4[2]; | ||
445 | u8 v_sync_line_aft_pxl_5[2]; | ||
446 | u8 v_sync_line_aft_pxl_6[2]; | ||
447 | u8 vact_space_1[2]; | ||
448 | u8 vact_space_2[2]; | ||
449 | u8 vact_space_3[2]; | ||
450 | u8 vact_space_4[2]; | ||
451 | u8 vact_space_5[2]; | ||
452 | u8 vact_space_6[2]; | ||
453 | }; | ||
454 | |||
455 | struct hdmi_preset_conf { | ||
456 | struct hdmi_core_regs core; | ||
457 | struct hdmi_tg_regs tg; | ||
458 | }; | ||
459 | |||
460 | struct hdmi_conf { | ||
461 | int width; | ||
462 | int height; | ||
463 | int vrefresh; | ||
464 | bool interlace; | ||
465 | const u8 *hdmiphy_data; | ||
466 | const struct hdmi_preset_conf *conf; | ||
467 | }; | ||
468 | |||
469 | static const struct hdmi_preset_conf hdmi_conf_480p60 = { | ||
470 | .core = { | ||
471 | .h_blank = {0x8a, 0x00}, | ||
472 | .v2_blank = {0x0d, 0x02}, | ||
473 | .v1_blank = {0x2d, 0x00}, | ||
474 | .v_line = {0x0d, 0x02}, | ||
475 | .h_line = {0x5a, 0x03}, | ||
476 | .hsync_pol = {0x01}, | ||
477 | .vsync_pol = {0x01}, | ||
478 | .int_pro_mode = {0x00}, | ||
479 | .v_blank_f0 = {0xff, 0xff}, | ||
480 | .v_blank_f1 = {0xff, 0xff}, | ||
481 | .h_sync_start = {0x0e, 0x00}, | ||
482 | .h_sync_end = {0x4c, 0x00}, | ||
483 | .v_sync_line_bef_2 = {0x0f, 0x00}, | ||
484 | .v_sync_line_bef_1 = {0x09, 0x00}, | ||
485 | .v_sync_line_aft_2 = {0xff, 0xff}, | ||
486 | .v_sync_line_aft_1 = {0xff, 0xff}, | ||
487 | .v_sync_line_aft_pxl_2 = {0xff, 0xff}, | ||
488 | .v_sync_line_aft_pxl_1 = {0xff, 0xff}, | ||
489 | .v_blank_f2 = {0xff, 0xff}, | ||
490 | .v_blank_f3 = {0xff, 0xff}, | ||
491 | .v_blank_f4 = {0xff, 0xff}, | ||
492 | .v_blank_f5 = {0xff, 0xff}, | ||
493 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
494 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
495 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
496 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
497 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
498 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
499 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
500 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
501 | .vact_space_1 = {0xff, 0xff}, | ||
502 | .vact_space_2 = {0xff, 0xff}, | ||
503 | .vact_space_3 = {0xff, 0xff}, | ||
504 | .vact_space_4 = {0xff, 0xff}, | ||
505 | .vact_space_5 = {0xff, 0xff}, | ||
506 | .vact_space_6 = {0xff, 0xff}, | ||
507 | /* remaining fields are don't-cares */ | ||
508 | }, | ||
509 | .tg = { | ||
510 | 0x00, /* cmd */ | ||
511 | 0x5a, 0x03, /* h_fsz */ | ||
512 | 0x8a, 0x00, 0xd0, 0x02, /* hact */ | ||
513 | 0x0d, 0x02, /* v_fsz */ | ||
514 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
515 | 0x2d, 0x00, 0xe0, 0x01, /* vact */ | ||
516 | 0x33, 0x02, /* field_chg */ | ||
517 | 0x48, 0x02, /* vact_st2 */ | ||
518 | 0x00, 0x00, /* vact_st3 */ | ||
519 | 0x00, 0x00, /* vact_st4 */ | ||
520 | 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ | ||
521 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
522 | 0x00, /* 3d FP */ | ||
523 | }, | ||
524 | }; | ||
525 | |||
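The core timing fields above are stored as {low byte, high byte} pairs, i.e. value = low | (high << 8). Decoding the 480p60 preset recovers the standard 720x480@60 totals, which is a quick way to sanity-check these tables:

	/*
	 * h_line   = {0x5a, 0x03} -> 0x035a = 858 total pixels per line
	 * h_blank  = {0x8a, 0x00} -> 0x008a = 138 blanking pixels (858 - 720 active)
	 * v_line   = {0x0d, 0x02} -> 0x020d = 525 total lines per frame
	 * v1_blank = {0x2d, 0x00} -> 0x002d =  45 blanking lines  (525 - 480 active)
	 */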
526 | static const struct hdmi_preset_conf hdmi_conf_720p50 = { | ||
527 | .core = { | ||
528 | .h_blank = {0xbc, 0x02}, | ||
529 | .v2_blank = {0xee, 0x02}, | ||
530 | .v1_blank = {0x1e, 0x00}, | ||
531 | .v_line = {0xee, 0x02}, | ||
532 | .h_line = {0xbc, 0x07}, | ||
533 | .hsync_pol = {0x00}, | ||
534 | .vsync_pol = {0x00}, | ||
535 | .int_pro_mode = {0x00}, | ||
536 | .v_blank_f0 = {0xff, 0xff}, | ||
537 | .v_blank_f1 = {0xff, 0xff}, | ||
538 | .h_sync_start = {0xb6, 0x01}, | ||
539 | .h_sync_end = {0xde, 0x01}, | ||
540 | .v_sync_line_bef_2 = {0x0a, 0x00}, | ||
541 | .v_sync_line_bef_1 = {0x05, 0x00}, | ||
542 | .v_sync_line_aft_2 = {0xff, 0xff}, | ||
543 | .v_sync_line_aft_1 = {0xff, 0xff}, | ||
544 | .v_sync_line_aft_pxl_2 = {0xff, 0xff}, | ||
545 | .v_sync_line_aft_pxl_1 = {0xff, 0xff}, | ||
546 | .v_blank_f2 = {0xff, 0xff}, | ||
547 | .v_blank_f3 = {0xff, 0xff}, | ||
548 | .v_blank_f4 = {0xff, 0xff}, | ||
549 | .v_blank_f5 = {0xff, 0xff}, | ||
550 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
551 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
552 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
553 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
554 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
555 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
556 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
557 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
558 | .vact_space_1 = {0xff, 0xff}, | ||
559 | .vact_space_2 = {0xff, 0xff}, | ||
560 | .vact_space_3 = {0xff, 0xff}, | ||
561 | .vact_space_4 = {0xff, 0xff}, | ||
562 | .vact_space_5 = {0xff, 0xff}, | ||
563 | .vact_space_6 = {0xff, 0xff}, | ||
564 | /* remaining fields are don't-cares */ | ||
565 | }, | ||
566 | .tg = { | ||
567 | 0x00, /* cmd */ | ||
568 | 0xbc, 0x07, /* h_fsz */ | ||
569 | 0xbc, 0x02, 0x00, 0x05, /* hact */ | ||
570 | 0xee, 0x02, /* v_fsz */ | ||
571 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
572 | 0x1e, 0x00, 0xd0, 0x02, /* vact */ | ||
573 | 0x33, 0x02, /* field_chg */ | ||
574 | 0x48, 0x02, /* vact_st2 */ | ||
575 | 0x00, 0x00, /* vact_st3 */ | ||
576 | 0x00, 0x00, /* vact_st4 */ | ||
577 | 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ | ||
578 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
579 | 0x00, /* 3d FP */ | ||
580 | }, | ||
581 | }; | ||
582 | |||
583 | static const struct hdmi_preset_conf hdmi_conf_720p60 = { | ||
584 | .core = { | ||
585 | .h_blank = {0x72, 0x01}, | ||
586 | .v2_blank = {0xee, 0x02}, | ||
587 | .v1_blank = {0x1e, 0x00}, | ||
588 | .v_line = {0xee, 0x02}, | ||
589 | .h_line = {0x72, 0x06}, | ||
590 | .hsync_pol = {0x00}, | ||
591 | .vsync_pol = {0x00}, | ||
592 | .int_pro_mode = {0x00}, | ||
593 | .v_blank_f0 = {0xff, 0xff}, | ||
594 | .v_blank_f1 = {0xff, 0xff}, | ||
595 | .h_sync_start = {0x6c, 0x00}, | ||
596 | .h_sync_end = {0x94, 0x00}, | ||
597 | .v_sync_line_bef_2 = {0x0a, 0x00}, | ||
598 | .v_sync_line_bef_1 = {0x05, 0x00}, | ||
599 | .v_sync_line_aft_2 = {0xff, 0xff}, | ||
600 | .v_sync_line_aft_1 = {0xff, 0xff}, | ||
601 | .v_sync_line_aft_pxl_2 = {0xff, 0xff}, | ||
602 | .v_sync_line_aft_pxl_1 = {0xff, 0xff}, | ||
603 | .v_blank_f2 = {0xff, 0xff}, | ||
604 | .v_blank_f3 = {0xff, 0xff}, | ||
605 | .v_blank_f4 = {0xff, 0xff}, | ||
606 | .v_blank_f5 = {0xff, 0xff}, | ||
607 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
608 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
609 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
610 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
611 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
612 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
613 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
614 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
615 | .vact_space_1 = {0xff, 0xff}, | ||
616 | .vact_space_2 = {0xff, 0xff}, | ||
617 | .vact_space_3 = {0xff, 0xff}, | ||
618 | .vact_space_4 = {0xff, 0xff}, | ||
619 | .vact_space_5 = {0xff, 0xff}, | ||
620 | .vact_space_6 = {0xff, 0xff}, | ||
621 | /* remaining fields are don't-cares */ | ||
622 | }, | ||
623 | .tg = { | ||
624 | 0x00, /* cmd */ | ||
625 | 0x72, 0x06, /* h_fsz */ | ||
626 | 0x72, 0x01, 0x00, 0x05, /* hact */ | ||
627 | 0xee, 0x02, /* v_fsz */ | ||
628 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
629 | 0x1e, 0x00, 0xd0, 0x02, /* vact */ | ||
630 | 0x33, 0x02, /* field_chg */ | ||
631 | 0x48, 0x02, /* vact_st2 */ | ||
632 | 0x00, 0x00, /* vact_st3 */ | ||
633 | 0x00, 0x00, /* vact_st4 */ | ||
634 | 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ | ||
635 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
636 | 0x00, /* 3d FP */ | ||
637 | }, | ||
638 | }; | ||
639 | |||
640 | static const struct hdmi_preset_conf hdmi_conf_1080i50 = { | ||
641 | .core = { | ||
642 | .h_blank = {0xd0, 0x02}, | ||
643 | .v2_blank = {0x32, 0x02}, | ||
644 | .v1_blank = {0x16, 0x00}, | ||
645 | .v_line = {0x65, 0x04}, | ||
646 | .h_line = {0x50, 0x0a}, | ||
647 | .hsync_pol = {0x00}, | ||
648 | .vsync_pol = {0x00}, | ||
649 | .int_pro_mode = {0x01}, | ||
650 | .v_blank_f0 = {0x49, 0x02}, | ||
651 | .v_blank_f1 = {0x65, 0x04}, | ||
652 | .h_sync_start = {0x0e, 0x02}, | ||
653 | .h_sync_end = {0x3a, 0x02}, | ||
654 | .v_sync_line_bef_2 = {0x07, 0x00}, | ||
655 | .v_sync_line_bef_1 = {0x02, 0x00}, | ||
656 | .v_sync_line_aft_2 = {0x39, 0x02}, | ||
657 | .v_sync_line_aft_1 = {0x34, 0x02}, | ||
658 | .v_sync_line_aft_pxl_2 = {0x38, 0x07}, | ||
659 | .v_sync_line_aft_pxl_1 = {0x38, 0x07}, | ||
660 | .v_blank_f2 = {0xff, 0xff}, | ||
661 | .v_blank_f3 = {0xff, 0xff}, | ||
662 | .v_blank_f4 = {0xff, 0xff}, | ||
663 | .v_blank_f5 = {0xff, 0xff}, | ||
664 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
665 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
666 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
667 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
668 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
669 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
670 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
671 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
672 | .vact_space_1 = {0xff, 0xff}, | ||
673 | .vact_space_2 = {0xff, 0xff}, | ||
674 | .vact_space_3 = {0xff, 0xff}, | ||
675 | .vact_space_4 = {0xff, 0xff}, | ||
676 | .vact_space_5 = {0xff, 0xff}, | ||
677 | .vact_space_6 = {0xff, 0xff}, | ||
678 | /* remaining fields are don't-cares */ | ||
679 | }, | ||
680 | .tg = { | ||
681 | 0x00, /* cmd */ | ||
682 | 0x50, 0x0a, /* h_fsz */ | ||
683 | 0xd0, 0x02, 0x80, 0x07, /* hact */ | ||
684 | 0x65, 0x04, /* v_fsz */ | ||
685 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
686 | 0x16, 0x00, 0x1c, 0x02, /* vact */ | ||
687 | 0x33, 0x02, /* field_chg */ | ||
688 | 0x49, 0x02, /* vact_st2 */ | ||
689 | 0x00, 0x00, /* vact_st3 */ | ||
690 | 0x00, 0x00, /* vact_st4 */ | ||
691 | 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ | ||
692 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
693 | 0x00, /* 3d FP */ | ||
694 | }, | ||
695 | }; | ||
696 | |||
697 | static const struct hdmi_preset_conf hdmi_conf_1080i60 = { | ||
698 | .core = { | ||
699 | .h_blank = {0x18, 0x01}, | ||
700 | .v2_blank = {0x32, 0x02}, | ||
701 | .v1_blank = {0x16, 0x00}, | ||
702 | .v_line = {0x65, 0x04}, | ||
703 | .h_line = {0x98, 0x08}, | ||
704 | .hsync_pol = {0x00}, | ||
705 | .vsync_pol = {0x00}, | ||
706 | .int_pro_mode = {0x01}, | ||
707 | .v_blank_f0 = {0x49, 0x02}, | ||
708 | .v_blank_f1 = {0x65, 0x04}, | ||
709 | .h_sync_start = {0x56, 0x00}, | ||
710 | .h_sync_end = {0x82, 0x00}, | ||
711 | .v_sync_line_bef_2 = {0x07, 0x00}, | ||
712 | .v_sync_line_bef_1 = {0x02, 0x00}, | ||
713 | .v_sync_line_aft_2 = {0x39, 0x02}, | ||
714 | .v_sync_line_aft_1 = {0x34, 0x02}, | ||
715 | .v_sync_line_aft_pxl_2 = {0xa4, 0x04}, | ||
716 | .v_sync_line_aft_pxl_1 = {0xa4, 0x04}, | ||
717 | .v_blank_f2 = {0xff, 0xff}, | ||
718 | .v_blank_f3 = {0xff, 0xff}, | ||
719 | .v_blank_f4 = {0xff, 0xff}, | ||
720 | .v_blank_f5 = {0xff, 0xff}, | ||
721 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
722 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
723 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
724 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
725 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
726 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
727 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
728 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
729 | .vact_space_1 = {0xff, 0xff}, | ||
730 | .vact_space_2 = {0xff, 0xff}, | ||
731 | .vact_space_3 = {0xff, 0xff}, | ||
732 | .vact_space_4 = {0xff, 0xff}, | ||
733 | .vact_space_5 = {0xff, 0xff}, | ||
734 | .vact_space_6 = {0xff, 0xff}, | ||
735 | /* remaining fields are don't-cares */ | ||
736 | }, | ||
737 | .tg = { | ||
738 | 0x00, /* cmd */ | ||
739 | 0x98, 0x08, /* h_fsz */ | ||
740 | 0x18, 0x01, 0x80, 0x07, /* hact */ | ||
741 | 0x65, 0x04, /* v_fsz */ | ||
742 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
743 | 0x16, 0x00, 0x1c, 0x02, /* vact */ | ||
744 | 0x33, 0x02, /* field_chg */ | ||
745 | 0x49, 0x02, /* vact_st2 */ | ||
746 | 0x00, 0x00, /* vact_st3 */ | ||
747 | 0x00, 0x00, /* vact_st4 */ | ||
748 | 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */ | ||
749 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
750 | 0x00, /* 3d FP */ | ||
751 | }, | ||
752 | }; | ||
753 | |||
754 | static const struct hdmi_preset_conf hdmi_conf_1080p50 = { | ||
755 | .core = { | ||
756 | .h_blank = {0xd0, 0x02}, | ||
757 | .v2_blank = {0x65, 0x04}, | ||
758 | .v1_blank = {0x2d, 0x00}, | ||
759 | .v_line = {0x65, 0x04}, | ||
760 | .h_line = {0x50, 0x0a}, | ||
761 | .hsync_pol = {0x00}, | ||
762 | .vsync_pol = {0x00}, | ||
763 | .int_pro_mode = {0x00}, | ||
764 | .v_blank_f0 = {0xff, 0xff}, | ||
765 | .v_blank_f1 = {0xff, 0xff}, | ||
766 | .h_sync_start = {0x0e, 0x02}, | ||
767 | .h_sync_end = {0x3a, 0x02}, | ||
768 | .v_sync_line_bef_2 = {0x09, 0x00}, | ||
769 | .v_sync_line_bef_1 = {0x04, 0x00}, | ||
770 | .v_sync_line_aft_2 = {0xff, 0xff}, | ||
771 | .v_sync_line_aft_1 = {0xff, 0xff}, | ||
772 | .v_sync_line_aft_pxl_2 = {0xff, 0xff}, | ||
773 | .v_sync_line_aft_pxl_1 = {0xff, 0xff}, | ||
774 | .v_blank_f2 = {0xff, 0xff}, | ||
775 | .v_blank_f3 = {0xff, 0xff}, | ||
776 | .v_blank_f4 = {0xff, 0xff}, | ||
777 | .v_blank_f5 = {0xff, 0xff}, | ||
778 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
779 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
780 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
781 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
782 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
783 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
784 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
785 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
786 | .vact_space_1 = {0xff, 0xff}, | ||
787 | .vact_space_2 = {0xff, 0xff}, | ||
788 | .vact_space_3 = {0xff, 0xff}, | ||
789 | .vact_space_4 = {0xff, 0xff}, | ||
790 | .vact_space_5 = {0xff, 0xff}, | ||
791 | .vact_space_6 = {0xff, 0xff}, | ||
792 | /* remaining fields are don't-cares */ | ||
793 | }, | ||
794 | .tg = { | ||
795 | 0x00, /* cmd */ | ||
796 | 0x50, 0x0a, /* h_fsz */ | ||
797 | 0xd0, 0x02, 0x80, 0x07, /* hact */ | ||
798 | 0x65, 0x04, /* v_fsz */ | ||
799 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
800 | 0x2d, 0x00, 0x38, 0x04, /* vact */ | ||
801 | 0x33, 0x02, /* field_chg */ | ||
802 | 0x48, 0x02, /* vact_st2 */ | ||
803 | 0x00, 0x00, /* vact_st3 */ | ||
804 | 0x00, 0x00, /* vact_st4 */ | ||
805 | 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ | ||
806 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
807 | 0x00, /* 3d FP */ | ||
808 | }, | ||
809 | }; | ||
810 | |||
811 | static const struct hdmi_preset_conf hdmi_conf_1080p60 = { | ||
812 | .core = { | ||
813 | .h_blank = {0x18, 0x01}, | ||
814 | .v2_blank = {0x65, 0x04}, | ||
815 | .v1_blank = {0x2d, 0x00}, | ||
816 | .v_line = {0x65, 0x04}, | ||
817 | .h_line = {0x98, 0x08}, | ||
818 | .hsync_pol = {0x00}, | ||
819 | .vsync_pol = {0x00}, | ||
820 | .int_pro_mode = {0x00}, | ||
821 | .v_blank_f0 = {0xff, 0xff}, | ||
822 | .v_blank_f1 = {0xff, 0xff}, | ||
823 | .h_sync_start = {0x56, 0x00}, | ||
824 | .h_sync_end = {0x82, 0x00}, | ||
825 | .v_sync_line_bef_2 = {0x09, 0x00}, | ||
826 | .v_sync_line_bef_1 = {0x04, 0x00}, | ||
827 | .v_sync_line_aft_2 = {0xff, 0xff}, | ||
828 | .v_sync_line_aft_1 = {0xff, 0xff}, | ||
829 | .v_sync_line_aft_pxl_2 = {0xff, 0xff}, | ||
830 | .v_sync_line_aft_pxl_1 = {0xff, 0xff}, | ||
831 | .v_blank_f2 = {0xff, 0xff}, | ||
832 | .v_blank_f3 = {0xff, 0xff}, | ||
833 | .v_blank_f4 = {0xff, 0xff}, | ||
834 | .v_blank_f5 = {0xff, 0xff}, | ||
835 | .v_sync_line_aft_3 = {0xff, 0xff}, | ||
836 | .v_sync_line_aft_4 = {0xff, 0xff}, | ||
837 | .v_sync_line_aft_5 = {0xff, 0xff}, | ||
838 | .v_sync_line_aft_6 = {0xff, 0xff}, | ||
839 | .v_sync_line_aft_pxl_3 = {0xff, 0xff}, | ||
840 | .v_sync_line_aft_pxl_4 = {0xff, 0xff}, | ||
841 | .v_sync_line_aft_pxl_5 = {0xff, 0xff}, | ||
842 | .v_sync_line_aft_pxl_6 = {0xff, 0xff}, | ||
843 | /* remaining fields are don't-cares */ | ||
844 | }, | ||
845 | .tg = { | ||
846 | 0x00, /* cmd */ | ||
847 | 0x98, 0x08, /* h_fsz */ | ||
848 | 0x18, 0x01, 0x80, 0x07, /* hact */ | ||
849 | 0x65, 0x04, /* v_fsz */ | ||
850 | 0x01, 0x00, 0x33, 0x02, /* vsync */ | ||
851 | 0x2d, 0x00, 0x38, 0x04, /* vact */ | ||
852 | 0x33, 0x02, /* field_chg */ | ||
853 | 0x48, 0x02, /* vact_st2 */ | ||
854 | 0x00, 0x00, /* vact_st3 */ | ||
855 | 0x00, 0x00, /* vact_st4 */ | ||
856 | 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */ | ||
857 | 0x01, 0x00, 0x33, 0x02, /* field top/bot */ | ||
858 | 0x00, /* 3d FP */ | ||
859 | }, | ||
860 | }; | ||
861 | |||
297 | static const struct hdmi_conf hdmi_confs[] = { | 862 | static const struct hdmi_conf hdmi_confs[] = { |
863 | { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, | ||
864 | { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, | ||
298 | { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, | 865 | { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, |
299 | { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, | ||
300 | { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p }, | ||
301 | { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, | 866 | { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, |
302 | { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, | ||
303 | { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, | 867 | { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, |
868 | { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, | ||
304 | { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, | 869 | { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, |
305 | }; | 870 | }; |
306 | 871 | ||
@@ -324,7 +889,7 @@ static inline void hdmi_reg_writemask(struct hdmi_context *hdata, | |||
324 | writel(value, hdata->regs + reg_id); | 889 | writel(value, hdata->regs + reg_id); |
325 | } | 890 | } |
326 | 891 | ||
327 | static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) | 892 | static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix) |
328 | { | 893 | { |
329 | #define DUMPREG(reg_id) \ | 894 | #define DUMPREG(reg_id) \ |
330 | DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ | 895 | DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ |
@@ -333,6 +898,101 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) | |||
333 | DUMPREG(HDMI_INTC_FLAG); | 898 | DUMPREG(HDMI_INTC_FLAG); |
334 | DUMPREG(HDMI_INTC_CON); | 899 | DUMPREG(HDMI_INTC_CON); |
335 | DUMPREG(HDMI_HPD_STATUS); | 900 | DUMPREG(HDMI_HPD_STATUS); |
901 | DUMPREG(HDMI_V13_PHY_RSTOUT); | ||
902 | DUMPREG(HDMI_V13_PHY_VPLL); | ||
903 | DUMPREG(HDMI_V13_PHY_CMU); | ||
904 | DUMPREG(HDMI_V13_CORE_RSTOUT); | ||
905 | |||
906 | DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); | ||
907 | DUMPREG(HDMI_CON_0); | ||
908 | DUMPREG(HDMI_CON_1); | ||
909 | DUMPREG(HDMI_CON_2); | ||
910 | DUMPREG(HDMI_SYS_STATUS); | ||
911 | DUMPREG(HDMI_V13_PHY_STATUS); | ||
912 | DUMPREG(HDMI_STATUS_EN); | ||
913 | DUMPREG(HDMI_HPD); | ||
914 | DUMPREG(HDMI_MODE_SEL); | ||
915 | DUMPREG(HDMI_V13_HPD_GEN); | ||
916 | DUMPREG(HDMI_V13_DC_CONTROL); | ||
917 | DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN); | ||
918 | |||
919 | DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); | ||
920 | DUMPREG(HDMI_H_BLANK_0); | ||
921 | DUMPREG(HDMI_H_BLANK_1); | ||
922 | DUMPREG(HDMI_V13_V_BLANK_0); | ||
923 | DUMPREG(HDMI_V13_V_BLANK_1); | ||
924 | DUMPREG(HDMI_V13_V_BLANK_2); | ||
925 | DUMPREG(HDMI_V13_H_V_LINE_0); | ||
926 | DUMPREG(HDMI_V13_H_V_LINE_1); | ||
927 | DUMPREG(HDMI_V13_H_V_LINE_2); | ||
928 | DUMPREG(HDMI_VSYNC_POL); | ||
929 | DUMPREG(HDMI_INT_PRO_MODE); | ||
930 | DUMPREG(HDMI_V13_V_BLANK_F_0); | ||
931 | DUMPREG(HDMI_V13_V_BLANK_F_1); | ||
932 | DUMPREG(HDMI_V13_V_BLANK_F_2); | ||
933 | DUMPREG(HDMI_V13_H_SYNC_GEN_0); | ||
934 | DUMPREG(HDMI_V13_H_SYNC_GEN_1); | ||
935 | DUMPREG(HDMI_V13_H_SYNC_GEN_2); | ||
936 | DUMPREG(HDMI_V13_V_SYNC_GEN_1_0); | ||
937 | DUMPREG(HDMI_V13_V_SYNC_GEN_1_1); | ||
938 | DUMPREG(HDMI_V13_V_SYNC_GEN_1_2); | ||
939 | DUMPREG(HDMI_V13_V_SYNC_GEN_2_0); | ||
940 | DUMPREG(HDMI_V13_V_SYNC_GEN_2_1); | ||
941 | DUMPREG(HDMI_V13_V_SYNC_GEN_2_2); | ||
942 | DUMPREG(HDMI_V13_V_SYNC_GEN_3_0); | ||
943 | DUMPREG(HDMI_V13_V_SYNC_GEN_3_1); | ||
944 | DUMPREG(HDMI_V13_V_SYNC_GEN_3_2); | ||
945 | |||
946 | DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); | ||
947 | DUMPREG(HDMI_TG_CMD); | ||
948 | DUMPREG(HDMI_TG_H_FSZ_L); | ||
949 | DUMPREG(HDMI_TG_H_FSZ_H); | ||
950 | DUMPREG(HDMI_TG_HACT_ST_L); | ||
951 | DUMPREG(HDMI_TG_HACT_ST_H); | ||
952 | DUMPREG(HDMI_TG_HACT_SZ_L); | ||
953 | DUMPREG(HDMI_TG_HACT_SZ_H); | ||
954 | DUMPREG(HDMI_TG_V_FSZ_L); | ||
955 | DUMPREG(HDMI_TG_V_FSZ_H); | ||
956 | DUMPREG(HDMI_TG_VSYNC_L); | ||
957 | DUMPREG(HDMI_TG_VSYNC_H); | ||
958 | DUMPREG(HDMI_TG_VSYNC2_L); | ||
959 | DUMPREG(HDMI_TG_VSYNC2_H); | ||
960 | DUMPREG(HDMI_TG_VACT_ST_L); | ||
961 | DUMPREG(HDMI_TG_VACT_ST_H); | ||
962 | DUMPREG(HDMI_TG_VACT_SZ_L); | ||
963 | DUMPREG(HDMI_TG_VACT_SZ_H); | ||
964 | DUMPREG(HDMI_TG_FIELD_CHG_L); | ||
965 | DUMPREG(HDMI_TG_FIELD_CHG_H); | ||
966 | DUMPREG(HDMI_TG_VACT_ST2_L); | ||
967 | DUMPREG(HDMI_TG_VACT_ST2_H); | ||
968 | DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); | ||
969 | DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); | ||
970 | DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); | ||
971 | DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); | ||
972 | DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); | ||
973 | DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); | ||
974 | DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); | ||
975 | DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); | ||
976 | #undef DUMPREG | ||
977 | } | ||
978 | |||
979 | static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) | ||
980 | { | ||
981 | int i; | ||
982 | |||
983 | #define DUMPREG(reg_id) \ | ||
984 | DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ | ||
985 | readl(hdata->regs + reg_id)) | ||
986 | |||
987 | DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); | ||
988 | DUMPREG(HDMI_INTC_CON); | ||
989 | DUMPREG(HDMI_INTC_FLAG); | ||
990 | DUMPREG(HDMI_HPD_STATUS); | ||
991 | DUMPREG(HDMI_INTC_CON_1); | ||
992 | DUMPREG(HDMI_INTC_FLAG_1); | ||
993 | DUMPREG(HDMI_PHY_STATUS_0); | ||
994 | DUMPREG(HDMI_PHY_STATUS_PLL); | ||
995 | DUMPREG(HDMI_PHY_CON_0); | ||
336 | DUMPREG(HDMI_PHY_RSTOUT); | 996 | DUMPREG(HDMI_PHY_RSTOUT); |
337 | DUMPREG(HDMI_PHY_VPLL); | 997 | DUMPREG(HDMI_PHY_VPLL); |
338 | DUMPREG(HDMI_PHY_CMU); | 998 | DUMPREG(HDMI_PHY_CMU); |
@@ -343,40 +1003,93 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) | |||
343 | DUMPREG(HDMI_CON_1); | 1003 | DUMPREG(HDMI_CON_1); |
344 | DUMPREG(HDMI_CON_2); | 1004 | DUMPREG(HDMI_CON_2); |
345 | DUMPREG(HDMI_SYS_STATUS); | 1005 | DUMPREG(HDMI_SYS_STATUS); |
346 | DUMPREG(HDMI_PHY_STATUS); | 1006 | DUMPREG(HDMI_PHY_STATUS_0); |
347 | DUMPREG(HDMI_STATUS_EN); | 1007 | DUMPREG(HDMI_STATUS_EN); |
348 | DUMPREG(HDMI_HPD); | 1008 | DUMPREG(HDMI_HPD); |
349 | DUMPREG(HDMI_MODE_SEL); | 1009 | DUMPREG(HDMI_MODE_SEL); |
350 | DUMPREG(HDMI_HPD_GEN); | 1010 | DUMPREG(HDMI_ENC_EN); |
351 | DUMPREG(HDMI_DC_CONTROL); | 1011 | DUMPREG(HDMI_DC_CONTROL); |
352 | DUMPREG(HDMI_VIDEO_PATTERN_GEN); | 1012 | DUMPREG(HDMI_VIDEO_PATTERN_GEN); |
353 | 1013 | ||
354 | DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); | 1014 | DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); |
355 | DUMPREG(HDMI_H_BLANK_0); | 1015 | DUMPREG(HDMI_H_BLANK_0); |
356 | DUMPREG(HDMI_H_BLANK_1); | 1016 | DUMPREG(HDMI_H_BLANK_1); |
357 | DUMPREG(HDMI_V_BLANK_0); | 1017 | DUMPREG(HDMI_V2_BLANK_0); |
358 | DUMPREG(HDMI_V_BLANK_1); | 1018 | DUMPREG(HDMI_V2_BLANK_1); |
359 | DUMPREG(HDMI_V_BLANK_2); | 1019 | DUMPREG(HDMI_V1_BLANK_0); |
360 | DUMPREG(HDMI_H_V_LINE_0); | 1020 | DUMPREG(HDMI_V1_BLANK_1); |
361 | DUMPREG(HDMI_H_V_LINE_1); | 1021 | DUMPREG(HDMI_V_LINE_0); |
362 | DUMPREG(HDMI_H_V_LINE_2); | 1022 | DUMPREG(HDMI_V_LINE_1); |
1023 | DUMPREG(HDMI_H_LINE_0); | ||
1024 | DUMPREG(HDMI_H_LINE_1); | ||
1025 | DUMPREG(HDMI_HSYNC_POL); | ||
1026 | |||
363 | DUMPREG(HDMI_VSYNC_POL); | 1027 | DUMPREG(HDMI_VSYNC_POL); |
364 | DUMPREG(HDMI_INT_PRO_MODE); | 1028 | DUMPREG(HDMI_INT_PRO_MODE); |
365 | DUMPREG(HDMI_V_BLANK_F_0); | 1029 | DUMPREG(HDMI_V_BLANK_F0_0); |
366 | DUMPREG(HDMI_V_BLANK_F_1); | 1030 | DUMPREG(HDMI_V_BLANK_F0_1); |
367 | DUMPREG(HDMI_V_BLANK_F_2); | 1031 | DUMPREG(HDMI_V_BLANK_F1_0); |
368 | DUMPREG(HDMI_H_SYNC_GEN_0); | 1032 | DUMPREG(HDMI_V_BLANK_F1_1); |
369 | DUMPREG(HDMI_H_SYNC_GEN_1); | 1033 | |
370 | DUMPREG(HDMI_H_SYNC_GEN_2); | 1034 | DUMPREG(HDMI_H_SYNC_START_0); |
371 | DUMPREG(HDMI_V_SYNC_GEN_1_0); | 1035 | DUMPREG(HDMI_H_SYNC_START_1); |
372 | DUMPREG(HDMI_V_SYNC_GEN_1_1); | 1036 | DUMPREG(HDMI_H_SYNC_END_0); |
373 | DUMPREG(HDMI_V_SYNC_GEN_1_2); | 1037 | DUMPREG(HDMI_H_SYNC_END_1); |
374 | DUMPREG(HDMI_V_SYNC_GEN_2_0); | 1038 | |
375 | DUMPREG(HDMI_V_SYNC_GEN_2_1); | 1039 | DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0); |
376 | DUMPREG(HDMI_V_SYNC_GEN_2_2); | 1040 | DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1); |
377 | DUMPREG(HDMI_V_SYNC_GEN_3_0); | 1041 | DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0); |
378 | DUMPREG(HDMI_V_SYNC_GEN_3_1); | 1042 | DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1); |
379 | DUMPREG(HDMI_V_SYNC_GEN_3_2); | 1043 | |
1044 | DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0); | ||
1045 | DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1); | ||
1046 | DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0); | ||
1047 | DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1); | ||
1048 | |||
1049 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0); | ||
1050 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1); | ||
1051 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0); | ||
1052 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1); | ||
1053 | |||
1054 | DUMPREG(HDMI_V_BLANK_F2_0); | ||
1055 | DUMPREG(HDMI_V_BLANK_F2_1); | ||
1056 | DUMPREG(HDMI_V_BLANK_F3_0); | ||
1057 | DUMPREG(HDMI_V_BLANK_F3_1); | ||
1058 | DUMPREG(HDMI_V_BLANK_F4_0); | ||
1059 | DUMPREG(HDMI_V_BLANK_F4_1); | ||
1060 | DUMPREG(HDMI_V_BLANK_F5_0); | ||
1061 | DUMPREG(HDMI_V_BLANK_F5_1); | ||
1062 | |||
1063 | DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0); | ||
1064 | DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1); | ||
1065 | DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0); | ||
1066 | DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1); | ||
1067 | DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0); | ||
1068 | DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1); | ||
1069 | DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0); | ||
1070 | DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1); | ||
1071 | |||
1072 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0); | ||
1073 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1); | ||
1074 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0); | ||
1075 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1); | ||
1076 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0); | ||
1077 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1); | ||
1078 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0); | ||
1079 | DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1); | ||
1080 | |||
1081 | DUMPREG(HDMI_VACT_SPACE_1_0); | ||
1082 | DUMPREG(HDMI_VACT_SPACE_1_1); | ||
1083 | DUMPREG(HDMI_VACT_SPACE_2_0); | ||
1084 | DUMPREG(HDMI_VACT_SPACE_2_1); | ||
1085 | DUMPREG(HDMI_VACT_SPACE_3_0); | ||
1086 | DUMPREG(HDMI_VACT_SPACE_3_1); | ||
1087 | DUMPREG(HDMI_VACT_SPACE_4_0); | ||
1088 | DUMPREG(HDMI_VACT_SPACE_4_1); | ||
1089 | DUMPREG(HDMI_VACT_SPACE_5_0); | ||
1090 | DUMPREG(HDMI_VACT_SPACE_5_1); | ||
1091 | DUMPREG(HDMI_VACT_SPACE_6_0); | ||
1092 | DUMPREG(HDMI_VACT_SPACE_6_1); | ||
380 | 1093 | ||
381 | DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); | 1094 | DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); |
382 | DUMPREG(HDMI_TG_CMD); | 1095 | DUMPREG(HDMI_TG_CMD); |
@@ -400,6 +1113,10 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) | |||
400 | DUMPREG(HDMI_TG_FIELD_CHG_H); | 1113 | DUMPREG(HDMI_TG_FIELD_CHG_H); |
401 | DUMPREG(HDMI_TG_VACT_ST2_L); | 1114 | DUMPREG(HDMI_TG_VACT_ST2_L); |
402 | DUMPREG(HDMI_TG_VACT_ST2_H); | 1115 | DUMPREG(HDMI_TG_VACT_ST2_H); |
1116 | DUMPREG(HDMI_TG_VACT_ST3_L); | ||
1117 | DUMPREG(HDMI_TG_VACT_ST3_H); | ||
1118 | DUMPREG(HDMI_TG_VACT_ST4_L); | ||
1119 | DUMPREG(HDMI_TG_VACT_ST4_H); | ||
403 | DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); | 1120 | DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); |
404 | DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); | 1121 | DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); |
405 | DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); | 1122 | DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); |
@@ -408,10 +1125,49 @@ static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) | |||
408 | DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); | 1125 | DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); |
409 | DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); | 1126 | DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); |
410 | DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); | 1127 | DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); |
1128 | DUMPREG(HDMI_TG_3D); | ||
1129 | |||
1130 | DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix); | ||
1131 | DUMPREG(HDMI_AVI_CON); | ||
1132 | DUMPREG(HDMI_AVI_HEADER0); | ||
1133 | DUMPREG(HDMI_AVI_HEADER1); | ||
1134 | DUMPREG(HDMI_AVI_HEADER2); | ||
1135 | DUMPREG(HDMI_AVI_CHECK_SUM); | ||
1136 | DUMPREG(HDMI_VSI_CON); | ||
1137 | DUMPREG(HDMI_VSI_HEADER0); | ||
1138 | DUMPREG(HDMI_VSI_HEADER1); | ||
1139 | DUMPREG(HDMI_VSI_HEADER2); | ||
1140 | for (i = 0; i < 7; ++i) | ||
1141 | DUMPREG(HDMI_VSI_DATA(i)); | ||
1142 | |||
411 | #undef DUMPREG | 1143 | #undef DUMPREG |
412 | } | 1144 | } |
413 | 1145 | ||
414 | static int hdmi_conf_index(struct drm_display_mode *mode) | 1146 | static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) |
1147 | { | ||
1148 | if (hdata->is_v13) | ||
1149 | hdmi_v13_regs_dump(hdata, prefix); | ||
1150 | else | ||
1151 | hdmi_v14_regs_dump(hdata, prefix); | ||
1152 | } | ||
1153 | |||
1154 | static int hdmi_v13_conf_index(struct drm_display_mode *mode) | ||
1155 | { | ||
1156 | int i; | ||
1157 | |||
1158 | for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i) | ||
1159 | if (hdmi_v13_confs[i].width == mode->hdisplay && | ||
1160 | hdmi_v13_confs[i].height == mode->vdisplay && | ||
1161 | hdmi_v13_confs[i].vrefresh == mode->vrefresh && | ||
1162 | hdmi_v13_confs[i].interlace == | ||
1163 | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? | ||
1164 | true : false)) | ||
1165 | return i; | ||
1166 | |||
1167 | return -EINVAL; | ||
1168 | } | ||
1169 | |||
1170 | static int hdmi_v14_conf_index(struct drm_display_mode *mode) | ||
415 | { | 1171 | { |
416 | int i; | 1172 | int i; |
417 | 1173 | ||
@@ -424,7 +1180,16 @@ static int hdmi_conf_index(struct drm_display_mode *mode) | |||
424 | true : false)) | 1180 | true : false)) |
425 | return i; | 1181 | return i; |
426 | 1182 | ||
427 | return -1; | 1183 | return -EINVAL; |
1184 | } | ||
1185 | |||
1186 | static int hdmi_conf_index(struct hdmi_context *hdata, | ||
1187 | struct drm_display_mode *mode) | ||
1188 | { | ||
1189 | if (hdata->is_v13) | ||
1190 | return hdmi_v13_conf_index(mode); | ||
1191 | |||
1192 | return hdmi_v14_conf_index(mode); | ||
428 | } | 1193 | } |
429 | 1194 | ||
430 | static bool hdmi_is_connected(void *ctx) | 1195 | static bool hdmi_is_connected(void *ctx) |
@@ -462,29 +1227,69 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector, | |||
462 | return 0; | 1227 | return 0; |
463 | } | 1228 | } |
464 | 1229 | ||
465 | static int hdmi_check_timing(void *ctx, void *timing) | 1230 | static int hdmi_v13_check_timing(struct fb_videomode *check_timing) |
466 | { | 1231 | { |
467 | struct fb_videomode *check_timing = timing; | ||
468 | int i; | 1232 | int i; |
469 | 1233 | ||
470 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | 1234 | DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n", |
1235 | check_timing->xres, check_timing->yres, | ||
1236 | check_timing->refresh, (check_timing->vmode & | ||
1237 | FB_VMODE_INTERLACED) ? true : false); | ||
471 | 1238 | ||
472 | DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres, | 1239 | for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i) |
473 | check_timing->yres, check_timing->refresh, | 1240 | if (hdmi_v13_confs[i].width == check_timing->xres && |
474 | check_timing->vmode); | 1241 | hdmi_v13_confs[i].height == check_timing->yres && |
1242 | hdmi_v13_confs[i].vrefresh == check_timing->refresh && | ||
1243 | hdmi_v13_confs[i].interlace == | ||
1244 | ((check_timing->vmode & FB_VMODE_INTERLACED) ? | ||
1245 | true : false)) | ||
1246 | return 0; | ||
475 | 1247 | ||
476 | for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i) | 1248 | /* TODO */ |
1249 | |||
1250 | return -EINVAL; | ||
1251 | } | ||
1252 | |||
1253 | static int hdmi_v14_check_timing(struct fb_videomode *check_timing) | ||
1254 | { | ||
1255 | int i; | ||
1256 | |||
1257 | DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n", | ||
1258 | check_timing->xres, check_timing->yres, | ||
1259 | check_timing->refresh, (check_timing->vmode & | ||
1260 | FB_VMODE_INTERLACED) ? true : false); | ||
1261 | |||
1262 | for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++) | ||
477 | if (hdmi_confs[i].width == check_timing->xres && | 1263 | if (hdmi_confs[i].width == check_timing->xres && |
478 | hdmi_confs[i].height == check_timing->yres && | 1264 | hdmi_confs[i].height == check_timing->yres && |
479 | hdmi_confs[i].vrefresh == check_timing->refresh && | 1265 | hdmi_confs[i].vrefresh == check_timing->refresh && |
480 | hdmi_confs[i].interlace == | 1266 | hdmi_confs[i].interlace == |
481 | ((check_timing->vmode & FB_VMODE_INTERLACED) ? | 1267 | ((check_timing->vmode & FB_VMODE_INTERLACED) ? |
482 | true : false)) | 1268 | true : false)) |
483 | return 0; | 1269 | return 0; |
1270 | |||
1271 | /* TODO */ | ||
484 | 1272 | ||
485 | return -EINVAL; | 1273 | return -EINVAL; |
486 | } | 1274 | } |
487 | 1275 | ||
1276 | static int hdmi_check_timing(void *ctx, void *timing) | ||
1277 | { | ||
1278 | struct hdmi_context *hdata = (struct hdmi_context *)ctx; | ||
1279 | struct fb_videomode *check_timing = timing; | ||
1280 | |||
1281 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | ||
1282 | |||
1283 | DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres, | ||
1284 | check_timing->yres, check_timing->refresh, | ||
1285 | check_timing->vmode); | ||
1286 | |||
1287 | if (hdata->is_v13) | ||
1288 | return hdmi_v13_check_timing(check_timing); | ||
1289 | else | ||
1290 | return hdmi_v14_check_timing(check_timing); | ||
1291 | } | ||
1292 | |||
488 | static int hdmi_display_power_on(void *ctx, int mode) | 1293 | static int hdmi_display_power_on(void *ctx, int mode) |
489 | { | 1294 | { |
490 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | 1295 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); |
@@ -514,15 +1319,185 @@ static struct exynos_hdmi_display_ops display_ops = { | |||
514 | .power_on = hdmi_display_power_on, | 1319 | .power_on = hdmi_display_power_on, |
515 | }; | 1320 | }; |
516 | 1321 | ||
1322 | static void hdmi_set_acr(u32 freq, u8 *acr) | ||
1323 | { | ||
1324 | u32 n, cts; | ||
1325 | |||
1326 | switch (freq) { | ||
1327 | case 32000: | ||
1328 | n = 4096; | ||
1329 | cts = 27000; | ||
1330 | break; | ||
1331 | case 44100: | ||
1332 | n = 6272; | ||
1333 | cts = 30000; | ||
1334 | break; | ||
1335 | case 88200: | ||
1336 | n = 12544; | ||
1337 | cts = 30000; | ||
1338 | break; | ||
1339 | case 176400: | ||
1340 | n = 25088; | ||
1341 | cts = 30000; | ||
1342 | break; | ||
1343 | case 48000: | ||
1344 | n = 6144; | ||
1345 | cts = 27000; | ||
1346 | break; | ||
1347 | case 96000: | ||
1348 | n = 12288; | ||
1349 | cts = 27000; | ||
1350 | break; | ||
1351 | case 192000: | ||
1352 | n = 24576; | ||
1353 | cts = 27000; | ||
1354 | break; | ||
1355 | default: | ||
1356 | n = 0; | ||
1357 | cts = 0; | ||
1358 | break; | ||
1359 | } | ||
1360 | |||
1361 | acr[1] = cts >> 16; | ||
1362 | acr[2] = cts >> 8 & 0xff; | ||
1363 | acr[3] = cts & 0xff; | ||
1364 | |||
1365 | acr[4] = n >> 16; | ||
1366 | acr[5] = n >> 8 & 0xff; | ||
1367 | acr[6] = n & 0xff; | ||
1368 | } | ||
1369 | |||
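The N/CTS pairs above follow the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS. A minimal stand-alone check of the table, assuming a 27 MHz TMDS clock (an assumption used only for this illustration; the driver just programs the precomputed pairs, and the 44.1 kHz family maps to CTS = 30000 at that clock):

    #include <stdio.h>

    /*
     * Cross-check the N/CTS table above against CTS = f_tmds * N / (128 * fs).
     * The 27 MHz TMDS clock is assumed for illustration only.
     */
    int main(void)
    {
    	const unsigned long long f_tmds = 27000000ULL;
    	const struct { unsigned long long fs, n, cts; } tbl[] = {
    		{ 32000,  4096, 27000 },
    		{ 44100,  6272, 30000 },
    		{ 48000,  6144, 27000 },
    		{ 96000, 12288, 27000 },
    	};
    	unsigned int i;

    	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
    		printf("fs=%llu Hz: computed CTS=%llu, table CTS=%llu\n",
    		       tbl[i].fs, f_tmds * tbl[i].n / (128 * tbl[i].fs),
    		       tbl[i].cts);
    	return 0;
    }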
1370 | static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) | ||
1371 | { | ||
1372 | hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]); | ||
1373 | hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]); | ||
1374 | hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]); | ||
1375 | hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]); | ||
1376 | hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]); | ||
1377 | hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]); | ||
1378 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]); | ||
1379 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); | ||
1380 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); | ||
1381 | |||
1382 | if (hdata->is_v13) | ||
1383 | hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); | ||
1384 | else | ||
1385 | hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); | ||
1386 | } | ||
1387 | |||
1388 | static void hdmi_audio_init(struct hdmi_context *hdata) | ||
1389 | { | ||
1390 | u32 sample_rate, bits_per_sample, frame_size_code; | ||
1391 | u32 data_num, bit_ch, sample_frq; | ||
1392 | u32 val; | ||
1393 | u8 acr[7]; | ||
1394 | |||
1395 | sample_rate = 44100; | ||
1396 | bits_per_sample = 16; | ||
1397 | frame_size_code = 0; | ||
1398 | |||
1399 | switch (bits_per_sample) { | ||
1400 | case 20: | ||
1401 | data_num = 2; | ||
1402 | bit_ch = 1; | ||
1403 | break; | ||
1404 | case 24: | ||
1405 | data_num = 3; | ||
1406 | bit_ch = 1; | ||
1407 | break; | ||
1408 | default: | ||
1409 | data_num = 1; | ||
1410 | bit_ch = 0; | ||
1411 | break; | ||
1412 | } | ||
1413 | |||
1414 | hdmi_set_acr(sample_rate, acr); | ||
1415 | hdmi_reg_acr(hdata, acr); | ||
1416 | |||
1417 | hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE | ||
1418 | | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE | ||
1419 | | HDMI_I2S_MUX_ENABLE); | ||
1420 | |||
1421 | hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN | ||
1422 | | HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN); | ||
1423 | |||
1424 | hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN); | ||
1425 | |||
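	/* sample_frq below is the IEC 60958 channel-status sampling-frequency
	 * code: 0x0 = 44.1 kHz, 0x2 = 48 kHz, 0x3 = 32 kHz, 0xa = 96 kHz. */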
1426 | sample_frq = (sample_rate == 44100) ? 0 : | ||
1427 | (sample_rate == 48000) ? 2 : | ||
1428 | (sample_rate == 32000) ? 3 : | ||
1429 | (sample_rate == 96000) ? 0xa : 0x0; | ||
1430 | |||
1431 | hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS); | ||
1432 | hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN); | ||
1433 | |||
1434 | val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01; | ||
1435 | hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val); | ||
1436 | |||
1437 | /* Configure the I2S input ports (I2S_PIN_SEL_0~4) */ | ||
1438 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) | ||
1439 | | HDMI_I2S_SEL_LRCK(6)); | ||
1440 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) | ||
1441 | | HDMI_I2S_SEL_SDATA2(4)); | ||
1442 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) | ||
1443 | | HDMI_I2S_SEL_SDATA2(2)); | ||
1444 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); | ||
1445 | |||
1446 | /* I2S_CON_1 & 2 */ | ||
1447 | hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE | ||
1448 | | HDMI_I2S_L_CH_LOW_POL); | ||
1449 | hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE | ||
1450 | | HDMI_I2S_SET_BIT_CH(bit_ch) | ||
1451 | | HDMI_I2S_SET_SDATA_BIT(data_num) | ||
1452 | | HDMI_I2S_BASIC_FORMAT); | ||
1453 | |||
1454 | /* Configure registers related to CUV (channel status/user data/validity) information */ | ||
1455 | hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0 | ||
1456 | | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH | ||
1457 | | HDMI_I2S_COPYRIGHT | ||
1458 | | HDMI_I2S_LINEAR_PCM | ||
1459 | | HDMI_I2S_CONSUMER_FORMAT); | ||
1460 | hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER); | ||
1461 | hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0)); | ||
1462 | hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2 | ||
1463 | | HDMI_I2S_SET_SMP_FREQ(sample_frq)); | ||
1464 | hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4, | ||
1465 | HDMI_I2S_ORG_SMP_FREQ_44_1 | ||
1466 | | HDMI_I2S_WORD_LEN_MAX24_24BITS | ||
1467 | | HDMI_I2S_WORD_LEN_MAX_24BITS); | ||
1468 | |||
1469 | hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD); | ||
1470 | } | ||
1471 | |||
1472 | static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff) | ||
1473 | { | ||
1474 | u32 mod; | ||
1475 | |||
1476 | mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); | ||
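	/* DVI mode carries no audio packets, so leave AUI/ASP untouched. */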
1477 | if (mod & HDMI_DVI_MODE_EN) | ||
1478 | return; | ||
1479 | |||
1480 | hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0); | ||
1481 | hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ? | ||
1482 | HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK); | ||
1483 | } | ||
1484 | |||
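hdmi_reg_writemask() is used throughout these hunks with a value of either 0 or ~0 plus a bit mask. A minimal sketch of the read-modify-write behaviour those calls assume (illustrative only; the real helper lives earlier in the driver and is not part of this hunk, and <linux/io.h> is assumed):

    /* Illustrative sketch: bits in 'mask' take 'value', all others keep
     * their current contents. */
    static inline void reg_writemask(void __iomem *base, u32 reg_id,
    				 u32 value, u32 mask)
    {
    	u32 old = readl(base + reg_id);

    	writel((value & mask) | (old & ~mask), base + reg_id);
    }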
517 | static void hdmi_conf_reset(struct hdmi_context *hdata) | 1485 | static void hdmi_conf_reset(struct hdmi_context *hdata) |
518 | { | 1486 | { |
1487 | u32 reg; | ||
1488 | |||
519 | /* disable hpd handle for drm */ | 1489 | /* disable hpd handle for drm */ |
520 | hdata->hpd_handle = false; | 1490 | hdata->hpd_handle = false; |
521 | 1491 | ||
1492 | if (hdata->is_v13) | ||
1493 | reg = HDMI_V13_CORE_RSTOUT; | ||
1494 | else | ||
1495 | reg = HDMI_CORE_RSTOUT; | ||
1496 | |||
522 | /* resetting HDMI core */ | 1497 | /* resetting HDMI core */ |
523 | hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT); | 1498 | hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT); |
524 | mdelay(10); | 1499 | mdelay(10); |
525 | hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT); | 1500 | hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT); |
526 | mdelay(10); | 1501 | mdelay(10); |
527 | 1502 | ||
528 | /* enable hpd handle for drm */ | 1503 | /* enable hpd handle for drm */ |
@@ -546,27 +1521,126 @@ static void hdmi_conf_init(struct hdmi_context *hdata) | |||
546 | HDMI_MODE_HDMI_EN, HDMI_MODE_MASK); | 1521 | HDMI_MODE_HDMI_EN, HDMI_MODE_MASK); |
547 | /* disable bluescreen */ | 1522 | /* disable bluescreen */ |
548 | hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); | 1523 | hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); |
549 | /* choose bluescreen (fecal) color */ | 1524 | |
550 | hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12); | 1525 | if (hdata->is_v13) { |
551 | hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34); | 1526 | /* choose bluescreen (fecal) color */ |
552 | hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56); | 1527 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); |
553 | /* enable AVI packet every vsync, fixes purple line problem */ | 1528 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); |
554 | hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02); | 1529 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56); |
555 | /* force RGB, look to CEA-861-D, table 7 for more detail */ | 1530 | |
556 | hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5); | 1531 | /* enable AVI packet every vsync, fixes purple line problem */ |
557 | hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5); | 1532 | hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02); |
558 | 1533 | /* force RGB, look to CEA-861-D, table 7 for more detail */ | |
559 | hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02); | 1534 | hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5); |
560 | hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); | 1535 | hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5); |
561 | hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04); | 1536 | |
1537 | hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02); | ||
1538 | hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); | ||
1539 | hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); | ||
1540 | } else { | ||
1541 | /* enable AVI packet every vsync, fixes purple line problem */ | ||
1542 | hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02); | ||
1543 | hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5); | ||
1544 | hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); | ||
1545 | } | ||
562 | 1546 | ||
563 | /* enable hpd handle for drm */ | 1547 | /* enable hpd handle for drm */ |
564 | hdata->hpd_handle = true; | 1548 | hdata->hpd_handle = true; |
565 | } | 1549 | } |
566 | 1550 | ||
567 | static void hdmi_timing_apply(struct hdmi_context *hdata, | 1551 | static void hdmi_v13_timing_apply(struct hdmi_context *hdata) |
568 | const struct hdmi_preset_conf *conf) | ||
569 | { | 1552 | { |
1553 | const struct hdmi_v13_preset_conf *conf = | ||
1554 | hdmi_v13_confs[hdata->cur_conf].conf; | ||
1555 | const struct hdmi_v13_core_regs *core = &conf->core; | ||
1556 | const struct hdmi_v13_tg_regs *tg = &conf->tg; | ||
1557 | int tries; | ||
1558 | |||
1559 | /* setting core registers */ | ||
1560 | hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); | ||
1561 | hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); | ||
1562 | hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); | ||
1563 | hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); | ||
1564 | hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); | ||
1565 | hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); | ||
1566 | hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); | ||
1567 | hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); | ||
1568 | hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); | ||
1569 | hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); | ||
1570 | hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); | ||
1571 | hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); | ||
1572 | hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); | ||
1573 | hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); | ||
1574 | hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); | ||
1575 | hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); | ||
1576 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); | ||
1577 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); | ||
1578 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); | ||
1579 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); | ||
1580 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); | ||
1581 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); | ||
1582 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); | ||
1583 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); | ||
1584 | hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); | ||
1585 | /* Timing generator registers */ | ||
1586 | hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); | ||
1587 | hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); | ||
1588 | hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l); | ||
1589 | hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h); | ||
1590 | hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l); | ||
1591 | hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h); | ||
1592 | hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l); | ||
1593 | hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h); | ||
1594 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l); | ||
1595 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h); | ||
1596 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l); | ||
1597 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h); | ||
1598 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l); | ||
1599 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h); | ||
1600 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l); | ||
1601 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h); | ||
1602 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l); | ||
1603 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); | ||
1604 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); | ||
1605 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); | ||
1606 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); | ||
1607 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); | ||
1608 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); | ||
1609 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h); | ||
1610 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l); | ||
1611 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); | ||
1612 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); | ||
1613 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); | ||
1614 | |||
1615 | /* waiting for HDMIPHY's PLL to get to steady state */ | ||
1616 | for (tries = 100; tries; --tries) { | ||
1617 | u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); | ||
1618 | if (val & HDMI_PHY_STATUS_READY) | ||
1619 | break; | ||
1620 | mdelay(1); | ||
1621 | } | ||
1622 | /* steady state not achieved */ | ||
1623 | if (tries == 0) { | ||
1624 | DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); | ||
1625 | hdmi_regs_dump(hdata, "timing apply"); | ||
1626 | } | ||
1627 | |||
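	/* PLL is locked (or we gave up): gate sclk_hdmi, reparent it from the
	 * fixed pixel clock to the HDMI PHY output, then ungate it again.
	 * hdmiphy_conf_reset() performs the reverse switch. */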
1628 | clk_disable(hdata->res.sclk_hdmi); | ||
1629 | clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy); | ||
1630 | clk_enable(hdata->res.sclk_hdmi); | ||
1631 | |||
1632 | /* enable HDMI and timing generator */ | ||
1633 | hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN); | ||
1634 | if (core->int_pro_mode[0]) | ||
1635 | hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN | | ||
1636 | HDMI_FIELD_EN); | ||
1637 | else | ||
1638 | hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); | ||
1639 | } | ||
1640 | |||
1641 | static void hdmi_v14_timing_apply(struct hdmi_context *hdata) | ||
1642 | { | ||
1643 | const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf; | ||
570 | const struct hdmi_core_regs *core = &conf->core; | 1644 | const struct hdmi_core_regs *core = &conf->core; |
571 | const struct hdmi_tg_regs *tg = &conf->tg; | 1645 | const struct hdmi_tg_regs *tg = &conf->tg; |
572 | int tries; | 1646 | int tries; |
@@ -574,29 +1648,102 @@ static void hdmi_timing_apply(struct hdmi_context *hdata, | |||
574 | /* setting core registers */ | 1648 | /* setting core registers */ |
575 | hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); | 1649 | hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); |
576 | hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); | 1650 | hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); |
577 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]); | 1651 | hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); |
578 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]); | 1652 | hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); |
579 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]); | 1653 | hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]); |
580 | hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]); | 1654 | hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); |
581 | hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]); | 1655 | hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); |
582 | hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]); | 1656 | hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); |
1657 | hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); | ||
1658 | hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); | ||
1659 | hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); | ||
583 | hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); | 1660 | hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); |
584 | hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); | 1661 | hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); |
585 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]); | 1662 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); |
586 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]); | 1663 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); |
587 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]); | 1664 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]); |
588 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]); | 1665 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); |
589 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]); | 1666 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); |
590 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]); | 1667 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); |
591 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); | 1668 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); |
592 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); | 1669 | hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); |
593 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); | 1670 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, |
594 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); | 1671 | core->v_sync_line_bef_2[0]); |
595 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); | 1672 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, |
596 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); | 1673 | core->v_sync_line_bef_2[1]); |
597 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); | 1674 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, |
598 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); | 1675 | core->v_sync_line_bef_1[0]); |
599 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); | 1676 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, |
1677 | core->v_sync_line_bef_1[1]); | ||
1678 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, | ||
1679 | core->v_sync_line_aft_2[0]); | ||
1680 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, | ||
1681 | core->v_sync_line_aft_2[1]); | ||
1682 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, | ||
1683 | core->v_sync_line_aft_1[0]); | ||
1684 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, | ||
1685 | core->v_sync_line_aft_1[1]); | ||
1686 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, | ||
1687 | core->v_sync_line_aft_pxl_2[0]); | ||
1688 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, | ||
1689 | core->v_sync_line_aft_pxl_2[1]); | ||
1690 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, | ||
1691 | core->v_sync_line_aft_pxl_1[0]); | ||
1692 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, | ||
1693 | core->v_sync_line_aft_pxl_1[1]); | ||
1694 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); | ||
1695 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); | ||
1696 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); | ||
1697 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); | ||
1698 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); | ||
1699 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); | ||
1700 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); | ||
1701 | hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); | ||
1702 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, | ||
1703 | core->v_sync_line_aft_3[0]); | ||
1704 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, | ||
1705 | core->v_sync_line_aft_3[1]); | ||
1706 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, | ||
1707 | core->v_sync_line_aft_4[0]); | ||
1708 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, | ||
1709 | core->v_sync_line_aft_4[1]); | ||
1710 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, | ||
1711 | core->v_sync_line_aft_5[0]); | ||
1712 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, | ||
1713 | core->v_sync_line_aft_5[1]); | ||
1714 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, | ||
1715 | core->v_sync_line_aft_6[0]); | ||
1716 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1, | ||
1717 | core->v_sync_line_aft_6[1]); | ||
1718 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, | ||
1719 | core->v_sync_line_aft_pxl_3[0]); | ||
1720 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, | ||
1721 | core->v_sync_line_aft_pxl_3[1]); | ||
1722 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, | ||
1723 | core->v_sync_line_aft_pxl_4[0]); | ||
1724 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, | ||
1725 | core->v_sync_line_aft_pxl_4[1]); | ||
1726 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, | ||
1727 | core->v_sync_line_aft_pxl_5[0]); | ||
1728 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, | ||
1729 | core->v_sync_line_aft_pxl_5[1]); | ||
1730 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, | ||
1731 | core->v_sync_line_aft_pxl_6[0]); | ||
1732 | hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, | ||
1733 | core->v_sync_line_aft_pxl_6[1]); | ||
1734 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); | ||
1735 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); | ||
1736 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); | ||
1737 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); | ||
1738 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); | ||
1739 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); | ||
1740 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); | ||
1741 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); | ||
1742 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); | ||
1743 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); | ||
1744 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); | ||
1745 | hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); | ||
1746 | |||
600 | /* Timing generator registers */ | 1747 | /* Timing generator registers */ |
601 | hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); | 1748 | hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); |
602 | hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); | 1749 | hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); |
@@ -618,6 +1765,10 @@ static void hdmi_timing_apply(struct hdmi_context *hdata, | |||
618 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); | 1765 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); |
619 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); | 1766 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); |
620 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); | 1767 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); |
1768 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l); | ||
1769 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h); | ||
1770 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l); | ||
1771 | hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h); | ||
621 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); | 1772 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); |
622 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); | 1773 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); |
623 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); | 1774 | hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); |
@@ -626,10 +1777,11 @@ static void hdmi_timing_apply(struct hdmi_context *hdata, | |||
626 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); | 1777 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); |
627 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); | 1778 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); |
628 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); | 1779 | hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); |
1780 | hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d); | ||
629 | 1781 | ||
630 | /* waiting for HDMIPHY's PLL to get to steady state */ | 1782 | /* waiting for HDMIPHY's PLL to get to steady state */ |
631 | for (tries = 100; tries; --tries) { | 1783 | for (tries = 100; tries; --tries) { |
632 | u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS); | 1784 | u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); |
633 | if (val & HDMI_PHY_STATUS_READY) | 1785 | if (val & HDMI_PHY_STATUS_READY) |
634 | break; | 1786 | break; |
635 | mdelay(1); | 1787 | mdelay(1); |
@@ -653,9 +1805,18 @@ static void hdmi_timing_apply(struct hdmi_context *hdata, | |||
653 | hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); | 1805 | hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN); |
654 | } | 1806 | } |
655 | 1807 | ||
1808 | static void hdmi_timing_apply(struct hdmi_context *hdata) | ||
1809 | { | ||
1810 | if (hdata->is_v13) | ||
1811 | hdmi_v13_timing_apply(hdata); | ||
1812 | else | ||
1813 | hdmi_v14_timing_apply(hdata); | ||
1814 | } | ||
1815 | |||
656 | static void hdmiphy_conf_reset(struct hdmi_context *hdata) | 1816 | static void hdmiphy_conf_reset(struct hdmi_context *hdata) |
657 | { | 1817 | { |
658 | u8 buffer[2]; | 1818 | u8 buffer[2]; |
1819 | u32 reg; | ||
659 | 1820 | ||
660 | clk_disable(hdata->res.sclk_hdmi); | 1821 | clk_disable(hdata->res.sclk_hdmi); |
661 | clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel); | 1822 | clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel); |
@@ -668,15 +1829,21 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) | |||
668 | if (hdata->hdmiphy_port) | 1829 | if (hdata->hdmiphy_port) |
669 | i2c_master_send(hdata->hdmiphy_port, buffer, 2); | 1830 | i2c_master_send(hdata->hdmiphy_port, buffer, 2); |
670 | 1831 | ||
1832 | if (hdata->is_v13) | ||
1833 | reg = HDMI_V13_PHY_RSTOUT; | ||
1834 | else | ||
1835 | reg = HDMI_PHY_RSTOUT; | ||
1836 | |||
671 | /* reset hdmiphy */ | 1837 | /* reset hdmiphy */ |
672 | hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT); | 1838 | hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); |
673 | mdelay(10); | 1839 | mdelay(10); |
674 | hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT); | 1840 | hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); |
675 | mdelay(10); | 1841 | mdelay(10); |
676 | } | 1842 | } |
677 | 1843 | ||
678 | static void hdmiphy_conf_apply(struct hdmi_context *hdata) | 1844 | static void hdmiphy_conf_apply(struct hdmi_context *hdata) |
679 | { | 1845 | { |
1846 | const u8 *hdmiphy_data; | ||
680 | u8 buffer[32]; | 1847 | u8 buffer[32]; |
681 | u8 operation[2]; | 1848 | u8 operation[2]; |
682 | u8 read_buffer[32] = {0, }; | 1849 | u8 read_buffer[32] = {0, }; |
@@ -689,7 +1856,12 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata) | |||
689 | } | 1856 | } |
690 | 1857 | ||
691 | /* pixel clock */ | 1858 | /* pixel clock */ |
692 | memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32); | 1859 | if (hdata->is_v13) |
1860 | hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; | ||
1861 | else | ||
1862 | hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data; | ||
1863 | |||
1864 | memcpy(buffer, hdmiphy_data, 32); | ||
693 | ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); | 1865 | ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); |
694 | if (ret != 32) { | 1866 | if (ret != 32) { |
695 | DRM_ERROR("failed to configure HDMIPHY via I2C\n"); | 1867 | DRM_ERROR("failed to configure HDMIPHY via I2C\n"); |
@@ -721,9 +1893,6 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata) | |||
721 | 1893 | ||
722 | static void hdmi_conf_apply(struct hdmi_context *hdata) | 1894 | static void hdmi_conf_apply(struct hdmi_context *hdata) |
723 | { | 1895 | { |
724 | const struct hdmi_preset_conf *conf = | ||
725 | hdmi_confs[hdata->cur_conf].conf; | ||
726 | |||
727 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | 1896 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); |
728 | 1897 | ||
729 | hdmiphy_conf_reset(hdata); | 1898 | hdmiphy_conf_reset(hdata); |
@@ -731,13 +1900,55 @@ static void hdmi_conf_apply(struct hdmi_context *hdata) | |||
731 | 1900 | ||
732 | hdmi_conf_reset(hdata); | 1901 | hdmi_conf_reset(hdata); |
733 | hdmi_conf_init(hdata); | 1902 | hdmi_conf_init(hdata); |
1903 | hdmi_audio_init(hdata); | ||
734 | 1904 | ||
735 | /* setting core registers */ | 1905 | /* setting core registers */ |
736 | hdmi_timing_apply(hdata, conf); | 1906 | hdmi_timing_apply(hdata); |
1907 | hdmi_audio_control(hdata, true); | ||
737 | 1908 | ||
738 | hdmi_regs_dump(hdata, "start"); | 1909 | hdmi_regs_dump(hdata, "start"); |
739 | } | 1910 | } |
740 | 1911 | ||
1912 | static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, | ||
1913 | struct drm_display_mode *mode, | ||
1914 | struct drm_display_mode *adjusted_mode) | ||
1915 | { | ||
1916 | struct drm_display_mode *m; | ||
1917 | struct hdmi_context *hdata = (struct hdmi_context *)ctx; | ||
1918 | int index; | ||
1919 | |||
1920 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | ||
1921 | |||
1922 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
1923 | |||
1924 | if (hdata->is_v13) | ||
1925 | index = hdmi_v13_conf_index(adjusted_mode); | ||
1926 | else | ||
1927 | index = hdmi_v14_conf_index(adjusted_mode); | ||
1928 | |||
1929 | /* just return if user desired mode exists. */ | ||
1930 | if (index >= 0) | ||
1931 | return; | ||
1932 | |||
1933 | /* | ||
1934 | * otherwise, find the most suitable mode among modes and change it | ||
1935 | * to adjusted_mode. | ||
1936 | */ | ||
1937 | list_for_each_entry(m, &connector->modes, head) { | ||
1938 | if (hdata->is_v13) | ||
1939 | index = hdmi_v13_conf_index(m); | ||
1940 | else | ||
1941 | index = hdmi_v14_conf_index(m); | ||
1942 | |||
1943 | if (index >= 0) { | ||
1944 | DRM_INFO("desired mode doesn't exist so\n"); | ||
1945 | DRM_INFO("use the most suitable mode among modes.\n"); | ||
1946 | memcpy(adjusted_mode, m, sizeof(*m)); | ||
1947 | break; | ||
1948 | } | ||
1949 | } | ||
1950 | } | ||
1951 | |||
741 | static void hdmi_mode_set(void *ctx, void *mode) | 1952 | static void hdmi_mode_set(void *ctx, void *mode) |
742 | { | 1953 | { |
743 | struct hdmi_context *hdata = (struct hdmi_context *)ctx; | 1954 | struct hdmi_context *hdata = (struct hdmi_context *)ctx; |
@@ -745,13 +1956,22 @@ static void hdmi_mode_set(void *ctx, void *mode) | |||
745 | 1956 | ||
746 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | 1957 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); |
747 | 1958 | ||
748 | conf_idx = hdmi_conf_index(mode); | 1959 | conf_idx = hdmi_conf_index(hdata, mode); |
749 | if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs)) | 1960 | if (conf_idx >= 0) |
750 | hdata->cur_conf = conf_idx; | 1961 | hdata->cur_conf = conf_idx; |
751 | else | 1962 | else |
752 | DRM_DEBUG_KMS("not supported mode\n"); | 1963 | DRM_DEBUG_KMS("not supported mode\n"); |
753 | } | 1964 | } |
754 | 1965 | ||
1966 | static void hdmi_get_max_resol(void *ctx, unsigned int *width, | ||
1967 | unsigned int *height) | ||
1968 | { | ||
1969 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | ||
1970 | |||
1971 | *width = MAX_WIDTH; | ||
1972 | *height = MAX_HEIGHT; | ||
1973 | } | ||
1974 | |||
755 | static void hdmi_commit(void *ctx) | 1975 | static void hdmi_commit(void *ctx) |
756 | { | 1976 | { |
757 | struct hdmi_context *hdata = (struct hdmi_context *)ctx; | 1977 | struct hdmi_context *hdata = (struct hdmi_context *)ctx; |
@@ -770,13 +1990,16 @@ static void hdmi_disable(void *ctx) | |||
770 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | 1990 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); |
771 | 1991 | ||
772 | if (hdata->enabled) { | 1992 | if (hdata->enabled) { |
1993 | hdmi_audio_control(hdata, false); | ||
773 | hdmiphy_conf_reset(hdata); | 1994 | hdmiphy_conf_reset(hdata); |
774 | hdmi_conf_reset(hdata); | 1995 | hdmi_conf_reset(hdata); |
775 | } | 1996 | } |
776 | } | 1997 | } |
777 | 1998 | ||
778 | static struct exynos_hdmi_manager_ops manager_ops = { | 1999 | static struct exynos_hdmi_manager_ops manager_ops = { |
2000 | .mode_fixup = hdmi_mode_fixup, | ||
779 | .mode_set = hdmi_mode_set, | 2001 | .mode_set = hdmi_mode_set, |
2002 | .get_max_resol = hdmi_get_max_resol, | ||
780 | .commit = hdmi_commit, | 2003 | .commit = hdmi_commit, |
781 | .disable = hdmi_disable, | 2004 | .disable = hdmi_disable, |
782 | }; | 2005 | }; |
@@ -926,7 +2149,7 @@ static void hdmi_resource_poweron(struct hdmi_context *hdata) | |||
926 | hdmiphy_conf_reset(hdata); | 2149 | hdmiphy_conf_reset(hdata); |
927 | hdmi_conf_reset(hdata); | 2150 | hdmi_conf_reset(hdata); |
928 | hdmi_conf_init(hdata); | 2151 | hdmi_conf_init(hdata); |
929 | 2152 | hdmi_audio_init(hdata); | |
930 | } | 2153 | } |
931 | 2154 | ||
932 | static void hdmi_resource_poweroff(struct hdmi_context *hdata) | 2155 | static void hdmi_resource_poweroff(struct hdmi_context *hdata) |
@@ -978,14 +2201,12 @@ void hdmi_attach_ddc_client(struct i2c_client *ddc) | |||
978 | if (ddc) | 2201 | if (ddc) |
979 | hdmi_ddc = ddc; | 2202 | hdmi_ddc = ddc; |
980 | } | 2203 | } |
981 | EXPORT_SYMBOL(hdmi_attach_ddc_client); | ||
982 | 2204 | ||
983 | void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy) | 2205 | void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy) |
984 | { | 2206 | { |
985 | if (hdmiphy) | 2207 | if (hdmiphy) |
986 | hdmi_hdmiphy = hdmiphy; | 2208 | hdmi_hdmiphy = hdmiphy; |
987 | } | 2209 | } |
988 | EXPORT_SYMBOL(hdmi_attach_hdmiphy_client); | ||
989 | 2210 | ||
990 | static int __devinit hdmi_probe(struct platform_device *pdev) | 2211 | static int __devinit hdmi_probe(struct platform_device *pdev) |
991 | { | 2212 | { |
@@ -1022,6 +2243,7 @@ static int __devinit hdmi_probe(struct platform_device *pdev) | |||
1022 | 2243 | ||
1023 | platform_set_drvdata(pdev, drm_hdmi_ctx); | 2244 | platform_set_drvdata(pdev, drm_hdmi_ctx); |
1024 | 2245 | ||
2246 | hdata->is_v13 = pdata->is_v13; | ||
1025 | hdata->default_win = pdata->default_win; | 2247 | hdata->default_win = pdata->default_win; |
1026 | hdata->default_timing = &pdata->timing; | 2248 | hdata->default_timing = &pdata->timing; |
1027 | hdata->default_bpp = pdata->bpp; | 2249 | hdata->default_bpp = pdata->bpp; |
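The probe code above pulls the new is_v13 flag (plus the default window, timing and bpp) straight from platform data. A hypothetical board-file fragment for illustration only (the struct and variable names are assumptions; only the field names come from the reads above):

    /* Hypothetical board-file fragment; struct and variable names are assumed. */
    static struct exynos_drm_hdmi_pdata smdk_hdmi_pdata = {
    	.is_v13		= true,	/* select the HDMI v1.3 register layout */
    	.default_win	= 0,
    	.bpp		= 32,
    	.timing		= {
    		.xres		= 1280,
    		.yres		= 720,
    		.refresh	= 60,
    	},
    };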
@@ -1167,10 +2389,3 @@ struct platform_driver hdmi_driver = { | |||
1167 | .pm = &hdmi_pm_ops, | 2389 | .pm = &hdmi_pm_ops, |
1168 | }, | 2390 | }, |
1169 | }; | 2391 | }; |
1170 | EXPORT_SYMBOL(hdmi_driver); | ||
1171 | |||
1172 | MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>"); | ||
1173 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
1174 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
1175 | MODULE_DESCRIPTION("Samsung DRM HDMI core Driver"); | ||
1176 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h index 31d6cf84c1aa..1c3b6d8f1fe7 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.h +++ b/drivers/gpu/drm/exynos/exynos_hdmi.h | |||
@@ -28,56 +28,6 @@ | |||
28 | #ifndef _EXYNOS_HDMI_H_ | 28 | #ifndef _EXYNOS_HDMI_H_ |
29 | #define _EXYNOS_HDMI_H_ | 29 | #define _EXYNOS_HDMI_H_ |
30 | 30 | ||
31 | struct hdmi_conf { | ||
32 | int width; | ||
33 | int height; | ||
34 | int vrefresh; | ||
35 | bool interlace; | ||
36 | const u8 *hdmiphy_data; | ||
37 | const struct hdmi_preset_conf *conf; | ||
38 | }; | ||
39 | |||
40 | struct hdmi_resources { | ||
41 | struct clk *hdmi; | ||
42 | struct clk *sclk_hdmi; | ||
43 | struct clk *sclk_pixel; | ||
44 | struct clk *sclk_hdmiphy; | ||
45 | struct clk *hdmiphy; | ||
46 | struct regulator_bulk_data *regul_bulk; | ||
47 | int regul_count; | ||
48 | }; | ||
49 | |||
50 | struct hdmi_context { | ||
51 | struct device *dev; | ||
52 | struct drm_device *drm_dev; | ||
53 | struct fb_videomode *default_timing; | ||
54 | unsigned int default_win; | ||
55 | unsigned int default_bpp; | ||
56 | bool hpd_handle; | ||
57 | bool enabled; | ||
58 | |||
59 | struct resource *regs_res; | ||
60 | /** base address of HDMI registers */ | ||
61 | void __iomem *regs; | ||
62 | /** HDMI hotplug interrupt */ | ||
63 | unsigned int irq; | ||
64 | /** workqueue for delayed work */ | ||
65 | struct workqueue_struct *wq; | ||
66 | /** hotplug handling work */ | ||
67 | struct work_struct hotplug_work; | ||
68 | |||
69 | struct i2c_client *ddc_port; | ||
70 | struct i2c_client *hdmiphy_port; | ||
71 | |||
72 | /** current hdmiphy conf index */ | ||
73 | int cur_conf; | ||
74 | /** other resources */ | ||
75 | struct hdmi_resources res; | ||
76 | |||
77 | void *parent_ctx; | ||
78 | }; | ||
79 | |||
80 | |||
81 | void hdmi_attach_ddc_client(struct i2c_client *ddc); | 31 | void hdmi_attach_ddc_client(struct i2c_client *ddc); |
82 | void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy); | 32 | void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy); |
83 | 33 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 93846e810e38..4d5f41e19527 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
@@ -36,11 +36,57 @@ | |||
36 | 36 | ||
37 | #include "exynos_drm_drv.h" | 37 | #include "exynos_drm_drv.h" |
38 | #include "exynos_drm_hdmi.h" | 38 | #include "exynos_drm_hdmi.h" |
39 | #include "exynos_hdmi.h" | 39 | |
40 | #include "exynos_mixer.h" | 40 | #define HDMI_OVERLAY_NUMBER 3 |
41 | 41 | ||
42 | #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) | 42 | #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) |
43 | 43 | ||
44 | struct hdmi_win_data { | ||
45 | dma_addr_t dma_addr; | ||
46 | void __iomem *vaddr; | ||
47 | dma_addr_t chroma_dma_addr; | ||
48 | void __iomem *chroma_vaddr; | ||
49 | uint32_t pixel_format; | ||
50 | unsigned int bpp; | ||
51 | unsigned int crtc_x; | ||
52 | unsigned int crtc_y; | ||
53 | unsigned int crtc_width; | ||
54 | unsigned int crtc_height; | ||
55 | unsigned int fb_x; | ||
56 | unsigned int fb_y; | ||
57 | unsigned int fb_width; | ||
58 | unsigned int fb_height; | ||
59 | unsigned int mode_width; | ||
60 | unsigned int mode_height; | ||
61 | unsigned int scan_flags; | ||
62 | }; | ||
63 | |||
64 | struct mixer_resources { | ||
65 | struct device *dev; | ||
66 | int irq; | ||
67 | void __iomem *mixer_regs; | ||
68 | void __iomem *vp_regs; | ||
69 | spinlock_t reg_slock; | ||
70 | struct clk *mixer; | ||
71 | struct clk *vp; | ||
72 | struct clk *sclk_mixer; | ||
73 | struct clk *sclk_hdmi; | ||
74 | struct clk *sclk_dac; | ||
75 | }; | ||
76 | |||
77 | struct mixer_context { | ||
78 | struct fb_videomode *default_timing; | ||
79 | unsigned int default_win; | ||
80 | unsigned int default_bpp; | ||
81 | unsigned int irq; | ||
82 | int pipe; | ||
83 | bool interlace; | ||
84 | bool vp_enabled; | ||
85 | |||
86 | struct mixer_resources mixer_res; | ||
87 | struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER]; | ||
88 | }; | ||
89 | |||
44 | static const u8 filter_y_horiz_tap8[] = { | 90 | static const u8 filter_y_horiz_tap8[] = { |
45 | 0, -1, -1, -1, -1, -1, -1, -1, | 91 | 0, -1, -1, -1, -1, -1, -1, -1, |
46 | -1, -1, -1, -1, -1, 0, 0, 0, | 92 | -1, -1, -1, -1, -1, 0, 0, 0, |
@@ -1066,10 +1112,3 @@ struct platform_driver mixer_driver = { | |||
1066 | .probe = mixer_probe, | 1112 | .probe = mixer_probe, |
1067 | .remove = __devexit_p(mixer_remove), | 1113 | .remove = __devexit_p(mixer_remove), |
1068 | }; | 1114 | }; |
1069 | EXPORT_SYMBOL(mixer_driver); | ||
1070 | |||
1071 | MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>"); | ||
1072 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | ||
1073 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | ||
1074 | MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver"); | ||
1075 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h deleted file mode 100644 index cebacfefc077..000000000000 --- a/drivers/gpu/drm/exynos/exynos_mixer.h +++ /dev/null | |||
@@ -1,92 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * Authors: | ||
5 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
6 | * Inki Dae <inki.dae@samsung.com> | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | |||
28 | #ifndef _EXYNOS_MIXER_H_ | ||
29 | #define _EXYNOS_MIXER_H_ | ||
30 | |||
31 | #define HDMI_OVERLAY_NUMBER 3 | ||
32 | |||
33 | struct hdmi_win_data { | ||
34 | dma_addr_t dma_addr; | ||
35 | void __iomem *vaddr; | ||
36 | dma_addr_t chroma_dma_addr; | ||
37 | void __iomem *chroma_vaddr; | ||
38 | uint32_t pixel_format; | ||
39 | unsigned int bpp; | ||
40 | unsigned int crtc_x; | ||
41 | unsigned int crtc_y; | ||
42 | unsigned int crtc_width; | ||
43 | unsigned int crtc_height; | ||
44 | unsigned int fb_x; | ||
45 | unsigned int fb_y; | ||
46 | unsigned int fb_width; | ||
47 | unsigned int fb_height; | ||
48 | unsigned int mode_width; | ||
49 | unsigned int mode_height; | ||
50 | unsigned int scan_flags; | ||
51 | }; | ||
52 | |||
53 | struct mixer_resources { | ||
54 | struct device *dev; | ||
55 | /** interrupt index */ | ||
56 | int irq; | ||
57 | /** pointer to Mixer registers */ | ||
58 | void __iomem *mixer_regs; | ||
59 | /** pointer to Video Processor registers */ | ||
60 | void __iomem *vp_regs; | ||
61 | /** spinlock for protection of registers */ | ||
62 | spinlock_t reg_slock; | ||
63 | /** other resources */ | ||
64 | struct clk *mixer; | ||
65 | struct clk *vp; | ||
66 | struct clk *sclk_mixer; | ||
67 | struct clk *sclk_hdmi; | ||
68 | struct clk *sclk_dac; | ||
69 | }; | ||
70 | |||
71 | struct mixer_context { | ||
72 | unsigned int default_win; | ||
73 | struct fb_videomode *default_timing; | ||
74 | unsigned int default_bpp; | ||
75 | |||
76 | /** mixer interrupt */ | ||
77 | unsigned int irq; | ||
78 | /** current crtc pipe for vblank */ | ||
79 | int pipe; | ||
80 | /** interlace scan mode */ | ||
81 | bool interlace; | ||
82 | /** vp enabled status */ | ||
83 | bool vp_enabled; | ||
84 | |||
85 | /** mixer and vp resources */ | ||
86 | struct mixer_resources mixer_res; | ||
87 | |||
88 | /** overlay window data */ | ||
89 | struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER]; | ||
90 | }; | ||
91 | |||
92 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h index 72e6b52be740..3c04bea842ce 100644 --- a/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/drivers/gpu/drm/exynos/regs-hdmi.h | |||
@@ -19,64 +19,67 @@ | |||
19 | * Register part | 19 | * Register part |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* HDMI Version 1.3 & Common */ | ||
22 | #define HDMI_CTRL_BASE(x) ((x) + 0x00000000) | 23 | #define HDMI_CTRL_BASE(x) ((x) + 0x00000000) |
23 | #define HDMI_CORE_BASE(x) ((x) + 0x00010000) | 24 | #define HDMI_CORE_BASE(x) ((x) + 0x00010000) |
25 | #define HDMI_I2S_BASE(x) ((x) + 0x00040000) | ||
24 | #define HDMI_TG_BASE(x) ((x) + 0x00050000) | 26 | #define HDMI_TG_BASE(x) ((x) + 0x00050000) |
25 | 27 | ||
26 | /* Control registers */ | 28 | /* Control registers */ |
27 | #define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000) | 29 | #define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000) |
28 | #define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004) | 30 | #define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004) |
29 | #define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C) | 31 | #define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C) |
30 | #define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014) | 32 | #define HDMI_V13_PHY_RSTOUT HDMI_CTRL_BASE(0x0014) |
31 | #define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018) | 33 | #define HDMI_V13_PHY_VPLL HDMI_CTRL_BASE(0x0018) |
32 | #define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C) | 34 | #define HDMI_V13_PHY_CMU HDMI_CTRL_BASE(0x001C) |
33 | #define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020) | 35 | #define HDMI_V13_CORE_RSTOUT HDMI_CTRL_BASE(0x0020) |
34 | 36 | ||
35 | /* Core registers */ | 37 | /* Core registers */ |
36 | #define HDMI_CON_0 HDMI_CORE_BASE(0x0000) | 38 | #define HDMI_CON_0 HDMI_CORE_BASE(0x0000) |
37 | #define HDMI_CON_1 HDMI_CORE_BASE(0x0004) | 39 | #define HDMI_CON_1 HDMI_CORE_BASE(0x0004) |
38 | #define HDMI_CON_2 HDMI_CORE_BASE(0x0008) | 40 | #define HDMI_CON_2 HDMI_CORE_BASE(0x0008) |
39 | #define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010) | 41 | #define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010) |
40 | #define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014) | 42 | #define HDMI_V13_PHY_STATUS HDMI_CORE_BASE(0x0014) |
41 | #define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020) | 43 | #define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020) |
42 | #define HDMI_HPD HDMI_CORE_BASE(0x0030) | 44 | #define HDMI_HPD HDMI_CORE_BASE(0x0030) |
43 | #define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040) | 45 | #define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040) |
44 | #define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050) | 46 | #define HDMI_ENC_EN HDMI_CORE_BASE(0x0044) |
45 | #define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054) | 47 | #define HDMI_V13_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050) |
46 | #define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058) | 48 | #define HDMI_V13_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054) |
49 | #define HDMI_V13_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058) | ||
47 | #define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0) | 50 | #define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0) |
48 | #define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4) | 51 | #define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4) |
49 | #define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0) | 52 | #define HDMI_V13_V_BLANK_0 HDMI_CORE_BASE(0x00B0) |
50 | #define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4) | 53 | #define HDMI_V13_V_BLANK_1 HDMI_CORE_BASE(0x00B4) |
51 | #define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8) | 54 | #define HDMI_V13_V_BLANK_2 HDMI_CORE_BASE(0x00B8) |
52 | #define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0) | 55 | #define HDMI_V13_H_V_LINE_0 HDMI_CORE_BASE(0x00C0) |
53 | #define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4) | 56 | #define HDMI_V13_H_V_LINE_1 HDMI_CORE_BASE(0x00C4) |
54 | #define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8) | 57 | #define HDMI_V13_H_V_LINE_2 HDMI_CORE_BASE(0x00C8) |
55 | #define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4) | 58 | #define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4) |
56 | #define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8) | 59 | #define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8) |
57 | #define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110) | 60 | #define HDMI_V13_V_BLANK_F_0 HDMI_CORE_BASE(0x0110) |
58 | #define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114) | 61 | #define HDMI_V13_V_BLANK_F_1 HDMI_CORE_BASE(0x0114) |
59 | #define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118) | 62 | #define HDMI_V13_V_BLANK_F_2 HDMI_CORE_BASE(0x0118) |
60 | #define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120) | 63 | #define HDMI_V13_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120) |
61 | #define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124) | 64 | #define HDMI_V13_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124) |
62 | #define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128) | 65 | #define HDMI_V13_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128) |
63 | #define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130) | 66 | #define HDMI_V13_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130) |
64 | #define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134) | 67 | #define HDMI_V13_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134) |
65 | #define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138) | 68 | #define HDMI_V13_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138) |
66 | #define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140) | 69 | #define HDMI_V13_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140) |
67 | #define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144) | 70 | #define HDMI_V13_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144) |
68 | #define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148) | 71 | #define HDMI_V13_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148) |
69 | #define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150) | 72 | #define HDMI_V13_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150) |
70 | #define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154) | 73 | #define HDMI_V13_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154) |
71 | #define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158) | 74 | #define HDMI_V13_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158) |
72 | #define HDMI_ACR_CON HDMI_CORE_BASE(0x0180) | 75 | #define HDMI_V13_ACR_CON HDMI_CORE_BASE(0x0180) |
73 | #define HDMI_AVI_CON HDMI_CORE_BASE(0x0300) | 76 | #define HDMI_V13_AVI_CON HDMI_CORE_BASE(0x0300) |
74 | #define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n)) | 77 | #define HDMI_V13_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n)) |
75 | #define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0) | 78 | #define HDMI_V13_DC_CONTROL HDMI_CORE_BASE(0x05C0) |
76 | #define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4) | 79 | #define HDMI_V13_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4) |
77 | #define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8) | 80 | #define HDMI_V13_HPD_GEN HDMI_CORE_BASE(0x05C8) |
78 | #define HDMI_AUI_CON HDMI_CORE_BASE(0x0360) | 81 | #define HDMI_V13_AUI_CON HDMI_CORE_BASE(0x0360) |
79 | #define HDMI_SPD_CON HDMI_CORE_BASE(0x0400) | 82 | #define HDMI_V13_SPD_CON HDMI_CORE_BASE(0x0400) |
80 | 83 | ||
81 | /* Timing generator registers */ | 84 | /* Timing generator registers */ |
82 | #define HDMI_TG_CMD HDMI_TG_BASE(0x0000) | 85 | #define HDMI_TG_CMD HDMI_TG_BASE(0x0000) |
@@ -130,6 +133,9 @@ | |||
130 | 133 | ||
131 | /* HDMI_CON_0 */ | 134 | /* HDMI_CON_0 */ |
132 | #define HDMI_BLUE_SCR_EN (1 << 5) | 135 | #define HDMI_BLUE_SCR_EN (1 << 5) |
136 | #define HDMI_ASP_EN (1 << 2) | ||
137 | #define HDMI_ASP_DIS (0 << 2) | ||
138 | #define HDMI_ASP_MASK (1 << 2) | ||
133 | #define HDMI_EN (1 << 0) | 139 | #define HDMI_EN (1 << 0) |
134 | 140 | ||
135 | /* HDMI_PHY_STATUS */ | 141 | /* HDMI_PHY_STATUS */ |
@@ -138,10 +144,418 @@ | |||
138 | /* HDMI_MODE_SEL */ | 144 | /* HDMI_MODE_SEL */ |
139 | #define HDMI_MODE_HDMI_EN (1 << 1) | 145 | #define HDMI_MODE_HDMI_EN (1 << 1) |
140 | #define HDMI_MODE_DVI_EN (1 << 0) | 146 | #define HDMI_MODE_DVI_EN (1 << 0) |
147 | #define HDMI_DVI_MODE_EN (1) | ||
148 | #define HDMI_DVI_MODE_DIS (0) | ||
141 | #define HDMI_MODE_MASK (3 << 0) | 149 | #define HDMI_MODE_MASK (3 << 0) |
142 | 150 | ||
143 | /* HDMI_TG_CMD */ | 151 | /* HDMI_TG_CMD */ |
144 | #define HDMI_TG_EN (1 << 0) | 152 | #define HDMI_TG_EN (1 << 0) |
145 | #define HDMI_FIELD_EN (1 << 1) | 153 | #define HDMI_FIELD_EN (1 << 1) |
146 | 154 | ||
155 | |||
156 | /* HDMI Version 1.4 */ | ||
157 | /* Control registers */ | ||
158 | /* #define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000) */ | ||
159 | /* #define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004) */ | ||
160 | #define HDMI_HDCP_KEY_LOAD HDMI_CTRL_BASE(0x0008) | ||
161 | /* #define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C) */ | ||
162 | #define HDMI_INTC_CON_1 HDMI_CTRL_BASE(0x0010) | ||
163 | #define HDMI_INTC_FLAG_1 HDMI_CTRL_BASE(0x0014) | ||
164 | #define HDMI_PHY_STATUS_0 HDMI_CTRL_BASE(0x0020) | ||
165 | #define HDMI_PHY_STATUS_CMU HDMI_CTRL_BASE(0x0024) | ||
166 | #define HDMI_PHY_STATUS_PLL HDMI_CTRL_BASE(0x0028) | ||
167 | #define HDMI_PHY_CON_0 HDMI_CTRL_BASE(0x0030) | ||
168 | #define HDMI_HPD_CTRL HDMI_CTRL_BASE(0x0040) | ||
169 | #define HDMI_HPD_ST HDMI_CTRL_BASE(0x0044) | ||
170 | #define HDMI_HPD_TH_X HDMI_CTRL_BASE(0x0050) | ||
171 | #define HDMI_AUDIO_CLKSEL HDMI_CTRL_BASE(0x0070) | ||
172 | #define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0074) | ||
173 | #define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0078) | ||
174 | #define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C) | ||
175 | #define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080) | ||
176 | |||
177 | /* Video related registers */ | ||
178 | #define HDMI_YMAX HDMI_CORE_BASE(0x0060) | ||
179 | #define HDMI_YMIN HDMI_CORE_BASE(0x0064) | ||
180 | #define HDMI_CMAX HDMI_CORE_BASE(0x0068) | ||
181 | #define HDMI_CMIN HDMI_CORE_BASE(0x006C) | ||
182 | |||
183 | #define HDMI_V2_BLANK_0 HDMI_CORE_BASE(0x00B0) | ||
184 | #define HDMI_V2_BLANK_1 HDMI_CORE_BASE(0x00B4) | ||
185 | #define HDMI_V1_BLANK_0 HDMI_CORE_BASE(0x00B8) | ||
186 | #define HDMI_V1_BLANK_1 HDMI_CORE_BASE(0x00BC) | ||
187 | |||
188 | #define HDMI_V_LINE_0 HDMI_CORE_BASE(0x00C0) | ||
189 | #define HDMI_V_LINE_1 HDMI_CORE_BASE(0x00C4) | ||
190 | #define HDMI_H_LINE_0 HDMI_CORE_BASE(0x00C8) | ||
191 | #define HDMI_H_LINE_1 HDMI_CORE_BASE(0x00CC) | ||
192 | |||
193 | #define HDMI_HSYNC_POL HDMI_CORE_BASE(0x00E0) | ||
194 | |||
195 | #define HDMI_V_BLANK_F0_0 HDMI_CORE_BASE(0x0110) | ||
196 | #define HDMI_V_BLANK_F0_1 HDMI_CORE_BASE(0x0114) | ||
197 | #define HDMI_V_BLANK_F1_0 HDMI_CORE_BASE(0x0118) | ||
198 | #define HDMI_V_BLANK_F1_1 HDMI_CORE_BASE(0x011C) | ||
199 | |||
200 | #define HDMI_H_SYNC_START_0 HDMI_CORE_BASE(0x0120) | ||
201 | #define HDMI_H_SYNC_START_1 HDMI_CORE_BASE(0x0124) | ||
202 | #define HDMI_H_SYNC_END_0 HDMI_CORE_BASE(0x0128) | ||
203 | #define HDMI_H_SYNC_END_1 HDMI_CORE_BASE(0x012C) | ||
204 | |||
205 | #define HDMI_V_SYNC_LINE_BEF_2_0 HDMI_CORE_BASE(0x0130) | ||
206 | #define HDMI_V_SYNC_LINE_BEF_2_1 HDMI_CORE_BASE(0x0134) | ||
207 | #define HDMI_V_SYNC_LINE_BEF_1_0 HDMI_CORE_BASE(0x0138) | ||
208 | #define HDMI_V_SYNC_LINE_BEF_1_1 HDMI_CORE_BASE(0x013C) | ||
209 | |||
210 | #define HDMI_V_SYNC_LINE_AFT_2_0 HDMI_CORE_BASE(0x0140) | ||
211 | #define HDMI_V_SYNC_LINE_AFT_2_1 HDMI_CORE_BASE(0x0144) | ||
212 | #define HDMI_V_SYNC_LINE_AFT_1_0 HDMI_CORE_BASE(0x0148) | ||
213 | #define HDMI_V_SYNC_LINE_AFT_1_1 HDMI_CORE_BASE(0x014C) | ||
214 | |||
215 | #define HDMI_V_SYNC_LINE_AFT_PXL_2_0 HDMI_CORE_BASE(0x0150) | ||
216 | #define HDMI_V_SYNC_LINE_AFT_PXL_2_1 HDMI_CORE_BASE(0x0154) | ||
217 | #define HDMI_V_SYNC_LINE_AFT_PXL_1_0 HDMI_CORE_BASE(0x0158) | ||
218 | #define HDMI_V_SYNC_LINE_AFT_PXL_1_1 HDMI_CORE_BASE(0x015C) | ||
219 | |||
220 | #define HDMI_V_BLANK_F2_0 HDMI_CORE_BASE(0x0160) | ||
221 | #define HDMI_V_BLANK_F2_1 HDMI_CORE_BASE(0x0164) | ||
222 | #define HDMI_V_BLANK_F3_0 HDMI_CORE_BASE(0x0168) | ||
223 | #define HDMI_V_BLANK_F3_1 HDMI_CORE_BASE(0x016C) | ||
224 | #define HDMI_V_BLANK_F4_0 HDMI_CORE_BASE(0x0170) | ||
225 | #define HDMI_V_BLANK_F4_1 HDMI_CORE_BASE(0x0174) | ||
226 | #define HDMI_V_BLANK_F5_0 HDMI_CORE_BASE(0x0178) | ||
227 | #define HDMI_V_BLANK_F5_1 HDMI_CORE_BASE(0x017C) | ||
228 | |||
229 | #define HDMI_V_SYNC_LINE_AFT_3_0 HDMI_CORE_BASE(0x0180) | ||
230 | #define HDMI_V_SYNC_LINE_AFT_3_1 HDMI_CORE_BASE(0x0184) | ||
231 | #define HDMI_V_SYNC_LINE_AFT_4_0 HDMI_CORE_BASE(0x0188) | ||
232 | #define HDMI_V_SYNC_LINE_AFT_4_1 HDMI_CORE_BASE(0x018C) | ||
233 | #define HDMI_V_SYNC_LINE_AFT_5_0 HDMI_CORE_BASE(0x0190) | ||
234 | #define HDMI_V_SYNC_LINE_AFT_5_1 HDMI_CORE_BASE(0x0194) | ||
235 | #define HDMI_V_SYNC_LINE_AFT_6_0 HDMI_CORE_BASE(0x0198) | ||
236 | #define HDMI_V_SYNC_LINE_AFT_6_1 HDMI_CORE_BASE(0x019C) | ||
237 | |||
238 | #define HDMI_V_SYNC_LINE_AFT_PXL_3_0 HDMI_CORE_BASE(0x01A0) | ||
239 | #define HDMI_V_SYNC_LINE_AFT_PXL_3_1 HDMI_CORE_BASE(0x01A4) | ||
240 | #define HDMI_V_SYNC_LINE_AFT_PXL_4_0 HDMI_CORE_BASE(0x01A8) | ||
241 | #define HDMI_V_SYNC_LINE_AFT_PXL_4_1 HDMI_CORE_BASE(0x01AC) | ||
242 | #define HDMI_V_SYNC_LINE_AFT_PXL_5_0 HDMI_CORE_BASE(0x01B0) | ||
243 | #define HDMI_V_SYNC_LINE_AFT_PXL_5_1 HDMI_CORE_BASE(0x01B4) | ||
244 | #define HDMI_V_SYNC_LINE_AFT_PXL_6_0 HDMI_CORE_BASE(0x01B8) | ||
245 | #define HDMI_V_SYNC_LINE_AFT_PXL_6_1 HDMI_CORE_BASE(0x01BC) | ||
246 | |||
247 | #define HDMI_VACT_SPACE_1_0 HDMI_CORE_BASE(0x01C0) | ||
248 | #define HDMI_VACT_SPACE_1_1 HDMI_CORE_BASE(0x01C4) | ||
249 | #define HDMI_VACT_SPACE_2_0 HDMI_CORE_BASE(0x01C8) | ||
250 | #define HDMI_VACT_SPACE_2_1 HDMI_CORE_BASE(0x01CC) | ||
251 | #define HDMI_VACT_SPACE_3_0 HDMI_CORE_BASE(0x01D0) | ||
252 | #define HDMI_VACT_SPACE_3_1 HDMI_CORE_BASE(0x01D4) | ||
253 | #define HDMI_VACT_SPACE_4_0 HDMI_CORE_BASE(0x01D8) | ||
254 | #define HDMI_VACT_SPACE_4_1 HDMI_CORE_BASE(0x01DC) | ||
255 | #define HDMI_VACT_SPACE_5_0 HDMI_CORE_BASE(0x01E0) | ||
256 | #define HDMI_VACT_SPACE_5_1 HDMI_CORE_BASE(0x01E4) | ||
257 | #define HDMI_VACT_SPACE_6_0 HDMI_CORE_BASE(0x01E8) | ||
258 | #define HDMI_VACT_SPACE_6_1 HDMI_CORE_BASE(0x01EC) | ||
259 | |||
260 | #define HDMI_GCP_CON HDMI_CORE_BASE(0x0200) | ||
261 | #define HDMI_GCP_BYTE1 HDMI_CORE_BASE(0x0210) | ||
262 | #define HDMI_GCP_BYTE2 HDMI_CORE_BASE(0x0214) | ||
263 | #define HDMI_GCP_BYTE3 HDMI_CORE_BASE(0x0218) | ||
264 | |||
265 | /* Audio related registers */ | ||
266 | #define HDMI_ASP_CON HDMI_CORE_BASE(0x0300) | ||
267 | #define HDMI_ASP_SP_FLAT HDMI_CORE_BASE(0x0304) | ||
268 | #define HDMI_ASP_CHCFG0 HDMI_CORE_BASE(0x0310) | ||
269 | #define HDMI_ASP_CHCFG1 HDMI_CORE_BASE(0x0314) | ||
270 | #define HDMI_ASP_CHCFG2 HDMI_CORE_BASE(0x0318) | ||
271 | #define HDMI_ASP_CHCFG3 HDMI_CORE_BASE(0x031C) | ||
272 | |||
273 | #define HDMI_ACR_CON HDMI_CORE_BASE(0x0400) | ||
274 | #define HDMI_ACR_MCTS0 HDMI_CORE_BASE(0x0410) | ||
275 | #define HDMI_ACR_MCTS1 HDMI_CORE_BASE(0x0414) | ||
276 | #define HDMI_ACR_MCTS2 HDMI_CORE_BASE(0x0418) | ||
277 | #define HDMI_ACR_CTS0 HDMI_CORE_BASE(0x0420) | ||
278 | #define HDMI_ACR_CTS1 HDMI_CORE_BASE(0x0424) | ||
279 | #define HDMI_ACR_CTS2 HDMI_CORE_BASE(0x0428) | ||
280 | #define HDMI_ACR_N0 HDMI_CORE_BASE(0x0430) | ||
281 | #define HDMI_ACR_N1 HDMI_CORE_BASE(0x0434) | ||
282 | #define HDMI_ACR_N2 HDMI_CORE_BASE(0x0438) | ||
283 | |||
284 | /* Packet related registers */ | ||
285 | #define HDMI_ACP_CON HDMI_CORE_BASE(0x0500) | ||
286 | #define HDMI_ACP_TYPE HDMI_CORE_BASE(0x0514) | ||
287 | #define HDMI_ACP_DATA(n) HDMI_CORE_BASE(0x0520 + 4 * (n)) | ||
288 | |||
289 | #define HDMI_ISRC_CON HDMI_CORE_BASE(0x0600) | ||
290 | #define HDMI_ISRC1_HEADER1 HDMI_CORE_BASE(0x0614) | ||
291 | #define HDMI_ISRC1_DATA(n) HDMI_CORE_BASE(0x0620 + 4 * (n)) | ||
292 | #define HDMI_ISRC2_DATA(n) HDMI_CORE_BASE(0x06A0 + 4 * (n)) | ||
293 | |||
294 | #define HDMI_AVI_CON HDMI_CORE_BASE(0x0700) | ||
295 | #define HDMI_AVI_HEADER0 HDMI_CORE_BASE(0x0710) | ||
296 | #define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) | ||
297 | #define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) | ||
298 | #define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) | ||
299 | #define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n)) | ||
300 | |||
301 | #define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) | ||
302 | #define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) | ||
303 | #define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) | ||
304 | #define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) | ||
305 | #define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) | ||
306 | #define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n)) | ||
307 | |||
308 | #define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) | ||
309 | #define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) | ||
310 | #define HDMI_MPG_DATA(n) HDMI_CORE_BASE(0x0920 + 4 * (n)) | ||
311 | |||
312 | #define HDMI_SPD_CON HDMI_CORE_BASE(0x0A00) | ||
313 | #define HDMI_SPD_HEADER0 HDMI_CORE_BASE(0x0A10) | ||
314 | #define HDMI_SPD_HEADER1 HDMI_CORE_BASE(0x0A14) | ||
315 | #define HDMI_SPD_HEADER2 HDMI_CORE_BASE(0x0A18) | ||
316 | #define HDMI_SPD_DATA(n) HDMI_CORE_BASE(0x0A20 + 4 * (n)) | ||
317 | |||
318 | #define HDMI_GAMUT_CON HDMI_CORE_BASE(0x0B00) | ||
319 | #define HDMI_GAMUT_HEADER0 HDMI_CORE_BASE(0x0B10) | ||
320 | #define HDMI_GAMUT_HEADER1 HDMI_CORE_BASE(0x0B14) | ||
321 | #define HDMI_GAMUT_HEADER2 HDMI_CORE_BASE(0x0B18) | ||
322 | #define HDMI_GAMUT_METADATA(n) HDMI_CORE_BASE(0x0B20 + 4 * (n)) | ||
323 | |||
324 | #define HDMI_VSI_CON HDMI_CORE_BASE(0x0C00) | ||
325 | #define HDMI_VSI_HEADER0 HDMI_CORE_BASE(0x0C10) | ||
326 | #define HDMI_VSI_HEADER1 HDMI_CORE_BASE(0x0C14) | ||
327 | #define HDMI_VSI_HEADER2 HDMI_CORE_BASE(0x0C18) | ||
328 | #define HDMI_VSI_DATA(n) HDMI_CORE_BASE(0x0C20 + 4 * (n)) | ||
329 | |||
330 | #define HDMI_DC_CONTROL HDMI_CORE_BASE(0x0D00) | ||
331 | #define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x0D04) | ||
332 | |||
333 | #define HDMI_AN_SEED_SEL HDMI_CORE_BASE(0x0E48) | ||
334 | #define HDMI_AN_SEED_0 HDMI_CORE_BASE(0x0E58) | ||
335 | #define HDMI_AN_SEED_1 HDMI_CORE_BASE(0x0E5C) | ||
336 | #define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) | ||
337 | #define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) | ||
338 | |||
339 | /* HDCP related registers */ | ||
340 | #define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) | ||
341 | #define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) | ||
342 | |||
343 | #define HDMI_HDCP_KSV_LIST_CON HDMI_CORE_BASE(0x7064) | ||
344 | #define HDMI_HDCP_SHA_RESULT HDMI_CORE_BASE(0x7070) | ||
345 | #define HDMI_HDCP_CTRL1 HDMI_CORE_BASE(0x7080) | ||
346 | #define HDMI_HDCP_CTRL2 HDMI_CORE_BASE(0x7084) | ||
347 | #define HDMI_HDCP_CHECK_RESULT HDMI_CORE_BASE(0x7090) | ||
348 | #define HDMI_HDCP_BKSV(n) HDMI_CORE_BASE(0x70A0 + 4 * (n)) | ||
349 | #define HDMI_HDCP_AKSV(n) HDMI_CORE_BASE(0x70C0 + 4 * (n)) | ||
350 | #define HDMI_HDCP_AN(n) HDMI_CORE_BASE(0x70E0 + 4 * (n)) | ||
351 | |||
352 | #define HDMI_HDCP_BCAPS HDMI_CORE_BASE(0x7100) | ||
353 | #define HDMI_HDCP_BSTATUS_0 HDMI_CORE_BASE(0x7110) | ||
354 | #define HDMI_HDCP_BSTATUS_1 HDMI_CORE_BASE(0x7114) | ||
355 | #define HDMI_HDCP_RI_0 HDMI_CORE_BASE(0x7140) | ||
356 | #define HDMI_HDCP_RI_1 HDMI_CORE_BASE(0x7144) | ||
357 | #define HDMI_HDCP_I2C_INT HDMI_CORE_BASE(0x7180) | ||
358 | #define HDMI_HDCP_AN_INT HDMI_CORE_BASE(0x7190) | ||
359 | #define HDMI_HDCP_WDT_INT HDMI_CORE_BASE(0x71A0) | ||
360 | #define HDMI_HDCP_RI_INT HDMI_CORE_BASE(0x71B0) | ||
361 | #define HDMI_HDCP_RI_COMPARE_0 HDMI_CORE_BASE(0x71D0) | ||
362 | #define HDMI_HDCP_RI_COMPARE_1 HDMI_CORE_BASE(0x71D4) | ||
363 | #define HDMI_HDCP_FRAME_COUNT HDMI_CORE_BASE(0x71E0) | ||
364 | |||
365 | #define HDMI_RGB_ROUND_EN HDMI_CORE_BASE(0xD500) | ||
366 | #define HDMI_VACT_SPACE_R_0 HDMI_CORE_BASE(0xD504) | ||
367 | #define HDMI_VACT_SPACE_R_1 HDMI_CORE_BASE(0xD508) | ||
368 | #define HDMI_VACT_SPACE_G_0 HDMI_CORE_BASE(0xD50C) | ||
369 | #define HDMI_VACT_SPACE_G_1 HDMI_CORE_BASE(0xD510) | ||
370 | #define HDMI_VACT_SPACE_B_0 HDMI_CORE_BASE(0xD514) | ||
371 | #define HDMI_VACT_SPACE_B_1 HDMI_CORE_BASE(0xD518) | ||
372 | |||
373 | #define HDMI_BLUE_SCREEN_B_0 HDMI_CORE_BASE(0xD520) | ||
374 | #define HDMI_BLUE_SCREEN_B_1 HDMI_CORE_BASE(0xD524) | ||
375 | #define HDMI_BLUE_SCREEN_G_0 HDMI_CORE_BASE(0xD528) | ||
376 | #define HDMI_BLUE_SCREEN_G_1 HDMI_CORE_BASE(0xD52C) | ||
377 | #define HDMI_BLUE_SCREEN_R_0 HDMI_CORE_BASE(0xD530) | ||
378 | #define HDMI_BLUE_SCREEN_R_1 HDMI_CORE_BASE(0xD534) | ||
379 | |||
380 | /* HDMI I2S register */ | ||
381 | #define HDMI_I2S_CLK_CON HDMI_I2S_BASE(0x000) | ||
382 | #define HDMI_I2S_CON_1 HDMI_I2S_BASE(0x004) | ||
383 | #define HDMI_I2S_CON_2 HDMI_I2S_BASE(0x008) | ||
384 | #define HDMI_I2S_PIN_SEL_0 HDMI_I2S_BASE(0x00c) | ||
385 | #define HDMI_I2S_PIN_SEL_1 HDMI_I2S_BASE(0x010) | ||
386 | #define HDMI_I2S_PIN_SEL_2 HDMI_I2S_BASE(0x014) | ||
387 | #define HDMI_I2S_PIN_SEL_3 HDMI_I2S_BASE(0x018) | ||
388 | #define HDMI_I2S_DSD_CON HDMI_I2S_BASE(0x01c) | ||
389 | #define HDMI_I2S_MUX_CON HDMI_I2S_BASE(0x020) | ||
390 | #define HDMI_I2S_CH_ST_CON HDMI_I2S_BASE(0x024) | ||
391 | #define HDMI_I2S_CH_ST_0 HDMI_I2S_BASE(0x028) | ||
392 | #define HDMI_I2S_CH_ST_1 HDMI_I2S_BASE(0x02c) | ||
393 | #define HDMI_I2S_CH_ST_2 HDMI_I2S_BASE(0x030) | ||
394 | #define HDMI_I2S_CH_ST_3 HDMI_I2S_BASE(0x034) | ||
395 | #define HDMI_I2S_CH_ST_4 HDMI_I2S_BASE(0x038) | ||
396 | #define HDMI_I2S_CH_ST_SH_0 HDMI_I2S_BASE(0x03c) | ||
397 | #define HDMI_I2S_CH_ST_SH_1 HDMI_I2S_BASE(0x040) | ||
398 | #define HDMI_I2S_CH_ST_SH_2 HDMI_I2S_BASE(0x044) | ||
399 | #define HDMI_I2S_CH_ST_SH_3 HDMI_I2S_BASE(0x048) | ||
400 | #define HDMI_I2S_CH_ST_SH_4 HDMI_I2S_BASE(0x04c) | ||
401 | #define HDMI_I2S_MUX_CH HDMI_I2S_BASE(0x054) | ||
402 | #define HDMI_I2S_MUX_CUV HDMI_I2S_BASE(0x058) | ||
403 | |||
404 | /* I2S bit definition */ | ||
405 | |||
406 | /* I2S_CLK_CON */ | ||
407 | #define HDMI_I2S_CLK_DIS (0) | ||
408 | #define HDMI_I2S_CLK_EN (1) | ||
409 | |||
410 | /* I2S_CON_1 */ | ||
411 | #define HDMI_I2S_SCLK_FALLING_EDGE (0 << 1) | ||
412 | #define HDMI_I2S_SCLK_RISING_EDGE (1 << 1) | ||
413 | #define HDMI_I2S_L_CH_LOW_POL (0) | ||
414 | #define HDMI_I2S_L_CH_HIGH_POL (1) | ||
415 | |||
416 | /* I2S_CON_2 */ | ||
417 | #define HDMI_I2S_MSB_FIRST_MODE (0 << 6) | ||
418 | #define HDMI_I2S_LSB_FIRST_MODE (1 << 6) | ||
419 | #define HDMI_I2S_BIT_CH_32FS (0 << 4) | ||
420 | #define HDMI_I2S_BIT_CH_48FS (1 << 4) | ||
421 | #define HDMI_I2S_BIT_CH_RESERVED (2 << 4) | ||
422 | #define HDMI_I2S_SDATA_16BIT (1 << 2) | ||
423 | #define HDMI_I2S_SDATA_20BIT (2 << 2) | ||
424 | #define HDMI_I2S_SDATA_24BIT (3 << 2) | ||
425 | #define HDMI_I2S_BASIC_FORMAT (0) | ||
426 | #define HDMI_I2S_L_JUST_FORMAT (2) | ||
427 | #define HDMI_I2S_R_JUST_FORMAT (3) | ||
428 | #define HDMI_I2S_CON_2_CLR (~(0xFF)) | ||
429 | #define HDMI_I2S_SET_BIT_CH(x) (((x) & 0x7) << 4) | ||
430 | #define HDMI_I2S_SET_SDATA_BIT(x) (((x) & 0x7) << 2) | ||
431 | |||
432 | /* I2S_PIN_SEL_0 */ | ||
433 | #define HDMI_I2S_SEL_SCLK(x) (((x) & 0x7) << 4) | ||
434 | #define HDMI_I2S_SEL_LRCK(x) ((x) & 0x7) | ||
435 | |||
436 | /* I2S_PIN_SEL_1 */ | ||
437 | #define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4) | ||
438 | #define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7) | ||
439 | |||
440 | /* I2S_PIN_SEL_2 */ | ||
441 | #define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4) | ||
442 | #define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7) | ||
443 | |||
444 | /* I2S_PIN_SEL_3 */ | ||
445 | #define HDMI_I2S_SEL_DSD(x) ((x) & 0x7) | ||
446 | |||
447 | /* I2S_DSD_CON */ | ||
448 | #define HDMI_I2S_DSD_CLK_RI_EDGE (1 << 1) | ||
449 | #define HDMI_I2S_DSD_CLK_FA_EDGE (0 << 1) | ||
450 | #define HDMI_I2S_DSD_ENABLE (1) | ||
451 | #define HDMI_I2S_DSD_DISABLE (0) | ||
452 | |||
453 | /* I2S_MUX_CON */ | ||
454 | #define HDMI_I2S_NOISE_FILTER_ZERO (0 << 5) | ||
455 | #define HDMI_I2S_NOISE_FILTER_2_STAGE (1 << 5) | ||
456 | #define HDMI_I2S_NOISE_FILTER_3_STAGE (2 << 5) | ||
457 | #define HDMI_I2S_NOISE_FILTER_4_STAGE (3 << 5) | ||
458 | #define HDMI_I2S_NOISE_FILTER_5_STAGE (4 << 5) | ||
459 | #define HDMI_I2S_IN_DISABLE (1 << 4) | ||
460 | #define HDMI_I2S_IN_ENABLE (0 << 4) | ||
461 | #define HDMI_I2S_AUD_SPDIF (0 << 2) | ||
462 | #define HDMI_I2S_AUD_I2S (1 << 2) | ||
463 | #define HDMI_I2S_AUD_DSD (2 << 2) | ||
464 | #define HDMI_I2S_CUV_SPDIF_ENABLE (0 << 1) | ||
465 | #define HDMI_I2S_CUV_I2S_ENABLE (1 << 1) | ||
466 | #define HDMI_I2S_MUX_DISABLE (0) | ||
467 | #define HDMI_I2S_MUX_ENABLE (1) | ||
468 | #define HDMI_I2S_MUX_CON_CLR (~(0xFF)) | ||
469 | |||
470 | /* I2S_CH_ST_CON */ | ||
471 | #define HDMI_I2S_CH_STATUS_RELOAD (1) | ||
472 | #define HDMI_I2S_CH_ST_CON_CLR (~(1)) | ||
473 | |||
474 | /* I2S_CH_ST_0 / I2S_CH_ST_SH_0 */ | ||
475 | #define HDMI_I2S_CH_STATUS_MODE_0 (0 << 6) | ||
476 | #define HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH (0 << 3) | ||
477 | #define HDMI_I2S_2AUD_CH_WITH_PREEMPH (1 << 3) | ||
478 | #define HDMI_I2S_DEFAULT_EMPHASIS (0 << 3) | ||
479 | #define HDMI_I2S_COPYRIGHT (0 << 2) | ||
480 | #define HDMI_I2S_NO_COPYRIGHT (1 << 2) | ||
481 | #define HDMI_I2S_LINEAR_PCM (0 << 1) | ||
482 | #define HDMI_I2S_NO_LINEAR_PCM (1 << 1) | ||
483 | #define HDMI_I2S_CONSUMER_FORMAT (0) | ||
484 | #define HDMI_I2S_PROF_FORMAT (1) | ||
485 | #define HDMI_I2S_CH_ST_0_CLR (~(0xFF)) | ||
486 | |||
487 | /* I2S_CH_ST_1 / I2S_CH_ST_SH_1 */ | ||
488 | #define HDMI_I2S_CD_PLAYER (0x00) | ||
489 | #define HDMI_I2S_DAT_PLAYER (0x03) | ||
490 | #define HDMI_I2S_DCC_PLAYER (0x43) | ||
491 | #define HDMI_I2S_MINI_DISC_PLAYER (0x49) | ||
492 | |||
493 | /* I2S_CH_ST_2 / I2S_CH_ST_SH_2 */ | ||
494 | #define HDMI_I2S_CHANNEL_NUM_MASK (0xF << 4) | ||
495 | #define HDMI_I2S_SOURCE_NUM_MASK (0xF) | ||
496 | #define HDMI_I2S_SET_CHANNEL_NUM(x) (((x) & (0xF)) << 4) | ||
497 | #define HDMI_I2S_SET_SOURCE_NUM(x) ((x) & (0xF)) | ||
498 | |||
499 | /* I2S_CH_ST_3 / I2S_CH_ST_SH_3 */ | ||
500 | #define HDMI_I2S_CLK_ACCUR_LEVEL_1 (1 << 4) | ||
501 | #define HDMI_I2S_CLK_ACCUR_LEVEL_2 (0 << 4) | ||
502 | #define HDMI_I2S_CLK_ACCUR_LEVEL_3 (2 << 4) | ||
503 | #define HDMI_I2S_SMP_FREQ_44_1 (0x0) | ||
504 | #define HDMI_I2S_SMP_FREQ_48 (0x2) | ||
505 | #define HDMI_I2S_SMP_FREQ_32 (0x3) | ||
506 | #define HDMI_I2S_SMP_FREQ_96 (0xA) | ||
507 | #define HDMI_I2S_SET_SMP_FREQ(x) ((x) & (0xF)) | ||
508 | |||
509 | /* I2S_CH_ST_4 / I2S_CH_ST_SH_4 */ | ||
510 | #define HDMI_I2S_ORG_SMP_FREQ_44_1 (0xF << 4) | ||
511 | #define HDMI_I2S_ORG_SMP_FREQ_88_2 (0x7 << 4) | ||
512 | #define HDMI_I2S_ORG_SMP_FREQ_22_05 (0xB << 4) | ||
513 | #define HDMI_I2S_ORG_SMP_FREQ_176_4 (0x3 << 4) | ||
514 | #define HDMI_I2S_WORD_LEN_NOT_DEFINE (0x0 << 1) | ||
515 | #define HDMI_I2S_WORD_LEN_MAX24_20BITS (0x1 << 1) | ||
516 | #define HDMI_I2S_WORD_LEN_MAX24_22BITS (0x2 << 1) | ||
517 | #define HDMI_I2S_WORD_LEN_MAX24_23BITS (0x4 << 1) | ||
518 | #define HDMI_I2S_WORD_LEN_MAX24_24BITS (0x5 << 1) | ||
519 | #define HDMI_I2S_WORD_LEN_MAX24_21BITS (0x6 << 1) | ||
520 | #define HDMI_I2S_WORD_LEN_MAX20_16BITS (0x1 << 1) | ||
521 | #define HDMI_I2S_WORD_LEN_MAX20_18BITS (0x2 << 1) | ||
522 | #define HDMI_I2S_WORD_LEN_MAX20_19BITS (0x4 << 1) | ||
523 | #define HDMI_I2S_WORD_LEN_MAX20_20BITS (0x5 << 1) | ||
524 | #define HDMI_I2S_WORD_LEN_MAX20_17BITS (0x6 << 1) | ||
525 | #define HDMI_I2S_WORD_LEN_MAX_24BITS (1) | ||
526 | #define HDMI_I2S_WORD_LEN_MAX_20BITS (0) | ||
527 | |||
528 | /* I2S_MUX_CH */ | ||
529 | #define HDMI_I2S_CH3_R_EN (1 << 7) | ||
530 | #define HDMI_I2S_CH3_L_EN (1 << 6) | ||
531 | #define HDMI_I2S_CH3_EN (3 << 6) | ||
532 | #define HDMI_I2S_CH2_R_EN (1 << 5) | ||
533 | #define HDMI_I2S_CH2_L_EN (1 << 4) | ||
534 | #define HDMI_I2S_CH2_EN (3 << 4) | ||
535 | #define HDMI_I2S_CH1_R_EN (1 << 3) | ||
536 | #define HDMI_I2S_CH1_L_EN (1 << 2) | ||
537 | #define HDMI_I2S_CH1_EN (3 << 2) | ||
538 | #define HDMI_I2S_CH0_R_EN (1 << 1) | ||
539 | #define HDMI_I2S_CH0_L_EN (1) | ||
540 | #define HDMI_I2S_CH0_EN (3) | ||
541 | #define HDMI_I2S_CH_ALL_EN (0xFF) | ||
542 | #define HDMI_I2S_MUX_CH_CLR (~HDMI_I2S_CH_ALL_EN) | ||
543 | |||
544 | /* I2S_MUX_CUV */ | ||
545 | #define HDMI_I2S_CUV_R_EN (1 << 1) | ||
546 | #define HDMI_I2S_CUV_L_EN (1) | ||
547 | #define HDMI_I2S_CUV_RL_EN (0x03) | ||
548 | |||
549 | /* I2S_CUV_L_R */ | ||
550 | #define HDMI_I2S_CUV_R_DATA_MASK (0x7 << 4) | ||
551 | #define HDMI_I2S_CUV_L_DATA_MASK (0x7) | ||
552 | |||
553 | /* Timing generator registers */ | ||
554 | /* TG configure/status registers */ | ||
555 | #define HDMI_TG_VACT_ST3_L HDMI_TG_BASE(0x0068) | ||
556 | #define HDMI_TG_VACT_ST3_H HDMI_TG_BASE(0x006c) | ||
557 | #define HDMI_TG_VACT_ST4_L HDMI_TG_BASE(0x0070) | ||
558 | #define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074) | ||
559 | #define HDMI_TG_3D HDMI_TG_BASE(0x00F0) | ||
560 | |||
147 | #endif /* SAMSUNG_REGS_HDMI_H */ | 561 | #endif /* SAMSUNG_REGS_HDMI_H */ |
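The HDMI v1.4 macros added above expand to byte offsets into the mapped register window, and most control fields come as enable/disable/mask triples (for example HDMI_ASP_EN/HDMI_ASP_DIS/HDMI_ASP_MASK, or HDMI_MODE_HDMI_EN with HDMI_MODE_MASK). A minimal sketch of how a driver might drive them with a read-modify-write helper follows; the hdmi_writemask() helper, the "regs" pointer and the use of 32-bit accessors are illustrative assumptions, not part of this patch.

/*
 * Illustrative only: read-modify-write helper for the enable/mask pairs
 * defined in the header above. "regs" is assumed to be the ioremap()ed
 * HDMI register window; the macros are byte offsets into it.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline void hdmi_writemask(void __iomem *regs, u32 reg_id,
				  u32 value, u32 mask)
{
	u32 old = readl(regs + reg_id);

	writel((value & mask) | (old & ~mask), regs + reg_id);
}

static void hdmi_example_setup(void __iomem *regs)
{
	/* Select HDMI (rather than DVI) operation. */
	hdmi_writemask(regs, HDMI_MODE_SEL, HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);

	/* Enable audio sample packets in HDMI_CON_0. */
	hdmi_writemask(regs, HDMI_CON_0, HDMI_ASP_EN, HDMI_ASP_MASK);

	/* Compose an I2S channel-status byte: 48 kHz, clock accuracy level II. */
	writel(HDMI_I2S_CLK_ACCUR_LEVEL_2 |
	       HDMI_I2S_SET_SMP_FREQ(HDMI_I2S_SMP_FREQ_48),
	       regs + HDMI_I2S_CH_ST_3);
}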
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index 754e14bdc801..42e665c7e90a 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig | |||
@@ -16,8 +16,7 @@ config DRM_GMA600 | |||
16 | depends on DRM_GMA500 | 16 | depends on DRM_GMA500 |
17 | help | 17 | help |
18 | Say yes to include support for GMA600 (Intel Moorestown/Oaktrail) | 18 | Say yes to include support for GMA600 (Intel Moorestown/Oaktrail) |
19 | platforms with LVDS ports. HDMI and MIPI are not currently | 19 | platforms with LVDS ports. MIPI is not currently supported. |
20 | supported. | ||
21 | 20 | ||
22 | config DRM_GMA3600 | 21 | config DRM_GMA3600 |
23 | bool "Intel GMA3600/3650 support (Experimental)" | 22 | bool "Intel GMA3600/3650 support (Experimental)" |
@@ -25,3 +24,10 @@ config DRM_GMA3600 | |||
25 | help | 24 | help |
26 | Say yes to include basic support for Intel GMA3600/3650 (Intel | 25 | Say yes to include basic support for Intel GMA3600/3650 (Intel |
27 | Cedar Trail) platforms. | 26 | Cedar Trail) platforms. |
27 | |||
28 | config DRM_MEDFIELD | ||
29 | bool "Intel Medfield support (Experimental)" | ||
30 | depends on DRM_GMA500 && X86_INTEL_MID | ||
31 | help | ||
32 | Say yes to include support for the Intel Medfield platform. | ||
33 | |||
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index 81c103be5e21..1583982917ce 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile | |||
@@ -37,4 +37,14 @@ gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \ | |||
37 | oaktrail_hdmi.o \ | 37 | oaktrail_hdmi.o \ |
38 | oaktrail_hdmi_i2c.o | 38 | oaktrail_hdmi_i2c.o |
39 | 39 | ||
40 | gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \ | ||
41 | mdfld_output.o \ | ||
42 | mdfld_intel_display.o \ | ||
43 | mdfld_dsi_output.o \ | ||
44 | mdfld_dsi_dpi.o \ | ||
45 | mdfld_dsi_pkg_sender.o \ | ||
46 | mdfld_tpo_vid.o \ | ||
47 | mdfld_tmd_vid.o \ | ||
48 | tc35876x-dsi-lvds.o | ||
49 | |||
40 | obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o | 50 | obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o |
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 53404af2e748..a54cc738926a 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c | |||
@@ -202,13 +202,12 @@ static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value) | |||
202 | pci_dev_put(pci_root); | 202 | pci_dev_put(pci_root); |
203 | } | 203 | } |
204 | 204 | ||
205 | #define PSB_APM_CMD 0x0 | ||
206 | #define PSB_APM_STS 0x04 | ||
207 | #define PSB_PM_SSC 0x20 | 205 | #define PSB_PM_SSC 0x20 |
208 | #define PSB_PM_SSS 0x30 | 206 | #define PSB_PM_SSS 0x30 |
209 | #define PSB_PWRGT_GFX_MASK 0x3 | 207 | #define PSB_PWRGT_GFX_ON 0x02 |
210 | #define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c | 208 | #define PSB_PWRGT_GFX_OFF 0x01 |
211 | #define CDV_PWRGT_DISPLAY_STS 0x000fc00c | 209 | #define PSB_PWRGT_GFX_D0 0x00 |
210 | #define PSB_PWRGT_GFX_D3 0x03 | ||
212 | 211 | ||
213 | static void cdv_init_pm(struct drm_device *dev) | 212 | static void cdv_init_pm(struct drm_device *dev) |
214 | { | 213 | { |
@@ -221,26 +220,22 @@ static void cdv_init_pm(struct drm_device *dev) | |||
221 | dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT, | 220 | dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT, |
222 | PSB_OSPMBA) & 0xFFFF; | 221 | PSB_OSPMBA) & 0xFFFF; |
223 | 222 | ||
224 | /* Force power on for now */ | 223 | /* Power status */ |
225 | pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); | 224 | pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); |
226 | pwr_cnt &= ~PSB_PWRGT_GFX_MASK; | ||
227 | 225 | ||
226 | /* Enable the GPU */ | ||
227 | pwr_cnt &= ~PSB_PWRGT_GFX_MASK; | ||
228 | pwr_cnt |= PSB_PWRGT_GFX_ON; | ||
228 | outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); | 229 | outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); |
230 | |||
231 | /* Wait for the GPU power */ | ||
229 | for (i = 0; i < 5; i++) { | 232 | for (i = 0; i < 5; i++) { |
230 | u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); | 233 | u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); |
231 | if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0) | 234 | if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0) |
232 | break; | 235 | return; |
233 | udelay(10); | ||
234 | } | ||
235 | pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC); | ||
236 | pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR; | ||
237 | outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC); | ||
238 | for (i = 0; i < 5; i++) { | ||
239 | u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS); | ||
240 | if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0) | ||
241 | break; | ||
242 | udelay(10); | 236 | udelay(10); |
243 | } | 237 | } |
238 | dev_err(dev->dev, "GPU: power management timed out.\n"); | ||
244 | } | 239 | } |
245 | 240 | ||
246 | /** | 241 | /** |
@@ -249,11 +244,50 @@ static void cdv_init_pm(struct drm_device *dev) | |||
249 | * | 244 | * |
250 | * Save the state we need in order to be able to restore the interface | 245 | * Save the state we need in order to be able to restore the interface |
251 | * upon resume from suspend | 246 | * upon resume from suspend |
252 | * | ||
253 | * FIXME: review | ||
254 | */ | 247 | */ |
255 | static int cdv_save_display_registers(struct drm_device *dev) | 248 | static int cdv_save_display_registers(struct drm_device *dev) |
256 | { | 249 | { |
250 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
251 | struct psb_save_area *regs = &dev_priv->regs; | ||
252 | struct drm_connector *connector; | ||
253 | |||
254 | dev_info(dev->dev, "Saving GPU registers.\n"); | ||
255 | |||
256 | pci_read_config_byte(dev->pdev, 0xF4, ®s->cdv.saveLBB); | ||
257 | |||
258 | regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D); | ||
259 | regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D); | ||
260 | |||
261 | regs->cdv.saveDSPARB = REG_READ(DSPARB); | ||
262 | regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1); | ||
263 | regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2); | ||
264 | regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3); | ||
265 | regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4); | ||
266 | regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5); | ||
267 | regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6); | ||
268 | |||
269 | regs->cdv.saveADPA = REG_READ(ADPA); | ||
270 | |||
271 | regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL); | ||
272 | regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); | ||
273 | regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); | ||
274 | regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2); | ||
275 | regs->cdv.saveLVDS = REG_READ(LVDS); | ||
276 | |||
277 | regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL); | ||
278 | |||
279 | regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS); | ||
280 | regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS); | ||
281 | regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE); | ||
282 | |||
283 | regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL); | ||
284 | |||
285 | regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R); | ||
286 | regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R); | ||
287 | |||
288 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
289 | connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); | ||
290 | |||
257 | return 0; | 291 | return 0; |
258 | } | 292 | } |
259 | 293 | ||
@@ -267,16 +301,113 @@ static int cdv_save_display_registers(struct drm_device *dev) | |||
267 | */ | 301 | */ |
268 | static int cdv_restore_display_registers(struct drm_device *dev) | 302 | static int cdv_restore_display_registers(struct drm_device *dev) |
269 | { | 303 | { |
304 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
305 | struct psb_save_area *regs = &dev_priv->regs; | ||
306 | struct drm_connector *connector; | ||
307 | u32 temp; | ||
308 | |||
309 | pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB); | ||
310 | |||
311 | REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D); | ||
312 | REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D); | ||
313 | |||
314 | /* BIOS does below anyway */ | ||
315 | REG_WRITE(DPIO_CFG, 0); | ||
316 | REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N); | ||
317 | |||
318 | temp = REG_READ(DPLL_A); | ||
319 | if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) { | ||
320 | REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE); | ||
321 | REG_READ(DPLL_A); | ||
322 | } | ||
323 | |||
324 | temp = REG_READ(DPLL_B); | ||
325 | if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) { | ||
326 | REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE); | ||
327 | REG_READ(DPLL_B); | ||
328 | } | ||
329 | |||
330 | udelay(500); | ||
331 | |||
332 | REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]); | ||
333 | REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]); | ||
334 | REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]); | ||
335 | REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]); | ||
336 | REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]); | ||
337 | REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]); | ||
338 | |||
339 | REG_WRITE(DSPARB, regs->cdv.saveDSPARB); | ||
340 | REG_WRITE(ADPA, regs->cdv.saveADPA); | ||
341 | |||
342 | REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2); | ||
343 | REG_WRITE(LVDS, regs->cdv.saveLVDS); | ||
344 | REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL); | ||
345 | REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS); | ||
346 | REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL); | ||
347 | REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS); | ||
348 | REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS); | ||
349 | REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE); | ||
350 | REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL); | ||
351 | |||
352 | REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL); | ||
353 | |||
354 | REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER); | ||
355 | REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR); | ||
356 | |||
357 | /* Fix arbitration bug */ | ||
358 | CDV_MSG_WRITE32(3, 0x30, 0x08027108); | ||
359 | |||
360 | drm_mode_config_reset(dev); | ||
361 | |||
362 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) | ||
363 | connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); | ||
364 | |||
365 | /* Resume the modeset for every activated CRTC */ | ||
366 | drm_helper_resume_force_mode(dev); | ||
270 | return 0; | 367 | return 0; |
271 | } | 368 | } |
272 | 369 | ||
273 | static int cdv_power_down(struct drm_device *dev) | 370 | static int cdv_power_down(struct drm_device *dev) |
274 | { | 371 | { |
372 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
373 | u32 pwr_cnt, pwr_mask, pwr_sts; | ||
374 | int tries = 5; | ||
375 | |||
376 | pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); | ||
377 | pwr_cnt &= ~PSB_PWRGT_GFX_MASK; | ||
378 | pwr_cnt |= PSB_PWRGT_GFX_OFF; | ||
379 | pwr_mask = PSB_PWRGT_GFX_MASK; | ||
380 | |||
381 | outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); | ||
382 | |||
383 | while (tries--) { | ||
384 | pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); | ||
385 | if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D3) | ||
386 | return 0; | ||
387 | udelay(10); | ||
388 | } | ||
275 | return 0; | 389 | return 0; |
276 | } | 390 | } |
277 | 391 | ||
278 | static int cdv_power_up(struct drm_device *dev) | 392 | static int cdv_power_up(struct drm_device *dev) |
279 | { | 393 | { |
394 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
395 | u32 pwr_cnt, pwr_mask, pwr_sts; | ||
396 | int tries = 5; | ||
397 | |||
398 | pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); | ||
399 | pwr_cnt &= ~PSB_PWRGT_GFX_MASK; | ||
400 | pwr_cnt |= PSB_PWRGT_GFX_ON; | ||
401 | pwr_mask = PSB_PWRGT_GFX_MASK; | ||
402 | |||
403 | outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); | ||
404 | |||
405 | while (tries--) { | ||
406 | pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); | ||
407 | if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D0) | ||
408 | return 0; | ||
409 | udelay(10); | ||
410 | } | ||
280 | return 0; | 411 | return 0; |
281 | } | 412 | } |
282 | 413 | ||
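The new cdv_power_down() and cdv_power_up() above differ only in the command written to PSB_APM_CMD and the status expected back in PSB_APM_STS (D3 vs. D0). If one wanted to factor out that write-then-poll sequence, a shared helper could look like the sketch below. This is not code from the patch: it assumes the same includes and register names as cdv_device.c, and it reports a timeout instead of silently returning 0 as the patch does.

/*
 * Sketch only: common write-then-poll sequence shared by cdv_power_up()
 * and cdv_power_down(). Register names come from the patch; the helper
 * itself is hypothetical.
 */
static int cdv_set_power_state(struct drm_psb_private *dev_priv,
			       u32 cmd, u32 expected_sts)
{
	u32 pwr_cnt;
	int tries = 5;

	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
	pwr_cnt |= cmd;
	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);

	while (tries--) {
		u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);

		if ((pwr_sts & PSB_PWRGT_GFX_MASK) == expected_sts)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;	/* the patch treats this as best effort */
}

/* cdv_power_up() would then reduce to: */
/*	return cdv_set_power_state(dev_priv, PSB_PWRGT_GFX_ON, PSB_PWRGT_GFX_D0); */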
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h index 2a88b7beb551..9561e17621b3 100644 --- a/drivers/gpu/drm/gma500/cdv_device.h +++ b/drivers/gpu/drm/gma500/cdv_device.h | |||
@@ -26,7 +26,7 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device * | |||
26 | extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, | 26 | extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, |
27 | struct drm_crtc *crtc); | 27 | struct drm_crtc *crtc); |
28 | 28 | ||
29 | extern inline void cdv_intel_wait_for_vblank(struct drm_device *dev) | 29 | static inline void cdv_intel_wait_for_vblank(struct drm_device *dev) |
30 | { | 30 | { |
31 | /* Wait for 20ms, i.e. one cycle at 50hz. */ | 31 | /* Wait for 20ms, i.e. one cycle at 50hz. */ |
32 | /* FIXME: msleep ?? */ | 32 | /* FIXME: msleep ?? */ |
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index c100f3e9c920..a71a6cd95bdd 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "psb_intel_drv.h" | 32 | #include "psb_intel_drv.h" |
33 | #include "psb_intel_reg.h" | 33 | #include "psb_intel_reg.h" |
34 | #include "power.h" | 34 | #include "power.h" |
35 | #include "cdv_device.h" | ||
35 | #include <linux/pm_runtime.h> | 36 | #include <linux/pm_runtime.h> |
36 | 37 | ||
37 | 38 | ||
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 18d11525095e..be8455919b33 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c | |||
@@ -344,7 +344,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, | |||
344 | /* | 344 | /* |
345 | * Returns whether any encoder on the specified pipe is of the specified type | 345 | * Returns whether any encoder on the specified pipe is of the specified type |
346 | */ | 346 | */ |
347 | bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type) | 347 | static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type) |
348 | { | 348 | { |
349 | struct drm_device *dev = crtc->dev; | 349 | struct drm_device *dev = crtc->dev; |
350 | struct drm_mode_config *mode_config = &dev->mode_config; | 350 | struct drm_mode_config *mode_config = &dev->mode_config; |
@@ -476,7 +476,7 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target, | |||
476 | return err != target; | 476 | return err != target; |
477 | } | 477 | } |
478 | 478 | ||
479 | int cdv_intel_pipe_set_base(struct drm_crtc *crtc, | 479 | static int cdv_intel_pipe_set_base(struct drm_crtc *crtc, |
480 | int x, int y, struct drm_framebuffer *old_fb) | 480 | int x, int y, struct drm_framebuffer *old_fb) |
481 | { | 481 | { |
482 | struct drm_device *dev = crtc->dev; | 482 | struct drm_device *dev = crtc->dev; |
@@ -569,7 +569,6 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
569 | int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; | 569 | int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; |
570 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 570 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
571 | u32 temp; | 571 | u32 temp; |
572 | bool enabled; | ||
573 | 572 | ||
574 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 573 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
575 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 574 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -663,7 +662,6 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
663 | udelay(150); | 662 | udelay(150); |
664 | break; | 663 | break; |
665 | } | 664 | } |
666 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; | ||
667 | /*Set FIFO Watermarks*/ | 665 | /*Set FIFO Watermarks*/ |
668 | REG_WRITE(DSPARB, 0x3F3E); | 666 | REG_WRITE(DSPARB, 0x3F3E); |
669 | } | 667 | } |
@@ -680,22 +678,6 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc) | |||
680 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | 678 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); |
681 | } | 679 | } |
682 | 680 | ||
683 | void cdv_intel_encoder_prepare(struct drm_encoder *encoder) | ||
684 | { | ||
685 | struct drm_encoder_helper_funcs *encoder_funcs = | ||
686 | encoder->helper_private; | ||
687 | /* lvds has its own version of prepare see cdv_intel_lvds_prepare */ | ||
688 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); | ||
689 | } | ||
690 | |||
691 | void cdv_intel_encoder_commit(struct drm_encoder *encoder) | ||
692 | { | ||
693 | struct drm_encoder_helper_funcs *encoder_funcs = | ||
694 | encoder->helper_private; | ||
695 | /* lvds has its own version of commit see cdv_intel_lvds_commit */ | ||
696 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | ||
697 | } | ||
698 | |||
699 | static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc, | 681 | static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc, |
700 | struct drm_display_mode *mode, | 682 | struct drm_display_mode *mode, |
701 | struct drm_display_mode *adjusted_mode) | 683 | struct drm_display_mode *adjusted_mode) |
@@ -745,7 +727,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, | |||
745 | int refclk; | 727 | int refclk; |
746 | struct cdv_intel_clock_t clock; | 728 | struct cdv_intel_clock_t clock; |
747 | u32 dpll = 0, dspcntr, pipeconf; | 729 | u32 dpll = 0, dspcntr, pipeconf; |
748 | bool ok, is_sdvo = false, is_dvo = false; | 730 | bool ok; |
749 | bool is_crt = false, is_lvds = false, is_tv = false; | 731 | bool is_crt = false, is_lvds = false, is_tv = false; |
750 | bool is_hdmi = false; | 732 | bool is_hdmi = false; |
751 | struct drm_mode_config *mode_config = &dev->mode_config; | 733 | struct drm_mode_config *mode_config = &dev->mode_config; |
@@ -763,12 +745,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, | |||
763 | case INTEL_OUTPUT_LVDS: | 745 | case INTEL_OUTPUT_LVDS: |
764 | is_lvds = true; | 746 | is_lvds = true; |
765 | break; | 747 | break; |
766 | case INTEL_OUTPUT_SDVO: | ||
767 | is_sdvo = true; | ||
768 | break; | ||
769 | case INTEL_OUTPUT_DVO: | ||
770 | is_dvo = true; | ||
771 | break; | ||
772 | case INTEL_OUTPUT_TVOUT: | 748 | case INTEL_OUTPUT_TVOUT: |
773 | is_tv = true; | 749 | is_tv = true; |
774 | break; | 750 | break; |
@@ -928,7 +904,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, | |||
928 | } | 904 | } |
929 | 905 | ||
930 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ | 906 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
931 | void cdv_intel_crtc_load_lut(struct drm_crtc *crtc) | 907 | static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc) |
932 | { | 908 | { |
933 | struct drm_device *dev = crtc->dev; | 909 | struct drm_device *dev = crtc->dev; |
934 | struct drm_psb_private *dev_priv = | 910 | struct drm_psb_private *dev_priv = |
@@ -968,7 +944,7 @@ void cdv_intel_crtc_load_lut(struct drm_crtc *crtc) | |||
968 | gma_power_end(dev); | 944 | gma_power_end(dev); |
969 | } else { | 945 | } else { |
970 | for (i = 0; i < 256; i++) { | 946 | for (i = 0; i < 256; i++) { |
971 | dev_priv->save_palette_a[i] = | 947 | dev_priv->regs.psb.save_palette_a[i] = |
972 | ((psb_intel_crtc->lut_r[i] + | 948 | ((psb_intel_crtc->lut_r[i] + |
973 | psb_intel_crtc->lut_adj[i]) << 16) | | 949 | psb_intel_crtc->lut_adj[i]) << 16) | |
974 | ((psb_intel_crtc->lut_g[i] + | 950 | ((psb_intel_crtc->lut_g[i] + |
@@ -1338,18 +1314,20 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev, | |||
1338 | gma_power_end(dev); | 1314 | gma_power_end(dev); |
1339 | } else { | 1315 | } else { |
1340 | dpll = (pipe == 0) ? | 1316 | dpll = (pipe == 0) ? |
1341 | dev_priv->saveDPLL_A : dev_priv->saveDPLL_B; | 1317 | dev_priv->regs.psb.saveDPLL_A : |
1318 | dev_priv->regs.psb.saveDPLL_B; | ||
1342 | 1319 | ||
1343 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | 1320 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
1344 | fp = (pipe == 0) ? | 1321 | fp = (pipe == 0) ? |
1345 | dev_priv->saveFPA0 : | 1322 | dev_priv->regs.psb.saveFPA0 : |
1346 | dev_priv->saveFPB0; | 1323 | dev_priv->regs.psb.saveFPB0; |
1347 | else | 1324 | else |
1348 | fp = (pipe == 0) ? | 1325 | fp = (pipe == 0) ? |
1349 | dev_priv->saveFPA1 : | 1326 | dev_priv->regs.psb.saveFPA1 : |
1350 | dev_priv->saveFPB1; | 1327 | dev_priv->regs.psb.saveFPB1; |
1351 | 1328 | ||
1352 | is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN); | 1329 | is_lvds = (pipe == 1) && |
1330 | (dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN); | ||
1353 | } | 1331 | } |
1354 | 1332 | ||
1355 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 1333 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
@@ -1419,13 +1397,17 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev, | |||
1419 | gma_power_end(dev); | 1397 | gma_power_end(dev); |
1420 | } else { | 1398 | } else { |
1421 | htot = (pipe == 0) ? | 1399 | htot = (pipe == 0) ? |
1422 | dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B; | 1400 | dev_priv->regs.psb.saveHTOTAL_A : |
1401 | dev_priv->regs.psb.saveHTOTAL_B; | ||
1423 | hsync = (pipe == 0) ? | 1402 | hsync = (pipe == 0) ? |
1424 | dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B; | 1403 | dev_priv->regs.psb.saveHSYNC_A : |
1404 | dev_priv->regs.psb.saveHSYNC_B; | ||
1425 | vtot = (pipe == 0) ? | 1405 | vtot = (pipe == 0) ? |
1426 | dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B; | 1406 | dev_priv->regs.psb.saveVTOTAL_A : |
1407 | dev_priv->regs.psb.saveVTOTAL_B; | ||
1427 | vsync = (pipe == 0) ? | 1408 | vsync = (pipe == 0) ? |
1428 | dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B; | 1409 | dev_priv->regs.psb.saveVSYNC_A : |
1410 | dev_priv->regs.psb.saveVSYNC_B; | ||
1429 | } | 1411 | } |
1430 | 1412 | ||
1431 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 1413 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
@@ -1475,34 +1457,3 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = { | |||
1475 | .set_config = cdv_crtc_set_config, | 1457 | .set_config = cdv_crtc_set_config, |
1476 | .destroy = cdv_intel_crtc_destroy, | 1458 | .destroy = cdv_intel_crtc_destroy, |
1477 | }; | 1459 | }; |
1478 | |||
1479 | /* | ||
1480 | * Set the default value of cursor control and base register | ||
1481 | * to zero. This is a workaround for h/w defect on oaktrail | ||
1482 | */ | ||
1483 | void cdv_intel_cursor_init(struct drm_device *dev, int pipe) | ||
1484 | { | ||
1485 | uint32_t control; | ||
1486 | uint32_t base; | ||
1487 | |||
1488 | switch (pipe) { | ||
1489 | case 0: | ||
1490 | control = CURACNTR; | ||
1491 | base = CURABASE; | ||
1492 | break; | ||
1493 | case 1: | ||
1494 | control = CURBCNTR; | ||
1495 | base = CURBBASE; | ||
1496 | break; | ||
1497 | case 2: | ||
1498 | control = CURCCNTR; | ||
1499 | base = CURCBASE; | ||
1500 | break; | ||
1501 | default: | ||
1502 | return; | ||
1503 | } | ||
1504 | |||
1505 | REG_WRITE(control, 0); | ||
1506 | REG_WRITE(base, 0); | ||
1507 | } | ||
1508 | |||
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index de25560e629d..8d5269555005 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include "psb_intel_drv.h" | 34 | #include "psb_intel_drv.h" |
35 | #include "psb_drv.h" | 35 | #include "psb_drv.h" |
36 | #include "psb_intel_reg.h" | 36 | #include "psb_intel_reg.h" |
37 | #include "cdv_device.h" | ||
37 | #include <linux/pm_runtime.h> | 38 | #include <linux/pm_runtime.h> |
38 | 39 | ||
39 | /* hdmi control bits */ | 40 | /* hdmi control bits */ |
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 50e744be9852..8359c1a3f45f 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c | |||
@@ -78,13 +78,14 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev) | |||
78 | 78 | ||
79 | gma_power_end(dev); | 79 | gma_power_end(dev); |
80 | } else | 80 | } else |
81 | retval = ((dev_priv->saveBLC_PWM_CTL & | 81 | retval = ((dev_priv->regs.saveBLC_PWM_CTL & |
82 | BACKLIGHT_MODULATION_FREQ_MASK) >> | 82 | BACKLIGHT_MODULATION_FREQ_MASK) >> |
83 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | 83 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; |
84 | 84 | ||
85 | return retval; | 85 | return retval; |
86 | } | 86 | } |
87 | 87 | ||
88 | #if 0 | ||
88 | /* | 89 | /* |
89 | * Set LVDS backlight level by I2C command | 90 | * Set LVDS backlight level by I2C command |
90 | */ | 91 | */ |
@@ -165,6 +166,7 @@ void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level) | |||
165 | else | 166 | else |
166 | cdv_lvds_pwm_set_brightness(dev, level); | 167 | cdv_lvds_pwm_set_brightness(dev, level); |
167 | } | 168 | } |
169 | #endif | ||
168 | 170 | ||
169 | /** | 171 | /** |
170 | * Sets the backlight level. | 172 | * Sets the backlight level. |
@@ -184,9 +186,9 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level) | |||
184 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); | 186 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); |
185 | gma_power_end(dev); | 187 | gma_power_end(dev); |
186 | } else { | 188 | } else { |
187 | blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL & | 189 | blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL & |
188 | ~BACKLIGHT_DUTY_CYCLE_MASK; | 190 | ~BACKLIGHT_DUTY_CYCLE_MASK; |
189 | dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl | | 191 | dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl | |
190 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); | 192 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); |
191 | } | 193 | } |
192 | } | 194 | } |
@@ -242,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector) | |||
242 | { | 244 | { |
243 | } | 245 | } |
244 | 246 | ||
245 | int cdv_intel_lvds_mode_valid(struct drm_connector *connector, | 247 | static int cdv_intel_lvds_mode_valid(struct drm_connector *connector, |
246 | struct drm_display_mode *mode) | 248 | struct drm_display_mode *mode) |
247 | { | 249 | { |
248 | struct drm_device *dev = connector->dev; | 250 | struct drm_device *dev = connector->dev; |
@@ -267,7 +269,7 @@ int cdv_intel_lvds_mode_valid(struct drm_connector *connector, | |||
267 | return MODE_OK; | 269 | return MODE_OK; |
268 | } | 270 | } |
269 | 271 | ||
270 | bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder, | 272 | static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder, |
271 | struct drm_display_mode *mode, | 273 | struct drm_display_mode *mode, |
272 | struct drm_display_mode *adjusted_mode) | 274 | struct drm_display_mode *adjusted_mode) |
273 | { | 275 | { |
@@ -436,7 +438,7 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) | |||
436 | * Unregister the DDC bus for this connector then free the driver private | 438 | * Unregister the DDC bus for this connector then free the driver private |
437 | * structure. | 439 | * structure. |
438 | */ | 440 | */ |
439 | void cdv_intel_lvds_destroy(struct drm_connector *connector) | 441 | static void cdv_intel_lvds_destroy(struct drm_connector *connector) |
440 | { | 442 | { |
441 | struct psb_intel_encoder *psb_intel_encoder = | 443 | struct psb_intel_encoder *psb_intel_encoder = |
442 | psb_intel_attached_encoder(connector); | 444 | psb_intel_attached_encoder(connector); |
@@ -448,7 +450,7 @@ void cdv_intel_lvds_destroy(struct drm_connector *connector) | |||
448 | kfree(connector); | 450 | kfree(connector); |
449 | } | 451 | } |
450 | 452 | ||
451 | int cdv_intel_lvds_set_property(struct drm_connector *connector, | 453 | static int cdv_intel_lvds_set_property(struct drm_connector *connector, |
452 | struct drm_property *property, | 454 | struct drm_property *property, |
453 | uint64_t value) | 455 | uint64_t value) |
454 | { | 456 | { |
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index be616735ec91..8ea202f1ba50 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c | |||
@@ -111,39 +111,6 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info) | |||
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | void psbfb_suspend(struct drm_device *dev) | ||
115 | { | ||
116 | struct drm_framebuffer *fb; | ||
117 | |||
118 | console_lock(); | ||
119 | mutex_lock(&dev->mode_config.mutex); | ||
120 | list_for_each_entry(fb, &dev->mode_config.fb_list, head) { | ||
121 | struct psb_framebuffer *psbfb = to_psb_fb(fb); | ||
122 | struct fb_info *info = psbfb->fbdev; | ||
123 | fb_set_suspend(info, 1); | ||
124 | drm_fb_helper_blank(FB_BLANK_POWERDOWN, info); | ||
125 | } | ||
126 | mutex_unlock(&dev->mode_config.mutex); | ||
127 | console_unlock(); | ||
128 | } | ||
129 | |||
130 | void psbfb_resume(struct drm_device *dev) | ||
131 | { | ||
132 | struct drm_framebuffer *fb; | ||
133 | |||
134 | console_lock(); | ||
135 | mutex_lock(&dev->mode_config.mutex); | ||
136 | list_for_each_entry(fb, &dev->mode_config.fb_list, head) { | ||
137 | struct psb_framebuffer *psbfb = to_psb_fb(fb); | ||
138 | struct fb_info *info = psbfb->fbdev; | ||
139 | fb_set_suspend(info, 0); | ||
140 | drm_fb_helper_blank(FB_BLANK_UNBLANK, info); | ||
141 | } | ||
142 | mutex_unlock(&dev->mode_config.mutex); | ||
143 | console_unlock(); | ||
144 | drm_helper_disable_unused_functions(dev); | ||
145 | } | ||
146 | |||
147 | static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 114 | static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
148 | { | 115 | { |
149 | struct psb_framebuffer *psbfb = vma->vm_private_data; | 116 | struct psb_framebuffer *psbfb = vma->vm_private_data; |
@@ -158,7 +125,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
158 | unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; | 125 | unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; |
159 | 126 | ||
160 | page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 127 | page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
161 | address = (unsigned long)vmf->virtual_address; | 128 | address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); |
162 | 129 | ||
163 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 130 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
164 | 131 | ||
@@ -390,6 +357,7 @@ static int psbfb_create(struct psb_fbdev *fbdev, | |||
390 | mode_cmd.width = sizes->surface_width; | 357 | mode_cmd.width = sizes->surface_width; |
391 | mode_cmd.height = sizes->surface_height; | 358 | mode_cmd.height = sizes->surface_height; |
392 | bpp = sizes->surface_bpp; | 359 | bpp = sizes->surface_bpp; |
360 | depth = sizes->surface_depth; | ||
393 | 361 | ||
394 | /* No 24bit packed */ | 362 | /* No 24bit packed */ |
395 | if (bpp == 24) | 363 | if (bpp == 24) |
@@ -402,7 +370,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, | |||
402 | * is ok with some fonts | 370 | * is ok with some fonts |
403 | */ | 371 | */ |
404 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines); | 372 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines); |
405 | depth = sizes->surface_depth; | ||
406 | 373 | ||
407 | size = mode_cmd.pitches[0] * mode_cmd.height; | 374 | size = mode_cmd.pitches[0] * mode_cmd.height; |
408 | size = ALIGN(size, PAGE_SIZE); | 375 | size = ALIGN(size, PAGE_SIZE); |
@@ -462,6 +429,7 @@ static int psbfb_create(struct psb_fbdev *fbdev, | |||
462 | fbdev->psb_fb_helper.fb = fb; | 429 | fbdev->psb_fb_helper.fb = fb; |
463 | fbdev->psb_fb_helper.fbdev = info; | 430 | fbdev->psb_fb_helper.fbdev = info; |
464 | 431 | ||
432 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
465 | strcpy(info->fix.id, "psbfb"); | 433 | strcpy(info->fix.id, "psbfb"); |
466 | 434 | ||
467 | info->flags = FBINFO_DEFAULT; | 435 | info->flags = FBINFO_DEFAULT; |
@@ -499,18 +467,13 @@ static int psbfb_create(struct psb_fbdev *fbdev, | |||
499 | info->apertures->ranges[0].size = dev_priv->gtt.stolen_size; | 467 | info->apertures->ranges[0].size = dev_priv->gtt.stolen_size; |
500 | } | 468 | } |
501 | 469 | ||
502 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
503 | drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, | 470 | drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, |
504 | sizes->fb_width, sizes->fb_height); | 471 | sizes->fb_width, sizes->fb_height); |
505 | 472 | ||
506 | info->fix.mmio_start = pci_resource_start(dev->pdev, 0); | 473 | info->fix.mmio_start = pci_resource_start(dev->pdev, 0); |
507 | info->fix.mmio_len = pci_resource_len(dev->pdev, 0); | 474 | info->fix.mmio_len = pci_resource_len(dev->pdev, 0); |
508 | 475 | ||
509 | info->pixmap.size = 64 * 1024; | 476 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
510 | info->pixmap.buf_align = 8; | ||
511 | info->pixmap.access_align = 32; | ||
512 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
513 | info->pixmap.scan_align = 1; | ||
514 | 477 | ||
515 | dev_info(dev->dev, "allocated %dx%d fb\n", | 478 | dev_info(dev->dev, "allocated %dx%d fb\n", |
516 | psbfb->base.width, psbfb->base.height); | 479 | psbfb->base.width, psbfb->base.height); |
@@ -559,11 +522,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create | |||
559 | static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 522 | static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
560 | u16 blue, int regno) | 523 | u16 blue, int regno) |
561 | { | 524 | { |
525 | struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); | ||
526 | |||
527 | intel_crtc->lut_r[regno] = red >> 8; | ||
528 | intel_crtc->lut_g[regno] = green >> 8; | ||
529 | intel_crtc->lut_b[regno] = blue >> 8; | ||
562 | } | 530 | } |
563 | 531 | ||
564 | static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, | 532 | static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, |
565 | u16 *green, u16 *blue, int regno) | 533 | u16 *green, u16 *blue, int regno) |
566 | { | 534 | { |
535 | struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); | ||
536 | |||
537 | *red = intel_crtc->lut_r[regno] << 8; | ||
538 | *green = intel_crtc->lut_g[regno] << 8; | ||
539 | *blue = intel_crtc->lut_b[regno] << 8; | ||
567 | } | 540 | } |
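The previously empty gamma callbacks above now cache the palette in the CRTC state: the fb helper passes 16-bit colour components, while the hardware LUT holds 8 bits per channel, so the set path keeps the high byte and the get path scales it back up. A small sketch of that conversion (names are illustrative):

#include <linux/types.h>

static inline u8 gamma16_to_lut8(u16 c)
{
	return c >> 8;			/* keep the high byte */
}

static inline u16 lut8_to_gamma16(u8 c)
{
	return (u16)c << 8;		/* restore the 16-bit range */
}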
568 | 541 | ||
569 | static int psbfb_probe(struct drm_fb_helper *helper, | 542 | static int psbfb_probe(struct drm_fb_helper *helper, |
@@ -588,7 +561,7 @@ struct drm_fb_helper_funcs psb_fb_helper_funcs = { | |||
588 | .fb_probe = psbfb_probe, | 561 | .fb_probe = psbfb_probe, |
589 | }; | 562 | }; |
590 | 563 | ||
591 | int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) | 564 | static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) |
592 | { | 565 | { |
593 | struct fb_info *info; | 566 | struct fb_info *info; |
594 | struct psb_framebuffer *psbfb = &fbdev->pfb; | 567 | struct psb_framebuffer *psbfb = &fbdev->pfb; |
@@ -630,7 +603,7 @@ int psb_fbdev_init(struct drm_device *dev) | |||
630 | return 0; | 603 | return 0; |
631 | } | 604 | } |
632 | 605 | ||
633 | void psb_fbdev_fini(struct drm_device *dev) | 606 | static void psb_fbdev_fini(struct drm_device *dev) |
634 | { | 607 | { |
635 | struct drm_psb_private *dev_priv = dev->dev_private; | 608 | struct drm_psb_private *dev_priv = dev->dev_private; |
636 | 609 | ||
@@ -724,10 +697,7 @@ static int psb_create_backlight_property(struct drm_device *dev) | |||
724 | if (dev_priv->backlight_property) | 697 | if (dev_priv->backlight_property) |
725 | return 0; | 698 | return 0; |
726 | 699 | ||
727 | backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE, | 700 | backlight = drm_property_create_range(dev, 0, "backlight", 0, 100); |
728 | "backlight", 2); | ||
729 | backlight->values[0] = 0; | ||
730 | backlight->values[1] = 100; | ||
731 | 701 | ||
732 | dev_priv->backlight_property = backlight; | 702 | dev_priv->backlight_property = backlight; |
733 | 703 | ||
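The backlight hunk above collapses the open-coded drm_property_create(..., DRM_MODE_PROP_RANGE, "backlight", 2) plus manual values[] assignment into drm_property_create_range(), which takes the minimum and maximum directly. A hedged sketch of the corrected call as it appears in the patch (the NULL check is added here for illustration only; the driver does not check the return value, and dev/dev_priv come from the surrounding function):

	struct drm_property *backlight;

	/* 0..100 range property named "backlight", flags = 0 */
	backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
	if (!backlight)
		return -ENOMEM;
	dev_priv->backlight_property = backlight;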
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c index daac12120653..3c17634f6061 100644 --- a/drivers/gpu/drm/gma500/gem_glue.c +++ b/drivers/gpu/drm/gma500/gem_glue.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <drm/drmP.h> | 20 | #include <drm/drmP.h> |
21 | #include <drm/drm.h> | 21 | #include <drm/drm.h> |
22 | #include "gem_glue.h" | ||
22 | 23 | ||
23 | void drm_gem_object_release_wrap(struct drm_gem_object *obj) | 24 | void drm_gem_object_release_wrap(struct drm_gem_object *obj) |
24 | { | 25 | { |
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index aff194fbe9f3..c6465b40090f 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c | |||
@@ -57,7 +57,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type) | |||
57 | * Given a gtt_range object return the GTT offset of the page table | 57 | * Given a gtt_range object return the GTT offset of the page table |
58 | * entries for this gtt_range | 58 | * entries for this gtt_range |
59 | */ | 59 | */ |
60 | u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r) | 60 | static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r) |
61 | { | 61 | { |
62 | struct drm_psb_private *dev_priv = dev->dev_private; | 62 | struct drm_psb_private *dev_priv = dev->dev_private; |
63 | unsigned long offset; | 63 | unsigned long offset; |
@@ -378,7 +378,7 @@ void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt) | |||
378 | kfree(gt); | 378 | kfree(gt); |
379 | } | 379 | } |
380 | 380 | ||
381 | void psb_gtt_alloc(struct drm_device *dev) | 381 | static void psb_gtt_alloc(struct drm_device *dev) |
382 | { | 382 | { |
383 | struct drm_psb_private *dev_priv = dev->dev_private; | 383 | struct drm_psb_private *dev_priv = dev->dev_private; |
384 | init_rwsem(&dev_priv->gtt.sem); | 384 | init_rwsem(&dev_priv->gtt.sem); |
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index 147584ac8d02..9db90527bf0f 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c | |||
@@ -395,7 +395,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev) | |||
395 | struct drm_psb_private *dev_priv = dev->dev_private; | 395 | struct drm_psb_private *dev_priv = dev->dev_private; |
396 | int ret, i; | 396 | int ret, i; |
397 | 397 | ||
398 | dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS, | 398 | dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), |
399 | GFP_KERNEL); | 399 | GFP_KERNEL); |
400 | if (dev_priv->gmbus == NULL) | 400 | if (dev_priv->gmbus == NULL) |
401 | return -ENOMEM; | 401 | return -ENOMEM; |
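The gmbus change swaps the first two arguments into the documented kcalloc(n, size, flags) order; the byte count is identical either way, but the element count belongs first, matching the function's signature and reading as "an array of GMBUS_NUM_PORTS elements". A minimal sketch of the pattern:

#include <linux/slab.h>

/* Allocate and zero an array of n elements: count first, element size second. */
static void *alloc_gmbus_array(size_t n, size_t elem_size)
{
	return kcalloc(n, elem_size, GFP_KERNEL);
}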
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c new file mode 100644 index 000000000000..af656787db0f --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_device.c | |||
@@ -0,0 +1,691 @@ | |||
1 | /************************************************************************** | ||
2 | * Copyright (c) 2011, Intel Corporation. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | **************************************************************************/ | ||
19 | |||
20 | #include "psb_drv.h" | ||
21 | #include "mid_bios.h" | ||
22 | #include "mdfld_output.h" | ||
23 | #include "mdfld_dsi_output.h" | ||
24 | #include "tc35876x-dsi-lvds.h" | ||
25 | |||
26 | #include <asm/intel_scu_ipc.h> | ||
27 | |||
28 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
29 | |||
30 | #define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF | ||
31 | #define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ | ||
32 | #define BLC_PWM_FREQ_CALC_CONSTANT 32 | ||
33 | #define MHz 1000000 | ||
34 | #define BRIGHTNESS_MIN_LEVEL 1 | ||
35 | #define BRIGHTNESS_MAX_LEVEL 100 | ||
36 | #define BRIGHTNESS_MASK 0xFF | ||
37 | #define BLC_POLARITY_NORMAL 0 | ||
38 | #define BLC_POLARITY_INVERSE 1 | ||
39 | #define BLC_ADJUSTMENT_MAX 100 | ||
40 | |||
41 | #define MDFLD_BLC_PWM_PRECISION_FACTOR 10 | ||
42 | #define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE | ||
43 | #define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2 | ||
44 | |||
45 | #define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) | ||
46 | #define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16) | ||
47 | |||
48 | static struct backlight_device *mdfld_backlight_device; | ||
49 | |||
50 | int mdfld_set_brightness(struct backlight_device *bd) | ||
51 | { | ||
52 | struct drm_device *dev = | ||
53 | (struct drm_device *)bl_get_data(mdfld_backlight_device); | ||
54 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
55 | int level = bd->props.brightness; | ||
56 | |||
57 | DRM_DEBUG_DRIVER("backlight level set to %d\n", level); | ||
58 | |||
59 | /* Perform value bounds checking */ | ||
60 | if (level < BRIGHTNESS_MIN_LEVEL) | ||
61 | level = BRIGHTNESS_MIN_LEVEL; | ||
62 | |||
63 | if (gma_power_begin(dev, false)) { | ||
64 | u32 adjusted_level = 0; | ||
65 | |||
66 | /* | ||
67 | * Adjust the backlight level with the percent in | ||
68 | * dev_priv->blc_adj2 | ||
69 | */ | ||
70 | adjusted_level = level * dev_priv->blc_adj2; | ||
71 | adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX; | ||
72 | dev_priv->brightness_adjusted = adjusted_level; | ||
73 | |||
74 | if (mdfld_get_panel_type(dev, 0) == TC35876X) { | ||
75 | if (dev_priv->dpi_panel_on[0] || | ||
76 | dev_priv->dpi_panel_on[2]) | ||
77 | tc35876x_brightness_control(dev, | ||
78 | dev_priv->brightness_adjusted); | ||
79 | } else { | ||
80 | if (dev_priv->dpi_panel_on[0]) | ||
81 | mdfld_dsi_brightness_control(dev, 0, | ||
82 | dev_priv->brightness_adjusted); | ||
83 | } | ||
84 | |||
85 | if (dev_priv->dpi_panel_on[2]) | ||
86 | mdfld_dsi_brightness_control(dev, 2, | ||
87 | dev_priv->brightness_adjusted); | ||
88 | gma_power_end(dev); | ||
89 | } | ||
90 | |||
91 | /* cache the brightness for later use */ | ||
92 | dev_priv->brightness = level; | ||
93 | return 0; | ||
94 | } | ||
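mdfld_set_brightness() scales the requested level by dev_priv->blc_adj2, which acts as a percentage cap (BLC_ADJUSTMENT_MAX is 100). A short sketch of the arithmetic with assumed numbers:

#define BLC_ADJUSTMENT_MAX	100

/* E.g. a requested level of 80 with blc_adj2 = 50 drives the panel at 40. */
static unsigned int mdfld_scale_brightness(unsigned int level, unsigned int adj2)
{
	return level * adj2 / BLC_ADJUSTMENT_MAX;
}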
95 | |||
96 | static int mdfld_get_brightness(struct backlight_device *bd) | ||
97 | { | ||
98 | struct drm_device *dev = | ||
99 | (struct drm_device *)bl_get_data(mdfld_backlight_device); | ||
100 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
101 | |||
102 | DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness); | ||
103 | |||
104 | /* return locally cached var instead of HW read (due to DPST etc.) */ | ||
105 | return dev_priv->brightness; | ||
106 | } | ||
107 | |||
108 | static const struct backlight_ops mdfld_ops = { | ||
109 | .get_brightness = mdfld_get_brightness, | ||
110 | .update_status = mdfld_set_brightness, | ||
111 | }; | ||
112 | |||
113 | static int device_backlight_init(struct drm_device *dev) | ||
114 | { | ||
115 | struct drm_psb_private *dev_priv = (struct drm_psb_private *) | ||
116 | dev->dev_private; | ||
117 | |||
118 | dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX; | ||
119 | dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX; | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static int mdfld_backlight_init(struct drm_device *dev) | ||
125 | { | ||
126 | struct backlight_properties props; | ||
127 | int ret = 0; | ||
128 | |||
129 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
130 | props.max_brightness = BRIGHTNESS_MAX_LEVEL; | ||
131 | props.type = BACKLIGHT_PLATFORM; | ||
132 | mdfld_backlight_device = backlight_device_register("mdfld-bl", | ||
133 | NULL, (void *)dev, &mdfld_ops, &props); | ||
134 | |||
135 | if (IS_ERR(mdfld_backlight_device)) | ||
136 | return PTR_ERR(mdfld_backlight_device); | ||
137 | |||
138 | ret = device_backlight_init(dev); | ||
139 | if (ret) | ||
140 | return ret; | ||
141 | |||
142 | mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL; | ||
143 | mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL; | ||
144 | backlight_update_status(mdfld_backlight_device); | ||
145 | return 0; | ||
146 | } | ||
147 | #endif | ||
148 | |||
149 | struct backlight_device *mdfld_get_backlight_device(void) | ||
150 | { | ||
151 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
152 | return mdfld_backlight_device; | ||
153 | #else | ||
154 | return NULL; | ||
155 | #endif | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * mdfld_save_display_registers | ||
160 | * | ||
161 | * Description: We are going to suspend so save current display | ||
162 | * register state. | ||
163 | * | ||
164 | * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio | ||
165 | */ | ||
166 | static int mdfld_save_display_registers(struct drm_device *dev, int pipe) | ||
167 | { | ||
168 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
169 | struct medfield_state *regs = &dev_priv->regs.mdfld; | ||
170 | int i; | ||
171 | |||
172 | /* register */ | ||
173 | u32 dpll_reg = MRST_DPLL_A; | ||
174 | u32 fp_reg = MRST_FPA0; | ||
175 | u32 pipeconf_reg = PIPEACONF; | ||
176 | u32 htot_reg = HTOTAL_A; | ||
177 | u32 hblank_reg = HBLANK_A; | ||
178 | u32 hsync_reg = HSYNC_A; | ||
179 | u32 vtot_reg = VTOTAL_A; | ||
180 | u32 vblank_reg = VBLANK_A; | ||
181 | u32 vsync_reg = VSYNC_A; | ||
182 | u32 pipesrc_reg = PIPEASRC; | ||
183 | u32 dspstride_reg = DSPASTRIDE; | ||
184 | u32 dsplinoff_reg = DSPALINOFF; | ||
185 | u32 dsptileoff_reg = DSPATILEOFF; | ||
186 | u32 dspsize_reg = DSPASIZE; | ||
187 | u32 dsppos_reg = DSPAPOS; | ||
188 | u32 dspsurf_reg = DSPASURF; | ||
189 | u32 mipi_reg = MIPI; | ||
190 | u32 dspcntr_reg = DSPACNTR; | ||
191 | u32 dspstatus_reg = PIPEASTAT; | ||
192 | u32 palette_reg = PALETTE_A; | ||
193 | |||
194 | /* pointer to values */ | ||
195 | u32 *dpll_val = ®s->saveDPLL_A; | ||
196 | u32 *fp_val = ®s->saveFPA0; | ||
197 | u32 *pipeconf_val = ®s->savePIPEACONF; | ||
198 | u32 *htot_val = ®s->saveHTOTAL_A; | ||
199 | u32 *hblank_val = ®s->saveHBLANK_A; | ||
200 | u32 *hsync_val = ®s->saveHSYNC_A; | ||
201 | u32 *vtot_val = ®s->saveVTOTAL_A; | ||
202 | u32 *vblank_val = ®s->saveVBLANK_A; | ||
203 | u32 *vsync_val = ®s->saveVSYNC_A; | ||
204 | u32 *pipesrc_val = ®s->savePIPEASRC; | ||
205 | u32 *dspstride_val = ®s->saveDSPASTRIDE; | ||
206 | u32 *dsplinoff_val = ®s->saveDSPALINOFF; | ||
207 | u32 *dsptileoff_val = ®s->saveDSPATILEOFF; | ||
208 | u32 *dspsize_val = ®s->saveDSPASIZE; | ||
209 | u32 *dsppos_val = ®s->saveDSPAPOS; | ||
210 | u32 *dspsurf_val = ®s->saveDSPASURF; | ||
211 | u32 *mipi_val = ®s->saveMIPI; | ||
212 | u32 *dspcntr_val = ®s->saveDSPACNTR; | ||
213 | u32 *dspstatus_val = ®s->saveDSPASTATUS; | ||
214 | u32 *palette_val = regs->save_palette_a; | ||
215 | |||
216 | switch (pipe) { | ||
217 | case 0: | ||
218 | break; | ||
219 | case 1: | ||
220 | /* register */ | ||
221 | dpll_reg = MDFLD_DPLL_B; | ||
222 | fp_reg = MDFLD_DPLL_DIV0; | ||
223 | pipeconf_reg = PIPEBCONF; | ||
224 | htot_reg = HTOTAL_B; | ||
225 | hblank_reg = HBLANK_B; | ||
226 | hsync_reg = HSYNC_B; | ||
227 | vtot_reg = VTOTAL_B; | ||
228 | vblank_reg = VBLANK_B; | ||
229 | vsync_reg = VSYNC_B; | ||
230 | pipesrc_reg = PIPEBSRC; | ||
231 | dspstride_reg = DSPBSTRIDE; | ||
232 | dsplinoff_reg = DSPBLINOFF; | ||
233 | dsptileoff_reg = DSPBTILEOFF; | ||
234 | dspsize_reg = DSPBSIZE; | ||
235 | dsppos_reg = DSPBPOS; | ||
236 | dspsurf_reg = DSPBSURF; | ||
237 | dspcntr_reg = DSPBCNTR; | ||
238 | dspstatus_reg = PIPEBSTAT; | ||
239 | palette_reg = PALETTE_B; | ||
240 | |||
241 | /* values */ | ||
242 | dpll_val = ®s->saveDPLL_B; | ||
243 | fp_val = ®s->saveFPB0; | ||
244 | pipeconf_val = ®s->savePIPEBCONF; | ||
245 | htot_val = ®s->saveHTOTAL_B; | ||
246 | hblank_val = ®s->saveHBLANK_B; | ||
247 | hsync_val = ®s->saveHSYNC_B; | ||
248 | vtot_val = ®s->saveVTOTAL_B; | ||
249 | vblank_val = ®s->saveVBLANK_B; | ||
250 | vsync_val = ®s->saveVSYNC_B; | ||
251 | pipesrc_val = ®s->savePIPEBSRC; | ||
252 | dspstride_val = ®s->saveDSPBSTRIDE; | ||
253 | dsplinoff_val = ®s->saveDSPBLINOFF; | ||
254 | dsptileoff_val = ®s->saveDSPBTILEOFF; | ||
255 | dspsize_val = ®s->saveDSPBSIZE; | ||
256 | dsppos_val = ®s->saveDSPBPOS; | ||
257 | dspsurf_val = ®s->saveDSPBSURF; | ||
258 | dspcntr_val = ®s->saveDSPBCNTR; | ||
259 | dspstatus_val = ®s->saveDSPBSTATUS; | ||
260 | palette_val = regs->save_palette_b; | ||
261 | break; | ||
262 | case 2: | ||
263 | /* register */ | ||
264 | pipeconf_reg = PIPECCONF; | ||
265 | htot_reg = HTOTAL_C; | ||
266 | hblank_reg = HBLANK_C; | ||
267 | hsync_reg = HSYNC_C; | ||
268 | vtot_reg = VTOTAL_C; | ||
269 | vblank_reg = VBLANK_C; | ||
270 | vsync_reg = VSYNC_C; | ||
271 | pipesrc_reg = PIPECSRC; | ||
272 | dspstride_reg = DSPCSTRIDE; | ||
273 | dsplinoff_reg = DSPCLINOFF; | ||
274 | dsptileoff_reg = DSPCTILEOFF; | ||
275 | dspsize_reg = DSPCSIZE; | ||
276 | dsppos_reg = DSPCPOS; | ||
277 | dspsurf_reg = DSPCSURF; | ||
278 | mipi_reg = MIPI_C; | ||
279 | dspcntr_reg = DSPCCNTR; | ||
280 | dspstatus_reg = PIPECSTAT; | ||
281 | palette_reg = PALETTE_C; | ||
282 | |||
283 | /* pointer to values */ | ||
284 | pipeconf_val = ®s->savePIPECCONF; | ||
285 | htot_val = ®s->saveHTOTAL_C; | ||
286 | hblank_val = ®s->saveHBLANK_C; | ||
287 | hsync_val = ®s->saveHSYNC_C; | ||
288 | vtot_val = ®s->saveVTOTAL_C; | ||
289 | vblank_val = ®s->saveVBLANK_C; | ||
290 | vsync_val = ®s->saveVSYNC_C; | ||
291 | pipesrc_val = ®s->savePIPECSRC; | ||
292 | dspstride_val = ®s->saveDSPCSTRIDE; | ||
293 | dsplinoff_val = ®s->saveDSPCLINOFF; | ||
294 | dsptileoff_val = ®s->saveDSPCTILEOFF; | ||
295 | dspsize_val = ®s->saveDSPCSIZE; | ||
296 | dsppos_val = ®s->saveDSPCPOS; | ||
297 | dspsurf_val = ®s->saveDSPCSURF; | ||
298 | mipi_val = ®s->saveMIPI_C; | ||
299 | dspcntr_val = ®s->saveDSPCCNTR; | ||
300 | dspstatus_val = ®s->saveDSPCSTATUS; | ||
301 | palette_val = regs->save_palette_c; | ||
302 | break; | ||
303 | default: | ||
304 | DRM_ERROR("%s, invalid pipe number.\n", __func__); | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | |||
308 | /* Pipe & plane A info */ | ||
309 | *dpll_val = PSB_RVDC32(dpll_reg); | ||
310 | *fp_val = PSB_RVDC32(fp_reg); | ||
311 | *pipeconf_val = PSB_RVDC32(pipeconf_reg); | ||
312 | *htot_val = PSB_RVDC32(htot_reg); | ||
313 | *hblank_val = PSB_RVDC32(hblank_reg); | ||
314 | *hsync_val = PSB_RVDC32(hsync_reg); | ||
315 | *vtot_val = PSB_RVDC32(vtot_reg); | ||
316 | *vblank_val = PSB_RVDC32(vblank_reg); | ||
317 | *vsync_val = PSB_RVDC32(vsync_reg); | ||
318 | *pipesrc_val = PSB_RVDC32(pipesrc_reg); | ||
319 | *dspstride_val = PSB_RVDC32(dspstride_reg); | ||
320 | *dsplinoff_val = PSB_RVDC32(dsplinoff_reg); | ||
321 | *dsptileoff_val = PSB_RVDC32(dsptileoff_reg); | ||
322 | *dspsize_val = PSB_RVDC32(dspsize_reg); | ||
323 | *dsppos_val = PSB_RVDC32(dsppos_reg); | ||
324 | *dspsurf_val = PSB_RVDC32(dspsurf_reg); | ||
325 | *dspcntr_val = PSB_RVDC32(dspcntr_reg); | ||
326 | *dspstatus_val = PSB_RVDC32(dspstatus_reg); | ||
327 | |||
328 | /*save palette (gamma) */ | ||
329 | for (i = 0; i < 256; i++) | ||
330 | palette_val[i] = PSB_RVDC32(palette_reg + (i << 2)); | ||
331 | |||
332 | if (pipe == 1) { | ||
333 | regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); | ||
334 | regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); | ||
335 | |||
336 | regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL); | ||
337 | regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL); | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | *mipi_val = PSB_RVDC32(mipi_reg); | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * mdfld_restore_display_registers | ||
347 | * | ||
348 | * Description: We are going to resume so restore display register state. | ||
349 | * | ||
350 | * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio | ||
351 | */ | ||
352 | static int mdfld_restore_display_registers(struct drm_device *dev, int pipe) | ||
353 | { | ||
354 | /* To get panel out of ULPS mode. */ | ||
355 | u32 temp = 0; | ||
356 | u32 device_ready_reg = DEVICE_READY_REG; | ||
357 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
358 | struct mdfld_dsi_config *dsi_config = NULL; | ||
359 | struct medfield_state *regs = &dev_priv->regs.mdfld; | ||
360 | u32 i = 0; | ||
361 | u32 dpll = 0; | ||
362 | u32 timeout = 0; | ||
363 | |||
364 | /* register */ | ||
365 | u32 dpll_reg = MRST_DPLL_A; | ||
366 | u32 fp_reg = MRST_FPA0; | ||
367 | u32 pipeconf_reg = PIPEACONF; | ||
368 | u32 htot_reg = HTOTAL_A; | ||
369 | u32 hblank_reg = HBLANK_A; | ||
370 | u32 hsync_reg = HSYNC_A; | ||
371 | u32 vtot_reg = VTOTAL_A; | ||
372 | u32 vblank_reg = VBLANK_A; | ||
373 | u32 vsync_reg = VSYNC_A; | ||
374 | u32 pipesrc_reg = PIPEASRC; | ||
375 | u32 dspstride_reg = DSPASTRIDE; | ||
376 | u32 dsplinoff_reg = DSPALINOFF; | ||
377 | u32 dsptileoff_reg = DSPATILEOFF; | ||
378 | u32 dspsize_reg = DSPASIZE; | ||
379 | u32 dsppos_reg = DSPAPOS; | ||
380 | u32 dspsurf_reg = DSPASURF; | ||
381 | u32 dspstatus_reg = PIPEASTAT; | ||
382 | u32 mipi_reg = MIPI; | ||
383 | u32 dspcntr_reg = DSPACNTR; | ||
384 | u32 palette_reg = PALETTE_A; | ||
385 | |||
386 | /* values */ | ||
387 | u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE; | ||
388 | u32 fp_val = regs->saveFPA0; | ||
389 | u32 pipeconf_val = regs->savePIPEACONF; | ||
390 | u32 htot_val = regs->saveHTOTAL_A; | ||
391 | u32 hblank_val = regs->saveHBLANK_A; | ||
392 | u32 hsync_val = regs->saveHSYNC_A; | ||
393 | u32 vtot_val = regs->saveVTOTAL_A; | ||
394 | u32 vblank_val = regs->saveVBLANK_A; | ||
395 | u32 vsync_val = regs->saveVSYNC_A; | ||
396 | u32 pipesrc_val = regs->savePIPEASRC; | ||
397 | u32 dspstride_val = regs->saveDSPASTRIDE; | ||
398 | u32 dsplinoff_val = regs->saveDSPALINOFF; | ||
399 | u32 dsptileoff_val = regs->saveDSPATILEOFF; | ||
400 | u32 dspsize_val = regs->saveDSPASIZE; | ||
401 | u32 dsppos_val = regs->saveDSPAPOS; | ||
402 | u32 dspsurf_val = regs->saveDSPASURF; | ||
403 | u32 dspstatus_val = regs->saveDSPASTATUS; | ||
404 | u32 mipi_val = regs->saveMIPI; | ||
405 | u32 dspcntr_val = regs->saveDSPACNTR; | ||
406 | u32 *palette_val = regs->save_palette_a; | ||
407 | |||
408 | switch (pipe) { | ||
409 | case 0: | ||
410 | dsi_config = dev_priv->dsi_configs[0]; | ||
411 | break; | ||
412 | case 1: | ||
413 | /* register */ | ||
414 | dpll_reg = MDFLD_DPLL_B; | ||
415 | fp_reg = MDFLD_DPLL_DIV0; | ||
416 | pipeconf_reg = PIPEBCONF; | ||
417 | htot_reg = HTOTAL_B; | ||
418 | hblank_reg = HBLANK_B; | ||
419 | hsync_reg = HSYNC_B; | ||
420 | vtot_reg = VTOTAL_B; | ||
421 | vblank_reg = VBLANK_B; | ||
422 | vsync_reg = VSYNC_B; | ||
423 | pipesrc_reg = PIPEBSRC; | ||
424 | dspstride_reg = DSPBSTRIDE; | ||
425 | dsplinoff_reg = DSPBLINOFF; | ||
426 | dsptileoff_reg = DSPBTILEOFF; | ||
427 | dspsize_reg = DSPBSIZE; | ||
428 | dsppos_reg = DSPBPOS; | ||
429 | dspsurf_reg = DSPBSURF; | ||
430 | dspcntr_reg = DSPBCNTR; | ||
431 | dspstatus_reg = PIPEBSTAT; | ||
432 | palette_reg = PALETTE_B; | ||
433 | |||
434 | /* values */ | ||
435 | dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE; | ||
436 | fp_val = regs->saveFPB0; | ||
437 | pipeconf_val = regs->savePIPEBCONF; | ||
438 | htot_val = regs->saveHTOTAL_B; | ||
439 | hblank_val = regs->saveHBLANK_B; | ||
440 | hsync_val = regs->saveHSYNC_B; | ||
441 | vtot_val = regs->saveVTOTAL_B; | ||
442 | vblank_val = regs->saveVBLANK_B; | ||
443 | vsync_val = regs->saveVSYNC_B; | ||
444 | pipesrc_val = regs->savePIPEBSRC; | ||
445 | dspstride_val = regs->saveDSPBSTRIDE; | ||
446 | dsplinoff_val = regs->saveDSPBLINOFF; | ||
447 | dsptileoff_val = regs->saveDSPBTILEOFF; | ||
448 | dspsize_val = regs->saveDSPBSIZE; | ||
449 | dsppos_val = regs->saveDSPBPOS; | ||
450 | dspsurf_val = regs->saveDSPBSURF; | ||
451 | dspcntr_val = regs->saveDSPBCNTR; | ||
452 | dspstatus_val = regs->saveDSPBSTATUS; | ||
453 | palette_val = regs->save_palette_b; | ||
454 | break; | ||
455 | case 2: | ||
456 | /* register */ | ||
457 | pipeconf_reg = PIPECCONF; | ||
458 | htot_reg = HTOTAL_C; | ||
459 | hblank_reg = HBLANK_C; | ||
460 | hsync_reg = HSYNC_C; | ||
461 | vtot_reg = VTOTAL_C; | ||
462 | vblank_reg = VBLANK_C; | ||
463 | vsync_reg = VSYNC_C; | ||
464 | pipesrc_reg = PIPECSRC; | ||
465 | dspstride_reg = DSPCSTRIDE; | ||
466 | dsplinoff_reg = DSPCLINOFF; | ||
467 | dsptileoff_reg = DSPCTILEOFF; | ||
468 | dspsize_reg = DSPCSIZE; | ||
469 | dsppos_reg = DSPCPOS; | ||
470 | dspsurf_reg = DSPCSURF; | ||
471 | mipi_reg = MIPI_C; | ||
472 | dspcntr_reg = DSPCCNTR; | ||
473 | dspstatus_reg = PIPECSTAT; | ||
474 | palette_reg = PALETTE_C; | ||
475 | |||
476 | /* values */ | ||
477 | pipeconf_val = regs->savePIPECCONF; | ||
478 | htot_val = regs->saveHTOTAL_C; | ||
479 | hblank_val = regs->saveHBLANK_C; | ||
480 | hsync_val = regs->saveHSYNC_C; | ||
481 | vtot_val = regs->saveVTOTAL_C; | ||
482 | vblank_val = regs->saveVBLANK_C; | ||
483 | vsync_val = regs->saveVSYNC_C; | ||
484 | pipesrc_val = regs->savePIPECSRC; | ||
485 | dspstride_val = regs->saveDSPCSTRIDE; | ||
486 | dsplinoff_val = regs->saveDSPCLINOFF; | ||
487 | dsptileoff_val = regs->saveDSPCTILEOFF; | ||
488 | dspsize_val = regs->saveDSPCSIZE; | ||
489 | dsppos_val = regs->saveDSPCPOS; | ||
490 | dspsurf_val = regs->saveDSPCSURF; | ||
491 | mipi_val = regs->saveMIPI_C; | ||
492 | dspcntr_val = regs->saveDSPCCNTR; | ||
493 | dspstatus_val = regs->saveDSPCSTATUS; | ||
494 | palette_val = regs->save_palette_c; | ||
495 | |||
496 | dsi_config = dev_priv->dsi_configs[1]; | ||
497 | break; | ||
498 | default: | ||
499 | DRM_ERROR("%s, invalid pipe number.\n", __func__); | ||
500 | return -EINVAL; | ||
501 | } | ||
502 | |||
503 | /*make sure VGA plane is off. It initializes to on after reset!*/ | ||
504 | PSB_WVDC32(0x80000000, VGACNTRL); | ||
505 | |||
506 | if (pipe == 1) { | ||
507 | PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg); | ||
508 | PSB_RVDC32(dpll_reg); | ||
509 | |||
510 | PSB_WVDC32(fp_val, fp_reg); | ||
511 | } else { | ||
512 | |||
513 | dpll = PSB_RVDC32(dpll_reg); | ||
514 | |||
515 | if (!(dpll & DPLL_VCO_ENABLE)) { | ||
516 | |||
517 | /* When ungating the DPLL power, wait 0.5 us | ||
518 | before enabling the VCO */ | ||
519 | if (dpll & MDFLD_PWR_GATE_EN) { | ||
520 | dpll &= ~MDFLD_PWR_GATE_EN; | ||
521 | PSB_WVDC32(dpll, dpll_reg); | ||
522 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
523 | udelay(500); | ||
524 | } | ||
525 | |||
526 | PSB_WVDC32(fp_val, fp_reg); | ||
527 | PSB_WVDC32(dpll_val, dpll_reg); | ||
528 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
529 | udelay(500); | ||
530 | |||
531 | dpll_val |= DPLL_VCO_ENABLE; | ||
532 | PSB_WVDC32(dpll_val, dpll_reg); | ||
533 | PSB_RVDC32(dpll_reg); | ||
534 | |||
535 | /* wait for DSI PLL to lock */ | ||
536 | while (timeout < 20000 && | ||
537 | !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { | ||
538 | udelay(150); | ||
539 | timeout++; | ||
540 | } | ||
541 | |||
542 | if (timeout == 20000) { | ||
543 | DRM_ERROR("%s, can't lock DSIPLL.\n", | ||
544 | __func__); | ||
545 | return -EINVAL; | ||
546 | } | ||
547 | } | ||
548 | } | ||
549 | /* Restore mode */ | ||
550 | PSB_WVDC32(htot_val, htot_reg); | ||
551 | PSB_WVDC32(hblank_val, hblank_reg); | ||
552 | PSB_WVDC32(hsync_val, hsync_reg); | ||
553 | PSB_WVDC32(vtot_val, vtot_reg); | ||
554 | PSB_WVDC32(vblank_val, vblank_reg); | ||
555 | PSB_WVDC32(vsync_val, vsync_reg); | ||
556 | PSB_WVDC32(pipesrc_val, pipesrc_reg); | ||
557 | PSB_WVDC32(dspstatus_val, dspstatus_reg); | ||
558 | |||
559 | /*set up the plane*/ | ||
560 | PSB_WVDC32(dspstride_val, dspstride_reg); | ||
561 | PSB_WVDC32(dsplinoff_val, dsplinoff_reg); | ||
562 | PSB_WVDC32(dsptileoff_val, dsptileoff_reg); | ||
563 | PSB_WVDC32(dspsize_val, dspsize_reg); | ||
564 | PSB_WVDC32(dsppos_val, dsppos_reg); | ||
565 | PSB_WVDC32(dspsurf_val, dspsurf_reg); | ||
566 | |||
567 | if (pipe == 1) { | ||
568 | /* restore palette (gamma) */ | ||
569 | /*DRM_UDELAY(50000); */ | ||
570 | for (i = 0; i < 256; i++) | ||
571 | PSB_WVDC32(palette_val[i], palette_reg + (i << 2)); | ||
572 | |||
573 | PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL); | ||
574 | PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); | ||
575 | |||
576 | /*TODO: resume HDMI port */ | ||
577 | |||
578 | /*TODO: resume pipe*/ | ||
579 | |||
580 | /*enable the plane*/ | ||
581 | PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg); | ||
582 | |||
583 | return 0; | ||
584 | } | ||
585 | |||
586 | /*set up pipe related registers*/ | ||
587 | PSB_WVDC32(mipi_val, mipi_reg); | ||
588 | |||
589 | /*setup MIPI adapter + MIPI IP registers*/ | ||
590 | if (dsi_config) | ||
591 | mdfld_dsi_controller_init(dsi_config, pipe); | ||
592 | |||
593 | if (in_atomic() || in_interrupt()) | ||
594 | mdelay(20); | ||
595 | else | ||
596 | msleep(20); | ||
597 | |||
598 | /*enable the plane*/ | ||
599 | PSB_WVDC32(dspcntr_val, dspcntr_reg); | ||
600 | |||
601 | if (in_atomic() || in_interrupt()) | ||
602 | mdelay(20); | ||
603 | else | ||
604 | msleep(20); | ||
605 | |||
606 | /* LP Hold Release */ | ||
607 | temp = REG_READ(mipi_reg); | ||
608 | temp |= LP_OUTPUT_HOLD_RELEASE; | ||
609 | REG_WRITE(mipi_reg, temp); | ||
610 | mdelay(1); | ||
611 | |||
612 | |||
613 | /* Set DSI host to exit from Ultra Low Power State */ | ||
614 | temp = REG_READ(device_ready_reg); | ||
615 | temp &= ~ULPS_MASK; | ||
616 | temp |= 0x3; | ||
617 | temp |= EXIT_ULPS_DEV_READY; | ||
618 | REG_WRITE(device_ready_reg, temp); | ||
619 | mdelay(1); | ||
620 | |||
621 | temp = REG_READ(device_ready_reg); | ||
622 | temp &= ~ULPS_MASK; | ||
623 | temp |= EXITING_ULPS; | ||
624 | REG_WRITE(device_ready_reg, temp); | ||
625 | mdelay(1); | ||
626 | |||
627 | /*enable the pipe*/ | ||
628 | PSB_WVDC32(pipeconf_val, pipeconf_reg); | ||
629 | |||
630 | /* restore palette (gamma) */ | ||
631 | /*DRM_UDELAY(50000); */ | ||
632 | for (i = 0; i < 256; i++) | ||
633 | PSB_WVDC32(palette_val[i], palette_reg + (i << 2)); | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | static int mdfld_save_registers(struct drm_device *dev) | ||
639 | { | ||
640 | /* mdfld_save_cursor_overlay_registers(dev); */ | ||
641 | mdfld_save_display_registers(dev, 0); | ||
642 | mdfld_save_display_registers(dev, 2); | ||
643 | mdfld_disable_crtc(dev, 0); | ||
644 | mdfld_disable_crtc(dev, 2); | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | static int mdfld_restore_registers(struct drm_device *dev) | ||
650 | { | ||
651 | mdfld_restore_display_registers(dev, 2); | ||
652 | mdfld_restore_display_registers(dev, 0); | ||
653 | /* mdfld_restore_cursor_overlay_registers(dev); */ | ||
654 | |||
655 | return 0; | ||
656 | } | ||
657 | |||
658 | static int mdfld_power_down(struct drm_device *dev) | ||
659 | { | ||
660 | /* FIXME */ | ||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static int mdfld_power_up(struct drm_device *dev) | ||
665 | { | ||
666 | /* FIXME */ | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | const struct psb_ops mdfld_chip_ops = { | ||
671 | .name = "mdfld", | ||
672 | .accel_2d = 0, | ||
673 | .pipes = 3, | ||
674 | .crtcs = 3, | ||
675 | .sgx_offset = MRST_SGX_OFFSET, | ||
676 | |||
677 | .chip_setup = mid_chip_setup, | ||
678 | .crtc_helper = &mdfld_helper_funcs, | ||
679 | .crtc_funcs = &psb_intel_crtc_funcs, | ||
680 | |||
681 | .output_init = mdfld_output_init, | ||
682 | |||
683 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | ||
684 | .backlight_init = mdfld_backlight_init, | ||
685 | #endif | ||
686 | |||
687 | .save_regs = mdfld_save_registers, | ||
688 | .restore_regs = mdfld_restore_registers, | ||
689 | .power_down = mdfld_power_down, | ||
690 | .power_up = mdfld_power_up, | ||
691 | }; | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c new file mode 100644 index 000000000000..d52358b744a0 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | |||
@@ -0,0 +1,1017 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * jim liu <jim.liu@intel.com> | ||
25 | * Jackie Li<yaodong.li@intel.com> | ||
26 | */ | ||
27 | |||
28 | #include "mdfld_dsi_dpi.h" | ||
29 | #include "mdfld_output.h" | ||
30 | #include "mdfld_dsi_pkg_sender.h" | ||
31 | #include "psb_drv.h" | ||
32 | #include "tc35876x-dsi-lvds.h" | ||
33 | |||
34 | static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, | ||
35 | int pipe); | ||
36 | |||
37 | static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe) | ||
38 | { | ||
39 | u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); | ||
40 | int timeout = 0; | ||
41 | |||
42 | udelay(500); | ||
43 | |||
44 | /* This will time out after approximately 2+ seconds */ | ||
45 | while ((timeout < 20000) && | ||
46 | (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) { | ||
47 | udelay(100); | ||
48 | timeout++; | ||
49 | } | ||
50 | |||
51 | if (timeout == 20000) | ||
52 | DRM_INFO("MIPI: HS Data FIFO was never cleared!\n"); | ||
53 | } | ||
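Each of these FIFO helpers is the same poll-with-timeout shape: a 500 us settle, then up to 20000 polls at 100 us intervals, which is where the "approximately 2+ seconds" in the comments comes from (20000 x 100 us = 2 s). A generic sketch of the loop, assuming the driver's REG_READ() macro (which picks up the local dev pointer); the driver itself open-codes this per FIFO:

/* Poll until the given status bits clear; returns true on success. */
static bool mdfld_poll_bits_clear(struct drm_device *dev, u32 reg, u32 mask)
{
	int timeout = 0;

	udelay(500);
	while (timeout < 20000 && (REG_READ(reg) & mask)) {
		udelay(100);
		timeout++;
	}
	return timeout < 20000;
}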
54 | |||
55 | static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe) | ||
56 | { | ||
57 | u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); | ||
58 | int timeout = 0; | ||
59 | |||
60 | udelay(500); | ||
61 | |||
62 | /* This will time out after approximately 2+ seconds */ | ||
63 | while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) | ||
64 | & DSI_FIFO_GEN_HS_CTRL_FULL)) { | ||
65 | udelay(100); | ||
66 | timeout++; | ||
67 | } | ||
68 | if (timeout == 20000) | ||
69 | DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n"); | ||
70 | } | ||
71 | |||
72 | static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe) | ||
73 | { | ||
74 | u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); | ||
75 | int timeout = 0; | ||
76 | |||
77 | udelay(500); | ||
78 | |||
79 | /* This will time out after approximately 2+ seconds */ | ||
80 | while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & | ||
81 | DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) { | ||
82 | udelay(100); | ||
83 | timeout++; | ||
84 | } | ||
85 | |||
86 | if (timeout == 20000) | ||
87 | DRM_ERROR("MIPI: DPI FIFO was never cleared\n"); | ||
88 | } | ||
89 | |||
90 | static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe) | ||
91 | { | ||
92 | u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe); | ||
93 | int timeout = 0; | ||
94 | |||
95 | udelay(500); | ||
96 | |||
97 | /* This will time out after approximately 2+ seconds */ | ||
98 | while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) | ||
99 | & DSI_INTR_STATE_SPL_PKG_SENT))) { | ||
100 | udelay(100); | ||
101 | timeout++; | ||
102 | } | ||
103 | |||
104 | if (timeout == 20000) | ||
105 | DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n"); | ||
106 | } | ||
107 | |||
108 | /* For TC35876X */ | ||
109 | |||
110 | static void dsi_set_device_ready_state(struct drm_device *dev, int state, | ||
111 | int pipe) | ||
112 | { | ||
113 | REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0); | ||
114 | } | ||
115 | |||
116 | static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, | ||
117 | int state, int pipe) | ||
118 | { | ||
119 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
120 | u32 pipeconf_reg = PIPEACONF; | ||
121 | u32 dspcntr_reg = DSPACNTR; | ||
122 | |||
123 | u32 dspcntr = dev_priv->dspcntr[pipe]; | ||
124 | u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; | ||
125 | |||
126 | if (pipe) { | ||
127 | pipeconf_reg = PIPECCONF; | ||
128 | dspcntr_reg = DSPCCNTR; | ||
129 | } else | ||
130 | mipi &= (~0x03); | ||
131 | |||
132 | if (state) { | ||
133 | /*Set up pipe */ | ||
134 | REG_WRITE(pipeconf_reg, BIT(31)); | ||
135 | |||
136 | if (REG_BIT_WAIT(pipeconf_reg, 1, 30)) | ||
137 | dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n", | ||
138 | __func__); | ||
139 | |||
140 | /*Set up display plane */ | ||
141 | REG_WRITE(dspcntr_reg, dspcntr); | ||
142 | } else { | ||
143 | u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE; | ||
144 | |||
145 | /* Put DSI lanes to ULPS to disable pipe */ | ||
146 | REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1); | ||
147 | REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */ | ||
148 | |||
149 | /* LP Hold */ | ||
150 | REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16); | ||
151 | REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */ | ||
152 | |||
153 | /* Disable display plane */ | ||
154 | REG_FLD_MOD(dspcntr_reg, 0, 31, 31); | ||
155 | |||
156 | /* Flush the plane changes ??? posted write? */ | ||
157 | REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); | ||
158 | REG_READ(dspbase_reg); | ||
159 | |||
160 | /* Disable PIPE */ | ||
161 | REG_FLD_MOD(pipeconf_reg, 0, 31, 31); | ||
162 | |||
163 | if (REG_BIT_WAIT(pipeconf_reg, 0, 30)) | ||
164 | dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n", | ||
165 | __func__); | ||
166 | |||
167 | if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28)) | ||
168 | dev_err(&dev->pdev->dev, "%s: FIFO not empty\n", | ||
169 | __func__); | ||
170 | } | ||
171 | } | ||
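The enable/disable paths above rely on REG_FLD_MOD(reg, val, high, low), a read-modify-write of the bit field high..low of a display register (e.g. bits 2:1 of DEVICE_READY for ULPS, bit 31 of DSPCNTR/PIPECONF). The macro itself is not part of this hunk; a generic sketch of such a field update, illustrative rather than the driver's exact definition:

#include <linux/types.h>

/* Replace bits [high:low] of orig with val (0 <= low <= high <= 31). */
static u32 field_mod(u32 orig, u32 val, unsigned int high, unsigned int low)
{
	u32 mask = (~0u >> (31 - high)) & (~0u << low);

	return (orig & ~mask) | ((val << low) & mask);
}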
172 | |||
173 | static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder, | ||
174 | int pipe) | ||
175 | { | ||
176 | struct mdfld_dsi_dpi_output *dpi_output = | ||
177 | MDFLD_DSI_DPI_OUTPUT(dsi_encoder); | ||
178 | struct mdfld_dsi_config *dsi_config = | ||
179 | mdfld_dsi_encoder_get_config(dsi_encoder); | ||
180 | struct drm_device *dev = dsi_config->dev; | ||
181 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
182 | |||
183 | if (!dev_priv->dpi_panel_on[pipe]) { | ||
184 | dev_err(dev->dev, "DPI panel is already off\n"); | ||
185 | return; | ||
186 | } | ||
187 | tc35876x_toshiba_bridge_panel_off(dev); | ||
188 | tc35876x_set_bridge_reset_state(dev, 1); | ||
189 | dsi_set_pipe_plane_enable_state(dev, 0, pipe); | ||
190 | mdfld_dsi_dpi_shut_down(dpi_output, pipe); | ||
191 | dsi_set_device_ready_state(dev, 0, pipe); | ||
192 | } | ||
193 | |||
194 | static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder, | ||
195 | int pipe) | ||
196 | { | ||
197 | struct mdfld_dsi_dpi_output *dpi_output = | ||
198 | MDFLD_DSI_DPI_OUTPUT(dsi_encoder); | ||
199 | struct mdfld_dsi_config *dsi_config = | ||
200 | mdfld_dsi_encoder_get_config(dsi_encoder); | ||
201 | struct drm_device *dev = dsi_config->dev; | ||
202 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
203 | |||
204 | if (dev_priv->dpi_panel_on[pipe]) { | ||
205 | dev_err(dev->dev, "DPI panel is already on\n"); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | /* For resume path sequence */ | ||
210 | mdfld_dsi_dpi_shut_down(dpi_output, pipe); | ||
211 | dsi_set_device_ready_state(dev, 0, pipe); | ||
212 | |||
213 | dsi_set_device_ready_state(dev, 1, pipe); | ||
214 | tc35876x_set_bridge_reset_state(dev, 0); | ||
215 | tc35876x_configure_lvds_bridge(dev); | ||
216 | mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */ | ||
217 | dsi_set_pipe_plane_enable_state(dev, 1, pipe); | ||
218 | } | ||
219 | /* End for TC35876X */ | ||
220 | |||
221 | /* ************************************************************************* *\ | ||
222 | * FUNCTION: mdfld_dsi_tpo_ic_init | ||
223 | * | ||
224 | * DESCRIPTION: This function is called only by mrst_dsi_mode_set and | ||
225 | * restore_display_registers. Since this function does not | ||
226 | * acquire the mutex, it is important that the calling function | ||
227 | * does! | ||
228 | \* ************************************************************************* */ | ||
229 | static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe) | ||
230 | { | ||
231 | struct drm_device *dev = dsi_config->dev; | ||
232 | u32 dcsChannelNumber = dsi_config->channel_num; | ||
233 | u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); | ||
234 | u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); | ||
235 | u32 gen_ctrl_val = GEN_LONG_WRITE; | ||
236 | |||
237 | DRM_INFO("Enter mrst init TPO MIPI display.\n"); | ||
238 | |||
239 | gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS; | ||
240 | |||
241 | /* Flip page order */ | ||
242 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
243 | REG_WRITE(gen_data_reg, 0x00008036); | ||
244 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
245 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS)); | ||
246 | |||
247 | /* 0xF0 */ | ||
248 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
249 | REG_WRITE(gen_data_reg, 0x005a5af0); | ||
250 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
251 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); | ||
252 | |||
253 | /* Write protection key */ | ||
254 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
255 | REG_WRITE(gen_data_reg, 0x005a5af1); | ||
256 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
257 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); | ||
258 | |||
259 | /* 0xFC */ | ||
260 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
261 | REG_WRITE(gen_data_reg, 0x005a5afc); | ||
262 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
263 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); | ||
264 | |||
265 | /* 0xB7 */ | ||
266 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
267 | REG_WRITE(gen_data_reg, 0x770000b7); | ||
268 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
269 | REG_WRITE(gen_data_reg, 0x00000044); | ||
270 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
271 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS)); | ||
272 | |||
273 | /* 0xB6 */ | ||
274 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
275 | REG_WRITE(gen_data_reg, 0x000a0ab6); | ||
276 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
277 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); | ||
278 | |||
279 | /* 0xF2 */ | ||
280 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
281 | REG_WRITE(gen_data_reg, 0x081010f2); | ||
282 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
283 | REG_WRITE(gen_data_reg, 0x4a070708); | ||
284 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
285 | REG_WRITE(gen_data_reg, 0x000000c5); | ||
286 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
287 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); | ||
288 | |||
289 | /* 0xF8 */ | ||
290 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
291 | REG_WRITE(gen_data_reg, 0x024003f8); | ||
292 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
293 | REG_WRITE(gen_data_reg, 0x01030a04); | ||
294 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
295 | REG_WRITE(gen_data_reg, 0x0e020220); | ||
296 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
297 | REG_WRITE(gen_data_reg, 0x00000004); | ||
298 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
299 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS)); | ||
300 | |||
301 | /* 0xE2 */ | ||
302 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
303 | REG_WRITE(gen_data_reg, 0x398fc3e2); | ||
304 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
305 | REG_WRITE(gen_data_reg, 0x0000916f); | ||
306 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
307 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS)); | ||
308 | |||
309 | /* 0xB0 */ | ||
310 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
311 | REG_WRITE(gen_data_reg, 0x000000b0); | ||
312 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
313 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS)); | ||
314 | |||
315 | /* 0xF4 */ | ||
316 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
317 | REG_WRITE(gen_data_reg, 0x240242f4); | ||
318 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
319 | REG_WRITE(gen_data_reg, 0x78ee2002); | ||
320 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
321 | REG_WRITE(gen_data_reg, 0x2a071050); | ||
322 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
323 | REG_WRITE(gen_data_reg, 0x507fee10); | ||
324 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
325 | REG_WRITE(gen_data_reg, 0x10300710); | ||
326 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
327 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS)); | ||
328 | |||
329 | /* 0xBA */ | ||
330 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
331 | REG_WRITE(gen_data_reg, 0x19fe07ba); | ||
332 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
333 | REG_WRITE(gen_data_reg, 0x101c0a31); | ||
334 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
335 | REG_WRITE(gen_data_reg, 0x00000010); | ||
336 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
337 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); | ||
338 | |||
339 | /* 0xBB */ | ||
340 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
341 | REG_WRITE(gen_data_reg, 0x28ff07bb); | ||
342 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
343 | REG_WRITE(gen_data_reg, 0x24280a31); | ||
344 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
345 | REG_WRITE(gen_data_reg, 0x00000034); | ||
346 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
347 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); | ||
348 | |||
349 | /* 0xFB */ | ||
350 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
351 | REG_WRITE(gen_data_reg, 0x535d05fb); | ||
352 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
353 | REG_WRITE(gen_data_reg, 0x1b1a2130); | ||
354 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
355 | REG_WRITE(gen_data_reg, 0x221e180e); | ||
356 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
357 | REG_WRITE(gen_data_reg, 0x131d2120); | ||
358 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
359 | REG_WRITE(gen_data_reg, 0x535d0508); | ||
360 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
361 | REG_WRITE(gen_data_reg, 0x1c1a2131); | ||
362 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
363 | REG_WRITE(gen_data_reg, 0x231f160d); | ||
364 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
365 | REG_WRITE(gen_data_reg, 0x111b2220); | ||
366 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
367 | REG_WRITE(gen_data_reg, 0x535c2008); | ||
368 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
369 | REG_WRITE(gen_data_reg, 0x1f1d2433); | ||
370 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
371 | REG_WRITE(gen_data_reg, 0x2c251a10); | ||
372 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
373 | REG_WRITE(gen_data_reg, 0x2c34372d); | ||
374 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
375 | REG_WRITE(gen_data_reg, 0x00000023); | ||
376 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
377 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS)); | ||
378 | |||
379 | /* 0xFA */ | ||
380 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
381 | REG_WRITE(gen_data_reg, 0x525c0bfa); | ||
382 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
383 | REG_WRITE(gen_data_reg, 0x1c1c232f); | ||
384 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
385 | REG_WRITE(gen_data_reg, 0x2623190e); | ||
386 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
387 | REG_WRITE(gen_data_reg, 0x18212625); | ||
388 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
389 | REG_WRITE(gen_data_reg, 0x545d0d0e); | ||
390 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
391 | REG_WRITE(gen_data_reg, 0x1e1d2333); | ||
392 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
393 | REG_WRITE(gen_data_reg, 0x26231a10); | ||
394 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
395 | REG_WRITE(gen_data_reg, 0x1a222725); | ||
396 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
397 | REG_WRITE(gen_data_reg, 0x545d280f); | ||
398 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
399 | REG_WRITE(gen_data_reg, 0x21202635); | ||
400 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
401 | REG_WRITE(gen_data_reg, 0x31292013); | ||
402 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
403 | REG_WRITE(gen_data_reg, 0x31393d33); | ||
404 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
405 | REG_WRITE(gen_data_reg, 0x00000029); | ||
406 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
407 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS)); | ||
408 | |||
409 | /* Set DM */ | ||
410 | mdfld_wait_for_HS_DATA_FIFO(dev, pipe); | ||
411 | REG_WRITE(gen_data_reg, 0x000100f7); | ||
412 | mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); | ||
413 | REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); | ||
414 | } | ||
415 | |||
416 | static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count, | ||
417 | int num_lane, int bpp) | ||
418 | { | ||
419 | return (u16)((pixel_clock_count * bpp) / (num_lane * 8)); | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * Calculate the dpi time basing on a given drm mode @mode | ||
424 | * return 0 on success. | ||
425 | * FIXME: I was using proposed mode value for calculation, may need to | ||
426 | * use crtc mode values later | ||
427 | */ | ||
428 | int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode, | ||
429 | struct mdfld_dsi_dpi_timing *dpi_timing, | ||
430 | int num_lane, int bpp) | ||
431 | { | ||
432 | int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive; | ||
433 | int pclk_vsync, pclk_vfp, pclk_vbp; | ||
434 | |||
435 | pclk_hactive = mode->hdisplay; | ||
436 | pclk_hfp = mode->hsync_start - mode->hdisplay; | ||
437 | pclk_hsync = mode->hsync_end - mode->hsync_start; | ||
438 | pclk_hbp = mode->htotal - mode->hsync_end; | ||
439 | |||
440 | pclk_vfp = mode->vsync_start - mode->vdisplay; | ||
441 | pclk_vsync = mode->vsync_end - mode->vsync_start; | ||
442 | pclk_vbp = mode->vtotal - mode->vsync_end; | ||
443 | |||
444 | /* | ||
445 | * byte clock counts were calculated using the following formula | ||
446 | * bclock_count = pclk_count * bpp / num_lane / 8 | ||
447 | */ | ||
448 | dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
449 | pclk_hsync, num_lane, bpp); | ||
450 | dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
451 | pclk_hbp, num_lane, bpp); | ||
452 | dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
453 | pclk_hfp, num_lane, bpp); | ||
454 | dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
455 | pclk_hactive, num_lane, bpp); | ||
456 | dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
457 | pclk_vsync, num_lane, bpp); | ||
458 | dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
459 | pclk_vbp, num_lane, bpp); | ||
460 | dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count( | ||
461 | pclk_vfp, num_lane, bpp); | ||
462 | |||
463 | return 0; | ||
464 | } | ||
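The conversion above is bclock_count = pclk_count * bpp / (num_lane * 8), i.e. each timing interval is re-expressed in DSI byte clocks. As a worked example with assumed numbers, a 32-pixel-clock HSYNC on a 24 bpp, 2-lane link becomes 32 * 24 / (2 * 8) = 48 byte clocks; the same scaling is applied to every horizontal and vertical interval above.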
465 | |||
466 | void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, | ||
467 | int pipe) | ||
468 | { | ||
469 | struct drm_device *dev = dsi_config->dev; | ||
470 | int lane_count = dsi_config->lane_count; | ||
471 | struct mdfld_dsi_dpi_timing dpi_timing; | ||
472 | struct drm_display_mode *mode = dsi_config->mode; | ||
473 | u32 val; | ||
474 | |||
475 | /*un-ready device*/ | ||
476 | REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0); | ||
477 | |||
478 | /*init dsi adapter before kicking off*/ | ||
479 | REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018); | ||
480 | |||
481 | /*enable all interrupts*/ | ||
482 | REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff); | ||
483 | |||
484 | /*set up func_prg*/ | ||
485 | val = lane_count; | ||
486 | val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET; | ||
487 | |||
488 | switch (dsi_config->bpp) { | ||
489 | case 16: | ||
490 | val |= DSI_DPI_COLOR_FORMAT_RGB565; | ||
491 | break; | ||
492 | case 18: | ||
493 | val |= DSI_DPI_COLOR_FORMAT_RGB666; | ||
494 | break; | ||
495 | case 24: | ||
496 | val |= DSI_DPI_COLOR_FORMAT_RGB888; | ||
497 | break; | ||
498 | default: | ||
499 | DRM_ERROR("unsupported color format, bpp = %d\n", | ||
500 | dsi_config->bpp); | ||
501 | } | ||
502 | REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val); | ||
503 | |||
504 | REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), | ||
505 | (mode->vtotal * mode->htotal * dsi_config->bpp / | ||
506 | (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK); | ||
507 | REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), | ||
508 | 0xffff & DSI_LP_RX_TIMEOUT_MASK); | ||
509 | |||
510 | /*max value: 20 clock cycles of txclkesc*/ | ||
511 | REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), | ||
512 | 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK); | ||
513 | |||
514 | /*min 21 txclkesc, max: ffffh*/ | ||
515 | REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), | ||
516 | 0xffff & DSI_RESET_TIMER_MASK); | ||
517 | |||
518 | REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe), | ||
519 | mode->vdisplay << 16 | mode->hdisplay); | ||
520 | |||
521 | /*set DPI timing registers*/ | ||
522 | mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, | ||
523 | dsi_config->lane_count, dsi_config->bpp); | ||
524 | |||
525 | REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe), | ||
526 | dpi_timing.hsync_count & DSI_DPI_TIMING_MASK); | ||
527 | REG_WRITE(MIPI_HBP_COUNT_REG(pipe), | ||
528 | dpi_timing.hbp_count & DSI_DPI_TIMING_MASK); | ||
529 | REG_WRITE(MIPI_HFP_COUNT_REG(pipe), | ||
530 | dpi_timing.hfp_count & DSI_DPI_TIMING_MASK); | ||
531 | REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe), | ||
532 | dpi_timing.hactive_count & DSI_DPI_TIMING_MASK); | ||
533 | REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe), | ||
534 | dpi_timing.vsync_count & DSI_DPI_TIMING_MASK); | ||
535 | REG_WRITE(MIPI_VBP_COUNT_REG(pipe), | ||
536 | dpi_timing.vbp_count & DSI_DPI_TIMING_MASK); | ||
537 | REG_WRITE(MIPI_VFP_COUNT_REG(pipe), | ||
538 | dpi_timing.vfp_count & DSI_DPI_TIMING_MASK); | ||
539 | |||
540 | REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46); | ||
541 | |||
542 | /*min: 7d0 max: 4e20*/ | ||
543 | REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0); | ||
544 | |||
545 | /*set up video mode*/ | ||
546 | val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE; | ||
547 | REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val); | ||
548 | |||
549 | REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000); | ||
550 | |||
551 | REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004); | ||
552 | |||
553 | /*TODO: figure out how to setup these registers*/ | ||
554 | if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
555 | REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008); | ||
556 | else | ||
557 | REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408); | ||
558 | |||
559 | REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14); | ||
560 | |||
561 | if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
562 | tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */ | ||
563 | |||
564 | /*set device ready*/ | ||
565 | REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0); | ||
566 | } | ||
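The MIPI_HS_TX_TIMEOUT value programmed above, vtotal * htotal * bpp / (8 * lane_count), is one full frame expressed in byte clocks. With assumed numbers, a 1200 x 650 total raster at 24 bpp on two lanes gives 1200 * 650 * 24 / 16 = 1,170,000 byte clocks, before the mask with DSI_HS_TX_TIMEOUT_MASK is applied.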
567 | |||
568 | void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe) | ||
569 | { | ||
570 | struct drm_device *dev = output->dev; | ||
571 | |||
572 | /* clear special packet sent bit */ | ||
573 | if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT) | ||
574 | REG_WRITE(MIPI_INTR_STAT_REG(pipe), | ||
575 | DSI_INTR_STATE_SPL_PKG_SENT); | ||
576 | |||
577 | /*send turn on package*/ | ||
578 | REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON); | ||
579 | |||
580 | /*wait for SPL_PKG_SENT interrupt*/ | ||
581 | mdfld_wait_for_SPL_PKG_SENT(dev, pipe); | ||
582 | |||
583 | if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT) | ||
584 | REG_WRITE(MIPI_INTR_STAT_REG(pipe), | ||
585 | DSI_INTR_STATE_SPL_PKG_SENT); | ||
586 | |||
587 | output->panel_on = 1; | ||
588 | |||
589 | /* FIXME the following is disabled to WA the X slow start issue | ||
590 | for TMD panel | ||
591 | if (pipe == 2) | ||
592 | dev_priv->dpi_panel_on2 = true; | ||
593 | else if (pipe == 0) | ||
594 | dev_priv->dpi_panel_on = true; */ | ||
595 | } | ||
596 | |||
597 | static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, | ||
598 | int pipe) | ||
599 | { | ||
600 | struct drm_device *dev = output->dev; | ||
601 | |||
602 | /* if the output is already off, or mode setting didn't happen, ignore this */ | ||
603 | if ((!output->panel_on) || output->first_boot) { | ||
604 | output->first_boot = 0; | ||
605 | return; | ||
606 | } | ||
607 | |||
608 | /* Wait for dpi fifo to empty */ | ||
609 | mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe); | ||
610 | |||
611 | /* Clear the special packet interrupt bit if set */ | ||
612 | if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT) | ||
613 | REG_WRITE(MIPI_INTR_STAT_REG(pipe), | ||
614 | DSI_INTR_STATE_SPL_PKG_SENT); | ||
615 | |||
616 | if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN) | ||
617 | goto shutdown_out; | ||
618 | |||
619 | REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN); | ||
620 | |||
621 | shutdown_out: | ||
622 | output->panel_on = 0; | ||
623 | output->first_boot = 0; | ||
624 | |||
625 | /* FIXME the following is disabled to WA the X slow start issue | ||
626 | for TMD panel | ||
627 | if (pipe == 2) | ||
628 | dev_priv->dpi_panel_on2 = false; | ||
629 | else if (pipe == 0) | ||
630 | dev_priv->dpi_panel_on = false; */ | ||
631 | } | ||
632 | |||
633 | static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on) | ||
634 | { | ||
635 | struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); | ||
636 | struct mdfld_dsi_dpi_output *dpi_output = | ||
637 | MDFLD_DSI_DPI_OUTPUT(dsi_encoder); | ||
638 | struct mdfld_dsi_config *dsi_config = | ||
639 | mdfld_dsi_encoder_get_config(dsi_encoder); | ||
640 | int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); | ||
641 | struct drm_device *dev = dsi_config->dev; | ||
642 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
643 | |||
644 | /*start up display island if it was shutdown*/ | ||
645 | if (!gma_power_begin(dev, true)) | ||
646 | return; | ||
647 | |||
648 | if (on) { | ||
649 | if (mdfld_get_panel_type(dev, pipe) == TMD_VID) | ||
650 | mdfld_dsi_dpi_turn_on(dpi_output, pipe); | ||
651 | else if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
652 | mdfld_dsi_configure_up(dsi_encoder, pipe); | ||
653 | else { | ||
654 | /*enable mipi port*/ | ||
655 | REG_WRITE(MIPI_PORT_CONTROL(pipe), | ||
656 | REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31)); | ||
657 | REG_READ(MIPI_PORT_CONTROL(pipe)); | ||
658 | |||
659 | mdfld_dsi_dpi_turn_on(dpi_output, pipe); | ||
660 | mdfld_dsi_tpo_ic_init(dsi_config, pipe); | ||
661 | } | ||
662 | dev_priv->dpi_panel_on[pipe] = true; | ||
663 | } else { | ||
664 | if (mdfld_get_panel_type(dev, pipe) == TMD_VID) | ||
665 | mdfld_dsi_dpi_shut_down(dpi_output, pipe); | ||
666 | else if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
667 | mdfld_dsi_configure_down(dsi_encoder, pipe); | ||
668 | else { | ||
669 | mdfld_dsi_dpi_shut_down(dpi_output, pipe); | ||
670 | |||
671 | /*disable mipi port*/ | ||
672 | REG_WRITE(MIPI_PORT_CONTROL(pipe), | ||
673 | REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31)); | ||
674 | REG_READ(MIPI_PORT_CONTROL(pipe)); | ||
675 | } | ||
676 | dev_priv->dpi_panel_on[pipe] = false; | ||
677 | } | ||
678 | gma_power_end(dev); | ||
679 | } | ||
680 | |||
681 | void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode) | ||
682 | { | ||
683 | mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON); | ||
684 | } | ||
685 | |||
686 | bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, | ||
687 | struct drm_display_mode *mode, | ||
688 | struct drm_display_mode *adjusted_mode) | ||
689 | { | ||
690 | struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); | ||
691 | struct mdfld_dsi_config *dsi_config = | ||
692 | mdfld_dsi_encoder_get_config(dsi_encoder); | ||
693 | struct drm_display_mode *fixed_mode = dsi_config->fixed_mode; | ||
694 | |||
695 | if (fixed_mode) { | ||
696 | adjusted_mode->hdisplay = fixed_mode->hdisplay; | ||
697 | adjusted_mode->hsync_start = fixed_mode->hsync_start; | ||
698 | adjusted_mode->hsync_end = fixed_mode->hsync_end; | ||
699 | adjusted_mode->htotal = fixed_mode->htotal; | ||
700 | adjusted_mode->vdisplay = fixed_mode->vdisplay; | ||
701 | adjusted_mode->vsync_start = fixed_mode->vsync_start; | ||
702 | adjusted_mode->vsync_end = fixed_mode->vsync_end; | ||
703 | adjusted_mode->vtotal = fixed_mode->vtotal; | ||
704 | adjusted_mode->clock = fixed_mode->clock; | ||
705 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
706 | } | ||
707 | return true; | ||
708 | } | ||
709 | |||
710 | void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder) | ||
711 | { | ||
712 | mdfld_dsi_dpi_set_power(encoder, false); | ||
713 | } | ||
714 | |||
715 | void mdfld_dsi_dpi_commit(struct drm_encoder *encoder) | ||
716 | { | ||
717 | mdfld_dsi_dpi_set_power(encoder, true); | ||
718 | } | ||
719 | |||
720 | /* For TC35876X */ | ||
721 | /* This functionality was implemented in FW in iCDK */ | ||
722 | /* But removed in DV0 and later. So need to add here. */ | ||
723 | static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe) | ||
724 | { | ||
725 | struct drm_device *dev = dsi_config->dev; | ||
726 | |||
727 | REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018); | ||
728 | REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff); | ||
729 | REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff); | ||
730 | REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff); | ||
731 | REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14); | ||
732 | REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff); | ||
733 | REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25); | ||
734 | REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0); | ||
735 | REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000); | ||
736 | REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004); | ||
737 | REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820); | ||
738 | REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14); | ||
739 | } | ||
740 | |||
741 | static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config, | ||
742 | int pipe) | ||
743 | { | ||
744 | struct drm_device *dev = dsi_config->dev; | ||
745 | struct mdfld_dsi_dpi_timing dpi_timing; | ||
746 | struct drm_display_mode *mode = dsi_config->mode; | ||
747 | |||
748 | mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, | ||
749 | dsi_config->lane_count, | ||
750 | dsi_config->bpp); | ||
751 | |||
752 | REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe), | ||
753 | mode->vdisplay << 16 | mode->hdisplay); | ||
754 | REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe), | ||
755 | dpi_timing.hsync_count & DSI_DPI_TIMING_MASK); | ||
756 | REG_WRITE(MIPI_HBP_COUNT_REG(pipe), | ||
757 | dpi_timing.hbp_count & DSI_DPI_TIMING_MASK); | ||
758 | REG_WRITE(MIPI_HFP_COUNT_REG(pipe), | ||
759 | dpi_timing.hfp_count & DSI_DPI_TIMING_MASK); | ||
760 | REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe), | ||
761 | dpi_timing.hactive_count & DSI_DPI_TIMING_MASK); | ||
762 | REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe), | ||
763 | dpi_timing.vsync_count & DSI_DPI_TIMING_MASK); | ||
764 | REG_WRITE(MIPI_VBP_COUNT_REG(pipe), | ||
765 | dpi_timing.vbp_count & DSI_DPI_TIMING_MASK); | ||
766 | REG_WRITE(MIPI_VFP_COUNT_REG(pipe), | ||
767 | dpi_timing.vfp_count & DSI_DPI_TIMING_MASK); | ||
768 | } | ||
769 | |||
770 | static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe) | ||
771 | { | ||
772 | struct drm_device *dev = dsi_config->dev; | ||
773 | int lane_count = dsi_config->lane_count; | ||
774 | |||
775 | if (pipe) { | ||
776 | REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002); | ||
777 | REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000); | ||
778 | } else { | ||
779 | REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000); | ||
780 | REG_WRITE(MIPI_PORT_CONTROL(2), 0x00); | ||
781 | } | ||
782 | |||
783 | REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F); | ||
784 | REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F); | ||
785 | |||
786 | /* lane_count = 3 */ | ||
787 | REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count); | ||
788 | |||
789 | mdfld_mipi_set_video_timing(dsi_config, pipe); | ||
790 | } | ||
791 | |||
792 | static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe) | ||
793 | { | ||
794 | struct drm_device *dev = dsi_config->dev; | ||
795 | struct drm_display_mode *mode = dsi_config->mode; | ||
796 | |||
797 | REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1)); | ||
798 | REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1)); | ||
799 | REG_WRITE(HSYNC_A, | ||
800 | ((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1)); | ||
801 | |||
802 | REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1)); | ||
803 | REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1)); | ||
804 | REG_WRITE(VSYNC_A, | ||
805 | ((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1)); | ||
806 | |||
807 | REG_WRITE(PIPEASRC, | ||
808 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
809 | } | ||
810 | /* End for TC35876X */ | ||
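
The pipe timing registers written in mdfld_set_pipe_timing() above pack two zero-based values into each register: the total count in the high half and the active/start count in the low half. A worked example with hypothetical numbers (hdisplay = 864, htotal = 1000; any real panel mode follows the same pattern):

	HTOTAL_A = ((1000 - 1) << 16) | (864 - 1)
	         = (0x3e7 << 16) | 0x35f
	         = 0x03e7035f

HSYNC_A and the vertical registers use the same end/start and total/display packing, and PIPEASRC packs (hdisplay - 1) in the high half and (vdisplay - 1) in the low half.
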
811 | |||
812 | void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder, | ||
813 | struct drm_display_mode *mode, | ||
814 | struct drm_display_mode *adjusted_mode) | ||
815 | { | ||
816 | struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); | ||
817 | struct mdfld_dsi_dpi_output *dpi_output = | ||
818 | MDFLD_DSI_DPI_OUTPUT(dsi_encoder); | ||
819 | struct mdfld_dsi_config *dsi_config = | ||
820 | mdfld_dsi_encoder_get_config(dsi_encoder); | ||
821 | struct drm_device *dev = dsi_config->dev; | ||
822 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
823 | int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); | ||
824 | |||
825 | u32 pipeconf_reg = PIPEACONF; | ||
826 | u32 dspcntr_reg = DSPACNTR; | ||
827 | |||
828 | u32 pipeconf = dev_priv->pipeconf[pipe]; | ||
829 | u32 dspcntr = dev_priv->dspcntr[pipe]; | ||
830 | u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; | ||
831 | |||
832 | if (pipe) { | ||
833 | pipeconf_reg = PIPECCONF; | ||
834 | dspcntr_reg = DSPCCNTR; | ||
835 | } else { | ||
836 | if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
837 | mipi &= (~0x03); /* Use all four lanes */ | ||
838 | else | ||
839 | mipi |= 2; | ||
840 | } | ||
841 | |||
842 | /*start up display island if it was shutdown*/ | ||
843 | if (!gma_power_begin(dev, true)) | ||
844 | return; | ||
845 | |||
846 | if (mdfld_get_panel_type(dev, pipe) == TC35876X) { | ||
847 | /* | ||
848 | * The following logic is required to reset and configure the | ||
849 | * bridge. It also starts the DSI clock at 200MHz. | ||
850 | */ | ||
851 | tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */ | ||
852 | tc35876x_toshiba_bridge_panel_on(dev); | ||
853 | udelay(100); | ||
854 | /* Now start the DSI clock */ | ||
855 | REG_WRITE(MRST_DPLL_A, 0x00); | ||
856 | REG_WRITE(MRST_FPA0, 0xC1); | ||
857 | REG_WRITE(MRST_DPLL_A, 0x00800000); | ||
858 | udelay(500); | ||
859 | REG_WRITE(MRST_DPLL_A, 0x80800000); | ||
860 | |||
861 | if (REG_BIT_WAIT(pipeconf_reg, 1, 29)) | ||
862 | dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n", | ||
863 | __func__); | ||
864 | |||
865 | REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008); | ||
866 | |||
867 | mipi_set_properties(dsi_config, pipe); | ||
868 | mdfld_mipi_config(dsi_config, pipe); | ||
869 | mdfld_set_pipe_timing(dsi_config, pipe); | ||
870 | |||
871 | REG_WRITE(DSPABASE, 0x00); | ||
872 | REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4)); | ||
873 | REG_WRITE(DSPASIZE, | ||
874 | ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); | ||
875 | |||
876 | REG_WRITE(DSPACNTR, 0x98000000); | ||
877 | REG_WRITE(DSPASURF, 0x00); | ||
878 | |||
879 | REG_WRITE(VGACNTRL, 0x80000000); | ||
880 | REG_WRITE(DEVICE_READY_REG, 0x00000001); | ||
881 | |||
882 | REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000); | ||
883 | } else { | ||
884 | /*set up mipi port FIXME: do at init time */ | ||
885 | REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi); | ||
886 | } | ||
887 | REG_READ(MIPI_PORT_CONTROL(pipe)); | ||
888 | |||
889 | if (mdfld_get_panel_type(dev, pipe) == TMD_VID) { | ||
890 | /* NOP */ | ||
891 | } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) { | ||
892 | /* set up DSI controller DPI interface */ | ||
893 | mdfld_dsi_dpi_controller_init(dsi_config, pipe); | ||
894 | |||
895 | /* Configure MIPI Bridge and Panel */ | ||
896 | tc35876x_configure_lvds_bridge(dev); | ||
897 | dev_priv->dpi_panel_on[pipe] = true; | ||
898 | } else { | ||
899 | /*turn on DPI interface*/ | ||
900 | mdfld_dsi_dpi_turn_on(dpi_output, pipe); | ||
901 | } | ||
902 | |||
903 | /*set up pipe*/ | ||
904 | REG_WRITE(pipeconf_reg, pipeconf); | ||
905 | REG_READ(pipeconf_reg); | ||
906 | |||
907 | /*set up display plane*/ | ||
908 | REG_WRITE(dspcntr_reg, dspcntr); | ||
909 | REG_READ(dspcntr_reg); | ||
910 | |||
911 | msleep(20); /* FIXME: this should wait for vblank */ | ||
912 | |||
913 | if (mdfld_get_panel_type(dev, pipe) == TMD_VID) { | ||
914 | /* NOP */ | ||
915 | } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) { | ||
916 | mdfld_dsi_dpi_turn_on(dpi_output, pipe); | ||
917 | } else { | ||
918 | /* init driver ic */ | ||
919 | mdfld_dsi_tpo_ic_init(dsi_config, pipe); | ||
920 | /*init backlight*/ | ||
921 | mdfld_dsi_brightness_init(dsi_config, pipe); | ||
922 | } | ||
923 | |||
924 | gma_power_end(dev); | ||
925 | } | ||
926 | |||
927 | /* | ||
928 | * Init DSI DPI encoder. | ||
929 | * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector. | ||
930 | * Return a pointer to the newly allocated DPI encoder, or NULL on error. | ||
931 | */ | ||
932 | struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, | ||
933 | struct mdfld_dsi_connector *dsi_connector, | ||
934 | const struct panel_funcs *p_funcs) | ||
935 | { | ||
936 | struct mdfld_dsi_dpi_output *dpi_output = NULL; | ||
937 | struct mdfld_dsi_config *dsi_config; | ||
938 | struct drm_connector *connector = NULL; | ||
939 | struct drm_encoder *encoder = NULL; | ||
940 | int pipe; | ||
941 | u32 data; | ||
942 | int ret; | ||
943 | |||
944 | pipe = dsi_connector->pipe; | ||
945 | |||
946 | if (mdfld_get_panel_type(dev, pipe) != TC35876X) { | ||
947 | dsi_config = mdfld_dsi_get_config(dsi_connector); | ||
948 | |||
949 | /* panel hard-reset */ | ||
950 | if (p_funcs->reset) { | ||
951 | ret = p_funcs->reset(pipe); | ||
952 | if (ret) { | ||
953 | DRM_ERROR("Panel %d hard-reset failed\n", pipe); | ||
954 | return NULL; | ||
955 | } | ||
956 | } | ||
957 | |||
958 | /* panel drvIC init */ | ||
959 | if (p_funcs->drv_ic_init) | ||
960 | p_funcs->drv_ic_init(dsi_config, pipe); | ||
961 | |||
962 | /* panel power mode detect */ | ||
963 | ret = mdfld_dsi_get_power_mode(dsi_config, &data, false); | ||
964 | if (ret) { | ||
965 | DRM_ERROR("Panel %d get power mode failed\n", pipe); | ||
966 | dsi_connector->status = connector_status_disconnected; | ||
967 | } else { | ||
968 | DRM_INFO("pipe %d power mode 0x%x\n", pipe, data); | ||
969 | dsi_connector->status = connector_status_connected; | ||
970 | } | ||
971 | } | ||
972 | |||
973 | dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL); | ||
974 | if (!dpi_output) { | ||
975 | DRM_ERROR("No memory\n"); | ||
976 | return NULL; | ||
977 | } | ||
978 | |||
979 | dpi_output->panel_on = 0; | ||
983 | |||
984 | dpi_output->dev = dev; | ||
985 | if (mdfld_get_panel_type(dev, pipe) != TC35876X) | ||
986 | dpi_output->p_funcs = p_funcs; | ||
987 | dpi_output->first_boot = 1; | ||
988 | |||
989 | /*get fixed mode*/ | ||
990 | dsi_config = mdfld_dsi_get_config(dsi_connector); | ||
991 | |||
992 | /*create drm encoder object*/ | ||
993 | connector = &dsi_connector->base.base; | ||
994 | encoder = &dpi_output->base.base.base; | ||
995 | drm_encoder_init(dev, | ||
996 | encoder, | ||
997 | p_funcs->encoder_funcs, | ||
998 | DRM_MODE_ENCODER_LVDS); | ||
999 | drm_encoder_helper_add(encoder, | ||
1000 | p_funcs->encoder_helper_funcs); | ||
1001 | |||
1002 | /*attach to given connector*/ | ||
1003 | drm_mode_connector_attach_encoder(connector, encoder); | ||
1004 | |||
1005 | /*set possible crtcs and clones*/ | ||
1006 | if (dsi_connector->pipe) { | ||
1007 | encoder->possible_crtcs = (1 << 2); | ||
1008 | encoder->possible_clones = (1 << 1); | ||
1009 | } else { | ||
1010 | encoder->possible_crtcs = (1 << 0); | ||
1011 | encoder->possible_clones = (1 << 0); | ||
1012 | } | ||
1013 | |||
1014 | dsi_connector->base.encoder = &dpi_output->base.base; | ||
1015 | |||
1016 | return &dpi_output->base; | ||
1017 | } | ||
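
mdfld_dsi_dpi_mode_set() and mdfld_mipi_set_video_timing() both rely on mdfld_dsi_dpi_timing_calculation() (declared in mdfld_dsi_dpi.h) to turn a drm_display_mode into the per-region counts programmed above. Its body is outside this hunk, so the following is only a sketch of the usual conversion, assuming the common DSI convention that horizontal counts are expressed in byte clocks (pixels * bpp / (8 * lanes)) while vertical counts stay in lines; the example_* helper names are invented for illustration.

	#include "mdfld_dsi_dpi.h"	/* struct mdfld_dsi_dpi_timing, drm types */

	/* Hypothetical helper: convert a pixel count to DSI byte clocks. */
	static u16 example_to_byte_clocks(int pixels, int bpp, int num_lane)
	{
		return (pixels * bpp) / (8 * num_lane);
	}

	/* Sketch only; the real mdfld_dsi_dpi_timing_calculation() may round
	 * or clamp differently. */
	static void example_dpi_timing(const struct drm_display_mode *mode,
				       struct mdfld_dsi_dpi_timing *t,
				       int num_lane, int bpp)
	{
		t->hsync_count = example_to_byte_clocks(mode->hsync_end -
						mode->hsync_start, bpp, num_lane);
		t->hbp_count = example_to_byte_clocks(mode->htotal -
						mode->hsync_end, bpp, num_lane);
		t->hfp_count = example_to_byte_clocks(mode->hsync_start -
						mode->hdisplay, bpp, num_lane);
		t->hactive_count = example_to_byte_clocks(mode->hdisplay,
						bpp, num_lane);

		/* Vertical regions are plain line counts. */
		t->vsync_count = mode->vsync_end - mode->vsync_start;
		t->vbp_count = mode->vtotal - mode->vsync_end;
		t->vfp_count = mode->vsync_start - mode->vdisplay;
	}

Whatever the exact formula, each count is masked with DSI_DPI_TIMING_MASK (0xffff) before being written to the MIPI_*_COUNT_REG registers, as seen in both mdfld_dsi_dpi_controller_init() and mdfld_mipi_set_video_timing().
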
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h new file mode 100644 index 000000000000..6f762478b959 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * jim liu <jim.liu@intel.com> | ||
25 | * Jackie Li<yaodong.li@intel.com> | ||
26 | */ | ||
27 | |||
28 | #ifndef __MDFLD_DSI_DPI_H__ | ||
29 | #define __MDFLD_DSI_DPI_H__ | ||
30 | |||
31 | #include "mdfld_dsi_output.h" | ||
32 | #include "mdfld_output.h" | ||
33 | |||
34 | struct mdfld_dsi_dpi_timing { | ||
35 | u16 hsync_count; | ||
36 | u16 hbp_count; | ||
37 | u16 hfp_count; | ||
38 | u16 hactive_count; | ||
39 | u16 vsync_count; | ||
40 | u16 vbp_count; | ||
41 | u16 vfp_count; | ||
42 | }; | ||
43 | |||
44 | struct mdfld_dsi_dpi_output { | ||
45 | struct mdfld_dsi_encoder base; | ||
46 | struct drm_device *dev; | ||
47 | |||
48 | int panel_on; | ||
49 | int first_boot; | ||
50 | |||
51 | const struct panel_funcs *p_funcs; | ||
52 | }; | ||
53 | |||
54 | #define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\ | ||
55 | container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base) | ||
56 | |||
57 | /* Export functions */ | ||
58 | extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode, | ||
59 | struct mdfld_dsi_dpi_timing *dpi_timing, | ||
60 | int num_lane, int bpp); | ||
61 | extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, | ||
62 | struct mdfld_dsi_connector *dsi_connector, | ||
63 | const struct panel_funcs *p_funcs); | ||
64 | |||
65 | /* MDFLD DPI helper functions */ | ||
66 | extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode); | ||
67 | extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, | ||
68 | struct drm_display_mode *mode, | ||
69 | struct drm_display_mode *adjusted_mode); | ||
70 | extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder); | ||
71 | extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder); | ||
72 | extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder, | ||
73 | struct drm_display_mode *mode, | ||
74 | struct drm_display_mode *adjusted_mode); | ||
75 | extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, | ||
76 | int pipe); | ||
77 | extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, | ||
78 | int pipe); | ||
79 | #endif /*__MDFLD_DSI_DPI_H__*/ | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c new file mode 100644 index 000000000000..4c2cb4a8ad98 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c | |||
@@ -0,0 +1,618 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * jim liu <jim.liu@intel.com> | ||
25 | * Jackie Li<yaodong.li@intel.com> | ||
26 | */ | ||
27 | |||
28 | #include <linux/module.h> | ||
29 | |||
30 | #include "mdfld_dsi_output.h" | ||
31 | #include "mdfld_dsi_dpi.h" | ||
32 | #include "mdfld_output.h" | ||
33 | #include "mdfld_dsi_pkg_sender.h" | ||
34 | #include "tc35876x-dsi-lvds.h" | ||
35 | #include <linux/pm_runtime.h> | ||
36 | #include <asm/intel_scu_ipc.h> | ||
37 | |||
38 | /* get the LABC from command line. */ | ||
39 | static int LABC_control = 1; | ||
40 | |||
41 | #ifdef MODULE | ||
42 | module_param(LABC_control, int, 0644); | ||
43 | #else | ||
44 | |||
45 | static int __init parse_LABC_control(char *arg) | ||
46 | { | ||
47 | /* LABC control can be passed in as a cmdline parameter */ | ||
48 | /* to enable this feature add LABC=1 to cmdline */ | ||
49 | /* to disable this feature add LABC=0 to cmdline */ | ||
50 | if (!arg) | ||
51 | return -EINVAL; | ||
52 | |||
53 | if (!strcasecmp(arg, "0")) | ||
54 | LABC_control = 0; | ||
55 | else if (!strcasecmp(arg, "1")) | ||
56 | LABC_control = 1; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | early_param("LABC", parse_LABC_control); | ||
61 | #endif | ||
62 | |||
63 | /** | ||
64 | * Check whether the generic control or data buffer is empty and ready. | ||
65 | */ | ||
66 | void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg, | ||
67 | u32 fifo_stat) | ||
68 | { | ||
69 | u32 GEN_BF_time_out_count; | ||
70 | |||
71 | /* Check MIPI Adapter command registers */ | ||
72 | for (GEN_BF_time_out_count = 0; | ||
73 | GEN_BF_time_out_count < GEN_FB_TIME_OUT; | ||
74 | GEN_BF_time_out_count++) { | ||
75 | if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat) | ||
76 | break; | ||
77 | udelay(100); | ||
78 | } | ||
79 | |||
80 | if (GEN_BF_time_out_count == GEN_FB_TIME_OUT) | ||
81 | DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n", | ||
82 | gen_fifo_stat_reg); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * Manage the DSI MIPI keyboard and display brightness. | ||
87 | * FIXME: this is exported to OSPM code. Should work out a specific | ||
88 | * display interface to OSPM. | ||
89 | */ | ||
90 | |||
91 | void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe) | ||
92 | { | ||
93 | struct mdfld_dsi_pkg_sender *sender = | ||
94 | mdfld_dsi_get_pkg_sender(dsi_config); | ||
95 | struct drm_device *dev; | ||
96 | struct drm_psb_private *dev_priv; | ||
97 | u32 gen_ctrl_val; | ||
98 | |||
99 | /* Check the sender before dereferencing it */ | ||
100 | if (!sender) { | ||
101 | DRM_ERROR("No sender found\n"); | ||
102 | return; | ||
103 | } | ||
104 | |||
105 | dev = sender->dev; | ||
106 | dev_priv = dev->dev_private; | ||
107 | |||
104 | /* Set default display backlight value to 85% (0xd8)*/ | ||
105 | mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1, | ||
106 | true); | ||
107 | |||
108 | /* Set minimum brightness setting of CABC function to 20% (0x33)*/ | ||
109 | mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true); | ||
110 | |||
111 | /* Enable backlight or/and LABC */ | ||
112 | gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON | | ||
113 | BACKLIGHT_ON; | ||
114 | if (LABC_control == 1) | ||
115 | gen_ctrl_val |= DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO | | ||
116 | AMBIENT_LIGHT_SENSE_ON; | ||
120 | |||
121 | dev_priv->mipi_ctrl_display = gen_ctrl_val; | ||
122 | |||
123 | mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val, | ||
124 | 1, true); | ||
125 | |||
126 | mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true); | ||
127 | } | ||
128 | |||
129 | void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level) | ||
130 | { | ||
131 | struct mdfld_dsi_pkg_sender *sender; | ||
132 | struct drm_psb_private *dev_priv; | ||
133 | struct mdfld_dsi_config *dsi_config; | ||
134 | u32 gen_ctrl_val = 0; | ||
135 | int p_type = TMD_VID; | ||
136 | |||
137 | if (!dev || (pipe != 0 && pipe != 2)) { | ||
138 | DRM_ERROR("Invalid parameter\n"); | ||
139 | return; | ||
140 | } | ||
141 | |||
142 | p_type = mdfld_get_panel_type(dev, 0); | ||
143 | |||
144 | dev_priv = dev->dev_private; | ||
145 | |||
146 | if (pipe) | ||
147 | dsi_config = dev_priv->dsi_configs[1]; | ||
148 | else | ||
149 | dsi_config = dev_priv->dsi_configs[0]; | ||
150 | |||
151 | sender = mdfld_dsi_get_pkg_sender(dsi_config); | ||
152 | |||
153 | if (!sender) { | ||
154 | DRM_ERROR("No sender found\n"); | ||
155 | return; | ||
156 | } | ||
157 | |||
158 | gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff; | ||
159 | |||
160 | dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n", | ||
161 | pipe, gen_ctrl_val); | ||
162 | |||
163 | if (p_type == TMD_VID) { | ||
164 | /* Set display backlight value */ | ||
165 | mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness, | ||
166 | (u8)gen_ctrl_val, 1, true); | ||
167 | } else { | ||
168 | /* Set display backlight value */ | ||
169 | mdfld_dsi_send_mcs_short(sender, write_display_brightness, | ||
170 | (u8)gen_ctrl_val, 1, true); | ||
171 | |||
172 | /* Enable backlight control */ | ||
173 | if (level == 0) | ||
174 | gen_ctrl_val = 0; | ||
175 | else | ||
176 | gen_ctrl_val = dev_priv->mipi_ctrl_display; | ||
177 | |||
178 | mdfld_dsi_send_mcs_short(sender, write_ctrl_display, | ||
179 | (u8)gen_ctrl_val, 1, true); | ||
180 | } | ||
181 | } | ||
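
A quick check of the level-to-register scaling above, with MDFLD_DSI_BRIGHTNESS_MAX_LEVEL defined as 100 in mdfld_dsi_output.h:

	level =  50  ->  (50 * 0xff) / 100  = 127 = 0x7f
	level = 100  ->  (100 * 0xff) / 100 = 255 = 0xff
	level =   0  ->  0x00, and the follow-up write_ctrl_display value is also forced to 0

so a backlight request on the 0..100 scale maps linearly onto the panel's 8-bit brightness range.
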
182 | |||
183 | static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config, | ||
184 | u8 dcs, u32 *data, bool hs) | ||
185 | { | ||
186 | struct mdfld_dsi_pkg_sender *sender | ||
187 | = mdfld_dsi_get_pkg_sender(dsi_config); | ||
188 | |||
189 | if (!sender || !data) { | ||
190 | DRM_ERROR("Invalid parameter\n"); | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs); | ||
195 | } | ||
196 | |||
197 | int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode, | ||
198 | bool hs) | ||
199 | { | ||
200 | if (!dsi_config || !mode) { | ||
201 | DRM_ERROR("Invalid parameter\n"); | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | |||
205 | return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * NOTE: this function was used by OSPM. | ||
210 | * TODO: will be removed later, should work out display interfaces for OSPM | ||
211 | */ | ||
212 | void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe) | ||
213 | { | ||
214 | if (!dsi_config || ((pipe != 0) && (pipe != 2))) { | ||
215 | DRM_ERROR("Invalid parameters\n"); | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | mdfld_dsi_dpi_controller_init(dsi_config, pipe); | ||
220 | } | ||
221 | |||
222 | static void mdfld_dsi_connector_save(struct drm_connector *connector) | ||
223 | { | ||
224 | } | ||
225 | |||
226 | static void mdfld_dsi_connector_restore(struct drm_connector *connector) | ||
227 | { | ||
228 | } | ||
229 | |||
230 | /* FIXME: start using the force parameter */ | ||
231 | static enum drm_connector_status | ||
232 | mdfld_dsi_connector_detect(struct drm_connector *connector, bool force) | ||
233 | { | ||
234 | struct mdfld_dsi_connector *dsi_connector | ||
235 | = mdfld_dsi_connector(connector); | ||
236 | |||
237 | dsi_connector->status = connector_status_connected; | ||
238 | |||
239 | return dsi_connector->status; | ||
240 | } | ||
241 | |||
242 | static int mdfld_dsi_connector_set_property(struct drm_connector *connector, | ||
243 | struct drm_property *property, | ||
244 | uint64_t value) | ||
245 | { | ||
246 | struct drm_encoder *encoder = connector->encoder; | ||
247 | struct backlight_device *psb_bd; | ||
248 | |||
249 | if (!strcmp(property->name, "scaling mode") && encoder) { | ||
250 | struct psb_intel_crtc *psb_crtc = | ||
251 | to_psb_intel_crtc(encoder->crtc); | ||
252 | bool centerechange; | ||
253 | uint64_t val; | ||
254 | |||
255 | if (!psb_crtc) | ||
256 | goto set_prop_error; | ||
257 | |||
258 | switch (value) { | ||
259 | case DRM_MODE_SCALE_FULLSCREEN: | ||
260 | break; | ||
261 | case DRM_MODE_SCALE_NO_SCALE: | ||
262 | break; | ||
263 | case DRM_MODE_SCALE_ASPECT: | ||
264 | break; | ||
265 | default: | ||
266 | goto set_prop_error; | ||
267 | } | ||
268 | |||
269 | if (drm_connector_property_get_value(connector, property, &val)) | ||
270 | goto set_prop_error; | ||
271 | |||
272 | if (val == value) | ||
273 | goto set_prop_done; | ||
274 | |||
275 | if (drm_connector_property_set_value(connector, | ||
276 | property, value)) | ||
277 | goto set_prop_error; | ||
278 | |||
279 | centerechange = (val == DRM_MODE_SCALE_NO_SCALE) || | ||
280 | (value == DRM_MODE_SCALE_NO_SCALE); | ||
281 | |||
282 | if (psb_crtc->saved_mode.hdisplay != 0 && | ||
283 | psb_crtc->saved_mode.vdisplay != 0) { | ||
284 | if (centerechange) { | ||
285 | if (!drm_crtc_helper_set_mode(encoder->crtc, | ||
286 | &psb_crtc->saved_mode, | ||
287 | encoder->crtc->x, | ||
288 | encoder->crtc->y, | ||
289 | encoder->crtc->fb)) | ||
290 | goto set_prop_error; | ||
291 | } else { | ||
292 | struct drm_encoder_helper_funcs *funcs = | ||
293 | encoder->helper_private; | ||
294 | funcs->mode_set(encoder, | ||
295 | &psb_crtc->saved_mode, | ||
296 | &psb_crtc->saved_adjusted_mode); | ||
297 | } | ||
298 | } | ||
299 | } else if (!strcmp(property->name, "backlight") && encoder) { | ||
300 | if (drm_connector_property_set_value(connector, property, | ||
301 | value)) | ||
302 | goto set_prop_error; | ||
303 | else { | ||
304 | psb_bd = mdfld_get_backlight_device(); | ||
305 | if (psb_bd) { | ||
306 | psb_bd->props.brightness = value; | ||
307 | mdfld_set_brightness(psb_bd); | ||
308 | } | ||
309 | } | ||
310 | } | ||
311 | set_prop_done: | ||
312 | return 0; | ||
313 | set_prop_error: | ||
314 | return -1; | ||
315 | } | ||
316 | |||
317 | static void mdfld_dsi_connector_destroy(struct drm_connector *connector) | ||
318 | { | ||
319 | struct mdfld_dsi_connector *dsi_connector = | ||
320 | mdfld_dsi_connector(connector); | ||
321 | struct mdfld_dsi_pkg_sender *sender; | ||
322 | |||
323 | if (!dsi_connector) | ||
324 | return; | ||
325 | drm_sysfs_connector_remove(connector); | ||
326 | drm_connector_cleanup(connector); | ||
327 | sender = dsi_connector->pkg_sender; | ||
328 | mdfld_dsi_pkg_sender_destroy(sender); | ||
329 | kfree(dsi_connector); | ||
330 | } | ||
331 | |||
332 | static int mdfld_dsi_connector_get_modes(struct drm_connector *connector) | ||
333 | { | ||
334 | struct mdfld_dsi_connector *dsi_connector = | ||
335 | mdfld_dsi_connector(connector); | ||
336 | struct mdfld_dsi_config *dsi_config = | ||
337 | mdfld_dsi_get_config(dsi_connector); | ||
338 | struct drm_display_mode *fixed_mode = dsi_config->fixed_mode; | ||
339 | struct drm_display_mode *dup_mode = NULL; | ||
340 | struct drm_device *dev = connector->dev; | ||
341 | |||
342 | connector->display_info.min_vfreq = 0; | ||
343 | connector->display_info.max_vfreq = 200; | ||
344 | connector->display_info.min_hfreq = 0; | ||
345 | connector->display_info.max_hfreq = 200; | ||
346 | |||
347 | if (fixed_mode) { | ||
348 | dev_dbg(dev->dev, "fixed_mode %dx%d\n", | ||
349 | fixed_mode->hdisplay, fixed_mode->vdisplay); | ||
350 | dup_mode = drm_mode_duplicate(dev, fixed_mode); | ||
351 | drm_mode_probed_add(connector, dup_mode); | ||
352 | return 1; | ||
353 | } | ||
354 | DRM_ERROR("Didn't get any modes!\n"); | ||
355 | return 0; | ||
356 | } | ||
357 | |||
358 | static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector, | ||
359 | struct drm_display_mode *mode) | ||
360 | { | ||
361 | struct mdfld_dsi_connector *dsi_connector = | ||
362 | mdfld_dsi_connector(connector); | ||
363 | struct mdfld_dsi_config *dsi_config = | ||
364 | mdfld_dsi_get_config(dsi_connector); | ||
365 | struct drm_display_mode *fixed_mode = dsi_config->fixed_mode; | ||
366 | |||
367 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
368 | return MODE_NO_DBLESCAN; | ||
369 | |||
370 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
371 | return MODE_NO_INTERLACE; | ||
372 | |||
373 | /** | ||
374 | * FIXME: the current DC has no fitting unit, so reject any mode setting | ||
375 | * request that does not match the fixed mode. | ||
376 | * Will figure out a way to do up-scaling (panel fitting) later. | ||
377 | **/ | ||
378 | if (fixed_mode) { | ||
379 | if (mode->hdisplay != fixed_mode->hdisplay) | ||
380 | return MODE_PANEL; | ||
381 | |||
382 | if (mode->vdisplay != fixed_mode->vdisplay) | ||
383 | return MODE_PANEL; | ||
384 | } | ||
385 | |||
386 | return MODE_OK; | ||
387 | } | ||
388 | |||
389 | static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode) | ||
390 | { | ||
391 | if (mode == connector->dpms) | ||
392 | return; | ||
393 | |||
394 | /*first, execute dpms*/ | ||
395 | |||
396 | drm_helper_connector_dpms(connector, mode); | ||
397 | } | ||
398 | |||
399 | static struct drm_encoder *mdfld_dsi_connector_best_encoder( | ||
400 | struct drm_connector *connector) | ||
401 | { | ||
402 | struct mdfld_dsi_connector *dsi_connector = | ||
403 | mdfld_dsi_connector(connector); | ||
404 | struct mdfld_dsi_config *dsi_config = | ||
405 | mdfld_dsi_get_config(dsi_connector); | ||
406 | return &dsi_config->encoder->base.base; | ||
407 | } | ||
408 | |||
409 | /*DSI connector funcs*/ | ||
410 | static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { | ||
411 | .dpms = mdfld_dsi_connector_dpms, | ||
412 | .save = mdfld_dsi_connector_save, | ||
413 | .restore = mdfld_dsi_connector_restore, | ||
414 | .detect = mdfld_dsi_connector_detect, | ||
415 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
416 | .set_property = mdfld_dsi_connector_set_property, | ||
417 | .destroy = mdfld_dsi_connector_destroy, | ||
418 | }; | ||
419 | |||
420 | /*DSI connector helper funcs*/ | ||
421 | static const struct drm_connector_helper_funcs | ||
422 | mdfld_dsi_connector_helper_funcs = { | ||
423 | .get_modes = mdfld_dsi_connector_get_modes, | ||
424 | .mode_valid = mdfld_dsi_connector_mode_valid, | ||
425 | .best_encoder = mdfld_dsi_connector_best_encoder, | ||
426 | }; | ||
427 | |||
428 | static int mdfld_dsi_get_default_config(struct drm_device *dev, | ||
429 | struct mdfld_dsi_config *config, int pipe) | ||
430 | { | ||
431 | if (!dev || !config) { | ||
432 | DRM_ERROR("Invalid parameters"); | ||
433 | return -EINVAL; | ||
434 | } | ||
435 | |||
436 | config->bpp = 24; | ||
437 | if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
438 | config->lane_count = 4; | ||
439 | else | ||
440 | config->lane_count = 2; | ||
441 | config->channel_num = 0; | ||
442 | |||
443 | if (mdfld_get_panel_type(dev, pipe) == TMD_VID) | ||
444 | config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE; | ||
445 | else if (mdfld_get_panel_type(dev, pipe) == TC35876X) | ||
446 | config->video_mode = | ||
447 | MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS; | ||
448 | else | ||
449 | config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE; | ||
450 | |||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | int mdfld_dsi_panel_reset(int pipe) | ||
455 | { | ||
456 | unsigned gpio; | ||
457 | int ret = 0; | ||
458 | |||
459 | switch (pipe) { | ||
460 | case 0: | ||
461 | gpio = 128; | ||
462 | break; | ||
463 | case 2: | ||
464 | gpio = 34; | ||
465 | break; | ||
466 | default: | ||
467 | DRM_ERROR("Invalid output\n"); | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | |||
471 | ret = gpio_request(gpio, "gfx"); | ||
472 | if (ret) { | ||
473 | DRM_ERROR("gpio_rqueset failed\n"); | ||
474 | return ret; | ||
475 | } | ||
476 | |||
477 | ret = gpio_direction_output(gpio, 1); | ||
478 | if (ret) { | ||
479 | DRM_ERROR("gpio_direction_output failed\n"); | ||
480 | goto gpio_error; | ||
481 | } | ||
482 | |||
483 | gpio_get_value(gpio); | ||
484 | |||
485 | gpio_error: | ||
486 | if (gpio_is_valid(gpio)) | ||
487 | gpio_free(gpio); | ||
488 | |||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * MIPI output init | ||
494 | * @dev: drm device | ||
495 | * @pipe: pipe number, 0 or 2 | ||
496 | * @p_vid_funcs: panel callbacks for this output | ||
497 | * | ||
498 | * Do the initialization of a MIPI output, including creating the DRM mode | ||
499 | * objects and initializing the DSI output on @pipe. | ||
500 | */ | ||
501 | void mdfld_dsi_output_init(struct drm_device *dev, | ||
502 | int pipe, | ||
503 | const struct panel_funcs *p_vid_funcs) | ||
504 | { | ||
505 | struct mdfld_dsi_config *dsi_config; | ||
506 | struct mdfld_dsi_connector *dsi_connector; | ||
507 | struct drm_connector *connector; | ||
508 | struct mdfld_dsi_encoder *encoder; | ||
509 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
510 | struct panel_info dsi_panel_info; | ||
511 | u32 width_mm, height_mm; | ||
512 | |||
513 | dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe); | ||
514 | |||
515 | if (!dev || ((pipe != 0) && (pipe != 2))) { | ||
516 | DRM_ERROR("Invalid parameter\n"); | ||
517 | return; | ||
518 | } | ||
519 | |||
520 | /*create a new connector*/ | ||
521 | dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL); | ||
522 | if (!dsi_connector) { | ||
523 | DRM_ERROR("No memory"); | ||
524 | return; | ||
525 | } | ||
526 | |||
527 | dsi_connector->pipe = pipe; | ||
528 | |||
529 | dsi_config = kzalloc(sizeof(struct mdfld_dsi_config), | ||
530 | GFP_KERNEL); | ||
531 | if (!dsi_config) { | ||
532 | DRM_ERROR("cannot allocate memory for DSI config\n"); | ||
533 | goto dsi_init_err0; | ||
534 | } | ||
535 | mdfld_dsi_get_default_config(dev, dsi_config, pipe); | ||
536 | |||
537 | dsi_connector->private = dsi_config; | ||
538 | |||
539 | dsi_config->changed = 1; | ||
540 | dsi_config->dev = dev; | ||
541 | |||
542 | dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev); | ||
543 | if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info)) | ||
544 | goto dsi_init_err0; | ||
545 | |||
546 | width_mm = dsi_panel_info.width_mm; | ||
547 | height_mm = dsi_panel_info.height_mm; | ||
548 | |||
549 | dsi_config->mode = dsi_config->fixed_mode; | ||
550 | dsi_config->connector = dsi_connector; | ||
551 | |||
552 | if (!dsi_config->fixed_mode) { | ||
553 | DRM_ERROR("No pannel fixed mode was found\n"); | ||
554 | goto dsi_init_err0; | ||
555 | } | ||
556 | |||
557 | if (pipe && dev_priv->dsi_configs[0]) { | ||
558 | dsi_config->dvr_ic_inited = 0; | ||
559 | dev_priv->dsi_configs[1] = dsi_config; | ||
560 | } else if (pipe == 0) { | ||
561 | dsi_config->dvr_ic_inited = 1; | ||
562 | dev_priv->dsi_configs[0] = dsi_config; | ||
563 | } else { | ||
564 | DRM_ERROR("Trying to init MIPI1 before MIPI0\n"); | ||
565 | goto dsi_init_err0; | ||
566 | } | ||
567 | |||
569 | connector = &dsi_connector->base.base; | ||
570 | drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, | ||
571 | DRM_MODE_CONNECTOR_LVDS); | ||
572 | drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); | ||
573 | |||
574 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
575 | connector->display_info.width_mm = width_mm; | ||
576 | connector->display_info.height_mm = height_mm; | ||
577 | connector->interlace_allowed = false; | ||
578 | connector->doublescan_allowed = false; | ||
579 | |||
580 | /*attach properties*/ | ||
581 | drm_connector_attach_property(connector, | ||
582 | dev->mode_config.scaling_mode_property, | ||
583 | DRM_MODE_SCALE_FULLSCREEN); | ||
584 | drm_connector_attach_property(connector, | ||
585 | dev_priv->backlight_property, | ||
586 | MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); | ||
587 | |||
588 | /*init DSI package sender on this output*/ | ||
589 | if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) { | ||
590 | DRM_ERROR("Package Sender initialization failed on pipe %d\n", | ||
591 | pipe); | ||
592 | goto dsi_init_err0; | ||
593 | } | ||
594 | |||
595 | encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs); | ||
596 | if (!encoder) { | ||
597 | DRM_ERROR("Create DPI encoder failed\n"); | ||
598 | goto dsi_init_err1; | ||
599 | } | ||
600 | encoder->private = dsi_config; | ||
601 | dsi_config->encoder = encoder; | ||
602 | encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI : | ||
603 | INTEL_OUTPUT_MIPI2; | ||
604 | drm_sysfs_connector_add(connector); | ||
605 | return; | ||
606 | |||
607 | /*TODO: add code to destroy outputs on error*/ | ||
608 | dsi_init_err1: | ||
609 | /*destroy sender*/ | ||
610 | mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender); | ||
611 | |||
612 | drm_connector_cleanup(connector); | ||
613 | |||
614 | kfree(dsi_config->fixed_mode); | ||
615 | kfree(dsi_config); | ||
616 | dsi_init_err0: | ||
617 | kfree(dsi_connector); | ||
618 | } | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h new file mode 100644 index 000000000000..21071cef92a4 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h | |||
@@ -0,0 +1,378 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * jim liu <jim.liu@intel.com> | ||
25 | * Jackie Li<yaodong.li@intel.com> | ||
26 | */ | ||
27 | |||
28 | #ifndef __MDFLD_DSI_OUTPUT_H__ | ||
29 | #define __MDFLD_DSI_OUTPUT_H__ | ||
30 | |||
31 | #include <linux/backlight.h> | ||
32 | #include <linux/version.h> | ||
33 | #include <drm/drmP.h> | ||
34 | #include <drm/drm.h> | ||
35 | #include <drm/drm_crtc.h> | ||
36 | #include <drm/drm_edid.h> | ||
37 | |||
38 | #include "psb_drv.h" | ||
39 | #include "psb_intel_drv.h" | ||
40 | #include "psb_intel_reg.h" | ||
41 | #include "mdfld_output.h" | ||
42 | |||
43 | #include <asm/mrst.h> | ||
44 | |||
45 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | ||
46 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) | ||
47 | #define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) | ||
48 | #define FLD_MOD(orig, val, start, end) \ | ||
49 | (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) | ||
50 | |||
51 | #define REG_FLD_MOD(reg, val, start, end) \ | ||
52 | REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end)) | ||
53 | |||
54 | static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg, | ||
55 | u32 val, int start, int end) | ||
56 | { | ||
57 | int t = 100000; | ||
58 | |||
59 | while (FLD_GET(REG_READ(reg), start, end) != val) { | ||
60 | if (--t == 0) | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | #define REG_FLD_WAIT(reg, val, start, end) \ | ||
68 | REGISTER_FLD_WAIT(dev, reg, val, start, end) | ||
69 | |||
70 | #define REG_BIT_WAIT(reg, val, bitnum) \ | ||
71 | REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum) | ||
72 | |||
73 | #define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100 | ||
74 | |||
75 | #ifdef DEBUG | ||
76 | #define CHECK_PIPE(pipe) ({ \ | ||
77 | const typeof(pipe) __pipe = (pipe); \ | ||
78 | BUG_ON(__pipe != 0 && __pipe != 2); \ | ||
79 | __pipe; }) | ||
80 | #else | ||
81 | #define CHECK_PIPE(pipe) (pipe) | ||
82 | #endif | ||
83 | |||
84 | /* | ||
85 | * The actual MIPIA->MIPIC register offset is 0x800; multiplying by 0x400 | ||
86 | * works because the only valid pipe numbers here are 0 and 2. | ||
87 | */ | ||
87 | #define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400) | ||
88 | |||
89 | /* mdfld DSI controller registers */ | ||
90 | #define MIPI_DEVICE_READY_REG(pipe) (0xb000 + REG_OFFSET(pipe)) | ||
91 | #define MIPI_INTR_STAT_REG(pipe) (0xb004 + REG_OFFSET(pipe)) | ||
92 | #define MIPI_INTR_EN_REG(pipe) (0xb008 + REG_OFFSET(pipe)) | ||
93 | #define MIPI_DSI_FUNC_PRG_REG(pipe) (0xb00c + REG_OFFSET(pipe)) | ||
94 | #define MIPI_HS_TX_TIMEOUT_REG(pipe) (0xb010 + REG_OFFSET(pipe)) | ||
95 | #define MIPI_LP_RX_TIMEOUT_REG(pipe) (0xb014 + REG_OFFSET(pipe)) | ||
96 | #define MIPI_TURN_AROUND_TIMEOUT_REG(pipe) (0xb018 + REG_OFFSET(pipe)) | ||
97 | #define MIPI_DEVICE_RESET_TIMER_REG(pipe) (0xb01c + REG_OFFSET(pipe)) | ||
98 | #define MIPI_DPI_RESOLUTION_REG(pipe) (0xb020 + REG_OFFSET(pipe)) | ||
99 | #define MIPI_DBI_FIFO_THROTTLE_REG(pipe) (0xb024 + REG_OFFSET(pipe)) | ||
100 | #define MIPI_HSYNC_COUNT_REG(pipe) (0xb028 + REG_OFFSET(pipe)) | ||
101 | #define MIPI_HBP_COUNT_REG(pipe) (0xb02c + REG_OFFSET(pipe)) | ||
102 | #define MIPI_HFP_COUNT_REG(pipe) (0xb030 + REG_OFFSET(pipe)) | ||
103 | #define MIPI_HACTIVE_COUNT_REG(pipe) (0xb034 + REG_OFFSET(pipe)) | ||
104 | #define MIPI_VSYNC_COUNT_REG(pipe) (0xb038 + REG_OFFSET(pipe)) | ||
105 | #define MIPI_VBP_COUNT_REG(pipe) (0xb03c + REG_OFFSET(pipe)) | ||
106 | #define MIPI_VFP_COUNT_REG(pipe) (0xb040 + REG_OFFSET(pipe)) | ||
107 | #define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe) (0xb044 + REG_OFFSET(pipe)) | ||
108 | #define MIPI_DPI_CONTROL_REG(pipe) (0xb048 + REG_OFFSET(pipe)) | ||
109 | #define MIPI_DPI_DATA_REG(pipe) (0xb04c + REG_OFFSET(pipe)) | ||
110 | #define MIPI_INIT_COUNT_REG(pipe) (0xb050 + REG_OFFSET(pipe)) | ||
111 | #define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe) (0xb054 + REG_OFFSET(pipe)) | ||
112 | #define MIPI_VIDEO_MODE_FORMAT_REG(pipe) (0xb058 + REG_OFFSET(pipe)) | ||
113 | #define MIPI_EOT_DISABLE_REG(pipe) (0xb05c + REG_OFFSET(pipe)) | ||
114 | #define MIPI_LP_BYTECLK_REG(pipe) (0xb060 + REG_OFFSET(pipe)) | ||
115 | #define MIPI_LP_GEN_DATA_REG(pipe) (0xb064 + REG_OFFSET(pipe)) | ||
116 | #define MIPI_HS_GEN_DATA_REG(pipe) (0xb068 + REG_OFFSET(pipe)) | ||
117 | #define MIPI_LP_GEN_CTRL_REG(pipe) (0xb06c + REG_OFFSET(pipe)) | ||
118 | #define MIPI_HS_GEN_CTRL_REG(pipe) (0xb070 + REG_OFFSET(pipe)) | ||
119 | #define MIPI_GEN_FIFO_STAT_REG(pipe) (0xb074 + REG_OFFSET(pipe)) | ||
120 | #define MIPI_HS_LS_DBI_ENABLE_REG(pipe) (0xb078 + REG_OFFSET(pipe)) | ||
121 | #define MIPI_DPHY_PARAM_REG(pipe) (0xb080 + REG_OFFSET(pipe)) | ||
122 | #define MIPI_DBI_BW_CTRL_REG(pipe) (0xb084 + REG_OFFSET(pipe)) | ||
123 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe) (0xb088 + REG_OFFSET(pipe)) | ||
124 | |||
125 | #define MIPI_CTRL_REG(pipe) (0xb104 + REG_OFFSET(pipe)) | ||
126 | #define MIPI_DATA_ADD_REG(pipe) (0xb108 + REG_OFFSET(pipe)) | ||
127 | #define MIPI_DATA_LEN_REG(pipe) (0xb10c + REG_OFFSET(pipe)) | ||
128 | #define MIPI_CMD_ADD_REG(pipe) (0xb110 + REG_OFFSET(pipe)) | ||
129 | #define MIPI_CMD_LEN_REG(pipe) (0xb114 + REG_OFFSET(pipe)) | ||
130 | |||
131 | /* non-uniform reg offset */ | ||
132 | #define MIPI_PORT_CONTROL(pipe) (CHECK_PIPE(pipe) ? MIPI_C : MIPI) | ||
133 | |||
134 | #define DSI_DEVICE_READY (0x1) | ||
135 | #define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1) | ||
136 | #define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1) | ||
137 | #define DSI_POWER_STATE_ULPS_OFFSET (0x1) | ||
138 | |||
139 | |||
140 | #define DSI_ONE_DATA_LANE (0x1) | ||
141 | #define DSI_TWO_DATA_LANE (0x2) | ||
142 | #define DSI_THREE_DATA_LANE (0x3) | ||
143 | #define DSI_FOUR_DATA_LANE (0x4) | ||
144 | #define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3) | ||
145 | #define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5) | ||
146 | #define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7) | ||
147 | #define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7) | ||
148 | #define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7) | ||
149 | #define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7) | ||
150 | #define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13) | ||
151 | |||
152 | #define DSI_INTR_STATE_RXSOTERROR BIT(0) | ||
153 | |||
154 | #define DSI_INTR_STATE_SPL_PKG_SENT BIT(30) | ||
155 | #define DSI_INTR_STATE_TE BIT(31) | ||
156 | |||
157 | #define DSI_HS_TX_TIMEOUT_MASK (0xffffff) | ||
158 | |||
159 | #define DSI_LP_RX_TIMEOUT_MASK (0xffffff) | ||
160 | |||
161 | #define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f) | ||
162 | |||
163 | #define DSI_RESET_TIMER_MASK (0xffff) | ||
164 | |||
165 | #define DSI_DBI_FIFO_WM_HALF (0x0) | ||
166 | #define DSI_DBI_FIFO_WM_QUARTER (0x1) | ||
167 | #define DSI_DBI_FIFO_WM_LOW (0x2) | ||
168 | |||
169 | #define DSI_DPI_TIMING_MASK (0xffff) | ||
170 | |||
171 | #define DSI_INIT_TIMER_MASK (0xffff) | ||
172 | |||
173 | #define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff) | ||
174 | |||
175 | #define DSI_LP_BYTECLK_MASK (0x0ffff) | ||
176 | |||
177 | #define DSI_HS_CTRL_GEN_SHORT_W0 (0x03) | ||
178 | #define DSI_HS_CTRL_GEN_SHORT_W1 (0x13) | ||
179 | #define DSI_HS_CTRL_GEN_SHORT_W2 (0x23) | ||
180 | #define DSI_HS_CTRL_GEN_R0 (0x04) | ||
181 | #define DSI_HS_CTRL_GEN_R1 (0x14) | ||
182 | #define DSI_HS_CTRL_GEN_R2 (0x24) | ||
183 | #define DSI_HS_CTRL_GEN_LONG_W (0x29) | ||
184 | #define DSI_HS_CTRL_MCS_SHORT_W0 (0x05) | ||
185 | #define DSI_HS_CTRL_MCS_SHORT_W1 (0x15) | ||
186 | #define DSI_HS_CTRL_MCS_R0 (0x06) | ||
187 | #define DSI_HS_CTRL_MCS_LONG_W (0x39) | ||
188 | #define DSI_HS_CTRL_VC_OFFSET (0x06) | ||
189 | #define DSI_HS_CTRL_WC_OFFSET (0x08) | ||
190 | |||
191 | #define DSI_FIFO_GEN_HS_DATA_FULL BIT(0) | ||
192 | #define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY BIT(1) | ||
193 | #define DSI_FIFO_GEN_HS_DATA_EMPTY BIT(2) | ||
194 | #define DSI_FIFO_GEN_LP_DATA_FULL BIT(8) | ||
195 | #define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY BIT(9) | ||
196 | #define DSI_FIFO_GEN_LP_DATA_EMPTY BIT(10) | ||
197 | #define DSI_FIFO_GEN_HS_CTRL_FULL BIT(16) | ||
198 | #define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY BIT(17) | ||
199 | #define DSI_FIFO_GEN_HS_CTRL_EMPTY BIT(18) | ||
200 | #define DSI_FIFO_GEN_LP_CTRL_FULL BIT(24) | ||
201 | #define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY BIT(25) | ||
202 | #define DSI_FIFO_GEN_LP_CTRL_EMPTY BIT(26) | ||
203 | #define DSI_FIFO_DBI_EMPTY BIT(27) | ||
204 | #define DSI_FIFO_DPI_EMPTY BIT(28) | ||
205 | |||
206 | #define DSI_DBI_HS_LP_SWITCH_MASK (0x1) | ||
207 | |||
208 | #define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0) | ||
209 | #define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16) | ||
210 | |||
211 | #define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001) | ||
212 | #define DSI_DPI_CTRL_HS_TURN_ON (0x00000002) | ||
213 | |||
214 | /*dsi power modes*/ | ||
215 | #define DSI_POWER_MODE_DISPLAY_ON BIT(2) | ||
216 | #define DSI_POWER_MODE_NORMAL_ON BIT(3) | ||
217 | #define DSI_POWER_MODE_SLEEP_OUT BIT(4) | ||
218 | #define DSI_POWER_MODE_PARTIAL_ON BIT(5) | ||
219 | #define DSI_POWER_MODE_IDLE_ON BIT(6) | ||
220 | |||
221 | enum { | ||
222 | MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1, | ||
223 | MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2, | ||
224 | MDFLD_DSI_VIDEO_BURST_MODE = 3, | ||
225 | }; | ||
226 | |||
227 | #define DSI_DPI_COMPLETE_LAST_LINE BIT(2) | ||
228 | #define DSI_DPI_DISABLE_BTA BIT(3) | ||
229 | |||
230 | struct mdfld_dsi_connector { | ||
231 | struct psb_intel_connector base; | ||
232 | |||
233 | int pipe; | ||
234 | void *private; | ||
235 | void *pkg_sender; | ||
236 | |||
237 | /* Connection status */ | ||
238 | enum drm_connector_status status; | ||
239 | }; | ||
240 | |||
241 | struct mdfld_dsi_encoder { | ||
242 | struct psb_intel_encoder base; | ||
243 | void *private; | ||
244 | }; | ||
245 | |||
246 | /* | ||
247 | * A DSI config consists of one DSI connector and two DSI encoders. | ||
248 | * DRM picks which DSI encoder to use based on the different configs. | ||
249 | */ | ||
250 | struct mdfld_dsi_config { | ||
251 | struct drm_device *dev; | ||
252 | struct drm_display_mode *fixed_mode; | ||
253 | struct drm_display_mode *mode; | ||
254 | |||
255 | struct mdfld_dsi_connector *connector; | ||
256 | struct mdfld_dsi_encoder *encoder; | ||
257 | |||
258 | int changed; | ||
259 | |||
260 | int bpp; | ||
261 | int lane_count; | ||
262 | /*Virtual channel number for this encoder*/ | ||
263 | int channel_num; | ||
264 | /*video mode configure*/ | ||
265 | int video_mode; | ||
266 | |||
267 | int dvr_ic_inited; | ||
268 | }; | ||
269 | |||
270 | static inline struct mdfld_dsi_connector *mdfld_dsi_connector( | ||
271 | struct drm_connector *connector) | ||
272 | { | ||
273 | struct psb_intel_connector *psb_connector; | ||
274 | |||
275 | psb_connector = to_psb_intel_connector(connector); | ||
276 | |||
277 | return container_of(psb_connector, struct mdfld_dsi_connector, base); | ||
278 | } | ||
279 | |||
280 | static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( | ||
281 | struct drm_encoder *encoder) | ||
282 | { | ||
283 | struct psb_intel_encoder *psb_encoder; | ||
284 | |||
285 | psb_encoder = to_psb_intel_encoder(encoder); | ||
286 | |||
287 | return container_of(psb_encoder, struct mdfld_dsi_encoder, base); | ||
288 | } | ||
289 | |||
290 | static inline struct mdfld_dsi_config * | ||
291 | mdfld_dsi_get_config(struct mdfld_dsi_connector *connector) | ||
292 | { | ||
293 | if (!connector) | ||
294 | return NULL; | ||
295 | return (struct mdfld_dsi_config *)connector->private; | ||
296 | } | ||
297 | |||
298 | static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config) | ||
299 | { | ||
300 | struct mdfld_dsi_connector *dsi_connector; | ||
301 | |||
302 | if (!config) | ||
303 | return NULL; | ||
304 | |||
305 | dsi_connector = config->connector; | ||
306 | |||
307 | if (!dsi_connector) | ||
308 | return NULL; | ||
309 | |||
310 | return dsi_connector->pkg_sender; | ||
311 | } | ||
312 | |||
313 | static inline struct mdfld_dsi_config * | ||
314 | mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder) | ||
315 | { | ||
316 | if (!encoder) | ||
317 | return NULL; | ||
318 | return (struct mdfld_dsi_config *)encoder->private; | ||
319 | } | ||
320 | |||
321 | static inline struct mdfld_dsi_connector * | ||
322 | mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder) | ||
323 | { | ||
324 | struct mdfld_dsi_config *config; | ||
325 | |||
326 | if (!encoder) | ||
327 | return NULL; | ||
328 | |||
329 | config = mdfld_dsi_encoder_get_config(encoder); | ||
330 | if (!config) | ||
331 | return NULL; | ||
332 | |||
333 | return config->connector; | ||
334 | } | ||
335 | |||
336 | static inline void *mdfld_dsi_encoder_get_pkg_sender( | ||
337 | struct mdfld_dsi_encoder *encoder) | ||
338 | { | ||
339 | struct mdfld_dsi_config *dsi_config; | ||
340 | |||
341 | dsi_config = mdfld_dsi_encoder_get_config(encoder); | ||
342 | if (!dsi_config) | ||
343 | return NULL; | ||
344 | |||
345 | return mdfld_dsi_get_pkg_sender(dsi_config); | ||
346 | } | ||
347 | |||
348 | static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder) | ||
349 | { | ||
350 | struct mdfld_dsi_connector *connector; | ||
351 | |||
352 | if (!encoder) | ||
353 | return -1; | ||
354 | |||
355 | connector = mdfld_dsi_encoder_get_connector(encoder); | ||
356 | if (!connector) | ||
357 | return -1; | ||
358 | return connector->pipe; | ||
359 | } | ||
360 | |||
361 | /* Export functions */ | ||
362 | extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, | ||
363 | u32 gen_fifo_stat_reg, u32 fifo_stat); | ||
364 | extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, | ||
365 | int pipe); | ||
366 | extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, | ||
367 | int level); | ||
368 | extern void mdfld_dsi_output_init(struct drm_device *dev, | ||
369 | int pipe, | ||
370 | const struct panel_funcs *p_vid_funcs); | ||
371 | extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, | ||
372 | int pipe); | ||
373 | |||
374 | extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, | ||
375 | u32 *mode, bool hs); | ||
376 | extern int mdfld_dsi_panel_reset(int pipe); | ||
377 | |||
378 | #endif /*__MDFLD_DSI_OUTPUT_H__*/ | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c new file mode 100644 index 000000000000..baa0e14165e0 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | |||
@@ -0,0 +1,694 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Jackie Li<yaodong.li@intel.com> | ||
25 | */ | ||
26 | |||
27 | #include <linux/freezer.h> | ||
28 | |||
29 | #include "mdfld_dsi_output.h" | ||
30 | #include "mdfld_dsi_pkg_sender.h" | ||
31 | #include "mdfld_dsi_dpi.h" | ||
32 | |||
33 | #define MDFLD_DSI_READ_MAX_COUNT 5000 | ||
34 | |||
35 | enum data_type { | ||
36 | DSI_DT_GENERIC_SHORT_WRITE_0 = 0x03, | ||
37 | DSI_DT_GENERIC_SHORT_WRITE_1 = 0x13, | ||
38 | DSI_DT_GENERIC_SHORT_WRITE_2 = 0x23, | ||
39 | DSI_DT_GENERIC_READ_0 = 0x04, | ||
40 | DSI_DT_GENERIC_READ_1 = 0x14, | ||
41 | DSI_DT_GENERIC_READ_2 = 0x24, | ||
42 | DSI_DT_GENERIC_LONG_WRITE = 0x29, | ||
43 | DSI_DT_DCS_SHORT_WRITE_0 = 0x05, | ||
44 | DSI_DT_DCS_SHORT_WRITE_1 = 0x15, | ||
45 | DSI_DT_DCS_READ = 0x06, | ||
46 | DSI_DT_DCS_LONG_WRITE = 0x39, | ||
47 | }; | ||
48 | |||
49 | enum { | ||
50 | MDFLD_DSI_PANEL_MODE_SLEEP = 0x1, | ||
51 | }; | ||
52 | |||
53 | enum { | ||
54 | MDFLD_DSI_PKG_SENDER_FREE = 0x0, | ||
55 | MDFLD_DSI_PKG_SENDER_BUSY = 0x1, | ||
56 | }; | ||
57 | |||
58 | static const char *const dsi_errors[] = { | ||
59 | "RX SOT Error", | ||
60 | "RX SOT Sync Error", | ||
61 | "RX EOT Sync Error", | ||
62 | "RX Escape Mode Entry Error", | ||
63 | "RX LP TX Sync Error", | ||
64 | "RX HS Receive Timeout Error", | ||
65 | "RX False Control Error", | ||
66 | "RX ECC Single Bit Error", | ||
67 | "RX ECC Multibit Error", | ||
68 | "RX Checksum Error", | ||
69 | "RX DSI Data Type Not Recognised", | ||
70 | "RX DSI VC ID Invalid", | ||
71 | "TX False Control Error", | ||
72 | "TX ECC Single Bit Error", | ||
73 | "TX ECC Multibit Error", | ||
74 | "TX Checksum Error", | ||
75 | "TX DSI Data Type Not Recognised", | ||
76 | "TX DSI VC ID invalid", | ||
77 | "High Contention", | ||
78 | "Low contention", | ||
79 | "DPI FIFO Under run", | ||
80 | "HS TX Timeout", | ||
81 | "LP RX Timeout", | ||
82 | "Turn Around ACK Timeout", | ||
83 | "ACK With No Error", | ||
84 | "RX Invalid TX Length", | ||
85 | "RX Prot Violation", | ||
86 | "HS Generic Write FIFO Full", | ||
87 | "LP Generic Write FIFO Full", | ||
88 | "Generic Read Data Avail" | ||
89 | "Special Packet Sent", | ||
90 | "Tearing Effect", | ||
91 | }; | ||
92 | |||
93 | static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender, | ||
94 | u32 mask) | ||
95 | { | ||
96 | struct drm_device *dev = sender->dev; | ||
97 | u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg; | ||
98 | int retry = 0xffff; | ||
99 | |||
100 | while (retry--) { | ||
101 | if ((mask & REG_READ(gen_fifo_stat_reg)) == mask) | ||
102 | return 0; | ||
103 | udelay(100); | ||
104 | } | ||
105 | DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg)); | ||
106 | return -EIO; | ||
107 | } | ||
108 | |||
109 | static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender) | ||
110 | { | ||
111 | return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) | | ||
112 | BIT(26) | BIT(27) | BIT(28))); | ||
113 | } | ||
114 | |||
115 | static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender) | ||
116 | { | ||
117 | return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26))); | ||
118 | } | ||
119 | |||
120 | static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender) | ||
121 | { | ||
122 | return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18))); | ||
123 | } | ||
124 | |||
125 | static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask) | ||
126 | { | ||
127 | u32 intr_stat_reg = sender->mipi_intr_stat_reg; | ||
128 | struct drm_device *dev = sender->dev; | ||
129 | |||
130 | dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask); | ||
131 | |||
132 | switch (mask) { | ||
133 | case BIT(0): | ||
134 | case BIT(1): | ||
135 | case BIT(2): | ||
136 | case BIT(3): | ||
137 | case BIT(4): | ||
138 | case BIT(5): | ||
139 | case BIT(6): | ||
140 | case BIT(7): | ||
141 | case BIT(8): | ||
142 | case BIT(9): | ||
143 | case BIT(10): | ||
144 | case BIT(11): | ||
145 | case BIT(12): | ||
146 | case BIT(13): | ||
147 | dev_dbg(sender->dev->dev, "No Action required\n"); | ||
148 | break; | ||
149 | case BIT(14): | ||
150 | /*wait for all fifo empty*/ | ||
151 | /*wait_for_all_fifos_empty(sender)*/; | ||
152 | break; | ||
153 | case BIT(15): | ||
154 | dev_dbg(sender->dev->dev, "No Action required\n"); | ||
155 | break; | ||
156 | case BIT(16): | ||
157 | break; | ||
158 | case BIT(17): | ||
159 | break; | ||
160 | case BIT(18): | ||
161 | case BIT(19): | ||
162 | dev_dbg(sender->dev->dev, "High/Low contention detected\n"); | ||
163 | /*wait for contention recovery time*/ | ||
164 | /*mdelay(10);*/ | ||
165 | /*wait for all fifo empty*/ | ||
166 | if (0) | ||
167 | wait_for_all_fifos_empty(sender); | ||
168 | break; | ||
169 | case BIT(20): | ||
170 | dev_dbg(sender->dev->dev, "No Action required\n"); | ||
171 | break; | ||
172 | case BIT(21): | ||
173 | /*wait for all fifo empty*/ | ||
174 | /*wait_for_all_fifos_empty(sender);*/ | ||
175 | break; | ||
176 | case BIT(22): | ||
177 | break; | ||
178 | case BIT(23): | ||
179 | case BIT(24): | ||
180 | case BIT(25): | ||
181 | case BIT(26): | ||
182 | case BIT(27): | ||
183 | dev_dbg(sender->dev->dev, "HS Gen fifo full\n"); | ||
184 | REG_WRITE(intr_stat_reg, mask); | ||
185 | wait_for_hs_fifos_empty(sender); | ||
186 | break; | ||
187 | case BIT(28): | ||
188 | dev_dbg(sender->dev->dev, "LP Gen fifo full\n"); | ||
189 | REG_WRITE(intr_stat_reg, mask); | ||
190 | wait_for_lp_fifos_empty(sender); | ||
191 | break; | ||
192 | case BIT(29): | ||
193 | case BIT(30): | ||
194 | case BIT(31): | ||
195 | dev_dbg(sender->dev->dev, "No Action required\n"); | ||
196 | break; | ||
197 | } | ||
198 | |||
199 | if (mask & REG_READ(intr_stat_reg)) | ||
200 | dev_dbg(sender->dev->dev, | ||
201 | "Cannot clean interrupt 0x%08x\n", mask); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender) | ||
206 | { | ||
207 | struct drm_device *dev = sender->dev; | ||
208 | u32 intr_stat_reg = sender->mipi_intr_stat_reg; | ||
209 | u32 mask; | ||
210 | u32 intr_stat; | ||
211 | int i; | ||
212 | int err = 0; | ||
213 | |||
214 | intr_stat = REG_READ(intr_stat_reg); | ||
215 | |||
216 | for (i = 0; i < 32; i++) { | ||
217 | mask = (0x00000001UL) << i; | ||
218 | if (intr_stat & mask) { | ||
219 | dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]); | ||
220 | err = handle_dsi_error(sender, mask); | ||
221 | if (err) | ||
222 | DRM_ERROR("Cannot handle error\n"); | ||
223 | } | ||
224 | } | ||
225 | return err; | ||
226 | } | ||
227 | |||
228 | static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, | ||
229 | u8 cmd, u8 param, bool hs) | ||
230 | { | ||
231 | struct drm_device *dev = sender->dev; | ||
232 | u32 ctrl_reg; | ||
233 | u32 val; | ||
234 | u8 virtual_channel = 0; | ||
235 | |||
236 | if (hs) { | ||
237 | ctrl_reg = sender->mipi_hs_gen_ctrl_reg; | ||
238 | |||
239 | /* FIXME: wait_for_hs_fifos_empty(sender); */ | ||
240 | } else { | ||
241 | ctrl_reg = sender->mipi_lp_gen_ctrl_reg; | ||
242 | |||
243 | /* FIXME: wait_for_lp_fifos_empty(sender); */ | ||
244 | } | ||
245 | |||
246 | val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) | | ||
247 | FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0); | ||
248 | |||
249 | REG_WRITE(ctrl_reg, val); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, | ||
255 | u8 *data, int len, bool hs) | ||
256 | { | ||
257 | struct drm_device *dev = sender->dev; | ||
258 | u32 ctrl_reg; | ||
259 | u32 data_reg; | ||
260 | u32 val; | ||
261 | u8 *p; | ||
262 | u8 b1, b2, b3, b4; | ||
263 | u8 virtual_channel = 0; | ||
264 | int i; | ||
265 | |||
266 | if (hs) { | ||
267 | ctrl_reg = sender->mipi_hs_gen_ctrl_reg; | ||
268 | data_reg = sender->mipi_hs_gen_data_reg; | ||
269 | |||
270 | /* FIXME: wait_for_hs_fifos_empty(sender); */ | ||
271 | } else { | ||
272 | ctrl_reg = sender->mipi_lp_gen_ctrl_reg; | ||
273 | data_reg = sender->mipi_lp_gen_data_reg; | ||
274 | |||
275 | /* FIXME: wait_for_lp_fifos_empty(sender); */ | ||
276 | } | ||
277 | |||
278 | p = data; | ||
279 | for (i = 0; i < len / 4; i++) { | ||
280 | b1 = *p++; | ||
281 | b2 = *p++; | ||
282 | b3 = *p++; | ||
283 | b4 = *p++; | ||
284 | |||
285 | REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1); | ||
286 | } | ||
287 | |||
288 | i = len % 4; | ||
289 | if (i) { | ||
290 | b1 = 0; b2 = 0; b3 = 0; | ||
291 | |||
292 | switch (i) { | ||
293 | case 3: | ||
294 | b1 = *p++; | ||
295 | b2 = *p++; | ||
296 | b3 = *p++; | ||
297 | break; | ||
298 | case 2: | ||
299 | b1 = *p++; | ||
300 | b2 = *p++; | ||
301 | break; | ||
302 | case 1: | ||
303 | b1 = *p++; | ||
304 | break; | ||
305 | } | ||
306 | |||
307 | REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1); | ||
308 | } | ||
309 | |||
310 | val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) | | ||
311 | FLD_VAL(data_type, 5, 0); | ||
312 | |||
313 | REG_WRITE(ctrl_reg, val); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type, | ||
319 | u8 *data, u16 len) | ||
320 | { | ||
321 | u8 cmd; | ||
322 | |||
323 | switch (data_type) { | ||
324 | case DSI_DT_DCS_SHORT_WRITE_0: | ||
325 | case DSI_DT_DCS_SHORT_WRITE_1: | ||
326 | case DSI_DT_DCS_LONG_WRITE: | ||
327 | cmd = *data; | ||
328 | break; | ||
329 | default: | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | /*this prevents other packets from being sent while we sleep*/ | ||
334 | sender->status = MDFLD_DSI_PKG_SENDER_BUSY; | ||
335 | |||
336 | /*wait 120 milliseconds in case exit_sleep_mode was just sent*/ | ||
337 | if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) { | ||
338 | /*TODO: replace it with msleep later*/ | ||
339 | mdelay(120); | ||
340 | } | ||
341 | |||
342 | if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) { | ||
343 | /*TODO: replace it with msleep later*/ | ||
344 | mdelay(120); | ||
345 | } | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type, | ||
350 | u8 *data, u16 len) | ||
351 | { | ||
352 | u8 cmd; | ||
353 | |||
354 | switch (data_type) { | ||
355 | case DSI_DT_DCS_SHORT_WRITE_0: | ||
356 | case DSI_DT_DCS_SHORT_WRITE_1: | ||
357 | case DSI_DT_DCS_LONG_WRITE: | ||
358 | cmd = *data; | ||
359 | break; | ||
360 | default: | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | /*update panel status*/ | ||
365 | if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) { | ||
366 | sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP; | ||
367 | /*TODO: replace it with msleep later*/ | ||
368 | mdelay(120); | ||
369 | } else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) { | ||
370 | sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP; | ||
371 | /*TODO: replace it with msleep later*/ | ||
372 | mdelay(120); | ||
373 | } else if (unlikely(cmd == DCS_SOFT_RESET)) { | ||
374 | /*TODO: replace it with msleep later*/ | ||
375 | mdelay(5); | ||
376 | } | ||
377 | |||
378 | sender->status = MDFLD_DSI_PKG_SENDER_FREE; | ||
379 | |||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, | ||
384 | u8 *data, u16 len, bool hs) | ||
385 | { | ||
386 | int ret; | ||
387 | |||
388 | /*handle DSI error*/ | ||
389 | ret = dsi_error_handler(sender); | ||
390 | if (ret) { | ||
391 | DRM_ERROR("Error handling failed\n"); | ||
392 | return -EAGAIN; | ||
393 | } | ||
394 | |||
395 | /* send pkg */ | ||
396 | if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) { | ||
397 | DRM_ERROR("sender is busy\n"); | ||
398 | return -EAGAIN; | ||
399 | } | ||
400 | |||
401 | ret = send_pkg_prepare(sender, data_type, data, len); | ||
402 | if (ret) { | ||
403 | DRM_ERROR("send_pkg_prepare error\n"); | ||
404 | return ret; | ||
405 | } | ||
406 | |||
407 | switch (data_type) { | ||
408 | case DSI_DT_GENERIC_SHORT_WRITE_0: | ||
409 | case DSI_DT_GENERIC_SHORT_WRITE_1: | ||
410 | case DSI_DT_GENERIC_SHORT_WRITE_2: | ||
411 | case DSI_DT_GENERIC_READ_0: | ||
412 | case DSI_DT_GENERIC_READ_1: | ||
413 | case DSI_DT_GENERIC_READ_2: | ||
414 | case DSI_DT_DCS_SHORT_WRITE_0: | ||
415 | case DSI_DT_DCS_SHORT_WRITE_1: | ||
416 | case DSI_DT_DCS_READ: | ||
417 | ret = send_short_pkg(sender, data_type, data[0], data[1], hs); | ||
418 | break; | ||
419 | case DSI_DT_GENERIC_LONG_WRITE: | ||
420 | case DSI_DT_DCS_LONG_WRITE: | ||
421 | ret = send_long_pkg(sender, data_type, data, len, hs); | ||
422 | break; | ||
423 | } | ||
424 | |||
425 | send_pkg_done(sender, data_type, data, len); | ||
426 | |||
427 | /*FIXME: should I query complete and fifo empty here?*/ | ||
428 | |||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, | ||
433 | u32 len, bool hs) | ||
434 | { | ||
435 | unsigned long flags; | ||
436 | |||
437 | if (!sender || !data || !len) { | ||
438 | DRM_ERROR("Invalid parameters\n"); | ||
439 | return -EINVAL; | ||
440 | } | ||
441 | |||
442 | spin_lock_irqsave(&sender->lock, flags); | ||
443 | send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs); | ||
444 | spin_unlock_irqrestore(&sender->lock, flags); | ||
445 | |||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd, | ||
450 | u8 param, u8 param_num, bool hs) | ||
451 | { | ||
452 | u8 data[2]; | ||
453 | unsigned long flags; | ||
454 | u8 data_type; | ||
455 | |||
456 | if (!sender) { | ||
457 | DRM_ERROR("Invalid parameter\n"); | ||
458 | return -EINVAL; | ||
459 | } | ||
460 | |||
461 | data[0] = cmd; | ||
462 | |||
463 | if (param_num) { | ||
464 | data_type = DSI_DT_DCS_SHORT_WRITE_1; | ||
465 | data[1] = param; | ||
466 | } else { | ||
467 | data_type = DSI_DT_DCS_SHORT_WRITE_0; | ||
468 | data[1] = 0; | ||
469 | } | ||
470 | |||
471 | spin_lock_irqsave(&sender->lock, flags); | ||
472 | send_pkg(sender, data_type, data, sizeof(data), hs); | ||
473 | spin_unlock_irqrestore(&sender->lock, flags); | ||
474 | |||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0, | ||
479 | u8 param1, u8 param_num, bool hs) | ||
480 | { | ||
481 | u8 data[2]; | ||
482 | unsigned long flags; | ||
483 | u8 data_type; | ||
484 | |||
485 | if (!sender || param_num > 2) { | ||
486 | DRM_ERROR("Invalid parameter\n"); | ||
487 | return -EINVAL; | ||
488 | } | ||
489 | |||
490 | switch (param_num) { | ||
491 | case 0: | ||
492 | data_type = DSI_DT_GENERIC_SHORT_WRITE_0; | ||
493 | data[0] = 0; | ||
494 | data[1] = 0; | ||
495 | break; | ||
496 | case 1: | ||
497 | data_type = DSI_DT_GENERIC_SHORT_WRITE_1; | ||
498 | data[0] = param0; | ||
499 | data[1] = 0; | ||
500 | break; | ||
501 | case 2: | ||
502 | data_type = DSI_DT_GENERIC_SHORT_WRITE_2; | ||
503 | data[0] = param0; | ||
504 | data[1] = param1; | ||
505 | break; | ||
506 | } | ||
507 | |||
508 | spin_lock_irqsave(&sender->lock, flags); | ||
509 | send_pkg(sender, data_type, data, sizeof(data), hs); | ||
510 | spin_unlock_irqrestore(&sender->lock, flags); | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, | ||
516 | u32 len, bool hs) | ||
517 | { | ||
518 | unsigned long flags; | ||
519 | |||
520 | if (!sender || !data || !len) { | ||
521 | DRM_ERROR("Invalid parameters\n"); | ||
522 | return -EINVAL; | ||
523 | } | ||
524 | |||
525 | spin_lock_irqsave(&sender->lock, flags); | ||
526 | send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs); | ||
527 | spin_unlock_irqrestore(&sender->lock, flags); | ||
528 | |||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type, | ||
533 | u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs) | ||
534 | { | ||
535 | unsigned long flags; | ||
536 | struct drm_device *dev = sender->dev; | ||
537 | int i; | ||
538 | u32 gen_data_reg; | ||
539 | int retry = MDFLD_DSI_READ_MAX_COUNT; | ||
540 | |||
541 | if (!sender || !data_out || !len_out) { | ||
542 | DRM_ERROR("Invalid parameters\n"); | ||
543 | return -EINVAL; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * Do the read: | ||
548 | * 0) send out the generic read request | ||
549 | * 1) poll the read data avail interrupt | ||
550 | * 2) read the data | ||
551 | */ | ||
552 | spin_lock_irqsave(&sender->lock, flags); | ||
553 | |||
554 | REG_WRITE(sender->mipi_intr_stat_reg, BIT(29)); | ||
555 | |||
556 | if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) | ||
557 | DRM_ERROR("Can NOT clean read data valid interrupt\n"); | ||
558 | |||
559 | /*send out read request*/ | ||
560 | send_pkg(sender, data_type, data, len, hs); | ||
561 | |||
562 | /*poll the read data avail interrupt*/ | ||
563 | while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) { | ||
564 | udelay(100); | ||
565 | retry--; | ||
566 | } | ||
567 | |||
568 | if (!retry) { | ||
569 | spin_unlock_irqrestore(&sender->lock, flags); | ||
570 | return -ETIMEDOUT; | ||
571 | } | ||
572 | |||
573 | REG_WRITE(sender->mipi_intr_stat_reg, BIT(29)); | ||
574 | |||
575 | /*read data*/ | ||
576 | if (hs) | ||
577 | gen_data_reg = sender->mipi_hs_gen_data_reg; | ||
578 | else | ||
579 | gen_data_reg = sender->mipi_lp_gen_data_reg; | ||
580 | |||
581 | for (i = 0; i < len_out; i++) | ||
582 | *(data_out + i) = REG_READ(gen_data_reg); | ||
583 | |||
584 | spin_unlock_irqrestore(&sender->lock, flags); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd, | ||
590 | u32 *data, u16 len, bool hs) | ||
591 | { | ||
592 | if (!sender || !data || !len) { | ||
593 | DRM_ERROR("Invalid parameters\n"); | ||
594 | return -EINVAL; | ||
595 | } | ||
596 | |||
597 | return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1, | ||
598 | data, len, hs); | ||
599 | } | ||
600 | |||
601 | int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector, | ||
602 | int pipe) | ||
603 | { | ||
604 | struct mdfld_dsi_pkg_sender *pkg_sender; | ||
605 | struct mdfld_dsi_config *dsi_config; | ||
606 | struct drm_device *dev; | ||
607 | u32 mipi_val = 0; | ||
608 | |||
609 | if (!dsi_connector) { | ||
610 | DRM_ERROR("Invalid parameter\n"); | ||
611 | return -EINVAL; | ||
612 | } | ||
613 | dsi_config = mdfld_dsi_get_config(dsi_connector); | ||
614 | dev = dsi_config->dev; | ||
615 | pkg_sender = dsi_connector->pkg_sender; | ||
616 | |||
617 | if (!pkg_sender || IS_ERR(pkg_sender)) { | ||
618 | pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender), | ||
619 | GFP_KERNEL); | ||
620 | if (!pkg_sender) { | ||
621 | DRM_ERROR("Create DSI pkg sender failed\n"); | ||
622 | return -ENOMEM; | ||
623 | } | ||
624 | dsi_connector->pkg_sender = (void *)pkg_sender; | ||
625 | } | ||
626 | |||
627 | pkg_sender->dev = dev; | ||
628 | pkg_sender->dsi_connector = dsi_connector; | ||
629 | pkg_sender->pipe = pipe; | ||
630 | pkg_sender->pkg_num = 0; | ||
631 | pkg_sender->panel_mode = 0; | ||
632 | pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE; | ||
633 | |||
634 | /*init regs*/ | ||
635 | if (pipe == 0) { | ||
636 | pkg_sender->dpll_reg = MRST_DPLL_A; | ||
637 | pkg_sender->dspcntr_reg = DSPACNTR; | ||
638 | pkg_sender->pipeconf_reg = PIPEACONF; | ||
639 | pkg_sender->dsplinoff_reg = DSPALINOFF; | ||
640 | pkg_sender->dspsurf_reg = DSPASURF; | ||
641 | pkg_sender->pipestat_reg = PIPEASTAT; | ||
642 | } else if (pipe == 2) { | ||
643 | pkg_sender->dpll_reg = MRST_DPLL_A; | ||
644 | pkg_sender->dspcntr_reg = DSPCCNTR; | ||
645 | pkg_sender->pipeconf_reg = PIPECCONF; | ||
646 | pkg_sender->dsplinoff_reg = DSPCLINOFF; | ||
647 | pkg_sender->dspsurf_reg = DSPCSURF; | ||
648 | pkg_sender->pipestat_reg = PIPECSTAT; | ||
649 | } | ||
650 | |||
651 | pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe); | ||
652 | pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe); | ||
653 | pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); | ||
654 | pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe); | ||
655 | pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); | ||
656 | pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); | ||
657 | pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe); | ||
658 | pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe); | ||
659 | pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe); | ||
660 | pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe); | ||
661 | |||
662 | /*init lock*/ | ||
663 | spin_lock_init(&pkg_sender->lock); | ||
664 | |||
665 | if (mdfld_get_panel_type(dev, pipe) != TC35876X) { | ||
666 | /** | ||
667 | * For video mode, don't enable the DPI timing output here; | ||
668 | * it will be initialized during mode setting. | ||
669 | */ | ||
670 | mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; | ||
671 | |||
672 | if (pipe == 0) | ||
673 | mipi_val |= 0x2; | ||
674 | |||
675 | REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val); | ||
676 | REG_READ(MIPI_PORT_CONTROL(pipe)); | ||
677 | |||
678 | /* do dsi controller init */ | ||
679 | mdfld_dsi_controller_init(dsi_config, pipe); | ||
680 | } | ||
681 | |||
682 | return 0; | ||
683 | } | ||
684 | |||
685 | void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender) | ||
686 | { | ||
687 | if (!sender || IS_ERR(sender)) | ||
688 | return; | ||
689 | |||
690 | /*free*/ | ||
691 | kfree(sender); | ||
692 | } | ||
693 | |||
694 | |||
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h new file mode 100644 index 000000000000..459cd7ea8b81 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Jackie Li<yaodong.li@intel.com> | ||
25 | */ | ||
26 | #ifndef __MDFLD_DSI_PKG_SENDER_H__ | ||
27 | #define __MDFLD_DSI_PKG_SENDER_H__ | ||
28 | |||
29 | #include <linux/kthread.h> | ||
30 | |||
31 | #define MDFLD_MAX_DCS_PARAM 8 | ||
32 | |||
33 | struct mdfld_dsi_pkg_sender { | ||
34 | struct drm_device *dev; | ||
35 | struct mdfld_dsi_connector *dsi_connector; | ||
36 | u32 status; | ||
37 | u32 panel_mode; | ||
38 | |||
39 | int pipe; | ||
40 | |||
41 | spinlock_t lock; | ||
42 | |||
43 | u32 pkg_num; | ||
44 | |||
45 | /* Registers */ | ||
46 | u32 dpll_reg; | ||
47 | u32 dspcntr_reg; | ||
48 | u32 pipeconf_reg; | ||
49 | u32 pipestat_reg; | ||
50 | u32 dsplinoff_reg; | ||
51 | u32 dspsurf_reg; | ||
52 | |||
53 | u32 mipi_intr_stat_reg; | ||
54 | u32 mipi_lp_gen_data_reg; | ||
55 | u32 mipi_hs_gen_data_reg; | ||
56 | u32 mipi_lp_gen_ctrl_reg; | ||
57 | u32 mipi_hs_gen_ctrl_reg; | ||
58 | u32 mipi_gen_fifo_stat_reg; | ||
59 | u32 mipi_data_addr_reg; | ||
60 | u32 mipi_data_len_reg; | ||
61 | u32 mipi_cmd_addr_reg; | ||
62 | u32 mipi_cmd_len_reg; | ||
63 | }; | ||
64 | |||
65 | /* DCS definitions */ | ||
66 | #define DCS_SOFT_RESET 0x01 | ||
67 | #define DCS_ENTER_SLEEP_MODE 0x10 | ||
68 | #define DCS_EXIT_SLEEP_MODE 0x11 | ||
69 | #define DCS_SET_DISPLAY_OFF 0x28 | ||
70 | #define DCS_SET_DISPLAY_ON 0x29 | ||
71 | #define DCS_SET_COLUMN_ADDRESS 0x2a | ||
72 | #define DCS_SET_PAGE_ADDRESS 0x2b | ||
73 | #define DCS_WRITE_MEM_START 0x2c | ||
74 | #define DCS_SET_TEAR_OFF 0x34 | ||
75 | #define DCS_SET_TEAR_ON 0x35 | ||
76 | |||
77 | extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector, | ||
78 | int pipe); | ||
79 | extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender); | ||
80 | int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd, | ||
81 | u8 param, u8 param_num, bool hs); | ||
82 | int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, | ||
83 | u32 len, bool hs); | ||
84 | int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0, | ||
85 | u8 param1, u8 param_num, bool hs); | ||
86 | int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, | ||
87 | u32 len, bool hs); | ||
88 | /* Read interfaces */ | ||
89 | int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd, | ||
90 | u32 *data, u16 len, bool hs); | ||
91 | |||
92 | #endif | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c new file mode 100644 index 000000000000..a35a2921bdf7 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c | |||
@@ -0,0 +1,1180 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2007 Intel Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | * | ||
17 | * Authors: | ||
18 | * Eric Anholt <eric@anholt.net> | ||
19 | */ | ||
20 | |||
21 | #include <linux/i2c.h> | ||
22 | #include <linux/pm_runtime.h> | ||
23 | |||
24 | #include <drm/drmP.h> | ||
25 | #include "psb_intel_reg.h" | ||
26 | #include "psb_intel_display.h" | ||
27 | #include "framebuffer.h" | ||
28 | #include "mdfld_output.h" | ||
29 | #include "mdfld_dsi_output.h" | ||
30 | |||
31 | /* Hardcoded currently */ | ||
32 | static int ksel = KSEL_CRYSTAL_19; | ||
33 | |||
34 | struct psb_intel_range_t { | ||
35 | int min, max; | ||
36 | }; | ||
37 | |||
38 | struct mrst_limit_t { | ||
39 | struct psb_intel_range_t dot, m, p1; | ||
40 | }; | ||
41 | |||
42 | struct mrst_clock_t { | ||
43 | /* derived values */ | ||
44 | int dot; | ||
45 | int m; | ||
46 | int p1; | ||
47 | }; | ||
48 | |||
49 | #define COUNT_MAX 0x10000000 | ||
50 | |||
51 | void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe) | ||
52 | { | ||
53 | int count, temp; | ||
54 | u32 pipeconf_reg = PIPEACONF; | ||
55 | |||
56 | switch (pipe) { | ||
57 | case 0: | ||
58 | break; | ||
59 | case 1: | ||
60 | pipeconf_reg = PIPEBCONF; | ||
61 | break; | ||
62 | case 2: | ||
63 | pipeconf_reg = PIPECCONF; | ||
64 | break; | ||
65 | default: | ||
66 | DRM_ERROR("Illegal Pipe Number.\n"); | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | /* FIXME JLIU7_PO */ | ||
71 | psb_intel_wait_for_vblank(dev); | ||
72 | return; | ||
73 | |||
74 | /* Wait for the pipe disable to take effect. */ | ||
75 | for (count = 0; count < COUNT_MAX; count++) { | ||
76 | temp = REG_READ(pipeconf_reg); | ||
77 | if ((temp & PIPEACONF_PIPE_STATE) == 0) | ||
78 | break; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) | ||
83 | { | ||
84 | int count, temp; | ||
85 | u32 pipeconf_reg = PIPEACONF; | ||
86 | |||
87 | switch (pipe) { | ||
88 | case 0: | ||
89 | break; | ||
90 | case 1: | ||
91 | pipeconf_reg = PIPEBCONF; | ||
92 | break; | ||
93 | case 2: | ||
94 | pipeconf_reg = PIPECCONF; | ||
95 | break; | ||
96 | default: | ||
97 | DRM_ERROR("Illegal Pipe Number.\n"); | ||
98 | return; | ||
99 | } | ||
100 | |||
101 | /* FIXME JLIU7_PO */ | ||
102 | psb_intel_wait_for_vblank(dev); | ||
103 | return; | ||
104 | |||
105 | /* Wait for the pipe enable to take effect. */ | ||
106 | for (count = 0; count < COUNT_MAX; count++) { | ||
107 | temp = REG_READ(pipeconf_reg); | ||
108 | if ((temp & PIPEACONF_PIPE_STATE) == 1) | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | static void psb_intel_crtc_prepare(struct drm_crtc *crtc) | ||
114 | { | ||
115 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
116 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
117 | } | ||
118 | |||
119 | static void psb_intel_crtc_commit(struct drm_crtc *crtc) | ||
120 | { | ||
121 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
122 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | ||
123 | } | ||
124 | |||
125 | static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, | ||
126 | struct drm_display_mode *mode, | ||
127 | struct drm_display_mode *adjusted_mode) | ||
128 | { | ||
129 | return true; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * Return the pipe currently connected to the panel fitter, | ||
134 | * or -1 if the panel fitter is not present or not in use | ||
135 | */ | ||
136 | static int psb_intel_panel_fitter_pipe(struct drm_device *dev) | ||
137 | { | ||
138 | u32 pfit_control; | ||
139 | |||
140 | pfit_control = REG_READ(PFIT_CONTROL); | ||
141 | |||
142 | /* See if the panel fitter is in use */ | ||
143 | if ((pfit_control & PFIT_ENABLE) == 0) | ||
144 | return -1; | ||
145 | |||
146 | /* 965 can place panel fitter on either pipe */ | ||
147 | return (pfit_control >> 29) & 0x3; | ||
148 | } | ||
149 | |||
150 | static struct drm_device globle_dev; | ||
151 | |||
152 | void mdfld__intel_plane_set_alpha(int enable) | ||
153 | { | ||
154 | struct drm_device *dev = &globle_dev; | ||
155 | int dspcntr_reg = DSPACNTR; | ||
156 | u32 dspcntr; | ||
157 | |||
158 | dspcntr = REG_READ(dspcntr_reg); | ||
159 | |||
160 | if (enable) { | ||
161 | dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA; | ||
162 | dspcntr |= DISPPLANE_32BPP; | ||
163 | } else { | ||
164 | dspcntr &= ~DISPPLANE_32BPP; | ||
165 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
166 | } | ||
167 | |||
168 | REG_WRITE(dspcntr_reg, dspcntr); | ||
169 | } | ||
170 | |||
171 | static int check_fb(struct drm_framebuffer *fb) | ||
172 | { | ||
173 | if (!fb) | ||
174 | return 0; | ||
175 | |||
176 | switch (fb->bits_per_pixel) { | ||
177 | case 8: | ||
178 | case 16: | ||
179 | case 24: | ||
180 | case 32: | ||
181 | return 0; | ||
182 | default: | ||
183 | DRM_ERROR("Unknown color depth\n"); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
189 | struct drm_framebuffer *old_fb) | ||
190 | { | ||
191 | struct drm_device *dev = crtc->dev; | ||
192 | /* struct drm_i915_master_private *master_priv; */ | ||
193 | struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); | ||
194 | struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); | ||
195 | int pipe = psb_intel_crtc->pipe; | ||
196 | unsigned long start, offset; | ||
197 | int dsplinoff = DSPALINOFF; | ||
198 | int dspsurf = DSPASURF; | ||
199 | int dspstride = DSPASTRIDE; | ||
200 | int dspcntr_reg = DSPACNTR; | ||
201 | u32 dspcntr; | ||
202 | int ret; | ||
203 | |||
204 | memcpy(&globle_dev, dev, sizeof(struct drm_device)); | ||
205 | |||
206 | dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe); | ||
207 | |||
208 | /* no fb bound */ | ||
209 | if (!crtc->fb) { | ||
210 | dev_dbg(dev->dev, "No FB bound\n"); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | ret = check_fb(crtc->fb); | ||
215 | if (ret) | ||
216 | return ret; | ||
217 | |||
218 | switch (pipe) { | ||
219 | case 0: | ||
220 | dsplinoff = DSPALINOFF; | ||
221 | break; | ||
222 | case 1: | ||
223 | dsplinoff = DSPBLINOFF; | ||
224 | dspsurf = DSPBSURF; | ||
225 | dspstride = DSPBSTRIDE; | ||
226 | dspcntr_reg = DSPBCNTR; | ||
227 | break; | ||
228 | case 2: | ||
229 | dsplinoff = DSPCLINOFF; | ||
230 | dspsurf = DSPCSURF; | ||
231 | dspstride = DSPCSTRIDE; | ||
232 | dspcntr_reg = DSPCCNTR; | ||
233 | break; | ||
234 | default: | ||
235 | DRM_ERROR("Illegal Pipe Number.\n"); | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | |||
239 | if (!gma_power_begin(dev, true)) | ||
240 | return 0; | ||
241 | |||
242 | start = psbfb->gtt->offset; | ||
243 | offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8); | ||
244 | |||
245 | REG_WRITE(dspstride, crtc->fb->pitches[0]); | ||
246 | dspcntr = REG_READ(dspcntr_reg); | ||
247 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | ||
248 | |||
249 | switch (crtc->fb->bits_per_pixel) { | ||
250 | case 8: | ||
251 | dspcntr |= DISPPLANE_8BPP; | ||
252 | break; | ||
253 | case 16: | ||
254 | if (crtc->fb->depth == 15) | ||
255 | dspcntr |= DISPPLANE_15_16BPP; | ||
256 | else | ||
257 | dspcntr |= DISPPLANE_16BPP; | ||
258 | break; | ||
259 | case 24: | ||
260 | case 32: | ||
261 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | ||
262 | break; | ||
263 | } | ||
264 | REG_WRITE(dspcntr_reg, dspcntr); | ||
265 | |||
266 | dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n", | ||
267 | start, offset, x, y); | ||
268 | REG_WRITE(dsplinoff, offset); | ||
269 | REG_READ(dsplinoff); | ||
270 | REG_WRITE(dspsurf, start); | ||
271 | REG_READ(dspsurf); | ||
272 | |||
273 | gma_power_end(dev); | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * Disable the pipe, plane and pll. | ||
280 | * | ||
281 | */ | ||
282 | void mdfld_disable_crtc(struct drm_device *dev, int pipe) | ||
283 | { | ||
284 | int dpll_reg = MRST_DPLL_A; | ||
285 | int dspcntr_reg = DSPACNTR; | ||
286 | int dspbase_reg = MRST_DSPABASE; | ||
287 | int pipeconf_reg = PIPEACONF; | ||
288 | u32 temp; | ||
289 | |||
290 | dev_dbg(dev->dev, "pipe = %d\n", pipe); | ||
291 | |||
292 | |||
293 | switch (pipe) { | ||
294 | case 0: | ||
295 | break; | ||
296 | case 1: | ||
297 | dpll_reg = MDFLD_DPLL_B; | ||
298 | dspcntr_reg = DSPBCNTR; | ||
299 | dspbase_reg = DSPBSURF; | ||
300 | pipeconf_reg = PIPEBCONF; | ||
301 | break; | ||
302 | case 2: | ||
303 | dpll_reg = MRST_DPLL_A; | ||
304 | dspcntr_reg = DSPCCNTR; | ||
305 | dspbase_reg = MDFLD_DSPCBASE; | ||
306 | pipeconf_reg = PIPECCONF; | ||
307 | break; | ||
308 | default: | ||
309 | DRM_ERROR("Illegal Pipe Number.\n"); | ||
310 | return; | ||
311 | } | ||
312 | |||
313 | if (pipe != 1) | ||
314 | mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe), | ||
315 | HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); | ||
316 | |||
317 | /* Disable display plane */ | ||
318 | temp = REG_READ(dspcntr_reg); | ||
319 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
320 | REG_WRITE(dspcntr_reg, | ||
321 | temp & ~DISPLAY_PLANE_ENABLE); | ||
322 | /* Flush the plane changes */ | ||
323 | REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); | ||
324 | REG_READ(dspbase_reg); | ||
325 | } | ||
326 | |||
327 | /* FIXME_JLIU7 MDFLD_PO revisit */ | ||
328 | |||
329 | /* Next, disable display pipes */ | ||
330 | temp = REG_READ(pipeconf_reg); | ||
331 | if ((temp & PIPEACONF_ENABLE) != 0) { | ||
332 | temp &= ~PIPEACONF_ENABLE; | ||
333 | temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; | ||
334 | REG_WRITE(pipeconf_reg, temp); | ||
335 | REG_READ(pipeconf_reg); | ||
336 | |||
337 | /* Wait for the pipe disable to take effect. */ | ||
338 | mdfldWaitForPipeDisable(dev, pipe); | ||
339 | } | ||
340 | |||
341 | temp = REG_READ(dpll_reg); | ||
342 | if (temp & DPLL_VCO_ENABLE) { | ||
343 | if ((pipe != 1 && | ||
344 | !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) | ||
345 | & PIPEACONF_ENABLE)) || pipe == 1) { | ||
346 | temp &= ~(DPLL_VCO_ENABLE); | ||
347 | REG_WRITE(dpll_reg, temp); | ||
348 | REG_READ(dpll_reg); | ||
349 | /* Wait for the clocks to turn off. */ | ||
350 | /* FIXME_MDFLD PO may need more delay */ | ||
351 | udelay(500); | ||
352 | |||
353 | if (!(temp & MDFLD_PWR_GATE_EN)) { | ||
354 | /* gating power of DPLL */ | ||
355 | REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN); | ||
356 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
357 | udelay(5000); | ||
358 | } | ||
359 | } | ||
360 | } | ||
361 | |||
362 | } | ||
363 | |||
364 | /** | ||
365 | * Sets the power management mode of the pipe and plane. | ||
366 | * | ||
367 | * This code should probably grow support for turning the cursor off and back | ||
368 | * on appropriately at the same time as we're turning the pipe off/on. | ||
369 | */ | ||
370 | static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
371 | { | ||
372 | struct drm_device *dev = crtc->dev; | ||
373 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
374 | struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); | ||
375 | int pipe = psb_intel_crtc->pipe; | ||
376 | int dpll_reg = MRST_DPLL_A; | ||
377 | int dspcntr_reg = DSPACNTR; | ||
378 | int dspbase_reg = MRST_DSPABASE; | ||
379 | int pipeconf_reg = PIPEACONF; | ||
380 | u32 pipestat_reg = PIPEASTAT; | ||
381 | u32 pipeconf = dev_priv->pipeconf[pipe]; | ||
382 | u32 temp; | ||
383 | int timeout = 0; | ||
384 | |||
385 | dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe); | ||
386 | |||
387 | /* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */ | ||
388 | /* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */ | ||
389 | |||
390 | switch (pipe) { | ||
391 | case 0: | ||
392 | break; | ||
393 | case 1: | ||
394 | dpll_reg = DPLL_B; | ||
395 | dspcntr_reg = DSPBCNTR; | ||
396 | dspbase_reg = MRST_DSPBBASE; | ||
397 | pipeconf_reg = PIPEBCONF; | ||
398 | dpll_reg = MDFLD_DPLL_B; | ||
399 | break; | ||
400 | case 2: | ||
401 | dpll_reg = MRST_DPLL_A; | ||
402 | dspcntr_reg = DSPCCNTR; | ||
403 | dspbase_reg = MDFLD_DSPCBASE; | ||
404 | pipeconf_reg = PIPECCONF; | ||
405 | pipestat_reg = PIPECSTAT; | ||
406 | break; | ||
407 | default: | ||
408 | DRM_ERROR("Illegal Pipe Number.\n"); | ||
409 | return; | ||
410 | } | ||
411 | |||
412 | if (!gma_power_begin(dev, true)) | ||
413 | return; | ||
414 | |||
415 | /* XXX: When our outputs are all unaware of DPMS modes other than off | ||
416 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | ||
417 | */ | ||
418 | switch (mode) { | ||
419 | case DRM_MODE_DPMS_ON: | ||
420 | case DRM_MODE_DPMS_STANDBY: | ||
421 | case DRM_MODE_DPMS_SUSPEND: | ||
422 | /* Enable the DPLL */ | ||
423 | temp = REG_READ(dpll_reg); | ||
424 | |||
425 | if ((temp & DPLL_VCO_ENABLE) == 0) { | ||
426 | /* When ungating the DPLL power, wait 0.5us | ||
427 | before enabling the VCO */ | ||
428 | if (temp & MDFLD_PWR_GATE_EN) { | ||
429 | temp &= ~MDFLD_PWR_GATE_EN; | ||
430 | REG_WRITE(dpll_reg, temp); | ||
431 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
432 | udelay(500); | ||
433 | } | ||
434 | |||
435 | REG_WRITE(dpll_reg, temp); | ||
436 | REG_READ(dpll_reg); | ||
437 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
438 | udelay(500); | ||
439 | |||
440 | REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); | ||
441 | REG_READ(dpll_reg); | ||
442 | |||
443 | /** | ||
444 | * wait for DSI PLL to lock | ||
445 | * NOTE: only need to poll status of pipe 0 and pipe 1, | ||
446 | * since both MIPI pipes share the same PLL. | ||
447 | */ | ||
448 | while ((pipe != 2) && (timeout < 20000) && | ||
449 | !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { | ||
450 | udelay(150); | ||
451 | timeout++; | ||
452 | } | ||
453 | } | ||
454 | |||
455 | /* Enable the plane */ | ||
456 | temp = REG_READ(dspcntr_reg); | ||
457 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | ||
458 | REG_WRITE(dspcntr_reg, | ||
459 | temp | DISPLAY_PLANE_ENABLE); | ||
460 | /* Flush the plane changes */ | ||
461 | REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); | ||
462 | } | ||
463 | |||
464 | /* Enable the pipe */ | ||
465 | temp = REG_READ(pipeconf_reg); | ||
466 | if ((temp & PIPEACONF_ENABLE) == 0) { | ||
467 | REG_WRITE(pipeconf_reg, pipeconf); | ||
468 | |||
469 | /* Wait for the pipe enable to take effect. */ | ||
470 | mdfldWaitForPipeEnable(dev, pipe); | ||
471 | } | ||
472 | |||
473 | /*workaround for sighting 3741701: random X blank display*/ | ||
474 | /*apply the workaround in video mode only, on pipe A or C*/ | ||
475 | if (pipe == 0 || pipe == 2) { | ||
476 | REG_WRITE(pipestat_reg, REG_READ(pipestat_reg)); | ||
477 | msleep(100); | ||
478 | if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) | ||
479 | dev_dbg(dev->dev, "OK"); | ||
480 | else { | ||
481 | dev_dbg(dev->dev, "STUCK!!!!"); | ||
482 | /*shutdown controller*/ | ||
483 | temp = REG_READ(dspcntr_reg); | ||
484 | REG_WRITE(dspcntr_reg, | ||
485 | temp & ~DISPLAY_PLANE_ENABLE); | ||
486 | REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); | ||
487 | /*mdfld_dsi_dpi_shut_down(dev, pipe);*/ | ||
488 | REG_WRITE(0xb048, 1); | ||
489 | msleep(100); | ||
490 | temp = REG_READ(pipeconf_reg); | ||
491 | temp &= ~PIPEACONF_ENABLE; | ||
492 | REG_WRITE(pipeconf_reg, temp); | ||
493 | msleep(100); /*wait for pipe disable*/ | ||
494 | REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0); | ||
495 | msleep(100); | ||
496 | REG_WRITE(0xb004, REG_READ(0xb004)); | ||
497 | /* try to bring the controller back up again*/ | ||
498 | REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1); | ||
499 | temp = REG_READ(dspcntr_reg); | ||
500 | REG_WRITE(dspcntr_reg, | ||
501 | temp | DISPLAY_PLANE_ENABLE); | ||
502 | REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); | ||
503 | /*mdfld_dsi_dpi_turn_on(dev, pipe);*/ | ||
504 | REG_WRITE(0xb048, 2); | ||
505 | msleep(100); | ||
506 | temp = REG_READ(pipeconf_reg); | ||
507 | temp |= PIPEACONF_ENABLE; | ||
508 | REG_WRITE(pipeconf_reg, temp); | ||
509 | } | ||
510 | } | ||
511 | |||
512 | psb_intel_crtc_load_lut(crtc); | ||
513 | |||
514 | /* Give the overlay scaler a chance to enable | ||
515 | if it's on this pipe */ | ||
516 | /* psb_intel_crtc_dpms_video(crtc, true); TODO */ | ||
517 | |||
518 | break; | ||
519 | case DRM_MODE_DPMS_OFF: | ||
520 | /* Give the overlay scaler a chance to disable | ||
521 | * if it's on this pipe */ | ||
522 | /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ | ||
523 | if (pipe != 1) | ||
524 | mdfld_dsi_gen_fifo_ready(dev, | ||
525 | MIPI_GEN_FIFO_STAT_REG(pipe), | ||
526 | HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); | ||
527 | |||
528 | /* Disable the VGA plane that we never use */ | ||
529 | REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); | ||
530 | |||
531 | /* Disable display plane */ | ||
532 | temp = REG_READ(dspcntr_reg); | ||
533 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
534 | REG_WRITE(dspcntr_reg, | ||
535 | temp & ~DISPLAY_PLANE_ENABLE); | ||
536 | /* Flush the plane changes */ | ||
537 | REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); | ||
538 | REG_READ(dspbase_reg); | ||
539 | } | ||
540 | |||
541 | /* Next, disable display pipes */ | ||
542 | temp = REG_READ(pipeconf_reg); | ||
543 | if ((temp & PIPEACONF_ENABLE) != 0) { | ||
544 | temp &= ~PIPEACONF_ENABLE; | ||
545 | temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; | ||
546 | REG_WRITE(pipeconf_reg, temp); | ||
547 | REG_READ(pipeconf_reg); | ||
548 | |||
549 | /* Wait for the pipe disable to take effect. */ | ||
550 | mdfldWaitForPipeDisable(dev, pipe); | ||
551 | } | ||
552 | |||
553 | temp = REG_READ(dpll_reg); | ||
554 | if (temp & DPLL_VCO_ENABLE) { | ||
555 | if ((pipe != 1 && !((REG_READ(PIPEACONF) | ||
556 | | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE)) | ||
557 | || pipe == 1) { | ||
558 | temp &= ~(DPLL_VCO_ENABLE); | ||
559 | REG_WRITE(dpll_reg, temp); | ||
560 | REG_READ(dpll_reg); | ||
561 | /* Wait for the clocks to turn off. */ | ||
562 | /* FIXME_MDFLD PO may need more delay */ | ||
563 | udelay(500); | ||
564 | } | ||
565 | } | ||
566 | break; | ||
567 | } | ||
568 | gma_power_end(dev); | ||
569 | } | ||
570 | |||
571 | |||
572 | #define MDFLD_LIMT_DPLL_19 0 | ||
573 | #define MDFLD_LIMT_DPLL_25 1 | ||
574 | #define MDFLD_LIMT_DPLL_83 2 | ||
575 | #define MDFLD_LIMT_DPLL_100 3 | ||
576 | #define MDFLD_LIMT_DSIPLL_19 4 | ||
577 | #define MDFLD_LIMT_DSIPLL_25 5 | ||
578 | #define MDFLD_LIMT_DSIPLL_83 6 | ||
579 | #define MDFLD_LIMT_DSIPLL_100 7 | ||
580 | |||
581 | #define MDFLD_DOT_MIN 19750 | ||
582 | #define MDFLD_DOT_MAX 120000 | ||
583 | #define MDFLD_DPLL_M_MIN_19 113 | ||
584 | #define MDFLD_DPLL_M_MAX_19 155 | ||
585 | #define MDFLD_DPLL_P1_MIN_19 2 | ||
586 | #define MDFLD_DPLL_P1_MAX_19 10 | ||
587 | #define MDFLD_DPLL_M_MIN_25 101 | ||
588 | #define MDFLD_DPLL_M_MAX_25 130 | ||
589 | #define MDFLD_DPLL_P1_MIN_25 2 | ||
590 | #define MDFLD_DPLL_P1_MAX_25 10 | ||
591 | #define MDFLD_DPLL_M_MIN_83 64 | ||
592 | #define MDFLD_DPLL_M_MAX_83 64 | ||
593 | #define MDFLD_DPLL_P1_MIN_83 2 | ||
594 | #define MDFLD_DPLL_P1_MAX_83 2 | ||
595 | #define MDFLD_DPLL_M_MIN_100 64 | ||
596 | #define MDFLD_DPLL_M_MAX_100 64 | ||
597 | #define MDFLD_DPLL_P1_MIN_100 2 | ||
598 | #define MDFLD_DPLL_P1_MAX_100 2 | ||
599 | #define MDFLD_DSIPLL_M_MIN_19 131 | ||
600 | #define MDFLD_DSIPLL_M_MAX_19 175 | ||
601 | #define MDFLD_DSIPLL_P1_MIN_19 3 | ||
602 | #define MDFLD_DSIPLL_P1_MAX_19 8 | ||
603 | #define MDFLD_DSIPLL_M_MIN_25 97 | ||
604 | #define MDFLD_DSIPLL_M_MAX_25 140 | ||
605 | #define MDFLD_DSIPLL_P1_MIN_25 3 | ||
606 | #define MDFLD_DSIPLL_P1_MAX_25 9 | ||
607 | #define MDFLD_DSIPLL_M_MIN_83 33 | ||
608 | #define MDFLD_DSIPLL_M_MAX_83 92 | ||
609 | #define MDFLD_DSIPLL_P1_MIN_83 2 | ||
610 | #define MDFLD_DSIPLL_P1_MAX_83 3 | ||
611 | #define MDFLD_DSIPLL_M_MIN_100 97 | ||
612 | #define MDFLD_DSIPLL_M_MAX_100 140 | ||
613 | #define MDFLD_DSIPLL_P1_MIN_100 3 | ||
614 | #define MDFLD_DSIPLL_P1_MAX_100 9 | ||
615 | |||
616 | static const struct mrst_limit_t mdfld_limits[] = { | ||
617 | { /* MDFLD_LIMT_DPLL_19 */ | ||
618 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
619 | .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19}, | ||
620 | .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19}, | ||
621 | }, | ||
622 | { /* MDFLD_LIMT_DPLL_25 */ | ||
623 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
624 | .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25}, | ||
625 | .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25}, | ||
626 | }, | ||
627 | { /* MDFLD_LIMT_DPLL_83 */ | ||
628 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
629 | .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83}, | ||
630 | .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83}, | ||
631 | }, | ||
632 | { /* MDFLD_LIMT_DPLL_100 */ | ||
633 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
634 | .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100}, | ||
635 | .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100}, | ||
636 | }, | ||
637 | { /* MDFLD_LIMT_DSIPLL_19 */ | ||
638 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
639 | .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19}, | ||
640 | .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19}, | ||
641 | }, | ||
642 | { /* MDFLD_LIMT_DSIPLL_25 */ | ||
643 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
644 | .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25}, | ||
645 | .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25}, | ||
646 | }, | ||
647 | { /* MDFLD_LIMT_DSIPLL_83 */ | ||
648 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
649 | .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83}, | ||
650 | .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83}, | ||
651 | }, | ||
652 | { /* MDFLD_LIMT_DSIPLL_100 */ | ||
653 | .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, | ||
654 | .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100}, | ||
655 | .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100}, | ||
656 | }, | ||
657 | }; | ||
658 | |||
659 | #define MDFLD_M_MIN 21 | ||
660 | #define MDFLD_M_MAX 180 | ||
661 | static const u32 mdfld_m_converts[] = { | ||
662 | /* M configuration table from 9-bit LFSR table */ | ||
663 | 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */ | ||
664 | 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */ | ||
665 | 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */ | ||
666 | 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */ | ||
667 | 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */ | ||
668 | 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ | ||
669 | 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */ | ||
670 | 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */ | ||
671 | 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */ | ||
672 | 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */ | ||
673 | 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */ | ||
674 | 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */ | ||
675 | 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */ | ||
676 | 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */ | ||
677 | 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */ | ||
678 | 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */ | ||
679 | }; | ||
680 | |||
681 | static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc) | ||
682 | { | ||
683 | const struct mrst_limit_t *limit = NULL; | ||
684 | struct drm_device *dev = crtc->dev; | ||
685 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
686 | |||
687 | if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) | ||
688 | || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { | ||
689 | if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) | ||
690 | limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; | ||
691 | else if (ksel == KSEL_BYPASS_25) | ||
692 | limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25]; | ||
693 | else if ((ksel == KSEL_BYPASS_83_100) && | ||
694 | (dev_priv->core_freq == 166)) | ||
695 | limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83]; | ||
696 | else if ((ksel == KSEL_BYPASS_83_100) && | ||
697 | (dev_priv->core_freq == 100 || | ||
698 | dev_priv->core_freq == 200)) | ||
699 | limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; | ||
700 | } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { | ||
701 | if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) | ||
702 | limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; | ||
703 | else if (ksel == KSEL_BYPASS_25) | ||
704 | limit = &mdfld_limits[MDFLD_LIMT_DPLL_25]; | ||
705 | else if ((ksel == KSEL_BYPASS_83_100) && | ||
706 | (dev_priv->core_freq == 166)) | ||
707 | limit = &mdfld_limits[MDFLD_LIMT_DPLL_83]; | ||
708 | else if ((ksel == KSEL_BYPASS_83_100) && | ||
709 | (dev_priv->core_freq == 100 || | ||
710 | dev_priv->core_freq == 200)) | ||
711 | limit = &mdfld_limits[MDFLD_LIMT_DPLL_100]; | ||
712 | } else { | ||
713 | limit = NULL; | ||
714 | dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n"); | ||
715 | } | ||
716 | |||
717 | return limit; | ||
718 | } | ||
719 | |||
720 | /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ | ||
721 | static void mdfld_clock(int refclk, struct mrst_clock_t *clock) | ||
722 | { | ||
723 | clock->dot = (refclk * clock->m) / clock->p1; | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * Returns a set of divisors for the desired target clock with the given | ||
728 | * refclk, or FALSE. The divisor values are the actual divisors used. | ||
729 | */ | ||
730 | static bool | ||
731 | mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk, | ||
732 | struct mrst_clock_t *best_clock) | ||
733 | { | ||
734 | struct mrst_clock_t clock; | ||
735 | const struct mrst_limit_t *limit = mdfld_limit(crtc); | ||
736 | int err = target; | ||
737 | |||
738 | memset(best_clock, 0, sizeof(*best_clock)); | ||
739 | |||
740 | for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { | ||
741 | for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; | ||
742 | clock.p1++) { | ||
743 | int this_err; | ||
744 | |||
745 | mdfld_clock(refclk, &clock); | ||
746 | |||
747 | this_err = abs(clock.dot - target); | ||
748 | if (this_err < err) { | ||
749 | *best_clock = clock; | ||
750 | err = this_err; | ||
751 | } | ||
752 | } | ||
753 | } | ||
754 | return err != target; | ||
755 | } | ||
756 | |||
757 | static int mdfld_crtc_mode_set(struct drm_crtc *crtc, | ||
758 | struct drm_display_mode *mode, | ||
759 | struct drm_display_mode *adjusted_mode, | ||
760 | int x, int y, | ||
761 | struct drm_framebuffer *old_fb) | ||
762 | { | ||
763 | struct drm_device *dev = crtc->dev; | ||
764 | struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); | ||
765 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
766 | int pipe = psb_intel_crtc->pipe; | ||
767 | int fp_reg = MRST_FPA0; | ||
768 | int dpll_reg = MRST_DPLL_A; | ||
769 | int dspcntr_reg = DSPACNTR; | ||
770 | int pipeconf_reg = PIPEACONF; | ||
771 | int htot_reg = HTOTAL_A; | ||
772 | int hblank_reg = HBLANK_A; | ||
773 | int hsync_reg = HSYNC_A; | ||
774 | int vtot_reg = VTOTAL_A; | ||
775 | int vblank_reg = VBLANK_A; | ||
776 | int vsync_reg = VSYNC_A; | ||
777 | int dspsize_reg = DSPASIZE; | ||
778 | int dsppos_reg = DSPAPOS; | ||
779 | int pipesrc_reg = PIPEASRC; | ||
780 | u32 *pipeconf = &dev_priv->pipeconf[pipe]; | ||
781 | u32 *dspcntr = &dev_priv->dspcntr[pipe]; | ||
782 | int refclk = 0; | ||
783 | int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, | ||
784 | clk_tmp = 0; | ||
785 | struct mrst_clock_t clock; | ||
786 | bool ok; | ||
787 | u32 dpll = 0, fp = 0; | ||
788 | bool is_mipi = false, is_mipi2 = false, is_hdmi = false; | ||
789 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
790 | struct psb_intel_encoder *psb_intel_encoder = NULL; | ||
791 | uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; | ||
792 | struct drm_encoder *encoder; | ||
793 | struct drm_connector *connector; | ||
794 | int timeout = 0; | ||
795 | int ret; | ||
796 | |||
797 | dev_dbg(dev->dev, "pipe = 0x%x\n", pipe); | ||
798 | |||
799 | #if 0 | ||
800 | if (pipe == 1) { | ||
801 | if (!gma_power_begin(dev, true)) | ||
802 | return 0; | ||
803 | android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode, | ||
804 | x, y, old_fb); | ||
805 | goto mrst_crtc_mode_set_exit; | ||
806 | } | ||
807 | #endif | ||
808 | |||
809 | switch (pipe) { | ||
810 | case 0: | ||
811 | break; | ||
812 | case 1: | ||
813 | fp_reg = FPB0; | ||
814 | dpll_reg = DPLL_B; | ||
815 | dspcntr_reg = DSPBCNTR; | ||
816 | pipeconf_reg = PIPEBCONF; | ||
817 | htot_reg = HTOTAL_B; | ||
818 | hblank_reg = HBLANK_B; | ||
819 | hsync_reg = HSYNC_B; | ||
820 | vtot_reg = VTOTAL_B; | ||
821 | vblank_reg = VBLANK_B; | ||
822 | vsync_reg = VSYNC_B; | ||
823 | dspsize_reg = DSPBSIZE; | ||
824 | dsppos_reg = DSPBPOS; | ||
825 | pipesrc_reg = PIPEBSRC; | ||
826 | fp_reg = MDFLD_DPLL_DIV0; | ||
827 | dpll_reg = MDFLD_DPLL_B; | ||
828 | break; | ||
829 | case 2: | ||
830 | dpll_reg = MRST_DPLL_A; | ||
831 | dspcntr_reg = DSPCCNTR; | ||
832 | pipeconf_reg = PIPECCONF; | ||
833 | htot_reg = HTOTAL_C; | ||
834 | hblank_reg = HBLANK_C; | ||
835 | hsync_reg = HSYNC_C; | ||
836 | vtot_reg = VTOTAL_C; | ||
837 | vblank_reg = VBLANK_C; | ||
838 | vsync_reg = VSYNC_C; | ||
839 | dspsize_reg = DSPCSIZE; | ||
840 | dsppos_reg = DSPCPOS; | ||
841 | pipesrc_reg = PIPECSRC; | ||
842 | break; | ||
843 | default: | ||
844 | DRM_ERROR("Illegal Pipe Number.\n"); | ||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | ret = check_fb(crtc->fb); | ||
849 | if (ret) | ||
850 | return ret; | ||
851 | |||
852 | dev_dbg(dev->dev, "adjusted_hdisplay = %d\n", | ||
853 | adjusted_mode->hdisplay); | ||
854 | dev_dbg(dev->dev, "adjusted_vdisplay = %d\n", | ||
855 | adjusted_mode->vdisplay); | ||
856 | dev_dbg(dev->dev, "adjusted_hsync_start = %d\n", | ||
857 | adjusted_mode->hsync_start); | ||
858 | dev_dbg(dev->dev, "adjusted_hsync_end = %d\n", | ||
859 | adjusted_mode->hsync_end); | ||
860 | dev_dbg(dev->dev, "adjusted_htotal = %d\n", | ||
861 | adjusted_mode->htotal); | ||
862 | dev_dbg(dev->dev, "adjusted_vsync_start = %d\n", | ||
863 | adjusted_mode->vsync_start); | ||
864 | dev_dbg(dev->dev, "adjusted_vsync_end = %d\n", | ||
865 | adjusted_mode->vsync_end); | ||
866 | dev_dbg(dev->dev, "adjusted_vtotal = %d\n", | ||
867 | adjusted_mode->vtotal); | ||
868 | dev_dbg(dev->dev, "adjusted_clock = %d\n", | ||
869 | adjusted_mode->clock); | ||
870 | dev_dbg(dev->dev, "hdisplay = %d\n", | ||
871 | mode->hdisplay); | ||
872 | dev_dbg(dev->dev, "vdisplay = %d\n", | ||
873 | mode->vdisplay); | ||
874 | |||
875 | if (!gma_power_begin(dev, true)) | ||
876 | return 0; | ||
877 | |||
878 | memcpy(&psb_intel_crtc->saved_mode, mode, | ||
879 | sizeof(struct drm_display_mode)); | ||
880 | memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, | ||
881 | sizeof(struct drm_display_mode)); | ||
882 | |||
883 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
884 | if (!connector) | ||
885 | continue; | ||
886 | |||
887 | encoder = connector->encoder; | ||
888 | |||
889 | if (!encoder) | ||
890 | continue; | ||
891 | |||
892 | if (encoder->crtc != crtc) | ||
893 | continue; | ||
894 | |||
895 | psb_intel_encoder = psb_intel_attached_encoder(connector); | ||
896 | |||
897 | switch (psb_intel_encoder->type) { | ||
898 | case INTEL_OUTPUT_MIPI: | ||
899 | is_mipi = true; | ||
900 | break; | ||
901 | case INTEL_OUTPUT_MIPI2: | ||
902 | is_mipi2 = true; | ||
903 | break; | ||
904 | case INTEL_OUTPUT_HDMI: | ||
905 | is_hdmi = true; | ||
906 | break; | ||
907 | } | ||
908 | } | ||
909 | |||
910 | /* Disable the VGA plane that we never use */ | ||
911 | REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); | ||
912 | |||
913 | /* Disable the panel fitter if it was on our pipe */ | ||
914 | if (psb_intel_panel_fitter_pipe(dev) == pipe) | ||
915 | REG_WRITE(PFIT_CONTROL, 0); | ||
916 | |||
917 | /* pipesrc and dspsize control the size that is scaled from, | ||
918 | * which should always be the user's requested size. | ||
919 | */ | ||
920 | if (pipe == 1) { | ||
921 | /* FIXME: To make HDMI display with 864x480 (TPO), 480x864 | ||
922 | * (PYR) or 480x854 (TMD), set the sprite width/height and | ||
923 | * source image size registers with the adjusted mode for | ||
924 | * pipe B. | ||
925 | */ | ||
926 | |||
927 | /* | ||
928 | * The defined sprite rectangle must always be completely | ||
929 | * contained within the displayable area of the screen image | ||
930 | * (frame buffer). | ||
931 | */ | ||
932 | REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16) | ||
933 | | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1)); | ||
934 | /* Set the CRTC with encoder mode. */ | ||
935 | REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | ||
936 | | (mode->crtc_vdisplay - 1)); | ||
937 | } else { | ||
938 | REG_WRITE(dspsize_reg, | ||
939 | ((mode->crtc_vdisplay - 1) << 16) | | ||
940 | (mode->crtc_hdisplay - 1)); | ||
941 | REG_WRITE(pipesrc_reg, | ||
942 | ((mode->crtc_hdisplay - 1) << 16) | | ||
943 | (mode->crtc_vdisplay - 1)); | ||
944 | } | ||
945 | |||
946 | REG_WRITE(dsppos_reg, 0); | ||
947 | |||
948 | if (psb_intel_encoder) | ||
949 | drm_connector_property_get_value(connector, | ||
950 | dev->mode_config.scaling_mode_property, &scalingType); | ||
951 | |||
952 | if (scalingType == DRM_MODE_SCALE_NO_SCALE) { | ||
953 | /* Medfield doesn't have register support for centering so we | ||
954 | * need to mess with the h/vblank and h/vsync start and ends | ||
955 | * to get centering | ||
956 | */ | ||
957 | int offsetX = 0, offsetY = 0; | ||
958 | |||
959 | offsetX = (adjusted_mode->crtc_hdisplay - | ||
960 | mode->crtc_hdisplay) / 2; | ||
961 | offsetY = (adjusted_mode->crtc_vdisplay - | ||
962 | mode->crtc_vdisplay) / 2; | ||
963 | |||
964 | REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) | | ||
965 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
966 | REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) | | ||
967 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
968 | REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - | ||
969 | offsetX - 1) | | ||
970 | ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); | ||
971 | REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - | ||
972 | offsetX - 1) | | ||
973 | ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); | ||
974 | REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - | ||
975 | offsetY - 1) | | ||
976 | ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); | ||
977 | REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - | ||
978 | offsetY - 1) | | ||
979 | ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); | ||
980 | } else { | ||
981 | REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | ||
982 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
983 | REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | | ||
984 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
985 | REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | | ||
986 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
987 | REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | | ||
988 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
989 | REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | | ||
990 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
991 | REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | | ||
992 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
993 | } | ||
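/*
 * Centering example (illustrative): displaying an 800x600 user mode on a
 * 1024x768 panel gives offsetX = (1024 - 800) / 2 = 112 and
 * offsetY = (768 - 600) / 2 = 84, so every hblank/hsync value is pulled
 * in by 112 pixels and every vblank/vsync value by 84 lines, while the
 * totals are still taken from the adjusted (panel) mode.
 */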
994 | |||
995 | /* Flush the plane changes */ | ||
996 | { | ||
997 | struct drm_crtc_helper_funcs *crtc_funcs = | ||
998 | crtc->helper_private; | ||
999 | crtc_funcs->mode_set_base(crtc, x, y, old_fb); | ||
1000 | } | ||
1001 | |||
1002 | /* setup pipeconf */ | ||
1003 | *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */ | ||
1004 | |||
1005 | /* Set up the display plane register */ | ||
1006 | *dspcntr = REG_READ(dspcntr_reg); | ||
1007 | *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS; | ||
1008 | *dspcntr |= DISPLAY_PLANE_ENABLE; | ||
1009 | |||
1010 | if (is_mipi2) | ||
1011 | goto mrst_crtc_mode_set_exit; | ||
1012 | clk = adjusted_mode->clock; | ||
1013 | |||
1014 | if (is_hdmi) { | ||
1015 | if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) { | ||
1016 | refclk = 19200; | ||
1017 | |||
1018 | if (is_mipi || is_mipi2) | ||
1019 | clk_n = 1, clk_p2 = 8; | ||
1020 | else if (is_hdmi) | ||
1021 | clk_n = 1, clk_p2 = 10; | ||
1022 | } else if (ksel == KSEL_BYPASS_25) { | ||
1023 | refclk = 25000; | ||
1024 | |||
1025 | if (is_mipi || is_mipi2) | ||
1026 | clk_n = 1, clk_p2 = 8; | ||
1027 | else if (is_hdmi) | ||
1028 | clk_n = 1, clk_p2 = 10; | ||
1029 | } else if ((ksel == KSEL_BYPASS_83_100) && | ||
1030 | dev_priv->core_freq == 166) { | ||
1031 | refclk = 83000; | ||
1032 | |||
1033 | if (is_mipi || is_mipi2) | ||
1034 | clk_n = 4, clk_p2 = 8; | ||
1035 | else if (is_hdmi) | ||
1036 | clk_n = 4, clk_p2 = 10; | ||
1037 | } else if ((ksel == KSEL_BYPASS_83_100) && | ||
1038 | (dev_priv->core_freq == 100 || | ||
1039 | dev_priv->core_freq == 200)) { | ||
1040 | refclk = 100000; | ||
1041 | if (is_mipi || is_mipi2) | ||
1042 | clk_n = 4, clk_p2 = 8; | ||
1043 | else if (is_hdmi) | ||
1044 | clk_n = 4, clk_p2 = 10; | ||
1045 | } | ||
1046 | |||
1047 | if (is_mipi) | ||
1048 | clk_byte = dev_priv->bpp / 8; | ||
1049 | else if (is_mipi2) | ||
1050 | clk_byte = dev_priv->bpp2 / 8; | ||
1051 | |||
1052 | clk_tmp = clk * clk_n * clk_p2 * clk_byte; | ||
1053 | |||
1054 | dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n", | ||
1055 | clk, clk_n, clk_p2); | ||
1056 | dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n", | ||
1057 | adjusted_mode->clock, clk_tmp); | ||
1058 | |||
1059 | ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock); | ||
1060 | |||
1061 | if (!ok) { | ||
1062 | DRM_ERROR | ||
1063 | ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n"); | ||
1064 | } else { | ||
1065 | m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)]; | ||
1066 | |||
1067 | dev_dbg(dev->dev, "dot clock = %d," | ||
1068 | "m = %d, p1 = %d, m_conv = %d.\n", | ||
1069 | clock.dot, clock.m, | ||
1070 | clock.p1, m_conv); | ||
1071 | } | ||
1072 | |||
1073 | dpll = REG_READ(dpll_reg); | ||
1074 | |||
1075 | if (dpll & DPLL_VCO_ENABLE) { | ||
1076 | dpll &= ~DPLL_VCO_ENABLE; | ||
1077 | REG_WRITE(dpll_reg, dpll); | ||
1078 | REG_READ(dpll_reg); | ||
1079 | |||
1080 | /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */ | ||
1081 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
1082 | udelay(500); | ||
1083 | |||
1084 | /* reset M1, N1 & P1 */ | ||
1085 | REG_WRITE(fp_reg, 0); | ||
1086 | dpll &= ~MDFLD_P1_MASK; | ||
1087 | REG_WRITE(dpll_reg, dpll); | ||
1088 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
1089 | udelay(500); | ||
1090 | } | ||
1091 | |||
1092 | /* When ungating the DPLL power, we need to wait 0.5us before | ||
1093 | * enabling the VCO */ | ||
1094 | if (dpll & MDFLD_PWR_GATE_EN) { | ||
1095 | dpll &= ~MDFLD_PWR_GATE_EN; | ||
1096 | REG_WRITE(dpll_reg, dpll); | ||
1097 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
1098 | udelay(500); | ||
1099 | } | ||
1100 | dpll = 0; | ||
1101 | |||
1102 | #if 0 /* FIXME revisit later */ | ||
1103 | if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 || | ||
1104 | ksel == KSEL_BYPASS_25) | ||
1105 | dpll &= ~MDFLD_INPUT_REF_SEL; | ||
1106 | else if (ksel == KSEL_BYPASS_83_100) | ||
1107 | dpll |= MDFLD_INPUT_REF_SEL; | ||
1108 | #endif /* FIXME revisit later */ | ||
1109 | |||
1110 | if (is_hdmi) | ||
1111 | dpll |= MDFLD_VCO_SEL; | ||
1112 | |||
1113 | fp = (clk_n / 2) << 16; | ||
1114 | fp |= m_conv; | ||
1115 | |||
1116 | /* compute bitmask from p1 value */ | ||
1117 | dpll |= (1 << (clock.p1 - 2)) << 17; | ||
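/*
 * For example, with the encoding above p1 = 2 sets bit 17 (0x00020000),
 * p1 = 3 sets bit 18 (0x00040000) and p1 = 4 sets bit 19 (0x00080000):
 * a one-hot value shifted by (p1 - 2) within the P1 field.
 */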
1118 | |||
1119 | #if 0 /* 1080p30 & 720p */ | ||
1120 | dpll = 0x00050000; | ||
1121 | fp = 0x000001be; | ||
1122 | #endif | ||
1123 | #if 0 /* 480p */ | ||
1124 | dpll = 0x02010000; | ||
1125 | fp = 0x000000d2; | ||
1126 | #endif | ||
1127 | } else { | ||
1128 | #if 0 /*DBI_TPO_480x864*/ | ||
1129 | dpll = 0x00020000; | ||
1130 | fp = 0x00000156; | ||
1131 | #endif /* DBI_TPO_480x864 */ /* get from spec. */ | ||
1132 | |||
1133 | dpll = 0x00800000; | ||
1134 | fp = 0x000000c1; | ||
1135 | } | ||
1136 | |||
1137 | REG_WRITE(fp_reg, fp); | ||
1138 | REG_WRITE(dpll_reg, dpll); | ||
1139 | /* FIXME_MDFLD PO - change 500 to 1 after PO */ | ||
1140 | udelay(500); | ||
1141 | |||
1142 | dpll |= DPLL_VCO_ENABLE; | ||
1143 | REG_WRITE(dpll_reg, dpll); | ||
1144 | REG_READ(dpll_reg); | ||
1145 | |||
1146 | /* wait for DSI PLL to lock */ | ||
1147 | while (timeout < 20000 && | ||
1148 | !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { | ||
1149 | udelay(150); | ||
1150 | timeout++; | ||
1151 | } | ||
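/*
 * Worst case the loop above polls 20000 times at 150us per iteration,
 * i.e. it gives the DSI PLL roughly 3 seconds to report lock; note that
 * there is no explicit error path if the lock bit never sets.
 */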
1152 | |||
1153 | if (is_mipi) | ||
1154 | goto mrst_crtc_mode_set_exit; | ||
1155 | |||
1156 | dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi); | ||
1157 | |||
1158 | REG_WRITE(pipeconf_reg, *pipeconf); | ||
1159 | REG_READ(pipeconf_reg); | ||
1160 | |||
1161 | /* Wait for the pipe enable to take effect. */ | ||
1162 | REG_WRITE(dspcntr_reg, *dspcntr); | ||
1163 | psb_intel_wait_for_vblank(dev); | ||
1164 | |||
1165 | mrst_crtc_mode_set_exit: | ||
1166 | |||
1167 | gma_power_end(dev); | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||
1171 | |||
1172 | const struct drm_crtc_helper_funcs mdfld_helper_funcs = { | ||
1173 | .dpms = mdfld_crtc_dpms, | ||
1174 | .mode_fixup = psb_intel_crtc_mode_fixup, | ||
1175 | .mode_set = mdfld_crtc_mode_set, | ||
1176 | .mode_set_base = mdfld__intel_pipe_set_base, | ||
1177 | .prepare = psb_intel_crtc_prepare, | ||
1178 | .commit = psb_intel_crtc_commit, | ||
1179 | }; | ||
1180 | |||
diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c new file mode 100644 index 000000000000..c95966bb0c96 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_output.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Thomas Eaton <thomas.g.eaton@intel.com> | ||
25 | * Scott Rowe <scott.m.rowe@intel.com> | ||
26 | */ | ||
27 | |||
28 | #include "mdfld_output.h" | ||
29 | #include "mdfld_dsi_dpi.h" | ||
30 | #include "mdfld_dsi_output.h" | ||
31 | |||
32 | #include "tc35876x-dsi-lvds.h" | ||
33 | |||
34 | int mdfld_get_panel_type(struct drm_device *dev, int pipe) | ||
35 | { | ||
36 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
37 | return dev_priv->mdfld_panel_id; | ||
38 | } | ||
39 | |||
40 | static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe, | ||
41 | int p_type) | ||
42 | { | ||
43 | switch (p_type) { | ||
44 | case TPO_VID: | ||
45 | mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs); | ||
46 | break; | ||
47 | case TC35876X: | ||
48 | tc35876x_init(dev); | ||
49 | mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs); | ||
50 | break; | ||
51 | case TMD_VID: | ||
52 | mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs); | ||
53 | break; | ||
54 | case HDMI: | ||
55 | /* if (dev_priv->mdfld_hdmi_present) | ||
56 | mdfld_hdmi_init(dev, &dev_priv->mode_dev); */ | ||
57 | break; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | |||
62 | int mdfld_output_init(struct drm_device *dev) | ||
63 | { | ||
64 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
65 | |||
66 | /* FIXME: hardcoded for now */ | ||
67 | dev_priv->mdfld_panel_id = TC35876X; | ||
68 | /* MIPI panel 1 */ | ||
69 | mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id); | ||
70 | /* HDMI panel */ | ||
71 | mdfld_init_panel(dev, 1, HDMI); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h new file mode 100644 index 000000000000..ab2b27c0f037 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_output.h | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Thomas Eaton <thomas.g.eaton@intel.com> | ||
25 | * Scott Rowe <scott.m.rowe@intel.com> | ||
26 | */ | ||
27 | |||
28 | #ifndef MDFLD_OUTPUT_H | ||
29 | #define MDFLD_OUTPUT_H | ||
30 | |||
31 | #include "psb_drv.h" | ||
32 | |||
33 | #define TPO_PANEL_WIDTH 84 | ||
34 | #define TPO_PANEL_HEIGHT 46 | ||
35 | #define TMD_PANEL_WIDTH 39 | ||
36 | #define TMD_PANEL_HEIGHT 71 | ||
37 | |||
38 | struct mdfld_dsi_config; | ||
39 | |||
40 | enum panel_type { | ||
41 | TPO_VID, | ||
42 | TMD_VID, | ||
43 | HDMI, | ||
44 | TC35876X, | ||
45 | }; | ||
46 | |||
47 | struct panel_info { | ||
48 | u32 width_mm; | ||
49 | u32 height_mm; | ||
50 | /* Other info */ | ||
51 | }; | ||
52 | |||
53 | struct panel_funcs { | ||
54 | const struct drm_encoder_funcs *encoder_funcs; | ||
55 | const struct drm_encoder_helper_funcs *encoder_helper_funcs; | ||
56 | struct drm_display_mode * (*get_config_mode)(struct drm_device *); | ||
57 | int (*get_panel_info)(struct drm_device *, int, struct panel_info *); | ||
58 | int (*reset)(int pipe); | ||
59 | void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe); | ||
60 | }; | ||
61 | |||
62 | int mdfld_output_init(struct drm_device *dev); | ||
63 | |||
64 | struct backlight_device *mdfld_get_backlight_device(void); | ||
65 | int mdfld_set_brightness(struct backlight_device *bd); | ||
66 | |||
67 | int mdfld_get_panel_type(struct drm_device *dev, int pipe); | ||
68 | |||
69 | extern const struct drm_crtc_helper_funcs mdfld_helper_funcs; | ||
70 | |||
71 | extern const struct panel_funcs mdfld_tmd_vid_funcs; | ||
72 | extern const struct panel_funcs mdfld_tpo_vid_funcs; | ||
73 | |||
74 | extern void mdfld_disable_crtc(struct drm_device *dev, int pipe); | ||
75 | extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe); | ||
76 | extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe); | ||
77 | #endif | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c new file mode 100644 index 000000000000..dc0c6c3d3d29 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Jim Liu <jim.liu@intel.com> | ||
25 | * Jackie Li<yaodong.li@intel.com> | ||
26 | * Gideon Eaton <eaton. | ||
27 | * Scott Rowe <scott.m.rowe@intel.com> | ||
28 | */ | ||
29 | |||
30 | #include "mdfld_dsi_dpi.h" | ||
31 | #include "mdfld_dsi_pkg_sender.h" | ||
32 | |||
33 | static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev) | ||
34 | { | ||
35 | struct drm_display_mode *mode; | ||
36 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
37 | struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; | ||
38 | bool use_gct = false; /*Disable GCT for now*/ | ||
39 | |||
40 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | ||
41 | if (!mode) | ||
42 | return NULL; | ||
43 | |||
44 | if (use_gct) { | ||
45 | mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; | ||
46 | mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; | ||
47 | mode->hsync_start = mode->hdisplay + \ | ||
48 | ((ti->hsync_offset_hi << 8) | \ | ||
49 | ti->hsync_offset_lo); | ||
50 | mode->hsync_end = mode->hsync_start + \ | ||
51 | ((ti->hsync_pulse_width_hi << 8) | \ | ||
52 | ti->hsync_pulse_width_lo); | ||
53 | mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ | ||
54 | ti->hblank_lo); | ||
55 | mode->vsync_start = \ | ||
56 | mode->vdisplay + ((ti->vsync_offset_hi << 8) | \ | ||
57 | ti->vsync_offset_lo); | ||
58 | mode->vsync_end = \ | ||
59 | mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \ | ||
60 | ti->vsync_pulse_width_lo); | ||
61 | mode->vtotal = mode->vdisplay + \ | ||
62 | ((ti->vblank_hi << 8) | ti->vblank_lo); | ||
63 | mode->clock = ti->pixel_clock * 10; | ||
64 | |||
65 | dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay); | ||
66 | dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay); | ||
67 | dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start); | ||
68 | dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end); | ||
69 | dev_dbg(dev->dev, "htotal is %d\n", mode->htotal); | ||
70 | dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start); | ||
71 | dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end); | ||
72 | dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal); | ||
73 | dev_dbg(dev->dev, "clock is %d\n", mode->clock); | ||
74 | } else { | ||
75 | mode->hdisplay = 480; | ||
76 | mode->vdisplay = 854; | ||
77 | mode->hsync_start = 487; | ||
78 | mode->hsync_end = 490; | ||
79 | mode->htotal = 499; | ||
80 | mode->vsync_start = 861; | ||
81 | mode->vsync_end = 865; | ||
82 | mode->vtotal = 873; | ||
83 | mode->clock = 33264; | ||
84 | } | ||
85 | |||
86 | drm_mode_set_name(mode); | ||
87 | drm_mode_set_crtcinfo(mode, 0); | ||
88 | |||
89 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
90 | |||
91 | return mode; | ||
92 | } | ||
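/*
 * In the GCT path above each timing value is split into high/low bytes,
 * e.g. hactive_hi = 0x01 and hactive_lo = 0xe0 reassemble to
 * (0x01 << 8) | 0xe0 = 480; pixel_clock is presumably stored in 10 kHz
 * units (as in EDID), hence the * 10 to get mode->clock in kHz.
 */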
93 | |||
94 | static int tmd_vid_get_panel_info(struct drm_device *dev, | ||
95 | int pipe, | ||
96 | struct panel_info *pi) | ||
97 | { | ||
98 | if (!dev || !pi) | ||
99 | return -EINVAL; | ||
100 | |||
101 | pi->width_mm = TMD_PANEL_WIDTH; | ||
102 | pi->height_mm = TMD_PANEL_HEIGHT; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* ************************************************************************* *\ | ||
108 | * FUNCTION: mdfld_init_TMD_MIPI | ||
109 | * | ||
110 | * DESCRIPTION: This function is called only by mrst_dsi_mode_set and | ||
111 | * restore_display_registers. Since this function does not | ||
112 | * acquire the mutex, it is important that the calling function | ||
113 | * does! | ||
114 | \* ************************************************************************* */ | ||
115 | |||
116 | /* FIXME: make the below data u8 instead of u32; note byte order! */ | ||
117 | static u32 tmd_cmd_mcap_off[] = {0x000000b2}; | ||
118 | static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef}; | ||
119 | static u32 tmd_cmd_set_lane_num[] = {0x006360ef}; | ||
120 | static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef}; | ||
121 | static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef}; | ||
122 | static u32 tmd_cmd_set_mode[] = {0x000000b3}; | ||
123 | static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef}; | ||
124 | static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df}; | ||
125 | static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055}; | ||
126 | static u32 tmd_cmd_set_video_mode[] = {0x00000153}; | ||
127 | /* no auto_bl, need to add in future */ | ||
128 | static u32 tmd_cmd_enable_backlight[] = {0x00005ab4}; | ||
129 | static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd}; | ||
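/*
 * As the FIXME above notes, these tables are really byte streams stored
 * in u32 cells. Assuming the little-endian layout of the target CPU, a
 * hypothetical u8 equivalent of the first entry would be:
 * static u8 tmd_cmd_mcap_off[] = { 0xb2, 0x00, 0x00, 0x00 };
 */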
130 | |||
131 | static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config, | ||
132 | int pipe) | ||
133 | { | ||
134 | struct mdfld_dsi_pkg_sender *sender | ||
135 | = mdfld_dsi_get_pkg_sender(dsi_config); | ||
136 | |||
137 | DRM_INFO("Enter mdfld init TMD MIPI display.\n"); | ||
138 | |||
139 | if (!sender) { | ||
140 | DRM_ERROR("Cannot get sender\n"); | ||
141 | return; | ||
142 | } | ||
143 | |||
144 | if (dsi_config->dvr_ic_inited) | ||
145 | return; | ||
146 | |||
147 | msleep(3); | ||
148 | |||
149 | /* FIXME: make the below data u8 instead of u32; note byte order! */ | ||
150 | |||
151 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off, | ||
152 | sizeof(tmd_cmd_mcap_off), false); | ||
153 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch, | ||
154 | sizeof(tmd_cmd_enable_lane_switch), false); | ||
155 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num, | ||
156 | sizeof(tmd_cmd_set_lane_num), false); | ||
157 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0, | ||
158 | sizeof(tmd_cmd_pushing_clock0), false); | ||
159 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1, | ||
160 | sizeof(tmd_cmd_pushing_clock1), false); | ||
161 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode, | ||
162 | sizeof(tmd_cmd_set_mode), false); | ||
163 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode, | ||
164 | sizeof(tmd_cmd_set_sync_pulse_mode), false); | ||
165 | mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column, | ||
166 | sizeof(tmd_cmd_set_column), false); | ||
167 | mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page, | ||
168 | sizeof(tmd_cmd_set_page), false); | ||
169 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode, | ||
170 | sizeof(tmd_cmd_set_video_mode), false); | ||
171 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight, | ||
172 | sizeof(tmd_cmd_enable_backlight), false); | ||
173 | mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming, | ||
174 | sizeof(tmd_cmd_set_backlight_dimming), false); | ||
175 | |||
176 | dsi_config->dvr_ic_inited = 1; | ||
177 | } | ||
178 | |||
179 | /*TPO DPI encoder helper funcs*/ | ||
180 | static const struct drm_encoder_helper_funcs | ||
181 | mdfld_tpo_dpi_encoder_helper_funcs = { | ||
182 | .dpms = mdfld_dsi_dpi_dpms, | ||
183 | .mode_fixup = mdfld_dsi_dpi_mode_fixup, | ||
184 | .prepare = mdfld_dsi_dpi_prepare, | ||
185 | .mode_set = mdfld_dsi_dpi_mode_set, | ||
186 | .commit = mdfld_dsi_dpi_commit, | ||
187 | }; | ||
188 | |||
189 | /*TPO DPI encoder funcs*/ | ||
190 | static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { | ||
191 | .destroy = drm_encoder_cleanup, | ||
192 | }; | ||
193 | |||
194 | const struct panel_funcs mdfld_tmd_vid_funcs = { | ||
195 | .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, | ||
196 | .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, | ||
197 | .get_config_mode = &tmd_vid_get_config_mode, | ||
198 | .get_panel_info = tmd_vid_get_panel_info, | ||
199 | .reset = mdfld_dsi_panel_reset, | ||
200 | .drv_ic_init = mdfld_dsi_tmd_drv_ic_init, | ||
201 | }; | ||
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c new file mode 100644 index 000000000000..d8d4170725b2 --- /dev/null +++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * jim liu <jim.liu@intel.com> | ||
25 | * Jackie Li<yaodong.li@intel.com> | ||
26 | */ | ||
27 | |||
28 | #include "mdfld_dsi_dpi.h" | ||
29 | |||
30 | static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev) | ||
31 | { | ||
32 | struct drm_display_mode *mode; | ||
33 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
34 | struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; | ||
35 | bool use_gct = false; | ||
36 | |||
37 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | ||
38 | if (!mode) | ||
39 | return NULL; | ||
40 | |||
41 | if (use_gct) { | ||
42 | mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; | ||
43 | mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; | ||
44 | mode->hsync_start = mode->hdisplay + | ||
45 | ((ti->hsync_offset_hi << 8) | | ||
46 | ti->hsync_offset_lo); | ||
47 | mode->hsync_end = mode->hsync_start + | ||
48 | ((ti->hsync_pulse_width_hi << 8) | | ||
49 | ti->hsync_pulse_width_lo); | ||
50 | mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | | ||
51 | ti->hblank_lo); | ||
52 | mode->vsync_start = | ||
53 | mode->vdisplay + ((ti->vsync_offset_hi << 8) | | ||
54 | ti->vsync_offset_lo); | ||
55 | mode->vsync_end = | ||
56 | mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | | ||
57 | ti->vsync_pulse_width_lo); | ||
58 | mode->vtotal = mode->vdisplay + | ||
59 | ((ti->vblank_hi << 8) | ti->vblank_lo); | ||
60 | mode->clock = ti->pixel_clock * 10; | ||
61 | |||
62 | dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay); | ||
63 | dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay); | ||
64 | dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start); | ||
65 | dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end); | ||
66 | dev_dbg(dev->dev, "htotal is %d\n", mode->htotal); | ||
67 | dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start); | ||
68 | dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end); | ||
69 | dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal); | ||
70 | dev_dbg(dev->dev, "clock is %d\n", mode->clock); | ||
71 | } else { | ||
72 | mode->hdisplay = 864; | ||
73 | mode->vdisplay = 480; | ||
74 | mode->hsync_start = 873; | ||
75 | mode->hsync_end = 876; | ||
76 | mode->htotal = 887; | ||
77 | mode->vsync_start = 487; | ||
78 | mode->vsync_end = 490; | ||
79 | mode->vtotal = 499; | ||
80 | mode->clock = 33264; | ||
81 | } | ||
82 | |||
83 | drm_mode_set_name(mode); | ||
84 | drm_mode_set_crtcinfo(mode, 0); | ||
85 | |||
86 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
87 | |||
88 | return mode; | ||
89 | } | ||
90 | |||
91 | static int tpo_vid_get_panel_info(struct drm_device *dev, | ||
92 | int pipe, | ||
93 | struct panel_info *pi) | ||
94 | { | ||
95 | if (!dev || !pi) | ||
96 | return -EINVAL; | ||
97 | |||
98 | pi->width_mm = TPO_PANEL_WIDTH; | ||
99 | pi->height_mm = TPO_PANEL_HEIGHT; | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | /*TPO DPI encoder helper funcs*/ | ||
105 | static const struct drm_encoder_helper_funcs | ||
106 | mdfld_tpo_dpi_encoder_helper_funcs = { | ||
107 | .dpms = mdfld_dsi_dpi_dpms, | ||
108 | .mode_fixup = mdfld_dsi_dpi_mode_fixup, | ||
109 | .prepare = mdfld_dsi_dpi_prepare, | ||
110 | .mode_set = mdfld_dsi_dpi_mode_set, | ||
111 | .commit = mdfld_dsi_dpi_commit, | ||
112 | }; | ||
113 | |||
114 | /*TPO DPI encoder funcs*/ | ||
115 | static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { | ||
116 | .destroy = drm_encoder_cleanup, | ||
117 | }; | ||
118 | |||
119 | const struct panel_funcs mdfld_tpo_vid_funcs = { | ||
120 | .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, | ||
121 | .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, | ||
122 | .get_config_mode = &tpo_vid_get_config_mode, | ||
123 | .get_panel_info = tpo_vid_get_panel_info, | ||
124 | }; | ||
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index e80ee82f6caf..49bac41beefb 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c | |||
@@ -270,7 +270,7 @@ out_err1: | |||
270 | return NULL; | 270 | return NULL; |
271 | } | 271 | } |
272 | 272 | ||
273 | void psb_mmu_free_pt(struct psb_mmu_pt *pt) | 273 | static void psb_mmu_free_pt(struct psb_mmu_pt *pt) |
274 | { | 274 | { |
275 | __free_page(pt->p); | 275 | __free_page(pt->p); |
276 | kfree(pt); | 276 | kfree(pt); |
@@ -351,7 +351,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) | |||
351 | return pt; | 351 | return pt; |
352 | } | 352 | } |
353 | 353 | ||
354 | struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, | 354 | static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, |
355 | unsigned long addr) | 355 | unsigned long addr) |
356 | { | 356 | { |
357 | uint32_t index = psb_mmu_pd_index(addr); | 357 | uint32_t index = psb_mmu_pd_index(addr); |
@@ -488,15 +488,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) | |||
488 | return pd; | 488 | return pd; |
489 | } | 489 | } |
490 | 490 | ||
491 | /* Returns the physical address of the PD shared by sgx/msvdx */ | ||
492 | uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver) | ||
493 | { | ||
494 | struct psb_mmu_pd *pd; | ||
495 | |||
496 | pd = psb_mmu_get_default_pd(driver); | ||
497 | return page_to_pfn(pd->p) << PAGE_SHIFT; | ||
498 | } | ||
499 | |||
500 | void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) | 491 | void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) |
501 | { | 492 | { |
502 | psb_mmu_free_pagedir(driver->default_pd); | 493 | psb_mmu_free_pagedir(driver->default_pd); |
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 9d12a3ee1600..a39b0d0d680f 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c | |||
@@ -115,7 +115,7 @@ static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock) | |||
115 | clock->dot = (refclk * clock->m) / (14 * clock->p1); | 115 | clock->dot = (refclk * clock->m) / (14 * clock->p1); |
116 | } | 116 | } |
117 | 117 | ||
118 | void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock) | 118 | static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock) |
119 | { | 119 | { |
120 | pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n", | 120 | pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n", |
121 | prefix, clock->dot, clock->m, clock->p1); | 121 | prefix, clock->dot, clock->m, clock->p1); |
@@ -169,7 +169,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
169 | int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE; | 169 | int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE; |
170 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 170 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
171 | u32 temp; | 171 | u32 temp; |
172 | bool enabled; | ||
173 | 172 | ||
174 | if (!gma_power_begin(dev, true)) | 173 | if (!gma_power_begin(dev, true)) |
175 | return; | 174 | return; |
@@ -253,8 +252,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
253 | break; | 252 | break; |
254 | } | 253 | } |
255 | 254 | ||
256 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; | ||
257 | |||
258 | /*Set FIFO Watermarks*/ | 255 | /*Set FIFO Watermarks*/ |
259 | REG_WRITE(DSPARB, 0x3FFF); | 256 | REG_WRITE(DSPARB, 0x3FFF); |
260 | REG_WRITE(DSPFW1, 0x3F88080A); | 257 | REG_WRITE(DSPFW1, 0x3F88080A); |
@@ -310,7 +307,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, | |||
310 | struct oaktrail_clock_t clock; | 307 | struct oaktrail_clock_t clock; |
311 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 308 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
312 | bool ok, is_sdvo = false; | 309 | bool ok, is_sdvo = false; |
313 | bool is_crt = false, is_lvds = false, is_tv = false; | 310 | bool is_lvds = false; |
314 | bool is_mipi = false; | 311 | bool is_mipi = false; |
315 | struct drm_mode_config *mode_config = &dev->mode_config; | 312 | struct drm_mode_config *mode_config = &dev->mode_config; |
316 | struct psb_intel_encoder *psb_intel_encoder = NULL; | 313 | struct psb_intel_encoder *psb_intel_encoder = NULL; |
@@ -340,12 +337,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, | |||
340 | case INTEL_OUTPUT_SDVO: | 337 | case INTEL_OUTPUT_SDVO: |
341 | is_sdvo = true; | 338 | is_sdvo = true; |
342 | break; | 339 | break; |
343 | case INTEL_OUTPUT_TVOUT: | ||
344 | is_tv = true; | ||
345 | break; | ||
346 | case INTEL_OUTPUT_ANALOG: | ||
347 | is_crt = true; | ||
348 | break; | ||
349 | case INTEL_OUTPUT_MIPI: | 340 | case INTEL_OUTPUT_MIPI: |
350 | is_mipi = true; | 341 | is_mipi = true; |
351 | break; | 342 | break; |
@@ -428,9 +419,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, | |||
428 | else | 419 | else |
429 | dspcntr |= DISPPLANE_SEL_PIPE_B; | 420 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
430 | 421 | ||
431 | dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE; | ||
432 | dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE; | ||
433 | |||
434 | if (is_mipi) | 422 | if (is_mipi) |
435 | goto oaktrail_crtc_mode_set_exit; | 423 | goto oaktrail_crtc_mode_set_exit; |
436 | 424 | ||
@@ -517,7 +505,7 @@ static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc, | |||
517 | return true; | 505 | return true; |
518 | } | 506 | } |
519 | 507 | ||
520 | int oaktrail_pipe_set_base(struct drm_crtc *crtc, | 508 | static int oaktrail_pipe_set_base(struct drm_crtc *crtc, |
521 | int x, int y, struct drm_framebuffer *old_fb) | 509 | int x, int y, struct drm_framebuffer *old_fb) |
522 | { | 510 | { |
523 | struct drm_device *dev = crtc->dev; | 511 | struct drm_device *dev = crtc->dev; |
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index 63aea2f010d9..41d1924ea31e 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c | |||
@@ -141,7 +141,7 @@ static const struct backlight_ops oaktrail_ops = { | |||
141 | .update_status = oaktrail_set_brightness, | 141 | .update_status = oaktrail_set_brightness, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | int oaktrail_backlight_init(struct drm_device *dev) | 144 | static int oaktrail_backlight_init(struct drm_device *dev) |
145 | { | 145 | { |
146 | struct drm_psb_private *dev_priv = dev->dev_private; | 146 | struct drm_psb_private *dev_priv = dev->dev_private; |
147 | int ret; | 147 | int ret; |
@@ -176,10 +176,6 @@ int oaktrail_backlight_init(struct drm_device *dev) | |||
176 | * for power management | 176 | * for power management |
177 | */ | 177 | */ |
178 | 178 | ||
179 | static void oaktrail_init_pm(struct drm_device *dev) | ||
180 | { | ||
181 | } | ||
182 | |||
183 | /** | 179 | /** |
184 | * oaktrail_save_display_registers - save registers lost on suspend | 180 | * oaktrail_save_display_registers - save registers lost on suspend |
185 | * @dev: our DRM device | 181 | * @dev: our DRM device |
@@ -190,81 +186,82 @@ static void oaktrail_init_pm(struct drm_device *dev) | |||
190 | static int oaktrail_save_display_registers(struct drm_device *dev) | 186 | static int oaktrail_save_display_registers(struct drm_device *dev) |
191 | { | 187 | { |
192 | struct drm_psb_private *dev_priv = dev->dev_private; | 188 | struct drm_psb_private *dev_priv = dev->dev_private; |
189 | struct psb_save_area *regs = &dev_priv->regs; | ||
193 | int i; | 190 | int i; |
194 | u32 pp_stat; | 191 | u32 pp_stat; |
195 | 192 | ||
196 | /* Display arbitration control + watermarks */ | 193 | /* Display arbitration control + watermarks */ |
197 | dev_priv->saveDSPARB = PSB_RVDC32(DSPARB); | 194 | regs->psb.saveDSPARB = PSB_RVDC32(DSPARB); |
198 | dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1); | 195 | regs->psb.saveDSPFW1 = PSB_RVDC32(DSPFW1); |
199 | dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2); | 196 | regs->psb.saveDSPFW2 = PSB_RVDC32(DSPFW2); |
200 | dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3); | 197 | regs->psb.saveDSPFW3 = PSB_RVDC32(DSPFW3); |
201 | dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4); | 198 | regs->psb.saveDSPFW4 = PSB_RVDC32(DSPFW4); |
202 | dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5); | 199 | regs->psb.saveDSPFW5 = PSB_RVDC32(DSPFW5); |
203 | dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6); | 200 | regs->psb.saveDSPFW6 = PSB_RVDC32(DSPFW6); |
204 | dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); | 201 | regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); |
205 | 202 | ||
206 | /* Pipe & plane A info */ | 203 | /* Pipe & plane A info */ |
207 | dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF); | 204 | regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF); |
208 | dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC); | 205 | regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC); |
209 | dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0); | 206 | regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0); |
210 | dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1); | 207 | regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1); |
211 | dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A); | 208 | regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A); |
212 | dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A); | 209 | regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A); |
213 | dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A); | 210 | regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A); |
214 | dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A); | 211 | regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A); |
215 | dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A); | 212 | regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A); |
216 | dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A); | 213 | regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A); |
217 | dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A); | 214 | regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A); |
218 | dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A); | 215 | regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A); |
219 | dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR); | 216 | regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR); |
220 | dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE); | 217 | regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE); |
221 | dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE); | 218 | regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE); |
222 | dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF); | 219 | regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF); |
223 | dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF); | 220 | regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF); |
224 | dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF); | 221 | regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF); |
225 | 222 | ||
226 | /* Save cursor regs */ | 223 | /* Save cursor regs */ |
227 | dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR); | 224 | regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR); |
228 | dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE); | 225 | regs->psb.saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE); |
229 | dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS); | 226 | regs->psb.saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS); |
230 | 227 | ||
231 | /* Save palette (gamma) */ | 228 | /* Save palette (gamma) */ |
232 | for (i = 0; i < 256; i++) | 229 | for (i = 0; i < 256; i++) |
233 | dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2)); | 230 | regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2)); |
234 | 231 | ||
235 | if (dev_priv->hdmi_priv) | 232 | if (dev_priv->hdmi_priv) |
236 | oaktrail_hdmi_save(dev); | 233 | oaktrail_hdmi_save(dev); |
237 | 234 | ||
238 | /* Save performance state */ | 235 | /* Save performance state */ |
239 | dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE); | 236 | regs->psb.savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE); |
240 | 237 | ||
241 | /* LVDS state */ | 238 | /* LVDS state */ |
242 | dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL); | 239 | regs->psb.savePP_CONTROL = PSB_RVDC32(PP_CONTROL); |
243 | dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); | 240 | regs->psb.savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); |
244 | dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS); | 241 | regs->psb.savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS); |
245 | dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL); | 242 | regs->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL); |
246 | dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2); | 243 | regs->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2); |
247 | dev_priv->saveLVDS = PSB_RVDC32(LVDS); | 244 | regs->psb.saveLVDS = PSB_RVDC32(LVDS); |
248 | dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); | 245 | regs->psb.savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); |
249 | dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON); | 246 | regs->psb.savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON); |
250 | dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF); | 247 | regs->psb.savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF); |
251 | dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE); | 248 | regs->psb.savePP_DIVISOR = PSB_RVDC32(PP_CYCLE); |
252 | 249 | ||
253 | /* HW overlay */ | 250 | /* HW overlay */ |
254 | dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD); | 251 | regs->psb.saveOV_OVADD = PSB_RVDC32(OV_OVADD); |
255 | dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0); | 252 | regs->psb.saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0); |
256 | dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1); | 253 | regs->psb.saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1); |
257 | dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2); | 254 | regs->psb.saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2); |
258 | dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3); | 255 | regs->psb.saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3); |
259 | dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4); | 256 | regs->psb.saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4); |
260 | dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5); | 257 | regs->psb.saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5); |
261 | 258 | ||
262 | /* DPST registers */ | 259 | /* DPST registers */ |
263 | dev_priv->saveHISTOGRAM_INT_CONTROL_REG = | 260 | regs->psb.saveHISTOGRAM_INT_CONTROL_REG = |
264 | PSB_RVDC32(HISTOGRAM_INT_CONTROL); | 261 | PSB_RVDC32(HISTOGRAM_INT_CONTROL); |
265 | dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG = | 262 | regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG = |
266 | PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL); | 263 | PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL); |
267 | dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC); | 264 | regs->psb.savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC); |
268 | 265 | ||
269 | if (dev_priv->iLVDS_enable) { | 266 | if (dev_priv->iLVDS_enable) { |
270 | /* Shut down the panel */ | 267 | /* Shut down the panel */ |
@@ -302,79 +299,80 @@ static int oaktrail_save_display_registers(struct drm_device *dev) | |||
302 | static int oaktrail_restore_display_registers(struct drm_device *dev) | 299 | static int oaktrail_restore_display_registers(struct drm_device *dev) |
303 | { | 300 | { |
304 | struct drm_psb_private *dev_priv = dev->dev_private; | 301 | struct drm_psb_private *dev_priv = dev->dev_private; |
302 | struct psb_save_area *regs = &dev_priv->regs; | ||
305 | u32 pp_stat; | 303 | u32 pp_stat; |
306 | int i; | 304 | int i; |
307 | 305 | ||
308 | /* Display arbitration + watermarks */ | 306 | /* Display arbitration + watermarks */ |
309 | PSB_WVDC32(dev_priv->saveDSPARB, DSPARB); | 307 | PSB_WVDC32(regs->psb.saveDSPARB, DSPARB); |
310 | PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1); | 308 | PSB_WVDC32(regs->psb.saveDSPFW1, DSPFW1); |
311 | PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2); | 309 | PSB_WVDC32(regs->psb.saveDSPFW2, DSPFW2); |
312 | PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3); | 310 | PSB_WVDC32(regs->psb.saveDSPFW3, DSPFW3); |
313 | PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4); | 311 | PSB_WVDC32(regs->psb.saveDSPFW4, DSPFW4); |
314 | PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5); | 312 | PSB_WVDC32(regs->psb.saveDSPFW5, DSPFW5); |
315 | PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6); | 313 | PSB_WVDC32(regs->psb.saveDSPFW6, DSPFW6); |
316 | PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT); | 314 | PSB_WVDC32(regs->psb.saveCHICKENBIT, DSPCHICKENBIT); |
317 | 315 | ||
318 | /* Make sure VGA plane is off. it initializes to on after reset!*/ | 316 | /* Make sure VGA plane is off. it initializes to on after reset!*/ |
319 | PSB_WVDC32(0x80000000, VGACNTRL); | 317 | PSB_WVDC32(0x80000000, VGACNTRL); |
320 | 318 | ||
321 | /* set the plls */ | 319 | /* set the plls */ |
322 | PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0); | 320 | PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0); |
323 | PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1); | 321 | PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1); |
324 | 322 | ||
325 | /* Actually enable it */ | 323 | /* Actually enable it */ |
326 | PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A); | 324 | PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A); |
327 | DRM_UDELAY(150); | 325 | DRM_UDELAY(150); |
328 | 326 | ||
329 | /* Restore mode */ | 327 | /* Restore mode */ |
330 | PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A); | 328 | PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A); |
331 | PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A); | 329 | PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A); |
332 | PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A); | 330 | PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A); |
333 | PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A); | 331 | PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A); |
334 | PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A); | 332 | PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A); |
335 | PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A); | 333 | PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A); |
336 | PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC); | 334 | PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC); |
337 | PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A); | 335 | PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A); |
338 | 336 | ||
339 | /* Restore performance mode*/ | 337 | /* Restore performance mode*/ |
340 | PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE); | 338 | PSB_WVDC32(regs->psb.savePERF_MODE, MRST_PERF_MODE); |
341 | 339 | ||
342 | /* Enable the pipe*/ | 340 | /* Enable the pipe*/ |
343 | if (dev_priv->iLVDS_enable) | 341 | if (dev_priv->iLVDS_enable) |
344 | PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF); | 342 | PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF); |
345 | 343 | ||
346 | /* Set up the plane*/ | 344 | /* Set up the plane*/ |
347 | PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF); | 345 | PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF); |
348 | PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE); | 346 | PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE); |
349 | PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF); | 347 | PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF); |
350 | 348 | ||
351 | /* Enable the plane */ | 349 | /* Enable the plane */ |
352 | PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR); | 350 | PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR); |
353 | PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF); | 351 | PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF); |
354 | 352 | ||
355 | /* Enable Cursor A */ | 353 | /* Enable Cursor A */ |
356 | PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR); | 354 | PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR); |
357 | PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS); | 355 | PSB_WVDC32(regs->psb.saveDSPACURSOR_POS, CURAPOS); |
358 | PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE); | 356 | PSB_WVDC32(regs->psb.saveDSPACURSOR_BASE, CURABASE); |
359 | 357 | ||
360 | /* Restore palette (gamma) */ | 358 | /* Restore palette (gamma) */ |
361 | for (i = 0; i < 256; i++) | 359 | for (i = 0; i < 256; i++) |
362 | PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2)); | 360 | PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2)); |
363 | 361 | ||
364 | if (dev_priv->hdmi_priv) | 362 | if (dev_priv->hdmi_priv) |
365 | oaktrail_hdmi_restore(dev); | 363 | oaktrail_hdmi_restore(dev); |
366 | 364 | ||
367 | if (dev_priv->iLVDS_enable) { | 365 | if (dev_priv->iLVDS_enable) { |
368 | PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2); | 366 | PSB_WVDC32(regs->saveBLC_PWM_CTL2, BLC_PWM_CTL2); |
369 | PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/ | 367 | PSB_WVDC32(regs->psb.saveLVDS, LVDS); /*port 61180h*/ |
370 | PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL); | 368 | PSB_WVDC32(regs->psb.savePFIT_CONTROL, PFIT_CONTROL); |
371 | PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); | 369 | PSB_WVDC32(regs->psb.savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); |
372 | PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS); | 370 | PSB_WVDC32(regs->psb.savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS); |
373 | PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL); | 371 | PSB_WVDC32(regs->saveBLC_PWM_CTL, BLC_PWM_CTL); |
374 | PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON); | 372 | PSB_WVDC32(regs->psb.savePP_ON_DELAYS, LVDSPP_ON); |
375 | PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF); | 373 | PSB_WVDC32(regs->psb.savePP_OFF_DELAYS, LVDSPP_OFF); |
376 | PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE); | 374 | PSB_WVDC32(regs->psb.savePP_DIVISOR, PP_CYCLE); |
377 | PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL); | 375 | PSB_WVDC32(regs->psb.savePP_CONTROL, PP_CONTROL); |
378 | } | 376 | } |
379 | 377 | ||
380 | /* Wait for cycle delay */ | 378 | /* Wait for cycle delay */ |
@@ -388,20 +386,20 @@ static int oaktrail_restore_display_registers(struct drm_device *dev) | |||
388 | } while (pp_stat & 0x10000000); | 386 | } while (pp_stat & 0x10000000); |
389 | 387 | ||
390 | /* Restore HW overlay */ | 388 | /* Restore HW overlay */ |
391 | PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD); | 389 | PSB_WVDC32(regs->psb.saveOV_OVADD, OV_OVADD); |
392 | PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0); | 390 | PSB_WVDC32(regs->psb.saveOV_OGAMC0, OV_OGAMC0); |
393 | PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1); | 391 | PSB_WVDC32(regs->psb.saveOV_OGAMC1, OV_OGAMC1); |
394 | PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2); | 392 | PSB_WVDC32(regs->psb.saveOV_OGAMC2, OV_OGAMC2); |
395 | PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3); | 393 | PSB_WVDC32(regs->psb.saveOV_OGAMC3, OV_OGAMC3); |
396 | PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4); | 394 | PSB_WVDC32(regs->psb.saveOV_OGAMC4, OV_OGAMC4); |
397 | PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5); | 395 | PSB_WVDC32(regs->psb.saveOV_OGAMC5, OV_OGAMC5); |
398 | 396 | ||
399 | /* DPST registers */ | 397 | /* DPST registers */ |
400 | PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG, | 398 | PSB_WVDC32(regs->psb.saveHISTOGRAM_INT_CONTROL_REG, |
401 | HISTOGRAM_INT_CONTROL); | 399 | HISTOGRAM_INT_CONTROL); |
402 | PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG, | 400 | PSB_WVDC32(regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG, |
403 | HISTOGRAM_LOGIC_CONTROL); | 401 | HISTOGRAM_LOGIC_CONTROL); |
404 | PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC); | 402 | PSB_WVDC32(regs->psb.savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC); |
405 | 403 | ||
406 | return 0; | 404 | return 0; |
407 | } | 405 | } |
@@ -502,7 +500,6 @@ const struct psb_ops oaktrail_chip_ops = { | |||
502 | .backlight_init = oaktrail_backlight_init, | 500 | .backlight_init = oaktrail_backlight_init, |
503 | #endif | 501 | #endif |
504 | 502 | ||
505 | .init_pm = oaktrail_init_pm, | ||
506 | .save_regs = oaktrail_save_display_registers, | 503 | .save_regs = oaktrail_save_display_registers, |
507 | .restore_regs = oaktrail_restore_display_registers, | 504 | .restore_regs = oaktrail_restore_display_registers, |
508 | .power_down = oaktrail_power_down, | 505 | .power_down = oaktrail_power_down, |
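The hunks above rewrite oaktrail_restore_display_registers() to pull every cached value out of regs->psb instead of loose fields on dev_priv, writing the pipe, plane, cursor and palette registers back in a fixed order. The sketch below is a minimal, self-contained illustration of that save/restore idea; reg_read()/reg_write(), the register offsets and the tiny display_state struct are stand-ins invented for the example, not the driver's real PSB_RVDC32/PSB_WVDC32 helpers or struct psb_state (reg_write() keeps the same value-first argument order as PSB_WVDC32).

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's MMIO accessors. */
static uint32_t fake_mmio[260];

static uint32_t reg_read(unsigned int off)
{
	return fake_mmio[off];
}

static void reg_write(uint32_t val, unsigned int off)
{
	fake_mmio[off] = val;
}

enum { PIPECONF = 0, DSPCNTR = 1, PALETTE = 4 };

/* Tiny analogue of struct psb_state: one cached copy of each register. */
struct display_state {
	uint32_t pipeconf;
	uint32_t dspcntr;
	uint32_t palette[256];
};

static void save_display(struct display_state *s)
{
	int i;

	s->pipeconf = reg_read(PIPECONF);
	s->dspcntr = reg_read(DSPCNTR);
	for (i = 0; i < 256; i++)
		s->palette[i] = reg_read(PALETTE + i);
}

static void restore_display(const struct display_state *s)
{
	int i;

	/* Same order as the driver: pipe first, then plane, then palette. */
	reg_write(s->pipeconf, PIPECONF);
	reg_write(s->dspcntr, DSPCNTR);
	for (i = 0; i < 256; i++)
		reg_write(s->palette[i], PALETTE + i);
}

int main(void)
{
	struct display_state st;

	fake_mmio[PIPECONF] = 0x80000000u;
	save_display(&st);
	fake_mmio[PIPECONF] = 0;	/* pretend the power island was shut off */
	restore_display(&st);
	printf("PIPECONF restored to %#x\n", (unsigned int)fake_mmio[PIPECONF]);
	return 0;
}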
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 025d30970cc0..f8b367b45f66 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c | |||
@@ -125,59 +125,6 @@ static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = { | |||
125 | .nf = { .min = NF_MIN, .max = NF_MAX }, | 125 | .nf = { .min = NF_MIN, .max = NF_MAX }, |
126 | }; | 126 | }; |
127 | 127 | ||
128 | static void wait_for_vblank(struct drm_device *dev) | ||
129 | { | ||
130 | /* FIXME: Can we do this as a sleep ? */ | ||
131 | /* Wait for 20ms, i.e. one cycle at 50hz. */ | ||
132 | mdelay(20); | ||
133 | } | ||
134 | |||
135 | static void scu_busy_loop(void *scu_base) | ||
136 | { | ||
137 | u32 status = 0; | ||
138 | u32 loop_count = 0; | ||
139 | |||
140 | status = readl(scu_base + 0x04); | ||
141 | while (status & 1) { | ||
142 | udelay(1); /* scu processing time is a few microseconds */ | ||
143 | status = readl(scu_base + 0x04); | ||
144 | loop_count++; | ||
145 | /* break if scu doesn't reset busy bit after huge retry */ | ||
146 | if (loop_count > 1000) { | ||
147 | DRM_DEBUG_KMS("SCU IPC timed out"); | ||
148 | return; | ||
149 | } | ||
150 | } | ||
151 | } | ||
152 | |||
153 | static void oaktrail_hdmi_reset(struct drm_device *dev) | ||
154 | { | ||
155 | void *base; | ||
156 | /* FIXME: at least make these defines */ | ||
157 | unsigned int scu_ipc_mmio = 0xff11c000; | ||
158 | int scu_len = 1024; | ||
159 | |||
160 | base = ioremap((resource_size_t)scu_ipc_mmio, scu_len); | ||
161 | if (base == NULL) { | ||
162 | DRM_ERROR("failed to map SCU mmio\n"); | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | /* scu ipc: assert hdmi controller reset */ | ||
167 | writel(0xff11d118, base + 0x0c); | ||
168 | writel(0x7fffffdf, base + 0x80); | ||
169 | writel(0x42005, base + 0x0); | ||
170 | scu_busy_loop(base); | ||
171 | |||
172 | /* scu ipc: de-assert hdmi controller reset */ | ||
173 | writel(0xff11d118, base + 0x0c); | ||
174 | writel(0x7fffffff, base + 0x80); | ||
175 | writel(0x42005, base + 0x0); | ||
176 | scu_busy_loop(base); | ||
177 | |||
178 | iounmap(base); | ||
179 | } | ||
180 | |||
181 | static void oaktrail_hdmi_audio_enable(struct drm_device *dev) | 128 | static void oaktrail_hdmi_audio_enable(struct drm_device *dev) |
182 | { | 129 | { |
183 | struct drm_psb_private *dev_priv = dev->dev_private; | 130 | struct drm_psb_private *dev_priv = dev->dev_private; |
@@ -208,104 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev) | |||
208 | HDMI_READ(HDMI_HCR); | 155 | HDMI_READ(HDMI_HCR); |
209 | } | 156 | } |
210 | 157 | ||
211 | void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode) | ||
212 | { | ||
213 | struct drm_device *dev = crtc->dev; | ||
214 | u32 temp; | ||
215 | |||
216 | switch (mode) { | ||
217 | case DRM_MODE_DPMS_OFF: | ||
218 | /* Disable VGACNTRL */ | ||
219 | REG_WRITE(VGACNTRL, 0x80000000); | ||
220 | |||
221 | /* Disable plane */ | ||
222 | temp = REG_READ(DSPBCNTR); | ||
223 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
224 | REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE); | ||
225 | REG_READ(DSPBCNTR); | ||
226 | /* Flush the plane changes */ | ||
227 | REG_WRITE(DSPBSURF, REG_READ(DSPBSURF)); | ||
228 | REG_READ(DSPBSURF); | ||
229 | } | ||
230 | |||
231 | /* Disable pipe B */ | ||
232 | temp = REG_READ(PIPEBCONF); | ||
233 | if ((temp & PIPEACONF_ENABLE) != 0) { | ||
234 | REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE); | ||
235 | REG_READ(PIPEBCONF); | ||
236 | } | ||
237 | |||
238 | /* Disable LNW Pipes, etc */ | ||
239 | temp = REG_READ(PCH_PIPEBCONF); | ||
240 | if ((temp & PIPEACONF_ENABLE) != 0) { | ||
241 | REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE); | ||
242 | REG_READ(PCH_PIPEBCONF); | ||
243 | } | ||
244 | /* wait for pipe off */ | ||
245 | udelay(150); | ||
246 | /* Disable dpll */ | ||
247 | temp = REG_READ(DPLL_CTRL); | ||
248 | if ((temp & DPLL_PWRDN) == 0) { | ||
249 | REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET)); | ||
250 | REG_WRITE(DPLL_STATUS, 0x1); | ||
251 | } | ||
252 | /* wait for dpll off */ | ||
253 | udelay(150); | ||
254 | break; | ||
255 | case DRM_MODE_DPMS_ON: | ||
256 | case DRM_MODE_DPMS_STANDBY: | ||
257 | case DRM_MODE_DPMS_SUSPEND: | ||
258 | /* Enable dpll */ | ||
259 | temp = REG_READ(DPLL_CTRL); | ||
260 | if ((temp & DPLL_PWRDN) != 0) { | ||
261 | REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET)); | ||
262 | temp = REG_READ(DPLL_CLK_ENABLE); | ||
263 | REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI); | ||
264 | REG_READ(DPLL_CLK_ENABLE); | ||
265 | } | ||
266 | /* wait for dpll warm up */ | ||
267 | udelay(150); | ||
268 | |||
269 | /* Enable pipe B */ | ||
270 | temp = REG_READ(PIPEBCONF); | ||
271 | if ((temp & PIPEACONF_ENABLE) == 0) { | ||
272 | REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE); | ||
273 | REG_READ(PIPEBCONF); | ||
274 | } | ||
275 | |||
276 | /* Enable LNW Pipe B */ | ||
277 | temp = REG_READ(PCH_PIPEBCONF); | ||
278 | if ((temp & PIPEACONF_ENABLE) == 0) { | ||
279 | REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE); | ||
280 | REG_READ(PCH_PIPEBCONF); | ||
281 | } | ||
282 | wait_for_vblank(dev); | ||
283 | |||
284 | /* Enable plane */ | ||
285 | temp = REG_READ(DSPBCNTR); | ||
286 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | ||
287 | REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE); | ||
288 | /* Flush the plane changes */ | ||
289 | REG_WRITE(DSPBSURF, REG_READ(DSPBSURF)); | ||
290 | REG_READ(DSPBSURF); | ||
291 | } | ||
292 | psb_intel_crtc_load_lut(crtc); | ||
293 | } | ||
294 | /* DSPARB */ | ||
295 | REG_WRITE(DSPARB, 0x00003fbf); | ||
296 | /* FW1 */ | ||
297 | REG_WRITE(0x70034, 0x3f880a0a); | ||
298 | /* FW2 */ | ||
299 | REG_WRITE(0x70038, 0x0b060808); | ||
300 | /* FW4 */ | ||
301 | REG_WRITE(0x70050, 0x08030404); | ||
302 | /* FW5 */ | ||
303 | REG_WRITE(0x70054, 0x04040404); | ||
304 | /* LNC Chicken Bits */ | ||
305 | REG_WRITE(0x70400, 0x4000); | ||
306 | } | ||
307 | |||
308 | |||
309 | static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) | 158 | static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) |
310 | { | 159 | { |
311 | static int dpms_mode = -1; | 160 | static int dpms_mode = -1; |
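The deleted oaktrail_crtc_hdmi_dpms() above leans on a common MMIO idiom: read a control register, flip one enable bit, write it back, then rewrite the surface register to latch the plane change and read it again so the write is posted before moving on. A stripped-down, compilable sketch of that idiom follows; the reg_read()/reg_write() helpers and the DISPLAY_PLANE_ENABLE bit position are placeholders standing in for the real REG_READ/REG_WRITE macros and register definitions.

#include <stdint.h>
#include <stdio.h>

#define DISPLAY_PLANE_ENABLE (1u << 31)	/* placeholder bit position */

static uint32_t fake_regs[2];

/* Stand-ins for REG_READ/REG_WRITE; on real hardware the trailing read
 * also acts as a posting read that flushes the preceding write. */
static uint32_t reg_read(unsigned int r)
{
	return fake_regs[r];
}

static void reg_write(unsigned int r, uint32_t v)
{
	fake_regs[r] = v;
}

enum { DSPBCNTR = 0, DSPBSURF = 1 };

/* Same shape as the plane-disable branch of the removed DPMS handler:
 * read-modify-write the enable bit, then touch the surface register to
 * flush the plane change. */
static void plane_disable(void)
{
	uint32_t temp = reg_read(DSPBCNTR);

	if (temp & DISPLAY_PLANE_ENABLE) {
		reg_write(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
		reg_read(DSPBCNTR);
		reg_write(DSPBSURF, reg_read(DSPBSURF));
		reg_read(DSPBSURF);
	}
}

int main(void)
{
	fake_regs[DSPBCNTR] = DISPLAY_PLANE_ENABLE;
	plane_disable();
	printf("DSPBCNTR = %#x\n", (unsigned int)fake_regs[DSPBCNTR]);
	return 0;
}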
@@ -327,182 +176,6 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
327 | HDMI_WRITE(HDMI_VIDEO_REG, temp); | 176 | HDMI_WRITE(HDMI_VIDEO_REG, temp); |
328 | } | 177 | } |
329 | 178 | ||
330 | static unsigned int htotal_calculate(struct drm_display_mode *mode) | ||
331 | { | ||
332 | u32 htotal, new_crtc_htotal; | ||
333 | |||
334 | htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16); | ||
335 | |||
336 | /* | ||
337 | * 1024 x 768 new_crtc_htotal = 0x1024; | ||
338 | * 1280 x 1024 new_crtc_htotal = 0x0c34; | ||
339 | */ | ||
340 | new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock; | ||
341 | |||
342 | return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16); | ||
343 | } | ||
344 | |||
345 | static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target, | ||
346 | int refclk, struct oaktrail_hdmi_clock *best_clock) | ||
347 | { | ||
348 | int np_min, np_max, nr_min, nr_max; | ||
349 | int np, nr, nf; | ||
350 | |||
351 | np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10); | ||
352 | np_max = oaktrail_hdmi_limit.vco.max / (target * 10); | ||
353 | if (np_min < oaktrail_hdmi_limit.np.min) | ||
354 | np_min = oaktrail_hdmi_limit.np.min; | ||
355 | if (np_max > oaktrail_hdmi_limit.np.max) | ||
356 | np_max = oaktrail_hdmi_limit.np.max; | ||
357 | |||
358 | nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max)); | ||
359 | nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min)); | ||
360 | if (nr_min < oaktrail_hdmi_limit.nr.min) | ||
361 | nr_min = oaktrail_hdmi_limit.nr.min; | ||
362 | if (nr_max > oaktrail_hdmi_limit.nr.max) | ||
363 | nr_max = oaktrail_hdmi_limit.nr.max; | ||
364 | |||
365 | np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max)); | ||
366 | nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np)); | ||
367 | nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk); | ||
368 | DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf); | ||
369 | |||
370 | /* | ||
371 | * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000; | ||
372 | * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000; | ||
373 | */ | ||
374 | best_clock->np = np; | ||
375 | best_clock->nr = nr - 1; | ||
376 | best_clock->nf = (nf << 14); | ||
377 | } | ||
378 | |||
379 | int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, | ||
380 | struct drm_display_mode *mode, | ||
381 | struct drm_display_mode *adjusted_mode, | ||
382 | int x, int y, | ||
383 | struct drm_framebuffer *old_fb) | ||
384 | { | ||
385 | struct drm_device *dev = crtc->dev; | ||
386 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
387 | struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; | ||
388 | int pipe = 1; | ||
389 | int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | ||
390 | int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | ||
391 | int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | ||
392 | int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; | ||
393 | int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; | ||
394 | int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; | ||
395 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; | ||
396 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; | ||
397 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | ||
398 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | ||
399 | int refclk; | ||
400 | struct oaktrail_hdmi_clock clock; | ||
401 | u32 dspcntr, pipeconf, dpll, temp; | ||
402 | int dspcntr_reg = DSPBCNTR; | ||
403 | |||
404 | /* Disable the VGA plane that we never use */ | ||
405 | REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); | ||
406 | |||
407 | /* XXX: Disable the panel fitter if it was on our pipe */ | ||
408 | |||
409 | /* Disable dpll if necessary */ | ||
410 | dpll = REG_READ(DPLL_CTRL); | ||
411 | if ((dpll & DPLL_PWRDN) == 0) { | ||
412 | REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET)); | ||
413 | REG_WRITE(DPLL_DIV_CTRL, 0x00000000); | ||
414 | REG_WRITE(DPLL_STATUS, 0x1); | ||
415 | } | ||
416 | udelay(150); | ||
417 | |||
418 | /* reset controller: FIXME - can we sort out the ioremap mess ? */ | ||
419 | iounmap(hdmi_dev->regs); | ||
420 | oaktrail_hdmi_reset(dev); | ||
421 | |||
422 | /* program and enable dpll */ | ||
423 | refclk = 25000; | ||
424 | oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock); | ||
425 | |||
426 | /* Setting DPLL */ | ||
427 | dpll = REG_READ(DPLL_CTRL); | ||
428 | dpll &= ~DPLL_PDIV_MASK; | ||
429 | dpll &= ~(DPLL_PWRDN | DPLL_RESET); | ||
430 | REG_WRITE(DPLL_CTRL, 0x00000008); | ||
431 | REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr)); | ||
432 | REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1)); | ||
433 | REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN)); | ||
434 | REG_WRITE(DPLL_UPDATE, 0x80000000); | ||
435 | REG_WRITE(DPLL_CLK_ENABLE, 0x80050102); | ||
436 | udelay(150); | ||
437 | |||
438 | hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len); | ||
439 | if (hdmi_dev->regs == NULL) { | ||
440 | DRM_ERROR("failed to do hdmi mmio mapping\n"); | ||
441 | return -ENOMEM; | ||
442 | } | ||
443 | |||
444 | /* configure HDMI */ | ||
445 | HDMI_WRITE(0x1004, 0x1fd); | ||
446 | HDMI_WRITE(0x2000, 0x1); | ||
447 | HDMI_WRITE(0x2008, 0x0); | ||
448 | HDMI_WRITE(0x3130, 0x8); | ||
449 | HDMI_WRITE(0x101c, 0x1800810); | ||
450 | |||
451 | temp = htotal_calculate(adjusted_mode); | ||
452 | REG_WRITE(htot_reg, temp); | ||
453 | REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
454 | REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
455 | REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
456 | REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
457 | REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
458 | REG_WRITE(pipesrc_reg, | ||
459 | ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); | ||
460 | |||
461 | REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
462 | REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
463 | REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
464 | REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
465 | REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
466 | REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
467 | REG_WRITE(PCH_PIPEBSRC, | ||
468 | ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); | ||
469 | |||
470 | temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; | ||
471 | HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp); | ||
472 | |||
473 | REG_WRITE(dspsize_reg, | ||
474 | ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); | ||
475 | REG_WRITE(dsppos_reg, 0); | ||
476 | |||
477 | /* Flush the plane changes */ | ||
478 | { | ||
479 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
480 | crtc_funcs->mode_set_base(crtc, x, y, old_fb); | ||
481 | } | ||
482 | |||
483 | /* Set up the display plane register */ | ||
484 | dspcntr = REG_READ(dspcntr_reg); | ||
485 | dspcntr |= DISPPLANE_GAMMA_ENABLE; | ||
486 | dspcntr |= DISPPLANE_SEL_PIPE_B; | ||
487 | dspcntr |= DISPLAY_PLANE_ENABLE; | ||
488 | |||
489 | /* setup pipeconf */ | ||
490 | pipeconf = REG_READ(pipeconf_reg); | ||
491 | pipeconf |= PIPEACONF_ENABLE; | ||
492 | |||
493 | REG_WRITE(pipeconf_reg, pipeconf); | ||
494 | REG_READ(pipeconf_reg); | ||
495 | |||
496 | REG_WRITE(PCH_PIPEBCONF, pipeconf); | ||
497 | REG_READ(PCH_PIPEBCONF); | ||
498 | wait_for_vblank(dev); | ||
499 | |||
500 | REG_WRITE(dspcntr_reg, dspcntr); | ||
501 | wait_for_vblank(dev); | ||
502 | |||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, | 179 | static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, |
507 | struct drm_display_mode *mode) | 180 | struct drm_display_mode *mode) |
508 | { | 181 | { |
@@ -692,7 +365,7 @@ failed_connector: | |||
692 | 365 | ||
693 | static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { | 366 | static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { |
694 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) }, | 367 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) }, |
695 | {} | 368 | { 0 } |
696 | }; | 369 | }; |
697 | 370 | ||
698 | void oaktrail_hdmi_setup(struct drm_device *dev) | 371 | void oaktrail_hdmi_setup(struct drm_device *dev) |
@@ -766,6 +439,7 @@ void oaktrail_hdmi_save(struct drm_device *dev) | |||
766 | { | 439 | { |
767 | struct drm_psb_private *dev_priv = dev->dev_private; | 440 | struct drm_psb_private *dev_priv = dev->dev_private; |
768 | struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; | 441 | struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; |
442 | struct psb_state *regs = &dev_priv->regs.psb; | ||
769 | int i; | 443 | int i; |
770 | 444 | ||
771 | /* dpll */ | 445 | /* dpll */ |
@@ -776,14 +450,14 @@ void oaktrail_hdmi_save(struct drm_device *dev) | |||
776 | hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE); | 450 | hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE); |
777 | 451 | ||
778 | /* pipe B */ | 452 | /* pipe B */ |
779 | dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF); | 453 | regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF); |
780 | dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC); | 454 | regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC); |
781 | dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B); | 455 | regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B); |
782 | dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B); | 456 | regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B); |
783 | dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B); | 457 | regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B); |
784 | dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B); | 458 | regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B); |
785 | dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B); | 459 | regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B); |
786 | dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B); | 460 | regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B); |
787 | 461 | ||
788 | hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF); | 462 | hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF); |
789 | hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC); | 463 | hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC); |
@@ -795,21 +469,21 @@ void oaktrail_hdmi_save(struct drm_device *dev) | |||
795 | hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B); | 469 | hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B); |
796 | 470 | ||
797 | /* plane */ | 471 | /* plane */ |
798 | dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR); | 472 | regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR); |
799 | dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE); | 473 | regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE); |
800 | dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE); | 474 | regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE); |
801 | dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF); | 475 | regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF); |
802 | dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF); | 476 | regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF); |
803 | dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF); | 477 | regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF); |
804 | 478 | ||
805 | /* cursor B */ | 479 | /* cursor B */ |
806 | dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR); | 480 | regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR); |
807 | dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE); | 481 | regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE); |
808 | dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS); | 482 | regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS); |
809 | 483 | ||
810 | /* save palette */ | 484 | /* save palette */ |
811 | for (i = 0; i < 256; i++) | 485 | for (i = 0; i < 256; i++) |
812 | dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2)); | 486 | regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2)); |
813 | } | 487 | } |
814 | 488 | ||
815 | /* restore HDMI register state */ | 489 | /* restore HDMI register state */ |
@@ -817,6 +491,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev) | |||
817 | { | 491 | { |
818 | struct drm_psb_private *dev_priv = dev->dev_private; | 492 | struct drm_psb_private *dev_priv = dev->dev_private; |
819 | struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; | 493 | struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; |
494 | struct psb_state *regs = &dev_priv->regs.psb; | ||
820 | int i; | 495 | int i; |
821 | 496 | ||
822 | /* dpll */ | 497 | /* dpll */ |
@@ -828,13 +503,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev) | |||
828 | DRM_UDELAY(150); | 503 | DRM_UDELAY(150); |
829 | 504 | ||
830 | /* pipe */ | 505 | /* pipe */ |
831 | PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC); | 506 | PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC); |
832 | PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B); | 507 | PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B); |
833 | PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B); | 508 | PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B); |
834 | PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B); | 509 | PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B); |
835 | PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B); | 510 | PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B); |
836 | PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B); | 511 | PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B); |
837 | PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B); | 512 | PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B); |
838 | 513 | ||
839 | PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC); | 514 | PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC); |
840 | PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B); | 515 | PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B); |
@@ -844,22 +519,22 @@ void oaktrail_hdmi_restore(struct drm_device *dev) | |||
844 | PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B); | 519 | PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B); |
845 | PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B); | 520 | PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B); |
846 | 521 | ||
847 | PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF); | 522 | PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF); |
848 | PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF); | 523 | PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF); |
849 | 524 | ||
850 | /* plane */ | 525 | /* plane */ |
851 | PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF); | 526 | PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF); |
852 | PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE); | 527 | PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE); |
853 | PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF); | 528 | PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF); |
854 | PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR); | 529 | PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR); |
855 | PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF); | 530 | PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF); |
856 | 531 | ||
857 | /* cursor B */ | 532 | /* cursor B */ |
858 | PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR); | 533 | PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR); |
859 | PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS); | 534 | PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS); |
860 | PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE); | 535 | PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE); |
861 | 536 | ||
862 | /* restore palette */ | 537 | /* restore palette */ |
863 | for (i = 0; i < 256; i++) | 538 | for (i = 0; i < 256; i++) |
864 | PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2)); | 539 | PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2)); |
865 | } | 540 | } |
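The removed oaktrail_hdmi_find_dpll() derives the PLL dividers from the target pixel clock and the 25 MHz reference the mode_set code used, via a chain of rounded integer divisions. The small program below reproduces that arithmetic so the numbers can be checked in isolation: the VCO/NP/NR limits are placeholders (the real *_MIN/*_MAX constants are outside this excerpt), the original's one-sided limit adjustment is simplified to a plain clamp, and the np_max/nr_min bounds it also computes are omitted because only np_min and nr_max feed the final result.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

/* Placeholder limits, not the hardware's real constraints. */
#define VCO_MIN		320000
#define NP_MIN		1
#define NP_MAX		15
#define NR_MIN		1
#define NR_MAX		64

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Follows the rounded-division chain of the removed helper; target and
 * refclk are in kHz. The original stores nr - 1 and nf << 14. */
static void find_dpll(int target, int refclk, int *np, int *nr, int *nf)
{
	int np_min = clamp_int(DIV_ROUND_UP(VCO_MIN, target * 10),
			       NP_MIN, NP_MAX);
	int nr_max = clamp_int(DIV_ROUND_UP(refclk * 1000, target * 10 * np_min),
			       NR_MIN, NR_MAX);

	*np = DIV_ROUND_UP(refclk * 1000, target * 10 * nr_max);
	*nr = DIV_ROUND_UP(refclk * 1000, target * 10 * *np);
	*nf = DIV_ROUND_CLOSEST(target * 10 * *np * *nr, refclk);
}

int main(void)
{
	int np, nr, nf;

	/* 65 MHz pixel clock (1024x768 @ 60 Hz) against a 25 MHz refclk. */
	find_dpll(65000, 25000, &np, &nr, &nf);
	printf("np=%d nr=%d (stored as %#x) nf=%d (stored as %#x)\n",
	       np, nr, (unsigned int)(nr - 1), nf, (unsigned int)(nf << 14));
	return 0;
}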
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index 705440874ac0..5e84fbde749b 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | |||
@@ -127,7 +127,7 @@ static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap, | |||
127 | { | 127 | { |
128 | struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap); | 128 | struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap); |
129 | struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev; | 129 | struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev; |
130 | int i, err = 0; | 130 | int i; |
131 | 131 | ||
132 | mutex_lock(&i2c_dev->i2c_lock); | 132 | mutex_lock(&i2c_dev->i2c_lock); |
133 | 133 | ||
@@ -139,9 +139,9 @@ static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap, | |||
139 | for (i = 0; i < num; i++) { | 139 | for (i = 0; i < num; i++) { |
140 | if (pmsg->len && pmsg->buf) { | 140 | if (pmsg->len && pmsg->buf) { |
141 | if (pmsg->flags & I2C_M_RD) | 141 | if (pmsg->flags & I2C_M_RD) |
142 | err = xfer_read(adap, pmsg); | 142 | xfer_read(adap, pmsg); |
143 | else | 143 | else |
144 | err = xfer_write(adap, pmsg); | 144 | xfer_write(adap, pmsg); |
145 | } | 145 | } |
146 | pmsg++; /* next message */ | 146 | pmsg++; /* next message */ |
147 | } | 147 | } |
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 238bbe105304..654f32b22b21 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c | |||
@@ -192,7 +192,7 @@ static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev) | |||
192 | 192 | ||
193 | gma_power_end(dev); | 193 | gma_power_end(dev); |
194 | } else | 194 | } else |
195 | ret = ((dev_priv->saveBLC_PWM_CTL & | 195 | ret = ((dev_priv->regs.saveBLC_PWM_CTL & |
196 | BACKLIGHT_MODULATION_FREQ_MASK) >> | 196 | BACKLIGHT_MODULATION_FREQ_MASK) >> |
197 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; | 197 | BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; |
198 | 198 | ||
@@ -331,7 +331,6 @@ void oaktrail_lvds_init(struct drm_device *dev, | |||
331 | struct drm_encoder *encoder; | 331 | struct drm_encoder *encoder; |
332 | struct drm_psb_private *dev_priv = dev->dev_private; | 332 | struct drm_psb_private *dev_priv = dev->dev_private; |
333 | struct edid *edid; | 333 | struct edid *edid; |
334 | int ret = 0; | ||
335 | struct i2c_adapter *i2c_adap; | 334 | struct i2c_adapter *i2c_adap; |
336 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 335 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
337 | 336 | ||
@@ -400,7 +399,7 @@ void oaktrail_lvds_init(struct drm_device *dev, | |||
400 | if (edid) { | 399 | if (edid) { |
401 | drm_mode_connector_update_edid_property(connector, | 400 | drm_mode_connector_update_edid_property(connector, |
402 | edid); | 401 | edid); |
403 | ret = drm_add_edid_modes(connector, edid); | 402 | drm_add_edid_modes(connector, edid); |
404 | kfree(edid); | 403 | kfree(edid); |
405 | } | 404 | } |
406 | 405 | ||
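oaktrail_lvds_get_max_backlight() falls back to the cached regs.saveBLC_PWM_CTL value when the display island is powered down, decoding the maximum backlight level from the PWM modulation-frequency field. A tiny stand-alone version of that decode is below; the mask and shift values are placeholders, since the real BACKLIGHT_MODULATION_FREQ_* definitions are not part of this excerpt.

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout for the BLC_PWM_CTL frequency field. */
#define BACKLIGHT_MODULATION_FREQ_SHIFT	17
#define BACKLIGHT_MODULATION_FREQ_MASK	(0x7fffu << BACKLIGHT_MODULATION_FREQ_SHIFT)

/* Same computation as the cached-register fallback path: extract the
 * modulation frequency field and double it to get the maximum level. */
static uint32_t max_backlight_from_cache(uint32_t saved_blc_pwm_ctl)
{
	return ((saved_blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
}

int main(void)
{
	uint32_t blc = 0x1234u << BACKLIGHT_MODULATION_FREQ_SHIFT;

	printf("max backlight = %u\n",
	       (unsigned int)max_backlight_from_cache(blc));
	return 0;
}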
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c index 94025693bae1..889b854751da 100644 --- a/drivers/gpu/drm/gma500/power.c +++ b/drivers/gpu/drm/gma500/power.c | |||
@@ -58,7 +58,8 @@ void gma_power_init(struct drm_device *dev) | |||
58 | spin_lock_init(&power_ctrl_lock); | 58 | spin_lock_init(&power_ctrl_lock); |
59 | mutex_init(&power_mutex); | 59 | mutex_init(&power_mutex); |
60 | 60 | ||
61 | dev_priv->ops->init_pm(dev); | 61 | if (dev_priv->ops->init_pm) |
62 | dev_priv->ops->init_pm(dev); | ||
62 | } | 63 | } |
63 | 64 | ||
64 | /** | 65 | /** |
@@ -101,9 +102,6 @@ static void gma_resume_display(struct pci_dev *pdev) | |||
101 | struct drm_device *dev = pci_get_drvdata(pdev); | 102 | struct drm_device *dev = pci_get_drvdata(pdev); |
102 | struct drm_psb_private *dev_priv = dev->dev_private; | 103 | struct drm_psb_private *dev_priv = dev->dev_private; |
103 | 104 | ||
104 | if (dev_priv->suspended == false) | ||
105 | return; | ||
106 | |||
107 | /* turn on the display power island */ | 105 | /* turn on the display power island */ |
108 | dev_priv->ops->power_up(dev); | 106 | dev_priv->ops->power_up(dev); |
109 | dev_priv->suspended = false; | 107 | dev_priv->suspended = false; |
@@ -132,9 +130,9 @@ static void gma_suspend_pci(struct pci_dev *pdev) | |||
132 | 130 | ||
133 | pci_save_state(pdev); | 131 | pci_save_state(pdev); |
134 | pci_read_config_dword(pdev, 0x5C, &bsm); | 132 | pci_read_config_dword(pdev, 0x5C, &bsm); |
135 | dev_priv->saveBSM = bsm; | 133 | dev_priv->regs.saveBSM = bsm; |
136 | pci_read_config_dword(pdev, 0xFC, &vbt); | 134 | pci_read_config_dword(pdev, 0xFC, &vbt); |
137 | dev_priv->saveVBT = vbt; | 135 | dev_priv->regs.saveVBT = vbt; |
138 | pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); | 136 | pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); |
139 | pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); | 137 | pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); |
140 | 138 | ||
@@ -162,8 +160,8 @@ static bool gma_resume_pci(struct pci_dev *pdev) | |||
162 | 160 | ||
163 | pci_set_power_state(pdev, PCI_D0); | 161 | pci_set_power_state(pdev, PCI_D0); |
164 | pci_restore_state(pdev); | 162 | pci_restore_state(pdev); |
165 | pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM); | 163 | pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM); |
166 | pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT); | 164 | pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT); |
167 | /* restoring MSI address and data in PCIx space */ | 165 | /* restoring MSI address and data in PCIx space */ |
168 | pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); | 166 | pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); |
169 | pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data); | 167 | pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data); |
@@ -195,6 +193,7 @@ int gma_power_suspend(struct device *_dev) | |||
195 | if (!dev_priv->suspended) { | 193 | if (!dev_priv->suspended) { |
196 | if (dev_priv->display_count) { | 194 | if (dev_priv->display_count) { |
197 | mutex_unlock(&power_mutex); | 195 | mutex_unlock(&power_mutex); |
196 | dev_err(dev->dev, "GPU hardware busy, cannot suspend\n"); | ||
198 | return -EBUSY; | 197 | return -EBUSY; |
199 | } | 198 | } |
200 | psb_irq_uninstall(dev); | 199 | psb_irq_uninstall(dev); |
@@ -302,7 +301,7 @@ int psb_runtime_suspend(struct device *dev) | |||
302 | 301 | ||
303 | int psb_runtime_resume(struct device *dev) | 302 | int psb_runtime_resume(struct device *dev) |
304 | { | 303 | { |
305 | return gma_power_resume(dev);; | 304 | return gma_power_resume(dev); |
306 | } | 305 | } |
307 | 306 | ||
308 | int psb_runtime_idle(struct device *dev) | 307 | int psb_runtime_idle(struct device *dev) |
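gma_power_init() now treats init_pm as an optional hook, which is what lets the Oaktrail psb_ops table drop its .init_pm entry earlier in this series of hunks. The sketch below shows the pattern in isolation with invented demo_ops/demo_device types; only the NULL-guarded call mirrors the patched code.

#include <stdio.h>

struct demo_device;

/* Per-chip operation table in the spirit of struct psb_ops; hooks a chip
 * does not need are simply left NULL (demo types, not the real ones). */
struct demo_ops {
	void (*init_pm)(struct demo_device *dev);	/* optional */
	int (*save_regs)(struct demo_device *dev);	/* required */
};

struct demo_device {
	const struct demo_ops *ops;
};

static int demo_save_regs(struct demo_device *dev)
{
	(void)dev;
	printf("saving display registers\n");
	return 0;
}

/* Oaktrail-style table after this patch: no .init_pm entry, so the
 * function pointer stays NULL. */
static const struct demo_ops oaktrail_like_ops = {
	.save_regs = demo_save_regs,
};

/* Mirrors the guarded call added to gma_power_init(). */
static void demo_power_init(struct demo_device *dev)
{
	if (dev->ops->init_pm)
		dev->ops->init_pm(dev);
}

int main(void)
{
	struct demo_device dev = { .ops = &oaktrail_like_ops };

	demo_power_init(&dev);		/* safe even without init_pm */
	return dev.ops->save_regs(&dev);
}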
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index e5f5906172b0..95d163e4f1f4 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c | |||
@@ -177,16 +177,17 @@ static int psb_save_display_registers(struct drm_device *dev) | |||
177 | struct drm_psb_private *dev_priv = dev->dev_private; | 177 | struct drm_psb_private *dev_priv = dev->dev_private; |
178 | struct drm_crtc *crtc; | 178 | struct drm_crtc *crtc; |
179 | struct drm_connector *connector; | 179 | struct drm_connector *connector; |
180 | struct psb_state *regs = &dev_priv->regs.psb; | ||
180 | 181 | ||
181 | /* Display arbitration control + watermarks */ | 182 | /* Display arbitration control + watermarks */ |
182 | dev_priv->saveDSPARB = PSB_RVDC32(DSPARB); | 183 | regs->saveDSPARB = PSB_RVDC32(DSPARB); |
183 | dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1); | 184 | regs->saveDSPFW1 = PSB_RVDC32(DSPFW1); |
184 | dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2); | 185 | regs->saveDSPFW2 = PSB_RVDC32(DSPFW2); |
185 | dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3); | 186 | regs->saveDSPFW3 = PSB_RVDC32(DSPFW3); |
186 | dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4); | 187 | regs->saveDSPFW4 = PSB_RVDC32(DSPFW4); |
187 | dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5); | 188 | regs->saveDSPFW5 = PSB_RVDC32(DSPFW5); |
188 | dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6); | 189 | regs->saveDSPFW6 = PSB_RVDC32(DSPFW6); |
189 | dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); | 190 | regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); |
190 | 191 | ||
191 | /* Save crtc and output state */ | 192 | /* Save crtc and output state */ |
192 | mutex_lock(&dev->mode_config.mutex); | 193 | mutex_lock(&dev->mode_config.mutex); |
@@ -213,16 +214,17 @@ static int psb_restore_display_registers(struct drm_device *dev) | |||
213 | struct drm_psb_private *dev_priv = dev->dev_private; | 214 | struct drm_psb_private *dev_priv = dev->dev_private; |
214 | struct drm_crtc *crtc; | 215 | struct drm_crtc *crtc; |
215 | struct drm_connector *connector; | 216 | struct drm_connector *connector; |
217 | struct psb_state *regs = &dev_priv->regs.psb; | ||
216 | 218 | ||
217 | /* Display arbitration + watermarks */ | 219 | /* Display arbitration + watermarks */ |
218 | PSB_WVDC32(dev_priv->saveDSPARB, DSPARB); | 220 | PSB_WVDC32(regs->saveDSPARB, DSPARB); |
219 | PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1); | 221 | PSB_WVDC32(regs->saveDSPFW1, DSPFW1); |
220 | PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2); | 222 | PSB_WVDC32(regs->saveDSPFW2, DSPFW2); |
221 | PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3); | 223 | PSB_WVDC32(regs->saveDSPFW3, DSPFW3); |
222 | PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4); | 224 | PSB_WVDC32(regs->saveDSPFW4, DSPFW4); |
223 | PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5); | 225 | PSB_WVDC32(regs->saveDSPFW5, DSPFW5); |
224 | PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6); | 226 | PSB_WVDC32(regs->saveDSPFW6, DSPFW6); |
225 | PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT); | 227 | PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT); |
226 | 228 | ||
227 | /*make sure VGA plane is off. it initializes to on after reset!*/ | 229 | /*make sure VGA plane is off. it initializes to on after reset!*/ |
228 | PSB_WVDC32(0x80000000, VGACNTRL); | 230 | PSB_WVDC32(0x80000000, VGACNTRL); |
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index f14768f2b364..c34adf9d910a 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c | |||
@@ -60,6 +60,16 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { | |||
60 | /* Atom E620 */ | 60 | /* Atom E620 */ |
61 | { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, | 61 | { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops}, |
62 | #endif | 62 | #endif |
63 | #if defined(CONFIG_DRM_MEDFIELD) | ||
64 | {0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
65 | {0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
66 | {0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
67 | {0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
68 | {0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
69 | {0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
70 | {0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
71 | {0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops}, | ||
72 | #endif | ||
63 | #if defined(CONFIG_DRM_GMA3600) | 73 | #if defined(CONFIG_DRM_GMA3600) |
64 | { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, | 74 | { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, |
65 | { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, | 75 | { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, |
@@ -70,7 +80,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { | |||
70 | { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, | 80 | { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, |
71 | { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, | 81 | { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops}, |
72 | #endif | 82 | #endif |
73 | { 0, 0, 0} | 83 | { 0, } |
74 | }; | 84 | }; |
75 | MODULE_DEVICE_TABLE(pci, pciidlist); | 85 | MODULE_DEVICE_TABLE(pci, pciidlist); |
76 | 86 | ||
@@ -78,27 +88,27 @@ MODULE_DEVICE_TABLE(pci, pciidlist); | |||
78 | * Standard IOCTLs. | 88 | * Standard IOCTLs. |
79 | */ | 89 | */ |
80 | 90 | ||
81 | #define DRM_IOCTL_PSB_ADB \ | 91 | #define DRM_IOCTL_GMA_ADB \ |
82 | DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t) | 92 | DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t) |
83 | #define DRM_IOCTL_PSB_MODE_OPERATION \ | 93 | #define DRM_IOCTL_GMA_MODE_OPERATION \ |
84 | DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \ | 94 | DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \ |
85 | struct drm_psb_mode_operation_arg) | 95 | struct drm_psb_mode_operation_arg) |
86 | #define DRM_IOCTL_PSB_STOLEN_MEMORY \ | 96 | #define DRM_IOCTL_GMA_STOLEN_MEMORY \ |
87 | DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \ | 97 | DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \ |
88 | struct drm_psb_stolen_memory_arg) | 98 | struct drm_psb_stolen_memory_arg) |
89 | #define DRM_IOCTL_PSB_GAMMA \ | 99 | #define DRM_IOCTL_GMA_GAMMA \ |
90 | DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \ | 100 | DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \ |
91 | struct drm_psb_dpst_lut_arg) | 101 | struct drm_psb_dpst_lut_arg) |
92 | #define DRM_IOCTL_PSB_DPST_BL \ | 102 | #define DRM_IOCTL_GMA_DPST_BL \ |
93 | DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \ | 103 | DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \ |
94 | uint32_t) | 104 | uint32_t) |
95 | #define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \ | 105 | #define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID \ |
96 | DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \ | 106 | DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \ |
97 | struct drm_psb_get_pipe_from_crtc_id_arg) | 107 | struct drm_psb_get_pipe_from_crtc_id_arg) |
98 | #define DRM_IOCTL_PSB_GEM_CREATE \ | 108 | #define DRM_IOCTL_GMA_GEM_CREATE \ |
99 | DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \ | 109 | DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \ |
100 | struct drm_psb_gem_create) | 110 | struct drm_psb_gem_create) |
101 | #define DRM_IOCTL_PSB_GEM_MMAP \ | 111 | #define DRM_IOCTL_GMA_GEM_MMAP \ |
102 | DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \ | 112 | DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \ |
103 | struct drm_psb_gem_mmap) | 113 | struct drm_psb_gem_mmap) |
104 | 114 | ||
@@ -113,22 +123,19 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data, | |||
113 | static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, | 123 | static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, |
114 | struct drm_file *file_priv); | 124 | struct drm_file *file_priv); |
115 | 125 | ||
116 | #define PSB_IOCTL_DEF(ioctl, func, flags) \ | ||
117 | [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} | ||
118 | |||
119 | static struct drm_ioctl_desc psb_ioctls[] = { | 126 | static struct drm_ioctl_desc psb_ioctls[] = { |
120 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH), | 127 | DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH), |
121 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl, | 128 | DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl, |
122 | DRM_AUTH), | 129 | DRM_AUTH), |
123 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl, | 130 | DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl, |
124 | DRM_AUTH), | 131 | DRM_AUTH), |
125 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH), | 132 | DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH), |
126 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH), | 133 | DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH), |
127 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID, | 134 | DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID, |
128 | psb_intel_get_pipe_from_crtc_id, 0), | 135 | psb_intel_get_pipe_from_crtc_id, 0), |
129 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl, | 136 | DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl, |
130 | DRM_UNLOCKED | DRM_AUTH), | 137 | DRM_UNLOCKED | DRM_AUTH), |
131 | PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl, | 138 | DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl, |
132 | DRM_UNLOCKED | DRM_AUTH), | 139 | DRM_UNLOCKED | DRM_AUTH), |
133 | }; | 140 | }; |
134 | 141 | ||
@@ -268,10 +275,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) | |||
268 | { | 275 | { |
269 | struct drm_psb_private *dev_priv; | 276 | struct drm_psb_private *dev_priv; |
270 | unsigned long resource_start; | 277 | unsigned long resource_start; |
271 | struct psb_gtt *pg; | ||
272 | unsigned long irqflags; | 278 | unsigned long irqflags; |
273 | int ret = -ENOMEM; | 279 | int ret = -ENOMEM; |
274 | uint32_t tt_pages; | ||
275 | struct drm_connector *connector; | 280 | struct drm_connector *connector; |
276 | struct psb_intel_encoder *psb_intel_encoder; | 281 | struct psb_intel_encoder *psb_intel_encoder; |
277 | 282 | ||
@@ -283,6 +288,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) | |||
283 | dev_priv->dev = dev; | 288 | dev_priv->dev = dev; |
284 | dev->dev_private = (void *) dev_priv; | 289 | dev->dev_private = (void *) dev_priv; |
285 | 290 | ||
291 | pci_set_master(dev->pdev); | ||
292 | |||
286 | if (!IS_PSB(dev)) { | 293 | if (!IS_PSB(dev)) { |
287 | if (pci_enable_msi(dev->pdev)) | 294 | if (pci_enable_msi(dev->pdev)) |
288 | dev_warn(dev->dev, "Enabling MSI failed!\n"); | 295 | dev_warn(dev->dev, "Enabling MSI failed!\n"); |
@@ -327,12 +334,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) | |||
327 | if (!dev_priv->mmu) | 334 | if (!dev_priv->mmu) |
328 | goto out_err; | 335 | goto out_err; |
329 | 336 | ||
330 | pg = &dev_priv->gtt; | ||
331 | |||
332 | tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? | ||
333 | (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT; | ||
334 | |||
335 | |||
336 | dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); | 337 | dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); |
337 | if (!dev_priv->pf_pd) | 338 | if (!dev_priv->pf_pd) |
338 | goto out_err; | 339 | goto out_err; |
@@ -409,7 +410,7 @@ out_err: | |||
409 | return ret; | 410 | return ret; |
410 | } | 411 | } |
411 | 412 | ||
412 | int psb_driver_device_is_agp(struct drm_device *dev) | 413 | static int psb_driver_device_is_agp(struct drm_device *dev) |
413 | { | 414 | { |
414 | return 0; | 415 | return 0; |
415 | } | 416 | } |
@@ -600,7 +601,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
600 | /* When a client dies: | 601 | /* When a client dies: |
601 | * - Check for and clean up flipped page state | 602 | * - Check for and clean up flipped page state |
602 | */ | 603 | */ |
603 | void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv) | 604 | static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv) |
604 | { | 605 | { |
605 | } | 606 | } |
606 | 607 | ||
@@ -677,7 +678,9 @@ static struct pci_driver psb_pci_driver = { | |||
677 | .id_table = pciidlist, | 678 | .id_table = pciidlist, |
678 | .probe = psb_probe, | 679 | .probe = psb_probe, |
679 | .remove = psb_remove, | 680 | .remove = psb_remove, |
680 | .driver.pm = &psb_pm_ops, | 681 | .driver = { |
682 | .pm = &psb_pm_ops, | ||
683 | } | ||
681 | }; | 684 | }; |
682 | 685 | ||
683 | static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 686 | static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
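The removed PSB_IOCTL_DEF macro and the DRM_IOCTL_DEF_DRV entries that replace it build the same thing: an array of ioctl descriptors indexed by the command number relative to the driver's command base, filled in with designated initializers. The stand-alone sketch below uses simplified stand-in types (COMMAND_BASE, struct ioctl_desc, IOCTL_DEF) rather than the real DRM definitions, to show just that indexing trick.

#include <stdio.h>

/* Simplified stand-ins for the DRM command plumbing. */
#define COMMAND_BASE	0x40
#define CMD_ADB		0
#define CMD_GAMMA	1

typedef int (*ioctl_func)(void *dev, void *data);

struct ioctl_desc {
	unsigned int cmd;
	ioctl_func func;
};

/* Designated initializer keyed by the driver-relative command number,
 * the same idea as PSB_IOCTL_DEF / DRM_IOCTL_DEF_DRV. */
#define IOCTL_DEF(nr, f) \
	[(nr)] = { .cmd = (nr) + COMMAND_BASE, .func = (f) }

static int adb_ioctl(void *dev, void *data)
{
	(void)dev; (void)data;
	return 0;
}

static int gamma_ioctl(void *dev, void *data)
{
	(void)dev; (void)data;
	return 0;
}

static const struct ioctl_desc ioctls[] = {
	IOCTL_DEF(CMD_ADB, adb_ioctl),
	IOCTL_DEF(CMD_GAMMA, gamma_ioctl),
};

int main(void)
{
	unsigned int nr = CMD_GAMMA;

	/* Dispatch: index straight into the table by the relative number. */
	printf("cmd %#x -> %d\n", ioctls[nr].cmd, ioctls[nr].func(NULL, NULL));
	return 0;
}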
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index eb1568a0da95..40ce2c9bc2e4 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h | |||
@@ -276,6 +276,217 @@ struct intel_gmbus { | |||
276 | u32 reg0; | 276 | u32 reg0; |
277 | }; | 277 | }; |
278 | 278 | ||
279 | /* | ||
280 | * Register save state. This is used to hold the context when the | ||
281 | * device is powered off. In the case of Oaktrail this can (but does not | ||
282 | * yet) include screen blank. Operations occuring during the save | ||
283 | * update the register cache instead. | ||
284 | */ | ||
285 | struct psb_state { | ||
286 | uint32_t saveDSPACNTR; | ||
287 | uint32_t saveDSPBCNTR; | ||
288 | uint32_t savePIPEACONF; | ||
289 | uint32_t savePIPEBCONF; | ||
290 | uint32_t savePIPEASRC; | ||
291 | uint32_t savePIPEBSRC; | ||
292 | uint32_t saveFPA0; | ||
293 | uint32_t saveFPA1; | ||
294 | uint32_t saveDPLL_A; | ||
295 | uint32_t saveDPLL_A_MD; | ||
296 | uint32_t saveHTOTAL_A; | ||
297 | uint32_t saveHBLANK_A; | ||
298 | uint32_t saveHSYNC_A; | ||
299 | uint32_t saveVTOTAL_A; | ||
300 | uint32_t saveVBLANK_A; | ||
301 | uint32_t saveVSYNC_A; | ||
302 | uint32_t saveDSPASTRIDE; | ||
303 | uint32_t saveDSPASIZE; | ||
304 | uint32_t saveDSPAPOS; | ||
305 | uint32_t saveDSPABASE; | ||
306 | uint32_t saveDSPASURF; | ||
307 | uint32_t saveDSPASTATUS; | ||
308 | uint32_t saveFPB0; | ||
309 | uint32_t saveFPB1; | ||
310 | uint32_t saveDPLL_B; | ||
311 | uint32_t saveDPLL_B_MD; | ||
312 | uint32_t saveHTOTAL_B; | ||
313 | uint32_t saveHBLANK_B; | ||
314 | uint32_t saveHSYNC_B; | ||
315 | uint32_t saveVTOTAL_B; | ||
316 | uint32_t saveVBLANK_B; | ||
317 | uint32_t saveVSYNC_B; | ||
318 | uint32_t saveDSPBSTRIDE; | ||
319 | uint32_t saveDSPBSIZE; | ||
320 | uint32_t saveDSPBPOS; | ||
321 | uint32_t saveDSPBBASE; | ||
322 | uint32_t saveDSPBSURF; | ||
323 | uint32_t saveDSPBSTATUS; | ||
324 | uint32_t saveVCLK_DIVISOR_VGA0; | ||
325 | uint32_t saveVCLK_DIVISOR_VGA1; | ||
326 | uint32_t saveVCLK_POST_DIV; | ||
327 | uint32_t saveVGACNTRL; | ||
328 | uint32_t saveADPA; | ||
329 | uint32_t saveLVDS; | ||
330 | uint32_t saveDVOA; | ||
331 | uint32_t saveDVOB; | ||
332 | uint32_t saveDVOC; | ||
333 | uint32_t savePP_ON; | ||
334 | uint32_t savePP_OFF; | ||
335 | uint32_t savePP_CONTROL; | ||
336 | uint32_t savePP_CYCLE; | ||
337 | uint32_t savePFIT_CONTROL; | ||
338 | uint32_t savePaletteA[256]; | ||
339 | uint32_t savePaletteB[256]; | ||
340 | uint32_t saveCLOCKGATING; | ||
341 | uint32_t saveDSPARB; | ||
342 | uint32_t saveDSPATILEOFF; | ||
343 | uint32_t saveDSPBTILEOFF; | ||
344 | uint32_t saveDSPAADDR; | ||
345 | uint32_t saveDSPBADDR; | ||
346 | uint32_t savePFIT_AUTO_RATIOS; | ||
347 | uint32_t savePFIT_PGM_RATIOS; | ||
348 | uint32_t savePP_ON_DELAYS; | ||
349 | uint32_t savePP_OFF_DELAYS; | ||
350 | uint32_t savePP_DIVISOR; | ||
351 | uint32_t saveBCLRPAT_A; | ||
352 | uint32_t saveBCLRPAT_B; | ||
353 | uint32_t saveDSPALINOFF; | ||
354 | uint32_t saveDSPBLINOFF; | ||
355 | uint32_t savePERF_MODE; | ||
356 | uint32_t saveDSPFW1; | ||
357 | uint32_t saveDSPFW2; | ||
358 | uint32_t saveDSPFW3; | ||
359 | uint32_t saveDSPFW4; | ||
360 | uint32_t saveDSPFW5; | ||
361 | uint32_t saveDSPFW6; | ||
362 | uint32_t saveCHICKENBIT; | ||
363 | uint32_t saveDSPACURSOR_CTRL; | ||
364 | uint32_t saveDSPBCURSOR_CTRL; | ||
365 | uint32_t saveDSPACURSOR_BASE; | ||
366 | uint32_t saveDSPBCURSOR_BASE; | ||
367 | uint32_t saveDSPACURSOR_POS; | ||
368 | uint32_t saveDSPBCURSOR_POS; | ||
369 | uint32_t save_palette_a[256]; | ||
370 | uint32_t save_palette_b[256]; | ||
371 | uint32_t saveOV_OVADD; | ||
372 | uint32_t saveOV_OGAMC0; | ||
373 | uint32_t saveOV_OGAMC1; | ||
374 | uint32_t saveOV_OGAMC2; | ||
375 | uint32_t saveOV_OGAMC3; | ||
376 | uint32_t saveOV_OGAMC4; | ||
377 | uint32_t saveOV_OGAMC5; | ||
378 | uint32_t saveOVC_OVADD; | ||
379 | uint32_t saveOVC_OGAMC0; | ||
380 | uint32_t saveOVC_OGAMC1; | ||
381 | uint32_t saveOVC_OGAMC2; | ||
382 | uint32_t saveOVC_OGAMC3; | ||
383 | uint32_t saveOVC_OGAMC4; | ||
384 | uint32_t saveOVC_OGAMC5; | ||
385 | |||
386 | /* DPST register save */ | ||
387 | uint32_t saveHISTOGRAM_INT_CONTROL_REG; | ||
388 | uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG; | ||
389 | uint32_t savePWM_CONTROL_LOGIC; | ||
390 | }; | ||
391 | |||
392 | struct medfield_state { | ||
393 | uint32_t saveDPLL_A; | ||
394 | uint32_t saveFPA0; | ||
395 | uint32_t savePIPEACONF; | ||
396 | uint32_t saveHTOTAL_A; | ||
397 | uint32_t saveHBLANK_A; | ||
398 | uint32_t saveHSYNC_A; | ||
399 | uint32_t saveVTOTAL_A; | ||
400 | uint32_t saveVBLANK_A; | ||
401 | uint32_t saveVSYNC_A; | ||
402 | uint32_t savePIPEASRC; | ||
403 | uint32_t saveDSPASTRIDE; | ||
404 | uint32_t saveDSPALINOFF; | ||
405 | uint32_t saveDSPATILEOFF; | ||
406 | uint32_t saveDSPASIZE; | ||
407 | uint32_t saveDSPAPOS; | ||
408 | uint32_t saveDSPASURF; | ||
409 | uint32_t saveDSPACNTR; | ||
410 | uint32_t saveDSPASTATUS; | ||
411 | uint32_t save_palette_a[256]; | ||
412 | uint32_t saveMIPI; | ||
413 | |||
414 | uint32_t saveDPLL_B; | ||
415 | uint32_t saveFPB0; | ||
416 | uint32_t savePIPEBCONF; | ||
417 | uint32_t saveHTOTAL_B; | ||
418 | uint32_t saveHBLANK_B; | ||
419 | uint32_t saveHSYNC_B; | ||
420 | uint32_t saveVTOTAL_B; | ||
421 | uint32_t saveVBLANK_B; | ||
422 | uint32_t saveVSYNC_B; | ||
423 | uint32_t savePIPEBSRC; | ||
424 | uint32_t saveDSPBSTRIDE; | ||
425 | uint32_t saveDSPBLINOFF; | ||
426 | uint32_t saveDSPBTILEOFF; | ||
427 | uint32_t saveDSPBSIZE; | ||
428 | uint32_t saveDSPBPOS; | ||
429 | uint32_t saveDSPBSURF; | ||
430 | uint32_t saveDSPBCNTR; | ||
431 | uint32_t saveDSPBSTATUS; | ||
432 | uint32_t save_palette_b[256]; | ||
433 | |||
434 | uint32_t savePIPECCONF; | ||
435 | uint32_t saveHTOTAL_C; | ||
436 | uint32_t saveHBLANK_C; | ||
437 | uint32_t saveHSYNC_C; | ||
438 | uint32_t saveVTOTAL_C; | ||
439 | uint32_t saveVBLANK_C; | ||
440 | uint32_t saveVSYNC_C; | ||
441 | uint32_t savePIPECSRC; | ||
442 | uint32_t saveDSPCSTRIDE; | ||
443 | uint32_t saveDSPCLINOFF; | ||
444 | uint32_t saveDSPCTILEOFF; | ||
445 | uint32_t saveDSPCSIZE; | ||
446 | uint32_t saveDSPCPOS; | ||
447 | uint32_t saveDSPCSURF; | ||
448 | uint32_t saveDSPCCNTR; | ||
449 | uint32_t saveDSPCSTATUS; | ||
450 | uint32_t save_palette_c[256]; | ||
451 | uint32_t saveMIPI_C; | ||
452 | |||
453 | uint32_t savePFIT_CONTROL; | ||
454 | uint32_t savePFIT_PGM_RATIOS; | ||
455 | uint32_t saveHDMIPHYMISCCTL; | ||
456 | uint32_t saveHDMIB_CONTROL; | ||
457 | }; | ||
458 | |||
459 | struct cdv_state { | ||
460 | uint32_t saveDSPCLK_GATE_D; | ||
461 | uint32_t saveRAMCLK_GATE_D; | ||
462 | uint32_t saveDSPARB; | ||
463 | uint32_t saveDSPFW[6]; | ||
464 | uint32_t saveADPA; | ||
465 | uint32_t savePP_CONTROL; | ||
466 | uint32_t savePFIT_PGM_RATIOS; | ||
467 | uint32_t saveLVDS; | ||
468 | uint32_t savePFIT_CONTROL; | ||
469 | uint32_t savePP_ON_DELAYS; | ||
470 | uint32_t savePP_OFF_DELAYS; | ||
471 | uint32_t savePP_CYCLE; | ||
472 | uint32_t saveVGACNTRL; | ||
473 | uint32_t saveIER; | ||
474 | uint32_t saveIMR; | ||
475 | u8 saveLBB; | ||
476 | }; | ||
477 | |||
478 | struct psb_save_area { | ||
479 | uint32_t saveBSM; | ||
480 | uint32_t saveVBT; | ||
481 | union { | ||
482 | struct psb_state psb; | ||
483 | struct medfield_state mdfld; | ||
484 | struct cdv_state cdv; | ||
485 | }; | ||
486 | uint32_t saveBLC_PWM_CTL2; | ||
487 | uint32_t saveBLC_PWM_CTL; | ||
488 | }; | ||
489 | |||
279 | struct psb_ops; | 490 | struct psb_ops; |
280 | 491 | ||
281 | #define PSB_NUM_PIPE 3 | 492 | #define PSB_NUM_PIPE 3 |
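The new psb_save_area keeps the registers every chip needs (saveBSM and saveVBT from PCI config space, the BLC_PWM backlight words) next to an anonymous union of per-chip blocks, so only one variant's worth of storage is ever live instead of Poulsbo, Medfield and Cedarview fields piling up side by side in drm_psb_private. A trimmed-down, compilable analogue is below; the field names are borrowed from the structs above, but the layout is illustrative only, and the per-chip code grabs a pointer to its own variant once, as the patched functions do with "struct psb_state *regs = &dev_priv->regs.psb;".

#include <stdint.h>
#include <stdio.h>

/* Cut-down per-chip register caches. */
struct psb_like_state {
	uint32_t savePIPEACONF;
	uint32_t save_palette_a[256];
};

struct cdv_like_state {
	uint32_t saveDSPCLK_GATE_D;
	uint32_t saveLVDS;
};

/* Trimmed analogue of struct psb_save_area: common fields plus an
 * anonymous union of the chip-specific blocks. */
struct save_area {
	uint32_t saveBSM;		/* common: PCI config dword 0x5C */
	uint32_t saveVBT;		/* common: PCI config dword 0xFC */
	union {
		struct psb_like_state psb;
		struct cdv_like_state cdv;
	};
	uint32_t saveBLC_PWM_CTL;	/* common backlight state */
};

static void psb_like_save(struct save_area *area)
{
	struct psb_like_state *regs = &area->psb;

	regs->savePIPEACONF = 0x80000000u;	/* would be PSB_RVDC32(PIPEACONF) */
}

int main(void)
{
	struct save_area area = { 0 };

	psb_like_save(&area);
	printf("cached PIPEACONF = %#x (save area is %zu bytes)\n",
	       (unsigned int)area.psb.savePIPEACONF, sizeof(area));
	return 0;
}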
@@ -397,216 +608,21 @@ struct drm_psb_private { | |||
397 | struct oaktrail_vbt vbt_data; | 608 | struct oaktrail_vbt vbt_data; |
398 | struct oaktrail_gct_data gct_data; | 609 | struct oaktrail_gct_data gct_data; |
399 | 610 | ||
400 | /* MIPI Panel type etc */ | 611 | /* Oaktrail HDMI state */ |
401 | int panel_id; | ||
402 | bool dual_mipi; /* dual display - DPI & DBI */ | ||
403 | bool dpi_panel_on; /* The DPI panel power is on */ | ||
404 | bool dpi_panel_on2; /* The DPI panel power is on */ | ||
405 | bool dbi_panel_on; /* The DBI panel power is on */ | ||
406 | bool dbi_panel_on2; /* The DBI panel power is on */ | ||
407 | u32 dsr_fb_update; /* DSR FB update counter */ | ||
408 | |||
409 | /* Moorestown HDMI state */ | ||
410 | struct oaktrail_hdmi_dev *hdmi_priv; | 612 | struct oaktrail_hdmi_dev *hdmi_priv; |
411 | 613 | ||
412 | /* Moorestown pipe config register value cache */ | ||
413 | uint32_t pipeconf; | ||
414 | uint32_t pipeconf1; | ||
415 | uint32_t pipeconf2; | ||
416 | |||
417 | /* Moorestown plane control register value cache */ | ||
418 | uint32_t dspcntr; | ||
419 | uint32_t dspcntr1; | ||
420 | uint32_t dspcntr2; | ||
421 | |||
422 | /* Moorestown MM backlight cache */ | ||
423 | uint8_t saveBKLTCNT; | ||
424 | uint8_t saveBKLTREQ; | ||
425 | uint8_t saveBKLTBRTL; | ||
426 | |||
427 | /* | 614 | /* |
428 | * Register state | 615 | * Register state |
429 | */ | 616 | */ |
430 | uint32_t saveDSPACNTR; | 617 | |
431 | uint32_t saveDSPBCNTR; | 618 | struct psb_save_area regs; |
432 | uint32_t savePIPEACONF; | ||
433 | uint32_t savePIPEBCONF; | ||
434 | uint32_t savePIPEASRC; | ||
435 | uint32_t savePIPEBSRC; | ||
436 | uint32_t saveFPA0; | ||
437 | uint32_t saveFPA1; | ||
438 | uint32_t saveDPLL_A; | ||
439 | uint32_t saveDPLL_A_MD; | ||
440 | uint32_t saveHTOTAL_A; | ||
441 | uint32_t saveHBLANK_A; | ||
442 | uint32_t saveHSYNC_A; | ||
443 | uint32_t saveVTOTAL_A; | ||
444 | uint32_t saveVBLANK_A; | ||
445 | uint32_t saveVSYNC_A; | ||
446 | uint32_t saveDSPASTRIDE; | ||
447 | uint32_t saveDSPASIZE; | ||
448 | uint32_t saveDSPAPOS; | ||
449 | uint32_t saveDSPABASE; | ||
450 | uint32_t saveDSPASURF; | ||
451 | uint32_t saveDSPASTATUS; | ||
452 | uint32_t saveFPB0; | ||
453 | uint32_t saveFPB1; | ||
454 | uint32_t saveDPLL_B; | ||
455 | uint32_t saveDPLL_B_MD; | ||
456 | uint32_t saveHTOTAL_B; | ||
457 | uint32_t saveHBLANK_B; | ||
458 | uint32_t saveHSYNC_B; | ||
459 | uint32_t saveVTOTAL_B; | ||
460 | uint32_t saveVBLANK_B; | ||
461 | uint32_t saveVSYNC_B; | ||
462 | uint32_t saveDSPBSTRIDE; | ||
463 | uint32_t saveDSPBSIZE; | ||
464 | uint32_t saveDSPBPOS; | ||
465 | uint32_t saveDSPBBASE; | ||
466 | uint32_t saveDSPBSURF; | ||
467 | uint32_t saveDSPBSTATUS; | ||
468 | uint32_t saveVCLK_DIVISOR_VGA0; | ||
469 | uint32_t saveVCLK_DIVISOR_VGA1; | ||
470 | uint32_t saveVCLK_POST_DIV; | ||
471 | uint32_t saveVGACNTRL; | ||
472 | uint32_t saveADPA; | ||
473 | uint32_t saveLVDS; | ||
474 | uint32_t saveDVOA; | ||
475 | uint32_t saveDVOB; | ||
476 | uint32_t saveDVOC; | ||
477 | uint32_t savePP_ON; | ||
478 | uint32_t savePP_OFF; | ||
479 | uint32_t savePP_CONTROL; | ||
480 | uint32_t savePP_CYCLE; | ||
481 | uint32_t savePFIT_CONTROL; | ||
482 | uint32_t savePaletteA[256]; | ||
483 | uint32_t savePaletteB[256]; | ||
484 | uint32_t saveBLC_PWM_CTL2; | ||
485 | uint32_t saveBLC_PWM_CTL; | ||
486 | uint32_t saveCLOCKGATING; | ||
487 | uint32_t saveDSPARB; | ||
488 | uint32_t saveDSPATILEOFF; | ||
489 | uint32_t saveDSPBTILEOFF; | ||
490 | uint32_t saveDSPAADDR; | ||
491 | uint32_t saveDSPBADDR; | ||
492 | uint32_t savePFIT_AUTO_RATIOS; | ||
493 | uint32_t savePFIT_PGM_RATIOS; | ||
494 | uint32_t savePP_ON_DELAYS; | ||
495 | uint32_t savePP_OFF_DELAYS; | ||
496 | uint32_t savePP_DIVISOR; | ||
497 | uint32_t saveBSM; | ||
498 | uint32_t saveVBT; | ||
499 | uint32_t saveBCLRPAT_A; | ||
500 | uint32_t saveBCLRPAT_B; | ||
501 | uint32_t saveDSPALINOFF; | ||
502 | uint32_t saveDSPBLINOFF; | ||
503 | uint32_t savePERF_MODE; | ||
504 | uint32_t saveDSPFW1; | ||
505 | uint32_t saveDSPFW2; | ||
506 | uint32_t saveDSPFW3; | ||
507 | uint32_t saveDSPFW4; | ||
508 | uint32_t saveDSPFW5; | ||
509 | uint32_t saveDSPFW6; | ||
510 | uint32_t saveCHICKENBIT; | ||
511 | uint32_t saveDSPACURSOR_CTRL; | ||
512 | uint32_t saveDSPBCURSOR_CTRL; | ||
513 | uint32_t saveDSPACURSOR_BASE; | ||
514 | uint32_t saveDSPBCURSOR_BASE; | ||
515 | uint32_t saveDSPACURSOR_POS; | ||
516 | uint32_t saveDSPBCURSOR_POS; | ||
517 | uint32_t save_palette_a[256]; | ||
518 | uint32_t save_palette_b[256]; | ||
519 | uint32_t saveOV_OVADD; | ||
520 | uint32_t saveOV_OGAMC0; | ||
521 | uint32_t saveOV_OGAMC1; | ||
522 | uint32_t saveOV_OGAMC2; | ||
523 | uint32_t saveOV_OGAMC3; | ||
524 | uint32_t saveOV_OGAMC4; | ||
525 | uint32_t saveOV_OGAMC5; | ||
526 | uint32_t saveOVC_OVADD; | ||
527 | uint32_t saveOVC_OGAMC0; | ||
528 | uint32_t saveOVC_OGAMC1; | ||
529 | uint32_t saveOVC_OGAMC2; | ||
530 | uint32_t saveOVC_OGAMC3; | ||
531 | uint32_t saveOVC_OGAMC4; | ||
532 | uint32_t saveOVC_OGAMC5; | ||
533 | 619 | ||
534 | /* MSI reg save */ | 620 | /* MSI reg save */ |
535 | uint32_t msi_addr; | 621 | uint32_t msi_addr; |
536 | uint32_t msi_data; | 622 | uint32_t msi_data; |
537 | 623 | ||
538 | /* Medfield specific register save state */ | ||
539 | uint32_t saveHDMIPHYMISCCTL; | ||
540 | uint32_t saveHDMIB_CONTROL; | ||
541 | uint32_t saveDSPCCNTR; | ||
542 | uint32_t savePIPECCONF; | ||
543 | uint32_t savePIPECSRC; | ||
544 | uint32_t saveHTOTAL_C; | ||
545 | uint32_t saveHBLANK_C; | ||
546 | uint32_t saveHSYNC_C; | ||
547 | uint32_t saveVTOTAL_C; | ||
548 | uint32_t saveVBLANK_C; | ||
549 | uint32_t saveVSYNC_C; | ||
550 | uint32_t saveDSPCSTRIDE; | ||
551 | uint32_t saveDSPCSIZE; | ||
552 | uint32_t saveDSPCPOS; | ||
553 | uint32_t saveDSPCSURF; | ||
554 | uint32_t saveDSPCSTATUS; | ||
555 | uint32_t saveDSPCLINOFF; | ||
556 | uint32_t saveDSPCTILEOFF; | ||
557 | uint32_t saveDSPCCURSOR_CTRL; | ||
558 | uint32_t saveDSPCCURSOR_BASE; | ||
559 | uint32_t saveDSPCCURSOR_POS; | ||
560 | uint32_t save_palette_c[256]; | ||
561 | uint32_t saveOV_OVADD_C; | ||
562 | uint32_t saveOV_OGAMC0_C; | ||
563 | uint32_t saveOV_OGAMC1_C; | ||
564 | uint32_t saveOV_OGAMC2_C; | ||
565 | uint32_t saveOV_OGAMC3_C; | ||
566 | uint32_t saveOV_OGAMC4_C; | ||
567 | uint32_t saveOV_OGAMC5_C; | ||
568 | |||
569 | /* DSI register save */ | ||
570 | uint32_t saveDEVICE_READY_REG; | ||
571 | uint32_t saveINTR_EN_REG; | ||
572 | uint32_t saveDSI_FUNC_PRG_REG; | ||
573 | uint32_t saveHS_TX_TIMEOUT_REG; | ||
574 | uint32_t saveLP_RX_TIMEOUT_REG; | ||
575 | uint32_t saveTURN_AROUND_TIMEOUT_REG; | ||
576 | uint32_t saveDEVICE_RESET_REG; | ||
577 | uint32_t saveDPI_RESOLUTION_REG; | ||
578 | uint32_t saveHORIZ_SYNC_PAD_COUNT_REG; | ||
579 | uint32_t saveHORIZ_BACK_PORCH_COUNT_REG; | ||
580 | uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG; | ||
581 | uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG; | ||
582 | uint32_t saveVERT_SYNC_PAD_COUNT_REG; | ||
583 | uint32_t saveVERT_BACK_PORCH_COUNT_REG; | ||
584 | uint32_t saveVERT_FRONT_PORCH_COUNT_REG; | ||
585 | uint32_t saveHIGH_LOW_SWITCH_COUNT_REG; | ||
586 | uint32_t saveINIT_COUNT_REG; | ||
587 | uint32_t saveMAX_RET_PAK_REG; | ||
588 | uint32_t saveVIDEO_FMT_REG; | ||
589 | uint32_t saveEOT_DISABLE_REG; | ||
590 | uint32_t saveLP_BYTECLK_REG; | ||
591 | uint32_t saveHS_LS_DBI_ENABLE_REG; | ||
592 | uint32_t saveTXCLKESC_REG; | ||
593 | uint32_t saveDPHY_PARAM_REG; | ||
594 | uint32_t saveMIPI_CONTROL_REG; | ||
595 | uint32_t saveMIPI; | ||
596 | uint32_t saveMIPI_C; | ||
597 | |||
598 | /* DPST register save */ | ||
599 | uint32_t saveHISTOGRAM_INT_CONTROL_REG; | ||
600 | uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG; | ||
601 | uint32_t savePWM_CONTROL_LOGIC; | ||
602 | 624 | ||
603 | /* | 625 | /* |
604 | * DSI info. | ||
605 | */ | ||
606 | void * dbi_dsr_info; | ||
607 | void * dbi_dpu_info; | ||
608 | void * dsi_configs[2]; | ||
609 | /* | ||
610 | * LID-Switch | 626 | * LID-Switch |
611 | */ | 627 | */ |
612 | spinlock_t lid_lock; | 628 | spinlock_t lid_lock; |
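
The long per-register save list deleted above is folded into the single struct psb_save_area regs member. The real layout lives elsewhere in psb_drv.h within this series; the sketch below is only an illustration inferred from the accessors that appear later in this diff (regs.saveBLC_PWM_CTL, regs.psb.saveDPLL_A, regs.psb.save_palette_a[]), so every field and type name not seen in those accessors is an assumption.

/* Illustrative sketch only -- not the actual psb_drv.h definition. */
struct psb_state {			/* hypothetical per-chip block */
	uint32_t saveDPLL_A, saveDPLL_B;
	uint32_t saveFPA0, saveFPA1, saveFPB0, saveFPB1;
	uint32_t saveHTOTAL_A, saveHSYNC_A, saveVTOTAL_A, saveVSYNC_A;
	uint32_t saveHTOTAL_B, saveHSYNC_B, saveVTOTAL_B, saveVSYNC_B;
	uint32_t saveLVDS;
	uint32_t save_palette_a[256];
	uint32_t save_palette_b[256];
	/* ... */
};

struct psb_save_area {
	uint32_t saveBLC_PWM_CTL;	/* shared by the LVDS backlight code */
	union {				/* per-chip register state */
		struct psb_state psb;
		/* other families (cdv, mdfld) would add members here */
	};
};

Grouping the state this way lets psb_intel_lvds.c keep using dev_priv->regs.saveBLC_PWM_CTL while chip-specific code reaches into dev_priv->regs.psb.*, as the later hunks in this diff show.
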
@@ -635,6 +651,24 @@ struct drm_psb_private { | |||
635 | 651 | ||
636 | /* 2D acceleration */ | 652 | /* 2D acceleration */ |
637 | spinlock_t lock_2d; | 653 | spinlock_t lock_2d; |
654 | |||
655 | /* | ||
656 | * Panel brightness | ||
657 | */ | ||
658 | int brightness; | ||
659 | int brightness_adjusted; | ||
660 | |||
661 | bool dsr_enable; | ||
662 | u32 dsr_fb_update; | ||
663 | bool dpi_panel_on[3]; | ||
664 | void *dsi_configs[2]; | ||
665 | u32 bpp; | ||
666 | u32 bpp2; | ||
667 | |||
668 | u32 pipeconf[3]; | ||
669 | u32 dspcntr[3]; | ||
670 | |||
671 | int mdfld_panel_id; | ||
638 | }; | 672 | }; |
639 | 673 | ||
640 | 674 | ||
@@ -830,6 +864,9 @@ extern const struct psb_ops psb_chip_ops; | |||
830 | /* oaktrail_device.c */ | 864 | /* oaktrail_device.c */ |
831 | extern const struct psb_ops oaktrail_chip_ops; | 865 | extern const struct psb_ops oaktrail_chip_ops; |
832 | 866 | ||
867 | /* mdfld_device.c */ | ||
868 | extern const struct psb_ops mdfld_chip_ops; | ||
869 | |||
833 | /* cdv_device.c */ | 870 | /* cdv_device.c */ |
834 | extern const struct psb_ops cdv_chip_ops; | 871 | extern const struct psb_ops cdv_chip_ops; |
835 | 872 | ||
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 49e983508d5c..2616558457c8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c | |||
@@ -333,7 +333,7 @@ void psb_intel_wait_for_vblank(struct drm_device *dev) | |||
333 | mdelay(20); | 333 | mdelay(20); |
334 | } | 334 | } |
335 | 335 | ||
336 | int psb_intel_pipe_set_base(struct drm_crtc *crtc, | 336 | static int psb_intel_pipe_set_base(struct drm_crtc *crtc, |
337 | int x, int y, struct drm_framebuffer *old_fb) | 337 | int x, int y, struct drm_framebuffer *old_fb) |
338 | { | 338 | { |
339 | struct drm_device *dev = crtc->dev; | 339 | struct drm_device *dev = crtc->dev; |
@@ -433,7 +433,6 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
433 | int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; | 433 | int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; |
434 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 434 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
435 | u32 temp; | 435 | u32 temp; |
436 | bool enabled; | ||
437 | 436 | ||
438 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 437 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
439 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 438 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -518,8 +517,6 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
518 | break; | 517 | break; |
519 | } | 518 | } |
520 | 519 | ||
521 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; | ||
522 | |||
523 | /*Set FIFO Watermarks*/ | 520 | /*Set FIFO Watermarks*/ |
524 | REG_WRITE(DSPARB, 0x3F3E); | 521 | REG_WRITE(DSPARB, 0x3F3E); |
525 | } | 522 | } |
@@ -611,8 +608,8 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, | |||
611 | int refclk; | 608 | int refclk; |
612 | struct psb_intel_clock_t clock; | 609 | struct psb_intel_clock_t clock; |
613 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 610 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
614 | bool ok, is_sdvo = false, is_dvo = false; | 611 | bool ok, is_sdvo = false; |
615 | bool is_crt = false, is_lvds = false, is_tv = false; | 612 | bool is_lvds = false, is_tv = false; |
616 | struct drm_mode_config *mode_config = &dev->mode_config; | 613 | struct drm_mode_config *mode_config = &dev->mode_config; |
617 | struct drm_connector *connector; | 614 | struct drm_connector *connector; |
618 | 615 | ||
@@ -637,15 +634,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, | |||
637 | case INTEL_OUTPUT_SDVO: | 634 | case INTEL_OUTPUT_SDVO: |
638 | is_sdvo = true; | 635 | is_sdvo = true; |
639 | break; | 636 | break; |
640 | case INTEL_OUTPUT_DVO: | ||
641 | is_dvo = true; | ||
642 | break; | ||
643 | case INTEL_OUTPUT_TVOUT: | 637 | case INTEL_OUTPUT_TVOUT: |
644 | is_tv = true; | 638 | is_tv = true; |
645 | break; | 639 | break; |
646 | case INTEL_OUTPUT_ANALOG: | ||
647 | is_crt = true; | ||
648 | break; | ||
649 | } | 640 | } |
650 | } | 641 | } |
651 | 642 | ||
@@ -845,7 +836,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc) | |||
845 | gma_power_end(dev); | 836 | gma_power_end(dev); |
846 | } else { | 837 | } else { |
847 | for (i = 0; i < 256; i++) { | 838 | for (i = 0; i < 256; i++) { |
848 | dev_priv->save_palette_a[i] = | 839 | dev_priv->regs.psb.save_palette_a[i] = |
849 | ((psb_intel_crtc->lut_r[i] + | 840 | ((psb_intel_crtc->lut_r[i] + |
850 | psb_intel_crtc->lut_adj[i]) << 16) | | 841 | psb_intel_crtc->lut_adj[i]) << 16) | |
851 | ((psb_intel_crtc->lut_g[i] + | 842 | ((psb_intel_crtc->lut_g[i] + |
@@ -1141,18 +1132,20 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev, | |||
1141 | gma_power_end(dev); | 1132 | gma_power_end(dev); |
1142 | } else { | 1133 | } else { |
1143 | dpll = (pipe == 0) ? | 1134 | dpll = (pipe == 0) ? |
1144 | dev_priv->saveDPLL_A : dev_priv->saveDPLL_B; | 1135 | dev_priv->regs.psb.saveDPLL_A : |
1136 | dev_priv->regs.psb.saveDPLL_B; | ||
1145 | 1137 | ||
1146 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | 1138 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
1147 | fp = (pipe == 0) ? | 1139 | fp = (pipe == 0) ? |
1148 | dev_priv->saveFPA0 : | 1140 | dev_priv->regs.psb.saveFPA0 : |
1149 | dev_priv->saveFPB0; | 1141 | dev_priv->regs.psb.saveFPB0; |
1150 | else | 1142 | else |
1151 | fp = (pipe == 0) ? | 1143 | fp = (pipe == 0) ? |
1152 | dev_priv->saveFPA1 : | 1144 | dev_priv->regs.psb.saveFPA1 : |
1153 | dev_priv->saveFPB1; | 1145 | dev_priv->regs.psb.saveFPB1; |
1154 | 1146 | ||
1155 | is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN); | 1147 | is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS & |
1148 | LVDS_PORT_EN); | ||
1156 | } | 1149 | } |
1157 | 1150 | ||
1158 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 1151 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
@@ -1218,13 +1211,17 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, | |||
1218 | gma_power_end(dev); | 1211 | gma_power_end(dev); |
1219 | } else { | 1212 | } else { |
1220 | htot = (pipe == 0) ? | 1213 | htot = (pipe == 0) ? |
1221 | dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B; | 1214 | dev_priv->regs.psb.saveHTOTAL_A : |
1215 | dev_priv->regs.psb.saveHTOTAL_B; | ||
1222 | hsync = (pipe == 0) ? | 1216 | hsync = (pipe == 0) ? |
1223 | dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B; | 1217 | dev_priv->regs.psb.saveHSYNC_A : |
1218 | dev_priv->regs.psb.saveHSYNC_B; | ||
1224 | vtot = (pipe == 0) ? | 1219 | vtot = (pipe == 0) ? |
1225 | dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B; | 1220 | dev_priv->regs.psb.saveVTOTAL_A : |
1221 | dev_priv->regs.psb.saveVTOTAL_B; | ||
1226 | vsync = (pipe == 0) ? | 1222 | vsync = (pipe == 0) ? |
1227 | dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B; | 1223 | dev_priv->regs.psb.saveVSYNC_A : |
1224 | dev_priv->regs.psb.saveVSYNC_B; | ||
1228 | } | 1225 | } |
1229 | 1226 | ||
1230 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 1227 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
@@ -1419,13 +1416,6 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask) | |||
1419 | return index_mask; | 1416 | return index_mask; |
1420 | } | 1417 | } |
1421 | 1418 | ||
1422 | |||
1423 | void psb_intel_modeset_cleanup(struct drm_device *dev) | ||
1424 | { | ||
1425 | drm_mode_config_cleanup(dev); | ||
1426 | } | ||
1427 | |||
1428 | |||
1429 | /* current intel driver doesn't take advantage of encoders | 1419 | /* current intel driver doesn't take advantage of encoders |
1430 | always give back the encoder for the connector | 1420 | always give back the encoder for the connector |
1431 | */ | 1421 | */ |
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 0a437586d8cc..c83f5b5d1057 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c | |||
@@ -77,7 +77,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev) | |||
77 | ret = REG_READ(BLC_PWM_CTL); | 77 | ret = REG_READ(BLC_PWM_CTL); |
78 | gma_power_end(dev); | 78 | gma_power_end(dev); |
79 | } else /* Powered off, use the saved value */ | 79 | } else /* Powered off, use the saved value */ |
80 | ret = dev_priv->saveBLC_PWM_CTL; | 80 | ret = dev_priv->regs.saveBLC_PWM_CTL; |
81 | 81 | ||
82 | /* Top 15bits hold the frequency mask */ | 82 | /* Top 15bits hold the frequency mask */ |
83 | ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >> | 83 | ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >> |
@@ -86,7 +86,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev) | |||
86 | ret *= 2; /* Return a 16bit range as needed for setting */ | 86 | ret *= 2; /* Return a 16bit range as needed for setting */ |
87 | if (ret == 0) | 87 | if (ret == 0) |
88 | dev_err(dev->dev, "BL bug: Reg %08x save %08X\n", | 88 | dev_err(dev->dev, "BL bug: Reg %08x save %08X\n", |
89 | REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL); | 89 | REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL); |
90 | return ret; | 90 | return ret; |
91 | } | 91 | } |
92 | 92 | ||
@@ -203,13 +203,13 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level) | |||
203 | REG_WRITE(BLC_PWM_CTL, | 203 | REG_WRITE(BLC_PWM_CTL, |
204 | (blc_pwm_ctl | | 204 | (blc_pwm_ctl | |
205 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); | 205 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); |
206 | dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl | | 206 | dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl | |
207 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); | 207 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); |
208 | gma_power_end(dev); | 208 | gma_power_end(dev); |
209 | } else { | 209 | } else { |
210 | blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL & | 210 | blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL & |
211 | ~BACKLIGHT_DUTY_CYCLE_MASK; | 211 | ~BACKLIGHT_DUTY_CYCLE_MASK; |
212 | dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl | | 212 | dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl | |
213 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); | 213 | (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); |
214 | } | 214 | } |
215 | } | 215 | } |
@@ -283,7 +283,7 @@ static void psb_intel_lvds_save(struct drm_connector *connector) | |||
283 | lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); | 283 | lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); |
284 | 284 | ||
285 | /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/ | 285 | /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/ |
286 | dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & | 286 | dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL & |
287 | BACKLIGHT_DUTY_CYCLE_MASK); | 287 | BACKLIGHT_DUTY_CYCLE_MASK); |
288 | 288 | ||
289 | /* | 289 | /* |
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h index fcc0af03d685..e89d3a2e8fdc 100644 --- a/drivers/gpu/drm/gma500/psb_intel_reg.h +++ b/drivers/gpu/drm/gma500/psb_intel_reg.h | |||
@@ -177,6 +177,9 @@ | |||
177 | #define LVDSPP_OFF 0x6120c | 177 | #define LVDSPP_OFF 0x6120c |
178 | #define PP_CYCLE 0x61210 | 178 | #define PP_CYCLE 0x61210 |
179 | 179 | ||
180 | #define PP_ON_DELAYS 0x61208 /* Cedartrail */ | ||
181 | #define PP_OFF_DELAYS 0x6120c /* Cedartrail */ | ||
182 | |||
180 | #define PFIT_CONTROL 0x61230 | 183 | #define PFIT_CONTROL 0x61230 |
181 | #define PFIT_ENABLE (1 << 31) | 184 | #define PFIT_ENABLE (1 << 31) |
182 | #define PFIT_PIPE_MASK (3 << 29) | 185 | #define PFIT_PIPE_MASK (3 << 29) |
@@ -1252,6 +1255,12 @@ No status bits are changed. | |||
1252 | # define SB_BYTE_ENABLE_SHIFT 4 | 1255 | # define SB_BYTE_ENABLE_SHIFT 4 |
1253 | # define SB_BUSY (1 << 0) | 1256 | # define SB_BUSY (1 << 0) |
1254 | 1257 | ||
1258 | #define DSPCLK_GATE_D 0x6200 | ||
1259 | # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */ | ||
1260 | # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) | ||
1261 | # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) | ||
1262 | |||
1263 | #define RAMCLK_GATE_D 0x6210 | ||
1255 | 1264 | ||
1256 | /* 32-bit value read/written from the DPIO reg. */ | 1265 | /* 32-bit value read/written from the DPIO reg. */ |
1257 | #define SB_DATA 0x02104 /* cedarview */ | 1266 | #define SB_DATA 0x02104 /* cedarview */ |
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 88b42971c0fd..36330cabcea2 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c | |||
@@ -1301,7 +1301,7 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector) | |||
1301 | return NULL; | 1301 | return NULL; |
1302 | } | 1302 | } |
1303 | 1303 | ||
1304 | enum drm_connector_status | 1304 | static enum drm_connector_status |
1305 | psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1305 | psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) |
1306 | { | 1306 | { |
1307 | struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector); | 1307 | struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector); |
@@ -2312,10 +2312,8 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s | |||
2312 | psb_intel_sdvo_connector->max_##name = data_value[0]; \ | 2312 | psb_intel_sdvo_connector->max_##name = data_value[0]; \ |
2313 | psb_intel_sdvo_connector->cur_##name = response; \ | 2313 | psb_intel_sdvo_connector->cur_##name = response; \ |
2314 | psb_intel_sdvo_connector->name = \ | 2314 | psb_intel_sdvo_connector->name = \ |
2315 | drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \ | 2315 | drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ |
2316 | if (!psb_intel_sdvo_connector->name) return false; \ | 2316 | if (!psb_intel_sdvo_connector->name) return false; \ |
2317 | psb_intel_sdvo_connector->name->values[0] = 0; \ | ||
2318 | psb_intel_sdvo_connector->name->values[1] = data_value[0]; \ | ||
2319 | drm_connector_attach_property(connector, \ | 2317 | drm_connector_attach_property(connector, \ |
2320 | psb_intel_sdvo_connector->name, \ | 2318 | psb_intel_sdvo_connector->name, \ |
2321 | psb_intel_sdvo_connector->cur_##name); \ | 2319 | psb_intel_sdvo_connector->cur_##name); \ |
@@ -2349,25 +2347,19 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, | |||
2349 | psb_intel_sdvo_connector->left_margin = data_value[0] - response; | 2347 | psb_intel_sdvo_connector->left_margin = data_value[0] - response; |
2350 | psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin; | 2348 | psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin; |
2351 | psb_intel_sdvo_connector->left = | 2349 | psb_intel_sdvo_connector->left = |
2352 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2350 | drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); |
2353 | "left_margin", 2); | ||
2354 | if (!psb_intel_sdvo_connector->left) | 2351 | if (!psb_intel_sdvo_connector->left) |
2355 | return false; | 2352 | return false; |
2356 | 2353 | ||
2357 | psb_intel_sdvo_connector->left->values[0] = 0; | ||
2358 | psb_intel_sdvo_connector->left->values[1] = data_value[0]; | ||
2359 | drm_connector_attach_property(connector, | 2354 | drm_connector_attach_property(connector, |
2360 | psb_intel_sdvo_connector->left, | 2355 | psb_intel_sdvo_connector->left, |
2361 | psb_intel_sdvo_connector->left_margin); | 2356 | psb_intel_sdvo_connector->left_margin); |
2362 | 2357 | ||
2363 | psb_intel_sdvo_connector->right = | 2358 | psb_intel_sdvo_connector->right = |
2364 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2359 | drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); |
2365 | "right_margin", 2); | ||
2366 | if (!psb_intel_sdvo_connector->right) | 2360 | if (!psb_intel_sdvo_connector->right) |
2367 | return false; | 2361 | return false; |
2368 | 2362 | ||
2369 | psb_intel_sdvo_connector->right->values[0] = 0; | ||
2370 | psb_intel_sdvo_connector->right->values[1] = data_value[0]; | ||
2371 | drm_connector_attach_property(connector, | 2363 | drm_connector_attach_property(connector, |
2372 | psb_intel_sdvo_connector->right, | 2364 | psb_intel_sdvo_connector->right, |
2373 | psb_intel_sdvo_connector->right_margin); | 2365 | psb_intel_sdvo_connector->right_margin); |
@@ -2391,25 +2383,19 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, | |||
2391 | psb_intel_sdvo_connector->top_margin = data_value[0] - response; | 2383 | psb_intel_sdvo_connector->top_margin = data_value[0] - response; |
2392 | psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin; | 2384 | psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin; |
2393 | psb_intel_sdvo_connector->top = | 2385 | psb_intel_sdvo_connector->top = |
2394 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2386 | drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]); |
2395 | "top_margin", 2); | ||
2396 | if (!psb_intel_sdvo_connector->top) | 2387 | if (!psb_intel_sdvo_connector->top) |
2397 | return false; | 2388 | return false; |
2398 | 2389 | ||
2399 | psb_intel_sdvo_connector->top->values[0] = 0; | ||
2400 | psb_intel_sdvo_connector->top->values[1] = data_value[0]; | ||
2401 | drm_connector_attach_property(connector, | 2390 | drm_connector_attach_property(connector, |
2402 | psb_intel_sdvo_connector->top, | 2391 | psb_intel_sdvo_connector->top, |
2403 | psb_intel_sdvo_connector->top_margin); | 2392 | psb_intel_sdvo_connector->top_margin); |
2404 | 2393 | ||
2405 | psb_intel_sdvo_connector->bottom = | 2394 | psb_intel_sdvo_connector->bottom = |
2406 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2395 | drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]); |
2407 | "bottom_margin", 2); | ||
2408 | if (!psb_intel_sdvo_connector->bottom) | 2396 | if (!psb_intel_sdvo_connector->bottom) |
2409 | return false; | 2397 | return false; |
2410 | 2398 | ||
2411 | psb_intel_sdvo_connector->bottom->values[0] = 0; | ||
2412 | psb_intel_sdvo_connector->bottom->values[1] = data_value[0]; | ||
2413 | drm_connector_attach_property(connector, | 2399 | drm_connector_attach_property(connector, |
2414 | psb_intel_sdvo_connector->bottom, | 2400 | psb_intel_sdvo_connector->bottom, |
2415 | psb_intel_sdvo_connector->bottom_margin); | 2401 | psb_intel_sdvo_connector->bottom_margin); |
@@ -2438,12 +2424,10 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, | |||
2438 | psb_intel_sdvo_connector->max_dot_crawl = 1; | 2424 | psb_intel_sdvo_connector->max_dot_crawl = 1; |
2439 | psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1; | 2425 | psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1; |
2440 | psb_intel_sdvo_connector->dot_crawl = | 2426 | psb_intel_sdvo_connector->dot_crawl = |
2441 | drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2); | 2427 | drm_property_create_range(dev, 0, "dot_crawl", 0, 1); |
2442 | if (!psb_intel_sdvo_connector->dot_crawl) | 2428 | if (!psb_intel_sdvo_connector->dot_crawl) |
2443 | return false; | 2429 | return false; |
2444 | 2430 | ||
2445 | psb_intel_sdvo_connector->dot_crawl->values[0] = 0; | ||
2446 | psb_intel_sdvo_connector->dot_crawl->values[1] = 1; | ||
2447 | drm_connector_attach_property(connector, | 2431 | drm_connector_attach_property(connector, |
2448 | psb_intel_sdvo_connector->dot_crawl, | 2432 | psb_intel_sdvo_connector->dot_crawl, |
2449 | psb_intel_sdvo_connector->cur_dot_crawl); | 2433 | psb_intel_sdvo_connector->cur_dot_crawl); |
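
The psb_intel_sdvo.c hunks above all make the same substitution: the two-step pattern of drm_property_create(dev, DRM_MODE_PROP_RANGE, name, 2) followed by filling values[0]/values[1] by hand becomes a single drm_property_create_range() call that takes the bounds directly. A minimal before/after sketch, where prop, max_value, cur_value and the "range_prop" name are placeholders rather than fields of this driver:

/* Old pattern (removed above): allocate a two-value range property
 * and fill in the minimum and maximum afterwards. */
prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, "range_prop", 2);
if (!prop)
	return false;
prop->values[0] = 0;		/* minimum */
prop->values[1] = max_value;	/* maximum */

/* New pattern: the helper takes min and max as arguments. */
prop = drm_property_create_range(dev, 0, "range_prop", 0, max_value);
if (!prop)
	return false;

drm_connector_attach_property(connector, prop, cur_value);
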
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 7be802baceb5..1869586457b1 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include "psb_reg.h" | 27 | #include "psb_reg.h" |
28 | #include "psb_intel_reg.h" | 28 | #include "psb_intel_reg.h" |
29 | #include "power.h" | 29 | #include "power.h" |
30 | #include "psb_irq.h" | ||
31 | #include "mdfld_output.h" | ||
30 | 32 | ||
31 | /* | 33 | /* |
32 | * inline functions | 34 | * inline functions |
@@ -113,7 +115,7 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask) | |||
113 | } | 115 | } |
114 | } | 116 | } |
115 | 117 | ||
116 | void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe) | 118 | static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe) |
117 | { | 119 | { |
118 | if (gma_power_begin(dev_priv->dev, false)) { | 120 | if (gma_power_begin(dev_priv->dev, false)) { |
119 | u32 pipe_event = mid_pipe_event(pipe); | 121 | u32 pipe_event = mid_pipe_event(pipe); |
@@ -124,7 +126,7 @@ void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe) | |||
124 | } | 126 | } |
125 | } | 127 | } |
126 | 128 | ||
127 | void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe) | 129 | static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe) |
128 | { | 130 | { |
129 | if (dev_priv->pipestat[pipe] == 0) { | 131 | if (dev_priv->pipestat[pipe] == 0) { |
130 | if (gma_power_begin(dev_priv->dev, false)) { | 132 | if (gma_power_begin(dev_priv->dev, false)) { |
@@ -453,6 +455,11 @@ int psb_enable_vblank(struct drm_device *dev, int pipe) | |||
453 | uint32_t reg_val = 0; | 455 | uint32_t reg_val = 0; |
454 | uint32_t pipeconf_reg = mid_pipeconf(pipe); | 456 | uint32_t pipeconf_reg = mid_pipeconf(pipe); |
455 | 457 | ||
458 | /* Medfield is different - we should perhaps extract out vblank | ||
459 | and backlight etc. ops */ | ||
460 | if (IS_MFLD(dev)) | ||
461 | return mdfld_enable_te(dev, pipe); | ||
462 | |||
456 | if (gma_power_begin(dev, false)) { | 463 | if (gma_power_begin(dev, false)) { |
457 | reg_val = REG_READ(pipeconf_reg); | 464 | reg_val = REG_READ(pipeconf_reg); |
458 | gma_power_end(dev); | 465 | gma_power_end(dev); |
@@ -485,6 +492,8 @@ void psb_disable_vblank(struct drm_device *dev, int pipe) | |||
485 | struct drm_psb_private *dev_priv = dev->dev_private; | 492 | struct drm_psb_private *dev_priv = dev->dev_private; |
486 | unsigned long irqflags; | 493 | unsigned long irqflags; |
487 | 494 | ||
495 | if (IS_MFLD(dev)) | ||
496 | mdfld_disable_te(dev, pipe); | ||
488 | spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); | 497 | spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); |
489 | 498 | ||
490 | if (pipe == 0) | 499 | if (pipe == 0) |
@@ -499,6 +508,55 @@ void psb_disable_vblank(struct drm_device *dev, int pipe) | |||
499 | spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); | 508 | spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); |
500 | } | 509 | } |
501 | 510 | ||
511 | /* | ||
512 | * Enable the TE (tearing effect) interrupt for the given pipe | ||
513 | */ | ||
514 | int mdfld_enable_te(struct drm_device *dev, int pipe) | ||
515 | { | ||
516 | struct drm_psb_private *dev_priv = | ||
517 | (struct drm_psb_private *) dev->dev_private; | ||
518 | unsigned long irqflags; | ||
519 | uint32_t reg_val = 0; | ||
520 | uint32_t pipeconf_reg = mid_pipeconf(pipe); | ||
521 | |||
522 | if (gma_power_begin(dev, false)) { | ||
523 | reg_val = REG_READ(pipeconf_reg); | ||
524 | gma_power_end(dev); | ||
525 | } | ||
526 | |||
527 | if (!(reg_val & PIPEACONF_ENABLE)) | ||
528 | return -EINVAL; | ||
529 | |||
530 | spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); | ||
531 | |||
532 | mid_enable_pipe_event(dev_priv, pipe); | ||
533 | psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE); | ||
534 | |||
535 | spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); | ||
536 | |||
537 | return 0; | ||
538 | } | ||
539 | |||
540 | /* | ||
541 | * Disable the TE (tearing effect) interrupt for the given pipe | ||
542 | */ | ||
543 | void mdfld_disable_te(struct drm_device *dev, int pipe) | ||
544 | { | ||
545 | struct drm_psb_private *dev_priv = | ||
546 | (struct drm_psb_private *) dev->dev_private; | ||
547 | unsigned long irqflags; | ||
548 | |||
549 | if (!dev_priv->dsr_enable) | ||
550 | return; | ||
551 | |||
552 | spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); | ||
553 | |||
554 | mid_disable_pipe_event(dev_priv, pipe); | ||
555 | psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE); | ||
556 | |||
557 | spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); | ||
558 | } | ||
559 | |||
502 | /* Called from drm generic code, passed a 'crtc', which | 560 | /* Called from drm generic code, passed a 'crtc', which |
503 | * we use as a pipe index | 561 | * we use as a pipe index |
504 | */ | 562 | */ |
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h index 216fda38b57d..603045bee58a 100644 --- a/drivers/gpu/drm/gma500/psb_irq.h +++ b/drivers/gpu/drm/gma500/psb_irq.h | |||
@@ -42,4 +42,6 @@ int psb_enable_vblank(struct drm_device *dev, int pipe); | |||
42 | void psb_disable_vblank(struct drm_device *dev, int pipe); | 42 | void psb_disable_vblank(struct drm_device *dev, int pipe); |
43 | u32 psb_get_vblank_counter(struct drm_device *dev, int pipe); | 43 | u32 psb_get_vblank_counter(struct drm_device *dev, int pipe); |
44 | 44 | ||
45 | int mdfld_enable_te(struct drm_device *dev, int pipe); | ||
46 | void mdfld_disable_te(struct drm_device *dev, int pipe); | ||
45 | #endif /* _SYSIRQ_H_ */ | 47 | #endif /* _SYSIRQ_H_ */ |
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c new file mode 100644 index 000000000000..4a07ab596174 --- /dev/null +++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | |||
@@ -0,0 +1,829 @@ | |||
1 | /* | ||
2 | * Copyright © 2011 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include "mdfld_dsi_dpi.h" | ||
26 | #include "mdfld_output.h" | ||
27 | #include "mdfld_dsi_pkg_sender.h" | ||
28 | #include "tc35876x-dsi-lvds.h" | ||
29 | #include <linux/i2c/tc35876x.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <asm/intel_scu_ipc.h> | ||
33 | |||
34 | static struct i2c_client *tc35876x_client; | ||
35 | static struct i2c_client *cmi_lcd_i2c_client; | ||
36 | |||
37 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | ||
38 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) | ||
39 | |||
40 | /* DSI D-PHY Layer Registers */ | ||
41 | #define D0W_DPHYCONTTX 0x0004 | ||
42 | #define CLW_DPHYCONTRX 0x0020 | ||
43 | #define D0W_DPHYCONTRX 0x0024 | ||
44 | #define D1W_DPHYCONTRX 0x0028 | ||
45 | #define D2W_DPHYCONTRX 0x002C | ||
46 | #define D3W_DPHYCONTRX 0x0030 | ||
47 | #define COM_DPHYCONTRX 0x0038 | ||
48 | #define CLW_CNTRL 0x0040 | ||
49 | #define D0W_CNTRL 0x0044 | ||
50 | #define D1W_CNTRL 0x0048 | ||
51 | #define D2W_CNTRL 0x004C | ||
52 | #define D3W_CNTRL 0x0050 | ||
53 | #define DFTMODE_CNTRL 0x0054 | ||
54 | |||
55 | /* DSI PPI Layer Registers */ | ||
56 | #define PPI_STARTPPI 0x0104 | ||
57 | #define PPI_BUSYPPI 0x0108 | ||
58 | #define PPI_LINEINITCNT 0x0110 | ||
59 | #define PPI_LPTXTIMECNT 0x0114 | ||
60 | #define PPI_LANEENABLE 0x0134 | ||
61 | #define PPI_TX_RX_TA 0x013C | ||
62 | #define PPI_CLS_ATMR 0x0140 | ||
63 | #define PPI_D0S_ATMR 0x0144 | ||
64 | #define PPI_D1S_ATMR 0x0148 | ||
65 | #define PPI_D2S_ATMR 0x014C | ||
66 | #define PPI_D3S_ATMR 0x0150 | ||
67 | #define PPI_D0S_CLRSIPOCOUNT 0x0164 | ||
68 | #define PPI_D1S_CLRSIPOCOUNT 0x0168 | ||
69 | #define PPI_D2S_CLRSIPOCOUNT 0x016C | ||
70 | #define PPI_D3S_CLRSIPOCOUNT 0x0170 | ||
71 | #define CLS_PRE 0x0180 | ||
72 | #define D0S_PRE 0x0184 | ||
73 | #define D1S_PRE 0x0188 | ||
74 | #define D2S_PRE 0x018C | ||
75 | #define D3S_PRE 0x0190 | ||
76 | #define CLS_PREP 0x01A0 | ||
77 | #define D0S_PREP 0x01A4 | ||
78 | #define D1S_PREP 0x01A8 | ||
79 | #define D2S_PREP 0x01AC | ||
80 | #define D3S_PREP 0x01B0 | ||
81 | #define CLS_ZERO 0x01C0 | ||
82 | #define D0S_ZERO 0x01C4 | ||
83 | #define D1S_ZERO 0x01C8 | ||
84 | #define D2S_ZERO 0x01CC | ||
85 | #define D3S_ZERO 0x01D0 | ||
86 | #define PPI_CLRFLG 0x01E0 | ||
87 | #define PPI_CLRSIPO 0x01E4 | ||
88 | #define HSTIMEOUT 0x01F0 | ||
89 | #define HSTIMEOUTENABLE 0x01F4 | ||
90 | |||
91 | /* DSI Protocol Layer Registers */ | ||
92 | #define DSI_STARTDSI 0x0204 | ||
93 | #define DSI_BUSYDSI 0x0208 | ||
94 | #define DSI_LANEENABLE 0x0210 | ||
95 | #define DSI_LANESTATUS0 0x0214 | ||
96 | #define DSI_LANESTATUS1 0x0218 | ||
97 | #define DSI_INTSTATUS 0x0220 | ||
98 | #define DSI_INTMASK 0x0224 | ||
99 | #define DSI_INTCLR 0x0228 | ||
100 | #define DSI_LPTXTO 0x0230 | ||
101 | |||
102 | /* DSI General Registers */ | ||
103 | #define DSIERRCNT 0x0300 | ||
104 | |||
105 | /* DSI Application Layer Registers */ | ||
106 | #define APLCTRL 0x0400 | ||
107 | #define RDPKTLN 0x0404 | ||
108 | |||
109 | /* Video Path Registers */ | ||
110 | #define VPCTRL 0x0450 | ||
111 | #define HTIM1 0x0454 | ||
112 | #define HTIM2 0x0458 | ||
113 | #define VTIM1 0x045C | ||
114 | #define VTIM2 0x0460 | ||
115 | #define VFUEN 0x0464 | ||
116 | |||
117 | /* LVDS Registers */ | ||
118 | #define LVMX0003 0x0480 | ||
119 | #define LVMX0407 0x0484 | ||
120 | #define LVMX0811 0x0488 | ||
121 | #define LVMX1215 0x048C | ||
122 | #define LVMX1619 0x0490 | ||
123 | #define LVMX2023 0x0494 | ||
124 | #define LVMX2427 0x0498 | ||
125 | #define LVCFG 0x049C | ||
126 | #define LVPHY0 0x04A0 | ||
127 | #define LVPHY1 0x04A4 | ||
128 | |||
129 | /* System Registers */ | ||
130 | #define SYSSTAT 0x0500 | ||
131 | #define SYSRST 0x0504 | ||
132 | |||
133 | /* GPIO Registers */ | ||
134 | /*#define GPIOC 0x0520*/ | ||
135 | #define GPIOO 0x0524 | ||
136 | #define GPIOI 0x0528 | ||
137 | |||
138 | /* I2C Registers */ | ||
139 | #define I2CTIMCTRL 0x0540 | ||
140 | #define I2CMADDR 0x0544 | ||
141 | #define WDATAQ 0x0548 | ||
142 | #define RDATAQ 0x054C | ||
143 | |||
144 | /* Chip/Rev Registers */ | ||
145 | #define IDREG 0x0580 | ||
146 | |||
147 | /* Debug Registers */ | ||
148 | #define DEBUG00 0x05A0 | ||
149 | #define DEBUG01 0x05A4 | ||
150 | |||
151 | /* Panel CABC registers */ | ||
152 | #define PANEL_PWM_CONTROL 0x90 | ||
153 | #define PANEL_FREQ_DIVIDER_HI 0x91 | ||
154 | #define PANEL_FREQ_DIVIDER_LO 0x92 | ||
155 | #define PANEL_DUTY_CONTROL 0x93 | ||
156 | #define PANEL_MODIFY_RGB 0x94 | ||
157 | #define PANEL_FRAMERATE_CONTROL 0x96 | ||
158 | #define PANEL_PWM_MIN 0x97 | ||
159 | #define PANEL_PWM_REF 0x98 | ||
160 | #define PANEL_PWM_MAX 0x99 | ||
161 | #define PANEL_ALLOW_DISTORT 0x9A | ||
162 | #define PANEL_BYPASS_PWMI 0x9B | ||
163 | |||
164 | /* Panel color management registers */ | ||
165 | #define PANEL_CM_ENABLE 0x700 | ||
166 | #define PANEL_CM_HUE 0x701 | ||
167 | #define PANEL_CM_SATURATION 0x702 | ||
168 | #define PANEL_CM_INTENSITY 0x703 | ||
169 | #define PANEL_CM_BRIGHTNESS 0x704 | ||
170 | #define PANEL_CM_CE_ENABLE 0x705 | ||
171 | #define PANEL_CM_PEAK_EN 0x710 | ||
172 | #define PANEL_CM_GAIN 0x711 | ||
173 | #define PANEL_CM_HUETABLE_START 0x730 | ||
174 | #define PANEL_CM_HUETABLE_END 0x747 /* inclusive */ | ||
175 | |||
176 | /* Input muxing for registers LVMX0003...LVMX2427 */ | ||
177 | enum { | ||
178 | INPUT_R0, /* 0 */ | ||
179 | INPUT_R1, | ||
180 | INPUT_R2, | ||
181 | INPUT_R3, | ||
182 | INPUT_R4, | ||
183 | INPUT_R5, | ||
184 | INPUT_R6, | ||
185 | INPUT_R7, | ||
186 | INPUT_G0, /* 8 */ | ||
187 | INPUT_G1, | ||
188 | INPUT_G2, | ||
189 | INPUT_G3, | ||
190 | INPUT_G4, | ||
191 | INPUT_G5, | ||
192 | INPUT_G6, | ||
193 | INPUT_G7, | ||
194 | INPUT_B0, /* 16 */ | ||
195 | INPUT_B1, | ||
196 | INPUT_B2, | ||
197 | INPUT_B3, | ||
198 | INPUT_B4, | ||
199 | INPUT_B5, | ||
200 | INPUT_B6, | ||
201 | INPUT_B7, | ||
202 | INPUT_HSYNC, /* 24 */ | ||
203 | INPUT_VSYNC, | ||
204 | INPUT_DE, | ||
205 | LOGIC_0, | ||
206 | /* 28...31 undefined */ | ||
207 | }; | ||
208 | |||
209 | #define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \ | ||
210 | (FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \ | ||
211 | FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0)) | ||
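
FLD_MASK()/FLD_VAL() build contiguous bit fields, and INPUT_MUX() packs four color/sync selectors into one 32-bit bridge register. A standalone check of that packing (plain userspace C; the selector values 5, 4, 3, 2 correspond to INPUT_R5..INPUT_R2 as used in the LVMX0003 write further down):

#include <stdio.h>
#include <stdint.h>

#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \
	(FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \
	 FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))

int main(void)
{
	/* INPUT_R5=5 -> bits 29:24, INPUT_R4=4 -> 20:16,
	 * INPUT_R3=3 -> 12:8, INPUT_R2=2 -> 4:0 */
	uint32_t v = INPUT_MUX(5, 4, 3, 2);

	printf("LVMX0003 = 0x%08x\n", v);	/* 0x05040302 */
	return 0;
}
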
212 | |||
213 | /** | ||
214 | * tc35876x_regw - Write DSI-LVDS bridge register using I2C | ||
215 | * @client: struct i2c_client to use | ||
216 | * @reg: register address | ||
217 | * @value: value to write | ||
218 | * | ||
219 | * Returns 0 on success, or a negative error value. | ||
220 | */ | ||
221 | static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value) | ||
222 | { | ||
223 | int r; | ||
224 | u8 tx_data[] = { | ||
225 | /* NOTE: Register address big-endian, data little-endian. */ | ||
226 | (reg >> 8) & 0xff, | ||
227 | reg & 0xff, | ||
228 | value & 0xff, | ||
229 | (value >> 8) & 0xff, | ||
230 | (value >> 16) & 0xff, | ||
231 | (value >> 24) & 0xff, | ||
232 | }; | ||
233 | struct i2c_msg msgs[] = { | ||
234 | { | ||
235 | .addr = client->addr, | ||
236 | .flags = 0, | ||
237 | .buf = tx_data, | ||
238 | .len = ARRAY_SIZE(tx_data), | ||
239 | }, | ||
240 | }; | ||
241 | |||
242 | r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
243 | if (r < 0) { | ||
244 | dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n", | ||
245 | __func__, reg, value, r); | ||
246 | return r; | ||
247 | } | ||
248 | |||
249 | if (r < ARRAY_SIZE(msgs)) { | ||
250 | dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n", | ||
251 | __func__, reg, value, r); | ||
252 | return -EAGAIN; | ||
253 | } | ||
254 | |||
255 | dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n", | ||
256 | __func__, reg, value); | ||
257 | |||
258 | return 0; | ||
259 | } | ||
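
tc35876x_regw() puts the 16-bit register address on the wire big-endian and the 32-bit value little-endian, all in one I2C write message. As a concrete example, the LVCFG write that enables the LVDS transmitter further down (reg 0x049C, value BIT(0)) produces the bytes 04 9c 01 00 00 00. A small userspace sketch of just the marshalling:

#include <stdio.h>
#include <stdint.h>

/* Same layout as the tx_data[] buffer in tc35876x_regw(). */
static void tc35876x_marshal(uint16_t reg, uint32_t value, uint8_t buf[6])
{
	buf[0] = (reg >> 8) & 0xff;	/* address, big-endian */
	buf[1] = reg & 0xff;
	buf[2] = value & 0xff;		/* value, little-endian */
	buf[3] = (value >> 8) & 0xff;
	buf[4] = (value >> 16) & 0xff;
	buf[5] = (value >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[6];
	int i;

	tc35876x_marshal(0x049C, 0x00000001, buf);	/* LVCFG, LVDS enable */
	for (i = 0; i < 6; i++)
		printf("%02x ", buf[i]);		/* 04 9c 01 00 00 00 */
	printf("\n");
	return 0;
}
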
260 | |||
261 | /** | ||
262 | * tc35876x_regr - Read DSI-LVDS bridge register using I2C | ||
263 | * @client: struct i2c_client to use | ||
264 | * @reg: register address | ||
265 | * @value: pointer for storing the value | ||
266 | * | ||
267 | * Returns 0 on success, or a negative error value. | ||
268 | */ | ||
269 | static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value) | ||
270 | { | ||
271 | int r; | ||
272 | u8 tx_data[] = { | ||
273 | (reg >> 8) & 0xff, | ||
274 | reg & 0xff, | ||
275 | }; | ||
276 | u8 rx_data[4]; | ||
277 | struct i2c_msg msgs[] = { | ||
278 | { | ||
279 | .addr = client->addr, | ||
280 | .flags = 0, | ||
281 | .buf = tx_data, | ||
282 | .len = ARRAY_SIZE(tx_data), | ||
283 | }, | ||
284 | { | ||
285 | .addr = client->addr, | ||
286 | .flags = I2C_M_RD, | ||
287 | .buf = rx_data, | ||
288 | .len = ARRAY_SIZE(rx_data), | ||
289 | }, | ||
290 | }; | ||
291 | |||
292 | r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | ||
293 | if (r < 0) { | ||
294 | dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__, | ||
295 | reg, r); | ||
296 | return r; | ||
297 | } | ||
298 | |||
299 | if (r < ARRAY_SIZE(msgs)) { | ||
300 | dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__, | ||
301 | reg, r); | ||
302 | return -EAGAIN; | ||
303 | } | ||
304 | |||
305 | *value = rx_data[0] << 24 | rx_data[1] << 16 | | ||
306 | rx_data[2] << 8 | rx_data[3]; | ||
307 | |||
308 | dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__, | ||
309 | reg, *value); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state) | ||
315 | { | ||
316 | struct tc35876x_platform_data *pdata; | ||
317 | |||
318 | if (WARN(!tc35876x_client, "%s called before probe", __func__)) | ||
319 | return; | ||
320 | |||
321 | dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state); | ||
322 | |||
323 | pdata = dev_get_platdata(&tc35876x_client->dev); | ||
324 | |||
325 | if (pdata->gpio_bridge_reset == -1) | ||
326 | return; | ||
327 | |||
328 | if (state) { | ||
329 | gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0); | ||
330 | mdelay(10); | ||
331 | } else { | ||
332 | /* Pull MIPI Bridge reset pin to Low */ | ||
333 | gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0); | ||
334 | mdelay(20); | ||
335 | /* Pull MIPI Bridge reset pin to High */ | ||
336 | gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1); | ||
337 | mdelay(40); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | void tc35876x_configure_lvds_bridge(struct drm_device *dev) | ||
342 | { | ||
343 | struct i2c_client *i2c = tc35876x_client; | ||
344 | u32 ppi_lptxtimecnt; | ||
345 | u32 txtagocnt; | ||
346 | u32 txtasurecnt; | ||
347 | u32 id; | ||
348 | |||
349 | if (WARN(!tc35876x_client, "%s called before probe", __func__)) | ||
350 | return; | ||
351 | |||
352 | dev_dbg(&tc35876x_client->dev, "%s\n", __func__); | ||
353 | |||
354 | if (!tc35876x_regr(i2c, IDREG, &id)) | ||
355 | dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id); | ||
356 | else | ||
357 | dev_err(&tc35876x_client->dev, "Cannot read ID\n"); | ||
358 | |||
359 | ppi_lptxtimecnt = 4; | ||
360 | txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4; | ||
361 | txtasurecnt = 3 * ppi_lptxtimecnt / 2; | ||
362 | tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) | | ||
363 | FLD_VAL(txtasurecnt, 10, 0)); | ||
364 | tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0)); | ||
365 | |||
366 | tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); | ||
367 | tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); | ||
368 | tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); | ||
369 | tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); | ||
370 | |||
371 | /* Enable the MIPI & PPI lanes; enable 4 lanes */ | ||
372 | tc35876x_regw(i2c, PPI_LANEENABLE, | ||
373 | BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); | ||
374 | tc35876x_regw(i2c, DSI_LANEENABLE, | ||
375 | BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); | ||
376 | tc35876x_regw(i2c, PPI_STARTPPI, BIT(0)); | ||
377 | tc35876x_regw(i2c, DSI_STARTDSI, BIT(0)); | ||
378 | |||
379 | /* Setting LVDS output frequency */ | ||
380 | tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) | | ||
381 | FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */ | ||
382 | |||
383 | /* Set the video path control register, 0x00000120: VTGen=ON (unclear why) */ | ||
384 | tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5)); | ||
385 | |||
386 | /* Horizontal back porch and horizontal pulse width. 0x00280028 */ | ||
387 | tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0)); | ||
388 | |||
389 | /* Horizontal front porch and horizontal active video size. 0x00500500 */ | ||
390 | tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0)); | ||
391 | |||
392 | /* Vertical back porch and vertical sync pulse width. 0x000e000a */ | ||
393 | tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0)); | ||
394 | |||
395 | /* Vertical front porch and vertical display size. 0x000e0320 */ | ||
396 | tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0)); | ||
397 | |||
398 | /* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */ | ||
399 | tc35876x_regw(i2c, VFUEN, BIT(0)); | ||
400 | |||
401 | /* Soft reset LCD controller. */ | ||
402 | tc35876x_regw(i2c, SYSRST, BIT(2)); | ||
403 | |||
404 | /* LVDS-TX input muxing */ | ||
405 | tc35876x_regw(i2c, LVMX0003, | ||
406 | INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2)); | ||
407 | tc35876x_regw(i2c, LVMX0407, | ||
408 | INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6)); | ||
409 | tc35876x_regw(i2c, LVMX0811, | ||
410 | INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3)); | ||
411 | tc35876x_regw(i2c, LVMX1215, | ||
412 | INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5)); | ||
413 | tc35876x_regw(i2c, LVMX1619, | ||
414 | INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0)); | ||
415 | tc35876x_regw(i2c, LVMX2023, | ||
416 | INPUT_MUX(LOGIC_0, INPUT_B7, INPUT_B6, INPUT_B5)); | ||
417 | tc35876x_regw(i2c, LVMX2427, | ||
418 | INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC)); | ||
419 | |||
420 | /* Enable LVDS transmitter. */ | ||
421 | tc35876x_regw(i2c, LVCFG, BIT(0)); | ||
422 | |||
423 | /* Clear notifications. Don't write reserved bits. The previous code | ||
424 | * wrote 0xffffffff to 0x0288, which must have been an error. */ | ||
425 | tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0)); | ||
426 | } | ||
427 | |||
428 | #define GPIOPWMCTRL 0x38F | ||
429 | #define PWM0CLKDIV0 0x62 /* low byte */ | ||
430 | #define PWM0CLKDIV1 0x61 /* high byte */ | ||
431 | |||
432 | #define SYSTEMCLK 19200000UL /* 19.2 MHz */ | ||
433 | #define PWM_FREQUENCY 9600 /* Hz */ | ||
434 | |||
435 | /* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */ | ||
436 | static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f) | ||
437 | { | ||
438 | return (baseclk - f) / f; | ||
439 | } | ||
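
With SYSTEMCLK = 19.2 MHz and PWM_FREQUENCY = 9600 Hz, calc_clkdiv() yields (19200000 - 9600) / 9600 = 1999 (0x07cf), and the PWM then runs at 19200000 / (1999 + 1) = 9600 Hz exactly. A standalone check of the arithmetic:

#include <stdio.h>

#define SYSTEMCLK	19200000UL	/* 19.2 MHz */
#define PWM_FREQUENCY	9600		/* Hz */

/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
static unsigned int calc_clkdiv(unsigned long baseclk, unsigned int f)
{
	return (baseclk - f) / f;
}

int main(void)
{
	unsigned int clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);

	printf("clkdiv = %u (0x%04x)\n", clkdiv, clkdiv);	/* 1999, 0x07cf */
	printf("f = %lu Hz\n", SYSTEMCLK / (clkdiv + 1));	/* 9600 */
	return 0;
}
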
440 | |||
441 | static void tc35876x_brightness_init(struct drm_device *dev) | ||
442 | { | ||
443 | int ret; | ||
444 | u8 pwmctrl; | ||
445 | u16 clkdiv; | ||
446 | |||
447 | /* Make sure the PWM reference is the 19.2 MHz system clock. Read first | ||
448 | * instead of setting directly to catch potential conflicts between PWM | ||
449 | * users. */ | ||
450 | ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl); | ||
451 | if (ret || pwmctrl != 0x01) { | ||
452 | if (ret) | ||
453 | dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n"); | ||
454 | else | ||
455 | dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl); | ||
456 | |||
457 | ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01); | ||
458 | if (ret) | ||
459 | dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n"); | ||
460 | } | ||
461 | |||
462 | clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY); | ||
463 | |||
464 | ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff); | ||
465 | if (!ret) | ||
466 | ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff); | ||
467 | |||
468 | if (ret) | ||
469 | dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n"); | ||
470 | else | ||
471 | dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n", | ||
472 | clkdiv, PWM_FREQUENCY); | ||
473 | } | ||
474 | |||
475 | #define PWM0DUTYCYCLE 0x67 | ||
476 | |||
477 | void tc35876x_brightness_control(struct drm_device *dev, int level) | ||
478 | { | ||
479 | int ret; | ||
480 | u8 duty_val; | ||
481 | u8 panel_duty_val; | ||
482 | |||
483 | level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); | ||
484 | |||
485 | /* PWM duty cycle 0x00...0x63 corresponds to 0...99% */ | ||
486 | duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL; | ||
487 | |||
488 | /* I won't pretend to understand this formula. The panel spec is | ||
489 | * written in rather poor English. | ||
490 | */ | ||
491 | panel_duty_val = (2 * level - 100) * 0xA9 / | ||
492 | MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56; | ||
493 | |||
494 | ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val); | ||
495 | if (ret) | ||
496 | dev_err(&tc35876x_client->dev, "%s: ipc write fail\n", | ||
497 | __func__); | ||
498 | |||
499 | if (cmi_lcd_i2c_client) { | ||
500 | ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, | ||
501 | PANEL_PWM_MAX, panel_duty_val); | ||
502 | if (ret < 0) | ||
503 | dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n", | ||
504 | __func__); | ||
505 | } | ||
506 | } | ||
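
Assuming MDFLD_DSI_BRIGHTNESS_MAX_LEVEL is 100 (the constant comes from the mdfld DSI headers elsewhere in this series; 100 is an assumption here), a 50% request gives a PWM0DUTYCYCLE of 50 * 0x63 / 100 = 0x31 and a PANEL_PWM_MAX of (2*50 - 100) * 0xA9 / 100 + 0x56 = 0x56, while 100% gives 0x63 and 0xff. A quick check of those mappings:

#include <stdio.h>

#define MAX_LEVEL 100	/* assumed value of MDFLD_DSI_BRIGHTNESS_MAX_LEVEL */

int main(void)
{
	int levels[] = { 50, 75, 100 };
	int i;

	for (i = 0; i < 3; i++) {
		int level = levels[i];
		int duty_val = level * 0x63 / MAX_LEVEL;
		int panel_duty_val = (2 * level - 100) * 0xA9 / MAX_LEVEL + 0x56;

		printf("level %3d -> PWM0DUTYCYCLE 0x%02x, PANEL_PWM_MAX 0x%02x\n",
		       level, duty_val, panel_duty_val);
	}
	return 0;
}
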
507 | |||
508 | void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev) | ||
509 | { | ||
510 | struct tc35876x_platform_data *pdata; | ||
511 | |||
512 | if (WARN(!tc35876x_client, "%s called before probe", __func__)) | ||
513 | return; | ||
514 | |||
515 | dev_dbg(&tc35876x_client->dev, "%s\n", __func__); | ||
516 | |||
517 | pdata = dev_get_platdata(&tc35876x_client->dev); | ||
518 | |||
519 | if (pdata->gpio_panel_bl_en != -1) | ||
520 | gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0); | ||
521 | |||
522 | if (pdata->gpio_panel_vadd != -1) | ||
523 | gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0); | ||
524 | } | ||
525 | |||
526 | void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev) | ||
527 | { | ||
528 | struct tc35876x_platform_data *pdata; | ||
529 | struct drm_psb_private *dev_priv = dev->dev_private; | ||
530 | |||
531 | if (WARN(!tc35876x_client, "%s called before probe", __func__)) | ||
532 | return; | ||
533 | |||
534 | dev_dbg(&tc35876x_client->dev, "%s\n", __func__); | ||
535 | |||
536 | pdata = dev_get_platdata(&tc35876x_client->dev); | ||
537 | |||
538 | if (pdata->gpio_panel_vadd != -1) { | ||
539 | gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1); | ||
540 | msleep(260); | ||
541 | } | ||
542 | |||
543 | if (cmi_lcd_i2c_client) { | ||
544 | int ret; | ||
545 | dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n"); | ||
546 | /* Bit 4 is average_saving. Setting it to 1, the brightness is | ||
547 | * referenced to the average of the frame content. 0 means | ||
548 | * reference to the maximum of frame contents. Bits 3:0 are | ||
549 | * allow_distort. When set to a nonzero value, all color values | ||
550 | * between 255-allow_distort*2 and 255 are mapped to the | ||
551 | * 255-allow_distort*2 value. | ||
552 | */ | ||
553 | ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, | ||
554 | PANEL_ALLOW_DISTORT, 0x10); | ||
555 | if (ret < 0) | ||
556 | dev_err(&cmi_lcd_i2c_client->dev, | ||
557 | "i2c write failed (%d)\n", ret); | ||
558 | ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, | ||
559 | PANEL_BYPASS_PWMI, 0); | ||
560 | if (ret < 0) | ||
561 | dev_err(&cmi_lcd_i2c_client->dev, | ||
562 | "i2c write failed (%d)\n", ret); | ||
563 | /* Set minimum brightness value - this is tunable */ | ||
564 | ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, | ||
565 | PANEL_PWM_MIN, 0x35); | ||
566 | if (ret < 0) | ||
567 | dev_err(&cmi_lcd_i2c_client->dev, | ||
568 | "i2c write failed (%d)\n", ret); | ||
569 | } | ||
570 | |||
571 | if (pdata->gpio_panel_bl_en != -1) | ||
572 | gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1); | ||
573 | |||
574 | tc35876x_brightness_control(dev, dev_priv->brightness_adjusted); | ||
575 | } | ||
576 | |||
577 | static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev) | ||
578 | { | ||
579 | struct drm_display_mode *mode; | ||
580 | |||
581 | dev_dbg(&dev->pdev->dev, "%s\n", __func__); | ||
582 | |||
583 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | ||
584 | if (!mode) | ||
585 | return NULL; | ||
586 | |||
587 | /* FIXME: do this properly. */ | ||
588 | mode->hdisplay = 1280; | ||
589 | mode->vdisplay = 800; | ||
590 | mode->hsync_start = 1360; | ||
591 | mode->hsync_end = 1400; | ||
592 | mode->htotal = 1440; | ||
593 | mode->vsync_start = 814; | ||
594 | mode->vsync_end = 824; | ||
595 | mode->vtotal = 838; | ||
596 | mode->clock = 33324 << 1; | ||
597 | |||
598 | dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay); | ||
599 | dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay); | ||
600 | dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start); | ||
601 | dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end); | ||
602 | dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal); | ||
603 | dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start); | ||
604 | dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end); | ||
605 | dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal); | ||
606 | dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock); | ||
607 | |||
608 | drm_mode_set_name(mode); | ||
609 | drm_mode_set_crtcinfo(mode, 0); | ||
610 | |||
611 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
612 | |||
613 | return mode; | ||
614 | } | ||
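
The hard-coded DV1 timings above imply a refresh rate of clock / (htotal * vtotal): with clock = 33324 << 1 = 66648 kHz, htotal = 1440 and vtotal = 838, that is 66648000 / (1440 * 838), about 55.2 Hz. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	int clock_khz = 33324 << 1;		/* 66648 kHz, as set above */
	int htotal = 1440, vtotal = 838;
	double refresh = clock_khz * 1000.0 / (htotal * vtotal);

	printf("refresh = %.1f Hz\n", refresh);	/* about 55.2 Hz */
	return 0;
}
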
615 | |||
616 | /* DV1 Active area 216.96 x 135.6 mm */ | ||
617 | #define DV1_PANEL_WIDTH 217 | ||
618 | #define DV1_PANEL_HEIGHT 136 | ||
619 | |||
620 | static int tc35876x_get_panel_info(struct drm_device *dev, int pipe, | ||
621 | struct panel_info *pi) | ||
622 | { | ||
623 | if (!dev || !pi) | ||
624 | return -EINVAL; | ||
625 | |||
626 | pi->width_mm = DV1_PANEL_WIDTH; | ||
627 | pi->height_mm = DV1_PANEL_HEIGHT; | ||
628 | |||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | static int tc35876x_bridge_probe(struct i2c_client *client, | ||
633 | const struct i2c_device_id *id) | ||
634 | { | ||
635 | struct tc35876x_platform_data *pdata; | ||
636 | |||
637 | dev_info(&client->dev, "%s\n", __func__); | ||
638 | |||
639 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
640 | dev_err(&client->dev, "%s: i2c_check_functionality() failed\n", | ||
641 | __func__); | ||
642 | return -ENODEV; | ||
643 | } | ||
644 | |||
645 | pdata = dev_get_platdata(&client->dev); | ||
646 | if (!pdata) { | ||
647 | dev_err(&client->dev, "%s: no platform data\n", __func__); | ||
648 | return -ENODEV; | ||
649 | } | ||
650 | |||
651 | if (pdata->gpio_bridge_reset != -1) { | ||
652 | gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset"); | ||
653 | gpio_direction_output(pdata->gpio_bridge_reset, 0); | ||
654 | } | ||
655 | |||
656 | if (pdata->gpio_panel_bl_en != -1) { | ||
657 | gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en"); | ||
658 | gpio_direction_output(pdata->gpio_panel_bl_en, 0); | ||
659 | } | ||
660 | |||
661 | if (pdata->gpio_panel_vadd != -1) { | ||
662 | gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd"); | ||
663 | gpio_direction_output(pdata->gpio_panel_vadd, 0); | ||
664 | } | ||
665 | |||
666 | tc35876x_client = client; | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static int tc35876x_bridge_remove(struct i2c_client *client) | ||
672 | { | ||
673 | struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev); | ||
674 | |||
675 | dev_dbg(&client->dev, "%s\n", __func__); | ||
676 | |||
677 | if (pdata->gpio_bridge_reset != -1) | ||
678 | gpio_free(pdata->gpio_bridge_reset); | ||
679 | |||
680 | if (pdata->gpio_panel_bl_en != -1) | ||
681 | gpio_free(pdata->gpio_panel_bl_en); | ||
682 | |||
683 | if (pdata->gpio_panel_vadd != -1) | ||
684 | gpio_free(pdata->gpio_panel_vadd); | ||
685 | |||
686 | tc35876x_client = NULL; | ||
687 | |||
688 | return 0; | ||
689 | } | ||
690 | |||
691 | static const struct i2c_device_id tc35876x_bridge_id[] = { | ||
692 | { "i2c_disp_brig", 0 }, | ||
693 | { } | ||
694 | }; | ||
695 | MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id); | ||
696 | |||
697 | static struct i2c_driver tc35876x_bridge_i2c_driver = { | ||
698 | .driver = { | ||
699 | .name = "i2c_disp_brig", | ||
700 | }, | ||
701 | .id_table = tc35876x_bridge_id, | ||
702 | .probe = tc35876x_bridge_probe, | ||
703 | .remove = __devexit_p(tc35876x_bridge_remove), | ||
704 | }; | ||
705 | |||
706 | /* LCD panel I2C */ | ||
707 | static int cmi_lcd_i2c_probe(struct i2c_client *client, | ||
708 | const struct i2c_device_id *id) | ||
709 | { | ||
710 | dev_info(&client->dev, "%s\n", __func__); | ||
711 | |||
712 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
713 | dev_err(&client->dev, "%s: i2c_check_functionality() failed\n", | ||
714 | __func__); | ||
715 | return -ENODEV; | ||
716 | } | ||
717 | |||
718 | cmi_lcd_i2c_client = client; | ||
719 | |||
720 | return 0; | ||
721 | } | ||
722 | |||
723 | static int cmi_lcd_i2c_remove(struct i2c_client *client) | ||
724 | { | ||
725 | dev_dbg(&client->dev, "%s\n", __func__); | ||
726 | |||
727 | cmi_lcd_i2c_client = NULL; | ||
728 | |||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | static const struct i2c_device_id cmi_lcd_i2c_id[] = { | ||
733 | { "cmi-lcd", 0 }, | ||
734 | { } | ||
735 | }; | ||
736 | MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id); | ||
737 | |||
738 | static struct i2c_driver cmi_lcd_i2c_driver = { | ||
739 | .driver = { | ||
740 | .name = "cmi-lcd", | ||
741 | }, | ||
742 | .id_table = cmi_lcd_i2c_id, | ||
743 | .probe = cmi_lcd_i2c_probe, | ||
744 | .remove = __devexit_p(cmi_lcd_i2c_remove), | ||
745 | }; | ||
746 | |||
747 | /* HACK to create I2C device while it's not created by platform code */ | ||
748 | #define CMI_LCD_I2C_ADAPTER 2 | ||
749 | #define CMI_LCD_I2C_ADDR 0x60 | ||
750 | |||
751 | static int cmi_lcd_hack_create_device(void) | ||
752 | { | ||
753 | struct i2c_adapter *adapter; | ||
754 | struct i2c_client *client; | ||
755 | struct i2c_board_info info = { | ||
756 | .type = "cmi-lcd", | ||
757 | .addr = CMI_LCD_I2C_ADDR, | ||
758 | }; | ||
759 | |||
760 | pr_debug("%s\n", __func__); | ||
761 | |||
762 | adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER); | ||
763 | if (!adapter) { | ||
764 | pr_err("%s: i2c_get_adapter(%d) failed\n", __func__, | ||
765 | CMI_LCD_I2C_ADAPTER); | ||
766 | return -EINVAL; | ||
767 | } | ||
768 | |||
769 | client = i2c_new_device(adapter, &info); | ||
770 | if (!client) { | ||
771 | pr_err("%s: i2c_new_device() failed\n", __func__); | ||
772 | i2c_put_adapter(adapter); | ||
773 | return -EINVAL; | ||
774 | } | ||
775 | |||
776 | return 0; | ||
777 | } | ||
778 | |||
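The I2C client created by this hack is never unregistered. A hedged sketch of the matching teardown, assuming the handle returned by i2c_new_device() had been kept in a file-scope pointer (the variable below is hypothetical; the patch does not store it):

	/* Hypothetical cleanup for the hack above. */
	static struct i2c_client *cmi_lcd_hack_client;

	static void cmi_lcd_hack_remove_device(void)
	{
		struct i2c_adapter *adapter;

		if (!cmi_lcd_hack_client)
			return;

		adapter = cmi_lcd_hack_client->adapter;
		i2c_unregister_device(cmi_lcd_hack_client);
		i2c_put_adapter(adapter);	/* drop the ref taken at creation */
		cmi_lcd_hack_client = NULL;
	}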
779 | static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = { | ||
780 | .dpms = mdfld_dsi_dpi_dpms, | ||
781 | .mode_fixup = mdfld_dsi_dpi_mode_fixup, | ||
782 | .prepare = mdfld_dsi_dpi_prepare, | ||
783 | .mode_set = mdfld_dsi_dpi_mode_set, | ||
784 | .commit = mdfld_dsi_dpi_commit, | ||
785 | }; | ||
786 | |||
787 | static const struct drm_encoder_funcs tc35876x_encoder_funcs = { | ||
788 | .destroy = drm_encoder_cleanup, | ||
789 | }; | ||
790 | |||
791 | const struct panel_funcs mdfld_tc35876x_funcs = { | ||
792 | .encoder_funcs = &tc35876x_encoder_funcs, | ||
793 | .encoder_helper_funcs = &tc35876x_encoder_helper_funcs, | ||
794 | .get_config_mode = tc35876x_get_config_mode, | ||
795 | .get_panel_info = tc35876x_get_panel_info, | ||
796 | }; | ||
797 | |||
798 | void tc35876x_init(struct drm_device *dev) | ||
799 | { | ||
800 | int r; | ||
801 | |||
802 | dev_dbg(&dev->pdev->dev, "%s\n", __func__); | ||
803 | |||
804 | cmi_lcd_hack_create_device(); | ||
805 | |||
806 | r = i2c_add_driver(&cmi_lcd_i2c_driver); | ||
807 | if (r < 0) | ||
808 | dev_err(&dev->pdev->dev, | ||
809 | "%s: i2c_add_driver() for %s failed (%d)\n", | ||
810 | __func__, cmi_lcd_i2c_driver.driver.name, r); | ||
811 | |||
812 | r = i2c_add_driver(&tc35876x_bridge_i2c_driver); | ||
813 | if (r < 0) | ||
814 | dev_err(&dev->pdev->dev, | ||
815 | "%s: i2c_add_driver() for %s failed (%d)\n", | ||
816 | __func__, tc35876x_bridge_i2c_driver.driver.name, r); | ||
817 | |||
818 | tc35876x_brightness_init(dev); | ||
819 | } | ||
820 | |||
821 | void tc35876x_exit(void) | ||
822 | { | ||
823 | pr_debug("%s\n", __func__); | ||
824 | |||
825 | i2c_del_driver(&tc35876x_bridge_i2c_driver); | ||
826 | |||
827 | if (cmi_lcd_i2c_client) | ||
828 | i2c_del_driver(&cmi_lcd_i2c_driver); | ||
829 | } | ||
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h new file mode 100644 index 000000000000..b14b7f9e7d1e --- /dev/null +++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright © 2011 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef __MDFLD_DSI_LVDS_BRIDGE_H__ | ||
26 | #define __MDFLD_DSI_LVDS_BRIDGE_H__ | ||
27 | |||
28 | void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state); | ||
29 | void tc35876x_configure_lvds_bridge(struct drm_device *dev); | ||
30 | void tc35876x_brightness_control(struct drm_device *dev, int level); | ||
31 | void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev); | ||
32 | void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev); | ||
33 | void tc35876x_init(struct drm_device *dev); | ||
34 | void tc35876x_exit(void); | ||
35 | |||
36 | extern const struct panel_funcs mdfld_tc35876x_funcs; | ||
37 | |||
38 | #endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/ | ||
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 07d55df6623e..d3f2e8785010 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c | |||
@@ -252,10 +252,7 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder, | |||
252 | 252 | ||
253 | drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names); | 253 | drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names); |
254 | 254 | ||
255 | priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, | 255 | priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2); |
256 | "scale", 2); | ||
257 | priv->scale_property->values[0] = 0; | ||
258 | priv->scale_property->values[1] = 2; | ||
259 | 256 | ||
260 | drm_connector_attach_property(connector, conf->tv_select_subconnector_property, | 257 | drm_connector_attach_property(connector, conf->tv_select_subconnector_property, |
261 | priv->select_subconnector); | 258 | priv->select_subconnector); |
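The single call above relies on drm_property_create_range() doing both the allocation and the min/max setup that the removed lines performed by hand. As an illustration of the pattern only (a sketch, not a change to this driver), a caller that also handles allocation failure would look like:

	priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
	if (!priv->scale_property)
		return -ENOMEM;	/* the helper returns NULL on allocation failure */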
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 7f4b4e10246e..2c8a60c3b98e 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
@@ -99,7 +99,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
99 | buf_priv = buf->dev_private; | 99 | buf_priv = buf->dev_private; |
100 | 100 | ||
101 | vma->vm_flags |= (VM_IO | VM_DONTCOPY); | 101 | vma->vm_flags |= (VM_IO | VM_DONTCOPY); |
102 | vma->vm_file = filp; | ||
103 | 102 | ||
104 | buf_priv->currently_mapped = I810_BUF_MAPPED; | 103 | buf_priv->currently_mapped = I810_BUF_MAPPED; |
105 | 104 | ||
@@ -1208,6 +1207,8 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags) | |||
1208 | dev->types[8] = _DRM_STAT_SECONDARY; | 1207 | dev->types[8] = _DRM_STAT_SECONDARY; |
1209 | dev->types[9] = _DRM_STAT_DMA; | 1208 | dev->types[9] = _DRM_STAT_DMA; |
1210 | 1209 | ||
1210 | pci_set_master(dev->pdev); | ||
1211 | |||
1211 | return 0; | 1212 | return 0; |
1212 | } | 1213 | } |
1213 | 1214 | ||
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 808b255d7fc6..ce7fc77678b4 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | 3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. |
4 | 4 | ||
5 | ccflags-y := -Iinclude/drm | 5 | ccflags-y := -Iinclude/drm |
6 | i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | 6 | i915-y := i915_drv.o i915_dma.o i915_irq.o \ |
7 | i915_debugfs.o \ | 7 | i915_debugfs.o \ |
8 | i915_suspend.o \ | 8 | i915_suspend.o \ |
9 | i915_gem.o \ | 9 | i915_gem.o \ |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index deaa657292b4..fdb7ccefffbd 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -83,6 +83,7 @@ static int i915_capabilities(struct seq_file *m, void *data) | |||
83 | B(supports_tv); | 83 | B(supports_tv); |
84 | B(has_bsd_ring); | 84 | B(has_bsd_ring); |
85 | B(has_blt_ring); | 85 | B(has_blt_ring); |
86 | B(has_llc); | ||
86 | #undef B | 87 | #undef B |
87 | 88 | ||
88 | return 0; | 89 | return 0; |
@@ -563,45 +564,6 @@ static int i915_hws_info(struct seq_file *m, void *data) | |||
563 | return 0; | 564 | return 0; |
564 | } | 565 | } |
565 | 566 | ||
566 | static void i915_dump_object(struct seq_file *m, | ||
567 | struct io_mapping *mapping, | ||
568 | struct drm_i915_gem_object *obj) | ||
569 | { | ||
570 | int page, page_count, i; | ||
571 | |||
572 | page_count = obj->base.size / PAGE_SIZE; | ||
573 | for (page = 0; page < page_count; page++) { | ||
574 | u32 *mem = io_mapping_map_wc(mapping, | ||
575 | obj->gtt_offset + page * PAGE_SIZE); | ||
576 | for (i = 0; i < PAGE_SIZE; i += 4) | ||
577 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); | ||
578 | io_mapping_unmap(mem); | ||
579 | } | ||
580 | } | ||
581 | |||
582 | static int i915_batchbuffer_info(struct seq_file *m, void *data) | ||
583 | { | ||
584 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
585 | struct drm_device *dev = node->minor->dev; | ||
586 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
587 | struct drm_i915_gem_object *obj; | ||
588 | int ret; | ||
589 | |||
590 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
591 | if (ret) | ||
592 | return ret; | ||
593 | |||
594 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
595 | if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { | ||
596 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); | ||
597 | i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); | ||
598 | } | ||
599 | } | ||
600 | |||
601 | mutex_unlock(&dev->struct_mutex); | ||
602 | return 0; | ||
603 | } | ||
604 | |||
605 | static int i915_ringbuffer_data(struct seq_file *m, void *data) | 567 | static int i915_ringbuffer_data(struct seq_file *m, void *data) |
606 | { | 568 | { |
607 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 569 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
@@ -668,9 +630,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) | |||
668 | static const char *ring_str(int ring) | 630 | static const char *ring_str(int ring) |
669 | { | 631 | { |
670 | switch (ring) { | 632 | switch (ring) { |
671 | case RING_RENDER: return " render"; | 633 | case RCS: return "render"; |
672 | case RING_BSD: return " bsd"; | 634 | case VCS: return "bsd"; |
673 | case RING_BLT: return " blt"; | 635 | case BCS: return "blt"; |
674 | default: return ""; | 636 | default: return ""; |
675 | } | 637 | } |
676 | } | 638 | } |
@@ -713,7 +675,7 @@ static void print_error_buffers(struct seq_file *m, | |||
713 | seq_printf(m, "%s [%d]:\n", name, count); | 675 | seq_printf(m, "%s [%d]:\n", name, count); |
714 | 676 | ||
715 | while (count--) { | 677 | while (count--) { |
716 | seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", | 678 | seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", |
717 | err->gtt_offset, | 679 | err->gtt_offset, |
718 | err->size, | 680 | err->size, |
719 | err->read_domains, | 681 | err->read_domains, |
@@ -723,6 +685,7 @@ static void print_error_buffers(struct seq_file *m, | |||
723 | tiling_flag(err->tiling), | 685 | tiling_flag(err->tiling), |
724 | dirty_flag(err->dirty), | 686 | dirty_flag(err->dirty), |
725 | purgeable_flag(err->purgeable), | 687 | purgeable_flag(err->purgeable), |
688 | err->ring != -1 ? " " : "", | ||
726 | ring_str(err->ring), | 689 | ring_str(err->ring), |
727 | cache_level_str(err->cache_level)); | 690 | cache_level_str(err->cache_level)); |
728 | 691 | ||
@@ -736,6 +699,38 @@ static void print_error_buffers(struct seq_file *m, | |||
736 | } | 699 | } |
737 | } | 700 | } |
738 | 701 | ||
702 | static void i915_ring_error_state(struct seq_file *m, | ||
703 | struct drm_device *dev, | ||
704 | struct drm_i915_error_state *error, | ||
705 | unsigned ring) | ||
706 | { | ||
707 | seq_printf(m, "%s command stream:\n", ring_str(ring)); | ||
708 | seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); | ||
709 | seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); | ||
710 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); | ||
711 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); | ||
712 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); | ||
713 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); | ||
714 | if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { | ||
715 | seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); | ||
716 | seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); | ||
717 | } | ||
718 | if (INTEL_INFO(dev)->gen >= 4) | ||
719 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); | ||
720 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); | ||
721 | if (INTEL_INFO(dev)->gen >= 6) { | ||
722 | seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); | ||
723 | seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); | ||
724 | seq_printf(m, " SYNC_0: 0x%08x\n", | ||
725 | error->semaphore_mboxes[ring][0]); | ||
726 | seq_printf(m, " SYNC_1: 0x%08x\n", | ||
727 | error->semaphore_mboxes[ring][1]); | ||
728 | } | ||
729 | seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); | ||
730 | seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); | ||
731 | seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); | ||
732 | } | ||
733 | |||
739 | static int i915_error_state(struct seq_file *m, void *unused) | 734 | static int i915_error_state(struct seq_file *m, void *unused) |
740 | { | 735 | { |
741 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 736 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
@@ -743,7 +738,7 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
743 | drm_i915_private_t *dev_priv = dev->dev_private; | 738 | drm_i915_private_t *dev_priv = dev->dev_private; |
744 | struct drm_i915_error_state *error; | 739 | struct drm_i915_error_state *error; |
745 | unsigned long flags; | 740 | unsigned long flags; |
746 | int i, page, offset, elt; | 741 | int i, j, page, offset, elt; |
747 | 742 | ||
748 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 743 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
749 | if (!dev_priv->first_error) { | 744 | if (!dev_priv->first_error) { |
@@ -758,35 +753,20 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
758 | seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); | 753 | seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); |
759 | seq_printf(m, "EIR: 0x%08x\n", error->eir); | 754 | seq_printf(m, "EIR: 0x%08x\n", error->eir); |
760 | seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); | 755 | seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); |
756 | |||
757 | for (i = 0; i < dev_priv->num_fence_regs; i++) | ||
758 | seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); | ||
759 | |||
761 | if (INTEL_INFO(dev)->gen >= 6) { | 760 | if (INTEL_INFO(dev)->gen >= 6) { |
762 | seq_printf(m, "ERROR: 0x%08x\n", error->error); | 761 | seq_printf(m, "ERROR: 0x%08x\n", error->error); |
763 | seq_printf(m, "Blitter command stream:\n"); | 762 | seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); |
764 | seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); | ||
765 | seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); | ||
766 | seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); | ||
767 | seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); | ||
768 | seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); | ||
769 | seq_printf(m, "Video (BSD) command stream:\n"); | ||
770 | seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); | ||
771 | seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); | ||
772 | seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); | ||
773 | seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); | ||
774 | seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); | ||
775 | } | ||
776 | seq_printf(m, "Render command stream:\n"); | ||
777 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); | ||
778 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); | ||
779 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); | ||
780 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); | ||
781 | if (INTEL_INFO(dev)->gen >= 4) { | ||
782 | seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); | ||
783 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps); | ||
784 | } | 763 | } |
785 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); | ||
786 | seq_printf(m, " seqno: 0x%08x\n", error->seqno); | ||
787 | 764 | ||
788 | for (i = 0; i < dev_priv->num_fence_regs; i++) | 765 | i915_ring_error_state(m, dev, error, RCS); |
789 | seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); | 766 | if (HAS_BLT(dev)) |
767 | i915_ring_error_state(m, dev, error, BCS); | ||
768 | if (HAS_BSD(dev)) | ||
769 | i915_ring_error_state(m, dev, error, VCS); | ||
790 | 770 | ||
791 | if (error->active_bo) | 771 | if (error->active_bo) |
792 | print_error_buffers(m, "Active", | 772 | print_error_buffers(m, "Active", |
@@ -798,10 +778,10 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
798 | error->pinned_bo, | 778 | error->pinned_bo, |
799 | error->pinned_bo_count); | 779 | error->pinned_bo_count); |
800 | 780 | ||
801 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { | 781 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { |
802 | if (error->batchbuffer[i]) { | 782 | struct drm_i915_error_object *obj; |
803 | struct drm_i915_error_object *obj = error->batchbuffer[i]; | ||
804 | 783 | ||
784 | if ((obj = error->ring[i].batchbuffer)) { | ||
805 | seq_printf(m, "%s --- gtt_offset = 0x%08x\n", | 785 | seq_printf(m, "%s --- gtt_offset = 0x%08x\n", |
806 | dev_priv->ring[i].name, | 786 | dev_priv->ring[i].name, |
807 | obj->gtt_offset); | 787 | obj->gtt_offset); |
@@ -813,11 +793,20 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
813 | } | 793 | } |
814 | } | 794 | } |
815 | } | 795 | } |
816 | } | ||
817 | 796 | ||
818 | for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { | 797 | if (error->ring[i].num_requests) { |
819 | if (error->ringbuffer[i]) { | 798 | seq_printf(m, "%s --- %d requests\n", |
820 | struct drm_i915_error_object *obj = error->ringbuffer[i]; | 799 | dev_priv->ring[i].name, |
800 | error->ring[i].num_requests); | ||
801 | for (j = 0; j < error->ring[i].num_requests; j++) { | ||
802 | seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", | ||
803 | error->ring[i].requests[j].seqno, | ||
804 | error->ring[i].requests[j].jiffies, | ||
805 | error->ring[i].requests[j].tail); | ||
806 | } | ||
807 | } | ||
808 | |||
809 | if ((obj = error->ring[i].ringbuffer)) { | ||
821 | seq_printf(m, "%s --- ringbuffer = 0x%08x\n", | 810 | seq_printf(m, "%s --- ringbuffer = 0x%08x\n", |
822 | dev_priv->ring[i].name, | 811 | dev_priv->ring[i].name, |
823 | obj->gtt_offset); | 812 | obj->gtt_offset); |
@@ -1414,9 +1403,108 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) | |||
1414 | return 0; | 1403 | return 0; |
1415 | } | 1404 | } |
1416 | 1405 | ||
1406 | static const char *swizzle_string(unsigned swizzle) | ||
1407 | { | ||
1408 | switch(swizzle) { | ||
1409 | case I915_BIT_6_SWIZZLE_NONE: | ||
1410 | return "none"; | ||
1411 | case I915_BIT_6_SWIZZLE_9: | ||
1412 | return "bit9"; | ||
1413 | case I915_BIT_6_SWIZZLE_9_10: | ||
1414 | return "bit9/bit10"; | ||
1415 | case I915_BIT_6_SWIZZLE_9_11: | ||
1416 | return "bit9/bit11"; | ||
1417 | case I915_BIT_6_SWIZZLE_9_10_11: | ||
1418 | return "bit9/bit10/bit11"; | ||
1419 | case I915_BIT_6_SWIZZLE_9_17: | ||
1420 | return "bit9/bit17"; | ||
1421 | case I915_BIT_6_SWIZZLE_9_10_17: | ||
1422 | return "bit9/bit10/bit17"; | ||
1423 | case I915_BIT_6_SWIZZLE_UNKNOWN: | ||
1424 | return "unkown"; | ||
1425 | } | ||
1426 | |||
1427 | return "bug"; | ||
1428 | } | ||
1429 | |||
1430 | static int i915_swizzle_info(struct seq_file *m, void *data) | ||
1431 | { | ||
1432 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1433 | struct drm_device *dev = node->minor->dev; | ||
1434 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1435 | |||
1436 | mutex_lock(&dev->struct_mutex); | ||
1437 | seq_printf(m, "bit6 swizzle for X-tiling = %s\n", | ||
1438 | swizzle_string(dev_priv->mm.bit_6_swizzle_x)); | ||
1439 | seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", | ||
1440 | swizzle_string(dev_priv->mm.bit_6_swizzle_y)); | ||
1441 | |||
1442 | if (IS_GEN3(dev) || IS_GEN4(dev)) { | ||
1443 | seq_printf(m, "DDC = 0x%08x\n", | ||
1444 | I915_READ(DCC)); | ||
1445 | seq_printf(m, "C0DRB3 = 0x%04x\n", | ||
1446 | I915_READ16(C0DRB3)); | ||
1447 | seq_printf(m, "C1DRB3 = 0x%04x\n", | ||
1448 | I915_READ16(C1DRB3)); | ||
1449 | } else if (IS_GEN6(dev) || IS_GEN7(dev)) { | ||
1450 | seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", | ||
1451 | I915_READ(MAD_DIMM_C0)); | ||
1452 | seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", | ||
1453 | I915_READ(MAD_DIMM_C1)); | ||
1454 | seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", | ||
1455 | I915_READ(MAD_DIMM_C2)); | ||
1456 | seq_printf(m, "TILECTL = 0x%08x\n", | ||
1457 | I915_READ(TILECTL)); | ||
1458 | seq_printf(m, "ARB_MODE = 0x%08x\n", | ||
1459 | I915_READ(ARB_MODE)); | ||
1460 | seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", | ||
1461 | I915_READ(DISP_ARB_CTL)); | ||
1462 | } | ||
1463 | mutex_unlock(&dev->struct_mutex); | ||
1464 | |||
1465 | return 0; | ||
1466 | } | ||
1467 | |||
1468 | static int i915_ppgtt_info(struct seq_file *m, void *data) | ||
1469 | { | ||
1470 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1471 | struct drm_device *dev = node->minor->dev; | ||
1472 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1473 | struct intel_ring_buffer *ring; | ||
1474 | int i, ret; | ||
1475 | |||
1476 | |||
1477 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
1478 | if (ret) | ||
1479 | return ret; | ||
1480 | if (INTEL_INFO(dev)->gen == 6) | ||
1481 | seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); | ||
1482 | |||
1483 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
1484 | ring = &dev_priv->ring[i]; | ||
1485 | |||
1486 | seq_printf(m, "%s\n", ring->name); | ||
1487 | if (INTEL_INFO(dev)->gen == 7) | ||
1488 | seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); | ||
1489 | seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); | ||
1490 | seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); | ||
1491 | seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); | ||
1492 | } | ||
1493 | if (dev_priv->mm.aliasing_ppgtt) { | ||
1494 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
1495 | |||
1496 | seq_printf(m, "aliasing PPGTT:\n"); | ||
1497 | seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); | ||
1498 | } | ||
1499 | seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); | ||
1500 | mutex_unlock(&dev->struct_mutex); | ||
1501 | |||
1502 | return 0; | ||
1503 | } | ||
1504 | |||
1417 | static int | 1505 | static int |
1418 | i915_wedged_open(struct inode *inode, | 1506 | i915_debugfs_common_open(struct inode *inode, |
1419 | struct file *filp) | 1507 | struct file *filp) |
1420 | { | 1508 | { |
1421 | filp->private_data = inode->i_private; | 1509 | filp->private_data = inode->i_private; |
1422 | return 0; | 1510 | return 0; |
@@ -1472,20 +1560,12 @@ i915_wedged_write(struct file *filp, | |||
1472 | 1560 | ||
1473 | static const struct file_operations i915_wedged_fops = { | 1561 | static const struct file_operations i915_wedged_fops = { |
1474 | .owner = THIS_MODULE, | 1562 | .owner = THIS_MODULE, |
1475 | .open = i915_wedged_open, | 1563 | .open = i915_debugfs_common_open, |
1476 | .read = i915_wedged_read, | 1564 | .read = i915_wedged_read, |
1477 | .write = i915_wedged_write, | 1565 | .write = i915_wedged_write, |
1478 | .llseek = default_llseek, | 1566 | .llseek = default_llseek, |
1479 | }; | 1567 | }; |
1480 | 1568 | ||
1481 | static int | ||
1482 | i915_max_freq_open(struct inode *inode, | ||
1483 | struct file *filp) | ||
1484 | { | ||
1485 | filp->private_data = inode->i_private; | ||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | static ssize_t | 1569 | static ssize_t |
1490 | i915_max_freq_read(struct file *filp, | 1570 | i915_max_freq_read(struct file *filp, |
1491 | char __user *ubuf, | 1571 | char __user *ubuf, |
@@ -1542,20 +1622,12 @@ i915_max_freq_write(struct file *filp, | |||
1542 | 1622 | ||
1543 | static const struct file_operations i915_max_freq_fops = { | 1623 | static const struct file_operations i915_max_freq_fops = { |
1544 | .owner = THIS_MODULE, | 1624 | .owner = THIS_MODULE, |
1545 | .open = i915_max_freq_open, | 1625 | .open = i915_debugfs_common_open, |
1546 | .read = i915_max_freq_read, | 1626 | .read = i915_max_freq_read, |
1547 | .write = i915_max_freq_write, | 1627 | .write = i915_max_freq_write, |
1548 | .llseek = default_llseek, | 1628 | .llseek = default_llseek, |
1549 | }; | 1629 | }; |
1550 | 1630 | ||
1551 | static int | ||
1552 | i915_cache_sharing_open(struct inode *inode, | ||
1553 | struct file *filp) | ||
1554 | { | ||
1555 | filp->private_data = inode->i_private; | ||
1556 | return 0; | ||
1557 | } | ||
1558 | |||
1559 | static ssize_t | 1631 | static ssize_t |
1560 | i915_cache_sharing_read(struct file *filp, | 1632 | i915_cache_sharing_read(struct file *filp, |
1561 | char __user *ubuf, | 1633 | char __user *ubuf, |
@@ -1621,7 +1693,7 @@ i915_cache_sharing_write(struct file *filp, | |||
1621 | 1693 | ||
1622 | static const struct file_operations i915_cache_sharing_fops = { | 1694 | static const struct file_operations i915_cache_sharing_fops = { |
1623 | .owner = THIS_MODULE, | 1695 | .owner = THIS_MODULE, |
1624 | .open = i915_cache_sharing_open, | 1696 | .open = i915_debugfs_common_open, |
1625 | .read = i915_cache_sharing_read, | 1697 | .read = i915_cache_sharing_read, |
1626 | .write = i915_cache_sharing_write, | 1698 | .write = i915_cache_sharing_write, |
1627 | .llseek = default_llseek, | 1699 | .llseek = default_llseek, |
@@ -1653,21 +1725,6 @@ drm_add_fake_info_node(struct drm_minor *minor, | |||
1653 | return 0; | 1725 | return 0; |
1654 | } | 1726 | } |
1655 | 1727 | ||
1656 | static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | ||
1657 | { | ||
1658 | struct drm_device *dev = minor->dev; | ||
1659 | struct dentry *ent; | ||
1660 | |||
1661 | ent = debugfs_create_file("i915_wedged", | ||
1662 | S_IRUGO | S_IWUSR, | ||
1663 | root, dev, | ||
1664 | &i915_wedged_fops); | ||
1665 | if (IS_ERR(ent)) | ||
1666 | return PTR_ERR(ent); | ||
1667 | |||
1668 | return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); | ||
1669 | } | ||
1670 | |||
1671 | static int i915_forcewake_open(struct inode *inode, struct file *file) | 1728 | static int i915_forcewake_open(struct inode *inode, struct file *file) |
1672 | { | 1729 | { |
1673 | struct drm_device *dev = inode->i_private; | 1730 | struct drm_device *dev = inode->i_private; |
@@ -1729,34 +1786,22 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) | |||
1729 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); | 1786 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); |
1730 | } | 1787 | } |
1731 | 1788 | ||
1732 | static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor) | 1789 | static int i915_debugfs_create(struct dentry *root, |
1733 | { | 1790 | struct drm_minor *minor, |
1734 | struct drm_device *dev = minor->dev; | 1791 | const char *name, |
1735 | struct dentry *ent; | 1792 | const struct file_operations *fops) |
1736 | |||
1737 | ent = debugfs_create_file("i915_max_freq", | ||
1738 | S_IRUGO | S_IWUSR, | ||
1739 | root, dev, | ||
1740 | &i915_max_freq_fops); | ||
1741 | if (IS_ERR(ent)) | ||
1742 | return PTR_ERR(ent); | ||
1743 | |||
1744 | return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops); | ||
1745 | } | ||
1746 | |||
1747 | static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor) | ||
1748 | { | 1793 | { |
1749 | struct drm_device *dev = minor->dev; | 1794 | struct drm_device *dev = minor->dev; |
1750 | struct dentry *ent; | 1795 | struct dentry *ent; |
1751 | 1796 | ||
1752 | ent = debugfs_create_file("i915_cache_sharing", | 1797 | ent = debugfs_create_file(name, |
1753 | S_IRUGO | S_IWUSR, | 1798 | S_IRUGO | S_IWUSR, |
1754 | root, dev, | 1799 | root, dev, |
1755 | &i915_cache_sharing_fops); | 1800 | fops); |
1756 | if (IS_ERR(ent)) | 1801 | if (IS_ERR(ent)) |
1757 | return PTR_ERR(ent); | 1802 | return PTR_ERR(ent); |
1758 | 1803 | ||
1759 | return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops); | 1804 | return drm_add_fake_info_node(minor, ent, fops); |
1760 | } | 1805 | } |
1761 | 1806 | ||
1762 | static struct drm_info_list i915_debugfs_list[] = { | 1807 | static struct drm_info_list i915_debugfs_list[] = { |
@@ -1782,7 +1827,6 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
1782 | {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, | 1827 | {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, |
1783 | {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, | 1828 | {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, |
1784 | {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, | 1829 | {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, |
1785 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, | ||
1786 | {"i915_error_state", i915_error_state, 0}, | 1830 | {"i915_error_state", i915_error_state, 0}, |
1787 | {"i915_rstdby_delays", i915_rstdby_delays, 0}, | 1831 | {"i915_rstdby_delays", i915_rstdby_delays, 0}, |
1788 | {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, | 1832 | {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, |
@@ -1798,6 +1842,8 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
1798 | {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, | 1842 | {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, |
1799 | {"i915_context_status", i915_context_status, 0}, | 1843 | {"i915_context_status", i915_context_status, 0}, |
1800 | {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, | 1844 | {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, |
1845 | {"i915_swizzle_info", i915_swizzle_info, 0}, | ||
1846 | {"i915_ppgtt_info", i915_ppgtt_info, 0}, | ||
1801 | }; | 1847 | }; |
1802 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) | 1848 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) |
1803 | 1849 | ||
@@ -1805,17 +1851,25 @@ int i915_debugfs_init(struct drm_minor *minor) | |||
1805 | { | 1851 | { |
1806 | int ret; | 1852 | int ret; |
1807 | 1853 | ||
1808 | ret = i915_wedged_create(minor->debugfs_root, minor); | 1854 | ret = i915_debugfs_create(minor->debugfs_root, minor, |
1855 | "i915_wedged", | ||
1856 | &i915_wedged_fops); | ||
1809 | if (ret) | 1857 | if (ret) |
1810 | return ret; | 1858 | return ret; |
1811 | 1859 | ||
1812 | ret = i915_forcewake_create(minor->debugfs_root, minor); | 1860 | ret = i915_forcewake_create(minor->debugfs_root, minor); |
1813 | if (ret) | 1861 | if (ret) |
1814 | return ret; | 1862 | return ret; |
1815 | ret = i915_max_freq_create(minor->debugfs_root, minor); | 1863 | |
1864 | ret = i915_debugfs_create(minor->debugfs_root, minor, | ||
1865 | "i915_max_freq", | ||
1866 | &i915_max_freq_fops); | ||
1816 | if (ret) | 1867 | if (ret) |
1817 | return ret; | 1868 | return ret; |
1818 | ret = i915_cache_sharing_create(minor->debugfs_root, minor); | 1869 | |
1870 | ret = i915_debugfs_create(minor->debugfs_root, minor, | ||
1871 | "i915_cache_sharing", | ||
1872 | &i915_cache_sharing_fops); | ||
1819 | if (ret) | 1873 | if (ret) |
1820 | return ret; | 1874 | return ret; |
1821 | 1875 | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ddfe3d902b2a..9341eb8ce93b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -784,6 +784,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
784 | case I915_PARAM_HAS_GEN7_SOL_RESET: | 784 | case I915_PARAM_HAS_GEN7_SOL_RESET: |
785 | value = 1; | 785 | value = 1; |
786 | break; | 786 | break; |
787 | case I915_PARAM_HAS_LLC: | ||
788 | value = HAS_LLC(dev); | ||
789 | break; | ||
787 | default: | 790 | default: |
788 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 791 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
789 | param->param); | 792 | param->param); |
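The new I915_PARAM_HAS_LLC value is read through the existing GETPARAM ioctl. A minimal userspace sketch, assuming libdrm headers and a kernel carrying this patch; on older kernels the ioctl rejects the unknown parameter and the function falls back to "no LLC":

	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Returns non-zero when the GPU shares the CPU's last-level cache. */
	static int i915_has_llc(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_LLC;
		gp.value = &value;

		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
			return 0;	/* parameter unknown or ioctl failed */

		return value;
	}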
@@ -1193,22 +1196,39 @@ static int i915_load_gem_init(struct drm_device *dev) | |||
1193 | /* Basic memrange allocator for stolen space */ | 1196 | /* Basic memrange allocator for stolen space */ |
1194 | drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); | 1197 | drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); |
1195 | 1198 | ||
1196 | /* Let GEM Manage all of the aperture. | ||
1197 | * | ||
1198 | * However, leave one page at the end still bound to the scratch page. | ||
1199 | * There are a number of places where the hardware apparently | ||
1200 | * prefetches past the end of the object, and we've seen multiple | ||
1201 | * hangs with the GPU head pointer stuck in a batchbuffer bound | ||
1202 | * at the last page of the aperture. One page should be enough to | ||
1203 | * keep any prefetching inside of the aperture. | ||
1204 | */ | ||
1205 | i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); | ||
1206 | |||
1207 | mutex_lock(&dev->struct_mutex); | 1199 | mutex_lock(&dev->struct_mutex); |
1208 | ret = i915_gem_init_ringbuffer(dev); | 1200 | if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) { |
1201 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the | ||
1202 | * aperture accordingly when using aliasing ppgtt. */ | ||
1203 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; | ||
1204 | /* For paranoia keep the guard page in between. */ | ||
1205 | gtt_size -= PAGE_SIZE; | ||
1206 | |||
1207 | i915_gem_do_init(dev, 0, mappable_size, gtt_size); | ||
1208 | |||
1209 | ret = i915_gem_init_aliasing_ppgtt(dev); | ||
1210 | if (ret) | ||
1211 | return ret; | ||
1212 | } else { | ||
1213 | /* Let GEM Manage all of the aperture. | ||
1214 | * | ||
1215 | * However, leave one page at the end still bound to the scratch | ||
1216 | * page. There are a number of places where the hardware | ||
1217 | * apparently prefetches past the end of the object, and we've | ||
1218 | * seen multiple hangs with the GPU head pointer stuck in a | ||
1219 | * batchbuffer bound at the last page of the aperture. One page | ||
1220 | * should be enough to keep any prefetching inside of the | ||
1221 | * aperture. | ||
1222 | */ | ||
1223 | i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); | ||
1224 | } | ||
1225 | |||
1226 | ret = i915_gem_init_hw(dev); | ||
1209 | mutex_unlock(&dev->struct_mutex); | 1227 | mutex_unlock(&dev->struct_mutex); |
1210 | if (ret) | 1228 | if (ret) { |
1229 | i915_gem_cleanup_aliasing_ppgtt(dev); | ||
1211 | return ret; | 1230 | return ret; |
1231 | } | ||
1212 | 1232 | ||
1213 | /* Try to set up FBC with a reasonable compressed buffer size */ | 1233 | /* Try to set up FBC with a reasonable compressed buffer size */ |
1214 | if (I915_HAS_FBC(dev) && i915_powersave) { | 1234 | if (I915_HAS_FBC(dev) && i915_powersave) { |
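In numbers, the aperture shrink above works out as follows, taking the I915_PPGTT_PD_ENTRIES constant introduced by this series and assuming 4 KiB pages: each of the 512 page-directory entries occupies a global GTT PTE slot, each slot maps one page, so the mappable range loses 512 * 4096 bytes = 2 MiB, plus the one guard page kept in between. The macro names below are illustrative, not from the patch:

	/* Illustrative only; not code from the patch. */
	#define GEN6_PPGTT_PD_RESERVE	(I915_PPGTT_PD_ENTRIES * PAGE_SIZE)	/* 2 MiB   */
	#define GEN6_PPGTT_GTT_SHRINK	(GEN6_PPGTT_PD_RESERVE + PAGE_SIZE)	/* + guard */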
@@ -1295,6 +1315,7 @@ cleanup_gem: | |||
1295 | mutex_lock(&dev->struct_mutex); | 1315 | mutex_lock(&dev->struct_mutex); |
1296 | i915_gem_cleanup_ringbuffer(dev); | 1316 | i915_gem_cleanup_ringbuffer(dev); |
1297 | mutex_unlock(&dev->struct_mutex); | 1317 | mutex_unlock(&dev->struct_mutex); |
1318 | i915_gem_cleanup_aliasing_ppgtt(dev); | ||
1298 | cleanup_vga_switcheroo: | 1319 | cleanup_vga_switcheroo: |
1299 | vga_switcheroo_unregister_client(dev->pdev); | 1320 | vga_switcheroo_unregister_client(dev->pdev); |
1300 | cleanup_vga_client: | 1321 | cleanup_vga_client: |
@@ -1930,6 +1951,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1930 | goto free_priv; | 1951 | goto free_priv; |
1931 | } | 1952 | } |
1932 | 1953 | ||
1954 | pci_set_master(dev->pdev); | ||
1955 | |||
1933 | /* overlay on gen2 is broken and can't address above 1G */ | 1956 | /* overlay on gen2 is broken and can't address above 1G */ |
1934 | if (IS_GEN2(dev)) | 1957 | if (IS_GEN2(dev)) |
1935 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1958 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
@@ -2129,7 +2152,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
2129 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | 2152 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
2130 | 2153 | ||
2131 | mutex_lock(&dev->struct_mutex); | 2154 | mutex_lock(&dev->struct_mutex); |
2132 | ret = i915_gpu_idle(dev); | 2155 | ret = i915_gpu_idle(dev, true); |
2133 | if (ret) | 2156 | if (ret) |
2134 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 2157 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
2135 | mutex_unlock(&dev->struct_mutex); | 2158 | mutex_unlock(&dev->struct_mutex); |
@@ -2182,6 +2205,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
2182 | i915_gem_free_all_phys_object(dev); | 2205 | i915_gem_free_all_phys_object(dev); |
2183 | i915_gem_cleanup_ringbuffer(dev); | 2206 | i915_gem_cleanup_ringbuffer(dev); |
2184 | mutex_unlock(&dev->struct_mutex); | 2207 | mutex_unlock(&dev->struct_mutex); |
2208 | i915_gem_cleanup_aliasing_ppgtt(dev); | ||
2185 | if (I915_HAS_FBC(dev) && i915_powersave) | 2209 | if (I915_HAS_FBC(dev) && i915_powersave) |
2186 | i915_cleanup_compression(dev); | 2210 | i915_cleanup_compression(dev); |
2187 | drm_mm_takedown(&dev_priv->mm.stolen); | 2211 | drm_mm_takedown(&dev_priv->mm.stolen); |
@@ -2247,18 +2271,12 @@ void i915_driver_lastclose(struct drm_device * dev) | |||
2247 | 2271 | ||
2248 | i915_gem_lastclose(dev); | 2272 | i915_gem_lastclose(dev); |
2249 | 2273 | ||
2250 | if (dev_priv->agp_heap) | ||
2251 | i915_mem_takedown(&(dev_priv->agp_heap)); | ||
2252 | |||
2253 | i915_dma_cleanup(dev); | 2274 | i915_dma_cleanup(dev); |
2254 | } | 2275 | } |
2255 | 2276 | ||
2256 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | 2277 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
2257 | { | 2278 | { |
2258 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2259 | i915_gem_release(dev, file_priv); | 2279 | i915_gem_release(dev, file_priv); |
2260 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
2261 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | ||
2262 | } | 2280 | } |
2263 | 2281 | ||
2264 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) | 2282 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
@@ -2277,11 +2295,11 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
2277 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | 2295 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
2278 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), | 2296 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), |
2279 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2297 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2280 | DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), | 2298 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
2281 | DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), | 2299 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
2282 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2300 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2283 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | 2301 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
2284 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2302 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2285 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2303 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2286 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), | 2304 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
2287 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | 2305 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 308f81913562..0694e170a338 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -103,6 +103,11 @@ MODULE_PARM_DESC(enable_hangcheck, | |||
103 | "WARNING: Disabling this can cause system wide hangs. " | 103 | "WARNING: Disabling this can cause system wide hangs. " |
104 | "(default: true)"); | 104 | "(default: true)"); |
105 | 105 | ||
106 | bool i915_enable_ppgtt __read_mostly = 1; | ||
107 | module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600); | ||
108 | MODULE_PARM_DESC(i915_enable_ppgtt, | ||
109 | "Enable PPGTT (default: true)"); | ||
110 | |||
106 | static struct drm_driver driver; | 111 | static struct drm_driver driver; |
107 | extern int intel_agp_enabled; | 112 | extern int intel_agp_enabled; |
108 | 113 | ||
@@ -198,7 +203,7 @@ static const struct intel_device_info intel_pineview_info = { | |||
198 | 203 | ||
199 | static const struct intel_device_info intel_ironlake_d_info = { | 204 | static const struct intel_device_info intel_ironlake_d_info = { |
200 | .gen = 5, | 205 | .gen = 5, |
201 | .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, | 206 | .need_gfx_hws = 1, .has_hotplug = 1, |
202 | .has_bsd_ring = 1, | 207 | .has_bsd_ring = 1, |
203 | }; | 208 | }; |
204 | 209 | ||
@@ -214,6 +219,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { | |||
214 | .need_gfx_hws = 1, .has_hotplug = 1, | 219 | .need_gfx_hws = 1, .has_hotplug = 1, |
215 | .has_bsd_ring = 1, | 220 | .has_bsd_ring = 1, |
216 | .has_blt_ring = 1, | 221 | .has_blt_ring = 1, |
222 | .has_llc = 1, | ||
217 | }; | 223 | }; |
218 | 224 | ||
219 | static const struct intel_device_info intel_sandybridge_m_info = { | 225 | static const struct intel_device_info intel_sandybridge_m_info = { |
@@ -222,6 +228,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { | |||
222 | .has_fbc = 1, | 228 | .has_fbc = 1, |
223 | .has_bsd_ring = 1, | 229 | .has_bsd_ring = 1, |
224 | .has_blt_ring = 1, | 230 | .has_blt_ring = 1, |
231 | .has_llc = 1, | ||
225 | }; | 232 | }; |
226 | 233 | ||
227 | static const struct intel_device_info intel_ivybridge_d_info = { | 234 | static const struct intel_device_info intel_ivybridge_d_info = { |
@@ -229,6 +236,7 @@ static const struct intel_device_info intel_ivybridge_d_info = { | |||
229 | .need_gfx_hws = 1, .has_hotplug = 1, | 236 | .need_gfx_hws = 1, .has_hotplug = 1, |
230 | .has_bsd_ring = 1, | 237 | .has_bsd_ring = 1, |
231 | .has_blt_ring = 1, | 238 | .has_blt_ring = 1, |
239 | .has_llc = 1, | ||
232 | }; | 240 | }; |
233 | 241 | ||
234 | static const struct intel_device_info intel_ivybridge_m_info = { | 242 | static const struct intel_device_info intel_ivybridge_m_info = { |
@@ -237,6 +245,7 @@ static const struct intel_device_info intel_ivybridge_m_info = { | |||
237 | .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ | 245 | .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ |
238 | .has_bsd_ring = 1, | 246 | .has_bsd_ring = 1, |
239 | .has_blt_ring = 1, | 247 | .has_blt_ring = 1, |
248 | .has_llc = 1, | ||
240 | }; | 249 | }; |
241 | 250 | ||
242 | static const struct pci_device_id pciidlist[] = { /* aka */ | 251 | static const struct pci_device_id pciidlist[] = { /* aka */ |
@@ -376,16 +385,27 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
376 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | 385 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); |
377 | } | 386 | } |
378 | 387 | ||
388 | static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | ||
389 | { | ||
390 | u32 gtfifodbg; | ||
391 | gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); | ||
392 | if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | ||
393 | "MMIO read or write has been dropped %x\n", gtfifodbg)) | ||
394 | I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | ||
395 | } | ||
396 | |||
379 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | 397 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
380 | { | 398 | { |
381 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 399 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
382 | POSTING_READ(FORCEWAKE); | 400 | /* The below doubles as a POSTING_READ */ |
401 | gen6_gt_check_fifodbg(dev_priv); | ||
383 | } | 402 | } |
384 | 403 | ||
385 | void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | 404 | void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) |
386 | { | 405 | { |
387 | I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); | 406 | I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0); |
388 | POSTING_READ(FORCEWAKE_MT); | 407 | /* The below doubles as a POSTING_READ */ |
408 | gen6_gt_check_fifodbg(dev_priv); | ||
389 | } | 409 | } |
390 | 410 | ||
391 | /* | 411 | /* |
@@ -401,8 +421,10 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
401 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | 421 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); |
402 | } | 422 | } |
403 | 423 | ||
404 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | 424 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) |
405 | { | 425 | { |
426 | int ret = 0; | ||
427 | |||
406 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | 428 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { |
407 | int loop = 500; | 429 | int loop = 500; |
408 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | 430 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
@@ -410,10 +432,13 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | |||
410 | udelay(10); | 432 | udelay(10); |
411 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | 433 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
412 | } | 434 | } |
413 | WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES); | 435 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) |
436 | ++ret; | ||
414 | dev_priv->gt_fifo_count = fifo; | 437 | dev_priv->gt_fifo_count = fifo; |
415 | } | 438 | } |
416 | dev_priv->gt_fifo_count--; | 439 | dev_priv->gt_fifo_count--; |
440 | |||
441 | return ret; | ||
417 | } | 442 | } |
418 | 443 | ||
419 | static int i915_drm_freeze(struct drm_device *dev) | 444 | static int i915_drm_freeze(struct drm_device *dev) |
@@ -494,7 +519,7 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
494 | mutex_lock(&dev->struct_mutex); | 519 | mutex_lock(&dev->struct_mutex); |
495 | dev_priv->mm.suspended = 0; | 520 | dev_priv->mm.suspended = 0; |
496 | 521 | ||
497 | error = i915_gem_init_ringbuffer(dev); | 522 | error = i915_gem_init_hw(dev); |
498 | mutex_unlock(&dev->struct_mutex); | 523 | mutex_unlock(&dev->struct_mutex); |
499 | 524 | ||
500 | if (HAS_PCH_SPLIT(dev)) | 525 | if (HAS_PCH_SPLIT(dev)) |
@@ -633,7 +658,7 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags) | |||
633 | } | 658 | } |
634 | 659 | ||
635 | /** | 660 | /** |
636 | * i965_reset - reset chip after a hang | 661 | * i915_reset - reset chip after a hang |
637 | * @dev: drm device to reset | 662 | * @dev: drm device to reset |
638 | * @flags: reset domains | 663 | * @flags: reset domains |
639 | * | 664 | * |
@@ -709,12 +734,16 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
709 | !dev_priv->mm.suspended) { | 734 | !dev_priv->mm.suspended) { |
710 | dev_priv->mm.suspended = 0; | 735 | dev_priv->mm.suspended = 0; |
711 | 736 | ||
737 | i915_gem_init_swizzling(dev); | ||
738 | |||
712 | dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); | 739 | dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); |
713 | if (HAS_BSD(dev)) | 740 | if (HAS_BSD(dev)) |
714 | dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); | 741 | dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); |
715 | if (HAS_BLT(dev)) | 742 | if (HAS_BLT(dev)) |
716 | dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); | 743 | dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); |
717 | 744 | ||
745 | i915_gem_init_ppgtt(dev); | ||
746 | |||
718 | mutex_unlock(&dev->struct_mutex); | 747 | mutex_unlock(&dev->struct_mutex); |
719 | drm_irq_uninstall(dev); | 748 | drm_irq_uninstall(dev); |
720 | drm_mode_config_reset(dev); | 749 | drm_mode_config_reset(dev); |
@@ -977,11 +1006,15 @@ __i915_read(64, q) | |||
977 | 1006 | ||
978 | #define __i915_write(x, y) \ | 1007 | #define __i915_write(x, y) \ |
979 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1008 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
1009 | u32 __fifo_ret = 0; \ | ||
980 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | 1010 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
981 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1011 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
982 | __gen6_gt_wait_for_fifo(dev_priv); \ | 1012 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
983 | } \ | 1013 | } \ |
984 | write##y(val, dev_priv->regs + reg); \ | 1014 | write##y(val, dev_priv->regs + reg); \ |
1015 | if (unlikely(__fifo_ret)) { \ | ||
1016 | gen6_gt_check_fifodbg(dev_priv); \ | ||
1017 | } \ | ||
985 | } | 1018 | } |
986 | __i915_write(8, b) | 1019 | __i915_write(8, b) |
987 | __i915_write(16, w) | 1020 | __i915_write(16, w) |
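Expanding one of the visible instances makes the new control flow easier to follow; __i915_write(16, w) now generates roughly the function below (the tracepoint call is omitted, so this is a sketch rather than patch text):

	void i915_write16(struct drm_i915_private *dev_priv, u32 reg, u16 val)
	{
		u32 __fifo_ret = 0;

		if (NEEDS_FORCE_WAKE(dev_priv, reg))
			__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);

		writew(val, dev_priv->regs + reg);

		/* A dropped write shows up in GTFIFODBG; warn and clear it. */
		if (unlikely(__fifo_ret))
			gen6_gt_check_fifodbg(dev_priv);
	}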
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9689ca38b2b3..c0f19f572004 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "intel_ringbuffer.h" | 35 | #include "intel_ringbuffer.h" |
36 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
37 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
38 | #include <linux/i2c-algo-bit.h> | ||
38 | #include <drm/intel-gtt.h> | 39 | #include <drm/intel-gtt.h> |
39 | #include <linux/backlight.h> | 40 | #include <linux/backlight.h> |
40 | 41 | ||
@@ -135,6 +136,7 @@ struct drm_i915_fence_reg { | |||
135 | struct list_head lru_list; | 136 | struct list_head lru_list; |
136 | struct drm_i915_gem_object *obj; | 137 | struct drm_i915_gem_object *obj; |
137 | uint32_t setup_seqno; | 138 | uint32_t setup_seqno; |
139 | int pin_count; | ||
138 | }; | 140 | }; |
139 | 141 | ||
140 | struct sdvo_device_mapping { | 142 | struct sdvo_device_mapping { |
@@ -152,33 +154,40 @@ struct drm_i915_error_state { | |||
152 | u32 eir; | 154 | u32 eir; |
153 | u32 pgtbl_er; | 155 | u32 pgtbl_er; |
154 | u32 pipestat[I915_MAX_PIPES]; | 156 | u32 pipestat[I915_MAX_PIPES]; |
155 | u32 ipeir; | 157 | u32 tail[I915_NUM_RINGS]; |
156 | u32 ipehr; | 158 | u32 head[I915_NUM_RINGS]; |
157 | u32 instdone; | 159 | u32 ipeir[I915_NUM_RINGS]; |
158 | u32 acthd; | 160 | u32 ipehr[I915_NUM_RINGS]; |
161 | u32 instdone[I915_NUM_RINGS]; | ||
162 | u32 acthd[I915_NUM_RINGS]; | ||
163 | u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; | ||
164 | /* our own tracking of ring head and tail */ | ||
165 | u32 cpu_ring_head[I915_NUM_RINGS]; | ||
166 | u32 cpu_ring_tail[I915_NUM_RINGS]; | ||
159 | u32 error; /* gen6+ */ | 167 | u32 error; /* gen6+ */ |
160 | u32 bcs_acthd; /* gen6+ blt engine */ | 168 | u32 instpm[I915_NUM_RINGS]; |
161 | u32 bcs_ipehr; | 169 | u32 instps[I915_NUM_RINGS]; |
162 | u32 bcs_ipeir; | ||
163 | u32 bcs_instdone; | ||
164 | u32 bcs_seqno; | ||
165 | u32 vcs_acthd; /* gen6+ bsd engine */ | ||
166 | u32 vcs_ipehr; | ||
167 | u32 vcs_ipeir; | ||
168 | u32 vcs_instdone; | ||
169 | u32 vcs_seqno; | ||
170 | u32 instpm; | ||
171 | u32 instps; | ||
172 | u32 instdone1; | 170 | u32 instdone1; |
173 | u32 seqno; | 171 | u32 seqno[I915_NUM_RINGS]; |
174 | u64 bbaddr; | 172 | u64 bbaddr; |
173 | u32 fault_reg[I915_NUM_RINGS]; | ||
174 | u32 done_reg; | ||
175 | u32 faddr[I915_NUM_RINGS]; | ||
175 | u64 fence[I915_MAX_NUM_FENCES]; | 176 | u64 fence[I915_MAX_NUM_FENCES]; |
176 | struct timeval time; | 177 | struct timeval time; |
177 | struct drm_i915_error_object { | 178 | struct drm_i915_error_ring { |
178 | int page_count; | 179 | struct drm_i915_error_object { |
179 | u32 gtt_offset; | 180 | int page_count; |
180 | u32 *pages[0]; | 181 | u32 gtt_offset; |
181 | } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; | 182 | u32 *pages[0]; |
183 | } *ringbuffer, *batchbuffer; | ||
184 | struct drm_i915_error_request { | ||
185 | long jiffies; | ||
186 | u32 seqno; | ||
187 | u32 tail; | ||
188 | } *requests; | ||
189 | int num_requests; | ||
190 | } ring[I915_NUM_RINGS]; | ||
182 | struct drm_i915_error_buffer { | 191 | struct drm_i915_error_buffer { |
183 | u32 size; | 192 | u32 size; |
184 | u32 name; | 193 | u32 name; |
@@ -191,7 +200,7 @@ struct drm_i915_error_state { | |||
191 | u32 tiling:2; | 200 | u32 tiling:2; |
192 | u32 dirty:1; | 201 | u32 dirty:1; |
193 | u32 purgeable:1; | 202 | u32 purgeable:1; |
194 | u32 ring:4; | 203 | s32 ring:4; |
195 | u32 cache_level:2; | 204 | u32 cache_level:2; |
196 | } *active_bo, *pinned_bo; | 205 | } *active_bo, *pinned_bo; |
197 | u32 active_bo_count, pinned_bo_count; | 206 | u32 active_bo_count, pinned_bo_count; |
@@ -255,6 +264,17 @@ struct intel_device_info { | |||
255 | u8 supports_tv:1; | 264 | u8 supports_tv:1; |
256 | u8 has_bsd_ring:1; | 265 | u8 has_bsd_ring:1; |
257 | u8 has_blt_ring:1; | 266 | u8 has_blt_ring:1; |
267 | u8 has_llc:1; | ||
268 | }; | ||
269 | |||
270 | #define I915_PPGTT_PD_ENTRIES 512 | ||
271 | #define I915_PPGTT_PT_ENTRIES 1024 | ||
272 | struct i915_hw_ppgtt { | ||
273 | unsigned num_pd_entries; | ||
274 | struct page **pt_pages; | ||
275 | uint32_t pd_offset; | ||
276 | dma_addr_t *pt_dma_addr; | ||
277 | dma_addr_t scratch_page_dma_addr; | ||
258 | }; | 278 | }; |
259 | 279 | ||
260 | enum no_fbc_reason { | 280 | enum no_fbc_reason { |
@@ -279,6 +299,16 @@ enum intel_pch { | |||
279 | struct intel_fbdev; | 299 | struct intel_fbdev; |
280 | struct intel_fbc_work; | 300 | struct intel_fbc_work; |
281 | 301 | ||
302 | struct intel_gmbus { | ||
303 | struct i2c_adapter adapter; | ||
304 | bool force_bit; | ||
305 | bool has_gpio; | ||
306 | u32 reg0; | ||
307 | u32 gpio_reg; | ||
308 | struct i2c_algo_bit_data bit_algo; | ||
309 | struct drm_i915_private *dev_priv; | ||
310 | }; | ||
311 | |||
282 | typedef struct drm_i915_private { | 312 | typedef struct drm_i915_private { |
283 | struct drm_device *dev; | 313 | struct drm_device *dev; |
284 | 314 | ||
@@ -296,11 +326,11 @@ typedef struct drm_i915_private { | |||
296 | /** gt_lock is also taken in irq contexts. */ | 326 | /** gt_lock is also taken in irq contexts. */ |
297 | struct spinlock gt_lock; | 327 | struct spinlock gt_lock; |
298 | 328 | ||
299 | struct intel_gmbus { | 329 | struct intel_gmbus *gmbus; |
300 | struct i2c_adapter adapter; | 330 | |
301 | struct i2c_adapter *force_bit; | 331 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus |
302 | u32 reg0; | 332 | * controller on different i2c buses. */ |
303 | } *gmbus; | 333 | struct mutex gmbus_mutex; |
304 | 334 | ||
305 | struct pci_dev *bridge_dev; | 335 | struct pci_dev *bridge_dev; |
306 | struct intel_ring_buffer ring[I915_NUM_RINGS]; | 336 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
@@ -335,7 +365,6 @@ typedef struct drm_i915_private { | |||
335 | 365 | ||
336 | int tex_lru_log_granularity; | 366 | int tex_lru_log_granularity; |
337 | int allow_batchbuffer; | 367 | int allow_batchbuffer; |
338 | struct mem_block *agp_heap; | ||
339 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 368 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
340 | int vblank_pipe; | 369 | int vblank_pipe; |
341 | int num_pipe; | 370 | int num_pipe; |
@@ -584,6 +613,9 @@ typedef struct drm_i915_private { | |||
584 | struct io_mapping *gtt_mapping; | 613 | struct io_mapping *gtt_mapping; |
585 | int gtt_mtrr; | 614 | int gtt_mtrr; |
586 | 615 | ||
616 | /** PPGTT used for aliasing the PPGTT with the GTT */ | ||
617 | struct i915_hw_ppgtt *aliasing_ppgtt; | ||
618 | |||
587 | struct shrinker inactive_shrinker; | 619 | struct shrinker inactive_shrinker; |
588 | 620 | ||
589 | /** | 621 | /** |
@@ -749,6 +781,13 @@ typedef struct drm_i915_private { | |||
749 | struct drm_property *force_audio_property; | 781 | struct drm_property *force_audio_property; |
750 | } drm_i915_private_t; | 782 | } drm_i915_private_t; |
751 | 783 | ||
784 | enum hdmi_force_audio { | ||
785 | HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ | ||
786 | HDMI_AUDIO_OFF, /* force turn off HDMI audio */ | ||
787 | HDMI_AUDIO_AUTO, /* trust EDID */ | ||
788 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ | ||
789 | }; | ||
790 | |||
752 | enum i915_cache_level { | 791 | enum i915_cache_level { |
753 | I915_CACHE_NONE, | 792 | I915_CACHE_NONE, |
754 | I915_CACHE_LLC, | 793 | I915_CACHE_LLC, |
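
Because the first enumerator is pinned at -2, the remaining values fall out as HDMI_AUDIO_OFF = -1, HDMI_AUDIO_AUTO = 0 and HDMI_AUDIO_ON = 1, so the enum can be stored directly in the existing integer force_audio connector property. An illustrative decision helper (not the driver's actual code path):

/* Illustrative: decide whether to enable HDMI audio given the property
 * value and whether the EDID advertised audio support. */
static bool example_hdmi_audio_enabled(enum hdmi_force_audio force,
				       bool edid_has_audio)
{
	switch (force) {
	case HDMI_AUDIO_OFF_DVI:
	case HDMI_AUDIO_OFF:
		return false;
	case HDMI_AUDIO_ON:
		return true;
	case HDMI_AUDIO_AUTO:
	default:
		return edid_has_audio;	/* trust EDID */
	}
}
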
@@ -841,6 +880,8 @@ struct drm_i915_gem_object { | |||
841 | 880 | ||
842 | unsigned int cache_level:2; | 881 | unsigned int cache_level:2; |
843 | 882 | ||
883 | unsigned int has_aliasing_ppgtt_mapping:1; | ||
884 | |||
844 | struct page **pages; | 885 | struct page **pages; |
845 | 886 | ||
846 | /** | 887 | /** |
@@ -918,6 +959,9 @@ struct drm_i915_gem_request { | |||
918 | /** GEM sequence number associated with this request. */ | 959 | /** GEM sequence number associated with this request. */ |
919 | uint32_t seqno; | 960 | uint32_t seqno; |
920 | 961 | ||
962 | /** Position in the ringbuffer of the end of the request */ | ||
963 | u32 tail; | ||
964 | |||
921 | /** Time at which this request was emitted, in jiffies. */ | 965 | /** Time at which this request was emitted, in jiffies. */ |
922 | unsigned long emitted_jiffies; | 966 | unsigned long emitted_jiffies; |
923 | 967 | ||
@@ -974,8 +1018,11 @@ struct drm_i915_file_private { | |||
974 | 1018 | ||
975 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) | 1019 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) |
976 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) | 1020 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) |
1021 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) | ||
977 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 1022 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
978 | 1023 | ||
1024 | #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) | ||
1025 | |||
979 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) | 1026 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
980 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) | 1027 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
981 | 1028 | ||
@@ -1018,6 +1065,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly; | |||
1018 | extern int i915_enable_rc6 __read_mostly; | 1065 | extern int i915_enable_rc6 __read_mostly; |
1019 | extern int i915_enable_fbc __read_mostly; | 1066 | extern int i915_enable_fbc __read_mostly; |
1020 | extern bool i915_enable_hangcheck __read_mostly; | 1067 | extern bool i915_enable_hangcheck __read_mostly; |
1068 | extern bool i915_enable_ppgtt __read_mostly; | ||
1021 | 1069 | ||
1022 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 1070 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
1023 | extern int i915_resume(struct drm_device *dev); | 1071 | extern int i915_resume(struct drm_device *dev); |
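
Taken together, the new i915_enable_ppgtt module parameter and the HAS_ALIASING_PPGTT() capability macro give the natural guard for the setup path added by this patch. A hedged sketch of such a call site (the wrapper function is illustrative; the real call sites are not shown in this hunk):

/* Sketch: only build the aliasing PPGTT when the hardware supports it
 * (gen6+) and the module parameter allows it. */
static int example_maybe_init_ppgtt(struct drm_device *dev)
{
	int ret = 0;

	if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev))
		ret = i915_gem_init_aliasing_ppgtt(dev);

	return ret;
}
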
@@ -1079,18 +1127,6 @@ extern void i915_destroy_error_state(struct drm_device *dev); | |||
1079 | #endif | 1127 | #endif |
1080 | 1128 | ||
1081 | 1129 | ||
1082 | /* i915_mem.c */ | ||
1083 | extern int i915_mem_alloc(struct drm_device *dev, void *data, | ||
1084 | struct drm_file *file_priv); | ||
1085 | extern int i915_mem_free(struct drm_device *dev, void *data, | ||
1086 | struct drm_file *file_priv); | ||
1087 | extern int i915_mem_init_heap(struct drm_device *dev, void *data, | ||
1088 | struct drm_file *file_priv); | ||
1089 | extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, | ||
1090 | struct drm_file *file_priv); | ||
1091 | extern void i915_mem_takedown(struct mem_block **heap); | ||
1092 | extern void i915_mem_release(struct drm_device * dev, | ||
1093 | struct drm_file *file_priv, struct mem_block *heap); | ||
1094 | /* i915_gem.c */ | 1130 | /* i915_gem.c */ |
1095 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, | 1131 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
1096 | struct drm_file *file_priv); | 1132 | struct drm_file *file_priv); |
@@ -1170,37 +1206,55 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
1170 | return (int32_t)(seq1 - seq2) >= 0; | 1206 | return (int32_t)(seq1 - seq2) >= 0; |
1171 | } | 1207 | } |
1172 | 1208 | ||
1173 | static inline u32 | 1209 | u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); |
1174 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) | ||
1175 | { | ||
1176 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
1177 | return ring->outstanding_lazy_request = dev_priv->next_seqno; | ||
1178 | } | ||
1179 | 1210 | ||
1180 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | 1211 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, |
1181 | struct intel_ring_buffer *pipelined); | 1212 | struct intel_ring_buffer *pipelined); |
1182 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); | 1213 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
1183 | 1214 | ||
1215 | static inline void | ||
1216 | i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) | ||
1217 | { | ||
1218 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | ||
1219 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
1220 | dev_priv->fence_regs[obj->fence_reg].pin_count++; | ||
1221 | } | ||
1222 | } | ||
1223 | |||
1224 | static inline void | ||
1225 | i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) | ||
1226 | { | ||
1227 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | ||
1228 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
1229 | dev_priv->fence_regs[obj->fence_reg].pin_count--; | ||
1230 | } | ||
1231 | } | ||
1232 | |||
1184 | void i915_gem_retire_requests(struct drm_device *dev); | 1233 | void i915_gem_retire_requests(struct drm_device *dev); |
1234 | void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); | ||
1235 | |||
1185 | void i915_gem_reset(struct drm_device *dev); | 1236 | void i915_gem_reset(struct drm_device *dev); |
1186 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); | 1237 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
1187 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, | 1238 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1188 | uint32_t read_domains, | 1239 | uint32_t read_domains, |
1189 | uint32_t write_domain); | 1240 | uint32_t write_domain); |
1190 | int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); | 1241 | int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
1191 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); | 1242 | int __must_check i915_gem_init_hw(struct drm_device *dev); |
1243 | void i915_gem_init_swizzling(struct drm_device *dev); | ||
1244 | void i915_gem_init_ppgtt(struct drm_device *dev); | ||
1192 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1245 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1193 | void i915_gem_do_init(struct drm_device *dev, | 1246 | void i915_gem_do_init(struct drm_device *dev, |
1194 | unsigned long start, | 1247 | unsigned long start, |
1195 | unsigned long mappable_end, | 1248 | unsigned long mappable_end, |
1196 | unsigned long end); | 1249 | unsigned long end); |
1197 | int __must_check i915_gpu_idle(struct drm_device *dev); | 1250 | int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire); |
1198 | int __must_check i915_gem_idle(struct drm_device *dev); | 1251 | int __must_check i915_gem_idle(struct drm_device *dev); |
1199 | int __must_check i915_add_request(struct intel_ring_buffer *ring, | 1252 | int __must_check i915_add_request(struct intel_ring_buffer *ring, |
1200 | struct drm_file *file, | 1253 | struct drm_file *file, |
1201 | struct drm_i915_gem_request *request); | 1254 | struct drm_i915_gem_request *request); |
1202 | int __must_check i915_wait_request(struct intel_ring_buffer *ring, | 1255 | int __must_check i915_wait_request(struct intel_ring_buffer *ring, |
1203 | uint32_t seqno); | 1256 | uint32_t seqno, |
1257 | bool do_retire); | ||
1204 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 1258 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1205 | int __must_check | 1259 | int __must_check |
1206 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, | 1260 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
@@ -1227,6 +1281,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
1227 | enum i915_cache_level cache_level); | 1281 | enum i915_cache_level cache_level); |
1228 | 1282 | ||
1229 | /* i915_gem_gtt.c */ | 1283 | /* i915_gem_gtt.c */ |
1284 | int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); | ||
1285 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); | ||
1286 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | ||
1287 | struct drm_i915_gem_object *obj, | ||
1288 | enum i915_cache_level cache_level); | ||
1289 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | ||
1290 | struct drm_i915_gem_object *obj); | ||
1291 | |||
1230 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1292 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1231 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1293 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
1232 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, | 1294 | void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, |
@@ -1365,7 +1427,7 @@ extern void intel_display_print_error_state(struct seq_file *m, | |||
1365 | */ | 1427 | */ |
1366 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); | 1428 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1367 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); | 1429 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1368 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); | 1430 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1369 | 1431 | ||
1370 | /* We give fast paths for the really cool registers */ | 1432 | /* We give fast paths for the really cool registers */ |
1371 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | 1433 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
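
The new fence pin_count bookkeeping only helps if users bracket the span over which a fence register must stay assigned; i915_find_fence_reg() (patched below to skip pinned registers) may otherwise steal it mid-use. A hedged usage sketch, with the surrounding function invented for illustration:

/* Sketch: keep the fence register from being stolen while some
 * longer-lived consumer (e.g. scanout) still depends on it. */
static int example_use_fenced_object(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		return ret;

	i915_gem_object_pin_fence(obj);

	/* ... program the hardware to access obj through the fence ... */

	i915_gem_object_unpin_fence(obj);
	return 0;
}
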
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e55badb2d86d..1f441f5c2405 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); | |||
58 | 58 | ||
59 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | 59 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, |
60 | struct shrink_control *sc); | 60 | struct shrink_control *sc); |
61 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | ||
61 | 62 | ||
62 | /* some bookkeeping */ | 63 | /* some bookkeeping */ |
63 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, | 64 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, |
@@ -258,73 +259,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) | |||
258 | obj->tiling_mode != I915_TILING_NONE; | 259 | obj->tiling_mode != I915_TILING_NONE; |
259 | } | 260 | } |
260 | 261 | ||
261 | static inline void | ||
262 | slow_shmem_copy(struct page *dst_page, | ||
263 | int dst_offset, | ||
264 | struct page *src_page, | ||
265 | int src_offset, | ||
266 | int length) | ||
267 | { | ||
268 | char *dst_vaddr, *src_vaddr; | ||
269 | |||
270 | dst_vaddr = kmap(dst_page); | ||
271 | src_vaddr = kmap(src_page); | ||
272 | |||
273 | memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); | ||
274 | |||
275 | kunmap(src_page); | ||
276 | kunmap(dst_page); | ||
277 | } | ||
278 | |||
279 | static inline void | ||
280 | slow_shmem_bit17_copy(struct page *gpu_page, | ||
281 | int gpu_offset, | ||
282 | struct page *cpu_page, | ||
283 | int cpu_offset, | ||
284 | int length, | ||
285 | int is_read) | ||
286 | { | ||
287 | char *gpu_vaddr, *cpu_vaddr; | ||
288 | |||
289 | /* Use the unswizzled path if this page isn't affected. */ | ||
290 | if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { | ||
291 | if (is_read) | ||
292 | return slow_shmem_copy(cpu_page, cpu_offset, | ||
293 | gpu_page, gpu_offset, length); | ||
294 | else | ||
295 | return slow_shmem_copy(gpu_page, gpu_offset, | ||
296 | cpu_page, cpu_offset, length); | ||
297 | } | ||
298 | |||
299 | gpu_vaddr = kmap(gpu_page); | ||
300 | cpu_vaddr = kmap(cpu_page); | ||
301 | |||
302 | /* Copy the data, XORing A6 with A17 (1). The user already knows he's | ||
303 | * XORing with the other bits (A9 for Y, A9 and A10 for X) | ||
304 | */ | ||
305 | while (length > 0) { | ||
306 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | ||
307 | int this_length = min(cacheline_end - gpu_offset, length); | ||
308 | int swizzled_gpu_offset = gpu_offset ^ 64; | ||
309 | |||
310 | if (is_read) { | ||
311 | memcpy(cpu_vaddr + cpu_offset, | ||
312 | gpu_vaddr + swizzled_gpu_offset, | ||
313 | this_length); | ||
314 | } else { | ||
315 | memcpy(gpu_vaddr + swizzled_gpu_offset, | ||
316 | cpu_vaddr + cpu_offset, | ||
317 | this_length); | ||
318 | } | ||
319 | cpu_offset += this_length; | ||
320 | gpu_offset += this_length; | ||
321 | length -= this_length; | ||
322 | } | ||
323 | |||
324 | kunmap(cpu_page); | ||
325 | kunmap(gpu_page); | ||
326 | } | ||
327 | |||
328 | /** | 262 | /** |
329 | * This is the fast shmem pread path, which attempts to copy_from_user directly | 263 | * This is the fast shmem pread path, which attempts to copy_from_user directly |
330 | * from the backing pages of the object to the user's address space. On a | 264 | * from the backing pages of the object to the user's address space. On a |
@@ -385,6 +319,58 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, | |||
385 | return 0; | 319 | return 0; |
386 | } | 320 | } |
387 | 321 | ||
322 | static inline int | ||
323 | __copy_to_user_swizzled(char __user *cpu_vaddr, | ||
324 | const char *gpu_vaddr, int gpu_offset, | ||
325 | int length) | ||
326 | { | ||
327 | int ret, cpu_offset = 0; | ||
328 | |||
329 | while (length > 0) { | ||
330 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | ||
331 | int this_length = min(cacheline_end - gpu_offset, length); | ||
332 | int swizzled_gpu_offset = gpu_offset ^ 64; | ||
333 | |||
334 | ret = __copy_to_user(cpu_vaddr + cpu_offset, | ||
335 | gpu_vaddr + swizzled_gpu_offset, | ||
336 | this_length); | ||
337 | if (ret) | ||
338 | return ret + length; | ||
339 | |||
340 | cpu_offset += this_length; | ||
341 | gpu_offset += this_length; | ||
342 | length -= this_length; | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static inline int | ||
349 | __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset, | ||
350 | const char *cpu_vaddr, | ||
351 | int length) | ||
352 | { | ||
353 | int ret, cpu_offset = 0; | ||
354 | |||
355 | while (length > 0) { | ||
356 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | ||
357 | int this_length = min(cacheline_end - gpu_offset, length); | ||
358 | int swizzled_gpu_offset = gpu_offset ^ 64; | ||
359 | |||
360 | ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, | ||
361 | cpu_vaddr + cpu_offset, | ||
362 | this_length); | ||
363 | if (ret) | ||
364 | return ret + length; | ||
365 | |||
366 | cpu_offset += this_length; | ||
367 | gpu_offset += this_length; | ||
368 | length -= this_length; | ||
369 | } | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | |||
388 | /** | 374 | /** |
389 | * This is the fallback shmem pread path, which allocates temporary storage | 375 | * This is the fallback shmem pread path, which allocates temporary storage |
390 | * in kernel space to copy_to_user into outside of the struct_mutex, so we | 376 | * in kernel space to copy_to_user into outside of the struct_mutex, so we |
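
The two helpers above express bit-17 swizzling purely as offset arithmetic: within an affected page, each 64-byte cacheline is swapped with its neighbour, so the byte at offset o is stored at o ^ 64, and the copy loops chunk at cacheline boundaries so no single copy ever straddles that swap. A standalone illustration of the offset mapping:

#include <stdio.h>

/* For a page whose physical address has bit 17 set, data at byte offset
 * o actually lives at o ^ 64, i.e. adjacent 64-byte cachelines trade
 * places. This mirrors the per-cacheline XOR in the helpers above. */
static int swizzled_offset(int offset)
{
	return offset ^ 64;
}

int main(void)
{
	int offsets[] = { 0, 63, 64, 127, 128, 200 };
	int i, n = sizeof(offsets) / sizeof(offsets[0]);

	for (i = 0; i < n; i++)
		printf("%3d -> %3d\n", offsets[i], swizzled_offset(offsets[i]));
	return 0;
}
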
@@ -398,72 +384,34 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, | |||
398 | struct drm_file *file) | 384 | struct drm_file *file) |
399 | { | 385 | { |
400 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | 386 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
401 | struct mm_struct *mm = current->mm; | 387 | char __user *user_data; |
402 | struct page **user_pages; | ||
403 | ssize_t remain; | 388 | ssize_t remain; |
404 | loff_t offset, pinned_pages, i; | 389 | loff_t offset; |
405 | loff_t first_data_page, last_data_page, num_pages; | 390 | int shmem_page_offset, page_length, ret; |
406 | int shmem_page_offset; | 391 | int obj_do_bit17_swizzling, page_do_bit17_swizzling; |
407 | int data_page_index, data_page_offset; | ||
408 | int page_length; | ||
409 | int ret; | ||
410 | uint64_t data_ptr = args->data_ptr; | ||
411 | int do_bit17_swizzling; | ||
412 | 392 | ||
393 | user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
413 | remain = args->size; | 394 | remain = args->size; |
414 | 395 | ||
415 | /* Pin the user pages containing the data. We can't fault while | 396 | obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
416 | * holding the struct mutex, yet we want to hold it while | ||
417 | * dereferencing the user data. | ||
418 | */ | ||
419 | first_data_page = data_ptr / PAGE_SIZE; | ||
420 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | ||
421 | num_pages = last_data_page - first_data_page + 1; | ||
422 | 397 | ||
423 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); | 398 | offset = args->offset; |
424 | if (user_pages == NULL) | ||
425 | return -ENOMEM; | ||
426 | 399 | ||
427 | mutex_unlock(&dev->struct_mutex); | 400 | mutex_unlock(&dev->struct_mutex); |
428 | down_read(&mm->mmap_sem); | ||
429 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | ||
430 | num_pages, 1, 0, user_pages, NULL); | ||
431 | up_read(&mm->mmap_sem); | ||
432 | mutex_lock(&dev->struct_mutex); | ||
433 | if (pinned_pages < num_pages) { | ||
434 | ret = -EFAULT; | ||
435 | goto out; | ||
436 | } | ||
437 | |||
438 | ret = i915_gem_object_set_cpu_read_domain_range(obj, | ||
439 | args->offset, | ||
440 | args->size); | ||
441 | if (ret) | ||
442 | goto out; | ||
443 | |||
444 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | ||
445 | |||
446 | offset = args->offset; | ||
447 | 401 | ||
448 | while (remain > 0) { | 402 | while (remain > 0) { |
449 | struct page *page; | 403 | struct page *page; |
404 | char *vaddr; | ||
450 | 405 | ||
451 | /* Operation in this page | 406 | /* Operation in this page |
452 | * | 407 | * |
453 | * shmem_page_offset = offset within page in shmem file | 408 | * shmem_page_offset = offset within page in shmem file |
454 | * data_page_index = page number in get_user_pages return | ||
455 | * data_page_offset = offset with data_page_index page. | ||
456 | * page_length = bytes to copy for this page | 409 | * page_length = bytes to copy for this page |
457 | */ | 410 | */ |
458 | shmem_page_offset = offset_in_page(offset); | 411 | shmem_page_offset = offset_in_page(offset); |
459 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | ||
460 | data_page_offset = offset_in_page(data_ptr); | ||
461 | |||
462 | page_length = remain; | 412 | page_length = remain; |
463 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 413 | if ((shmem_page_offset + page_length) > PAGE_SIZE) |
464 | page_length = PAGE_SIZE - shmem_page_offset; | 414 | page_length = PAGE_SIZE - shmem_page_offset; |
465 | if ((data_page_offset + page_length) > PAGE_SIZE) | ||
466 | page_length = PAGE_SIZE - data_page_offset; | ||
467 | 415 | ||
468 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | 416 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
469 | if (IS_ERR(page)) { | 417 | if (IS_ERR(page)) { |
@@ -471,36 +419,38 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, | |||
471 | goto out; | 419 | goto out; |
472 | } | 420 | } |
473 | 421 | ||
474 | if (do_bit17_swizzling) { | 422 | page_do_bit17_swizzling = obj_do_bit17_swizzling && |
475 | slow_shmem_bit17_copy(page, | 423 | (page_to_phys(page) & (1 << 17)) != 0; |
476 | shmem_page_offset, | 424 | |
477 | user_pages[data_page_index], | 425 | vaddr = kmap(page); |
478 | data_page_offset, | 426 | if (page_do_bit17_swizzling) |
479 | page_length, | 427 | ret = __copy_to_user_swizzled(user_data, |
480 | 1); | 428 | vaddr, shmem_page_offset, |
481 | } else { | 429 | page_length); |
482 | slow_shmem_copy(user_pages[data_page_index], | 430 | else |
483 | data_page_offset, | 431 | ret = __copy_to_user(user_data, |
484 | page, | 432 | vaddr + shmem_page_offset, |
485 | shmem_page_offset, | 433 | page_length); |
486 | page_length); | 434 | kunmap(page); |
487 | } | ||
488 | 435 | ||
489 | mark_page_accessed(page); | 436 | mark_page_accessed(page); |
490 | page_cache_release(page); | 437 | page_cache_release(page); |
491 | 438 | ||
439 | if (ret) { | ||
440 | ret = -EFAULT; | ||
441 | goto out; | ||
442 | } | ||
443 | |||
492 | remain -= page_length; | 444 | remain -= page_length; |
493 | data_ptr += page_length; | 445 | user_data += page_length; |
494 | offset += page_length; | 446 | offset += page_length; |
495 | } | 447 | } |
496 | 448 | ||
497 | out: | 449 | out: |
498 | for (i = 0; i < pinned_pages; i++) { | 450 | mutex_lock(&dev->struct_mutex); |
499 | SetPageDirty(user_pages[i]); | 451 | /* Fixup: Kill any reinstated backing storage pages */ |
500 | mark_page_accessed(user_pages[i]); | 452 | if (obj->madv == __I915_MADV_PURGED) |
501 | page_cache_release(user_pages[i]); | 453 | i915_gem_object_truncate(obj); |
502 | } | ||
503 | drm_free_large(user_pages); | ||
504 | 454 | ||
505 | return ret; | 455 | return ret; |
506 | } | 456 | } |
@@ -841,71 +791,36 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, | |||
841 | struct drm_file *file) | 791 | struct drm_file *file) |
842 | { | 792 | { |
843 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | 793 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
844 | struct mm_struct *mm = current->mm; | ||
845 | struct page **user_pages; | ||
846 | ssize_t remain; | 794 | ssize_t remain; |
847 | loff_t offset, pinned_pages, i; | 795 | loff_t offset; |
848 | loff_t first_data_page, last_data_page, num_pages; | 796 | char __user *user_data; |
849 | int shmem_page_offset; | 797 | int shmem_page_offset, page_length, ret; |
850 | int data_page_index, data_page_offset; | 798 | int obj_do_bit17_swizzling, page_do_bit17_swizzling; |
851 | int page_length; | ||
852 | int ret; | ||
853 | uint64_t data_ptr = args->data_ptr; | ||
854 | int do_bit17_swizzling; | ||
855 | 799 | ||
800 | user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
856 | remain = args->size; | 801 | remain = args->size; |
857 | 802 | ||
858 | /* Pin the user pages containing the data. We can't fault while | 803 | obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
859 | * holding the struct mutex, and all of the pwrite implementations | ||
860 | * want to hold it while dereferencing the user data. | ||
861 | */ | ||
862 | first_data_page = data_ptr / PAGE_SIZE; | ||
863 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | ||
864 | num_pages = last_data_page - first_data_page + 1; | ||
865 | |||
866 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); | ||
867 | if (user_pages == NULL) | ||
868 | return -ENOMEM; | ||
869 | |||
870 | mutex_unlock(&dev->struct_mutex); | ||
871 | down_read(&mm->mmap_sem); | ||
872 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | ||
873 | num_pages, 0, 0, user_pages, NULL); | ||
874 | up_read(&mm->mmap_sem); | ||
875 | mutex_lock(&dev->struct_mutex); | ||
876 | if (pinned_pages < num_pages) { | ||
877 | ret = -EFAULT; | ||
878 | goto out; | ||
879 | } | ||
880 | |||
881 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
882 | if (ret) | ||
883 | goto out; | ||
884 | |||
885 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | ||
886 | 804 | ||
887 | offset = args->offset; | 805 | offset = args->offset; |
888 | obj->dirty = 1; | 806 | obj->dirty = 1; |
889 | 807 | ||
808 | mutex_unlock(&dev->struct_mutex); | ||
809 | |||
890 | while (remain > 0) { | 810 | while (remain > 0) { |
891 | struct page *page; | 811 | struct page *page; |
812 | char *vaddr; | ||
892 | 813 | ||
893 | /* Operation in this page | 814 | /* Operation in this page |
894 | * | 815 | * |
895 | * shmem_page_offset = offset within page in shmem file | 816 | * shmem_page_offset = offset within page in shmem file |
896 | * data_page_index = page number in get_user_pages return | ||
897 | * data_page_offset = offset with data_page_index page. | ||
898 | * page_length = bytes to copy for this page | 817 | * page_length = bytes to copy for this page |
899 | */ | 818 | */ |
900 | shmem_page_offset = offset_in_page(offset); | 819 | shmem_page_offset = offset_in_page(offset); |
901 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | ||
902 | data_page_offset = offset_in_page(data_ptr); | ||
903 | 820 | ||
904 | page_length = remain; | 821 | page_length = remain; |
905 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 822 | if ((shmem_page_offset + page_length) > PAGE_SIZE) |
906 | page_length = PAGE_SIZE - shmem_page_offset; | 823 | page_length = PAGE_SIZE - shmem_page_offset; |
907 | if ((data_page_offset + page_length) > PAGE_SIZE) | ||
908 | page_length = PAGE_SIZE - data_page_offset; | ||
909 | 824 | ||
910 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); | 825 | page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); |
911 | if (IS_ERR(page)) { | 826 | if (IS_ERR(page)) { |
@@ -913,34 +828,45 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, | |||
913 | goto out; | 828 | goto out; |
914 | } | 829 | } |
915 | 830 | ||
916 | if (do_bit17_swizzling) { | 831 | page_do_bit17_swizzling = obj_do_bit17_swizzling && |
917 | slow_shmem_bit17_copy(page, | 832 | (page_to_phys(page) & (1 << 17)) != 0; |
918 | shmem_page_offset, | 833 | |
919 | user_pages[data_page_index], | 834 | vaddr = kmap(page); |
920 | data_page_offset, | 835 | if (page_do_bit17_swizzling) |
921 | page_length, | 836 | ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, |
922 | 0); | 837 | user_data, |
923 | } else { | 838 | page_length); |
924 | slow_shmem_copy(page, | 839 | else |
925 | shmem_page_offset, | 840 | ret = __copy_from_user(vaddr + shmem_page_offset, |
926 | user_pages[data_page_index], | 841 | user_data, |
927 | data_page_offset, | 842 | page_length); |
928 | page_length); | 843 | kunmap(page); |
929 | } | ||
930 | 844 | ||
931 | set_page_dirty(page); | 845 | set_page_dirty(page); |
932 | mark_page_accessed(page); | 846 | mark_page_accessed(page); |
933 | page_cache_release(page); | 847 | page_cache_release(page); |
934 | 848 | ||
849 | if (ret) { | ||
850 | ret = -EFAULT; | ||
851 | goto out; | ||
852 | } | ||
853 | |||
935 | remain -= page_length; | 854 | remain -= page_length; |
936 | data_ptr += page_length; | 855 | user_data += page_length; |
937 | offset += page_length; | 856 | offset += page_length; |
938 | } | 857 | } |
939 | 858 | ||
940 | out: | 859 | out: |
941 | for (i = 0; i < pinned_pages; i++) | 860 | mutex_lock(&dev->struct_mutex); |
942 | page_cache_release(user_pages[i]); | 861 | /* Fixup: Kill any reinstated backing storage pages */ |
943 | drm_free_large(user_pages); | 862 | if (obj->madv == __I915_MADV_PURGED) |
863 | i915_gem_object_truncate(obj); | ||
864 | /* and flush dirty cachelines in case the object isn't in the cpu write | ||
865 | * domain anymore. */ | ||
866 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | ||
867 | i915_gem_clflush_object(obj); | ||
868 | intel_gtt_chipset_flush(); | ||
869 | } | ||
944 | 870 | ||
945 | return ret; | 871 | return ret; |
946 | } | 872 | } |
@@ -996,10 +922,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
996 | * pread/pwrite currently are reading and writing from the CPU | 922 | * pread/pwrite currently are reading and writing from the CPU |
997 | * perspective, requiring manual detiling by the client. | 923 | * perspective, requiring manual detiling by the client. |
998 | */ | 924 | */ |
999 | if (obj->phys_obj) | 925 | if (obj->phys_obj) { |
1000 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 926 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
1001 | else if (obj->gtt_space && | 927 | goto out; |
1002 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | 928 | } |
929 | |||
930 | if (obj->gtt_space && | ||
931 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | ||
1003 | ret = i915_gem_object_pin(obj, 0, true); | 932 | ret = i915_gem_object_pin(obj, 0, true); |
1004 | if (ret) | 933 | if (ret) |
1005 | goto out; | 934 | goto out; |
@@ -1018,18 +947,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1018 | 947 | ||
1019 | out_unpin: | 948 | out_unpin: |
1020 | i915_gem_object_unpin(obj); | 949 | i915_gem_object_unpin(obj); |
1021 | } else { | ||
1022 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
1023 | if (ret) | ||
1024 | goto out; | ||
1025 | 950 | ||
1026 | ret = -EFAULT; | 951 | if (ret != -EFAULT) |
1027 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 952 | goto out; |
1028 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); | 953 | /* Fall through to the shmfs paths because the gtt paths might |
1029 | if (ret == -EFAULT) | 954 | * fail with non-page-backed user pointers (e.g. gtt mappings |
1030 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); | 955 | * when moving data between textures). */ |
1031 | } | 956 | } |
1032 | 957 | ||
958 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
959 | if (ret) | ||
960 | goto out; | ||
961 | |||
962 | ret = -EFAULT; | ||
963 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | ||
964 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); | ||
965 | if (ret == -EFAULT) | ||
966 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); | ||
967 | |||
1033 | out: | 968 | out: |
1034 | drm_gem_object_unreference(&obj->base); | 969 | drm_gem_object_unreference(&obj->base); |
1035 | unlock: | 970 | unlock: |
@@ -1141,7 +1076,6 @@ int | |||
1141 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1076 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1142 | struct drm_file *file) | 1077 | struct drm_file *file) |
1143 | { | 1078 | { |
1144 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1145 | struct drm_i915_gem_mmap *args = data; | 1079 | struct drm_i915_gem_mmap *args = data; |
1146 | struct drm_gem_object *obj; | 1080 | struct drm_gem_object *obj; |
1147 | unsigned long addr; | 1081 | unsigned long addr; |
@@ -1153,11 +1087,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1153 | if (obj == NULL) | 1087 | if (obj == NULL) |
1154 | return -ENOENT; | 1088 | return -ENOENT; |
1155 | 1089 | ||
1156 | if (obj->size > dev_priv->mm.gtt_mappable_end) { | ||
1157 | drm_gem_object_unreference_unlocked(obj); | ||
1158 | return -E2BIG; | ||
1159 | } | ||
1160 | |||
1161 | down_write(¤t->mm->mmap_sem); | 1090 | down_write(¤t->mm->mmap_sem); |
1162 | addr = do_mmap(obj->filp, 0, args->size, | 1091 | addr = do_mmap(obj->filp, 0, args->size, |
1163 | PROT_READ | PROT_WRITE, MAP_SHARED, | 1092 | PROT_READ | PROT_WRITE, MAP_SHARED, |
@@ -1647,6 +1576,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring, | |||
1647 | } | 1576 | } |
1648 | } | 1577 | } |
1649 | 1578 | ||
1579 | static u32 | ||
1580 | i915_gem_get_seqno(struct drm_device *dev) | ||
1581 | { | ||
1582 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1583 | u32 seqno = dev_priv->next_seqno; | ||
1584 | |||
1585 | /* reserve 0 for non-seqno */ | ||
1586 | if (++dev_priv->next_seqno == 0) | ||
1587 | dev_priv->next_seqno = 1; | ||
1588 | |||
1589 | return seqno; | ||
1590 | } | ||
1591 | |||
1592 | u32 | ||
1593 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) | ||
1594 | { | ||
1595 | if (ring->outstanding_lazy_request == 0) | ||
1596 | ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev); | ||
1597 | |||
1598 | return ring->outstanding_lazy_request; | ||
1599 | } | ||
1600 | |||
1650 | int | 1601 | int |
1651 | i915_add_request(struct intel_ring_buffer *ring, | 1602 | i915_add_request(struct intel_ring_buffer *ring, |
1652 | struct drm_file *file, | 1603 | struct drm_file *file, |
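
With the allocation moved out of the header, a ring now takes at most one "lazy" seqno per outstanding request, and seqno 0 is reserved to mean "none allocated", with the counter skipping 0 on wrap. A small userspace model of the allocator; variable names mirror the driver's for readability only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = UINT32_MAX - 1;	/* pretend we are near wrap */
static uint32_t outstanding_lazy_request;	/* 0 means "none" */

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	/* reserve 0 for "no seqno", exactly as the driver does */
	if (++next_seqno == 0)
		next_seqno = 1;
	return seqno;
}

static uint32_t next_request_seqno(void)
{
	if (outstanding_lazy_request == 0)
		outstanding_lazy_request = get_seqno();
	return outstanding_lazy_request;
}

int main(void)
{
	/* two queries before the request is emitted return the same seqno */
	printf("%" PRIu32 " %" PRIu32 "\n",
	       next_request_seqno(), next_request_seqno());

	outstanding_lazy_request = 0;	/* as i915_add_request() now does */
	printf("%" PRIu32 "\n", next_request_seqno());
	return 0;
}
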
@@ -1654,10 +1605,19 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1654 | { | 1605 | { |
1655 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1606 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1656 | uint32_t seqno; | 1607 | uint32_t seqno; |
1608 | u32 request_ring_position; | ||
1657 | int was_empty; | 1609 | int was_empty; |
1658 | int ret; | 1610 | int ret; |
1659 | 1611 | ||
1660 | BUG_ON(request == NULL); | 1612 | BUG_ON(request == NULL); |
1613 | seqno = i915_gem_next_request_seqno(ring); | ||
1614 | |||
1615 | /* Record the position of the start of the request so that | ||
1616 | * should we detect the updated seqno part-way through the | ||
1617 | * GPU processing the request, we never over-estimate the | ||
1618 | * position of the head. | ||
1619 | */ | ||
1620 | request_ring_position = intel_ring_get_tail(ring); | ||
1661 | 1621 | ||
1662 | ret = ring->add_request(ring, &seqno); | 1622 | ret = ring->add_request(ring, &seqno); |
1663 | if (ret) | 1623 | if (ret) |
@@ -1667,6 +1627,7 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1667 | 1627 | ||
1668 | request->seqno = seqno; | 1628 | request->seqno = seqno; |
1669 | request->ring = ring; | 1629 | request->ring = ring; |
1630 | request->tail = request_ring_position; | ||
1670 | request->emitted_jiffies = jiffies; | 1631 | request->emitted_jiffies = jiffies; |
1671 | was_empty = list_empty(&ring->request_list); | 1632 | was_empty = list_empty(&ring->request_list); |
1672 | list_add_tail(&request->list, &ring->request_list); | 1633 | list_add_tail(&request->list, &ring->request_list); |
@@ -1681,7 +1642,7 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
1681 | spin_unlock(&file_priv->mm.lock); | 1642 | spin_unlock(&file_priv->mm.lock); |
1682 | } | 1643 | } |
1683 | 1644 | ||
1684 | ring->outstanding_lazy_request = false; | 1645 | ring->outstanding_lazy_request = 0; |
1685 | 1646 | ||
1686 | if (!dev_priv->mm.suspended) { | 1647 | if (!dev_priv->mm.suspended) { |
1687 | if (i915_enable_hangcheck) { | 1648 | if (i915_enable_hangcheck) { |
@@ -1803,7 +1764,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
1803 | /** | 1764 | /** |
1804 | * This function clears the request list as sequence numbers are passed. | 1765 | * This function clears the request list as sequence numbers are passed. |
1805 | */ | 1766 | */ |
1806 | static void | 1767 | void |
1807 | i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | 1768 | i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) |
1808 | { | 1769 | { |
1809 | uint32_t seqno; | 1770 | uint32_t seqno; |
@@ -1831,6 +1792,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | |||
1831 | break; | 1792 | break; |
1832 | 1793 | ||
1833 | trace_i915_gem_request_retire(ring, request->seqno); | 1794 | trace_i915_gem_request_retire(ring, request->seqno); |
1795 | /* We know the GPU must have read the request to have | ||
1796 | * sent us the seqno + interrupt, so use the position | ||
1797 | * of the tail of the request to update the last known position | ||
1798 | * of the GPU head. | ||
1799 | */ | ||
1800 | ring->last_retired_head = request->tail; | ||
1834 | 1801 | ||
1835 | list_del(&request->list); | 1802 | list_del(&request->list); |
1836 | i915_gem_request_remove_from_client(request); | 1803 | i915_gem_request_remove_from_client(request); |
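
Recording request->tail and feeding it into ring->last_retired_head lets the ring-buffer code treat the tail of the last retired request as a safe lower bound for the hardware head when computing free space, instead of reading the HEAD register. A hedged sketch of that consumer; the helper and the 8-byte guard gap are assumptions, not necessarily the ring code's exact form:

/* Sketch: free space in a circular ring, using the retired tail as a
 * conservative stand-in for the hardware HEAD. */
static int example_ring_space(unsigned int size, unsigned int head,
			      unsigned int tail)
{
	int space = (int)head - (int)(tail + 8);	/* small guard gap */

	if (space < 0)
		space += size;
	return space;
}

/* ...and the caller, assuming -1 marks "nothing retired since last check":
 *
 *	if (ring->last_retired_head != -1) {
 *		ring->head = ring->last_retired_head;
 *		ring->space = example_ring_space(ring->size, ring->head,
 *						 ring->tail);
 *	}
 */
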
@@ -1943,7 +1910,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1943 | */ | 1910 | */ |
1944 | int | 1911 | int |
1945 | i915_wait_request(struct intel_ring_buffer *ring, | 1912 | i915_wait_request(struct intel_ring_buffer *ring, |
1946 | uint32_t seqno) | 1913 | uint32_t seqno, |
1914 | bool do_retire) | ||
1947 | { | 1915 | { |
1948 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1916 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1949 | u32 ier; | 1917 | u32 ier; |
@@ -2017,17 +1985,12 @@ i915_wait_request(struct intel_ring_buffer *ring, | |||
2017 | if (atomic_read(&dev_priv->mm.wedged)) | 1985 | if (atomic_read(&dev_priv->mm.wedged)) |
2018 | ret = -EAGAIN; | 1986 | ret = -EAGAIN; |
2019 | 1987 | ||
2020 | if (ret && ret != -ERESTARTSYS) | ||
2021 | DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", | ||
2022 | __func__, ret, seqno, ring->get_seqno(ring), | ||
2023 | dev_priv->next_seqno); | ||
2024 | |||
2025 | /* Directly dispatch request retiring. While we have the work queue | 1988 | /* Directly dispatch request retiring. While we have the work queue |
2026 | * to handle this, the waiter on a request often wants an associated | 1989 | * to handle this, the waiter on a request often wants an associated |
2027 | * buffer to have made it to the inactive list, and we would need | 1990 | * buffer to have made it to the inactive list, and we would need |
2028 | * a separate wait queue to handle that. | 1991 | * a separate wait queue to handle that. |
2029 | */ | 1992 | */ |
2030 | if (ret == 0) | 1993 | if (ret == 0 && do_retire) |
2031 | i915_gem_retire_requests_ring(ring); | 1994 | i915_gem_retire_requests_ring(ring); |
2032 | 1995 | ||
2033 | return ret; | 1996 | return ret; |
@@ -2051,7 +2014,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) | |||
2051 | * it. | 2014 | * it. |
2052 | */ | 2015 | */ |
2053 | if (obj->active) { | 2016 | if (obj->active) { |
2054 | ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); | 2017 | ret = i915_wait_request(obj->ring, obj->last_rendering_seqno, |
2018 | true); | ||
2055 | if (ret) | 2019 | if (ret) |
2056 | return ret; | 2020 | return ret; |
2057 | } | 2021 | } |
@@ -2089,6 +2053,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) | |||
2089 | int | 2053 | int |
2090 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) | 2054 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
2091 | { | 2055 | { |
2056 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; | ||
2092 | int ret = 0; | 2057 | int ret = 0; |
2093 | 2058 | ||
2094 | if (obj->gtt_space == NULL) | 2059 | if (obj->gtt_space == NULL) |
@@ -2133,6 +2098,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2133 | trace_i915_gem_object_unbind(obj); | 2098 | trace_i915_gem_object_unbind(obj); |
2134 | 2099 | ||
2135 | i915_gem_gtt_unbind_object(obj); | 2100 | i915_gem_gtt_unbind_object(obj); |
2101 | if (obj->has_aliasing_ppgtt_mapping) { | ||
2102 | i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); | ||
2103 | obj->has_aliasing_ppgtt_mapping = 0; | ||
2104 | } | ||
2105 | |||
2136 | i915_gem_object_put_pages_gtt(obj); | 2106 | i915_gem_object_put_pages_gtt(obj); |
2137 | 2107 | ||
2138 | list_del_init(&obj->gtt_list); | 2108 | list_del_init(&obj->gtt_list); |
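
The unbind path above tears down the aliasing-PPGTT entries and clears the new flag; the bind side, which this hunk does not show, presumably does the mirror image. A hedged sketch of that pairing:

/* Sketch: after establishing the GTT binding, also populate the
 * aliasing PPGTT and remember that for unbind time. */
static void example_bind_aliasing_ppgtt(struct drm_i915_private *dev_priv,
					struct drm_i915_gem_object *obj)
{
	if (!dev_priv->mm.aliasing_ppgtt || obj->has_aliasing_ppgtt_mapping)
		return;

	i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
			       obj, obj->cache_level);
	obj->has_aliasing_ppgtt_mapping = 1;
}
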
@@ -2172,7 +2142,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring, | |||
2172 | return 0; | 2142 | return 0; |
2173 | } | 2143 | } |
2174 | 2144 | ||
2175 | static int i915_ring_idle(struct intel_ring_buffer *ring) | 2145 | static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire) |
2176 | { | 2146 | { |
2177 | int ret; | 2147 | int ret; |
2178 | 2148 | ||
@@ -2186,18 +2156,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring) | |||
2186 | return ret; | 2156 | return ret; |
2187 | } | 2157 | } |
2188 | 2158 | ||
2189 | return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); | 2159 | return i915_wait_request(ring, i915_gem_next_request_seqno(ring), |
2160 | do_retire); | ||
2190 | } | 2161 | } |
2191 | 2162 | ||
2192 | int | 2163 | int i915_gpu_idle(struct drm_device *dev, bool do_retire) |
2193 | i915_gpu_idle(struct drm_device *dev) | ||
2194 | { | 2164 | { |
2195 | drm_i915_private_t *dev_priv = dev->dev_private; | 2165 | drm_i915_private_t *dev_priv = dev->dev_private; |
2196 | int ret, i; | 2166 | int ret, i; |
2197 | 2167 | ||
2198 | /* Flush everything onto the inactive list. */ | 2168 | /* Flush everything onto the inactive list. */ |
2199 | for (i = 0; i < I915_NUM_RINGS; i++) { | 2169 | for (i = 0; i < I915_NUM_RINGS; i++) { |
2200 | ret = i915_ring_idle(&dev_priv->ring[i]); | 2170 | ret = i915_ring_idle(&dev_priv->ring[i], do_retire); |
2201 | if (ret) | 2171 | if (ret) |
2202 | return ret; | 2172 | return ret; |
2203 | } | 2173 | } |
@@ -2400,7 +2370,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | |||
2400 | if (!ring_passed_seqno(obj->last_fenced_ring, | 2370 | if (!ring_passed_seqno(obj->last_fenced_ring, |
2401 | obj->last_fenced_seqno)) { | 2371 | obj->last_fenced_seqno)) { |
2402 | ret = i915_wait_request(obj->last_fenced_ring, | 2372 | ret = i915_wait_request(obj->last_fenced_ring, |
2403 | obj->last_fenced_seqno); | 2373 | obj->last_fenced_seqno, |
2374 | true); | ||
2404 | if (ret) | 2375 | if (ret) |
2405 | return ret; | 2376 | return ret; |
2406 | } | 2377 | } |
@@ -2432,6 +2403,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) | |||
2432 | 2403 | ||
2433 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | 2404 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
2434 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2405 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2406 | |||
2407 | WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count); | ||
2435 | i915_gem_clear_fence_reg(obj->base.dev, | 2408 | i915_gem_clear_fence_reg(obj->base.dev, |
2436 | &dev_priv->fence_regs[obj->fence_reg]); | 2409 | &dev_priv->fence_regs[obj->fence_reg]); |
2437 | 2410 | ||
@@ -2456,7 +2429,7 @@ i915_find_fence_reg(struct drm_device *dev, | |||
2456 | if (!reg->obj) | 2429 | if (!reg->obj) |
2457 | return reg; | 2430 | return reg; |
2458 | 2431 | ||
2459 | if (!reg->obj->pin_count) | 2432 | if (!reg->pin_count) |
2460 | avail = reg; | 2433 | avail = reg; |
2461 | } | 2434 | } |
2462 | 2435 | ||
@@ -2466,7 +2439,7 @@ i915_find_fence_reg(struct drm_device *dev, | |||
2466 | /* None available, try to steal one or wait for a user to finish */ | 2439 | /* None available, try to steal one or wait for a user to finish */ |
2467 | avail = first = NULL; | 2440 | avail = first = NULL; |
2468 | list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { | 2441 | list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { |
2469 | if (reg->obj->pin_count) | 2442 | if (reg->pin_count) |
2470 | continue; | 2443 | continue; |
2471 | 2444 | ||
2472 | if (first == NULL) | 2445 | if (first == NULL) |
@@ -2541,7 +2514,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2541 | if (!ring_passed_seqno(obj->last_fenced_ring, | 2514 | if (!ring_passed_seqno(obj->last_fenced_ring, |
2542 | reg->setup_seqno)) { | 2515 | reg->setup_seqno)) { |
2543 | ret = i915_wait_request(obj->last_fenced_ring, | 2516 | ret = i915_wait_request(obj->last_fenced_ring, |
2544 | reg->setup_seqno); | 2517 | reg->setup_seqno, |
2518 | true); | ||
2545 | if (ret) | 2519 | if (ret) |
2546 | return ret; | 2520 | return ret; |
2547 | } | 2521 | } |
@@ -2560,7 +2534,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2560 | 2534 | ||
2561 | reg = i915_find_fence_reg(dev, pipelined); | 2535 | reg = i915_find_fence_reg(dev, pipelined); |
2562 | if (reg == NULL) | 2536 | if (reg == NULL) |
2563 | return -ENOSPC; | 2537 | return -EDEADLK; |
2564 | 2538 | ||
2565 | ret = i915_gem_object_flush_fence(obj, pipelined); | 2539 | ret = i915_gem_object_flush_fence(obj, pipelined); |
2566 | if (ret) | 2540 | if (ret) |
@@ -2660,6 +2634,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev, | |||
2660 | list_del_init(®->lru_list); | 2634 | list_del_init(®->lru_list); |
2661 | reg->obj = NULL; | 2635 | reg->obj = NULL; |
2662 | reg->setup_seqno = 0; | 2636 | reg->setup_seqno = 0; |
2637 | reg->pin_count = 0; | ||
2663 | } | 2638 | } |
2664 | 2639 | ||
2665 | /** | 2640 | /** |
@@ -2946,6 +2921,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
2946 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | 2921 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
2947 | enum i915_cache_level cache_level) | 2922 | enum i915_cache_level cache_level) |
2948 | { | 2923 | { |
2924 | struct drm_device *dev = obj->base.dev; | ||
2925 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2949 | int ret; | 2926 | int ret; |
2950 | 2927 | ||
2951 | if (obj->cache_level == cache_level) | 2928 | if (obj->cache_level == cache_level) |
@@ -2974,6 +2951,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
2974 | } | 2951 | } |
2975 | 2952 | ||
2976 | i915_gem_gtt_rebind_object(obj, cache_level); | 2953 | i915_gem_gtt_rebind_object(obj, cache_level); |
2954 | if (obj->has_aliasing_ppgtt_mapping) | ||
2955 | i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, | ||
2956 | obj, cache_level); | ||
2977 | } | 2957 | } |
2978 | 2958 | ||
2979 | if (cache_level == I915_CACHE_NONE) { | 2959 | if (cache_level == I915_CACHE_NONE) { |
@@ -3084,10 +3064,13 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) | |||
3084 | return ret; | 3064 | return ret; |
3085 | } | 3065 | } |
3086 | 3066 | ||
3067 | ret = i915_gem_object_wait_rendering(obj); | ||
3068 | if (ret) | ||
3069 | return ret; | ||
3070 | |||
3087 | /* Ensure that we invalidate the GPU's caches and TLBs. */ | 3071 | /* Ensure that we invalidate the GPU's caches and TLBs. */ |
3088 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 3072 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
3089 | 3073 | return 0; | |
3090 | return i915_gem_object_wait_rendering(obj); | ||
3091 | } | 3074 | } |
3092 | 3075 | ||
3093 | /** | 3076 | /** |
@@ -3619,8 +3602,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3619 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 3602 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
3620 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 3603 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
3621 | 3604 | ||
3622 | if (IS_GEN6(dev) || IS_GEN7(dev)) { | 3605 | if (HAS_LLC(dev)) { |
3623 | /* On Gen6, we can have the GPU use the LLC (the CPU | 3606 | /* On some devices, we can have the GPU use the LLC (the CPU |
3624 | * cache) for about a 10% performance improvement | 3607 | * cache) for about a 10% performance improvement |
3625 | * compared to uncached. Graphics requests other than | 3608 | * compared to uncached. Graphics requests other than |
3626 | * display scanout are coherent with the CPU in | 3609 | * display scanout are coherent with the CPU in |
@@ -3710,7 +3693,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3710 | return 0; | 3693 | return 0; |
3711 | } | 3694 | } |
3712 | 3695 | ||
3713 | ret = i915_gpu_idle(dev); | 3696 | ret = i915_gpu_idle(dev, true); |
3714 | if (ret) { | 3697 | if (ret) { |
3715 | mutex_unlock(&dev->struct_mutex); | 3698 | mutex_unlock(&dev->struct_mutex); |
3716 | return ret; | 3699 | return ret; |
@@ -3745,12 +3728,71 @@ i915_gem_idle(struct drm_device *dev) | |||
3745 | return 0; | 3728 | return 0; |
3746 | } | 3729 | } |
3747 | 3730 | ||
3731 | void i915_gem_init_swizzling(struct drm_device *dev) | ||
3732 | { | ||
3733 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3734 | |||
3735 | if (INTEL_INFO(dev)->gen < 5 || | ||
3736 | dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) | ||
3737 | return; | ||
3738 | |||
3739 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | ||
3740 | DISP_TILE_SURFACE_SWIZZLING); | ||
3741 | |||
3742 | if (IS_GEN5(dev)) | ||
3743 | return; | ||
3744 | |||
3745 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); | ||
3746 | if (IS_GEN6(dev)) | ||
3747 | I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB)); | ||
3748 | else | ||
3749 | I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB)); | ||
3750 | } | ||
3751 | |||
3752 | void i915_gem_init_ppgtt(struct drm_device *dev) | ||
3753 | { | ||
3754 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3755 | uint32_t pd_offset; | ||
3756 | struct intel_ring_buffer *ring; | ||
3757 | int i; | ||
3758 | |||
3759 | if (!dev_priv->mm.aliasing_ppgtt) | ||
3760 | return; | ||
3761 | |||
3762 | pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset; | ||
3763 | pd_offset /= 64; /* in cachelines, */ | ||
3764 | pd_offset <<= 16; | ||
3765 | |||
3766 | if (INTEL_INFO(dev)->gen == 6) { | ||
3767 | uint32_t ecochk = I915_READ(GAM_ECOCHK); | ||
3768 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | | ||
3769 | ECOCHK_PPGTT_CACHE64B); | ||
3770 | I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); | ||
3771 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
3772 | I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); | ||
3773 | /* GFX_MODE is per-ring on gen7+ */ | ||
3774 | } | ||
3775 | |||
3776 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
3777 | ring = &dev_priv->ring[i]; | ||
3778 | |||
3779 | if (INTEL_INFO(dev)->gen >= 7) | ||
3780 | I915_WRITE(RING_MODE_GEN7(ring), | ||
3781 | GFX_MODE_ENABLE(GFX_PPGTT_ENABLE)); | ||
3782 | |||
3783 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); | ||
3784 | I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); | ||
3785 | } | ||
3786 | } | ||
3787 | |||
3748 | int | 3788 | int |
3749 | i915_gem_init_ringbuffer(struct drm_device *dev) | 3789 | i915_gem_init_hw(struct drm_device *dev) |
3750 | { | 3790 | { |
3751 | drm_i915_private_t *dev_priv = dev->dev_private; | 3791 | drm_i915_private_t *dev_priv = dev->dev_private; |
3752 | int ret; | 3792 | int ret; |
3753 | 3793 | ||
3794 | i915_gem_init_swizzling(dev); | ||
3795 | |||
3754 | ret = intel_init_render_ring_buffer(dev); | 3796 | ret = intel_init_render_ring_buffer(dev); |
3755 | if (ret) | 3797 | if (ret) |
3756 | return ret; | 3798 | return ret; |
@@ -3769,6 +3811,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
3769 | 3811 | ||
3770 | dev_priv->next_seqno = 1; | 3812 | dev_priv->next_seqno = 1; |
3771 | 3813 | ||
3814 | i915_gem_init_ppgtt(dev); | ||
3815 | |||
3772 | return 0; | 3816 | return 0; |
3773 | 3817 | ||
3774 | cleanup_bsd_ring: | 3818 | cleanup_bsd_ring: |
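
The page-directory base programmed into RING_PP_DIR_BASE is not a byte address: i915_gem_init_ppgtt() converts the directory's GTT offset into 64-byte cachelines and shifts it into bits 31:16 of the register. A standalone check of that encoding (register layout as implied by the code above, not taken from documentation):

#include <stdint.h>
#include <stdio.h>

/* Encode a page-directory GTT offset the way i915_gem_init_ppgtt() does:
 * offset expressed in 64-byte cachelines, placed in bits 31:16. */
static uint32_t encode_pd_offset(uint32_t gtt_offset)
{
	uint32_t pd = gtt_offset / 64;	/* in cachelines */

	return pd << 16;
}

int main(void)
{
	/* e.g. a page directory living 1 MiB into the GTT */
	printf("0x%08x\n", (unsigned)encode_pd_offset(1u << 20));
	return 0;
}
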
@@ -3806,7 +3850,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
3806 | mutex_lock(&dev->struct_mutex); | 3850 | mutex_lock(&dev->struct_mutex); |
3807 | dev_priv->mm.suspended = 0; | 3851 | dev_priv->mm.suspended = 0; |
3808 | 3852 | ||
3809 | ret = i915_gem_init_ringbuffer(dev); | 3853 | ret = i915_gem_init_hw(dev); |
3810 | if (ret != 0) { | 3854 | if (ret != 0) { |
3811 | mutex_unlock(&dev->struct_mutex); | 3855 | mutex_unlock(&dev->struct_mutex); |
3812 | return ret; | 3856 | return ret; |
@@ -4201,7 +4245,7 @@ rescan: | |||
4201 | * This has a dramatic impact to reduce the number of | 4245 | * This has a dramatic impact to reduce the number of |
4202 | * OOM-killer events whilst running the GPU aggressively. | 4246 | * OOM-killer events whilst running the GPU aggressively. |
4203 | */ | 4247 | */ |
4204 | if (i915_gpu_idle(dev) == 0) | 4248 | if (i915_gpu_idle(dev, true) == 0) |
4205 | goto rescan; | 4249 | goto rescan; |
4206 | } | 4250 | } |
4207 | mutex_unlock(&dev->struct_mutex); | 4251 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index ead5d00f91b0..21a82710f4b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -36,7 +36,6 @@ static bool | |||
36 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) | 36 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) |
37 | { | 37 | { |
38 | list_add(&obj->exec_list, unwind); | 38 | list_add(&obj->exec_list, unwind); |
39 | drm_gem_object_reference(&obj->base); | ||
40 | return drm_mm_scan_add_block(obj->gtt_space); | 39 | return drm_mm_scan_add_block(obj->gtt_space); |
41 | } | 40 | } |
42 | 41 | ||
@@ -49,21 +48,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
49 | struct drm_i915_gem_object *obj; | 48 | struct drm_i915_gem_object *obj; |
50 | int ret = 0; | 49 | int ret = 0; |
51 | 50 | ||
52 | i915_gem_retire_requests(dev); | ||
53 | |||
54 | /* Re-check for free space after retiring requests */ | ||
55 | if (mappable) { | ||
56 | if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, | ||
57 | min_size, alignment, 0, | ||
58 | dev_priv->mm.gtt_mappable_end, | ||
59 | 0)) | ||
60 | return 0; | ||
61 | } else { | ||
62 | if (drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
63 | min_size, alignment, 0)) | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | trace_i915_gem_evict(dev, min_size, alignment, mappable); | 51 | trace_i915_gem_evict(dev, min_size, alignment, mappable); |
68 | 52 | ||
69 | /* | 53 | /* |
@@ -139,7 +123,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
139 | BUG_ON(ret); | 123 | BUG_ON(ret); |
140 | 124 | ||
141 | list_del_init(&obj->exec_list); | 125 | list_del_init(&obj->exec_list); |
142 | drm_gem_object_unreference(&obj->base); | ||
143 | } | 126 | } |
144 | 127 | ||
145 | /* We expect the caller to unpin, evict all and try again, or give up. | 128 | /* We expect the caller to unpin, evict all and try again, or give up. |
@@ -158,10 +141,10 @@ found: | |||
158 | exec_list); | 141 | exec_list); |
159 | if (drm_mm_scan_remove_block(obj->gtt_space)) { | 142 | if (drm_mm_scan_remove_block(obj->gtt_space)) { |
160 | list_move(&obj->exec_list, &eviction_list); | 143 | list_move(&obj->exec_list, &eviction_list); |
144 | drm_gem_object_reference(&obj->base); | ||
161 | continue; | 145 | continue; |
162 | } | 146 | } |
163 | list_del_init(&obj->exec_list); | 147 | list_del_init(&obj->exec_list); |
164 | drm_gem_object_unreference(&obj->base); | ||
165 | } | 148 | } |
166 | 149 | ||
167 | /* Unbinding will emit any required flushes */ | 150 | /* Unbinding will emit any required flushes */ |
@@ -195,7 +178,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | |||
195 | trace_i915_gem_evict_everything(dev, purgeable_only); | 178 | trace_i915_gem_evict_everything(dev, purgeable_only); |
196 | 179 | ||
197 | /* Flush everything (on to the inactive lists) and evict */ | 180 | /* Flush everything (on to the inactive lists) and evict */ |
198 | ret = i915_gpu_idle(dev); | 181 | ret = i915_gpu_idle(dev, true); |
199 | if (ret) | 182 | if (ret) |
200 | return ret; | 183 | return ret; |
201 | 184 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 65e1f0043f9d..81687af00893 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | |||
203 | cd->invalidate_domains |= invalidate_domains; | 203 | cd->invalidate_domains |= invalidate_domains; |
204 | cd->flush_domains |= flush_domains; | 204 | cd->flush_domains |= flush_domains; |
205 | if (flush_domains & I915_GEM_GPU_DOMAINS) | 205 | if (flush_domains & I915_GEM_GPU_DOMAINS) |
206 | cd->flush_rings |= obj->ring->id; | 206 | cd->flush_rings |= intel_ring_flag(obj->ring); |
207 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | 207 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) |
208 | cd->flush_rings |= ring->id; | 208 | cd->flush_rings |= intel_ring_flag(ring); |
209 | } | 209 | } |
210 | 210 | ||
211 | struct eb_objects { | 211 | struct eb_objects { |
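
Switching cd->flush_rings from accumulating raw ring->id values to intel_ring_flag() turns the field into a proper bitmask, so the rings a batch touches stay distinguishable once OR-ed together. The helper is presumably the usual one-bit-per-ring mapping, along these lines:

/* Presumed shape of the helper: one bit per ring id, so OR-ing the
 * flags of several rings never collapses them into another ring's id. */
static inline u32 example_ring_flag(const struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}
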
@@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
287 | * exec_object list, so it should have a GTT space bound by now. | 287 | * exec_object list, so it should have a GTT space bound by now. |
288 | */ | 288 | */ |
289 | if (unlikely(target_offset == 0)) { | 289 | if (unlikely(target_offset == 0)) { |
290 | DRM_ERROR("No GTT space found for object %d\n", | 290 | DRM_DEBUG("No GTT space found for object %d\n", |
291 | reloc->target_handle); | 291 | reloc->target_handle); |
292 | return ret; | 292 | return ret; |
293 | } | 293 | } |
294 | 294 | ||
295 | /* Validate that the target is in a valid r/w GPU domain */ | 295 | /* Validate that the target is in a valid r/w GPU domain */ |
296 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { | 296 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { |
297 | DRM_ERROR("reloc with multiple write domains: " | 297 | DRM_DEBUG("reloc with multiple write domains: " |
298 | "obj %p target %d offset %d " | 298 | "obj %p target %d offset %d " |
299 | "read %08x write %08x", | 299 | "read %08x write %08x", |
300 | obj, reloc->target_handle, | 300 | obj, reloc->target_handle, |
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
303 | reloc->write_domain); | 303 | reloc->write_domain); |
304 | return ret; | 304 | return ret; |
305 | } | 305 | } |
306 | if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) { | 306 | if (unlikely((reloc->write_domain | reloc->read_domains) |
307 | DRM_ERROR("reloc with read/write CPU domains: " | 307 | & ~I915_GEM_GPU_DOMAINS)) { |
308 | DRM_DEBUG("reloc with read/write non-GPU domains: " | ||
308 | "obj %p target %d offset %d " | 309 | "obj %p target %d offset %d " |
309 | "read %08x write %08x", | 310 | "read %08x write %08x", |
310 | obj, reloc->target_handle, | 311 | obj, reloc->target_handle, |
@@ -315,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
315 | } | 316 | } |
316 | if (unlikely(reloc->write_domain && target_obj->pending_write_domain && | 317 | if (unlikely(reloc->write_domain && target_obj->pending_write_domain && |
317 | reloc->write_domain != target_obj->pending_write_domain)) { | 318 | reloc->write_domain != target_obj->pending_write_domain)) { |
318 | DRM_ERROR("Write domain conflict: " | 319 | DRM_DEBUG("Write domain conflict: " |
319 | "obj %p target %d offset %d " | 320 | "obj %p target %d offset %d " |
320 | "new %08x old %08x\n", | 321 | "new %08x old %08x\n", |
321 | obj, reloc->target_handle, | 322 | obj, reloc->target_handle, |
@@ -336,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
336 | 337 | ||
337 | /* Check that the relocation address is valid... */ | 338 | /* Check that the relocation address is valid... */ |
338 | if (unlikely(reloc->offset > obj->base.size - 4)) { | 339 | if (unlikely(reloc->offset > obj->base.size - 4)) { |
339 | DRM_ERROR("Relocation beyond object bounds: " | 340 | DRM_DEBUG("Relocation beyond object bounds: " |
340 | "obj %p target %d offset %d size %d.\n", | 341 | "obj %p target %d offset %d size %d.\n", |
341 | obj, reloc->target_handle, | 342 | obj, reloc->target_handle, |
342 | (int) reloc->offset, | 343 | (int) reloc->offset, |
@@ -344,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
344 | return ret; | 345 | return ret; |
345 | } | 346 | } |
346 | if (unlikely(reloc->offset & 3)) { | 347 | if (unlikely(reloc->offset & 3)) { |
347 | DRM_ERROR("Relocation not 4-byte aligned: " | 348 | DRM_DEBUG("Relocation not 4-byte aligned: " |
348 | "obj %p target %d offset %d.\n", | 349 | "obj %p target %d offset %d.\n", |
349 | obj, reloc->target_handle, | 350 | obj, reloc->target_handle, |
350 | (int) reloc->offset); | 351 | (int) reloc->offset); |
@@ -461,11 +462,60 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
461 | return ret; | 462 | return ret; |
462 | } | 463 | } |
463 | 464 | ||
465 | #define __EXEC_OBJECT_HAS_FENCE (1<<31) | ||
466 | |||
467 | static int | ||
468 | pin_and_fence_object(struct drm_i915_gem_object *obj, | ||
469 | struct intel_ring_buffer *ring) | ||
470 | { | ||
471 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
472 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | ||
473 | bool need_fence, need_mappable; | ||
474 | int ret; | ||
475 | |||
476 | need_fence = | ||
477 | has_fenced_gpu_access && | ||
478 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
479 | obj->tiling_mode != I915_TILING_NONE; | ||
480 | need_mappable = | ||
481 | entry->relocation_count ? true : need_fence; | ||
482 | |||
483 | ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); | ||
484 | if (ret) | ||
485 | return ret; | ||
486 | |||
487 | if (has_fenced_gpu_access) { | ||
488 | if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { | ||
489 | if (obj->tiling_mode) { | ||
490 | ret = i915_gem_object_get_fence(obj, ring); | ||
491 | if (ret) | ||
492 | goto err_unpin; | ||
493 | |||
494 | entry->flags |= __EXEC_OBJECT_HAS_FENCE; | ||
495 | i915_gem_object_pin_fence(obj); | ||
496 | } else { | ||
497 | ret = i915_gem_object_put_fence(obj); | ||
498 | if (ret) | ||
499 | goto err_unpin; | ||
500 | } | ||
501 | } | ||
502 | obj->pending_fenced_gpu_access = need_fence; | ||
503 | } | ||
504 | |||
505 | entry->offset = obj->gtt_offset; | ||
506 | return 0; | ||
507 | |||
508 | err_unpin: | ||
509 | i915_gem_object_unpin(obj); | ||
510 | return ret; | ||
511 | } | ||
512 | |||
464 | static int | 513 | static int |
465 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | 514 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
466 | struct drm_file *file, | 515 | struct drm_file *file, |
467 | struct list_head *objects) | 516 | struct list_head *objects) |
468 | { | 517 | { |
518 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
469 | struct drm_i915_gem_object *obj; | 519 | struct drm_i915_gem_object *obj; |
470 | int ret, retry; | 520 | int ret, retry; |
471 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | 521 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; |
@@ -518,6 +568,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
518 | list_for_each_entry(obj, objects, exec_list) { | 568 | list_for_each_entry(obj, objects, exec_list) { |
519 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | 569 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
520 | bool need_fence, need_mappable; | 570 | bool need_fence, need_mappable; |
571 | |||
521 | if (!obj->gtt_space) | 572 | if (!obj->gtt_space) |
522 | continue; | 573 | continue; |
523 | 574 | ||
@@ -532,58 +583,55 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
532 | (need_mappable && !obj->map_and_fenceable)) | 583 | (need_mappable && !obj->map_and_fenceable)) |
533 | ret = i915_gem_object_unbind(obj); | 584 | ret = i915_gem_object_unbind(obj); |
534 | else | 585 | else |
535 | ret = i915_gem_object_pin(obj, | 586 | ret = pin_and_fence_object(obj, ring); |
536 | entry->alignment, | ||
537 | need_mappable); | ||
538 | if (ret) | 587 | if (ret) |
539 | goto err; | 588 | goto err; |
540 | |||
541 | entry++; | ||
542 | } | 589 | } |
543 | 590 | ||
544 | /* Bind fresh objects */ | 591 | /* Bind fresh objects */ |
545 | list_for_each_entry(obj, objects, exec_list) { | 592 | list_for_each_entry(obj, objects, exec_list) { |
546 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | 593 | if (obj->gtt_space) |
547 | bool need_fence; | 594 | continue; |
548 | 595 | ||
549 | need_fence = | 596 | ret = pin_and_fence_object(obj, ring); |
550 | has_fenced_gpu_access && | 597 | if (ret) { |
551 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 598 | int ret_ignore; |
552 | obj->tiling_mode != I915_TILING_NONE; | 599 | |
600 | /* This can potentially raise a harmless | ||
601 | * -EINVAL if we failed to bind in the above | ||
602 | * call. It cannot raise -EINTR since we know | ||
603 | * that the bo is freshly bound and so will | ||
604 | * not need to be flushed or waited upon. | ||
605 | */ | ||
606 | ret_ignore = i915_gem_object_unbind(obj); | ||
607 | (void)ret_ignore; | ||
608 | WARN_ON(obj->gtt_space); | ||
609 | break; | ||
610 | } | ||
611 | } | ||
553 | 612 | ||
554 | if (!obj->gtt_space) { | 613 | /* Decrement pin count for bound objects */ |
555 | bool need_mappable = | 614 | list_for_each_entry(obj, objects, exec_list) { |
556 | entry->relocation_count ? true : need_fence; | 615 | struct drm_i915_gem_exec_object2 *entry; |
557 | 616 | ||
558 | ret = i915_gem_object_pin(obj, | 617 | if (!obj->gtt_space) |
559 | entry->alignment, | 618 | continue; |
560 | need_mappable); | ||
561 | if (ret) | ||
562 | break; | ||
563 | } | ||
564 | 619 | ||
565 | if (has_fenced_gpu_access) { | 620 | entry = obj->exec_entry; |
566 | if (need_fence) { | 621 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { |
567 | ret = i915_gem_object_get_fence(obj, ring); | 622 | i915_gem_object_unpin_fence(obj); |
568 | if (ret) | 623 | entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; |
569 | break; | ||
570 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
571 | obj->tiling_mode == I915_TILING_NONE) { | ||
572 | /* XXX pipelined! */ | ||
573 | ret = i915_gem_object_put_fence(obj); | ||
574 | if (ret) | ||
575 | break; | ||
576 | } | ||
577 | obj->pending_fenced_gpu_access = need_fence; | ||
578 | } | 624 | } |
579 | 625 | ||
580 | entry->offset = obj->gtt_offset; | 626 | i915_gem_object_unpin(obj); |
581 | } | ||
582 | 627 | ||
583 | /* Decrement pin count for bound objects */ | 628 | /* ... and ensure ppgtt mapping exist if needed. */ |
584 | list_for_each_entry(obj, objects, exec_list) { | 629 | if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { |
585 | if (obj->gtt_space) | 630 | i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, |
586 | i915_gem_object_unpin(obj); | 631 | obj, obj->cache_level); |
632 | |||
633 | obj->has_aliasing_ppgtt_mapping = 1; | ||
634 | } | ||
587 | } | 635 | } |
588 | 636 | ||
589 | if (ret != -ENOSPC || retry > 1) | 637 | if (ret != -ENOSPC || retry > 1) |
@@ -600,16 +648,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
600 | } while (1); | 648 | } while (1); |
601 | 649 | ||
602 | err: | 650 | err: |
603 | obj = list_entry(obj->exec_list.prev, | 651 | list_for_each_entry_continue_reverse(obj, objects, exec_list) { |
604 | struct drm_i915_gem_object, | 652 | struct drm_i915_gem_exec_object2 *entry; |
605 | exec_list); | 653 | |
606 | while (objects != &obj->exec_list) { | 654 | if (!obj->gtt_space) |
607 | if (obj->gtt_space) | 655 | continue; |
608 | i915_gem_object_unpin(obj); | 656 | |
657 | entry = obj->exec_entry; | ||
658 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { | ||
659 | i915_gem_object_unpin_fence(obj); | ||
660 | entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; | ||
661 | } | ||
609 | 662 | ||
610 | obj = list_entry(obj->exec_list.prev, | 663 | i915_gem_object_unpin(obj); |
611 | struct drm_i915_gem_object, | ||
612 | exec_list); | ||
613 | } | 664 | } |
614 | 665 | ||
615 | return ret; | 666 | return ret; |
@@ -682,7 +733,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
682 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | 733 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
683 | exec[i].handle)); | 734 | exec[i].handle)); |
684 | if (&obj->base == NULL) { | 735 | if (&obj->base == NULL) { |
685 | DRM_ERROR("Invalid object handle %d at index %d\n", | 736 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
686 | exec[i].handle, i); | 737 | exec[i].handle, i); |
687 | ret = -ENOENT; | 738 | ret = -ENOENT; |
688 | goto err; | 739 | goto err; |
@@ -1013,7 +1064,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1013 | int ret, mode, i; | 1064 | int ret, mode, i; |
1014 | 1065 | ||
1015 | if (!i915_gem_check_execbuffer(args)) { | 1066 | if (!i915_gem_check_execbuffer(args)) { |
1016 | DRM_ERROR("execbuf with invalid offset/length\n"); | 1067 | DRM_DEBUG("execbuf with invalid offset/length\n"); |
1017 | return -EINVAL; | 1068 | return -EINVAL; |
1018 | } | 1069 | } |
1019 | 1070 | ||
@@ -1028,20 +1079,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1028 | break; | 1079 | break; |
1029 | case I915_EXEC_BSD: | 1080 | case I915_EXEC_BSD: |
1030 | if (!HAS_BSD(dev)) { | 1081 | if (!HAS_BSD(dev)) { |
1031 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); | 1082 | DRM_DEBUG("execbuf with invalid ring (BSD)\n"); |
1032 | return -EINVAL; | 1083 | return -EINVAL; |
1033 | } | 1084 | } |
1034 | ring = &dev_priv->ring[VCS]; | 1085 | ring = &dev_priv->ring[VCS]; |
1035 | break; | 1086 | break; |
1036 | case I915_EXEC_BLT: | 1087 | case I915_EXEC_BLT: |
1037 | if (!HAS_BLT(dev)) { | 1088 | if (!HAS_BLT(dev)) { |
1038 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | 1089 | DRM_DEBUG("execbuf with invalid ring (BLT)\n"); |
1039 | return -EINVAL; | 1090 | return -EINVAL; |
1040 | } | 1091 | } |
1041 | ring = &dev_priv->ring[BCS]; | 1092 | ring = &dev_priv->ring[BCS]; |
1042 | break; | 1093 | break; |
1043 | default: | 1094 | default: |
1044 | DRM_ERROR("execbuf with unknown ring: %d\n", | 1095 | DRM_DEBUG("execbuf with unknown ring: %d\n", |
1045 | (int)(args->flags & I915_EXEC_RING_MASK)); | 1096 | (int)(args->flags & I915_EXEC_RING_MASK)); |
1046 | return -EINVAL; | 1097 | return -EINVAL; |
1047 | } | 1098 | } |
@@ -1067,18 +1118,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1067 | } | 1118 | } |
1068 | break; | 1119 | break; |
1069 | default: | 1120 | default: |
1070 | DRM_ERROR("execbuf with unknown constants: %d\n", mode); | 1121 | DRM_DEBUG("execbuf with unknown constants: %d\n", mode); |
1071 | return -EINVAL; | 1122 | return -EINVAL; |
1072 | } | 1123 | } |
1073 | 1124 | ||
1074 | if (args->buffer_count < 1) { | 1125 | if (args->buffer_count < 1) { |
1075 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 1126 | DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); |
1076 | return -EINVAL; | 1127 | return -EINVAL; |
1077 | } | 1128 | } |
1078 | 1129 | ||
1079 | if (args->num_cliprects != 0) { | 1130 | if (args->num_cliprects != 0) { |
1080 | if (ring != &dev_priv->ring[RCS]) { | 1131 | if (ring != &dev_priv->ring[RCS]) { |
1081 | DRM_ERROR("clip rectangles are only valid with the render ring\n"); | 1132 | DRM_DEBUG("clip rectangles are only valid with the render ring\n"); |
1082 | return -EINVAL; | 1133 | return -EINVAL; |
1083 | } | 1134 | } |
1084 | 1135 | ||
@@ -1123,7 +1174,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1123 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | 1174 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
1124 | exec[i].handle)); | 1175 | exec[i].handle)); |
1125 | if (&obj->base == NULL) { | 1176 | if (&obj->base == NULL) { |
1126 | DRM_ERROR("Invalid object handle %d at index %d\n", | 1177 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
1127 | exec[i].handle, i); | 1178 | exec[i].handle, i); |
1128 | /* prevent error path from reading uninitialized data */ | 1179 | /* prevent error path from reading uninitialized data */ |
1129 | ret = -ENOENT; | 1180 | ret = -ENOENT; |
@@ -1131,7 +1182,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1131 | } | 1182 | } |
1132 | 1183 | ||
1133 | if (!list_empty(&obj->exec_list)) { | 1184 | if (!list_empty(&obj->exec_list)) { |
1134 | DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n", | 1185 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
1135 | obj, exec[i].handle, i); | 1186 | obj, exec[i].handle, i); |
1136 | ret = -EINVAL; | 1187 | ret = -EINVAL; |
1137 | goto err; | 1188 | goto err; |
@@ -1169,7 +1220,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1169 | 1220 | ||
1170 | /* Set the pending read domains for the batch buffer to COMMAND */ | 1221 | /* Set the pending read domains for the batch buffer to COMMAND */ |
1171 | if (batch_obj->base.pending_write_domain) { | 1222 | if (batch_obj->base.pending_write_domain) { |
1172 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | 1223 | DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); |
1173 | ret = -EINVAL; | 1224 | ret = -EINVAL; |
1174 | goto err; | 1225 | goto err; |
1175 | } | 1226 | } |
@@ -1186,7 +1237,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1186 | * so every billion or so execbuffers, we need to stall | 1237 | * so every billion or so execbuffers, we need to stall |
1187 | * the GPU in order to reset the counters. | 1238 | * the GPU in order to reset the counters. |
1188 | */ | 1239 | */ |
1189 | ret = i915_gpu_idle(dev); | 1240 | ret = i915_gpu_idle(dev, true); |
1190 | if (ret) | 1241 | if (ret) |
1191 | goto err; | 1242 | goto err; |
1192 | 1243 | ||
@@ -1274,7 +1325,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1274 | int ret, i; | 1325 | int ret, i; |
1275 | 1326 | ||
1276 | if (args->buffer_count < 1) { | 1327 | if (args->buffer_count < 1) { |
1277 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 1328 | DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); |
1278 | return -EINVAL; | 1329 | return -EINVAL; |
1279 | } | 1330 | } |
1280 | 1331 | ||
@@ -1282,7 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1282 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | 1333 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); |
1283 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | 1334 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); |
1284 | if (exec_list == NULL || exec2_list == NULL) { | 1335 | if (exec_list == NULL || exec2_list == NULL) { |
1285 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | 1336 | DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
1286 | args->buffer_count); | 1337 | args->buffer_count); |
1287 | drm_free_large(exec_list); | 1338 | drm_free_large(exec_list); |
1288 | drm_free_large(exec2_list); | 1339 | drm_free_large(exec2_list); |
@@ -1293,7 +1344,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1293 | (uintptr_t) args->buffers_ptr, | 1344 | (uintptr_t) args->buffers_ptr, |
1294 | sizeof(*exec_list) * args->buffer_count); | 1345 | sizeof(*exec_list) * args->buffer_count); |
1295 | if (ret != 0) { | 1346 | if (ret != 0) { |
1296 | DRM_ERROR("copy %d exec entries failed %d\n", | 1347 | DRM_DEBUG("copy %d exec entries failed %d\n", |
1297 | args->buffer_count, ret); | 1348 | args->buffer_count, ret); |
1298 | drm_free_large(exec_list); | 1349 | drm_free_large(exec_list); |
1299 | drm_free_large(exec2_list); | 1350 | drm_free_large(exec2_list); |
@@ -1334,7 +1385,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1334 | sizeof(*exec_list) * args->buffer_count); | 1385 | sizeof(*exec_list) * args->buffer_count); |
1335 | if (ret) { | 1386 | if (ret) { |
1336 | ret = -EFAULT; | 1387 | ret = -EFAULT; |
1337 | DRM_ERROR("failed to copy %d exec entries " | 1388 | DRM_DEBUG("failed to copy %d exec entries " |
1338 | "back to user (%d)\n", | 1389 | "back to user (%d)\n", |
1339 | args->buffer_count, ret); | 1390 | args->buffer_count, ret); |
1340 | } | 1391 | } |
@@ -1354,7 +1405,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1354 | int ret; | 1405 | int ret; |
1355 | 1406 | ||
1356 | if (args->buffer_count < 1) { | 1407 | if (args->buffer_count < 1) { |
1357 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | 1408 | DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); |
1358 | return -EINVAL; | 1409 | return -EINVAL; |
1359 | } | 1410 | } |
1360 | 1411 | ||
@@ -1364,7 +1415,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1364 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), | 1415 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), |
1365 | args->buffer_count); | 1416 | args->buffer_count); |
1366 | if (exec2_list == NULL) { | 1417 | if (exec2_list == NULL) { |
1367 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | 1418 | DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
1368 | args->buffer_count); | 1419 | args->buffer_count); |
1369 | return -ENOMEM; | 1420 | return -ENOMEM; |
1370 | } | 1421 | } |
@@ -1373,7 +1424,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1373 | (uintptr_t) args->buffers_ptr, | 1424 | (uintptr_t) args->buffers_ptr, |
1374 | sizeof(*exec2_list) * args->buffer_count); | 1425 | sizeof(*exec2_list) * args->buffer_count); |
1375 | if (ret != 0) { | 1426 | if (ret != 0) { |
1376 | DRM_ERROR("copy %d exec entries failed %d\n", | 1427 | DRM_DEBUG("copy %d exec entries failed %d\n", |
1377 | args->buffer_count, ret); | 1428 | args->buffer_count, ret); |
1378 | drm_free_large(exec2_list); | 1429 | drm_free_large(exec2_list); |
1379 | return -EFAULT; | 1430 | return -EFAULT; |
@@ -1388,7 +1439,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1388 | sizeof(*exec2_list) * args->buffer_count); | 1439 | sizeof(*exec2_list) * args->buffer_count); |
1389 | if (ret) { | 1440 | if (ret) { |
1390 | ret = -EFAULT; | 1441 | ret = -EFAULT; |
1391 | DRM_ERROR("failed to copy %d exec entries " | 1442 | DRM_DEBUG("failed to copy %d exec entries " |
1392 | "back to user (%d)\n", | 1443 | "back to user (%d)\n", |
1393 | args->buffer_count, ret); | 1444 | args->buffer_count, ret); |
1394 | } | 1445 | } |
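Aside on the relocation validation in the hunks above: a relocation is rejected when it names more than one write domain, using the classic single-bit test write_domain & (write_domain - 1). The short, self-contained C sketch below shows why that expression is non-zero exactly when two or more bits are set; the domain bit values are illustrative placeholders, not the driver's uapi definitions.

/* Sketch: why "x & (x - 1)" detects more than one set bit.
 * The domain bits below are illustrative placeholders only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU     (1u << 0)
#define DOMAIN_RENDER  (1u << 1)
#define DOMAIN_SAMPLER (1u << 2)

static int has_multiple_bits(uint32_t x)
{
	/* Clearing the lowest set bit leaves a non-zero value only if
	 * at least one other bit was also set. */
	return (x & (x - 1)) != 0;
}

int main(void)
{
	assert(!has_multiple_bits(0));                         /* no write domain */
	assert(!has_multiple_bits(DOMAIN_RENDER));             /* exactly one: ok */
	assert(has_multiple_bits(DOMAIN_RENDER | DOMAIN_CPU)); /* rejected */
	printf("single-write-domain test behaves as expected\n");
	return 0;
}

The follow-up check in the same hunk applies the same spirit, now rejecting any read or write domain outside I915_GEM_GPU_DOMAINS rather than only the CPU domain.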
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6042c5e6d278..2eacd78bb93b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -29,6 +29,279 @@ | |||
29 | #include "i915_trace.h" | 29 | #include "i915_trace.h" |
30 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
31 | 31 | ||
32 | /* PPGTT support for Sandybridge/Gen6 and later */ | ||
33 | static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | ||
34 | unsigned first_entry, | ||
35 | unsigned num_entries) | ||
36 | { | ||
37 | uint32_t *pt_vaddr; | ||
38 | uint32_t scratch_pte; | ||
39 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
40 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
41 | unsigned last_pte, i; | ||
42 | |||
43 | scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); | ||
44 | scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; | ||
45 | |||
46 | while (num_entries) { | ||
47 | last_pte = first_pte + num_entries; | ||
48 | if (last_pte > I915_PPGTT_PT_ENTRIES) | ||
49 | last_pte = I915_PPGTT_PT_ENTRIES; | ||
50 | |||
51 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
52 | |||
53 | for (i = first_pte; i < last_pte; i++) | ||
54 | pt_vaddr[i] = scratch_pte; | ||
55 | |||
56 | kunmap_atomic(pt_vaddr); | ||
57 | |||
58 | num_entries -= last_pte - first_pte; | ||
59 | first_pte = 0; | ||
60 | act_pd++; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | ||
65 | { | ||
66 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
67 | struct i915_hw_ppgtt *ppgtt; | ||
68 | uint32_t pd_entry; | ||
69 | unsigned first_pd_entry_in_global_pt; | ||
70 | uint32_t __iomem *pd_addr; | ||
71 | int i; | ||
72 | int ret = -ENOMEM; | ||
73 | |||
74 | /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 | ||
75 | * entries. For aliasing ppgtt support we just steal them at the end for | ||
76 | * now. */ | ||
77 | first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; | ||
78 | |||
79 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | ||
80 | if (!ppgtt) | ||
81 | return ret; | ||
82 | |||
83 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; | ||
84 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, | ||
85 | GFP_KERNEL); | ||
86 | if (!ppgtt->pt_pages) | ||
87 | goto err_ppgtt; | ||
88 | |||
89 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
90 | ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); | ||
91 | if (!ppgtt->pt_pages[i]) | ||
92 | goto err_pt_alloc; | ||
93 | } | ||
94 | |||
95 | if (dev_priv->mm.gtt->needs_dmar) { | ||
96 | ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) | ||
97 | *ppgtt->num_pd_entries, | ||
98 | GFP_KERNEL); | ||
99 | if (!ppgtt->pt_dma_addr) | ||
100 | goto err_pt_alloc; | ||
101 | } | ||
102 | |||
103 | pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt; | ||
104 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
105 | dma_addr_t pt_addr; | ||
106 | if (dev_priv->mm.gtt->needs_dmar) { | ||
107 | pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], | ||
108 | 0, 4096, | ||
109 | PCI_DMA_BIDIRECTIONAL); | ||
110 | |||
111 | if (pci_dma_mapping_error(dev->pdev, | ||
112 | pt_addr)) { | ||
113 | ret = -EIO; | ||
114 | goto err_pd_pin; | ||
115 | |||
116 | } | ||
117 | ppgtt->pt_dma_addr[i] = pt_addr; | ||
118 | } else | ||
119 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); | ||
120 | |||
121 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
122 | pd_entry |= GEN6_PDE_VALID; | ||
123 | |||
124 | writel(pd_entry, pd_addr + i); | ||
125 | } | ||
126 | readl(pd_addr); | ||
127 | |||
128 | ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; | ||
129 | |||
130 | i915_ppgtt_clear_range(ppgtt, 0, | ||
131 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); | ||
132 | |||
133 | ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); | ||
134 | |||
135 | dev_priv->mm.aliasing_ppgtt = ppgtt; | ||
136 | |||
137 | return 0; | ||
138 | |||
139 | err_pd_pin: | ||
140 | if (ppgtt->pt_dma_addr) { | ||
141 | for (i--; i >= 0; i--) | ||
142 | pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], | ||
143 | 4096, PCI_DMA_BIDIRECTIONAL); | ||
144 | } | ||
145 | err_pt_alloc: | ||
146 | kfree(ppgtt->pt_dma_addr); | ||
147 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
148 | if (ppgtt->pt_pages[i]) | ||
149 | __free_page(ppgtt->pt_pages[i]); | ||
150 | } | ||
151 | kfree(ppgtt->pt_pages); | ||
152 | err_ppgtt: | ||
153 | kfree(ppgtt); | ||
154 | |||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) | ||
159 | { | ||
160 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
161 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
162 | int i; | ||
163 | |||
164 | if (!ppgtt) | ||
165 | return; | ||
166 | |||
167 | if (ppgtt->pt_dma_addr) { | ||
168 | for (i = 0; i < ppgtt->num_pd_entries; i++) | ||
169 | pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], | ||
170 | 4096, PCI_DMA_BIDIRECTIONAL); | ||
171 | } | ||
172 | |||
173 | kfree(ppgtt->pt_dma_addr); | ||
174 | for (i = 0; i < ppgtt->num_pd_entries; i++) | ||
175 | __free_page(ppgtt->pt_pages[i]); | ||
176 | kfree(ppgtt->pt_pages); | ||
177 | kfree(ppgtt); | ||
178 | } | ||
179 | |||
180 | static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, | ||
181 | struct scatterlist *sg_list, | ||
182 | unsigned sg_len, | ||
183 | unsigned first_entry, | ||
184 | uint32_t pte_flags) | ||
185 | { | ||
186 | uint32_t *pt_vaddr, pte; | ||
187 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
188 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
189 | unsigned i, j, m, segment_len; | ||
190 | dma_addr_t page_addr; | ||
191 | struct scatterlist *sg; | ||
192 | |||
193 | /* init sg walking */ | ||
194 | sg = sg_list; | ||
195 | i = 0; | ||
196 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
197 | m = 0; | ||
198 | |||
199 | while (i < sg_len) { | ||
200 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
201 | |||
202 | for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { | ||
203 | page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | ||
204 | pte = GEN6_PTE_ADDR_ENCODE(page_addr); | ||
205 | pt_vaddr[j] = pte | pte_flags; | ||
206 | |||
207 | /* grab the next page */ | ||
208 | m++; | ||
209 | if (m == segment_len) { | ||
210 | sg = sg_next(sg); | ||
211 | i++; | ||
212 | if (i == sg_len) | ||
213 | break; | ||
214 | |||
215 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
216 | m = 0; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | kunmap_atomic(pt_vaddr); | ||
221 | |||
222 | first_pte = 0; | ||
223 | act_pd++; | ||
224 | } | ||
225 | } | ||
226 | |||
227 | static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, | ||
228 | unsigned first_entry, unsigned num_entries, | ||
229 | struct page **pages, uint32_t pte_flags) | ||
230 | { | ||
231 | uint32_t *pt_vaddr, pte; | ||
232 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
233 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
234 | unsigned last_pte, i; | ||
235 | dma_addr_t page_addr; | ||
236 | |||
237 | while (num_entries) { | ||
238 | last_pte = first_pte + num_entries; | ||
239 | last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES); | ||
240 | |||
241 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
242 | |||
243 | for (i = first_pte; i < last_pte; i++) { | ||
244 | page_addr = page_to_phys(*pages); | ||
245 | pte = GEN6_PTE_ADDR_ENCODE(page_addr); | ||
246 | pt_vaddr[i] = pte | pte_flags; | ||
247 | |||
248 | pages++; | ||
249 | } | ||
250 | |||
251 | kunmap_atomic(pt_vaddr); | ||
252 | |||
253 | num_entries -= last_pte - first_pte; | ||
254 | first_pte = 0; | ||
255 | act_pd++; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | ||
260 | struct drm_i915_gem_object *obj, | ||
261 | enum i915_cache_level cache_level) | ||
262 | { | ||
263 | struct drm_device *dev = obj->base.dev; | ||
264 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
265 | uint32_t pte_flags = GEN6_PTE_VALID; | ||
266 | |||
267 | switch (cache_level) { | ||
268 | case I915_CACHE_LLC_MLC: | ||
269 | pte_flags |= GEN6_PTE_CACHE_LLC_MLC; | ||
270 | break; | ||
271 | case I915_CACHE_LLC: | ||
272 | pte_flags |= GEN6_PTE_CACHE_LLC; | ||
273 | break; | ||
274 | case I915_CACHE_NONE: | ||
275 | pte_flags |= GEN6_PTE_UNCACHED; | ||
276 | break; | ||
277 | default: | ||
278 | BUG(); | ||
279 | } | ||
280 | |||
281 | if (dev_priv->mm.gtt->needs_dmar) { | ||
282 | BUG_ON(!obj->sg_list); | ||
283 | |||
284 | i915_ppgtt_insert_sg_entries(ppgtt, | ||
285 | obj->sg_list, | ||
286 | obj->num_sg, | ||
287 | obj->gtt_space->start >> PAGE_SHIFT, | ||
288 | pte_flags); | ||
289 | } else | ||
290 | i915_ppgtt_insert_pages(ppgtt, | ||
291 | obj->gtt_space->start >> PAGE_SHIFT, | ||
292 | obj->base.size >> PAGE_SHIFT, | ||
293 | obj->pages, | ||
294 | pte_flags); | ||
295 | } | ||
296 | |||
297 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | ||
298 | struct drm_i915_gem_object *obj) | ||
299 | { | ||
300 | i915_ppgtt_clear_range(ppgtt, | ||
301 | obj->gtt_space->start >> PAGE_SHIFT, | ||
302 | obj->base.size >> PAGE_SHIFT); | ||
303 | } | ||
304 | |||
32 | /* XXX kill agp_type! */ | 305 | /* XXX kill agp_type! */ |
33 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, | 306 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, |
34 | enum i915_cache_level cache_level) | 307 | enum i915_cache_level cache_level) |
@@ -55,7 +328,7 @@ static bool do_idling(struct drm_i915_private *dev_priv) | |||
55 | 328 | ||
56 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { | 329 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { |
57 | dev_priv->mm.interruptible = false; | 330 | dev_priv->mm.interruptible = false; |
58 | if (i915_gpu_idle(dev_priv->dev)) { | 331 | if (i915_gpu_idle(dev_priv->dev, false)) { |
59 | DRM_ERROR("Couldn't idle GPU\n"); | 332 | DRM_ERROR("Couldn't idle GPU\n"); |
60 | /* Wait a bit, in hopes it avoids the hang */ | 333 | /* Wait a bit, in hopes it avoids the hang */ |
61 | udelay(10); | 334 | udelay(10); |
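A back-of-the-envelope sketch of the aliasing PPGTT geometry established above: the page-directory entries are carved out of the tail of the 512*1024-entry global GTT, and each PDE points at a page table of PTEs that map 4 KiB pages. The concrete counts below (512 PDEs, 1024 PTEs per table) are assumptions for illustration; the patch itself takes them from I915_PPGTT_PD_ENTRIES and I915_PPGTT_PT_ENTRIES, which are defined outside this hunk.

/* Back-of-the-envelope sketch of the aliasing PPGTT layout set up above.
 * PD_ENTRIES and PT_ENTRIES are assumed values for illustration only. */
#include <stdio.h>

#define GLOBAL_GTT_ENTRIES (512u * 1024u)  /* as noted in the patch comment */
#define PD_ENTRIES         512u            /* assumed I915_PPGTT_PD_ENTRIES */
#define PT_ENTRIES         1024u           /* assumed I915_PPGTT_PT_ENTRIES */
#define PAGE_SIZE_BYTES    4096u

int main(void)
{
	unsigned first_pd = GLOBAL_GTT_ENTRIES - PD_ENTRIES;
	unsigned long long coverage =
		(unsigned long long)PD_ENTRIES * PT_ENTRIES * PAGE_SIZE_BYTES;

	/* PDEs are stolen from the tail of the global GTT page table,
	 * and pd_offset is that slot index scaled to bytes. */
	printf("first PDE slot in global GTT : %u\n", first_pd);
	printf("pd_offset (bytes)            : %u\n",
	       first_pd * (unsigned)sizeof(unsigned int));
	printf("address space covered        : %llu MiB\n", coverage >> 20);
	return 0;
}

With these assumed constants the aliasing PPGTT spans 2 GiB, which is why stealing only the last few hundred global-GTT entries is enough to describe the whole per-process address space.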
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 31d334d9d9da..1a9306665987 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -93,8 +93,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
94 | 94 | ||
95 | if (INTEL_INFO(dev)->gen >= 6) { | 95 | if (INTEL_INFO(dev)->gen >= 6) { |
96 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | 96 | uint32_t dimm_c0, dimm_c1; |
97 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 97 | dimm_c0 = I915_READ(MAD_DIMM_C0); |
98 | dimm_c1 = I915_READ(MAD_DIMM_C1); | ||
99 | dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; | ||
100 | dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; | ||
101 | /* Enable swizzling when the channels are populated with | ||
102 | * identically sized dimms. We don't need to check the 3rd | ||
103 | * channel because no cpu with gpu attached ships in that | ||
104 | * configuration. Also, swizzling only makes sense for 2 | ||
105 | * channels anyway. */ | ||
106 | if (dimm_c0 == dimm_c1) { | ||
107 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | ||
108 | swizzle_y = I915_BIT_6_SWIZZLE_9; | ||
109 | } else { | ||
110 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
111 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
112 | } | ||
98 | } else if (IS_GEN5(dev)) { | 113 | } else if (IS_GEN5(dev)) { |
99 | /* On Ironlake whatever DRAM config, GPU always do | 114 | /* On Ironlake whatever DRAM config, GPU always do |
100 | * same swizzling setup. | 115 | * same swizzling setup. |
@@ -107,10 +122,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
107 | */ | 122 | */ |
108 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | 123 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
109 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 124 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
110 | } else if (IS_MOBILE(dev)) { | 125 | } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { |
111 | uint32_t dcc; | 126 | uint32_t dcc; |
112 | 127 | ||
113 | /* On mobile 9xx chipsets, channel interleave by the CPU is | 128 | /* On 9xx chipsets, channel interleave by the CPU is |
114 | * determined by DCC. For single-channel, neither the CPU | 129 | * determined by DCC. For single-channel, neither the CPU |
115 | * nor the GPU do swizzling. For dual channel interleaved, | 130 | * nor the GPU do swizzling. For dual channel interleaved, |
116 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit | 131 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit |
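For context on the swizzle values selected above: I915_BIT_6_SWIZZLE_9 and I915_BIT_6_SWIZZLE_9_10 describe how the memory controller folds higher address bits into bit 6 when both channels carry identically sized DIMMs, so any CPU-side detiling has to apply the same XOR. The sketch below only illustrates that documented bit relationship; it is not the driver's implementation.

/* Illustration of what the bit-6 swizzle modes mean for an address.
 * This mirrors the documented semantics of the enum values, not the
 * driver's internal code paths. */
#include <stdint.h>
#include <stdio.h>

enum swizzle_mode { SWIZZLE_NONE, SWIZZLE_9, SWIZZLE_9_10 };

static uint64_t swizzle_addr(uint64_t addr, enum swizzle_mode mode)
{
	uint64_t bit6 = (addr >> 6) & 1;

	/* Bit 6 is XORed with bit 9, and for the 9_10 mode also bit 10. */
	if (mode == SWIZZLE_9 || mode == SWIZZLE_9_10)
		bit6 ^= (addr >> 9) & 1;
	if (mode == SWIZZLE_9_10)
		bit6 ^= (addr >> 10) & 1;

	return (addr & ~(1ull << 6)) | (bit6 << 6);
}

int main(void)
{
	/* With bit 9 set, bit 6 flips under SWIZZLE_9 but not SWIZZLE_NONE. */
	printf("0x%llx -> 0x%llx (SWIZZLE_9)\n", 0x200ull,
	       (unsigned long long)swizzle_addr(0x200, SWIZZLE_9));
	printf("0x%llx -> 0x%llx (SWIZZLE_NONE)\n", 0x200ull,
	       (unsigned long long)swizzle_addr(0x200, SWIZZLE_NONE));
	return 0;
}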
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5bd4361ea84d..afd4e03e337e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -720,7 +720,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
720 | reloc_offset = src->gtt_offset; | 720 | reloc_offset = src->gtt_offset; |
721 | for (page = 0; page < page_count; page++) { | 721 | for (page = 0; page < page_count; page++) { |
722 | unsigned long flags; | 722 | unsigned long flags; |
723 | void __iomem *s; | ||
724 | void *d; | 723 | void *d; |
725 | 724 | ||
726 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | 725 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); |
@@ -728,10 +727,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
728 | goto unwind; | 727 | goto unwind; |
729 | 728 | ||
730 | local_irq_save(flags); | 729 | local_irq_save(flags); |
731 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 730 | if (reloc_offset < dev_priv->mm.gtt_mappable_end) { |
732 | reloc_offset); | 731 | void __iomem *s; |
733 | memcpy_fromio(d, s, PAGE_SIZE); | 732 | |
734 | io_mapping_unmap_atomic(s); | 733 | /* Simply ignore tiling or any overlapping fence. |
734 | * It's part of the error state, and this hopefully | ||
735 | * captures what the GPU read. | ||
736 | */ | ||
737 | |||
738 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
739 | reloc_offset); | ||
740 | memcpy_fromio(d, s, PAGE_SIZE); | ||
741 | io_mapping_unmap_atomic(s); | ||
742 | } else { | ||
743 | void *s; | ||
744 | |||
745 | drm_clflush_pages(&src->pages[page], 1); | ||
746 | |||
747 | s = kmap_atomic(src->pages[page]); | ||
748 | memcpy(d, s, PAGE_SIZE); | ||
749 | kunmap_atomic(s); | ||
750 | |||
751 | drm_clflush_pages(&src->pages[page], 1); | ||
752 | } | ||
735 | local_irq_restore(flags); | 753 | local_irq_restore(flags); |
736 | 754 | ||
737 | dst->pages[page] = d; | 755 | dst->pages[page] = d; |
@@ -770,11 +788,11 @@ i915_error_state_free(struct drm_device *dev, | |||
770 | { | 788 | { |
771 | int i; | 789 | int i; |
772 | 790 | ||
773 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) | 791 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { |
774 | i915_error_object_free(error->batchbuffer[i]); | 792 | i915_error_object_free(error->ring[i].batchbuffer); |
775 | 793 | i915_error_object_free(error->ring[i].ringbuffer); | |
776 | for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) | 794 | kfree(error->ring[i].requests); |
777 | i915_error_object_free(error->ringbuffer[i]); | 795 | } |
778 | 796 | ||
779 | kfree(error->active_bo); | 797 | kfree(error->active_bo); |
780 | kfree(error->overlay); | 798 | kfree(error->overlay); |
@@ -804,7 +822,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, | |||
804 | err->tiling = obj->tiling_mode; | 822 | err->tiling = obj->tiling_mode; |
805 | err->dirty = obj->dirty; | 823 | err->dirty = obj->dirty; |
806 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | 824 | err->purgeable = obj->madv != I915_MADV_WILLNEED; |
807 | err->ring = obj->ring ? obj->ring->id : 0; | 825 | err->ring = obj->ring ? obj->ring->id : -1; |
808 | err->cache_level = obj->cache_level; | 826 | err->cache_level = obj->cache_level; |
809 | 827 | ||
810 | if (++i == count) | 828 | if (++i == count) |
@@ -876,6 +894,92 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
876 | return NULL; | 894 | return NULL; |
877 | } | 895 | } |
878 | 896 | ||
897 | static void i915_record_ring_state(struct drm_device *dev, | ||
898 | struct drm_i915_error_state *error, | ||
899 | struct intel_ring_buffer *ring) | ||
900 | { | ||
901 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
902 | |||
903 | if (INTEL_INFO(dev)->gen >= 6) { | ||
904 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); | ||
905 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); | ||
906 | error->semaphore_mboxes[ring->id][0] | ||
907 | = I915_READ(RING_SYNC_0(ring->mmio_base)); | ||
908 | error->semaphore_mboxes[ring->id][1] | ||
909 | = I915_READ(RING_SYNC_1(ring->mmio_base)); | ||
910 | } | ||
911 | |||
912 | if (INTEL_INFO(dev)->gen >= 4) { | ||
913 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); | ||
914 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | ||
915 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | ||
916 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); | ||
917 | if (ring->id == RCS) { | ||
918 | error->instdone1 = I915_READ(INSTDONE1); | ||
919 | error->bbaddr = I915_READ64(BB_ADDR); | ||
920 | } | ||
921 | } else { | ||
922 | error->ipeir[ring->id] = I915_READ(IPEIR); | ||
923 | error->ipehr[ring->id] = I915_READ(IPEHR); | ||
924 | error->instdone[ring->id] = I915_READ(INSTDONE); | ||
925 | } | ||
926 | |||
927 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); | ||
928 | error->seqno[ring->id] = ring->get_seqno(ring); | ||
929 | error->acthd[ring->id] = intel_ring_get_active_head(ring); | ||
930 | error->head[ring->id] = I915_READ_HEAD(ring); | ||
931 | error->tail[ring->id] = I915_READ_TAIL(ring); | ||
932 | |||
933 | error->cpu_ring_head[ring->id] = ring->head; | ||
934 | error->cpu_ring_tail[ring->id] = ring->tail; | ||
935 | } | ||
936 | |||
937 | static void i915_gem_record_rings(struct drm_device *dev, | ||
938 | struct drm_i915_error_state *error) | ||
939 | { | ||
940 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
941 | struct drm_i915_gem_request *request; | ||
942 | int i, count; | ||
943 | |||
944 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
945 | struct intel_ring_buffer *ring = &dev_priv->ring[i]; | ||
946 | |||
947 | if (ring->obj == NULL) | ||
948 | continue; | ||
949 | |||
950 | i915_record_ring_state(dev, error, ring); | ||
951 | |||
952 | error->ring[i].batchbuffer = | ||
953 | i915_error_first_batchbuffer(dev_priv, ring); | ||
954 | |||
955 | error->ring[i].ringbuffer = | ||
956 | i915_error_object_create(dev_priv, ring->obj); | ||
957 | |||
958 | count = 0; | ||
959 | list_for_each_entry(request, &ring->request_list, list) | ||
960 | count++; | ||
961 | |||
962 | error->ring[i].num_requests = count; | ||
963 | error->ring[i].requests = | ||
964 | kmalloc(count*sizeof(struct drm_i915_error_request), | ||
965 | GFP_ATOMIC); | ||
966 | if (error->ring[i].requests == NULL) { | ||
967 | error->ring[i].num_requests = 0; | ||
968 | continue; | ||
969 | } | ||
970 | |||
971 | count = 0; | ||
972 | list_for_each_entry(request, &ring->request_list, list) { | ||
973 | struct drm_i915_error_request *erq; | ||
974 | |||
975 | erq = &error->ring[i].requests[count++]; | ||
976 | erq->seqno = request->seqno; | ||
977 | erq->jiffies = request->emitted_jiffies; | ||
978 | erq->tail = request->tail; | ||
979 | } | ||
980 | } | ||
981 | } | ||
982 | |||
879 | /** | 983 | /** |
880 | * i915_capture_error_state - capture an error record for later analysis | 984 | * i915_capture_error_state - capture an error record for later analysis |
881 | * @dev: drm device | 985 | * @dev: drm device |
@@ -900,7 +1004,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
900 | return; | 1004 | return; |
901 | 1005 | ||
902 | /* Account for pipe specific data like PIPE*STAT */ | 1006 | /* Account for pipe specific data like PIPE*STAT */ |
903 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 1007 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
904 | if (!error) { | 1008 | if (!error) { |
905 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 1009 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
906 | return; | 1010 | return; |
@@ -909,59 +1013,18 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
909 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", | 1013 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
910 | dev->primary->index); | 1014 | dev->primary->index); |
911 | 1015 | ||
912 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); | ||
913 | error->eir = I915_READ(EIR); | 1016 | error->eir = I915_READ(EIR); |
914 | error->pgtbl_er = I915_READ(PGTBL_ER); | 1017 | error->pgtbl_er = I915_READ(PGTBL_ER); |
915 | for_each_pipe(pipe) | 1018 | for_each_pipe(pipe) |
916 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); | 1019 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
917 | error->instpm = I915_READ(INSTPM); | 1020 | |
918 | error->error = 0; | ||
919 | if (INTEL_INFO(dev)->gen >= 6) { | 1021 | if (INTEL_INFO(dev)->gen >= 6) { |
920 | error->error = I915_READ(ERROR_GEN6); | 1022 | error->error = I915_READ(ERROR_GEN6); |
921 | 1023 | error->done_reg = I915_READ(DONE_REG); | |
922 | error->bcs_acthd = I915_READ(BCS_ACTHD); | ||
923 | error->bcs_ipehr = I915_READ(BCS_IPEHR); | ||
924 | error->bcs_ipeir = I915_READ(BCS_IPEIR); | ||
925 | error->bcs_instdone = I915_READ(BCS_INSTDONE); | ||
926 | error->bcs_seqno = 0; | ||
927 | if (dev_priv->ring[BCS].get_seqno) | ||
928 | error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); | ||
929 | |||
930 | error->vcs_acthd = I915_READ(VCS_ACTHD); | ||
931 | error->vcs_ipehr = I915_READ(VCS_IPEHR); | ||
932 | error->vcs_ipeir = I915_READ(VCS_IPEIR); | ||
933 | error->vcs_instdone = I915_READ(VCS_INSTDONE); | ||
934 | error->vcs_seqno = 0; | ||
935 | if (dev_priv->ring[VCS].get_seqno) | ||
936 | error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); | ||
937 | } | 1024 | } |
938 | if (INTEL_INFO(dev)->gen >= 4) { | ||
939 | error->ipeir = I915_READ(IPEIR_I965); | ||
940 | error->ipehr = I915_READ(IPEHR_I965); | ||
941 | error->instdone = I915_READ(INSTDONE_I965); | ||
942 | error->instps = I915_READ(INSTPS); | ||
943 | error->instdone1 = I915_READ(INSTDONE1); | ||
944 | error->acthd = I915_READ(ACTHD_I965); | ||
945 | error->bbaddr = I915_READ64(BB_ADDR); | ||
946 | } else { | ||
947 | error->ipeir = I915_READ(IPEIR); | ||
948 | error->ipehr = I915_READ(IPEHR); | ||
949 | error->instdone = I915_READ(INSTDONE); | ||
950 | error->acthd = I915_READ(ACTHD); | ||
951 | error->bbaddr = 0; | ||
952 | } | ||
953 | i915_gem_record_fences(dev, error); | ||
954 | |||
955 | /* Record the active batch and ring buffers */ | ||
956 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
957 | error->batchbuffer[i] = | ||
958 | i915_error_first_batchbuffer(dev_priv, | ||
959 | &dev_priv->ring[i]); | ||
960 | 1025 | ||
961 | error->ringbuffer[i] = | 1026 | i915_gem_record_fences(dev, error); |
962 | i915_error_object_create(dev_priv, | 1027 | i915_gem_record_rings(dev, error); |
963 | dev_priv->ring[i].obj); | ||
964 | } | ||
965 | 1028 | ||
966 | /* Record buffers on the active and pinned lists. */ | 1029 | /* Record buffers on the active and pinned lists. */ |
967 | error->active_bo = NULL; | 1030 | error->active_bo = NULL; |
@@ -1017,11 +1080,12 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
1017 | { | 1080 | { |
1018 | struct drm_i915_private *dev_priv = dev->dev_private; | 1081 | struct drm_i915_private *dev_priv = dev->dev_private; |
1019 | struct drm_i915_error_state *error; | 1082 | struct drm_i915_error_state *error; |
1083 | unsigned long flags; | ||
1020 | 1084 | ||
1021 | spin_lock(&dev_priv->error_lock); | 1085 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
1022 | error = dev_priv->first_error; | 1086 | error = dev_priv->first_error; |
1023 | dev_priv->first_error = NULL; | 1087 | dev_priv->first_error = NULL; |
1024 | spin_unlock(&dev_priv->error_lock); | 1088 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
1025 | 1089 | ||
1026 | if (error) | 1090 | if (error) |
1027 | i915_error_state_free(dev, error); | 1091 | i915_error_state_free(dev, error); |
@@ -1698,6 +1762,7 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1698 | dev_priv->last_instdone1 == instdone1) { | 1762 | dev_priv->last_instdone1 == instdone1) { |
1699 | if (dev_priv->hangcheck_count++ > 1) { | 1763 | if (dev_priv->hangcheck_count++ > 1) { |
1700 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1764 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
1765 | i915_handle_error(dev, true); | ||
1701 | 1766 | ||
1702 | if (!IS_GEN2(dev)) { | 1767 | if (!IS_GEN2(dev)) { |
1703 | /* Is the chip hanging on a WAIT_FOR_EVENT? | 1768 | /* Is the chip hanging on a WAIT_FOR_EVENT? |
@@ -1705,7 +1770,6 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1705 | * and break the hang. This should work on | 1770 | * and break the hang. This should work on |
1706 | * all but the second generation chipsets. | 1771 | * all but the second generation chipsets. |
1707 | */ | 1772 | */ |
1708 | |||
1709 | if (kick_ring(&dev_priv->ring[RCS])) | 1773 | if (kick_ring(&dev_priv->ring[RCS])) |
1710 | goto repeat; | 1774 | goto repeat; |
1711 | 1775 | ||
@@ -1718,7 +1782,6 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1718 | goto repeat; | 1782 | goto repeat; |
1719 | } | 1783 | } |
1720 | 1784 | ||
1721 | i915_handle_error(dev, true); | ||
1722 | return; | 1785 | return; |
1723 | } | 1786 | } |
1724 | } else { | 1787 | } else { |
@@ -1752,18 +1815,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1752 | 1815 | ||
1753 | I915_WRITE(HWSTAM, 0xeffe); | 1816 | I915_WRITE(HWSTAM, 0xeffe); |
1754 | 1817 | ||
1755 | if (IS_GEN6(dev)) { | ||
1756 | /* Workaround stalls observed on Sandy Bridge GPUs by | ||
1757 | * making the blitter command streamer generate a | ||
1758 | * write to the Hardware Status Page for | ||
1759 | * MI_USER_INTERRUPT. This appears to serialize the | ||
1760 | * previous seqno write out before the interrupt | ||
1761 | * happens. | ||
1762 | */ | ||
1763 | I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1764 | I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT); | ||
1765 | } | ||
1766 | |||
1767 | /* XXX hotplug from PCH */ | 1818 | /* XXX hotplug from PCH */ |
1768 | 1819 | ||
1769 | I915_WRITE(DEIMR, 0xffffffff); | 1820 | I915_WRITE(DEIMR, 0xffffffff); |
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c deleted file mode 100644 index cc8f6d49cf20..000000000000 --- a/drivers/gpu/drm/i915/i915_mem.c +++ /dev/null | |||
@@ -1,387 +0,0 @@ | |||
1 | /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- | ||
2 | */ | ||
3 | /* | ||
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | ||
5 | * All Rights Reserved. | ||
6 | * | ||
7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
8 | * copy of this software and associated documentation files (the | ||
9 | * "Software"), to deal in the Software without restriction, including | ||
10 | * without limitation the rights to use, copy, modify, merge, publish, | ||
11 | * distribute, sub license, and/or sell copies of the Software, and to | ||
12 | * permit persons to whom the Software is furnished to do so, subject to | ||
13 | * the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the | ||
16 | * next paragraph) shall be included in all copies or substantial portions | ||
17 | * of the Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | ||
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i915_drm.h" | ||
32 | #include "i915_drv.h" | ||
33 | |||
34 | /* This memory manager is integrated into the global/local lru | ||
35 | * mechanisms used by the clients. Specifically, it operates by | ||
36 | * setting the 'in_use' fields of the global LRU to indicate whether | ||
37 | * this region is privately allocated to a client. | ||
38 | * | ||
39 | * This does require the client to actually respect that field. | ||
40 | * | ||
41 | * Currently no effort is made to allocate 'private' memory in any | ||
42 | * clever way - the LRU information isn't used to determine which | ||
43 | * block to allocate, and the ring is drained prior to allocations -- | ||
44 | * in other words allocation is expensive. | ||
45 | */ | ||
46 | static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) | ||
47 | { | ||
48 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
49 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
50 | drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv; | ||
51 | struct drm_tex_region *list; | ||
52 | unsigned shift, nr; | ||
53 | unsigned start; | ||
54 | unsigned end; | ||
55 | unsigned i; | ||
56 | int age; | ||
57 | |||
58 | shift = dev_priv->tex_lru_log_granularity; | ||
59 | nr = I915_NR_TEX_REGIONS; | ||
60 | |||
61 | start = p->start >> shift; | ||
62 | end = (p->start + p->size - 1) >> shift; | ||
63 | |||
64 | age = ++sarea_priv->texAge; | ||
65 | list = sarea_priv->texList; | ||
66 | |||
67 | /* Mark the regions with the new flag and update their age. Move | ||
68 | * them to head of list to preserve LRU semantics. | ||
69 | */ | ||
70 | for (i = start; i <= end; i++) { | ||
71 | list[i].in_use = in_use; | ||
72 | list[i].age = age; | ||
73 | |||
74 | /* remove_from_list(i) | ||
75 | */ | ||
76 | list[(unsigned)list[i].next].prev = list[i].prev; | ||
77 | list[(unsigned)list[i].prev].next = list[i].next; | ||
78 | |||
79 | /* insert_at_head(list, i) | ||
80 | */ | ||
81 | list[i].prev = nr; | ||
82 | list[i].next = list[nr].next; | ||
83 | list[(unsigned)list[nr].next].prev = i; | ||
84 | list[nr].next = i; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* Very simple allocator for agp memory, working on a static range | ||
89 | * already mapped into each client's address space. | ||
90 | */ | ||
91 | |||
92 | static struct mem_block *split_block(struct mem_block *p, int start, int size, | ||
93 | struct drm_file *file_priv) | ||
94 | { | ||
95 | /* Maybe cut off the start of an existing block */ | ||
96 | if (start > p->start) { | ||
97 | struct mem_block *newblock = kmalloc(sizeof(*newblock), | ||
98 | GFP_KERNEL); | ||
99 | if (!newblock) | ||
100 | goto out; | ||
101 | newblock->start = start; | ||
102 | newblock->size = p->size - (start - p->start); | ||
103 | newblock->file_priv = NULL; | ||
104 | newblock->next = p->next; | ||
105 | newblock->prev = p; | ||
106 | p->next->prev = newblock; | ||
107 | p->next = newblock; | ||
108 | p->size -= newblock->size; | ||
109 | p = newblock; | ||
110 | } | ||
111 | |||
112 | /* Maybe cut off the end of an existing block */ | ||
113 | if (size < p->size) { | ||
114 | struct mem_block *newblock = kmalloc(sizeof(*newblock), | ||
115 | GFP_KERNEL); | ||
116 | if (!newblock) | ||
117 | goto out; | ||
118 | newblock->start = start + size; | ||
119 | newblock->size = p->size - size; | ||
120 | newblock->file_priv = NULL; | ||
121 | newblock->next = p->next; | ||
122 | newblock->prev = p; | ||
123 | p->next->prev = newblock; | ||
124 | p->next = newblock; | ||
125 | p->size = size; | ||
126 | } | ||
127 | |||
128 | out: | ||
129 | /* Our block is in the middle */ | ||
130 | p->file_priv = file_priv; | ||
131 | return p; | ||
132 | } | ||
133 | |||
134 | static struct mem_block *alloc_block(struct mem_block *heap, int size, | ||
135 | int align2, struct drm_file *file_priv) | ||
136 | { | ||
137 | struct mem_block *p; | ||
138 | int mask = (1 << align2) - 1; | ||
139 | |||
140 | for (p = heap->next; p != heap; p = p->next) { | ||
141 | int start = (p->start + mask) & ~mask; | ||
142 | if (p->file_priv == NULL && start + size <= p->start + p->size) | ||
143 | return split_block(p, start, size, file_priv); | ||
144 | } | ||
145 | |||
146 | return NULL; | ||
147 | } | ||
148 | |||
149 | static struct mem_block *find_block(struct mem_block *heap, int start) | ||
150 | { | ||
151 | struct mem_block *p; | ||
152 | |||
153 | for (p = heap->next; p != heap; p = p->next) | ||
154 | if (p->start == start) | ||
155 | return p; | ||
156 | |||
157 | return NULL; | ||
158 | } | ||
159 | |||
160 | static void free_block(struct mem_block *p) | ||
161 | { | ||
162 | p->file_priv = NULL; | ||
163 | |||
164 | /* Assumes a single contiguous range. Needs a special file_priv in | ||
165 | * 'heap' to stop it being subsumed. | ||
166 | */ | ||
167 | if (p->next->file_priv == NULL) { | ||
168 | struct mem_block *q = p->next; | ||
169 | p->size += q->size; | ||
170 | p->next = q->next; | ||
171 | p->next->prev = p; | ||
172 | kfree(q); | ||
173 | } | ||
174 | |||
175 | if (p->prev->file_priv == NULL) { | ||
176 | struct mem_block *q = p->prev; | ||
177 | q->size += p->size; | ||
178 | q->next = p->next; | ||
179 | q->next->prev = q; | ||
180 | kfree(p); | ||
181 | } | ||
182 | } | ||
183 | |||
184 | /* Initialize. How to check for an uninitialized heap? | ||
185 | */ | ||
186 | static int init_heap(struct mem_block **heap, int start, int size) | ||
187 | { | ||
188 | struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); | ||
189 | |||
190 | if (!blocks) | ||
191 | return -ENOMEM; | ||
192 | |||
193 | *heap = kmalloc(sizeof(**heap), GFP_KERNEL); | ||
194 | if (!*heap) { | ||
195 | kfree(blocks); | ||
196 | return -ENOMEM; | ||
197 | } | ||
198 | |||
199 | blocks->start = start; | ||
200 | blocks->size = size; | ||
201 | blocks->file_priv = NULL; | ||
202 | blocks->next = blocks->prev = *heap; | ||
203 | |||
204 | memset(*heap, 0, sizeof(**heap)); | ||
205 | (*heap)->file_priv = (struct drm_file *) -1; | ||
206 | (*heap)->next = (*heap)->prev = blocks; | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | /* Free all blocks associated with the releasing file. | ||
211 | */ | ||
212 | void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, | ||
213 | struct mem_block *heap) | ||
214 | { | ||
215 | struct mem_block *p; | ||
216 | |||
217 | if (!heap || !heap->next) | ||
218 | return; | ||
219 | |||
220 | for (p = heap->next; p != heap; p = p->next) { | ||
221 | if (p->file_priv == file_priv) { | ||
222 | p->file_priv = NULL; | ||
223 | mark_block(dev, p, 0); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | /* Assumes a single contiguous range. Needs a special file_priv in | ||
228 | * 'heap' to stop it being subsumed. | ||
229 | */ | ||
230 | for (p = heap->next; p != heap; p = p->next) { | ||
231 | while (p->file_priv == NULL && p->next->file_priv == NULL) { | ||
232 | struct mem_block *q = p->next; | ||
233 | p->size += q->size; | ||
234 | p->next = q->next; | ||
235 | p->next->prev = p; | ||
236 | kfree(q); | ||
237 | } | ||
238 | } | ||
239 | } | ||
240 | |||
241 | /* Shutdown. | ||
242 | */ | ||
243 | void i915_mem_takedown(struct mem_block **heap) | ||
244 | { | ||
245 | struct mem_block *p; | ||
246 | |||
247 | if (!*heap) | ||
248 | return; | ||
249 | |||
250 | for (p = (*heap)->next; p != *heap;) { | ||
251 | struct mem_block *q = p; | ||
252 | p = p->next; | ||
253 | kfree(q); | ||
254 | } | ||
255 | |||
256 | kfree(*heap); | ||
257 | *heap = NULL; | ||
258 | } | ||
259 | |||
260 | static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region) | ||
261 | { | ||
262 | switch (region) { | ||
263 | case I915_MEM_REGION_AGP: | ||
264 | return &dev_priv->agp_heap; | ||
265 | default: | ||
266 | return NULL; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | /* IOCTL HANDLERS */ | ||
271 | |||
272 | int i915_mem_alloc(struct drm_device *dev, void *data, | ||
273 | struct drm_file *file_priv) | ||
274 | { | ||
275 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
276 | drm_i915_mem_alloc_t *alloc = data; | ||
277 | struct mem_block *block, **heap; | ||
278 | |||
279 | if (!dev_priv) { | ||
280 | DRM_ERROR("called with no initialization\n"); | ||
281 | return -EINVAL; | ||
282 | } | ||
283 | |||
284 | heap = get_heap(dev_priv, alloc->region); | ||
285 | if (!heap || !*heap) | ||
286 | return -EFAULT; | ||
287 | |||
288 | /* Make things easier on ourselves: all allocations at least | ||
289 | * 4k aligned. | ||
290 | */ | ||
291 | if (alloc->alignment < 12) | ||
292 | alloc->alignment = 12; | ||
293 | |||
294 | block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); | ||
295 | |||
296 | if (!block) | ||
297 | return -ENOMEM; | ||
298 | |||
299 | mark_block(dev, block, 1); | ||
300 | |||
301 | if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, | ||
302 | sizeof(int))) { | ||
303 | DRM_ERROR("copy_to_user\n"); | ||
304 | return -EFAULT; | ||
305 | } | ||
306 | |||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | int i915_mem_free(struct drm_device *dev, void *data, | ||
311 | struct drm_file *file_priv) | ||
312 | { | ||
313 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
314 | drm_i915_mem_free_t *memfree = data; | ||
315 | struct mem_block *block, **heap; | ||
316 | |||
317 | if (!dev_priv) { | ||
318 | DRM_ERROR("called with no initialization\n"); | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | |||
322 | heap = get_heap(dev_priv, memfree->region); | ||
323 | if (!heap || !*heap) | ||
324 | return -EFAULT; | ||
325 | |||
326 | block = find_block(*heap, memfree->region_offset); | ||
327 | if (!block) | ||
328 | return -EFAULT; | ||
329 | |||
330 | if (block->file_priv != file_priv) | ||
331 | return -EPERM; | ||
332 | |||
333 | mark_block(dev, block, 0); | ||
334 | free_block(block); | ||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | int i915_mem_init_heap(struct drm_device *dev, void *data, | ||
339 | struct drm_file *file_priv) | ||
340 | { | ||
341 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
342 | drm_i915_mem_init_heap_t *initheap = data; | ||
343 | struct mem_block **heap; | ||
344 | |||
345 | if (!dev_priv) { | ||
346 | DRM_ERROR("called with no initialization\n"); | ||
347 | return -EINVAL; | ||
348 | } | ||
349 | |||
350 | heap = get_heap(dev_priv, initheap->region); | ||
351 | if (!heap) | ||
352 | return -EFAULT; | ||
353 | |||
354 | if (*heap) { | ||
355 | DRM_ERROR("heap already initialized?"); | ||
356 | return -EFAULT; | ||
357 | } | ||
358 | |||
359 | return init_heap(heap, initheap->start, initheap->size); | ||
360 | } | ||
361 | |||
362 | int i915_mem_destroy_heap(struct drm_device *dev, void *data, | ||
363 | struct drm_file *file_priv) | ||
364 | { | ||
365 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
366 | drm_i915_mem_destroy_heap_t *destroyheap = data; | ||
367 | struct mem_block **heap; | ||
368 | |||
369 | if (!dev_priv) { | ||
370 | DRM_ERROR("called with no initialization\n"); | ||
371 | return -EINVAL; | ||
372 | } | ||
373 | |||
374 | heap = get_heap(dev_priv, destroyheap->region); | ||
375 | if (!heap) { | ||
376 | DRM_ERROR("get_heap failed"); | ||
377 | return -EFAULT; | ||
378 | } | ||
379 | |||
380 | if (!*heap) { | ||
381 | DRM_ERROR("heap not initialized?"); | ||
382 | return -EFAULT; | ||
383 | } | ||
384 | |||
385 | i915_mem_takedown(heap); | ||
386 | return 0; | ||
387 | } | ||
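The file removed above implemented the legacy i915 memory manager: a doubly linked list of blocks that alloc_block() walks first-fit, rounding each candidate start up to a power-of-two boundary (align2 is log2 of the alignment, and the alloc ioctl bumps anything below 12 up to 4 KB). A minimal standalone sketch of that rounding, with illustrative values only:

#include <stdio.h>

/* Round 'start' up to a 2^align2 boundary, as alloc_block() did. */
static int align_up(int start, int align2)
{
	int mask = (1 << align2) - 1;

	return (start + mask) & ~mask;
}

int main(void)
{
	printf("%d\n", align_up(4097, 12));	/* -> 8192 */
	printf("%d\n", align_up(8192, 12));	/* -> 8192 (already aligned) */
	return 0;
}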
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 558ac716a328..3886cf051bac 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -86,12 +86,45 @@ | |||
86 | #define GEN6_MBC_SNPCR_LOW (2<<21) | 86 | #define GEN6_MBC_SNPCR_LOW (2<<21) |
87 | #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ | 87 | #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ |
88 | 88 | ||
89 | #define GEN6_MBCTL 0x0907c | ||
90 | #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) | ||
91 | #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) | ||
92 | #define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) | ||
93 | #define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) | ||
94 | #define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) | ||
95 | |||
89 | #define GEN6_GDRST 0x941c | 96 | #define GEN6_GDRST 0x941c |
90 | #define GEN6_GRDOM_FULL (1 << 0) | 97 | #define GEN6_GRDOM_FULL (1 << 0) |
91 | #define GEN6_GRDOM_RENDER (1 << 1) | 98 | #define GEN6_GRDOM_RENDER (1 << 1) |
92 | #define GEN6_GRDOM_MEDIA (1 << 2) | 99 | #define GEN6_GRDOM_MEDIA (1 << 2) |
93 | #define GEN6_GRDOM_BLT (1 << 3) | 100 | #define GEN6_GRDOM_BLT (1 << 3) |
94 | 101 | ||
102 | /* PPGTT stuff */ | ||
103 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) | ||
104 | |||
105 | #define GEN6_PDE_VALID (1 << 0) | ||
106 | #define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ | ||
107 | /* gen6+ has bits 11-4 for physical addr bits 39-32 */ | ||
108 | #define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | ||
109 | |||
110 | #define GEN6_PTE_VALID (1 << 0) | ||
111 | #define GEN6_PTE_UNCACHED (1 << 1) | ||
112 | #define GEN6_PTE_CACHE_LLC (2 << 1) | ||
113 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) | ||
114 | #define GEN6_PTE_CACHE_BITS (3 << 1) | ||
115 | #define GEN6_PTE_GFDT (1 << 3) | ||
116 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | ||
117 | |||
118 | #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) | ||
119 | #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) | ||
120 | #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) | ||
121 | #define PP_DIR_DCLV_2G 0xffffffff | ||
122 | |||
123 | #define GAM_ECOCHK 0x4090 | ||
124 | #define ECOCHK_SNB_BIT (1<<10) | ||
125 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) | ||
126 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) | ||
127 | |||
95 | /* VGA stuff */ | 128 | /* VGA stuff */ |
96 | 129 | ||
97 | #define VGA_ST01_MDA 0x3ba | 130 | #define VGA_ST01_MDA 0x3ba |
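GEN6_GTT_ADDR_ENCODE() above folds bits 39:32 of a physical address into bits 11:4 of the 32-bit PTE/PDE while leaving the page-aligned low bits in place. A standalone illustration of what the macro produces; the sample address is made up:

#include <stdio.h>
#include <stdint.h>

#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

int main(void)
{
	uint64_t phys = 0x0000001234567000ULL;		/* bits 39:32 = 0x12 */
	uint32_t pte = (uint32_t)GEN6_GTT_ADDR_ENCODE(phys);

	/* bits 31:12 are unchanged, the 0x12 lands in PTE bits 11:4 (0x120) */
	printf("pte = 0x%08x\n", pte);			/* 0x34567120 */
	return 0;
}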
@@ -295,6 +328,12 @@ | |||
295 | #define FENCE_REG_SANDYBRIDGE_0 0x100000 | 328 | #define FENCE_REG_SANDYBRIDGE_0 0x100000 |
296 | #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 | 329 | #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 |
297 | 330 | ||
331 | /* control register for cpu gtt access */ | ||
332 | #define TILECTL 0x101000 | ||
333 | #define TILECTL_SWZCTL (1 << 0) | ||
334 | #define TILECTL_TLB_PREFETCH_DIS (1 << 2) | ||
335 | #define TILECTL_BACKSNOOP_DIS (1 << 3) | ||
336 | |||
298 | /* | 337 | /* |
299 | * Instruction and interrupt control regs | 338 | * Instruction and interrupt control regs |
300 | */ | 339 | */ |
@@ -318,7 +357,14 @@ | |||
318 | #define RING_MAX_IDLE(base) ((base)+0x54) | 357 | #define RING_MAX_IDLE(base) ((base)+0x54) |
319 | #define RING_HWS_PGA(base) ((base)+0x80) | 358 | #define RING_HWS_PGA(base) ((base)+0x80) |
320 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | 359 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) |
360 | #define ARB_MODE 0x04030 | ||
361 | #define ARB_MODE_SWIZZLE_SNB (1<<4) | ||
362 | #define ARB_MODE_SWIZZLE_IVB (1<<5) | ||
363 | #define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x) | ||
364 | #define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x) | ||
321 | #define RENDER_HWS_PGA_GEN7 (0x04080) | 365 | #define RENDER_HWS_PGA_GEN7 (0x04080) |
366 | #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) | ||
367 | #define DONE_REG 0x40b0 | ||
322 | #define BSD_HWS_PGA_GEN7 (0x04180) | 368 | #define BSD_HWS_PGA_GEN7 (0x04180) |
323 | #define BLT_HWS_PGA_GEN7 (0x04280) | 369 | #define BLT_HWS_PGA_GEN7 (0x04280) |
324 | #define RING_ACTHD(base) ((base)+0x74) | 370 | #define RING_ACTHD(base) ((base)+0x74) |
@@ -352,6 +398,12 @@ | |||
352 | #define IPEIR_I965 0x02064 | 398 | #define IPEIR_I965 0x02064 |
353 | #define IPEHR_I965 0x02068 | 399 | #define IPEHR_I965 0x02068 |
354 | #define INSTDONE_I965 0x0206c | 400 | #define INSTDONE_I965 0x0206c |
401 | #define RING_IPEIR(base) ((base)+0x64) | ||
402 | #define RING_IPEHR(base) ((base)+0x68) | ||
403 | #define RING_INSTDONE(base) ((base)+0x6c) | ||
404 | #define RING_INSTPS(base) ((base)+0x70) | ||
405 | #define RING_DMA_FADD(base) ((base)+0x78) | ||
406 | #define RING_INSTPM(base) ((base)+0xc0) | ||
355 | #define INSTPS 0x02070 /* 965+ only */ | 407 | #define INSTPS 0x02070 /* 965+ only */ |
356 | #define INSTDONE1 0x0207c /* 965+ only */ | 408 | #define INSTDONE1 0x0207c /* 965+ only */ |
357 | #define ACTHD_I965 0x02074 | 409 | #define ACTHD_I965 0x02074 |
@@ -365,14 +417,6 @@ | |||
365 | #define INSTDONE 0x02090 | 417 | #define INSTDONE 0x02090 |
366 | #define NOPID 0x02094 | 418 | #define NOPID 0x02094 |
367 | #define HWSTAM 0x02098 | 419 | #define HWSTAM 0x02098 |
368 | #define VCS_INSTDONE 0x1206C | ||
369 | #define VCS_IPEIR 0x12064 | ||
370 | #define VCS_IPEHR 0x12068 | ||
371 | #define VCS_ACTHD 0x12074 | ||
372 | #define BCS_INSTDONE 0x2206C | ||
373 | #define BCS_IPEIR 0x22064 | ||
374 | #define BCS_IPEHR 0x22068 | ||
375 | #define BCS_ACTHD 0x22074 | ||
376 | 420 | ||
377 | #define ERROR_GEN6 0x040a0 | 421 | #define ERROR_GEN6 0x040a0 |
378 | 422 | ||
@@ -391,10 +435,11 @@ | |||
391 | 435 | ||
392 | #define MI_MODE 0x0209c | 436 | #define MI_MODE 0x0209c |
393 | # define VS_TIMER_DISPATCH (1 << 6) | 437 | # define VS_TIMER_DISPATCH (1 << 6) |
394 | # define MI_FLUSH_ENABLE (1 << 11) | 438 | # define MI_FLUSH_ENABLE (1 << 12) |
395 | 439 | ||
396 | #define GFX_MODE 0x02520 | 440 | #define GFX_MODE 0x02520 |
397 | #define GFX_MODE_GEN7 0x0229c | 441 | #define GFX_MODE_GEN7 0x0229c |
442 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) | ||
398 | #define GFX_RUN_LIST_ENABLE (1<<15) | 443 | #define GFX_RUN_LIST_ENABLE (1<<15) |
399 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | 444 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) |
400 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | 445 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) |
@@ -1037,6 +1082,29 @@ | |||
1037 | #define C0DRB3 0x10206 | 1082 | #define C0DRB3 0x10206 |
1038 | #define C1DRB3 0x10606 | 1083 | #define C1DRB3 0x10606 |
1039 | 1084 | ||
1085 | /** snb MCH registers for reading the DRAM channel configuration */ | ||
1086 | #define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) | ||
1087 | #define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) | ||
1088 | #define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) | ||
1089 | #define MAD_DIMM_ECC_MASK (0x3 << 24) | ||
1090 | #define MAD_DIMM_ECC_OFF (0x0 << 24) | ||
1091 | #define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) | ||
1092 | #define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) | ||
1093 | #define MAD_DIMM_ECC_ON (0x3 << 24) | ||
1094 | #define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) | ||
1095 | #define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) | ||
1096 | #define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ | ||
1097 | #define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ | ||
1098 | #define MAD_DIMM_B_DUAL_RANK (0x1 << 18) | ||
1099 | #define MAD_DIMM_A_DUAL_RANK (0x1 << 17) | ||
1100 | #define MAD_DIMM_A_SELECT (0x1 << 16) | ||
1101 | /* DIMM sizes are in multiples of 256 MB. */ | ||
1102 | #define MAD_DIMM_B_SIZE_SHIFT 8 | ||
1103 | #define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) | ||
1104 | #define MAD_DIMM_A_SIZE_SHIFT 0 | ||
1105 | #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) | ||
1106 | |||
1107 | |||
1040 | /* Clocking configuration register */ | 1108 | /* Clocking configuration register */ |
1041 | #define CLKCFG 0x10c00 | 1109 | #define CLKCFG 0x10c00 |
1042 | #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ | 1110 | #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ |
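The MAD_DIMM_* size fields count 256 MB granules, so reading out a channel's DIMM sizes is a mask-and-shift. A short sketch of the decode; the register value here is hypothetical, not a real readout:

#include <stdio.h>
#include <stdint.h>

#define MAD_DIMM_B_SIZE_SHIFT	8
#define MAD_DIMM_B_SIZE_MASK	(0xff << MAD_DIMM_B_SIZE_SHIFT)
#define MAD_DIMM_A_SIZE_SHIFT	0
#define MAD_DIMM_A_SIZE_MASK	(0xff << MAD_DIMM_A_SIZE_SHIFT)

int main(void)
{
	uint32_t val = 0x00000810;	/* hypothetical MAD_DIMM_C0 value */
	unsigned int dimm_a = (val & MAD_DIMM_A_SIZE_MASK) >> MAD_DIMM_A_SIZE_SHIFT;
	unsigned int dimm_b = (val & MAD_DIMM_B_SIZE_MASK) >> MAD_DIMM_B_SIZE_SHIFT;

	/* sizes are in 256 MB units */
	printf("DIMM A: %u MB, DIMM B: %u MB\n", dimm_a * 256, dimm_b * 256);
	return 0;
}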
@@ -1316,6 +1384,7 @@ | |||
1316 | #define _VSYNC_A 0x60014 | 1384 | #define _VSYNC_A 0x60014 |
1317 | #define _PIPEASRC 0x6001c | 1385 | #define _PIPEASRC 0x6001c |
1318 | #define _BCLRPAT_A 0x60020 | 1386 | #define _BCLRPAT_A 0x60020 |
1387 | #define _VSYNCSHIFT_A 0x60028 | ||
1319 | 1388 | ||
1320 | /* Pipe B timing regs */ | 1389 | /* Pipe B timing regs */ |
1321 | #define _HTOTAL_B 0x61000 | 1390 | #define _HTOTAL_B 0x61000 |
@@ -1326,6 +1395,8 @@ | |||
1326 | #define _VSYNC_B 0x61014 | 1395 | #define _VSYNC_B 0x61014 |
1327 | #define _PIPEBSRC 0x6101c | 1396 | #define _PIPEBSRC 0x6101c |
1328 | #define _BCLRPAT_B 0x61020 | 1397 | #define _BCLRPAT_B 0x61020 |
1398 | #define _VSYNCSHIFT_B 0x61028 | ||
1399 | |||
1329 | 1400 | ||
1330 | #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) | 1401 | #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) |
1331 | #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) | 1402 | #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) |
@@ -1334,6 +1405,7 @@ | |||
1334 | #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) | 1405 | #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) |
1335 | #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) | 1406 | #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) |
1336 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) | 1407 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) |
1408 | #define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) | ||
1337 | 1409 | ||
1338 | /* VGA port control */ | 1410 | /* VGA port control */ |
1339 | #define ADPA 0x61100 | 1411 | #define ADPA 0x61100 |
@@ -2319,10 +2391,21 @@ | |||
2319 | #define PIPECONF_PALETTE 0 | 2391 | #define PIPECONF_PALETTE 0 |
2320 | #define PIPECONF_GAMMA (1<<24) | 2392 | #define PIPECONF_GAMMA (1<<24) |
2321 | #define PIPECONF_FORCE_BORDER (1<<25) | 2393 | #define PIPECONF_FORCE_BORDER (1<<25) |
2322 | #define PIPECONF_PROGRESSIVE (0 << 21) | ||
2323 | #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) | ||
2324 | #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) | ||
2325 | #define PIPECONF_INTERLACE_MASK (7 << 21) | 2394 | #define PIPECONF_INTERLACE_MASK (7 << 21) |
2395 | /* Note that pre-gen3 does not support interlaced display directly. Panel | ||
2396 | * fitting must be disabled on pre-ilk for interlaced. */ | ||
2397 | #define PIPECONF_PROGRESSIVE (0 << 21) | ||
2398 | #define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ | ||
2399 | #define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ | ||
2400 | #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) | ||
2401 | #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ | ||
2402 | /* Ironlake and later have a completely new set of values for interlaced. PFIT | ||
2403 | * means panel fitter required, PF means progressive fetch, DBL means power | ||
2404 | * saving pixel doubling. */ | ||
2405 | #define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) | ||
2406 | #define PIPECONF_INTERLACED_ILK (3 << 21) | ||
2407 | #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ | ||
2408 | #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ | ||
2326 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) | 2409 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) |
2327 | #define PIPECONF_BPP_MASK (0x000000e0) | 2410 | #define PIPECONF_BPP_MASK (0x000000e0) |
2328 | #define PIPECONF_BPP_8 (0<<5) | 2411 | #define PIPECONF_BPP_8 (0<<5) |
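The comments above split the interlace encodings by generation; the mode-set changes later in this patch use PIPECONF_INTERLACE_W_FIELD_INDICATION on gen3/gen4 and PIPECONF_INTERLACED_ILK on Ironlake and later, with gen2 limited to progressive. A purely illustrative helper that mirrors that selection (not code from the patch):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PIPECONF_PROGRESSIVE			(0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
#define PIPECONF_INTERLACED_ILK			(3 << 21)

static uint32_t pipeconf_interlace_bits(int gen, bool interlaced)
{
	if (!interlaced || gen <= 2)	/* gen2 has no interlaced support */
		return PIPECONF_PROGRESSIVE;
	if (gen >= 5)			/* Ironlake and later */
		return PIPECONF_INTERLACED_ILK;
	return PIPECONF_INTERLACE_W_FIELD_INDICATION;
}

int main(void)
{
	printf("gen6 interlaced -> 0x%08x\n", pipeconf_interlace_bits(6, true));
	return 0;
}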
@@ -3219,6 +3302,7 @@ | |||
3219 | #define _TRANS_VSYNC_A 0xe0014 | 3302 | #define _TRANS_VSYNC_A 0xe0014 |
3220 | #define TRANS_VSYNC_END_SHIFT 16 | 3303 | #define TRANS_VSYNC_END_SHIFT 16 |
3221 | #define TRANS_VSYNC_START_SHIFT 0 | 3304 | #define TRANS_VSYNC_START_SHIFT 0 |
3305 | #define _TRANS_VSYNCSHIFT_A 0xe0028 | ||
3222 | 3306 | ||
3223 | #define _TRANSA_DATA_M1 0xe0030 | 3307 | #define _TRANSA_DATA_M1 0xe0030 |
3224 | #define _TRANSA_DATA_N1 0xe0034 | 3308 | #define _TRANSA_DATA_N1 0xe0034 |
@@ -3249,6 +3333,7 @@ | |||
3249 | #define _TRANS_VTOTAL_B 0xe100c | 3333 | #define _TRANS_VTOTAL_B 0xe100c |
3250 | #define _TRANS_VBLANK_B 0xe1010 | 3334 | #define _TRANS_VBLANK_B 0xe1010 |
3251 | #define _TRANS_VSYNC_B 0xe1014 | 3335 | #define _TRANS_VSYNC_B 0xe1014 |
3336 | #define _TRANS_VSYNCSHIFT_B 0xe1028 | ||
3252 | 3337 | ||
3253 | #define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) | 3338 | #define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) |
3254 | #define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) | 3339 | #define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) |
@@ -3256,6 +3341,8 @@ | |||
3256 | #define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) | 3341 | #define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) |
3257 | #define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) | 3342 | #define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) |
3258 | #define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) | 3343 | #define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) |
3344 | #define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \ | ||
3345 | _TRANS_VSYNCSHIFT_B) | ||
3259 | 3346 | ||
3260 | #define _TRANSB_DATA_M1 0xe1030 | 3347 | #define _TRANSB_DATA_M1 0xe1030 |
3261 | #define _TRANSB_DATA_N1 0xe1034 | 3348 | #define _TRANSB_DATA_N1 0xe1034 |
@@ -3289,7 +3376,10 @@ | |||
3289 | #define TRANS_FSYNC_DELAY_HB4 (3<<27) | 3376 | #define TRANS_FSYNC_DELAY_HB4 (3<<27) |
3290 | #define TRANS_DP_AUDIO_ONLY (1<<26) | 3377 | #define TRANS_DP_AUDIO_ONLY (1<<26) |
3291 | #define TRANS_DP_VIDEO_AUDIO (0<<26) | 3378 | #define TRANS_DP_VIDEO_AUDIO (0<<26) |
3379 | #define TRANS_INTERLACE_MASK (7<<21) | ||
3292 | #define TRANS_PROGRESSIVE (0<<21) | 3380 | #define TRANS_PROGRESSIVE (0<<21) |
3381 | #define TRANS_INTERLACED (3<<21) | ||
3382 | #define TRANS_LEGACY_INTERLACED_ILK (2<<21) | ||
3293 | #define TRANS_8BPC (0<<5) | 3383 | #define TRANS_8BPC (0<<5) |
3294 | #define TRANS_10BPC (1<<5) | 3384 | #define TRANS_10BPC (1<<5) |
3295 | #define TRANS_6BPC (2<<5) | 3385 | #define TRANS_6BPC (2<<5) |
@@ -3628,6 +3718,12 @@ | |||
3628 | #define ECOBUS 0xa180 | 3718 | #define ECOBUS 0xa180 |
3629 | #define FORCEWAKE_MT_ENABLE (1<<5) | 3719 | #define FORCEWAKE_MT_ENABLE (1<<5) |
3630 | 3720 | ||
3721 | #define GTFIFODBG 0x120000 | ||
3722 | #define GT_FIFO_CPU_ERROR_MASK 7 | ||
3723 | #define GT_FIFO_OVFERR (1<<2) | ||
3724 | #define GT_FIFO_IAWRERR (1<<1) | ||
3725 | #define GT_FIFO_IARDERR (1<<0) | ||
3726 | |||
3631 | #define GT_FIFO_FREE_ENTRIES 0x120008 | 3727 | #define GT_FIFO_FREE_ENTRIES 0x120008 |
3632 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | 3728 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 |
3633 | 3729 | ||
@@ -3757,4 +3853,16 @@ | |||
3757 | */ | 3853 | */ |
3758 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) | 3854 | #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) |
3759 | 3855 | ||
3856 | #define IBX_AUD_CONFIG_A 0xe2000 | ||
3857 | #define CPT_AUD_CONFIG_A 0xe5000 | ||
3858 | #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) | ||
3859 | #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) | ||
3860 | #define AUD_CONFIG_UPPER_N_SHIFT 20 | ||
3861 | #define AUD_CONFIG_UPPER_N_VALUE (0xff << 20) | ||
3862 | #define AUD_CONFIG_LOWER_N_SHIFT 4 | ||
3863 | #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) | ||
3864 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 | ||
3865 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) | ||
3866 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) | ||
3867 | |||
3760 | #endif /* _I915_REG_H_ */ | 3868 | #endif /* _I915_REG_H_ */ |
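AUD_CONFIG splits a programmed audio N value into an upper 8-bit field at bit 20 and a lower 12-bit field at bit 4, selected by AUD_CONFIG_N_PROG_ENABLE. A hedged sketch of the packing; treating the fields as N[19:12] and N[11:0] is an assumption based on the masks above, and 6144 is just the customary N for 48 kHz HDMI audio:

#include <stdio.h>
#include <stdint.h>

#define AUD_CONFIG_N_PROG_ENABLE	(1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT	20
#define AUD_CONFIG_LOWER_N_SHIFT	4

static uint32_t aud_config_pack_n(uint32_t n)
{
	uint32_t val = 0;

	val |= ((n >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT;	/* assumed N[19:12] */
	val |= (n & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT;		/* assumed N[11:0] */
	return val | AUD_CONFIG_N_PROG_ENABLE;
}

int main(void)
{
	printf("AUD_CONFIG for N=6144: 0x%08x\n", aud_config_pack_n(6144));
	return 0;
}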
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index cb912106d1a2..bae3edf956a4 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -208,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) | |||
208 | 208 | ||
209 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); | 209 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); |
210 | if (ret < 0) { | 210 | if (ret < 0) { |
211 | DRM_ERROR("failed to get supported _DSM functions\n"); | 211 | DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); |
212 | return false; | 212 | return false; |
213 | } | 213 | } |
214 | 214 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 63880e2e5cfd..8168d8f8a634 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -572,7 +572,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
572 | DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); | 572 | DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); |
573 | return; | 573 | return; |
574 | } | 574 | } |
575 | dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL); | 575 | dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); |
576 | if (!dev_priv->child_dev) { | 576 | if (!dev_priv->child_dev) { |
577 | DRM_DEBUG_KMS("No memory space for child device\n"); | 577 | DRM_DEBUG_KMS("No memory space for child device\n"); |
578 | return; | 578 | return; |
@@ -669,7 +669,7 @@ intel_parse_bios(struct drm_device *dev) | |||
669 | } | 669 | } |
670 | 670 | ||
671 | if (!vbt) { | 671 | if (!vbt) { |
672 | DRM_ERROR("VBT signature missing\n"); | 672 | DRM_DEBUG_DRIVER("VBT signature missing\n"); |
673 | pci_unmap_rom(pdev, bios); | 673 | pci_unmap_rom(pdev, bios); |
674 | return -1; | 674 | return -1; |
675 | } | 675 | } |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index dd729d46a61f..4d3d736a4f56 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -594,7 +594,10 @@ void intel_crt_init(struct drm_device *dev) | |||
594 | 1 << INTEL_ANALOG_CLONE_BIT | | 594 | 1 << INTEL_ANALOG_CLONE_BIT | |
595 | 1 << INTEL_SDVO_LVDS_CLONE_BIT); | 595 | 1 << INTEL_SDVO_LVDS_CLONE_BIT); |
596 | crt->base.crtc_mask = (1 << 0) | (1 << 1); | 596 | crt->base.crtc_mask = (1 << 0) | (1 << 1); |
597 | connector->interlace_allowed = 1; | 597 | if (IS_GEN2(dev)) |
598 | connector->interlace_allowed = 0; | ||
599 | else | ||
600 | connector->interlace_allowed = 1; | ||
598 | connector->doublescan_allowed = 0; | 601 | connector->doublescan_allowed = 0; |
599 | 602 | ||
600 | drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); | 603 | drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 397087cf689e..d514719f65e2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -75,7 +75,7 @@ struct intel_limit { | |||
75 | intel_range_t dot, vco, n, m, m1, m2, p, p1; | 75 | intel_range_t dot, vco, n, m, m1, m2, p, p1; |
76 | intel_p2_t p2; | 76 | intel_p2_t p2; |
77 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, | 77 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, |
78 | int, int, intel_clock_t *); | 78 | int, int, intel_clock_t *, intel_clock_t *); |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* FDI */ | 81 | /* FDI */ |
@@ -83,17 +83,21 @@ struct intel_limit { | |||
83 | 83 | ||
84 | static bool | 84 | static bool |
85 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 85 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
86 | int target, int refclk, intel_clock_t *best_clock); | 86 | int target, int refclk, intel_clock_t *match_clock, |
87 | intel_clock_t *best_clock); | ||
87 | static bool | 88 | static bool |
88 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 89 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
89 | int target, int refclk, intel_clock_t *best_clock); | 90 | int target, int refclk, intel_clock_t *match_clock, |
91 | intel_clock_t *best_clock); | ||
90 | 92 | ||
91 | static bool | 93 | static bool |
92 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 94 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
93 | int target, int refclk, intel_clock_t *best_clock); | 95 | int target, int refclk, intel_clock_t *match_clock, |
96 | intel_clock_t *best_clock); | ||
94 | static bool | 97 | static bool |
95 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, | 98 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
96 | int target, int refclk, intel_clock_t *best_clock); | 99 | int target, int refclk, intel_clock_t *match_clock, |
100 | intel_clock_t *best_clock); | ||
97 | 101 | ||
98 | static inline u32 /* units of 100MHz */ | 102 | static inline u32 /* units of 100MHz */ |
99 | intel_fdi_link_freq(struct drm_device *dev) | 103 | intel_fdi_link_freq(struct drm_device *dev) |
@@ -515,7 +519,8 @@ static bool intel_PLL_is_valid(struct drm_device *dev, | |||
515 | 519 | ||
516 | static bool | 520 | static bool |
517 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 521 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
518 | int target, int refclk, intel_clock_t *best_clock) | 522 | int target, int refclk, intel_clock_t *match_clock, |
523 | intel_clock_t *best_clock) | ||
519 | 524 | ||
520 | { | 525 | { |
521 | struct drm_device *dev = crtc->dev; | 526 | struct drm_device *dev = crtc->dev; |
@@ -562,6 +567,9 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
562 | if (!intel_PLL_is_valid(dev, limit, | 567 | if (!intel_PLL_is_valid(dev, limit, |
563 | &clock)) | 568 | &clock)) |
564 | continue; | 569 | continue; |
570 | if (match_clock && | ||
571 | clock.p != match_clock->p) | ||
572 | continue; | ||
565 | 573 | ||
566 | this_err = abs(clock.dot - target); | 574 | this_err = abs(clock.dot - target); |
567 | if (this_err < err) { | 575 | if (this_err < err) { |
@@ -578,7 +586,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
578 | 586 | ||
579 | static bool | 587 | static bool |
580 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 588 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
581 | int target, int refclk, intel_clock_t *best_clock) | 589 | int target, int refclk, intel_clock_t *match_clock, |
590 | intel_clock_t *best_clock) | ||
582 | { | 591 | { |
583 | struct drm_device *dev = crtc->dev; | 592 | struct drm_device *dev = crtc->dev; |
584 | struct drm_i915_private *dev_priv = dev->dev_private; | 593 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -625,6 +634,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
625 | if (!intel_PLL_is_valid(dev, limit, | 634 | if (!intel_PLL_is_valid(dev, limit, |
626 | &clock)) | 635 | &clock)) |
627 | continue; | 636 | continue; |
637 | if (match_clock && | ||
638 | clock.p != match_clock->p) | ||
639 | continue; | ||
628 | 640 | ||
629 | this_err = abs(clock.dot - target); | 641 | this_err = abs(clock.dot - target); |
630 | if (this_err < err_most) { | 642 | if (this_err < err_most) { |
@@ -642,7 +654,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
642 | 654 | ||
643 | static bool | 655 | static bool |
644 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 656 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
645 | int target, int refclk, intel_clock_t *best_clock) | 657 | int target, int refclk, intel_clock_t *match_clock, |
658 | intel_clock_t *best_clock) | ||
646 | { | 659 | { |
647 | struct drm_device *dev = crtc->dev; | 660 | struct drm_device *dev = crtc->dev; |
648 | intel_clock_t clock; | 661 | intel_clock_t clock; |
@@ -668,7 +681,8 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
668 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | 681 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
669 | static bool | 682 | static bool |
670 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 683 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
671 | int target, int refclk, intel_clock_t *best_clock) | 684 | int target, int refclk, intel_clock_t *match_clock, |
685 | intel_clock_t *best_clock) | ||
672 | { | 686 | { |
673 | intel_clock_t clock; | 687 | intel_clock_t clock; |
674 | if (target < 200000) { | 688 | if (target < 200000) { |
@@ -922,6 +936,10 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
922 | u32 val; | 936 | u32 val; |
923 | bool cur_state; | 937 | bool cur_state; |
924 | 938 | ||
939 | /* if we need the pipe A quirk it must be always on */ | ||
940 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) | ||
941 | state = true; | ||
942 | |||
925 | reg = PIPECONF(pipe); | 943 | reg = PIPECONF(pipe); |
926 | val = I915_READ(reg); | 944 | val = I915_READ(reg); |
927 | cur_state = !!(val & PIPECONF_ENABLE); | 945 | cur_state = !!(val & PIPECONF_ENABLE); |
@@ -930,19 +948,24 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
930 | pipe_name(pipe), state_string(state), state_string(cur_state)); | 948 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
931 | } | 949 | } |
932 | 950 | ||
933 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, | 951 | static void assert_plane(struct drm_i915_private *dev_priv, |
934 | enum plane plane) | 952 | enum plane plane, bool state) |
935 | { | 953 | { |
936 | int reg; | 954 | int reg; |
937 | u32 val; | 955 | u32 val; |
956 | bool cur_state; | ||
938 | 957 | ||
939 | reg = DSPCNTR(plane); | 958 | reg = DSPCNTR(plane); |
940 | val = I915_READ(reg); | 959 | val = I915_READ(reg); |
941 | WARN(!(val & DISPLAY_PLANE_ENABLE), | 960 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
942 | "plane %c assertion failure, should be active but is disabled\n", | 961 | WARN(cur_state != state, |
943 | plane_name(plane)); | 962 | "plane %c assertion failure (expected %s, current %s)\n", |
963 | plane_name(plane), state_string(state), state_string(cur_state)); | ||
944 | } | 964 | } |
945 | 965 | ||
966 | #define assert_plane_enabled(d, p) assert_plane(d, p, true) | ||
967 | #define assert_plane_disabled(d, p) assert_plane(d, p, false) | ||
968 | |||
946 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, | 969 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, |
947 | enum pipe pipe) | 970 | enum pipe pipe) |
948 | { | 971 | { |
@@ -951,8 +974,14 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |||
951 | int cur_pipe; | 974 | int cur_pipe; |
952 | 975 | ||
953 | /* Planes are fixed to pipes on ILK+ */ | 976 | /* Planes are fixed to pipes on ILK+ */ |
954 | if (HAS_PCH_SPLIT(dev_priv->dev)) | 977 | if (HAS_PCH_SPLIT(dev_priv->dev)) { |
978 | reg = DSPCNTR(pipe); | ||
979 | val = I915_READ(reg); | ||
980 | WARN((val & DISPLAY_PLANE_ENABLE), | ||
981 | "plane %c assertion failure, should be disabled but not\n", | ||
982 | plane_name(pipe)); | ||
955 | return; | 983 | return; |
984 | } | ||
956 | 985 | ||
957 | /* Need to check both planes against the pipe */ | 986 | /* Need to check both planes against the pipe */ |
958 | for (i = 0; i < 2; i++) { | 987 | for (i = 0; i < 2; i++) { |
@@ -1071,7 +1100,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1071 | { | 1100 | { |
1072 | u32 val = I915_READ(reg); | 1101 | u32 val = I915_READ(reg); |
1073 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), | 1102 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), |
1074 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1103 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
1075 | reg, pipe_name(pipe)); | 1104 | reg, pipe_name(pipe)); |
1076 | } | 1105 | } |
1077 | 1106 | ||
@@ -1237,7 +1266,8 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | |||
1237 | enum pipe pipe) | 1266 | enum pipe pipe) |
1238 | { | 1267 | { |
1239 | int reg; | 1268 | int reg; |
1240 | u32 val; | 1269 | u32 val, pipeconf_val; |
1270 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
1241 | 1271 | ||
1242 | /* PCH only available on ILK+ */ | 1272 | /* PCH only available on ILK+ */ |
1243 | BUG_ON(dev_priv->info->gen < 5); | 1273 | BUG_ON(dev_priv->info->gen < 5); |
@@ -1251,6 +1281,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | |||
1251 | 1281 | ||
1252 | reg = TRANSCONF(pipe); | 1282 | reg = TRANSCONF(pipe); |
1253 | val = I915_READ(reg); | 1283 | val = I915_READ(reg); |
1284 | pipeconf_val = I915_READ(PIPECONF(pipe)); | ||
1254 | 1285 | ||
1255 | if (HAS_PCH_IBX(dev_priv->dev)) { | 1286 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1256 | /* | 1287 | /* |
@@ -1258,8 +1289,19 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | |||
1258 | * that in pipeconf reg. | 1289 | * that in pipeconf reg. |
1259 | */ | 1290 | */ |
1260 | val &= ~PIPE_BPC_MASK; | 1291 | val &= ~PIPE_BPC_MASK; |
1261 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | 1292 | val |= pipeconf_val & PIPE_BPC_MASK; |
1262 | } | 1293 | } |
1294 | |||
1295 | val &= ~TRANS_INTERLACE_MASK; | ||
1296 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) | ||
1297 | if (HAS_PCH_IBX(dev_priv->dev) && | ||
1298 | intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) | ||
1299 | val |= TRANS_LEGACY_INTERLACED_ILK; | ||
1300 | else | ||
1301 | val |= TRANS_INTERLACED; | ||
1302 | else | ||
1303 | val |= TRANS_PROGRESSIVE; | ||
1304 | |||
1263 | I915_WRITE(reg, val | TRANS_ENABLE); | 1305 | I915_WRITE(reg, val | TRANS_ENABLE); |
1264 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 1306 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
1265 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 1307 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
@@ -2012,6 +2054,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
2012 | ret = i915_gem_object_get_fence(obj, pipelined); | 2054 | ret = i915_gem_object_get_fence(obj, pipelined); |
2013 | if (ret) | 2055 | if (ret) |
2014 | goto err_unpin; | 2056 | goto err_unpin; |
2057 | |||
2058 | i915_gem_object_pin_fence(obj); | ||
2015 | } | 2059 | } |
2016 | 2060 | ||
2017 | dev_priv->mm.interruptible = true; | 2061 | dev_priv->mm.interruptible = true; |
@@ -2024,6 +2068,12 @@ err_interruptible: | |||
2024 | return ret; | 2068 | return ret; |
2025 | } | 2069 | } |
2026 | 2070 | ||
2071 | void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) | ||
2072 | { | ||
2073 | i915_gem_object_unpin_fence(obj); | ||
2074 | i915_gem_object_unpin(obj); | ||
2075 | } | ||
2076 | |||
2027 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | 2077 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2028 | int x, int y) | 2078 | int x, int y) |
2029 | { | 2079 | { |
@@ -2255,7 +2305,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2255 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 2305 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
2256 | LEAVE_ATOMIC_MODE_SET); | 2306 | LEAVE_ATOMIC_MODE_SET); |
2257 | if (ret) { | 2307 | if (ret) { |
2258 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 2308 | intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); |
2259 | mutex_unlock(&dev->struct_mutex); | 2309 | mutex_unlock(&dev->struct_mutex); |
2260 | DRM_ERROR("failed to update base address\n"); | 2310 | DRM_ERROR("failed to update base address\n"); |
2261 | return ret; | 2311 | return ret; |
@@ -2263,7 +2313,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2263 | 2313 | ||
2264 | if (old_fb) { | 2314 | if (old_fb) { |
2265 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 2315 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2266 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); | 2316 | intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); |
2267 | } | 2317 | } |
2268 | 2318 | ||
2269 | mutex_unlock(&dev->struct_mutex); | 2319 | mutex_unlock(&dev->struct_mutex); |
@@ -2936,6 +2986,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2936 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); | 2986 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); |
2937 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | 2987 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2938 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | 2988 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2989 | I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); | ||
2939 | 2990 | ||
2940 | intel_fdi_normal_train(crtc); | 2991 | intel_fdi_normal_train(crtc); |
2941 | 2992 | ||
@@ -3321,10 +3372,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
3321 | struct drm_device *dev = crtc->dev; | 3372 | struct drm_device *dev = crtc->dev; |
3322 | 3373 | ||
3323 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | 3374 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
3375 | assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); | ||
3376 | assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); | ||
3324 | 3377 | ||
3325 | if (crtc->fb) { | 3378 | if (crtc->fb) { |
3326 | mutex_lock(&dev->struct_mutex); | 3379 | mutex_lock(&dev->struct_mutex); |
3327 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 3380 | intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); |
3328 | mutex_unlock(&dev->struct_mutex); | 3381 | mutex_unlock(&dev->struct_mutex); |
3329 | } | 3382 | } |
3330 | } | 3383 | } |
@@ -3398,11 +3451,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
3398 | return false; | 3451 | return false; |
3399 | } | 3452 | } |
3400 | 3453 | ||
3401 | /* XXX some encoders set the crtcinfo, others don't. | 3454 | /* All interlaced capable intel hw wants timings in frames. */ |
3402 | * Obviously we need some form of conflict resolution here... | 3455 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
3403 | */ | ||
3404 | if (adjusted_mode->crtc_htotal == 0) | ||
3405 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
3406 | 3456 | ||
3407 | return true; | 3457 | return true; |
3408 | } | 3458 | } |
@@ -4521,6 +4571,7 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4521 | { | 4571 | { |
4522 | struct drm_i915_private *dev_priv = dev->dev_private; | 4572 | struct drm_i915_private *dev_priv = dev->dev_private; |
4523 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | 4573 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4574 | u32 val; | ||
4524 | int fbc_wm, plane_wm, cursor_wm; | 4575 | int fbc_wm, plane_wm, cursor_wm; |
4525 | unsigned int enabled; | 4576 | unsigned int enabled; |
4526 | 4577 | ||
@@ -4529,8 +4580,10 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4529 | &sandybridge_display_wm_info, latency, | 4580 | &sandybridge_display_wm_info, latency, |
4530 | &sandybridge_cursor_wm_info, latency, | 4581 | &sandybridge_cursor_wm_info, latency, |
4531 | &plane_wm, &cursor_wm)) { | 4582 | &plane_wm, &cursor_wm)) { |
4532 | I915_WRITE(WM0_PIPEA_ILK, | 4583 | val = I915_READ(WM0_PIPEA_ILK); |
4533 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 4584 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
4585 | I915_WRITE(WM0_PIPEA_ILK, val | | ||
4586 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
4534 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 4587 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
4535 | " plane %d, " "cursor: %d\n", | 4588 | " plane %d, " "cursor: %d\n", |
4536 | plane_wm, cursor_wm); | 4589 | plane_wm, cursor_wm); |
@@ -4541,8 +4594,10 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4541 | &sandybridge_display_wm_info, latency, | 4594 | &sandybridge_display_wm_info, latency, |
4542 | &sandybridge_cursor_wm_info, latency, | 4595 | &sandybridge_cursor_wm_info, latency, |
4543 | &plane_wm, &cursor_wm)) { | 4596 | &plane_wm, &cursor_wm)) { |
4544 | I915_WRITE(WM0_PIPEB_ILK, | 4597 | val = I915_READ(WM0_PIPEB_ILK); |
4545 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 4598 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
4599 | I915_WRITE(WM0_PIPEB_ILK, val | | ||
4600 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
4546 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 4601 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
4547 | " plane %d, cursor: %d\n", | 4602 | " plane %d, cursor: %d\n", |
4548 | plane_wm, cursor_wm); | 4603 | plane_wm, cursor_wm); |
@@ -4555,8 +4610,10 @@ void sandybridge_update_wm(struct drm_device *dev) | |||
4555 | &sandybridge_display_wm_info, latency, | 4610 | &sandybridge_display_wm_info, latency, |
4556 | &sandybridge_cursor_wm_info, latency, | 4611 | &sandybridge_cursor_wm_info, latency, |
4557 | &plane_wm, &cursor_wm)) { | 4612 | &plane_wm, &cursor_wm)) { |
4558 | I915_WRITE(WM0_PIPEC_IVB, | 4613 | val = I915_READ(WM0_PIPEC_IVB); |
4559 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 4614 | val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
4615 | I915_WRITE(WM0_PIPEC_IVB, val | | ||
4616 | ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); | ||
4560 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" | 4617 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" |
4561 | " plane %d, cursor: %d\n", | 4618 | " plane %d, cursor: %d\n", |
4562 | plane_wm, cursor_wm); | 4619 | plane_wm, cursor_wm); |
@@ -4709,6 +4766,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, | |||
4709 | { | 4766 | { |
4710 | struct drm_i915_private *dev_priv = dev->dev_private; | 4767 | struct drm_i915_private *dev_priv = dev->dev_private; |
4711 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | 4768 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4769 | u32 val; | ||
4712 | int sprite_wm, reg; | 4770 | int sprite_wm, reg; |
4713 | int ret; | 4771 | int ret; |
4714 | 4772 | ||
@@ -4735,7 +4793,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, | |||
4735 | return; | 4793 | return; |
4736 | } | 4794 | } |
4737 | 4795 | ||
4738 | I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); | 4796 | val = I915_READ(reg); |
4797 | val &= ~WM0_PIPE_SPRITE_MASK; | ||
4798 | I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); | ||
4739 | DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); | 4799 | DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); |
4740 | 4800 | ||
4741 | 4801 | ||
@@ -4977,6 +5037,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4977 | return display_bpc != bpc; | 5037 | return display_bpc != bpc; |
4978 | } | 5038 | } |
4979 | 5039 | ||
5040 | static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) | ||
5041 | { | ||
5042 | struct drm_device *dev = crtc->dev; | ||
5043 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5044 | int refclk; | ||
5045 | |||
5046 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | ||
5047 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | ||
5048 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
5049 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
5050 | refclk / 1000); | ||
5051 | } else if (!IS_GEN2(dev)) { | ||
5052 | refclk = 96000; | ||
5053 | } else { | ||
5054 | refclk = 48000; | ||
5055 | } | ||
5056 | |||
5057 | return refclk; | ||
5058 | } | ||
5059 | |||
5060 | static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, | ||
5061 | intel_clock_t *clock) | ||
5062 | { | ||
5063 | /* SDVO TV has fixed PLL values that depend on its clock range; | ||
5064 | this mirrors the VBIOS setting. */ | ||
5065 | if (adjusted_mode->clock >= 100000 | ||
5066 | && adjusted_mode->clock < 140500) { | ||
5067 | clock->p1 = 2; | ||
5068 | clock->p2 = 10; | ||
5069 | clock->n = 3; | ||
5070 | clock->m1 = 16; | ||
5071 | clock->m2 = 8; | ||
5072 | } else if (adjusted_mode->clock >= 140500 | ||
5073 | && adjusted_mode->clock <= 200000) { | ||
5074 | clock->p1 = 1; | ||
5075 | clock->p2 = 10; | ||
5076 | clock->n = 6; | ||
5077 | clock->m1 = 12; | ||
5078 | clock->m2 = 8; | ||
5079 | } | ||
5080 | } | ||
5081 | |||
5082 | static void i9xx_update_pll_dividers(struct drm_crtc *crtc, | ||
5083 | intel_clock_t *clock, | ||
5084 | intel_clock_t *reduced_clock) | ||
5085 | { | ||
5086 | struct drm_device *dev = crtc->dev; | ||
5087 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5088 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5089 | int pipe = intel_crtc->pipe; | ||
5090 | u32 fp, fp2 = 0; | ||
5091 | |||
5092 | if (IS_PINEVIEW(dev)) { | ||
5093 | fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; | ||
5094 | if (reduced_clock) | ||
5095 | fp2 = (1 << reduced_clock->n) << 16 | | ||
5096 | reduced_clock->m1 << 8 | reduced_clock->m2; | ||
5097 | } else { | ||
5098 | fp = clock->n << 16 | clock->m1 << 8 | clock->m2; | ||
5099 | if (reduced_clock) | ||
5100 | fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 | | ||
5101 | reduced_clock->m2; | ||
5102 | } | ||
5103 | |||
5104 | I915_WRITE(FP0(pipe), fp); | ||
5105 | |||
5106 | intel_crtc->lowfreq_avail = false; | ||
5107 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | ||
5108 | reduced_clock && i915_powersave) { | ||
5109 | I915_WRITE(FP1(pipe), fp2); | ||
5110 | intel_crtc->lowfreq_avail = true; | ||
5111 | } else { | ||
5112 | I915_WRITE(FP1(pipe), fp); | ||
5113 | } | ||
5114 | } | ||
5115 | |||
4980 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | 5116 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4981 | struct drm_display_mode *mode, | 5117 | struct drm_display_mode *mode, |
4982 | struct drm_display_mode *adjusted_mode, | 5118 | struct drm_display_mode *adjusted_mode, |
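i9xx_update_pll_dividers() above packs the feedback dividers into FP0/FP1 as n << 16 | m1 << 8 | m2 (Pineview stores N one-hot as 1 << n instead). A standalone check of the non-Pineview packing, using sample divider values from the SDVO TV table above:

#include <stdio.h>
#include <stdint.h>

struct sample_clock { int n, m1, m2; };

static uint32_t pack_fp(const struct sample_clock *c)
{
	/* non-Pineview layout, as written to FP0(pipe)/FP1(pipe) */
	return (uint32_t)(c->n << 16 | c->m1 << 8 | c->m2);
}

int main(void)
{
	struct sample_clock c = { .n = 3, .m1 = 16, .m2 = 8 };

	printf("FP = 0x%08x\n", pack_fp(&c));	/* 0x00031008 */
	return 0;
}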
@@ -4990,7 +5126,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4990 | int plane = intel_crtc->plane; | 5126 | int plane = intel_crtc->plane; |
4991 | int refclk, num_connectors = 0; | 5127 | int refclk, num_connectors = 0; |
4992 | intel_clock_t clock, reduced_clock; | 5128 | intel_clock_t clock, reduced_clock; |
4993 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; | 5129 | u32 dpll, dspcntr, pipeconf, vsyncshift; |
4994 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 5130 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
4995 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 5131 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
4996 | struct drm_mode_config *mode_config = &dev->mode_config; | 5132 | struct drm_mode_config *mode_config = &dev->mode_config; |
@@ -5031,15 +5167,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5031 | num_connectors++; | 5167 | num_connectors++; |
5032 | } | 5168 | } |
5033 | 5169 | ||
5034 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 5170 | refclk = i9xx_get_refclk(crtc, num_connectors); |
5035 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
5036 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
5037 | refclk / 1000); | ||
5038 | } else if (!IS_GEN2(dev)) { | ||
5039 | refclk = 96000; | ||
5040 | } else { | ||
5041 | refclk = 48000; | ||
5042 | } | ||
5043 | 5171 | ||
5044 | /* | 5172 | /* |
5045 | * Returns a set of divisors for the desired target clock with the given | 5173 | * Returns a set of divisors for the desired target clock with the given |
@@ -5047,7 +5175,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5047 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 5175 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
5048 | */ | 5176 | */ |
5049 | limit = intel_limit(crtc, refclk); | 5177 | limit = intel_limit(crtc, refclk); |
5050 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | 5178 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, |
5179 | &clock); | ||
5051 | if (!ok) { | 5180 | if (!ok) { |
5052 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 5181 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5053 | return -EINVAL; | 5182 | return -EINVAL; |
@@ -5057,53 +5186,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5057 | intel_crtc_update_cursor(crtc, true); | 5186 | intel_crtc_update_cursor(crtc, true); |
5058 | 5187 | ||
5059 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 5188 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5189 | /* | ||
5190 | * Ensure we match the reduced clock's P to the target clock. | ||
5191 | * If the clocks don't match, we can't switch the display clock | ||
5192 | * by using the FP0/FP1. In such case we will disable the LVDS | ||
5193 | * downclock feature. | ||
5194 | */ | ||
5060 | has_reduced_clock = limit->find_pll(limit, crtc, | 5195 | has_reduced_clock = limit->find_pll(limit, crtc, |
5061 | dev_priv->lvds_downclock, | 5196 | dev_priv->lvds_downclock, |
5062 | refclk, | 5197 | refclk, |
5198 | &clock, | ||
5063 | &reduced_clock); | 5199 | &reduced_clock); |
5064 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | ||
5065 | /* | ||
5066 | * If the different P is found, it means that we can't | ||
5067 | * switch the display clock by using the FP0/FP1. | ||
5068 | * In such case we will disable the LVDS downclock | ||
5069 | * feature. | ||
5070 | */ | ||
5071 | DRM_DEBUG_KMS("Different P is found for " | ||
5072 | "LVDS clock/downclock\n"); | ||
5073 | has_reduced_clock = 0; | ||
5074 | } | ||
5075 | } | ||
5076 | /* SDVO TV has fixed PLL values depend on its clock range, | ||
5077 | this mirrors vbios setting. */ | ||
5078 | if (is_sdvo && is_tv) { | ||
5079 | if (adjusted_mode->clock >= 100000 | ||
5080 | && adjusted_mode->clock < 140500) { | ||
5081 | clock.p1 = 2; | ||
5082 | clock.p2 = 10; | ||
5083 | clock.n = 3; | ||
5084 | clock.m1 = 16; | ||
5085 | clock.m2 = 8; | ||
5086 | } else if (adjusted_mode->clock >= 140500 | ||
5087 | && adjusted_mode->clock <= 200000) { | ||
5088 | clock.p1 = 1; | ||
5089 | clock.p2 = 10; | ||
5090 | clock.n = 6; | ||
5091 | clock.m1 = 12; | ||
5092 | clock.m2 = 8; | ||
5093 | } | ||
5094 | } | 5200 | } |
5095 | 5201 | ||
5096 | if (IS_PINEVIEW(dev)) { | 5202 | if (is_sdvo && is_tv) |
5097 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 5203 | i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); |
5098 | if (has_reduced_clock) | 5204 | |
5099 | fp2 = (1 << reduced_clock.n) << 16 | | 5205 | i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? |
5100 | reduced_clock.m1 << 8 | reduced_clock.m2; | 5206 | &reduced_clock : NULL); |
5101 | } else { | ||
5102 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
5103 | if (has_reduced_clock) | ||
5104 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | ||
5105 | reduced_clock.m2; | ||
5106 | } | ||
5107 | 5207 | ||
5108 | dpll = DPLL_VGA_MODE_DIS; | 5208 | dpll = DPLL_VGA_MODE_DIS; |
5109 | 5209 | ||
@@ -5177,8 +5277,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5177 | /* Set up the display plane register */ | 5277 | /* Set up the display plane register */ |
5178 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 5278 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5179 | 5279 | ||
5180 | /* Ironlake's plane is forced to pipe, bit 24 is to | ||
5181 | enable color space conversion */ | ||
5182 | if (pipe == 0) | 5280 | if (pipe == 0) |
5183 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | 5281 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
5184 | else | 5282 | else |
@@ -5213,7 +5311,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5213 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 5311 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
5214 | drm_mode_debug_printmodeline(mode); | 5312 | drm_mode_debug_printmodeline(mode); |
5215 | 5313 | ||
5216 | I915_WRITE(FP0(pipe), fp); | ||
5217 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); | 5314 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5218 | 5315 | ||
5219 | POSTING_READ(DPLL(pipe)); | 5316 | POSTING_READ(DPLL(pipe)); |
@@ -5300,34 +5397,32 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5300 | I915_WRITE(DPLL(pipe), dpll); | 5397 | I915_WRITE(DPLL(pipe), dpll); |
5301 | } | 5398 | } |
5302 | 5399 | ||
5303 | intel_crtc->lowfreq_avail = false; | 5400 | if (HAS_PIPE_CXSR(dev)) { |
5304 | if (is_lvds && has_reduced_clock && i915_powersave) { | 5401 | if (intel_crtc->lowfreq_avail) { |
5305 | I915_WRITE(FP1(pipe), fp2); | ||
5306 | intel_crtc->lowfreq_avail = true; | ||
5307 | if (HAS_PIPE_CXSR(dev)) { | ||
5308 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | 5402 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5309 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 5403 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5310 | } | 5404 | } else { |
5311 | } else { | ||
5312 | I915_WRITE(FP1(pipe), fp); | ||
5313 | if (HAS_PIPE_CXSR(dev)) { | ||
5314 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | 5405 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5315 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | 5406 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
5316 | } | 5407 | } |
5317 | } | 5408 | } |
5318 | 5409 | ||
5319 | pipeconf &= ~PIPECONF_INTERLACE_MASK; | 5410 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
5320 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 5411 | if (!IS_GEN2(dev) && |
5412 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | ||
5321 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | 5413 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
5322 | /* the chip adds 2 halflines automatically */ | 5414 | /* the chip adds 2 halflines automatically */ |
5323 | adjusted_mode->crtc_vdisplay -= 1; | ||
5324 | adjusted_mode->crtc_vtotal -= 1; | 5415 | adjusted_mode->crtc_vtotal -= 1; |
5325 | adjusted_mode->crtc_vblank_start -= 1; | ||
5326 | adjusted_mode->crtc_vblank_end -= 1; | 5416 | adjusted_mode->crtc_vblank_end -= 1; |
5327 | adjusted_mode->crtc_vsync_end -= 1; | 5417 | vsyncshift = adjusted_mode->crtc_hsync_start |
5328 | adjusted_mode->crtc_vsync_start -= 1; | 5418 | - adjusted_mode->crtc_htotal/2; |
5329 | } else | 5419 | } else { |
5330 | pipeconf |= PIPECONF_PROGRESSIVE; | 5420 | pipeconf |= PIPECONF_PROGRESSIVE; |
5421 | vsyncshift = 0; | ||
5422 | } | ||
5423 | |||
5424 | if (!IS_GEN3(dev)) | ||
5425 | I915_WRITE(VSYNCSHIFT(pipe), vsyncshift); | ||
5331 | 5426 | ||
5332 | I915_WRITE(HTOTAL(pipe), | 5427 | I915_WRITE(HTOTAL(pipe), |
5333 | (adjusted_mode->crtc_hdisplay - 1) | | 5428 | (adjusted_mode->crtc_hdisplay - 1) | |
@@ -5593,7 +5688,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5593 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 5688 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
5594 | */ | 5689 | */ |
5595 | limit = intel_limit(crtc, refclk); | 5690 | limit = intel_limit(crtc, refclk); |
5596 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | 5691 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, |
5692 | &clock); | ||
5597 | if (!ok) { | 5693 | if (!ok) { |
5598 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 5694 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5599 | return -EINVAL; | 5695 | return -EINVAL; |
@@ -5603,21 +5699,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5603 | intel_crtc_update_cursor(crtc, true); | 5699 | intel_crtc_update_cursor(crtc, true); |
5604 | 5700 | ||
5605 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 5701 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5702 | /* | ||
5703 | * Ensure we match the reduced clock's P to the target clock. | ||
5704 | * If the clocks don't match, we can't switch the display clock | ||
5705 | * by using the FP0/FP1. In such case we will disable the LVDS | ||
5706 | * downclock feature. | ||
5707 | */ | ||
5606 | has_reduced_clock = limit->find_pll(limit, crtc, | 5708 | has_reduced_clock = limit->find_pll(limit, crtc, |
5607 | dev_priv->lvds_downclock, | 5709 | dev_priv->lvds_downclock, |
5608 | refclk, | 5710 | refclk, |
5711 | &clock, | ||
5609 | &reduced_clock); | 5712 | &reduced_clock); |
5610 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { | ||
5611 | /* | ||
5612 | * If the different P is found, it means that we can't | ||
5613 | * switch the display clock by using the FP0/FP1. | ||
5614 | * In such case we will disable the LVDS downclock | ||
5615 | * feature. | ||
5616 | */ | ||
5617 | DRM_DEBUG_KMS("Different P is found for " | ||
5618 | "LVDS clock/downclock\n"); | ||
5619 | has_reduced_clock = 0; | ||
5620 | } | ||
5621 | } | 5713 | } |
5622 | /* SDVO TV has fixed PLL values depending on its clock range, | 5714 | /* SDVO TV has fixed PLL values depending on its clock range, |
5623 | this mirrors vbios setting. */ | 5715 | this mirrors vbios setting. */ |
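The hunk above drops the open-coded "different P" check and instead hands the already-chosen clock to find_pll as a match target: a reduced (downclocked) PLL configuration is only usable for FP0/FP1 switching when its post divider P equals the primary clock's, because swapping FP0/FP1 only changes the feedback dividers. A minimal standalone sketch of that rule, using an illustrative struct rather than the driver's own clock type:

    #include <stdbool.h>
    #include <stddef.h>

    struct pll_clock {
        int p;    /* combined post divider */
    };

    /* Downclocking via FP0/FP1 only swaps feedback dividers, so a reduced
     * clock is only acceptable if it reuses the primary clock's P. */
    static bool reduced_clock_usable(const struct pll_clock *primary,
                                     const struct pll_clock *reduced)
    {
        return reduced != NULL && reduced->p == primary->p;
    }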
@@ -5914,16 +6006,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5914 | 6006 | ||
5915 | pipeconf &= ~PIPECONF_INTERLACE_MASK; | 6007 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
5916 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 6008 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5917 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | 6009 | pipeconf |= PIPECONF_INTERLACED_ILK; |
5918 | /* the chip adds 2 halflines automatically */ | 6010 | /* the chip adds 2 halflines automatically */ |
5919 | adjusted_mode->crtc_vdisplay -= 1; | ||
5920 | adjusted_mode->crtc_vtotal -= 1; | 6011 | adjusted_mode->crtc_vtotal -= 1; |
5921 | adjusted_mode->crtc_vblank_start -= 1; | ||
5922 | adjusted_mode->crtc_vblank_end -= 1; | 6012 | adjusted_mode->crtc_vblank_end -= 1; |
5923 | adjusted_mode->crtc_vsync_end -= 1; | 6013 | I915_WRITE(VSYNCSHIFT(pipe), |
5924 | adjusted_mode->crtc_vsync_start -= 1; | 6014 | adjusted_mode->crtc_hsync_start |
5925 | } else | 6015 | - adjusted_mode->crtc_htotal/2); |
6016 | } else { | ||
5926 | pipeconf |= PIPECONF_PROGRESSIVE; | 6017 | pipeconf |= PIPECONF_PROGRESSIVE; |
6018 | I915_WRITE(VSYNCSHIFT(pipe), 0); | ||
6019 | } | ||
5927 | 6020 | ||
5928 | I915_WRITE(HTOTAL(pipe), | 6021 | I915_WRITE(HTOTAL(pipe), |
5929 | (adjusted_mode->crtc_hdisplay - 1) | | 6022 | (adjusted_mode->crtc_hdisplay - 1) | |
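Both the i9xx and the Ironlake paths now program VSYNCSHIFT instead of nudging crtc_vsync_start/end and crtc_vdisplay: for an interlaced mode the second field's vsync is offset by half a scanline relative to hsync, for progressive modes the shift is zero. A short sketch of that computation; the struct is a stand-in for the crtc_* fields of drm_display_mode used in the hunks:

    /* Illustrative subset of the crtc_* timing values referenced above. */
    struct crtc_timings {
        int crtc_hsync_start;
        int crtc_htotal;
    };

    static int vsyncshift_for(const struct crtc_timings *t, int interlaced)
    {
        /* Interlaced: shift the second field's vsync by half a scanline. */
        return interlaced ? t->crtc_hsync_start - t->crtc_htotal / 2 : 0;
    }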
@@ -5966,12 +6059,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5966 | 6059 | ||
5967 | intel_wait_for_vblank(dev, pipe); | 6060 | intel_wait_for_vblank(dev, pipe); |
5968 | 6061 | ||
5969 | if (IS_GEN5(dev)) { | ||
5970 | /* enable address swizzle for tiling buffer */ | ||
5971 | temp = I915_READ(DISP_ARB_CTL); | ||
5972 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | ||
5973 | } | ||
5974 | |||
5975 | I915_WRITE(DSPCNTR(plane), dspcntr); | 6062 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5976 | POSTING_READ(DSPCNTR(plane)); | 6063 | POSTING_READ(DSPCNTR(plane)); |
5977 | 6064 | ||
@@ -6086,15 +6173,18 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6086 | uint32_t i; | 6173 | uint32_t i; |
6087 | int len; | 6174 | int len; |
6088 | int hdmiw_hdmiedid; | 6175 | int hdmiw_hdmiedid; |
6176 | int aud_config; | ||
6089 | int aud_cntl_st; | 6177 | int aud_cntl_st; |
6090 | int aud_cntrl_st2; | 6178 | int aud_cntrl_st2; |
6091 | 6179 | ||
6092 | if (HAS_PCH_IBX(connector->dev)) { | 6180 | if (HAS_PCH_IBX(connector->dev)) { |
6093 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; | 6181 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; |
6182 | aud_config = IBX_AUD_CONFIG_A; | ||
6094 | aud_cntl_st = IBX_AUD_CNTL_ST_A; | 6183 | aud_cntl_st = IBX_AUD_CNTL_ST_A; |
6095 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; | 6184 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
6096 | } else { | 6185 | } else { |
6097 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; | 6186 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; |
6187 | aud_config = CPT_AUD_CONFIG_A; | ||
6098 | aud_cntl_st = CPT_AUD_CNTL_ST_A; | 6188 | aud_cntl_st = CPT_AUD_CNTL_ST_A; |
6099 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; | 6189 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; |
6100 | } | 6190 | } |
@@ -6102,6 +6192,7 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6102 | i = to_intel_crtc(crtc)->pipe; | 6192 | i = to_intel_crtc(crtc)->pipe; |
6103 | hdmiw_hdmiedid += i * 0x100; | 6193 | hdmiw_hdmiedid += i * 0x100; |
6104 | aud_cntl_st += i * 0x100; | 6194 | aud_cntl_st += i * 0x100; |
6195 | aud_config += i * 0x100; | ||
6105 | 6196 | ||
6106 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); | 6197 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); |
6107 | 6198 | ||
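The ELD writer selects the per-pipe instance of each audio register by adding a fixed stride, and the newly tracked AUD_CONFIG register follows the same rule. A tiny sketch of that addressing; the 0x100 stride comes from the hunk, the base value passed in is purely illustrative:

    #include <stdint.h>

    #define AUD_PIPE_STRIDE 0x100u   /* per-pipe spacing used in the hunk above */

    /* base_a is the pipe-A instance; pipe B/C instances follow at
     * base_a + pipe * 0x100. */
    static uint32_t aud_reg_for_pipe(uint32_t base_a, unsigned int pipe)
    {
        return base_a + pipe * AUD_PIPE_STRIDE;
    }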
@@ -6121,7 +6212,9 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6121 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 6212 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
6122 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | 6213 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6123 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | 6214 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
6124 | } | 6215 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
6216 | } else | ||
6217 | I915_WRITE(aud_config, 0); | ||
6125 | 6218 | ||
6126 | if (intel_eld_uptodate(connector, | 6219 | if (intel_eld_uptodate(connector, |
6127 | aud_cntrl_st2, eldv, | 6220 | aud_cntrl_st2, eldv, |
@@ -6927,9 +7020,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
6927 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 7020 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
6928 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 7021 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
6929 | 7022 | ||
6930 | /* Unlock panel regs */ | 7023 | assert_panel_unlocked(dev_priv, pipe); |
6931 | I915_WRITE(PP_CONTROL, | ||
6932 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | ||
6933 | 7024 | ||
6934 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 7025 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
6935 | I915_WRITE(dpll_reg, dpll); | 7026 | I915_WRITE(dpll_reg, dpll); |
@@ -6938,9 +7029,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
6938 | dpll = I915_READ(dpll_reg); | 7029 | dpll = I915_READ(dpll_reg); |
6939 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 7030 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
6940 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 7031 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
6941 | |||
6942 | /* ...and lock them again */ | ||
6943 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); | ||
6944 | } | 7032 | } |
6945 | 7033 | ||
6946 | /* Schedule downclock */ | 7034 | /* Schedule downclock */ |
@@ -6970,9 +7058,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
6970 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { | 7058 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { |
6971 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); | 7059 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
6972 | 7060 | ||
6973 | /* Unlock panel regs */ | 7061 | assert_panel_unlocked(dev_priv, pipe); |
6974 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | ||
6975 | PANEL_UNLOCK_REGS); | ||
6976 | 7062 | ||
6977 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 7063 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
6978 | I915_WRITE(dpll_reg, dpll); | 7064 | I915_WRITE(dpll_reg, dpll); |
@@ -6980,9 +7066,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
6980 | dpll = I915_READ(dpll_reg); | 7066 | dpll = I915_READ(dpll_reg); |
6981 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | 7067 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
6982 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); | 7068 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); |
6983 | |||
6984 | /* ...and lock them again */ | ||
6985 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); | ||
6986 | } | 7069 | } |
6987 | 7070 | ||
6988 | } | 7071 | } |
@@ -7097,7 +7180,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
7097 | container_of(__work, struct intel_unpin_work, work); | 7180 | container_of(__work, struct intel_unpin_work, work); |
7098 | 7181 | ||
7099 | mutex_lock(&work->dev->struct_mutex); | 7182 | mutex_lock(&work->dev->struct_mutex); |
7100 | i915_gem_object_unpin(work->old_fb_obj); | 7183 | intel_unpin_fb_obj(work->old_fb_obj); |
7101 | drm_gem_object_unreference(&work->pending_flip_obj->base); | 7184 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
7102 | drm_gem_object_unreference(&work->old_fb_obj->base); | 7185 | drm_gem_object_unreference(&work->old_fb_obj->base); |
7103 | 7186 | ||
@@ -7247,7 +7330,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | |||
7247 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 7330 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
7248 | OUT_RING(fb->pitches[0]); | 7331 | OUT_RING(fb->pitches[0]); |
7249 | OUT_RING(obj->gtt_offset + offset); | 7332 | OUT_RING(obj->gtt_offset + offset); |
7250 | OUT_RING(MI_NOOP); | 7333 | OUT_RING(0); /* aux display base address, unused */ |
7251 | ADVANCE_LP_RING(); | 7334 | ADVANCE_LP_RING(); |
7252 | out: | 7335 | out: |
7253 | return ret; | 7336 | return ret; |
@@ -7681,10 +7764,9 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7681 | struct drm_i915_private *dev_priv = dev->dev_private; | 7764 | struct drm_i915_private *dev_priv = dev->dev_private; |
7682 | struct intel_encoder *encoder; | 7765 | struct intel_encoder *encoder; |
7683 | bool dpd_is_edp = false; | 7766 | bool dpd_is_edp = false; |
7684 | bool has_lvds = false; | 7767 | bool has_lvds; |
7685 | 7768 | ||
7686 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 7769 | has_lvds = intel_lvds_init(dev); |
7687 | has_lvds = intel_lvds_init(dev); | ||
7688 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { | 7770 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { |
7689 | /* disable the panel fitter on everything but LVDS */ | 7771 | /* disable the panel fitter on everything but LVDS */ |
7690 | I915_WRITE(PFIT_CONTROL, 0); | 7772 | I915_WRITE(PFIT_CONTROL, 0); |
@@ -7840,7 +7922,8 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
7840 | case DRM_FORMAT_VYUY: | 7922 | case DRM_FORMAT_VYUY: |
7841 | break; | 7923 | break; |
7842 | default: | 7924 | default: |
7843 | DRM_ERROR("unsupported pixel format\n"); | 7925 | DRM_DEBUG_KMS("unsupported pixel format %u\n", |
7926 | mode_cmd->pixel_format); | ||
7844 | return -EINVAL; | 7927 | return -EINVAL; |
7845 | } | 7928 | } |
7846 | 7929 | ||
@@ -8162,6 +8245,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
8162 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 8245 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
8163 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 8246 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
8164 | u32 pcu_mbox, rc6_mask = 0; | 8247 | u32 pcu_mbox, rc6_mask = 0; |
8248 | u32 gtfifodbg; | ||
8165 | int cur_freq, min_freq, max_freq; | 8249 | int cur_freq, min_freq, max_freq; |
8166 | int i; | 8250 | int i; |
8167 | 8251 | ||
@@ -8173,6 +8257,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
8173 | */ | 8257 | */ |
8174 | I915_WRITE(GEN6_RC_STATE, 0); | 8258 | I915_WRITE(GEN6_RC_STATE, 0); |
8175 | mutex_lock(&dev_priv->dev->struct_mutex); | 8259 | mutex_lock(&dev_priv->dev->struct_mutex); |
8260 | |||
8261 | /* Clear the DBG now so we don't confuse earlier errors */ | ||
8262 | if ((gtfifodbg = I915_READ(GTFIFODBG))) { | ||
8263 | DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); | ||
8264 | I915_WRITE(GTFIFODBG, gtfifodbg); | ||
8265 | } | ||
8266 | |||
8176 | gen6_gt_force_wake_get(dev_priv); | 8267 | gen6_gt_force_wake_get(dev_priv); |
8177 | 8268 | ||
8178 | /* disable the counters and set deterministic thresholds */ | 8269 | /* disable the counters and set deterministic thresholds */ |
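gen6_enable_rps now snapshots GTFIFODBG before grabbing forcewake so that stale FIFO errors are reported and cleared up front instead of being blamed on the code that follows. The hunk writes the read value straight back, which suggests write-one-to-clear semantics; that assumption, and the register helpers, are modelled by this standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_gtfifodbg = 0x4;            /* pretend one error bit is latched */

    static uint32_t reg_read(void)          { return fake_gtfifodbg; }
    static void     reg_write(uint32_t val) { fake_gtfifodbg &= ~val; }  /* W1C model */

    static void clear_stale_gt_errors(void)
    {
        uint32_t dbg = reg_read();
        if (dbg) {
            fprintf(stderr, "GT fifo had a previous error %x\n", dbg);
            reg_write(dbg);      /* writing the set bits back clears them */
        }
    }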
@@ -8959,8 +9050,6 @@ struct intel_quirk { | |||
8959 | }; | 9050 | }; |
8960 | 9051 | ||
8961 | struct intel_quirk intel_quirks[] = { | 9052 | struct intel_quirk intel_quirks[] = { |
8962 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | ||
8963 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | ||
8964 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | 9053 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
8965 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | 9054 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
8966 | 9055 | ||
@@ -9037,6 +9126,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
9037 | dev->mode_config.min_width = 0; | 9126 | dev->mode_config.min_width = 0; |
9038 | dev->mode_config.min_height = 0; | 9127 | dev->mode_config.min_height = 0; |
9039 | 9128 | ||
9129 | dev->mode_config.preferred_depth = 24; | ||
9130 | dev->mode_config.prefer_shadow = 1; | ||
9131 | |||
9040 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 9132 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
9041 | 9133 | ||
9042 | intel_init_quirks(dev); | 9134 | intel_init_quirks(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 94f860cce3f7..110552ff302c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -49,7 +49,7 @@ struct intel_dp { | |||
49 | uint32_t DP; | 49 | uint32_t DP; |
50 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 50 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
51 | bool has_audio; | 51 | bool has_audio; |
52 | int force_audio; | 52 | enum hdmi_force_audio force_audio; |
53 | uint32_t color_range; | 53 | uint32_t color_range; |
54 | int dpms_mode; | 54 | int dpms_mode; |
55 | uint8_t link_bw; | 55 | uint8_t link_bw; |
@@ -352,7 +352,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
352 | int recv_bytes; | 352 | int recv_bytes; |
353 | uint32_t status; | 353 | uint32_t status; |
354 | uint32_t aux_clock_divider; | 354 | uint32_t aux_clock_divider; |
355 | int try, precharge; | 355 | int try, precharge = 5; |
356 | 356 | ||
357 | intel_dp_check_edp(intel_dp); | 357 | intel_dp_check_edp(intel_dp); |
358 | /* The clock divider is based off the hrawclk, | 358 | /* The clock divider is based off the hrawclk, |
@@ -368,15 +368,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
368 | else | 368 | else |
369 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 369 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
370 | } else if (HAS_PCH_SPLIT(dev)) | 370 | } else if (HAS_PCH_SPLIT(dev)) |
371 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ | 371 | aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ |
372 | else | 372 | else |
373 | aux_clock_divider = intel_hrawclk(dev) / 2; | 373 | aux_clock_divider = intel_hrawclk(dev) / 2; |
374 | 374 | ||
375 | if (IS_GEN6(dev)) | ||
376 | precharge = 3; | ||
377 | else | ||
378 | precharge = 5; | ||
379 | |||
380 | /* Try to wait for any previous AUX channel activity */ | 375 | /* Try to wait for any previous AUX channel activity */ |
381 | for (try = 0; try < 3; try++) { | 376 | for (try = 0; try < 3; try++) { |
382 | status = I915_READ(ch_ctl); | 377 | status = I915_READ(ch_ctl); |
@@ -421,6 +416,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
421 | DP_AUX_CH_CTL_DONE | | 416 | DP_AUX_CH_CTL_DONE | |
422 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 417 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
423 | DP_AUX_CH_CTL_RECEIVE_ERROR); | 418 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
419 | |||
420 | if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
421 | DP_AUX_CH_CTL_RECEIVE_ERROR)) | ||
422 | continue; | ||
424 | if (status & DP_AUX_CH_CTL_DONE) | 423 | if (status & DP_AUX_CH_CTL_DONE) |
425 | break; | 424 | break; |
426 | } | 425 | } |
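The AUX transfer loop now treats timeout and receive errors as transient: such an attempt is simply retried, and only a status word with the DONE bit set ends the loop successfully. A compact model of that control flow; the bit positions and the read_status callback are illustrative, not the hardware's actual layout:

    #include <stdint.h>

    #define AUX_DONE        (1u << 0)
    #define AUX_TIMEOUT_ERR (1u << 1)
    #define AUX_RECEIVE_ERR (1u << 2)

    static int wait_aux_done(uint32_t (*read_status)(void *ctx), void *ctx, int tries)
    {
        while (tries-- > 0) {
            uint32_t status = read_status(ctx);
            if (status & (AUX_TIMEOUT_ERR | AUX_RECEIVE_ERR))
                continue;                    /* transient error: try again */
            if (status & AUX_DONE)
                return 0;                    /* transfer completed */
        }
        return -1;                           /* ran out of attempts */
    }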
@@ -2117,8 +2116,8 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
2117 | if (status != connector_status_connected) | 2116 | if (status != connector_status_connected) |
2118 | return status; | 2117 | return status; |
2119 | 2118 | ||
2120 | if (intel_dp->force_audio) { | 2119 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { |
2121 | intel_dp->has_audio = intel_dp->force_audio > 0; | 2120 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); |
2122 | } else { | 2121 | } else { |
2123 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); | 2122 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2124 | if (edid) { | 2123 | if (edid) { |
@@ -2218,10 +2217,10 @@ intel_dp_set_property(struct drm_connector *connector, | |||
2218 | 2217 | ||
2219 | intel_dp->force_audio = i; | 2218 | intel_dp->force_audio = i; |
2220 | 2219 | ||
2221 | if (i == 0) | 2220 | if (i == HDMI_AUDIO_AUTO) |
2222 | has_audio = intel_dp_detect_audio(connector); | 2221 | has_audio = intel_dp_detect_audio(connector); |
2223 | else | 2222 | else |
2224 | has_audio = i > 0; | 2223 | has_audio = (i == HDMI_AUDIO_ON); |
2225 | 2224 | ||
2226 | if (has_audio == intel_dp->has_audio) | 2225 | if (has_audio == intel_dp->has_audio) |
2227 | return 0; | 2226 | return 0; |
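With force_audio now an enum rather than a bare int, the detect path and the property path resolve has_audio the same way: AUTO defers to EDID-based detection, anything else is an explicit override. A sketch under those assumptions; the enum names follow the diff, while the numeric values and the detect callback are assumptions made for the example:

    #include <stdbool.h>

    enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,   /* value assumed; exposed as "force-dvi" */
        HDMI_AUDIO_OFF     = -1,
        HDMI_AUDIO_AUTO    =  0,   /* property default */
        HDMI_AUDIO_ON      =  1,
    };

    static bool resolve_has_audio(enum hdmi_force_audio force,
                                  bool (*detect_audio_from_edid)(void *ctx),
                                  void *ctx)
    {
        if (force == HDMI_AUDIO_AUTO)
            return detect_audio_from_edid(ctx);   /* ask the sink's EDID */
        return force == HDMI_AUDIO_ON;            /* OFF and OFF_DVI mean no audio */
    }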
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1348705faf6b..9cec6c3937fa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -374,6 +374,7 @@ extern void intel_init_emon(struct drm_device *dev); | |||
374 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 374 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
375 | struct drm_i915_gem_object *obj, | 375 | struct drm_i915_gem_object *obj, |
376 | struct intel_ring_buffer *pipelined); | 376 | struct intel_ring_buffer *pipelined); |
377 | extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); | ||
377 | 378 | ||
378 | extern int intel_framebuffer_init(struct drm_device *dev, | 379 | extern int intel_framebuffer_init(struct drm_device *dev, |
379 | struct intel_framebuffer *ifb, | 380 | struct intel_framebuffer *ifb, |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 6eda1b51c636..020a7d7f744d 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -157,7 +157,6 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | |||
157 | C(vsync_end); | 157 | C(vsync_end); |
158 | C(vtotal); | 158 | C(vtotal); |
159 | C(clock); | 159 | C(clock); |
160 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
161 | #undef C | 160 | #undef C |
162 | } | 161 | } |
163 | 162 | ||
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 571375a3eef4..2d8766978388 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -152,11 +152,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
152 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | 152 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
153 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); | 153 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); |
154 | 154 | ||
155 | info->pixmap.size = 64*1024; | 155 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
156 | info->pixmap.buf_align = 8; | ||
157 | info->pixmap.access_align = 32; | ||
158 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
159 | info->pixmap.scan_align = 1; | ||
160 | 156 | ||
161 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", | 157 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
162 | fb->width, fb->height, | 158 | fb->width, fb->height, |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 64541f7ef900..cae3e5f17a49 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -44,7 +44,7 @@ struct intel_hdmi { | |||
44 | uint32_t color_range; | 44 | uint32_t color_range; |
45 | bool has_hdmi_sink; | 45 | bool has_hdmi_sink; |
46 | bool has_audio; | 46 | bool has_audio; |
47 | int force_audio; | 47 | enum hdmi_force_audio force_audio; |
48 | void (*write_infoframe)(struct drm_encoder *encoder, | 48 | void (*write_infoframe)(struct drm_encoder *encoder, |
49 | struct dip_infoframe *frame); | 49 | struct dip_infoframe *frame); |
50 | }; | 50 | }; |
@@ -339,7 +339,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
339 | if (edid) { | 339 | if (edid) { |
340 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 340 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
341 | status = connector_status_connected; | 341 | status = connector_status_connected; |
342 | intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 342 | if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) |
343 | intel_hdmi->has_hdmi_sink = | ||
344 | drm_detect_hdmi_monitor(edid); | ||
343 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); | 345 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); |
344 | } | 346 | } |
345 | connector->display_info.raw_edid = NULL; | 347 | connector->display_info.raw_edid = NULL; |
@@ -347,8 +349,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
347 | } | 349 | } |
348 | 350 | ||
349 | if (status == connector_status_connected) { | 351 | if (status == connector_status_connected) { |
350 | if (intel_hdmi->force_audio) | 352 | if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) |
351 | intel_hdmi->has_audio = intel_hdmi->force_audio > 0; | 353 | intel_hdmi->has_audio = |
354 | (intel_hdmi->force_audio == HDMI_AUDIO_ON); | ||
352 | } | 355 | } |
353 | 356 | ||
354 | return status; | 357 | return status; |
@@ -402,7 +405,7 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
402 | return ret; | 405 | return ret; |
403 | 406 | ||
404 | if (property == dev_priv->force_audio_property) { | 407 | if (property == dev_priv->force_audio_property) { |
405 | int i = val; | 408 | enum hdmi_force_audio i = val; |
406 | bool has_audio; | 409 | bool has_audio; |
407 | 410 | ||
408 | if (i == intel_hdmi->force_audio) | 411 | if (i == intel_hdmi->force_audio) |
@@ -410,13 +413,13 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
410 | 413 | ||
411 | intel_hdmi->force_audio = i; | 414 | intel_hdmi->force_audio = i; |
412 | 415 | ||
413 | if (i == 0) | 416 | if (i == HDMI_AUDIO_AUTO) |
414 | has_audio = intel_hdmi_detect_audio(connector); | 417 | has_audio = intel_hdmi_detect_audio(connector); |
415 | else | 418 | else |
416 | has_audio = i > 0; | 419 | has_audio = (i == HDMI_AUDIO_ON); |
417 | 420 | ||
418 | if (has_audio == intel_hdmi->has_audio) | 421 | if (i == HDMI_AUDIO_OFF_DVI) |
419 | return 0; | 422 | intel_hdmi->has_hdmi_sink = 0; |
420 | 423 | ||
421 | intel_hdmi->has_audio = has_audio; | 424 | intel_hdmi->has_audio = has_audio; |
422 | goto done; | 425 | goto done; |
@@ -514,7 +517,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
514 | intel_encoder->type = INTEL_OUTPUT_HDMI; | 517 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
515 | 518 | ||
516 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 519 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
517 | connector->interlace_allowed = 0; | 520 | connector->interlace_allowed = 1; |
518 | connector->doublescan_allowed = 0; | 521 | connector->doublescan_allowed = 0; |
519 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 522 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
520 | 523 | ||
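On the HDMI side the new "force-dvi" value does one thing beyond muting audio: it also clears has_hdmi_sink, so the port is driven as plain DVI even when the EDID advertises an HDMI sink. A tiny sketch of that extra rule, reusing the illustrative enum from the DisplayPort sketch above; the state struct is likewise a stand-in:

    #include <stdbool.h>

    struct hdmi_state {
        bool has_hdmi_sink;    /* sink accepts HDMI infoframes/audio */
        bool has_audio;
    };

    /* force is an enum hdmi_force_audio value as declared in the sketch above. */
    static void apply_force_dvi(struct hdmi_state *s, int force_is_off_dvi)
    {
        if (force_is_off_dvi) {
            s->has_hdmi_sink = false;   /* treat the sink as DVI */
            s->has_audio = false;
        }
    }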
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index d30ccccb9d73..601c86e664af 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | /* Intel GPIO access functions */ | 38 | /* Intel GPIO access functions */ |
39 | 39 | ||
40 | #define I2C_RISEFALL_TIME 20 | 40 | #define I2C_RISEFALL_TIME 10 |
41 | 41 | ||
42 | static inline struct intel_gmbus * | 42 | static inline struct intel_gmbus * |
43 | to_intel_gmbus(struct i2c_adapter *i2c) | 43 | to_intel_gmbus(struct i2c_adapter *i2c) |
@@ -45,13 +45,6 @@ to_intel_gmbus(struct i2c_adapter *i2c) | |||
45 | return container_of(i2c, struct intel_gmbus, adapter); | 45 | return container_of(i2c, struct intel_gmbus, adapter); |
46 | } | 46 | } |
47 | 47 | ||
48 | struct intel_gpio { | ||
49 | struct i2c_adapter adapter; | ||
50 | struct i2c_algo_bit_data algo; | ||
51 | struct drm_i915_private *dev_priv; | ||
52 | u32 reg; | ||
53 | }; | ||
54 | |||
55 | void | 48 | void |
56 | intel_i2c_reset(struct drm_device *dev) | 49 | intel_i2c_reset(struct drm_device *dev) |
57 | { | 50 | { |
@@ -78,15 +71,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) | |||
78 | I915_WRITE(DSPCLK_GATE_D, val); | 71 | I915_WRITE(DSPCLK_GATE_D, val); |
79 | } | 72 | } |
80 | 73 | ||
81 | static u32 get_reserved(struct intel_gpio *gpio) | 74 | static u32 get_reserved(struct intel_gmbus *bus) |
82 | { | 75 | { |
83 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 76 | struct drm_i915_private *dev_priv = bus->dev_priv; |
84 | struct drm_device *dev = dev_priv->dev; | 77 | struct drm_device *dev = dev_priv->dev; |
85 | u32 reserved = 0; | 78 | u32 reserved = 0; |
86 | 79 | ||
87 | /* On most chips, these bits must be preserved in software. */ | 80 | /* On most chips, these bits must be preserved in software. */ |
88 | if (!IS_I830(dev) && !IS_845G(dev)) | 81 | if (!IS_I830(dev) && !IS_845G(dev)) |
89 | reserved = I915_READ_NOTRACE(gpio->reg) & | 82 | reserved = I915_READ_NOTRACE(bus->gpio_reg) & |
90 | (GPIO_DATA_PULLUP_DISABLE | | 83 | (GPIO_DATA_PULLUP_DISABLE | |
91 | GPIO_CLOCK_PULLUP_DISABLE); | 84 | GPIO_CLOCK_PULLUP_DISABLE); |
92 | 85 | ||
@@ -95,29 +88,29 @@ static u32 get_reserved(struct intel_gpio *gpio) | |||
95 | 88 | ||
96 | static int get_clock(void *data) | 89 | static int get_clock(void *data) |
97 | { | 90 | { |
98 | struct intel_gpio *gpio = data; | 91 | struct intel_gmbus *bus = data; |
99 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 92 | struct drm_i915_private *dev_priv = bus->dev_priv; |
100 | u32 reserved = get_reserved(gpio); | 93 | u32 reserved = get_reserved(bus); |
101 | I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); | 94 | I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); |
102 | I915_WRITE_NOTRACE(gpio->reg, reserved); | 95 | I915_WRITE_NOTRACE(bus->gpio_reg, reserved); |
103 | return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; | 96 | return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; |
104 | } | 97 | } |
105 | 98 | ||
106 | static int get_data(void *data) | 99 | static int get_data(void *data) |
107 | { | 100 | { |
108 | struct intel_gpio *gpio = data; | 101 | struct intel_gmbus *bus = data; |
109 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 102 | struct drm_i915_private *dev_priv = bus->dev_priv; |
110 | u32 reserved = get_reserved(gpio); | 103 | u32 reserved = get_reserved(bus); |
111 | I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); | 104 | I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); |
112 | I915_WRITE_NOTRACE(gpio->reg, reserved); | 105 | I915_WRITE_NOTRACE(bus->gpio_reg, reserved); |
113 | return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0; | 106 | return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; |
114 | } | 107 | } |
115 | 108 | ||
116 | static void set_clock(void *data, int state_high) | 109 | static void set_clock(void *data, int state_high) |
117 | { | 110 | { |
118 | struct intel_gpio *gpio = data; | 111 | struct intel_gmbus *bus = data; |
119 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 112 | struct drm_i915_private *dev_priv = bus->dev_priv; |
120 | u32 reserved = get_reserved(gpio); | 113 | u32 reserved = get_reserved(bus); |
121 | u32 clock_bits; | 114 | u32 clock_bits; |
122 | 115 | ||
123 | if (state_high) | 116 | if (state_high) |
@@ -126,15 +119,15 @@ static void set_clock(void *data, int state_high) | |||
126 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | | 119 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | |
127 | GPIO_CLOCK_VAL_MASK; | 120 | GPIO_CLOCK_VAL_MASK; |
128 | 121 | ||
129 | I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); | 122 | I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits); |
130 | POSTING_READ(gpio->reg); | 123 | POSTING_READ(bus->gpio_reg); |
131 | } | 124 | } |
132 | 125 | ||
133 | static void set_data(void *data, int state_high) | 126 | static void set_data(void *data, int state_high) |
134 | { | 127 | { |
135 | struct intel_gpio *gpio = data; | 128 | struct intel_gmbus *bus = data; |
136 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 129 | struct drm_i915_private *dev_priv = bus->dev_priv; |
137 | u32 reserved = get_reserved(gpio); | 130 | u32 reserved = get_reserved(bus); |
138 | u32 data_bits; | 131 | u32 data_bits; |
139 | 132 | ||
140 | if (state_high) | 133 | if (state_high) |
@@ -143,13 +136,14 @@ static void set_data(void *data, int state_high) | |||
143 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | | 136 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | |
144 | GPIO_DATA_VAL_MASK; | 137 | GPIO_DATA_VAL_MASK; |
145 | 138 | ||
146 | I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); | 139 | I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits); |
147 | POSTING_READ(gpio->reg); | 140 | POSTING_READ(bus->gpio_reg); |
148 | } | 141 | } |
149 | 142 | ||
150 | static struct i2c_adapter * | 143 | static bool |
151 | intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | 144 | intel_gpio_setup(struct intel_gmbus *bus, u32 pin) |
152 | { | 145 | { |
146 | struct drm_i915_private *dev_priv = bus->dev_priv; | ||
153 | static const int map_pin_to_reg[] = { | 147 | static const int map_pin_to_reg[] = { |
154 | 0, | 148 | 0, |
155 | GPIOB, | 149 | GPIOB, |
@@ -160,65 +154,48 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | |||
160 | 0, | 154 | 0, |
161 | GPIOF, | 155 | GPIOF, |
162 | }; | 156 | }; |
163 | struct intel_gpio *gpio; | 157 | struct i2c_algo_bit_data *algo; |
164 | 158 | ||
165 | if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) | 159 | if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) |
166 | return NULL; | 160 | return false; |
167 | 161 | ||
168 | gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); | 162 | algo = &bus->bit_algo; |
169 | if (gpio == NULL) | ||
170 | return NULL; | ||
171 | 163 | ||
172 | gpio->reg = map_pin_to_reg[pin]; | 164 | bus->gpio_reg = map_pin_to_reg[pin]; |
173 | if (HAS_PCH_SPLIT(dev_priv->dev)) | 165 | if (HAS_PCH_SPLIT(dev_priv->dev)) |
174 | gpio->reg += PCH_GPIOA - GPIOA; | 166 | bus->gpio_reg += PCH_GPIOA - GPIOA; |
175 | gpio->dev_priv = dev_priv; | 167 | |
176 | 168 | bus->adapter.algo_data = algo; | |
177 | snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), | 169 | algo->setsda = set_data; |
178 | "i915 GPIO%c", "?BACDE?F"[pin]); | 170 | algo->setscl = set_clock; |
179 | gpio->adapter.owner = THIS_MODULE; | 171 | algo->getsda = get_data; |
180 | gpio->adapter.algo_data = &gpio->algo; | 172 | algo->getscl = get_clock; |
181 | gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; | 173 | algo->udelay = I2C_RISEFALL_TIME; |
182 | gpio->algo.setsda = set_data; | 174 | algo->timeout = usecs_to_jiffies(2200); |
183 | gpio->algo.setscl = set_clock; | 175 | algo->data = bus; |
184 | gpio->algo.getsda = get_data; | 176 | |
185 | gpio->algo.getscl = get_clock; | 177 | return true; |
186 | gpio->algo.udelay = I2C_RISEFALL_TIME; | ||
187 | gpio->algo.timeout = usecs_to_jiffies(2200); | ||
188 | gpio->algo.data = gpio; | ||
189 | |||
190 | if (i2c_bit_add_bus(&gpio->adapter)) | ||
191 | goto out_free; | ||
192 | |||
193 | return &gpio->adapter; | ||
194 | |||
195 | out_free: | ||
196 | kfree(gpio); | ||
197 | return NULL; | ||
198 | } | 178 | } |
199 | 179 | ||
200 | static int | 180 | static int |
201 | intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv, | 181 | intel_i2c_quirk_xfer(struct intel_gmbus *bus, |
202 | struct i2c_adapter *adapter, | ||
203 | struct i2c_msg *msgs, | 182 | struct i2c_msg *msgs, |
204 | int num) | 183 | int num) |
205 | { | 184 | { |
206 | struct intel_gpio *gpio = container_of(adapter, | 185 | struct drm_i915_private *dev_priv = bus->dev_priv; |
207 | struct intel_gpio, | ||
208 | adapter); | ||
209 | int ret; | 186 | int ret; |
210 | 187 | ||
211 | intel_i2c_reset(dev_priv->dev); | 188 | intel_i2c_reset(dev_priv->dev); |
212 | 189 | ||
213 | intel_i2c_quirk_set(dev_priv, true); | 190 | intel_i2c_quirk_set(dev_priv, true); |
214 | set_data(gpio, 1); | 191 | set_data(bus, 1); |
215 | set_clock(gpio, 1); | 192 | set_clock(bus, 1); |
216 | udelay(I2C_RISEFALL_TIME); | 193 | udelay(I2C_RISEFALL_TIME); |
217 | 194 | ||
218 | ret = adapter->algo->master_xfer(adapter, msgs, num); | 195 | ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num); |
219 | 196 | ||
220 | set_data(gpio, 1); | 197 | set_data(bus, 1); |
221 | set_clock(gpio, 1); | 198 | set_clock(bus, 1); |
222 | intel_i2c_quirk_set(dev_priv, false); | 199 | intel_i2c_quirk_set(dev_priv, false); |
223 | 200 | ||
224 | return ret; | 201 | return ret; |
@@ -232,12 +209,15 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
232 | struct intel_gmbus *bus = container_of(adapter, | 209 | struct intel_gmbus *bus = container_of(adapter, |
233 | struct intel_gmbus, | 210 | struct intel_gmbus, |
234 | adapter); | 211 | adapter); |
235 | struct drm_i915_private *dev_priv = adapter->algo_data; | 212 | struct drm_i915_private *dev_priv = bus->dev_priv; |
236 | int i, reg_offset; | 213 | int i, reg_offset, ret; |
237 | 214 | ||
238 | if (bus->force_bit) | 215 | mutex_lock(&dev_priv->gmbus_mutex); |
239 | return intel_i2c_quirk_xfer(dev_priv, | 216 | |
240 | bus->force_bit, msgs, num); | 217 | if (bus->force_bit) { |
218 | ret = intel_i2c_quirk_xfer(bus, msgs, num); | ||
219 | goto out; | ||
220 | } | ||
241 | 221 | ||
242 | reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; | 222 | reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; |
243 | 223 | ||
@@ -249,7 +229,8 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
249 | 229 | ||
250 | if (msgs[i].flags & I2C_M_RD) { | 230 | if (msgs[i].flags & I2C_M_RD) { |
251 | I915_WRITE(GMBUS1 + reg_offset, | 231 | I915_WRITE(GMBUS1 + reg_offset, |
252 | GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | | 232 | GMBUS_CYCLE_WAIT | |
233 | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | | ||
253 | (len << GMBUS_BYTE_COUNT_SHIFT) | | 234 | (len << GMBUS_BYTE_COUNT_SHIFT) | |
254 | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | | 235 | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | |
255 | GMBUS_SLAVE_READ | GMBUS_SW_RDY); | 236 | GMBUS_SLAVE_READ | GMBUS_SW_RDY); |
@@ -278,7 +259,8 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
278 | 259 | ||
279 | I915_WRITE(GMBUS3 + reg_offset, val); | 260 | I915_WRITE(GMBUS3 + reg_offset, val); |
280 | I915_WRITE(GMBUS1 + reg_offset, | 261 | I915_WRITE(GMBUS1 + reg_offset, |
281 | (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | | 262 | GMBUS_CYCLE_WAIT | |
263 | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | | ||
282 | (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | | 264 | (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | |
283 | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | | 265 | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | |
284 | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); | 266 | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); |
@@ -317,11 +299,15 @@ clear_err: | |||
317 | I915_WRITE(GMBUS1 + reg_offset, 0); | 299 | I915_WRITE(GMBUS1 + reg_offset, 0); |
318 | 300 | ||
319 | done: | 301 | done: |
320 | /* Mark the GMBUS interface as disabled. We will re-enable it at the | 302 | /* Mark the GMBUS interface as disabled after waiting for idle. |
321 | * start of the next xfer, till then let it sleep. | 303 | * We will re-enable it at the start of the next xfer, |
304 | * till then let it sleep. | ||
322 | */ | 305 | */ |
306 | if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) | ||
307 | DRM_INFO("GMBUS timed out waiting for idle\n"); | ||
323 | I915_WRITE(GMBUS0 + reg_offset, 0); | 308 | I915_WRITE(GMBUS0 + reg_offset, 0); |
324 | return i; | 309 | ret = i; |
310 | goto out; | ||
325 | 311 | ||
326 | timeout: | 312 | timeout: |
327 | DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", | 313 | DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", |
@@ -329,23 +315,21 @@ timeout: | |||
329 | I915_WRITE(GMBUS0 + reg_offset, 0); | 315 | I915_WRITE(GMBUS0 + reg_offset, 0); |
330 | 316 | ||
331 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ | 317 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ |
332 | bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); | 318 | if (!bus->has_gpio) { |
333 | if (!bus->force_bit) | 319 | ret = -EIO; |
334 | return -ENOMEM; | 320 | } else { |
335 | 321 | bus->force_bit = true; | |
336 | return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); | 322 | ret = intel_i2c_quirk_xfer(bus, msgs, num); |
323 | } | ||
324 | out: | ||
325 | mutex_unlock(&dev_priv->gmbus_mutex); | ||
326 | return ret; | ||
337 | } | 327 | } |
338 | 328 | ||
339 | static u32 gmbus_func(struct i2c_adapter *adapter) | 329 | static u32 gmbus_func(struct i2c_adapter *adapter) |
340 | { | 330 | { |
341 | struct intel_gmbus *bus = container_of(adapter, | 331 | return i2c_bit_algo.functionality(adapter) & |
342 | struct intel_gmbus, | 332 | (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | |
343 | adapter); | ||
344 | |||
345 | if (bus->force_bit) | ||
346 | bus->force_bit->algo->functionality(bus->force_bit); | ||
347 | |||
348 | return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | ||
349 | /* I2C_FUNC_10BIT_ADDR | */ | 333 | /* I2C_FUNC_10BIT_ADDR | */ |
350 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | 334 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | |
351 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL); | 335 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL); |
@@ -375,11 +359,13 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
375 | struct drm_i915_private *dev_priv = dev->dev_private; | 359 | struct drm_i915_private *dev_priv = dev->dev_private; |
376 | int ret, i; | 360 | int ret, i; |
377 | 361 | ||
378 | dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS, | 362 | dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), |
379 | GFP_KERNEL); | 363 | GFP_KERNEL); |
380 | if (dev_priv->gmbus == NULL) | 364 | if (dev_priv->gmbus == NULL) |
381 | return -ENOMEM; | 365 | return -ENOMEM; |
382 | 366 | ||
367 | mutex_init(&dev_priv->gmbus_mutex); | ||
368 | |||
383 | for (i = 0; i < GMBUS_NUM_PORTS; i++) { | 369 | for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
384 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; | 370 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
385 | 371 | ||
@@ -391,7 +377,7 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
391 | names[i]); | 377 | names[i]); |
392 | 378 | ||
393 | bus->adapter.dev.parent = &dev->pdev->dev; | 379 | bus->adapter.dev.parent = &dev->pdev->dev; |
394 | bus->adapter.algo_data = dev_priv; | 380 | bus->dev_priv = dev_priv; |
395 | 381 | ||
396 | bus->adapter.algo = &gmbus_algorithm; | 382 | bus->adapter.algo = &gmbus_algorithm; |
397 | ret = i2c_add_adapter(&bus->adapter); | 383 | ret = i2c_add_adapter(&bus->adapter); |
@@ -401,8 +387,11 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
401 | /* By default use a conservative clock rate */ | 387 | /* By default use a conservative clock rate */ |
402 | bus->reg0 = i | GMBUS_RATE_100KHZ; | 388 | bus->reg0 = i | GMBUS_RATE_100KHZ; |
403 | 389 | ||
390 | bus->has_gpio = intel_gpio_setup(bus, i); | ||
391 | |||
404 | /* XXX force bit banging until GMBUS is fully debugged */ | 392 | /* XXX force bit banging until GMBUS is fully debugged */ |
405 | bus->force_bit = intel_gpio_create(dev_priv, i); | 393 | if (bus->has_gpio && IS_GEN2(dev)) |
394 | bus->force_bit = true; | ||
406 | } | 395 | } |
407 | 396 | ||
408 | intel_i2c_reset(dev_priv->dev); | 397 | intel_i2c_reset(dev_priv->dev); |
@@ -430,19 +419,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | |||
430 | { | 419 | { |
431 | struct intel_gmbus *bus = to_intel_gmbus(adapter); | 420 | struct intel_gmbus *bus = to_intel_gmbus(adapter); |
432 | 421 | ||
433 | if (force_bit) { | 422 | if (bus->has_gpio) |
434 | if (bus->force_bit == NULL) { | 423 | bus->force_bit = force_bit; |
435 | struct drm_i915_private *dev_priv = adapter->algo_data; | ||
436 | bus->force_bit = intel_gpio_create(dev_priv, | ||
437 | bus->reg0 & 0xff); | ||
438 | } | ||
439 | } else { | ||
440 | if (bus->force_bit) { | ||
441 | i2c_del_adapter(bus->force_bit); | ||
442 | kfree(bus->force_bit); | ||
443 | bus->force_bit = NULL; | ||
444 | } | ||
445 | } | ||
446 | } | 424 | } |
447 | 425 | ||
448 | void intel_teardown_gmbus(struct drm_device *dev) | 426 | void intel_teardown_gmbus(struct drm_device *dev) |
@@ -455,10 +433,6 @@ void intel_teardown_gmbus(struct drm_device *dev) | |||
455 | 433 | ||
456 | for (i = 0; i < GMBUS_NUM_PORTS; i++) { | 434 | for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
457 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; | 435 | struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
458 | if (bus->force_bit) { | ||
459 | i2c_del_adapter(bus->force_bit); | ||
460 | kfree(bus->force_bit); | ||
461 | } | ||
462 | i2c_del_adapter(&bus->adapter); | 436 | i2c_del_adapter(&bus->adapter); |
463 | } | 437 | } |
464 | 438 | ||
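The intel_i2c.c changes above fold the bit-banging state into intel_gmbus itself, serialize every transfer with gmbus_mutex, and on a GMBUS timeout latch force_bit so later transfers go straight to GPIO bit-banging (only possible when a GPIO register exists for that pin). A rough userspace model of that control flow; pthread_mutex stands in for the kernel mutex, the xfer helpers are stubs, and the "any negative return triggers the fallback" simplification is the sketch's, not the driver's:

    #include <pthread.h>
    #include <stdbool.h>

    struct bus_model {
        pthread_mutex_t lock;      /* plays the role of gmbus_mutex */
        bool has_gpio;             /* a GPIO register exists for this pin */
        bool force_bit;            /* sticky: bit-bang from now on */
    };

    static int xfer_gmbus(struct bus_model *b)   { (void)b; return -1; } /* stub: pretend timeout */
    static int xfer_bitbang(struct bus_model *b) { (void)b; return 0;  } /* stub: always succeeds */

    static int bus_xfer(struct bus_model *b)
    {
        int ret;

        pthread_mutex_lock(&b->lock);
        if (b->force_bit) {
            ret = xfer_bitbang(b);
        } else {
            ret = xfer_gmbus(b);
            if (ret < 0 && b->has_gpio) {
                b->force_bit = true;       /* remember the fallback for next time */
                ret = xfer_bitbang(b);
            }
            /* without a GPIO register the failure is simply returned */
        }
        pthread_mutex_unlock(&b->lock);
        return ret;
    }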
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index aa84832b0e1a..c5c0973af8a1 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -739,6 +739,22 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
739 | DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"), | 739 | DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"), |
740 | }, | 740 | }, |
741 | }, | 741 | }, |
742 | { | ||
743 | .callback = intel_no_lvds_dmi_callback, | ||
744 | .ident = "Hewlett-Packard t5745", | ||
745 | .matches = { | ||
746 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | ||
747 | DMI_MATCH(DMI_BOARD_NAME, "hp t5745"), | ||
748 | }, | ||
749 | }, | ||
750 | { | ||
751 | .callback = intel_no_lvds_dmi_callback, | ||
752 | .ident = "Hewlett-Packard st5747", | ||
753 | .matches = { | ||
754 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | ||
755 | DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), | ||
756 | }, | ||
757 | }, | ||
742 | 758 | ||
743 | { } /* terminating entry */ | 759 | { } /* terminating entry */ |
744 | }; | 760 | }; |
@@ -844,6 +860,18 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev, | |||
844 | return false; | 860 | return false; |
845 | } | 861 | } |
846 | 862 | ||
863 | static bool intel_lvds_supported(struct drm_device *dev) | ||
864 | { | ||
865 | /* With the introduction of the PCH we gained a dedicated | ||
866 | * LVDS presence pin, use it. */ | ||
867 | if (HAS_PCH_SPLIT(dev)) | ||
868 | return true; | ||
869 | |||
870 | /* Otherwise LVDS was only attached to mobile products, | ||
871 | * except for the inglorious 830gm */ | ||
872 | return IS_MOBILE(dev) && !IS_I830(dev); | ||
873 | } | ||
874 | |||
847 | /** | 875 | /** |
848 | * intel_lvds_init - setup LVDS connectors on this device | 876 | * intel_lvds_init - setup LVDS connectors on this device |
849 | * @dev: drm device | 877 | * @dev: drm device |
@@ -865,6 +893,9 @@ bool intel_lvds_init(struct drm_device *dev) | |||
865 | int pipe; | 893 | int pipe; |
866 | u8 pin; | 894 | u8 pin; |
867 | 895 | ||
896 | if (!intel_lvds_supported(dev)) | ||
897 | return false; | ||
898 | |||
868 | /* Skip init on machines we know falsely report LVDS */ | 899 | /* Skip init on machines we know falsely report LVDS */ |
869 | if (dmi_check_system(intel_no_lvds)) | 900 | if (dmi_check_system(intel_no_lvds)) |
870 | return false; | 901 | return false; |
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index be2c6fe07d12..d1928e79d9b6 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/fb.h> | 28 | #include <linux/fb.h> |
29 | #include <drm/drm_edid.h> | 29 | #include <drm/drm_edid.h> |
30 | #include "drmP.h" | 30 | #include "drmP.h" |
31 | #include "drm_edid.h" | ||
31 | #include "intel_drv.h" | 32 | #include "intel_drv.h" |
32 | #include "i915_drv.h" | 33 | #include "i915_drv.h" |
33 | 34 | ||
@@ -42,13 +43,13 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) | |||
42 | u8 buf[2]; | 43 | u8 buf[2]; |
43 | struct i2c_msg msgs[] = { | 44 | struct i2c_msg msgs[] = { |
44 | { | 45 | { |
45 | .addr = 0x50, | 46 | .addr = DDC_ADDR, |
46 | .flags = 0, | 47 | .flags = 0, |
47 | .len = 1, | 48 | .len = 1, |
48 | .buf = out_buf, | 49 | .buf = out_buf, |
49 | }, | 50 | }, |
50 | { | 51 | { |
51 | .addr = 0x50, | 52 | .addr = DDC_ADDR, |
52 | .flags = I2C_M_RD, | 53 | .flags = I2C_M_RD, |
53 | .len = 1, | 54 | .len = 1, |
54 | .buf = buf, | 55 | .buf = buf, |
@@ -83,10 +84,11 @@ int intel_ddc_get_modes(struct drm_connector *connector, | |||
83 | return ret; | 84 | return ret; |
84 | } | 85 | } |
85 | 86 | ||
86 | static const char *force_audio_names[] = { | 87 | static const struct drm_prop_enum_list force_audio_names[] = { |
87 | "off", | 88 | { HDMI_AUDIO_OFF_DVI, "force-dvi" }, |
88 | "auto", | 89 | { HDMI_AUDIO_OFF, "off" }, |
89 | "on", | 90 | { HDMI_AUDIO_AUTO, "auto" }, |
91 | { HDMI_AUDIO_ON, "on" }, | ||
90 | }; | 92 | }; |
91 | 93 | ||
92 | void | 94 | void |
@@ -95,27 +97,24 @@ intel_attach_force_audio_property(struct drm_connector *connector) | |||
95 | struct drm_device *dev = connector->dev; | 97 | struct drm_device *dev = connector->dev; |
96 | struct drm_i915_private *dev_priv = dev->dev_private; | 98 | struct drm_i915_private *dev_priv = dev->dev_private; |
97 | struct drm_property *prop; | 99 | struct drm_property *prop; |
98 | int i; | ||
99 | 100 | ||
100 | prop = dev_priv->force_audio_property; | 101 | prop = dev_priv->force_audio_property; |
101 | if (prop == NULL) { | 102 | if (prop == NULL) { |
102 | prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, | 103 | prop = drm_property_create_enum(dev, 0, |
103 | "audio", | 104 | "audio", |
105 | force_audio_names, | ||
104 | ARRAY_SIZE(force_audio_names)); | 106 | ARRAY_SIZE(force_audio_names)); |
105 | if (prop == NULL) | 107 | if (prop == NULL) |
106 | return; | 108 | return; |
107 | 109 | ||
108 | for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) | ||
109 | drm_property_add_enum(prop, i, i-1, force_audio_names[i]); | ||
110 | |||
111 | dev_priv->force_audio_property = prop; | 110 | dev_priv->force_audio_property = prop; |
112 | } | 111 | } |
113 | drm_connector_attach_property(connector, prop, 0); | 112 | drm_connector_attach_property(connector, prop, 0); |
114 | } | 113 | } |
115 | 114 | ||
116 | static const char *broadcast_rgb_names[] = { | 115 | static const struct drm_prop_enum_list broadcast_rgb_names[] = { |
117 | "Full", | 116 | { 0, "Full" }, |
118 | "Limited 16:235", | 117 | { 1, "Limited 16:235" }, |
119 | }; | 118 | }; |
120 | 119 | ||
121 | void | 120 | void |
@@ -124,19 +123,16 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) | |||
124 | struct drm_device *dev = connector->dev; | 123 | struct drm_device *dev = connector->dev; |
125 | struct drm_i915_private *dev_priv = dev->dev_private; | 124 | struct drm_i915_private *dev_priv = dev->dev_private; |
126 | struct drm_property *prop; | 125 | struct drm_property *prop; |
127 | int i; | ||
128 | 126 | ||
129 | prop = dev_priv->broadcast_rgb_property; | 127 | prop = dev_priv->broadcast_rgb_property; |
130 | if (prop == NULL) { | 128 | if (prop == NULL) { |
131 | prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, | 129 | prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, |
132 | "Broadcast RGB", | 130 | "Broadcast RGB", |
131 | broadcast_rgb_names, | ||
133 | ARRAY_SIZE(broadcast_rgb_names)); | 132 | ARRAY_SIZE(broadcast_rgb_names)); |
134 | if (prop == NULL) | 133 | if (prop == NULL) |
135 | return; | 134 | return; |
136 | 135 | ||
137 | for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) | ||
138 | drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); | ||
139 | |||
140 | dev_priv->broadcast_rgb_property = prop; | 136 | dev_priv->broadcast_rgb_property = prop; |
141 | } | 137 | } |
142 | 138 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index cdf17d4cc1f7..80b331c322fb 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -25,8 +25,6 @@ | |||
25 | * | 25 | * |
26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c | 26 | * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c |
27 | */ | 27 | */ |
28 | |||
29 | #include <linux/seq_file.h> | ||
30 | #include "drmP.h" | 28 | #include "drmP.h" |
31 | #include "drm.h" | 29 | #include "drm.h" |
32 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
@@ -227,7 +225,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
227 | } | 225 | } |
228 | overlay->last_flip_req = request->seqno; | 226 | overlay->last_flip_req = request->seqno; |
229 | overlay->flip_tail = tail; | 227 | overlay->flip_tail = tail; |
230 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); | 228 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, |
229 | true); | ||
231 | if (ret) | 230 | if (ret) |
232 | return ret; | 231 | return ret; |
233 | 232 | ||
@@ -263,7 +262,7 @@ i830_activate_pipe_a(struct drm_device *dev) | |||
263 | DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); | 262 | DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); |
264 | 263 | ||
265 | mode = drm_mode_duplicate(dev, &vesa_640x480); | 264 | mode = drm_mode_duplicate(dev, &vesa_640x480); |
266 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 265 | drm_mode_set_crtcinfo(mode, 0); |
267 | if (!drm_crtc_helper_set_mode(&crtc->base, mode, | 266 | if (!drm_crtc_helper_set_mode(&crtc->base, mode, |
268 | crtc->base.x, crtc->base.y, | 267 | crtc->base.x, crtc->base.y, |
269 | crtc->base.fb)) | 268 | crtc->base.fb)) |
@@ -448,7 +447,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) | |||
448 | if (overlay->last_flip_req == 0) | 447 | if (overlay->last_flip_req == 0) |
449 | return 0; | 448 | return 0; |
450 | 449 | ||
451 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); | 450 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, |
451 | true); | ||
452 | if (ret) | 452 | if (ret) |
453 | return ret; | 453 | return ret; |
454 | 454 | ||
@@ -935,10 +935,10 @@ static int check_overlay_dst(struct intel_overlay *overlay, | |||
935 | { | 935 | { |
936 | struct drm_display_mode *mode = &overlay->crtc->base.mode; | 936 | struct drm_display_mode *mode = &overlay->crtc->base.mode; |
937 | 937 | ||
938 | if (rec->dst_x < mode->crtc_hdisplay && | 938 | if (rec->dst_x < mode->hdisplay && |
939 | rec->dst_x + rec->dst_width <= mode->crtc_hdisplay && | 939 | rec->dst_x + rec->dst_width <= mode->hdisplay && |
940 | rec->dst_y < mode->crtc_vdisplay && | 940 | rec->dst_y < mode->vdisplay && |
941 | rec->dst_y + rec->dst_height <= mode->crtc_vdisplay) | 941 | rec->dst_y + rec->dst_height <= mode->vdisplay) |
942 | return 0; | 942 | return 0; |
943 | else | 943 | else |
944 | return -EINVAL; | 944 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 04d79fd1dc9d..230a141dbea3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | |||
48 | 48 | ||
49 | adjusted_mode->clock = fixed_mode->clock; | 49 | adjusted_mode->clock = fixed_mode->clock; |
50 | 50 | ||
51 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | 51 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
52 | } | 52 | } |
53 | 53 | ||
54 | /* adjusted_mode has been preset to be the panel's fixed mode */ | 54 | /* adjusted_mode has been preset to be the panel's fixed mode */ |
@@ -141,8 +141,8 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | |||
141 | dev_priv->saveBLC_PWM_CTL2 = val; | 141 | dev_priv->saveBLC_PWM_CTL2 = val; |
142 | } else if (val == 0) { | 142 | } else if (val == 0) { |
143 | I915_WRITE(BLC_PWM_PCH_CTL2, | 143 | I915_WRITE(BLC_PWM_PCH_CTL2, |
144 | dev_priv->saveBLC_PWM_CTL); | 144 | dev_priv->saveBLC_PWM_CTL2); |
145 | val = dev_priv->saveBLC_PWM_CTL; | 145 | val = dev_priv->saveBLC_PWM_CTL2; |
146 | } | 146 | } |
147 | } else { | 147 | } else { |
148 | val = I915_READ(BLC_PWM_CTL); | 148 | val = I915_READ(BLC_PWM_CTL); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 536191540b03..fc66af6a9448 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring) | |||
52 | return space; | 52 | return space; |
53 | } | 53 | } |
54 | 54 | ||
55 | static u32 i915_gem_get_seqno(struct drm_device *dev) | ||
56 | { | ||
57 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
58 | u32 seqno; | ||
59 | |||
60 | seqno = dev_priv->next_seqno; | ||
61 | |||
62 | /* reserve 0 for non-seqno */ | ||
63 | if (++dev_priv->next_seqno == 0) | ||
64 | dev_priv->next_seqno = 1; | ||
65 | |||
66 | return seqno; | ||
67 | } | ||
68 | |||
69 | static int | 55 | static int |
70 | render_ring_flush(struct intel_ring_buffer *ring, | 56 | render_ring_flush(struct intel_ring_buffer *ring, |
71 | u32 invalidate_domains, | 57 | u32 invalidate_domains, |
@@ -399,8 +385,6 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
399 | 385 | ||
400 | if (INTEL_INFO(dev)->gen > 3) { | 386 | if (INTEL_INFO(dev)->gen > 3) { |
401 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; | 387 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; |
402 | if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
403 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | ||
404 | I915_WRITE(MI_MODE, mode); | 388 | I915_WRITE(MI_MODE, mode); |
405 | if (IS_GEN7(dev)) | 389 | if (IS_GEN7(dev)) |
406 | I915_WRITE(GFX_MODE_GEN7, | 390 | I915_WRITE(GFX_MODE_GEN7, |
@@ -467,7 +451,7 @@ gen6_add_request(struct intel_ring_buffer *ring, | |||
467 | mbox1_reg = ring->signal_mbox[0]; | 451 | mbox1_reg = ring->signal_mbox[0]; |
468 | mbox2_reg = ring->signal_mbox[1]; | 452 | mbox2_reg = ring->signal_mbox[1]; |
469 | 453 | ||
470 | *seqno = i915_gem_get_seqno(ring->dev); | 454 | *seqno = i915_gem_next_request_seqno(ring); |
471 | 455 | ||
472 | update_mboxes(ring, *seqno, mbox1_reg); | 456 | update_mboxes(ring, *seqno, mbox1_reg); |
473 | update_mboxes(ring, *seqno, mbox2_reg); | 457 | update_mboxes(ring, *seqno, mbox2_reg); |
@@ -565,8 +549,7 @@ static int | |||
565 | pc_render_add_request(struct intel_ring_buffer *ring, | 549 | pc_render_add_request(struct intel_ring_buffer *ring, |
566 | u32 *result) | 550 | u32 *result) |
567 | { | 551 | { |
568 | struct drm_device *dev = ring->dev; | 552 | u32 seqno = i915_gem_next_request_seqno(ring); |
569 | u32 seqno = i915_gem_get_seqno(dev); | ||
570 | struct pipe_control *pc = ring->private; | 553 | struct pipe_control *pc = ring->private; |
571 | u32 scratch_addr = pc->gtt_offset + 128; | 554 | u32 scratch_addr = pc->gtt_offset + 128; |
572 | int ret; | 555 | int ret; |
@@ -600,6 +583,7 @@ pc_render_add_request(struct intel_ring_buffer *ring, | |||
600 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 583 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
601 | scratch_addr += 128; | 584 | scratch_addr += 128; |
602 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 585 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
586 | |||
603 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | 587 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
604 | PIPE_CONTROL_WRITE_FLUSH | | 588 | PIPE_CONTROL_WRITE_FLUSH | |
605 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | 589 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
@@ -617,8 +601,7 @@ static int | |||
617 | render_ring_add_request(struct intel_ring_buffer *ring, | 601 | render_ring_add_request(struct intel_ring_buffer *ring, |
618 | u32 *result) | 602 | u32 *result) |
619 | { | 603 | { |
620 | struct drm_device *dev = ring->dev; | 604 | u32 seqno = i915_gem_next_request_seqno(ring); |
621 | u32 seqno = i915_gem_get_seqno(dev); | ||
622 | int ret; | 605 | int ret; |
623 | 606 | ||
624 | ret = intel_ring_begin(ring, 4); | 607 | ret = intel_ring_begin(ring, 4); |
@@ -744,13 +727,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
744 | */ | 727 | */ |
745 | if (IS_GEN7(dev)) { | 728 | if (IS_GEN7(dev)) { |
746 | switch (ring->id) { | 729 | switch (ring->id) { |
747 | case RING_RENDER: | 730 | case RCS: |
748 | mmio = RENDER_HWS_PGA_GEN7; | 731 | mmio = RENDER_HWS_PGA_GEN7; |
749 | break; | 732 | break; |
750 | case RING_BLT: | 733 | case BCS: |
751 | mmio = BLT_HWS_PGA_GEN7; | 734 | mmio = BLT_HWS_PGA_GEN7; |
752 | break; | 735 | break; |
753 | case RING_BSD: | 736 | case VCS: |
754 | mmio = BSD_HWS_PGA_GEN7; | 737 | mmio = BSD_HWS_PGA_GEN7; |
755 | break; | 738 | break; |
756 | } | 739 | } |
@@ -792,7 +775,7 @@ ring_add_request(struct intel_ring_buffer *ring, | |||
792 | if (ret) | 775 | if (ret) |
793 | return ret; | 776 | return ret; |
794 | 777 | ||
795 | seqno = i915_gem_get_seqno(ring->dev); | 778 | seqno = i915_gem_next_request_seqno(ring); |
796 | 779 | ||
797 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 780 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
798 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 781 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
@@ -816,8 +799,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) | |||
816 | /* It looks like we need to prevent the gt from suspending while waiting | 799 | /* It looks like we need to prevent the gt from suspending while waiting |
817 | * for a notify irq, otherwise irqs seem to get lost on at least the | 800 | * for a notify irq, otherwise irqs seem to get lost on at least the |
818 | * blt/bsd rings on ivb. */ | 801 | * blt/bsd rings on ivb. */ |
819 | if (IS_GEN7(dev)) | 802 | gen6_gt_force_wake_get(dev_priv); |
820 | gen6_gt_force_wake_get(dev_priv); | ||
821 | 803 | ||
822 | spin_lock(&ring->irq_lock); | 804 | spin_lock(&ring->irq_lock); |
823 | if (ring->irq_refcount++ == 0) { | 805 | if (ring->irq_refcount++ == 0) { |
@@ -844,8 +826,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) | |||
844 | } | 826 | } |
845 | spin_unlock(&ring->irq_lock); | 827 | spin_unlock(&ring->irq_lock); |
846 | 828 | ||
847 | if (IS_GEN7(dev)) | 829 | gen6_gt_force_wake_put(dev_priv); |
848 | gen6_gt_force_wake_put(dev_priv); | ||
849 | } | 830 | } |
850 | 831 | ||
851 | static bool | 832 | static bool |
@@ -1127,11 +1108,93 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | |||
1127 | return 0; | 1108 | return 0; |
1128 | } | 1109 | } |
1129 | 1110 | ||
1111 | static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) | ||
1112 | { | ||
1113 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||
1114 | bool was_interruptible; | ||
1115 | int ret; | ||
1116 | |||
1117 | /* XXX As we have not yet audited all the paths to check that | ||
1118 | * they are ready for ERESTARTSYS from intel_ring_begin, do not | ||
1119 | * allow us to be interruptible by a signal. | ||
1120 | */ | ||
1121 | was_interruptible = dev_priv->mm.interruptible; | ||
1122 | dev_priv->mm.interruptible = false; | ||
1123 | |||
1124 | ret = i915_wait_request(ring, seqno, true); | ||
1125 | |||
1126 | dev_priv->mm.interruptible = was_interruptible; | ||
1127 | |||
1128 | return ret; | ||
1129 | } | ||
1130 | |||
1131 | static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) | ||
1132 | { | ||
1133 | struct drm_i915_gem_request *request; | ||
1134 | u32 seqno = 0; | ||
1135 | int ret; | ||
1136 | |||
1137 | i915_gem_retire_requests_ring(ring); | ||
1138 | |||
1139 | if (ring->last_retired_head != -1) { | ||
1140 | ring->head = ring->last_retired_head; | ||
1141 | ring->last_retired_head = -1; | ||
1142 | ring->space = ring_space(ring); | ||
1143 | if (ring->space >= n) | ||
1144 | return 0; | ||
1145 | } | ||
1146 | |||
1147 | list_for_each_entry(request, &ring->request_list, list) { | ||
1148 | int space; | ||
1149 | |||
1150 | if (request->tail == -1) | ||
1151 | continue; | ||
1152 | |||
1153 | space = request->tail - (ring->tail + 8); | ||
1154 | if (space < 0) | ||
1155 | space += ring->size; | ||
1156 | if (space >= n) { | ||
1157 | seqno = request->seqno; | ||
1158 | break; | ||
1159 | } | ||
1160 | |||
1161 | /* Consume this request in case we need more space than | ||
1162 | * is available and so need to prevent a race between | ||
1163 | * updating last_retired_head and direct reads of | ||
1164 | * I915_RING_HEAD. It also provides a nice sanity check. | ||
1165 | */ | ||
1166 | request->tail = -1; | ||
1167 | } | ||
1168 | |||
1169 | if (seqno == 0) | ||
1170 | return -ENOSPC; | ||
1171 | |||
1172 | ret = intel_ring_wait_seqno(ring, seqno); | ||
1173 | if (ret) | ||
1174 | return ret; | ||
1175 | |||
1176 | if (WARN_ON(ring->last_retired_head == -1)) | ||
1177 | return -ENOSPC; | ||
1178 | |||
1179 | ring->head = ring->last_retired_head; | ||
1180 | ring->last_retired_head = -1; | ||
1181 | ring->space = ring_space(ring); | ||
1182 | if (WARN_ON(ring->space < n)) | ||
1183 | return -ENOSPC; | ||
1184 | |||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1130 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | 1188 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
1131 | { | 1189 | { |
1132 | struct drm_device *dev = ring->dev; | 1190 | struct drm_device *dev = ring->dev; |
1133 | struct drm_i915_private *dev_priv = dev->dev_private; | 1191 | struct drm_i915_private *dev_priv = dev->dev_private; |
1134 | unsigned long end; | 1192 | unsigned long end; |
1193 | int ret; | ||
1194 | |||
1195 | ret = intel_ring_wait_request(ring, n); | ||
1196 | if (ret != -ENOSPC) | ||
1197 | return ret; | ||
1135 | 1198 | ||
1136 | trace_i915_ring_wait_begin(ring); | 1199 | trace_i915_ring_wait_begin(ring); |
1137 | if (drm_core_check_feature(dev, DRIVER_GEM)) | 1200 | if (drm_core_check_feature(dev, DRIVER_GEM)) |
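The intel_ring_wait_request() added above reclaims ring space by waiting for the oldest recorded request whose consumption would leave at least n bytes free, rather than busy-polling the hardware head register; once that request retires, last_retired_head tells us how far head has provably advanced. The space test itself is ordinary circular-buffer arithmetic on the recorded request->tail values (the 8-byte slack keeps tail from catching up with head exactly). A standalone illustration of that wraparound computation, using hypothetical sizes:

	#include <assert.h>
	#include <stdio.h>

	/* Free bytes in a 'size'-byte ring if the consumer could advance to
	 * 'request_tail' while the producer sits at 'ring_tail'.  Mirrors:
	 *   space = request->tail - (ring->tail + 8);
	 *   if (space < 0) space += ring->size;                            */
	static int ring_space_after(int request_tail, int ring_tail, int size)
	{
		int space = request_tail - (ring_tail + 8);
		if (space < 0)
			space += size;
		return space;
	}

	int main(void)
	{
		/* hypothetical 4 KiB ring */
		assert(ring_space_after(3072, 1024, 4096) == 2040); /* no wrap */
		assert(ring_space_after(512, 3584, 4096) == 1016);  /* wrapped */
		printf("ring space math ok\n");
		return 0;
	}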
@@ -1200,7 +1263,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring) | |||
1200 | 1263 | ||
1201 | static const struct intel_ring_buffer render_ring = { | 1264 | static const struct intel_ring_buffer render_ring = { |
1202 | .name = "render ring", | 1265 | .name = "render ring", |
1203 | .id = RING_RENDER, | 1266 | .id = RCS, |
1204 | .mmio_base = RENDER_RING_BASE, | 1267 | .mmio_base = RENDER_RING_BASE, |
1205 | .size = 32 * PAGE_SIZE, | 1268 | .size = 32 * PAGE_SIZE, |
1206 | .init = init_render_ring, | 1269 | .init = init_render_ring, |
@@ -1223,7 +1286,7 @@ static const struct intel_ring_buffer render_ring = { | |||
1223 | 1286 | ||
1224 | static const struct intel_ring_buffer bsd_ring = { | 1287 | static const struct intel_ring_buffer bsd_ring = { |
1225 | .name = "bsd ring", | 1288 | .name = "bsd ring", |
1226 | .id = RING_BSD, | 1289 | .id = VCS, |
1227 | .mmio_base = BSD_RING_BASE, | 1290 | .mmio_base = BSD_RING_BASE, |
1228 | .size = 32 * PAGE_SIZE, | 1291 | .size = 32 * PAGE_SIZE, |
1229 | .init = init_ring_common, | 1292 | .init = init_ring_common, |
@@ -1333,7 +1396,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) | |||
1333 | /* ring buffer for Video Codec for Gen6+ */ | 1396 | /* ring buffer for Video Codec for Gen6+ */ |
1334 | static const struct intel_ring_buffer gen6_bsd_ring = { | 1397 | static const struct intel_ring_buffer gen6_bsd_ring = { |
1335 | .name = "gen6 bsd ring", | 1398 | .name = "gen6 bsd ring", |
1336 | .id = RING_BSD, | 1399 | .id = VCS, |
1337 | .mmio_base = GEN6_BSD_RING_BASE, | 1400 | .mmio_base = GEN6_BSD_RING_BASE, |
1338 | .size = 32 * PAGE_SIZE, | 1401 | .size = 32 * PAGE_SIZE, |
1339 | .init = init_ring_common, | 1402 | .init = init_ring_common, |
@@ -1369,79 +1432,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring) | |||
1369 | GEN6_BLITTER_USER_INTERRUPT); | 1432 | GEN6_BLITTER_USER_INTERRUPT); |
1370 | } | 1433 | } |
1371 | 1434 | ||
1372 | |||
1373 | /* Workaround for some stepping of SNB, | ||
1374 | * each time when BLT engine ring tail moved, | ||
1375 | * the first command in the ring to be parsed | ||
1376 | * should be MI_BATCH_BUFFER_START | ||
1377 | */ | ||
1378 | #define NEED_BLT_WORKAROUND(dev) \ | ||
1379 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) | ||
1380 | |||
1381 | static inline struct drm_i915_gem_object * | ||
1382 | to_blt_workaround(struct intel_ring_buffer *ring) | ||
1383 | { | ||
1384 | return ring->private; | ||
1385 | } | ||
1386 | |||
1387 | static int blt_ring_init(struct intel_ring_buffer *ring) | ||
1388 | { | ||
1389 | if (NEED_BLT_WORKAROUND(ring->dev)) { | ||
1390 | struct drm_i915_gem_object *obj; | ||
1391 | u32 *ptr; | ||
1392 | int ret; | ||
1393 | |||
1394 | obj = i915_gem_alloc_object(ring->dev, 4096); | ||
1395 | if (obj == NULL) | ||
1396 | return -ENOMEM; | ||
1397 | |||
1398 | ret = i915_gem_object_pin(obj, 4096, true); | ||
1399 | if (ret) { | ||
1400 | drm_gem_object_unreference(&obj->base); | ||
1401 | return ret; | ||
1402 | } | ||
1403 | |||
1404 | ptr = kmap(obj->pages[0]); | ||
1405 | *ptr++ = MI_BATCH_BUFFER_END; | ||
1406 | *ptr++ = MI_NOOP; | ||
1407 | kunmap(obj->pages[0]); | ||
1408 | |||
1409 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | ||
1410 | if (ret) { | ||
1411 | i915_gem_object_unpin(obj); | ||
1412 | drm_gem_object_unreference(&obj->base); | ||
1413 | return ret; | ||
1414 | } | ||
1415 | |||
1416 | ring->private = obj; | ||
1417 | } | ||
1418 | |||
1419 | return init_ring_common(ring); | ||
1420 | } | ||
1421 | |||
1422 | static int blt_ring_begin(struct intel_ring_buffer *ring, | ||
1423 | int num_dwords) | ||
1424 | { | ||
1425 | if (ring->private) { | ||
1426 | int ret = intel_ring_begin(ring, num_dwords+2); | ||
1427 | if (ret) | ||
1428 | return ret; | ||
1429 | |||
1430 | intel_ring_emit(ring, MI_BATCH_BUFFER_START); | ||
1431 | intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); | ||
1432 | |||
1433 | return 0; | ||
1434 | } else | ||
1435 | return intel_ring_begin(ring, 4); | ||
1436 | } | ||
1437 | |||
1438 | static int blt_ring_flush(struct intel_ring_buffer *ring, | 1435 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1439 | u32 invalidate, u32 flush) | 1436 | u32 invalidate, u32 flush) |
1440 | { | 1437 | { |
1441 | uint32_t cmd; | 1438 | uint32_t cmd; |
1442 | int ret; | 1439 | int ret; |
1443 | 1440 | ||
1444 | ret = blt_ring_begin(ring, 4); | 1441 | ret = intel_ring_begin(ring, 4); |
1445 | if (ret) | 1442 | if (ret) |
1446 | return ret; | 1443 | return ret; |
1447 | 1444 | ||
@@ -1456,22 +1453,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, | |||
1456 | return 0; | 1453 | return 0; |
1457 | } | 1454 | } |
1458 | 1455 | ||
1459 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | ||
1460 | { | ||
1461 | if (!ring->private) | ||
1462 | return; | ||
1463 | |||
1464 | i915_gem_object_unpin(ring->private); | ||
1465 | drm_gem_object_unreference(ring->private); | ||
1466 | ring->private = NULL; | ||
1467 | } | ||
1468 | |||
1469 | static const struct intel_ring_buffer gen6_blt_ring = { | 1456 | static const struct intel_ring_buffer gen6_blt_ring = { |
1470 | .name = "blt ring", | 1457 | .name = "blt ring", |
1471 | .id = RING_BLT, | 1458 | .id = BCS, |
1472 | .mmio_base = BLT_RING_BASE, | 1459 | .mmio_base = BLT_RING_BASE, |
1473 | .size = 32 * PAGE_SIZE, | 1460 | .size = 32 * PAGE_SIZE, |
1474 | .init = blt_ring_init, | 1461 | .init = init_ring_common, |
1475 | .write_tail = ring_write_tail, | 1462 | .write_tail = ring_write_tail, |
1476 | .flush = blt_ring_flush, | 1463 | .flush = blt_ring_flush, |
1477 | .add_request = gen6_add_request, | 1464 | .add_request = gen6_add_request, |
@@ -1479,7 +1466,6 @@ static const struct intel_ring_buffer gen6_blt_ring = { | |||
1479 | .irq_get = blt_ring_get_irq, | 1466 | .irq_get = blt_ring_get_irq, |
1480 | .irq_put = blt_ring_put_irq, | 1467 | .irq_put = blt_ring_put_irq, |
1481 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, | 1468 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
1482 | .cleanup = blt_ring_cleanup, | ||
1483 | .sync_to = gen6_blt_ring_sync_to, | 1469 | .sync_to = gen6_blt_ring_sync_to, |
1484 | .semaphore_register = {MI_SEMAPHORE_SYNC_BR, | 1470 | .semaphore_register = {MI_SEMAPHORE_SYNC_BR, |
1485 | MI_SEMAPHORE_SYNC_BV, | 1471 | MI_SEMAPHORE_SYNC_BV, |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 68281c96c558..bc0365b8fa4d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -1,13 +1,6 @@ | |||
1 | #ifndef _INTEL_RINGBUFFER_H_ | 1 | #ifndef _INTEL_RINGBUFFER_H_ |
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ |
3 | 3 | ||
4 | enum { | ||
5 | RCS = 0x0, | ||
6 | VCS, | ||
7 | BCS, | ||
8 | I915_NUM_RINGS, | ||
9 | }; | ||
10 | |||
11 | struct intel_hw_status_page { | 4 | struct intel_hw_status_page { |
12 | u32 __iomem *page_addr; | 5 | u32 __iomem *page_addr; |
13 | unsigned int gfx_addr; | 6 | unsigned int gfx_addr; |
@@ -36,10 +29,11 @@ struct intel_hw_status_page { | |||
36 | struct intel_ring_buffer { | 29 | struct intel_ring_buffer { |
37 | const char *name; | 30 | const char *name; |
38 | enum intel_ring_id { | 31 | enum intel_ring_id { |
39 | RING_RENDER = 0x1, | 32 | RCS = 0x0, |
40 | RING_BSD = 0x2, | 33 | VCS, |
41 | RING_BLT = 0x4, | 34 | BCS, |
42 | } id; | 35 | } id; |
36 | #define I915_NUM_RINGS 3 | ||
43 | u32 mmio_base; | 37 | u32 mmio_base; |
44 | void __iomem *virtual_start; | 38 | void __iomem *virtual_start; |
45 | struct drm_device *dev; | 39 | struct drm_device *dev; |
@@ -52,6 +46,16 @@ struct intel_ring_buffer { | |||
52 | int effective_size; | 46 | int effective_size; |
53 | struct intel_hw_status_page status_page; | 47 | struct intel_hw_status_page status_page; |
54 | 48 | ||
49 | /** We track the position of the requests in the ring buffer, and | ||
50 | * when each is retired we increment last_retired_head as the GPU | ||
51 | * must have finished processing the request and so we know we | ||
52 | * can advance the ringbuffer up to that position. | ||
53 | * | ||
54 | * last_retired_head is set to -1 after the value is consumed so | ||
55 | * we can detect new retirements. | ||
56 | */ | ||
57 | u32 last_retired_head; | ||
58 | |||
55 | spinlock_t irq_lock; | 59 | spinlock_t irq_lock; |
56 | u32 irq_refcount; | 60 | u32 irq_refcount; |
57 | u32 irq_mask; | 61 | u32 irq_mask; |
@@ -119,6 +123,12 @@ struct intel_ring_buffer { | |||
119 | void *private; | 123 | void *private; |
120 | }; | 124 | }; |
121 | 125 | ||
126 | static inline unsigned | ||
127 | intel_ring_flag(struct intel_ring_buffer *ring) | ||
128 | { | ||
129 | return 1 << ring->id; | ||
130 | } | ||
131 | |||
122 | static inline u32 | 132 | static inline u32 |
123 | intel_ring_sync_index(struct intel_ring_buffer *ring, | 133 | intel_ring_sync_index(struct intel_ring_buffer *ring, |
124 | struct intel_ring_buffer *other) | 134 | struct intel_ring_buffer *other) |
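With the ring ids now plain indices (RCS = 0, VCS = 1, BCS = 2, bounded by the new I915_NUM_RINGS define) instead of the old bitmask constants, the intel_ring_flag() helper added above recovers a per-ring mask by shifting. A hypothetical fragment, assuming the usual dev_priv->ring[] array indexed by these ids:

	/* RCS (0) -> 1 << 0 == 0x1   (was RING_RENDER)
	 * VCS (1) -> 1 << 1 == 0x2   (was RING_BSD)
	 * BCS (2) -> 1 << 2 == 0x4   (was RING_BLT) */
	unsigned ring_mask = 0;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		ring_mask |= intel_ring_flag(&dev_priv->ring[i]);
	/* ring_mask == 0x7 */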
@@ -193,6 +203,11 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); | |||
193 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | 203 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
194 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | 204 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
195 | 205 | ||
206 | static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) | ||
207 | { | ||
208 | return ring->tail; | ||
209 | } | ||
210 | |||
196 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) | 211 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) |
197 | { | 212 | { |
198 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | 213 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e334ec33a47d..e36b171c1e7d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -148,7 +148,7 @@ struct intel_sdvo_connector { | |||
148 | /* Mark the type of connector */ | 148 | /* Mark the type of connector */ |
149 | uint16_t output_flag; | 149 | uint16_t output_flag; |
150 | 150 | ||
151 | int force_audio; | 151 | enum hdmi_force_audio force_audio; |
152 | 152 | ||
153 | /* This contains all current supported TV format */ | 153 | /* This contains all current supported TV format */ |
154 | u8 tv_format_supported[TV_FORMAT_NUM]; | 154 | u8 tv_format_supported[TV_FORMAT_NUM]; |
@@ -944,7 +944,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, | |||
944 | 944 | ||
945 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); | 945 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); |
946 | 946 | ||
947 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
948 | return true; | 947 | return true; |
949 | } | 948 | } |
950 | 949 | ||
@@ -1310,8 +1309,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) | |||
1310 | 1309 | ||
1311 | if (status == connector_status_connected) { | 1310 | if (status == connector_status_connected) { |
1312 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1311 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1313 | if (intel_sdvo_connector->force_audio) | 1312 | if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO) |
1314 | intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; | 1313 | intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON); |
1315 | } | 1314 | } |
1316 | 1315 | ||
1317 | return status; | 1316 | return status; |
@@ -1684,10 +1683,10 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1684 | 1683 | ||
1685 | intel_sdvo_connector->force_audio = i; | 1684 | intel_sdvo_connector->force_audio = i; |
1686 | 1685 | ||
1687 | if (i == 0) | 1686 | if (i == HDMI_AUDIO_AUTO) |
1688 | has_audio = intel_sdvo_detect_hdmi_audio(connector); | 1687 | has_audio = intel_sdvo_detect_hdmi_audio(connector); |
1689 | else | 1688 | else |
1690 | has_audio = i > 0; | 1689 | has_audio = (i == HDMI_AUDIO_ON); |
1691 | 1690 | ||
1692 | if (has_audio == intel_sdvo->has_hdmi_audio) | 1691 | if (has_audio == intel_sdvo->has_hdmi_audio) |
1693 | return 0; | 1692 | return 0; |
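force_audio changes here from a bare int (negative = off, 0 = auto, positive = on) to the enum hdmi_force_audio type already used by the HDMI code, and the comparisons above only rely on HDMI_AUDIO_AUTO and HDMI_AUDIO_ON. The enum itself is declared elsewhere in the driver; a sketch consistent with the old integer semantics (values are an assumption, only the ordering is implied by this hunk) would be:

	/* Assumed shape of the shared enum, not part of this hunk: keeping
	 * AUTO at 0 and ON above it preserves the old "i == 0" / "i > 0"
	 * behaviour of the removed code. */
	enum hdmi_force_audio {
		HDMI_AUDIO_OFF_DVI = -2,	/* treat sink as DVI, no audio */
		HDMI_AUDIO_OFF,			/* force audio off */
		HDMI_AUDIO_AUTO,		/* trust the EDID */
		HDMI_AUDIO_ON,			/* force audio on */
	};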
@@ -1985,7 +1984,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, | |||
1985 | drm_connector_helper_add(&connector->base.base, | 1984 | drm_connector_helper_add(&connector->base.base, |
1986 | &intel_sdvo_connector_helper_funcs); | 1985 | &intel_sdvo_connector_helper_funcs); |
1987 | 1986 | ||
1988 | connector->base.base.interlace_allowed = 0; | 1987 | connector->base.base.interlace_allowed = 1; |
1989 | connector->base.base.doublescan_allowed = 0; | 1988 | connector->base.base.doublescan_allowed = 0; |
1990 | connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; | 1989 | connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; |
1991 | 1990 | ||
@@ -2277,10 +2276,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, | |||
2277 | intel_sdvo_connector->max_##name = data_value[0]; \ | 2276 | intel_sdvo_connector->max_##name = data_value[0]; \ |
2278 | intel_sdvo_connector->cur_##name = response; \ | 2277 | intel_sdvo_connector->cur_##name = response; \ |
2279 | intel_sdvo_connector->name = \ | 2278 | intel_sdvo_connector->name = \ |
2280 | drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \ | 2279 | drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ |
2281 | if (!intel_sdvo_connector->name) return false; \ | 2280 | if (!intel_sdvo_connector->name) return false; \ |
2282 | intel_sdvo_connector->name->values[0] = 0; \ | ||
2283 | intel_sdvo_connector->name->values[1] = data_value[0]; \ | ||
2284 | drm_connector_attach_property(connector, \ | 2281 | drm_connector_attach_property(connector, \ |
2285 | intel_sdvo_connector->name, \ | 2282 | intel_sdvo_connector->name, \ |
2286 | intel_sdvo_connector->cur_##name); \ | 2283 | intel_sdvo_connector->cur_##name); \ |
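The property conversions in this and the following hunks all follow one pattern: the old code created a two-value RANGE property and then filled the bounds in by hand, while drm_property_create_range() takes the bounds directly and implies DRM_MODE_PROP_RANGE. Shown with the dot_crawl property as the example (prop stands in for the intel_sdvo_connector field):

	/* Before: "2" is the number of values, which must be set manually. */
	prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
	if (!prop)
		return false;
	prop->values[0] = 0;
	prop->values[1] = 1;

	/* After: min/max passed directly; the second argument is extra
	 * property flags, 0 here. */
	prop = drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
	if (!prop)
		return false;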
@@ -2314,25 +2311,19 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, | |||
2314 | intel_sdvo_connector->left_margin = data_value[0] - response; | 2311 | intel_sdvo_connector->left_margin = data_value[0] - response; |
2315 | intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; | 2312 | intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; |
2316 | intel_sdvo_connector->left = | 2313 | intel_sdvo_connector->left = |
2317 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2314 | drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); |
2318 | "left_margin", 2); | ||
2319 | if (!intel_sdvo_connector->left) | 2315 | if (!intel_sdvo_connector->left) |
2320 | return false; | 2316 | return false; |
2321 | 2317 | ||
2322 | intel_sdvo_connector->left->values[0] = 0; | ||
2323 | intel_sdvo_connector->left->values[1] = data_value[0]; | ||
2324 | drm_connector_attach_property(connector, | 2318 | drm_connector_attach_property(connector, |
2325 | intel_sdvo_connector->left, | 2319 | intel_sdvo_connector->left, |
2326 | intel_sdvo_connector->left_margin); | 2320 | intel_sdvo_connector->left_margin); |
2327 | 2321 | ||
2328 | intel_sdvo_connector->right = | 2322 | intel_sdvo_connector->right = |
2329 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2323 | drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); |
2330 | "right_margin", 2); | ||
2331 | if (!intel_sdvo_connector->right) | 2324 | if (!intel_sdvo_connector->right) |
2332 | return false; | 2325 | return false; |
2333 | 2326 | ||
2334 | intel_sdvo_connector->right->values[0] = 0; | ||
2335 | intel_sdvo_connector->right->values[1] = data_value[0]; | ||
2336 | drm_connector_attach_property(connector, | 2327 | drm_connector_attach_property(connector, |
2337 | intel_sdvo_connector->right, | 2328 | intel_sdvo_connector->right, |
2338 | intel_sdvo_connector->right_margin); | 2329 | intel_sdvo_connector->right_margin); |
@@ -2356,25 +2347,21 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, | |||
2356 | intel_sdvo_connector->top_margin = data_value[0] - response; | 2347 | intel_sdvo_connector->top_margin = data_value[0] - response; |
2357 | intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; | 2348 | intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; |
2358 | intel_sdvo_connector->top = | 2349 | intel_sdvo_connector->top = |
2359 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2350 | drm_property_create_range(dev, 0, |
2360 | "top_margin", 2); | 2351 | "top_margin", 0, data_value[0]); |
2361 | if (!intel_sdvo_connector->top) | 2352 | if (!intel_sdvo_connector->top) |
2362 | return false; | 2353 | return false; |
2363 | 2354 | ||
2364 | intel_sdvo_connector->top->values[0] = 0; | ||
2365 | intel_sdvo_connector->top->values[1] = data_value[0]; | ||
2366 | drm_connector_attach_property(connector, | 2355 | drm_connector_attach_property(connector, |
2367 | intel_sdvo_connector->top, | 2356 | intel_sdvo_connector->top, |
2368 | intel_sdvo_connector->top_margin); | 2357 | intel_sdvo_connector->top_margin); |
2369 | 2358 | ||
2370 | intel_sdvo_connector->bottom = | 2359 | intel_sdvo_connector->bottom = |
2371 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2360 | drm_property_create_range(dev, 0, |
2372 | "bottom_margin", 2); | 2361 | "bottom_margin", 0, data_value[0]); |
2373 | if (!intel_sdvo_connector->bottom) | 2362 | if (!intel_sdvo_connector->bottom) |
2374 | return false; | 2363 | return false; |
2375 | 2364 | ||
2376 | intel_sdvo_connector->bottom->values[0] = 0; | ||
2377 | intel_sdvo_connector->bottom->values[1] = data_value[0]; | ||
2378 | drm_connector_attach_property(connector, | 2365 | drm_connector_attach_property(connector, |
2379 | intel_sdvo_connector->bottom, | 2366 | intel_sdvo_connector->bottom, |
2380 | intel_sdvo_connector->bottom_margin); | 2367 | intel_sdvo_connector->bottom_margin); |
@@ -2403,12 +2390,10 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, | |||
2403 | intel_sdvo_connector->max_dot_crawl = 1; | 2390 | intel_sdvo_connector->max_dot_crawl = 1; |
2404 | intel_sdvo_connector->cur_dot_crawl = response & 0x1; | 2391 | intel_sdvo_connector->cur_dot_crawl = response & 0x1; |
2405 | intel_sdvo_connector->dot_crawl = | 2392 | intel_sdvo_connector->dot_crawl = |
2406 | drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2); | 2393 | drm_property_create_range(dev, 0, "dot_crawl", 0, 1); |
2407 | if (!intel_sdvo_connector->dot_crawl) | 2394 | if (!intel_sdvo_connector->dot_crawl) |
2408 | return false; | 2395 | return false; |
2409 | 2396 | ||
2410 | intel_sdvo_connector->dot_crawl->values[0] = 0; | ||
2411 | intel_sdvo_connector->dot_crawl->values[1] = 1; | ||
2412 | drm_connector_attach_property(connector, | 2397 | drm_connector_attach_property(connector, |
2413 | intel_sdvo_connector->dot_crawl, | 2398 | intel_sdvo_connector->dot_crawl, |
2414 | intel_sdvo_connector->cur_dot_crawl); | 2399 | intel_sdvo_connector->cur_dot_crawl); |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index a0835040c86b..7aa0450399a1 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -501,7 +501,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
501 | intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); | 501 | intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); |
502 | mutex_lock(&dev->struct_mutex); | 502 | mutex_lock(&dev->struct_mutex); |
503 | } | 503 | } |
504 | i915_gem_object_unpin(old_obj); | 504 | intel_unpin_fb_obj(old_obj); |
505 | } | 505 | } |
506 | 506 | ||
507 | out_unlock: | 507 | out_unlock: |
@@ -528,7 +528,7 @@ intel_disable_plane(struct drm_plane *plane) | |||
528 | goto out; | 528 | goto out; |
529 | 529 | ||
530 | mutex_lock(&dev->struct_mutex); | 530 | mutex_lock(&dev->struct_mutex); |
531 | i915_gem_object_unpin(intel_plane->obj); | 531 | intel_unpin_fb_obj(intel_plane->obj); |
532 | intel_plane->obj = NULL; | 532 | intel_plane->obj = NULL; |
533 | mutex_unlock(&dev->struct_mutex); | 533 | mutex_unlock(&dev->struct_mutex); |
534 | out: | 534 | out: |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 1571be37ce3e..05f765ef5464 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1240,7 +1240,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1240 | int type; | 1240 | int type; |
1241 | 1241 | ||
1242 | mode = reported_modes[0]; | 1242 | mode = reported_modes[0]; |
1243 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1243 | drm_mode_set_crtcinfo(&mode, 0); |
1244 | 1244 | ||
1245 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { | 1245 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { |
1246 | type = intel_tv_detect_type(intel_tv, connector); | 1246 | type = intel_tv_detect_type(intel_tv, connector); |
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 5ccb65deb83c..507aa3df0168 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c | |||
@@ -403,6 +403,8 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) | |||
403 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | 403 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; |
404 | dev_priv->chipset = flags; | 404 | dev_priv->chipset = flags; |
405 | 405 | ||
406 | pci_set_master(dev->pdev); | ||
407 | |||
406 | dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); | 408 | dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); |
407 | dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); | 409 | dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); |
408 | 410 | ||
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 9f27e3d9e69a..1a2ad7eb1734 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
@@ -14,7 +14,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
14 | nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \ | 14 | nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \ |
15 | nv04_timer.o \ | 15 | nv04_timer.o \ |
16 | nv04_mc.o nv40_mc.o nv50_mc.o \ | 16 | nv04_mc.o nv40_mc.o nv50_mc.o \ |
17 | nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ | 17 | nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ |
18 | nv50_fb.o nvc0_fb.o \ | ||
18 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \ | 19 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \ |
19 | nv04_graph.o nv10_graph.o nv20_graph.o \ | 20 | nv04_graph.o nv10_graph.o nv20_graph.o \ |
20 | nv40_graph.o nv50_graph.o nvc0_graph.o \ | 21 | nv40_graph.o nv50_graph.o nvc0_graph.o \ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e5cbead85e50..8dbeeea91872 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -65,195 +65,232 @@ static bool nv_cksum(const uint8_t *data, unsigned int length) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | static int | 67 | static int |
68 | score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable) | 68 | score_vbios(struct nvbios *bios, const bool writeable) |
69 | { | 69 | { |
70 | if (!(data[0] == 0x55 && data[1] == 0xAA)) { | 70 | if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) { |
71 | NV_TRACEWARN(dev, "... BIOS signature not found\n"); | 71 | NV_TRACEWARN(bios->dev, "... BIOS signature not found\n"); |
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | if (nv_cksum(data, data[2] * 512)) { | 75 | if (nv_cksum(bios->data, bios->data[2] * 512)) { |
76 | NV_TRACEWARN(dev, "... BIOS checksum invalid\n"); | 76 | NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n"); |
77 | /* if a ro image is somewhat bad, it's probably all rubbish */ | 77 | /* if a ro image is somewhat bad, it's probably all rubbish */ |
78 | return writeable ? 2 : 1; | 78 | return writeable ? 2 : 1; |
79 | } else | 79 | } |
80 | NV_TRACE(dev, "... appears to be valid\n"); | ||
81 | 80 | ||
81 | NV_TRACE(bios->dev, "... appears to be valid\n"); | ||
82 | return 3; | 82 | return 3; |
83 | } | 83 | } |
84 | 84 | ||
85 | static void load_vbios_prom(struct drm_device *dev, uint8_t *data) | 85 | static void |
86 | bios_shadow_prom(struct nvbios *bios) | ||
86 | { | 87 | { |
88 | struct drm_device *dev = bios->dev; | ||
87 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 89 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
88 | uint32_t pci_nv_20, save_pci_nv_20; | 90 | u32 pcireg, access; |
89 | int pcir_ptr; | 91 | u16 pcir; |
90 | int i; | 92 | int i; |
91 | 93 | ||
94 | /* enable access to rom */ | ||
92 | if (dev_priv->card_type >= NV_50) | 95 | if (dev_priv->card_type >= NV_50) |
93 | pci_nv_20 = 0x88050; | 96 | pcireg = 0x088050; |
94 | else | 97 | else |
95 | pci_nv_20 = NV_PBUS_PCI_NV_20; | 98 | pcireg = NV_PBUS_PCI_NV_20; |
99 | access = nv_mask(dev, pcireg, 0x00000001, 0x00000000); | ||
96 | 100 | ||
97 | /* enable ROM access */ | 101 | /* bail if no rom signature, with a workaround for a PROM reading |
98 | save_pci_nv_20 = nvReadMC(dev, pci_nv_20); | 102 | * issue on some chipsets. the first read after a period of |
99 | nvWriteMC(dev, pci_nv_20, | 103 | * inactivity returns the wrong result, so retry the first header |
100 | save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); | 104 | * byte a few times before giving up as a workaround |
105 | */ | ||
106 | i = 16; | ||
107 | do { | ||
108 | if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55) | ||
109 | break; | ||
110 | } while (i--); | ||
101 | 111 | ||
102 | /* bail if no rom signature */ | 112 | if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa) |
103 | if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 || | ||
104 | nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa) | ||
105 | goto out; | 113 | goto out; |
106 | 114 | ||
107 | /* additional check (see note below) - read PCI record header */ | 115 | /* additional check (see note below) - read PCI record header */ |
108 | pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) | | 116 | pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) | |
109 | nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8; | 117 | nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8; |
110 | if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' || | 118 | if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' || |
111 | nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' || | 119 | nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' || |
112 | nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' || | 120 | nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' || |
113 | nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R') | 121 | nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R') |
114 | goto out; | 122 | goto out; |
115 | 123 | ||
116 | /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a | 124 | /* read entire bios image to system memory */ |
117 | * a good read may be obtained by waiting or re-reading (cargocult: 5x) | 125 | bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512; |
118 | * each byte. we'll hope pramin has something usable instead | 126 | bios->data = kmalloc(bios->length, GFP_KERNEL); |
119 | */ | 127 | if (bios->data) { |
120 | for (i = 0; i < NV_PROM_SIZE; i++) | 128 | for (i = 0; i < bios->length; i++) |
121 | data[i] = nv_rd08(dev, NV_PROM_OFFSET + i); | 129 | bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i); |
130 | } | ||
122 | 131 | ||
123 | out: | 132 | out: |
124 | /* disable ROM access */ | 133 | /* disable access to rom */ |
125 | nvWriteMC(dev, pci_nv_20, | 134 | nv_wr32(dev, pcireg, access); |
126 | save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); | ||
127 | } | 135 | } |
128 | 136 | ||
129 | static void load_vbios_pramin(struct drm_device *dev, uint8_t *data) | 137 | static void |
138 | bios_shadow_pramin(struct nvbios *bios) | ||
130 | { | 139 | { |
140 | struct drm_device *dev = bios->dev; | ||
131 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 141 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
132 | uint32_t old_bar0_pramin = 0; | 142 | u32 bar0 = 0; |
133 | int i; | 143 | int i; |
134 | 144 | ||
135 | if (dev_priv->card_type >= NV_50) { | 145 | if (dev_priv->card_type >= NV_50) { |
136 | u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8; | 146 | u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8; |
137 | if (!addr) { | 147 | if (!addr) { |
138 | addr = (u64)nv_rd32(dev, 0x1700) << 16; | 148 | addr = (u64)nv_rd32(dev, 0x001700) << 16; |
139 | addr += 0xf0000; | 149 | addr += 0xf0000; |
140 | } | 150 | } |
141 | 151 | ||
142 | old_bar0_pramin = nv_rd32(dev, 0x1700); | 152 | bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16); |
143 | nv_wr32(dev, 0x1700, addr >> 16); | ||
144 | } | 153 | } |
145 | 154 | ||
146 | /* bail if no rom signature */ | 155 | /* bail if no rom signature */ |
147 | if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 || | 156 | if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 || |
148 | nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa) | 157 | nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa) |
149 | goto out; | 158 | goto out; |
150 | 159 | ||
151 | for (i = 0; i < NV_PROM_SIZE; i++) | 160 | bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512; |
152 | data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i); | 161 | bios->data = kmalloc(bios->length, GFP_KERNEL); |
162 | if (bios->data) { | ||
163 | for (i = 0; i < bios->length; i++) | ||
164 | bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i); | ||
165 | } | ||
153 | 166 | ||
154 | out: | 167 | out: |
155 | if (dev_priv->card_type >= NV_50) | 168 | if (dev_priv->card_type >= NV_50) |
156 | nv_wr32(dev, 0x1700, old_bar0_pramin); | 169 | nv_wr32(dev, 0x001700, bar0); |
157 | } | 170 | } |
158 | 171 | ||
159 | static void load_vbios_pci(struct drm_device *dev, uint8_t *data) | 172 | static void |
173 | bios_shadow_pci(struct nvbios *bios) | ||
174 | { | ||
175 | struct pci_dev *pdev = bios->dev->pdev; | ||
176 | size_t length; | ||
177 | |||
178 | if (!pci_enable_rom(pdev)) { | ||
179 | void __iomem *rom = pci_map_rom(pdev, &length); | ||
180 | if (rom) { | ||
181 | bios->data = kmalloc(length, GFP_KERNEL); | ||
182 | if (bios->data) { | ||
183 | memcpy_fromio(bios->data, rom, length); | ||
184 | bios->length = length; | ||
185 | } | ||
186 | pci_unmap_rom(pdev, rom); | ||
187 | } | ||
188 | |||
189 | pci_disable_rom(pdev); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | static void | ||
194 | bios_shadow_acpi(struct nvbios *bios) | ||
160 | { | 195 | { |
161 | void __iomem *rom = NULL; | 196 | struct pci_dev *pdev = bios->dev->pdev; |
162 | size_t rom_len; | 197 | int ptr, len, ret; |
163 | int ret; | 198 | u8 data[3]; |
164 | 199 | ||
165 | ret = pci_enable_rom(dev->pdev); | 200 | if (!nouveau_acpi_rom_supported(pdev)) |
166 | if (ret) | ||
167 | return; | 201 | return; |
168 | 202 | ||
169 | rom = pci_map_rom(dev->pdev, &rom_len); | 203 | ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data)); |
170 | if (!rom) | 204 | if (ret != sizeof(data)) |
171 | goto out; | 205 | return; |
172 | memcpy_fromio(data, rom, rom_len); | ||
173 | pci_unmap_rom(dev->pdev, rom); | ||
174 | 206 | ||
175 | out: | 207 | bios->length = min(data[2] * 512, 65536); |
176 | pci_disable_rom(dev->pdev); | 208 | bios->data = kmalloc(bios->length, GFP_KERNEL); |
177 | } | 209 | if (!bios->data) |
210 | return; | ||
178 | 211 | ||
179 | static void load_vbios_acpi(struct drm_device *dev, uint8_t *data) | 212 | len = bios->length; |
180 | { | 213 | ptr = 0; |
181 | int i; | 214 | while (len) { |
182 | int ret; | 215 | int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len; |
183 | int size = 64 * 1024; | ||
184 | 216 | ||
185 | if (!nouveau_acpi_rom_supported(dev->pdev)) | 217 | ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size); |
186 | return; | 218 | if (ret != size) { |
219 | kfree(bios->data); | ||
220 | bios->data = NULL; | ||
221 | return; | ||
222 | } | ||
187 | 223 | ||
188 | for (i = 0; i < (size / ROM_BIOS_PAGE); i++) { | 224 | len -= size; |
189 | ret = nouveau_acpi_get_bios_chunk(data, | 225 | ptr += size; |
190 | (i * ROM_BIOS_PAGE), | ||
191 | ROM_BIOS_PAGE); | ||
192 | if (ret <= 0) | ||
193 | break; | ||
194 | } | 226 | } |
195 | return; | ||
196 | } | 227 | } |
197 | 228 | ||
198 | struct methods { | 229 | struct methods { |
199 | const char desc[8]; | 230 | const char desc[8]; |
200 | void (*loadbios)(struct drm_device *, uint8_t *); | 231 | void (*shadow)(struct nvbios *); |
201 | const bool rw; | 232 | const bool rw; |
233 | int score; | ||
234 | u32 size; | ||
235 | u8 *data; | ||
202 | }; | 236 | }; |
203 | 237 | ||
204 | static struct methods shadow_methods[] = { | 238 | static bool |
205 | { "PRAMIN", load_vbios_pramin, true }, | 239 | bios_shadow(struct drm_device *dev) |
206 | { "PROM", load_vbios_prom, false }, | 240 | { |
207 | { "PCIROM", load_vbios_pci, true }, | 241 | struct methods shadow_methods[] = { |
208 | { "ACPI", load_vbios_acpi, true }, | 242 | { "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL }, |
209 | }; | 243 | { "PROM", bios_shadow_prom, false, 0, 0, NULL }, |
210 | #define NUM_SHADOW_METHODS ARRAY_SIZE(shadow_methods) | 244 | { "ACPI", bios_shadow_acpi, true, 0, 0, NULL }, |
211 | 245 | { "PCIROM", bios_shadow_pci, true, 0, 0, NULL }, | |
212 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | 246 | {} |
213 | { | 247 | }; |
214 | struct methods *methods = shadow_methods; | 248 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
215 | int testscore = 3; | 249 | struct nvbios *bios = &dev_priv->vbios; |
216 | int scores[NUM_SHADOW_METHODS], i; | 250 | struct methods *mthd, *best; |
217 | 251 | ||
218 | if (nouveau_vbios) { | 252 | if (nouveau_vbios) { |
219 | for (i = 0; i < NUM_SHADOW_METHODS; i++) | 253 | mthd = shadow_methods; |
220 | if (!strcasecmp(nouveau_vbios, methods[i].desc)) | 254 | do { |
221 | break; | 255 | if (strcasecmp(nouveau_vbios, mthd->desc)) |
222 | 256 | continue; | |
223 | if (i < NUM_SHADOW_METHODS) { | 257 | NV_INFO(dev, "VBIOS source: %s\n", mthd->desc); |
224 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", | ||
225 | methods[i].desc); | ||
226 | 258 | ||
227 | methods[i].loadbios(dev, data); | 259 | mthd->shadow(bios); |
228 | if (score_vbios(dev, data, methods[i].rw)) | 260 | mthd->score = score_vbios(bios, mthd->rw); |
261 | if (mthd->score) | ||
229 | return true; | 262 | return true; |
230 | } | 263 | } while ((++mthd)->shadow); |
231 | 264 | ||
232 | NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); | 265 | NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); |
233 | } | 266 | } |
234 | 267 | ||
235 | for (i = 0; i < NUM_SHADOW_METHODS; i++) { | 268 | mthd = shadow_methods; |
236 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", | 269 | do { |
237 | methods[i].desc); | 270 | NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc); |
238 | data[0] = data[1] = 0; /* avoid reuse of previous image */ | 271 | mthd->shadow(bios); |
239 | methods[i].loadbios(dev, data); | 272 | mthd->score = score_vbios(bios, mthd->rw); |
240 | scores[i] = score_vbios(dev, data, methods[i].rw); | 273 | mthd->size = bios->length; |
241 | if (scores[i] == testscore) | 274 | mthd->data = bios->data; |
242 | return true; | 275 | } while (mthd->score != 3 && (++mthd)->shadow); |
243 | } | 276 | |
244 | 277 | mthd = shadow_methods; | |
245 | while (--testscore > 0) { | 278 | best = mthd; |
246 | for (i = 0; i < NUM_SHADOW_METHODS; i++) { | 279 | do { |
247 | if (scores[i] == testscore) { | 280 | if (mthd->score > best->score) { |
248 | NV_TRACE(dev, "Using BIOS image from %s\n", | 281 | kfree(best->data); |
249 | methods[i].desc); | 282 | best = mthd; |
250 | methods[i].loadbios(dev, data); | ||
251 | return true; | ||
252 | } | ||
253 | } | 283 | } |
284 | } while ((++mthd)->shadow); | ||
285 | |||
286 | if (best->score) { | ||
287 | NV_TRACE(dev, "Using VBIOS from %s\n", best->desc); | ||
288 | bios->length = best->size; | ||
289 | bios->data = best->data; | ||
290 | return true; | ||
254 | } | 291 | } |
255 | 292 | ||
256 | NV_ERROR(dev, "No valid BIOS image found\n"); | 293 | NV_ERROR(dev, "No valid VBIOS image found\n"); |
257 | return false; | 294 | return false; |
258 | } | 295 | } |
259 | 296 | ||
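Both score_vbios() and the new per-method shadow routines size and validate the image from the standard PCI option-ROM header: bytes 0-1 carry the 0x55 0xAA signature, byte 2 the image length in 512-byte units (also the span covered by the checksum), and bytes 0x18-0x19 point at the "PCIR" data structure that the PROM path double-checks. A standalone sketch of the signature/size check:

	#include <stdint.h>

	/* Illustration of the option-ROM header fields the shadowing code
	 * relies on; returns the image length in bytes, or 0 if the
	 * signature is missing. */
	static unsigned vbios_image_length(const uint8_t *rom)
	{
		if (rom[0] != 0x55 || rom[1] != 0xAA)
			return 0;		/* no option-ROM signature */
		return rom[2] * 512;		/* byte 2: size in 512-byte blocks */
	}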
@@ -6334,11 +6371,7 @@ static bool NVInitVBIOS(struct drm_device *dev) | |||
6334 | spin_lock_init(&bios->lock); | 6371 | spin_lock_init(&bios->lock); |
6335 | bios->dev = dev; | 6372 | bios->dev = dev; |
6336 | 6373 | ||
6337 | if (!NVShadowVBIOS(dev, bios->data)) | 6374 | return bios_shadow(dev); |
6338 | return false; | ||
6339 | |||
6340 | bios->length = NV_PROM_SIZE; | ||
6341 | return true; | ||
6342 | } | 6375 | } |
6343 | 6376 | ||
6344 | static int nouveau_parse_vbios_struct(struct drm_device *dev) | 6377 | static int nouveau_parse_vbios_struct(struct drm_device *dev) |
@@ -6498,6 +6531,10 @@ nouveau_bios_init(struct drm_device *dev) | |||
6498 | void | 6531 | void |
6499 | nouveau_bios_takedown(struct drm_device *dev) | 6532 | nouveau_bios_takedown(struct drm_device *dev) |
6500 | { | 6533 | { |
6534 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
6535 | |||
6501 | nouveau_mxm_fini(dev); | 6536 | nouveau_mxm_fini(dev); |
6502 | nouveau_i2c_fini(dev); | 6537 | nouveau_i2c_fini(dev); |
6538 | |||
6539 | kfree(dev_priv->vbios.data); | ||
6503 | } | 6540 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index a37c31e358aa..1f3233df00e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -75,6 +75,8 @@ enum dcb_connector_type { | |||
75 | DCB_CONNECTOR_eDP = 0x47, | 75 | DCB_CONNECTOR_eDP = 0x47, |
76 | DCB_CONNECTOR_HDMI_0 = 0x60, | 76 | DCB_CONNECTOR_HDMI_0 = 0x60, |
77 | DCB_CONNECTOR_HDMI_1 = 0x61, | 77 | DCB_CONNECTOR_HDMI_1 = 0x61, |
78 | DCB_CONNECTOR_DMS59_DP0 = 0x64, | ||
79 | DCB_CONNECTOR_DMS59_DP1 = 0x65, | ||
78 | DCB_CONNECTOR_NONE = 0xff | 80 | DCB_CONNECTOR_NONE = 0xff |
79 | }; | 81 | }; |
80 | 82 | ||
@@ -209,6 +211,8 @@ struct nvbios { | |||
209 | NVBIOS_BIT | 211 | NVBIOS_BIT |
210 | } type; | 212 | } type; |
211 | uint16_t offset; | 213 | uint16_t offset; |
214 | uint32_t length; | ||
215 | uint8_t *data; | ||
212 | 216 | ||
213 | uint8_t chip_version; | 217 | uint8_t chip_version; |
214 | 218 | ||
@@ -219,8 +223,6 @@ struct nvbios { | |||
219 | 223 | ||
220 | spinlock_t lock; | 224 | spinlock_t lock; |
221 | 225 | ||
222 | uint8_t data[NV_PROM_SIZE]; | ||
223 | unsigned int length; | ||
224 | bool execute; | 226 | bool execute; |
225 | 227 | ||
226 | uint8_t major_version; | 228 | uint8_t major_version; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index f3ce34be082a..9f9d50dbca7f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -519,6 +519,19 @@ nouveau_connector_set_property(struct drm_connector *connector, | |||
519 | return nv_crtc->set_dither(nv_crtc, true); | 519 | return nv_crtc->set_dither(nv_crtc, true); |
520 | } | 520 | } |
521 | 521 | ||
522 | if (nv_crtc && nv_crtc->set_color_vibrance) { | ||
523 | /* Hue */ | ||
524 | if (property == disp->vibrant_hue_property) { | ||
525 | nv_crtc->vibrant_hue = value - 90; | ||
526 | return nv_crtc->set_color_vibrance(nv_crtc, true); | ||
527 | } | ||
528 | /* Saturation */ | ||
529 | if (property == disp->color_vibrance_property) { | ||
530 | nv_crtc->color_vibrance = value - 100; | ||
531 | return nv_crtc->set_color_vibrance(nv_crtc, true); | ||
532 | } | ||
533 | } | ||
534 | |||
522 | if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV) | 535 | if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV) |
523 | return get_slave_funcs(encoder)->set_property( | 536 | return get_slave_funcs(encoder)->set_property( |
524 | encoder, connector, property, value); | 537 | encoder, connector, property, value); |
@@ -858,6 +871,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb) | |||
858 | case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID; | 871 | case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID; |
859 | case DCB_CONNECTOR_LVDS : | 872 | case DCB_CONNECTOR_LVDS : |
860 | case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS; | 873 | case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS; |
874 | case DCB_CONNECTOR_DMS59_DP0: | ||
875 | case DCB_CONNECTOR_DMS59_DP1: | ||
861 | case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort; | 876 | case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort; |
862 | case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP; | 877 | case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP; |
863 | case DCB_CONNECTOR_HDMI_0 : | 878 | case DCB_CONNECTOR_HDMI_0 : |
@@ -1002,7 +1017,9 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
1002 | nv_connector->type == DCB_CONNECTOR_DVI_I || | 1017 | nv_connector->type == DCB_CONNECTOR_DVI_I || |
1003 | nv_connector->type == DCB_CONNECTOR_HDMI_0 || | 1018 | nv_connector->type == DCB_CONNECTOR_HDMI_0 || |
1004 | nv_connector->type == DCB_CONNECTOR_HDMI_1 || | 1019 | nv_connector->type == DCB_CONNECTOR_HDMI_1 || |
1005 | nv_connector->type == DCB_CONNECTOR_DP)) { | 1020 | nv_connector->type == DCB_CONNECTOR_DP || |
1021 | nv_connector->type == DCB_CONNECTOR_DMS59_DP0 || | ||
1022 | nv_connector->type == DCB_CONNECTOR_DMS59_DP1)) { | ||
1006 | drm_connector_attach_property(connector, | 1023 | drm_connector_attach_property(connector, |
1007 | disp->underscan_property, | 1024 | disp->underscan_property, |
1008 | UNDERSCAN_OFF); | 1025 | UNDERSCAN_OFF); |
@@ -1014,6 +1031,16 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
1014 | 0); | 1031 | 0); |
1015 | } | 1032 | } |
1016 | 1033 | ||
1034 | /* Add hue and saturation options */ | ||
1035 | if (disp->vibrant_hue_property) | ||
1036 | drm_connector_attach_property(connector, | ||
1037 | disp->vibrant_hue_property, | ||
1038 | 90); | ||
1039 | if (disp->color_vibrance_property) | ||
1040 | drm_connector_attach_property(connector, | ||
1041 | disp->color_vibrance_property, | ||
1042 | 150); | ||
1043 | |||
1017 | switch (nv_connector->type) { | 1044 | switch (nv_connector->type) { |
1018 | case DCB_CONNECTOR_VGA: | 1045 | case DCB_CONNECTOR_VGA: |
1019 | if (dev_priv->card_type >= NV_50) { | 1046 | if (dev_priv->card_type >= NV_50) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index 686f6b4a1da3..e6d0d1eb0133 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h | |||
@@ -35,6 +35,8 @@ struct nouveau_crtc { | |||
35 | uint32_t dpms_saved_fp_control; | 35 | uint32_t dpms_saved_fp_control; |
36 | uint32_t fp_users; | 36 | uint32_t fp_users; |
37 | int saturation; | 37 | int saturation; |
38 | int color_vibrance; | ||
39 | int vibrant_hue; | ||
38 | int sharpness; | 40 | int sharpness; |
39 | int last_dpms; | 41 | int last_dpms; |
40 | 42 | ||
@@ -67,6 +69,7 @@ struct nouveau_crtc { | |||
67 | 69 | ||
68 | int (*set_dither)(struct nouveau_crtc *crtc, bool update); | 70 | int (*set_dither)(struct nouveau_crtc *crtc, bool update); |
69 | int (*set_scale)(struct nouveau_crtc *crtc, bool update); | 71 | int (*set_scale)(struct nouveau_crtc *crtc, bool update); |
72 | int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update); | ||
70 | }; | 73 | }; |
71 | 74 | ||
72 | static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc) | 75 | static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 795a9e3c990a..c01ae781e2a7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -155,20 +155,20 @@ static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | |||
155 | }; | 155 | }; |
156 | 156 | ||
157 | 157 | ||
158 | struct drm_prop_enum_list { | 158 | struct nouveau_drm_prop_enum_list { |
159 | u8 gen_mask; | 159 | u8 gen_mask; |
160 | int type; | 160 | int type; |
161 | char *name; | 161 | char *name; |
162 | }; | 162 | }; |
163 | 163 | ||
164 | static struct drm_prop_enum_list underscan[] = { | 164 | static struct nouveau_drm_prop_enum_list underscan[] = { |
165 | { 6, UNDERSCAN_AUTO, "auto" }, | 165 | { 6, UNDERSCAN_AUTO, "auto" }, |
166 | { 6, UNDERSCAN_OFF, "off" }, | 166 | { 6, UNDERSCAN_OFF, "off" }, |
167 | { 6, UNDERSCAN_ON, "on" }, | 167 | { 6, UNDERSCAN_ON, "on" }, |
168 | {} | 168 | {} |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static struct drm_prop_enum_list dither_mode[] = { | 171 | static struct nouveau_drm_prop_enum_list dither_mode[] = { |
172 | { 7, DITHERING_MODE_AUTO, "auto" }, | 172 | { 7, DITHERING_MODE_AUTO, "auto" }, |
173 | { 7, DITHERING_MODE_OFF, "off" }, | 173 | { 7, DITHERING_MODE_OFF, "off" }, |
174 | { 1, DITHERING_MODE_ON, "on" }, | 174 | { 1, DITHERING_MODE_ON, "on" }, |
@@ -178,7 +178,7 @@ static struct drm_prop_enum_list dither_mode[] = { | |||
178 | {} | 178 | {} |
179 | }; | 179 | }; |
180 | 180 | ||
181 | static struct drm_prop_enum_list dither_depth[] = { | 181 | static struct nouveau_drm_prop_enum_list dither_depth[] = { |
182 | { 6, DITHERING_DEPTH_AUTO, "auto" }, | 182 | { 6, DITHERING_DEPTH_AUTO, "auto" }, |
183 | { 6, DITHERING_DEPTH_6BPC, "6 bpc" }, | 183 | { 6, DITHERING_DEPTH_6BPC, "6 bpc" }, |
184 | { 6, DITHERING_DEPTH_8BPC, "8 bpc" }, | 184 | { 6, DITHERING_DEPTH_8BPC, "8 bpc" }, |
@@ -186,7 +186,7 @@ static struct drm_prop_enum_list dither_depth[] = { | |||
186 | }; | 186 | }; |
187 | 187 | ||
188 | #define PROP_ENUM(p,gen,n,list) do { \ | 188 | #define PROP_ENUM(p,gen,n,list) do { \ |
189 | struct drm_prop_enum_list *l = (list); \ | 189 | struct nouveau_drm_prop_enum_list *l = (list); \ |
190 | int c = 0; \ | 190 | int c = 0; \ |
191 | while (l->gen_mask) { \ | 191 | while (l->gen_mask) { \ |
192 | if (l->gen_mask & (1 << (gen))) \ | 192 | if (l->gen_mask & (1 << (gen))) \ |
@@ -281,16 +281,24 @@ nouveau_display_create(struct drm_device *dev) | |||
281 | PROP_ENUM(disp->underscan_property, gen, "underscan", underscan); | 281 | PROP_ENUM(disp->underscan_property, gen, "underscan", underscan); |
282 | 282 | ||
283 | disp->underscan_hborder_property = | 283 | disp->underscan_hborder_property = |
284 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 284 | drm_property_create_range(dev, 0, "underscan hborder", 0, 128); |
285 | "underscan hborder", 2); | ||
286 | disp->underscan_hborder_property->values[0] = 0; | ||
287 | disp->underscan_hborder_property->values[1] = 128; | ||
288 | 285 | ||
289 | disp->underscan_vborder_property = | 286 | disp->underscan_vborder_property = |
290 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 287 | drm_property_create_range(dev, 0, "underscan vborder", 0, 128); |
291 | "underscan vborder", 2); | 288 | |
292 | disp->underscan_vborder_property->values[0] = 0; | 289 | if (gen == 1) { |
293 | disp->underscan_vborder_property->values[1] = 128; | 290 | disp->vibrant_hue_property = |
291 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
292 | "vibrant hue", 2); | ||
293 | disp->vibrant_hue_property->values[0] = 0; | ||
294 | disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */ | ||
295 | |||
296 | disp->color_vibrance_property = | ||
297 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
298 | "color vibrance", 2); | ||
299 | disp->color_vibrance_property->values[0] = 0; | ||
300 | disp->color_vibrance_property->values[1] = 200; /* -100..+100 */ | ||
301 | } | ||
294 | 302 | ||
295 | dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; | 303 | dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs; |
296 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1); | 304 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1); |
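The two new NV1x-only range properties are stored as unsigned values and re-centred when written back by the connector code shown earlier in this patch, so 0..180 maps to a hue of -90..+90 and 0..200 to a vibrance of -100..+100; the defaults attached in nouveau_connector.c are 90 (neutral hue) and 150 (+50 vibrance). Fragment of the mapping, taken from that hunk:

	/* see nouveau_connector_set_property() earlier in this patch */
	nv_crtc->vibrant_hue    = value - 90;	/* property 0..180 -> -90..+90   */
	nv_crtc->color_vibrance = value - 100;	/* property 0..200 -> -100..+100 */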
@@ -309,6 +317,9 @@ nouveau_display_create(struct drm_device *dev) | |||
309 | dev->mode_config.max_height = 8192; | 317 | dev->mode_config.max_height = 8192; |
310 | } | 318 | } |
311 | 319 | ||
320 | dev->mode_config.preferred_depth = 24; | ||
321 | dev->mode_config.prefer_shadow = 1; | ||
322 | |||
312 | drm_kms_helper_poll_init(dev); | 323 | drm_kms_helper_poll_init(dev); |
313 | drm_kms_helper_poll_disable(dev); | 324 | drm_kms_helper_poll_disable(dev); |
314 | 325 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 9b93b703ceab..302b2f7d0678 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -161,116 +161,6 @@ out: | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static u32 | ||
165 | dp_link_bw_get(struct drm_device *dev, int or, int link) | ||
166 | { | ||
167 | u32 ctrl = nv_rd32(dev, 0x614300 + (or * 0x800)); | ||
168 | if (!(ctrl & 0x000c0000)) | ||
169 | return 162000; | ||
170 | return 270000; | ||
171 | } | ||
172 | |||
173 | static int | ||
174 | dp_lane_count_get(struct drm_device *dev, int or, int link) | ||
175 | { | ||
176 | u32 ctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); | ||
177 | switch (ctrl & 0x000f0000) { | ||
178 | case 0x00010000: return 1; | ||
179 | case 0x00030000: return 2; | ||
180 | default: | ||
181 | return 4; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | void | ||
186 | nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp) | ||
187 | { | ||
188 | const u32 symbol = 100000; | ||
189 | int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; | ||
190 | int TU, VTUi, VTUf, VTUa; | ||
191 | u64 link_data_rate, link_ratio, unk; | ||
192 | u32 best_diff = 64 * symbol; | ||
193 | u32 link_nr, link_bw, r; | ||
194 | |||
195 | /* calculate packed data rate for each lane */ | ||
196 | link_nr = dp_lane_count_get(dev, or, link); | ||
197 | link_data_rate = (clk * bpp / 8) / link_nr; | ||
198 | |||
199 | /* calculate ratio of packed data rate to link symbol rate */ | ||
200 | link_bw = dp_link_bw_get(dev, or, link); | ||
201 | link_ratio = link_data_rate * symbol; | ||
202 | r = do_div(link_ratio, link_bw); | ||
203 | |||
204 | for (TU = 64; TU >= 32; TU--) { | ||
205 | /* calculate average number of valid symbols in each TU */ | ||
206 | u32 tu_valid = link_ratio * TU; | ||
207 | u32 calc, diff; | ||
208 | |||
209 | /* find a hw representation for the fraction.. */ | ||
210 | VTUi = tu_valid / symbol; | ||
211 | calc = VTUi * symbol; | ||
212 | diff = tu_valid - calc; | ||
213 | if (diff) { | ||
214 | if (diff >= (symbol / 2)) { | ||
215 | VTUf = symbol / (symbol - diff); | ||
216 | if (symbol - (VTUf * diff)) | ||
217 | VTUf++; | ||
218 | |||
219 | if (VTUf <= 15) { | ||
220 | VTUa = 1; | ||
221 | calc += symbol - (symbol / VTUf); | ||
222 | } else { | ||
223 | VTUa = 0; | ||
224 | VTUf = 1; | ||
225 | calc += symbol; | ||
226 | } | ||
227 | } else { | ||
228 | VTUa = 0; | ||
229 | VTUf = min((int)(symbol / diff), 15); | ||
230 | calc += symbol / VTUf; | ||
231 | } | ||
232 | |||
233 | diff = calc - tu_valid; | ||
234 | } else { | ||
235 | /* no remainder, but the hw doesn't like the fractional | ||
236 | * part to be zero. decrement the integer part and | ||
237 | * have the fraction add a whole symbol back | ||
238 | */ | ||
239 | VTUa = 0; | ||
240 | VTUf = 1; | ||
241 | VTUi--; | ||
242 | } | ||
243 | |||
244 | if (diff < best_diff) { | ||
245 | best_diff = diff; | ||
246 | bestTU = TU; | ||
247 | bestVTUa = VTUa; | ||
248 | bestVTUf = VTUf; | ||
249 | bestVTUi = VTUi; | ||
250 | if (diff == 0) | ||
251 | break; | ||
252 | } | ||
253 | } | ||
254 | |||
255 | if (!bestTU) { | ||
256 | NV_ERROR(dev, "DP: unable to find suitable config\n"); | ||
257 | return; | ||
258 | } | ||
259 | |||
260 | /* XXX close to vbios numbers, but not right */ | ||
261 | unk = (symbol - link_ratio) * bestTU; | ||
262 | unk *= link_ratio; | ||
263 | r = do_div(unk, symbol); | ||
264 | r = do_div(unk, symbol); | ||
265 | unk += 6; | ||
266 | |||
267 | nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2); | ||
268 | nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 | | ||
269 | bestVTUf << 16 | | ||
270 | bestVTUi << 8 | | ||
271 | unk); | ||
272 | } | ||
273 | |||
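The removed nouveau_dp_tu_update() searches for a transfer-unit size (TU, 64 down to 32) plus an integer/fraction/adjust triple (VTUi/VTUf/VTUa) that best represents the ratio of packed per-lane pixel data rate to the link symbol rate; judging by the nv50_sor_dp_calc_tu() prototype added to nouveau_encoder.h later in this patch, that calculation moves into the SOR code rather than disappearing. The standalone sketch below keeps only the rate-ratio arithmetic and uses a much simpler selection rule (smallest fractional remainder); the real VTU fitting and the register writes are left out, and the example clock and lane figures are arbitrary.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t symbol = 100000;                 /* fixed-point scale, as in the removed code */
        uint32_t clk = 148500, bpp = 24, link_nr = 4, link_bw = 270000;
        uint64_t link_data_rate, link_ratio, best_rem;
        int TU, bestTU = 0;

        /* packed data rate per lane, then its ratio to the link symbol rate */
        link_data_rate = ((uint64_t)clk * bpp / 8) / link_nr;
        link_ratio = link_data_rate * symbol / link_bw;

        /* simplified: pick the TU whose valid-symbol count is closest to a
         * whole number; the driver instead fits a VTUi/VTUf/VTUa triple to
         * the fractional part and programs it into the SOR registers.
         */
        best_rem = symbol;
        for (TU = 64; TU >= 32; TU--) {
                uint64_t rem = (link_ratio * TU) % symbol;
                if (rem < best_rem) {
                        best_rem = rem;
                        bestTU = TU;
                }
        }

        printf("link ratio %llu.%05llu symbols, best TU %d\n",
               (unsigned long long)(link_ratio / symbol),
               (unsigned long long)(link_ratio % symbol), bestTU);
        return 0;
}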
274 | u8 * | 164 | u8 * |
275 | nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) | 165 | nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) |
276 | { | 166 | { |
@@ -318,13 +208,10 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) | |||
318 | * link training | 208 | * link training |
319 | *****************************************************************************/ | 209 | *****************************************************************************/ |
320 | struct dp_state { | 210 | struct dp_state { |
211 | struct dp_train_func *func; | ||
321 | struct dcb_entry *dcb; | 212 | struct dcb_entry *dcb; |
322 | u8 *table; | ||
323 | u8 *entry; | ||
324 | int auxch; | 213 | int auxch; |
325 | int crtc; | 214 | int crtc; |
326 | int or; | ||
327 | int link; | ||
328 | u8 *dpcd; | 215 | u8 *dpcd; |
329 | int link_nr; | 216 | int link_nr; |
330 | u32 link_bw; | 217 | u32 link_bw; |
@@ -335,142 +222,58 @@ struct dp_state { | |||
335 | static void | 222 | static void |
336 | dp_set_link_config(struct drm_device *dev, struct dp_state *dp) | 223 | dp_set_link_config(struct drm_device *dev, struct dp_state *dp) |
337 | { | 224 | { |
338 | int or = dp->or, link = dp->link; | 225 | u8 sink[2]; |
339 | u8 *entry, sink[2]; | ||
340 | u32 dp_ctrl; | ||
341 | u16 script; | ||
342 | 226 | ||
343 | NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); | 227 | NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); |
344 | 228 | ||
345 | /* set selected link rate on source */ | 229 | /* set desired link configuration on the source */ |
346 | switch (dp->link_bw) { | 230 | dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, |
347 | case 270000: | 231 | dp->dpcd[2] & DP_ENHANCED_FRAME_CAP); |
348 | nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00040000); | ||
349 | sink[0] = DP_LINK_BW_2_7; | ||
350 | break; | ||
351 | default: | ||
352 | nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00000000); | ||
353 | sink[0] = DP_LINK_BW_1_62; | ||
354 | break; | ||
355 | } | ||
356 | |||
357 | /* offset +0x0a of each dp encoder table entry is a pointer to another | ||
358 | * table, that has (among other things) pointers to more scripts that | ||
359 | * need to be executed, this time depending on link speed. | ||
360 | */ | ||
361 | entry = ROMPTR(dev, dp->entry[10]); | ||
362 | if (entry) { | ||
363 | if (dp->table[0] < 0x30) { | ||
364 | while (dp->link_bw < (ROM16(entry[0]) * 10)) | ||
365 | entry += 4; | ||
366 | script = ROM16(entry[2]); | ||
367 | } else { | ||
368 | while (dp->link_bw < (entry[0] * 27000)) | ||
369 | entry += 3; | ||
370 | script = ROM16(entry[1]); | ||
371 | } | ||
372 | |||
373 | nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); | ||
374 | } | ||
375 | 232 | ||
376 | /* configure lane count on the source */ | 233 | /* inform the sink of the new configuration */ |
377 | dp_ctrl = ((1 << dp->link_nr) - 1) << 16; | 234 | sink[0] = dp->link_bw / 27000; |
378 | sink[1] = dp->link_nr; | 235 | sink[1] = dp->link_nr; |
379 | if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) { | 236 | if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) |
380 | dp_ctrl |= 0x00004000; | ||
381 | sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 237 | sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
382 | } | ||
383 | |||
384 | nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x001f4000, dp_ctrl); | ||
385 | 238 | ||
386 | /* inform the sink of the new configuration */ | ||
387 | auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2); | 239 | auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2); |
388 | } | 240 | } |
389 | 241 | ||
390 | static void | 242 | static void |
391 | dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 tp) | 243 | dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) |
392 | { | 244 | { |
393 | u8 sink_tp; | 245 | u8 sink_tp; |
394 | 246 | ||
395 | NV_DEBUG_KMS(dev, "training pattern %d\n", tp); | 247 | NV_DEBUG_KMS(dev, "training pattern %d\n", pattern); |
396 | 248 | ||
397 | nv_mask(dev, NV50_SOR_DP_CTRL(dp->or, dp->link), 0x0f000000, tp << 24); | 249 | dp->func->train_set(dev, dp->dcb, pattern); |
398 | 250 | ||
399 | auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1); | 251 | auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1); |
400 | sink_tp &= ~DP_TRAINING_PATTERN_MASK; | 252 | sink_tp &= ~DP_TRAINING_PATTERN_MASK; |
401 | sink_tp |= tp; | 253 | sink_tp |= pattern; |
402 | auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1); | 254 | auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1); |
403 | } | 255 | } |
404 | 256 | ||
405 | static const u8 nv50_lane_map[] = { 16, 8, 0, 24 }; | ||
406 | static const u8 nvaf_lane_map[] = { 24, 16, 8, 0 }; | ||
407 | |||
408 | static int | 257 | static int |
409 | dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) | 258 | dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) |
410 | { | 259 | { |
411 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
412 | u32 mask = 0, drv = 0, pre = 0, unk = 0; | ||
413 | const u8 *shifts; | ||
414 | int link = dp->link; | ||
415 | int or = dp->or; | ||
416 | int i; | 260 | int i; |
417 | 261 | ||
418 | if (dev_priv->chipset != 0xaf) | ||
419 | shifts = nv50_lane_map; | ||
420 | else | ||
421 | shifts = nvaf_lane_map; | ||
422 | |||
423 | for (i = 0; i < dp->link_nr; i++) { | 262 | for (i = 0; i < dp->link_nr; i++) { |
424 | u8 *conf = dp->entry + dp->table[4]; | ||
425 | u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; | 263 | u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; |
426 | u8 lpre = (lane & 0x0c) >> 2; | 264 | u8 lpre = (lane & 0x0c) >> 2; |
427 | u8 lvsw = (lane & 0x03) >> 0; | 265 | u8 lvsw = (lane & 0x03) >> 0; |
428 | 266 | ||
429 | mask |= 0xff << shifts[i]; | ||
430 | unk |= 1 << (shifts[i] >> 3); | ||
431 | |||
432 | dp->conf[i] = (lpre << 3) | lvsw; | 267 | dp->conf[i] = (lpre << 3) | lvsw; |
433 | if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200) | 268 | if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200) |
434 | dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED; | 269 | dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED; |
435 | if (lpre == DP_TRAIN_PRE_EMPHASIS_9_5) | 270 | if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5) |
436 | dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | 271 | dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
437 | 272 | ||
438 | NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]); | 273 | NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]); |
439 | 274 | dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); | |
440 | if (dp->table[0] < 0x30) { | ||
441 | u8 *last = conf + (dp->entry[4] * dp->table[5]); | ||
442 | while (lvsw != conf[0] || lpre != conf[1]) { | ||
443 | conf += dp->table[5]; | ||
444 | if (conf >= last) | ||
445 | return -EINVAL; | ||
446 | } | ||
447 | |||
448 | conf += 2; | ||
449 | } else { | ||
450 | /* no lookup table anymore, set entries for each | ||
451 | * combination of voltage swing and pre-emphasis | ||
452 | * level allowed by the DP spec. | ||
453 | */ | ||
454 | switch (lvsw) { | ||
455 | case 0: lpre += 0; break; | ||
456 | case 1: lpre += 4; break; | ||
457 | case 2: lpre += 7; break; | ||
458 | case 3: lpre += 9; break; | ||
459 | } | ||
460 | |||
461 | conf = conf + (lpre * dp->table[5]); | ||
462 | conf++; | ||
463 | } | ||
464 | |||
465 | drv |= conf[0] << shifts[i]; | ||
466 | pre |= conf[1] << shifts[i]; | ||
467 | unk = (unk & ~0x0000ff00) | (conf[2] << 8); | ||
468 | } | 275 | } |
469 | 276 | ||
470 | nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, drv); | ||
471 | nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, pre); | ||
472 | nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff0f, unk); | ||
473 | |||
474 | return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4); | 277 | return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4); |
475 | } | 278 | } |
476 | 279 | ||
@@ -554,8 +357,50 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp) | |||
554 | return eq_done ? 0 : -1; | 357 | return eq_done ? 0 : -1; |
555 | } | 358 | } |
556 | 359 | ||
360 | static void | ||
361 | dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable) | ||
362 | { | ||
363 | u16 script = 0x0000; | ||
364 | u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); | ||
365 | if (table) { | ||
366 | if (table[0] >= 0x20 && table[0] <= 0x30) { | ||
367 | if (enable) script = ROM16(entry[12]); | ||
368 | else script = ROM16(entry[14]); | ||
369 | } | ||
370 | } | ||
371 | |||
372 | nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); | ||
373 | } | ||
374 | |||
375 | static void | ||
376 | dp_link_train_init(struct drm_device *dev, struct dp_state *dp) | ||
377 | { | ||
378 | u16 script = 0x0000; | ||
379 | u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); | ||
380 | if (table) { | ||
381 | if (table[0] >= 0x20 && table[0] <= 0x30) | ||
382 | script = ROM16(entry[6]); | ||
383 | } | ||
384 | |||
385 | nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); | ||
386 | } | ||
387 | |||
388 | static void | ||
389 | dp_link_train_fini(struct drm_device *dev, struct dp_state *dp) | ||
390 | { | ||
391 | u16 script = 0x0000; | ||
392 | u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); | ||
393 | if (table) { | ||
394 | if (table[0] >= 0x20 && table[0] <= 0x30) | ||
395 | script = ROM16(entry[8]); | ||
396 | } | ||
397 | |||
398 | nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); | ||
399 | } | ||
400 | |||
557 | bool | 401 | bool |
558 | nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate) | 402 | nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, |
403 | struct dp_train_func *func) | ||
559 | { | 404 | { |
560 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 405 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
561 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | 406 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); |
@@ -571,17 +416,15 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate) | |||
571 | if (!auxch) | 416 | if (!auxch) |
572 | return false; | 417 | return false; |
573 | 418 | ||
574 | dp.table = nouveau_dp_bios_data(dev, nv_encoder->dcb, &dp.entry); | 419 | dp.func = func; |
575 | if (!dp.table) | ||
576 | return -EINVAL; | ||
577 | |||
578 | dp.dcb = nv_encoder->dcb; | 420 | dp.dcb = nv_encoder->dcb; |
579 | dp.crtc = nv_crtc->index; | 421 | dp.crtc = nv_crtc->index; |
580 | dp.auxch = auxch->drive; | 422 | dp.auxch = auxch->drive; |
581 | dp.or = nv_encoder->or; | ||
582 | dp.link = !(nv_encoder->dcb->sorconf.link & 1); | ||
583 | dp.dpcd = nv_encoder->dp.dpcd; | 423 | dp.dpcd = nv_encoder->dp.dpcd; |
584 | 424 | ||
425 | /* adjust required bandwidth for 8B/10B coding overhead */ | ||
426 | datarate = (datarate / 8) * 10; | ||
427 | |||
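The adjustment just above accounts for 8b/10b channel coding: every 8 payload bits occupy 10 bits on the wire, so the required capacity is the payload rate scaled by 10/8. A tiny sketch of the resulting fits-or-not comparison; the example figures, and the assumption that datarate shares units with link_bw (the KB/s value printed by dp_set_link_config()), are mine rather than the driver's.

#include <stdio.h>

int main(void)
{
        unsigned pixel_clock = 148500;                  /* kHz */
        unsigned bpp = 24;
        unsigned link_nr = 4, link_bw = 270000;         /* per-lane rate, KB/s */

        unsigned datarate = pixel_clock * bpp / 8;      /* payload rate */
        datarate = (datarate / 8) * 10;                 /* 8b/10b coding overhead */

        printf("need %u, have %u -> %s\n", datarate, link_nr * link_bw,
               link_nr * link_bw >= datarate ? "fits" : "does not fit");
        return 0;
}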
585 | /* some sinks toggle hotplug in response to some of the actions | 428 | /* some sinks toggle hotplug in response to some of the actions |
586 | * we take during link training (DP_SET_POWER is one), we need | 429 | * we take during link training (DP_SET_POWER is one), we need |
587 | * to ignore them for the moment to avoid races. | 430 | * to ignore them for the moment to avoid races. |
@@ -589,16 +432,10 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate) | |||
589 | nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false); | 432 | nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false); |
590 | 433 | ||
591 | /* enable down-spreading, if possible */ | 434 | /* enable down-spreading, if possible */ |
592 | if (dp.table[1] >= 16) { | 435 | dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); |
593 | u16 script = ROM16(dp.entry[14]); | ||
594 | if (nv_encoder->dp.dpcd[3] & 1) | ||
595 | script = ROM16(dp.entry[12]); | ||
596 | |||
597 | nouveau_bios_run_init_table(dev, script, dp.dcb, dp.crtc); | ||
598 | } | ||
599 | 436 | ||
600 | /* execute pre-train script from vbios */ | 437 | /* execute pre-train script from vbios */ |
601 | nouveau_bios_run_init_table(dev, ROM16(dp.entry[6]), dp.dcb, dp.crtc); | 438 | dp_link_train_init(dev, &dp); |
602 | 439 | ||
603 | /* start off at highest link rate supported by encoder and display */ | 440 | /* start off at highest link rate supported by encoder and display */ |
604 | while (*link_bw > nv_encoder->dp.link_bw) | 441 | while (*link_bw > nv_encoder->dp.link_bw) |
@@ -632,13 +469,36 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate) | |||
632 | dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE); | 469 | dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE); |
633 | 470 | ||
634 | /* execute post-train script from vbios */ | 471 | /* execute post-train script from vbios */ |
635 | nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc); | 472 | dp_link_train_fini(dev, &dp); |
636 | 473 | ||
637 | /* re-enable hotplug detect */ | 474 | /* re-enable hotplug detect */ |
638 | nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true); | 475 | nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true); |
639 | return true; | 476 | return true; |
640 | } | 477 | } |
641 | 478 | ||
479 | void | ||
480 | nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, | ||
481 | struct dp_train_func *func) | ||
482 | { | ||
483 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
484 | struct nouveau_i2c_chan *auxch; | ||
485 | u8 status; | ||
486 | |||
487 | auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index); | ||
488 | if (!auxch) | ||
489 | return; | ||
490 | |||
491 | if (mode == DRM_MODE_DPMS_ON) | ||
492 | status = DP_SET_POWER_D0; | ||
493 | else | ||
494 | status = DP_SET_POWER_D3; | ||
495 | |||
496 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); | ||
497 | |||
498 | if (mode == DRM_MODE_DPMS_ON) | ||
499 | nouveau_dp_link_train(encoder, datarate, func); | ||
500 | } | ||
501 | |||
642 | bool | 502 | bool |
643 | nouveau_dp_detect(struct drm_encoder *encoder) | 503 | nouveau_dp_detect(struct drm_encoder *encoder) |
644 | { | 504 | { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 81d7962e7252..4f2030bd5676 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -57,6 +57,10 @@ MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); | |||
57 | int nouveau_vram_notify = 0; | 57 | int nouveau_vram_notify = 0; |
58 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); | 58 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); |
59 | 59 | ||
60 | MODULE_PARM_DESC(vram_type, "Override detected VRAM type"); | ||
61 | char *nouveau_vram_type; | ||
62 | module_param_named(vram_type, nouveau_vram_type, charp, 0400); | ||
63 | |||
60 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); | 64 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); |
61 | int nouveau_duallink = 1; | 65 | int nouveau_duallink = 1; |
62 | module_param_named(duallink, nouveau_duallink, int, 0400); | 66 | module_param_named(duallink, nouveau_duallink, int, 0400); |
@@ -89,7 +93,7 @@ MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); | |||
89 | int nouveau_override_conntype = 0; | 93 | int nouveau_override_conntype = 0; |
90 | module_param_named(override_conntype, nouveau_override_conntype, int, 0400); | 94 | module_param_named(override_conntype, nouveau_override_conntype, int, 0400); |
91 | 95 | ||
92 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n"); | 96 | MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); |
93 | int nouveau_tv_disable = 0; | 97 | int nouveau_tv_disable = 0; |
94 | module_param_named(tv_disable, nouveau_tv_disable, int, 0400); | 98 | module_param_named(tv_disable, nouveau_tv_disable, int, 0400); |
95 | 99 | ||
@@ -104,27 +108,27 @@ module_param_named(tv_norm, nouveau_tv_norm, charp, 0400); | |||
104 | MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n" | 108 | MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n" |
105 | "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n" | 109 | "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n" |
106 | "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n" | 110 | "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n" |
107 | "\t\t0x100 vgaattr, 0x200 EVO (G80+). "); | 111 | "\t\t0x100 vgaattr, 0x200 EVO (G80+)"); |
108 | int nouveau_reg_debug; | 112 | int nouveau_reg_debug; |
109 | module_param_named(reg_debug, nouveau_reg_debug, int, 0600); | 113 | module_param_named(reg_debug, nouveau_reg_debug, int, 0600); |
110 | 114 | ||
111 | MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n"); | 115 | MODULE_PARM_DESC(perflvl, "Performance level (default: boot)"); |
112 | char *nouveau_perflvl; | 116 | char *nouveau_perflvl; |
113 | module_param_named(perflvl, nouveau_perflvl, charp, 0400); | 117 | module_param_named(perflvl, nouveau_perflvl, charp, 0400); |
114 | 118 | ||
115 | MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); | 119 | MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)"); |
116 | int nouveau_perflvl_wr; | 120 | int nouveau_perflvl_wr; |
117 | module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); | 121 | module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); |
118 | 122 | ||
119 | MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); | 123 | MODULE_PARM_DESC(msi, "Enable MSI (default: off)"); |
120 | int nouveau_msi; | 124 | int nouveau_msi; |
121 | module_param_named(msi, nouveau_msi, int, 0400); | 125 | module_param_named(msi, nouveau_msi, int, 0400); |
122 | 126 | ||
123 | MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n"); | 127 | MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)"); |
124 | int nouveau_ctxfw; | 128 | int nouveau_ctxfw; |
125 | module_param_named(ctxfw, nouveau_ctxfw, int, 0400); | 129 | module_param_named(ctxfw, nouveau_ctxfw, int, 0400); |
126 | 130 | ||
127 | MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS\n"); | 131 | MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS"); |
128 | int nouveau_mxmdcb = 1; | 132 | int nouveau_mxmdcb = 1; |
129 | module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400); | 133 | module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400); |
130 | 134 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b82709828931..a184ba331273 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -406,6 +406,9 @@ struct nouveau_display_engine { | |||
406 | struct drm_property *underscan_property; | 406 | struct drm_property *underscan_property; |
407 | struct drm_property *underscan_hborder_property; | 407 | struct drm_property *underscan_hborder_property; |
408 | struct drm_property *underscan_vborder_property; | 408 | struct drm_property *underscan_vborder_property; |
409 | /* not really hue and saturation: */ | ||
410 | struct drm_property *vibrant_hue_property; | ||
411 | struct drm_property *color_vibrance_property; | ||
409 | }; | 412 | }; |
410 | 413 | ||
411 | struct nouveau_gpio_engine { | 414 | struct nouveau_gpio_engine { |
@@ -432,58 +435,85 @@ struct nouveau_pm_voltage { | |||
432 | int nr_level; | 435 | int nr_level; |
433 | }; | 436 | }; |
434 | 437 | ||
438 | /* Exclusive upper limits */ | ||
439 | #define NV_MEM_CL_DDR2_MAX 8 | ||
440 | #define NV_MEM_WR_DDR2_MAX 9 | ||
441 | #define NV_MEM_CL_DDR3_MAX 17 | ||
442 | #define NV_MEM_WR_DDR3_MAX 17 | ||
443 | #define NV_MEM_CL_GDDR3_MAX 16 | ||
444 | #define NV_MEM_WR_GDDR3_MAX 18 | ||
445 | #define NV_MEM_CL_GDDR5_MAX 21 | ||
446 | #define NV_MEM_WR_GDDR5_MAX 20 | ||
447 | |||
435 | struct nouveau_pm_memtiming { | 448 | struct nouveau_pm_memtiming { |
436 | int id; | 449 | int id; |
437 | u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */ | 450 | |
438 | u32 reg_1; | 451 | u32 reg[9]; |
439 | u32 reg_2; | 452 | u32 mr[4]; |
440 | u32 reg_3; | 453 | |
441 | u32 reg_4; | 454 | u8 tCWL; |
442 | u32 reg_5; | 455 | |
443 | u32 reg_6; | 456 | u8 odt; |
444 | u32 reg_7; | 457 | u8 drive_strength; |
445 | u32 reg_8; | ||
446 | /* To be written to 0x1002c0 */ | ||
447 | u8 CL; | ||
448 | u8 WR; | ||
449 | }; | 458 | }; |
450 | 459 | ||
451 | struct nouveau_pm_tbl_header{ | 460 | struct nouveau_pm_tbl_header { |
452 | u8 version; | 461 | u8 version; |
453 | u8 header_len; | 462 | u8 header_len; |
454 | u8 entry_cnt; | 463 | u8 entry_cnt; |
455 | u8 entry_len; | 464 | u8 entry_len; |
456 | }; | 465 | }; |
457 | 466 | ||
458 | struct nouveau_pm_tbl_entry{ | 467 | struct nouveau_pm_tbl_entry { |
459 | u8 tWR; | 468 | u8 tWR; |
460 | u8 tUNK_1; | 469 | u8 tWTR; |
461 | u8 tCL; | 470 | u8 tCL; |
462 | u8 tRP; /* Byte 3 */ | 471 | u8 tRC; |
463 | u8 empty_4; | 472 | u8 empty_4; |
464 | u8 tRAS; /* Byte 5 */ | 473 | u8 tRFC; /* Byte 5 */ |
465 | u8 empty_6; | 474 | u8 empty_6; |
466 | u8 tRFC; /* Byte 7 */ | 475 | u8 tRAS; /* Byte 7 */ |
467 | u8 empty_8; | 476 | u8 empty_8; |
468 | u8 tRC; /* Byte 9 */ | 477 | u8 tRP; /* Byte 9 */ |
469 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; | 478 | u8 tRCDRD; |
470 | u8 empty_15,empty_16,empty_17; | 479 | u8 tRCDWR; |
471 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; | 480 | u8 tRRD; |
481 | u8 tUNK_13; | ||
482 | u8 RAM_FT1; /* 14, a bitmask of random RAM features */ | ||
483 | u8 empty_15; | ||
484 | u8 tUNK_16; | ||
485 | u8 empty_17; | ||
486 | u8 tUNK_18; | ||
487 | u8 tCWL; | ||
488 | u8 tUNK_20, tUNK_21; | ||
472 | }; | 489 | }; |
473 | 490 | ||
474 | /* nouveau_mem.c */ | 491 | struct nouveau_pm_profile; |
475 | void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | 492 | struct nouveau_pm_profile_func { |
476 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | 493 | void (*destroy)(struct nouveau_pm_profile *); |
477 | struct nouveau_pm_memtiming *timing); | 494 | void (*init)(struct nouveau_pm_profile *); |
495 | void (*fini)(struct nouveau_pm_profile *); | ||
496 | struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *); | ||
497 | }; | ||
498 | |||
499 | struct nouveau_pm_profile { | ||
500 | const struct nouveau_pm_profile_func *func; | ||
501 | struct list_head head; | ||
502 | char name[8]; | ||
503 | }; | ||
478 | 504 | ||
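The new nouveau_pm_profile / nouveau_pm_profile_func pair turns performance-level selection into a small vtable: a named profile sits on a list and its select() callback returns the perf level to use (the engine gains profile_ac/profile_dc pointers and a profiles list further down in this file). A self-contained sketch of the same pattern with simplified stand-in types; only the callback shape mirrors the structs above, while the level payload and the fixed_select policy are illustrative.

#include <stdio.h>
#include <string.h>

struct pm_level { int id; unsigned core_khz; };         /* stand-in for nouveau_pm_level */

struct pm_profile;
struct pm_profile_func {
        void (*init)(struct pm_profile *);
        void (*fini)(struct pm_profile *);
        struct pm_level *(*select)(struct pm_profile *);
};

struct pm_profile {
        const struct pm_profile_func *func;
        char name[8];
        struct pm_level *fixed;                         /* payload for this toy profile */
};

static struct pm_level *fixed_select(struct pm_profile *p)
{
        return p->fixed;                                /* always pick the same level */
}

static void nop(struct pm_profile *p) { }

static const struct pm_profile_func fixed_func = {
        .init = nop, .fini = nop, .select = fixed_select,
};

int main(void)
{
        struct pm_level lvl = { .id = 3, .core_khz = 675000 };
        struct pm_profile prof = { .func = &fixed_func, .fixed = &lvl };
        struct pm_level *sel;

        strcpy(prof.name, "perf");
        prof.func->init(&prof);
        sel = prof.func->select(&prof);                 /* what the PM core would do */
        printf("profile %s selects level %d\n", prof.name, sel->id);
        prof.func->fini(&prof);
        return 0;
}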
479 | #define NOUVEAU_PM_MAX_LEVEL 8 | 505 | #define NOUVEAU_PM_MAX_LEVEL 8 |
480 | struct nouveau_pm_level { | 506 | struct nouveau_pm_level { |
507 | struct nouveau_pm_profile profile; | ||
481 | struct device_attribute dev_attr; | 508 | struct device_attribute dev_attr; |
482 | char name[32]; | 509 | char name[32]; |
483 | int id; | 510 | int id; |
484 | 511 | ||
485 | u32 core; | 512 | struct nouveau_pm_memtiming timing; |
486 | u32 memory; | 513 | u32 memory; |
514 | u16 memscript; | ||
515 | |||
516 | u32 core; | ||
487 | u32 shader; | 517 | u32 shader; |
488 | u32 rop; | 518 | u32 rop; |
489 | u32 copy; | 519 | u32 copy; |
@@ -498,9 +528,6 @@ struct nouveau_pm_level { | |||
498 | u32 volt_min; /* microvolts */ | 528 | u32 volt_min; /* microvolts */ |
499 | u32 volt_max; | 529 | u32 volt_max; |
500 | u8 fanspeed; | 530 | u8 fanspeed; |
501 | |||
502 | u16 memscript; | ||
503 | struct nouveau_pm_memtiming *timing; | ||
504 | }; | 531 | }; |
505 | 532 | ||
506 | struct nouveau_pm_temp_sensor_constants { | 533 | struct nouveau_pm_temp_sensor_constants { |
@@ -517,27 +544,26 @@ struct nouveau_pm_threshold_temp { | |||
517 | s16 fan_boost; | 544 | s16 fan_boost; |
518 | }; | 545 | }; |
519 | 546 | ||
520 | struct nouveau_pm_memtimings { | ||
521 | bool supported; | ||
522 | struct nouveau_pm_memtiming *timing; | ||
523 | int nr_timing; | ||
524 | }; | ||
525 | |||
526 | struct nouveau_pm_fan { | 547 | struct nouveau_pm_fan { |
548 | u32 percent; | ||
527 | u32 min_duty; | 549 | u32 min_duty; |
528 | u32 max_duty; | 550 | u32 max_duty; |
529 | u32 pwm_freq; | 551 | u32 pwm_freq; |
552 | u32 pwm_divisor; | ||
530 | }; | 553 | }; |
531 | 554 | ||
532 | struct nouveau_pm_engine { | 555 | struct nouveau_pm_engine { |
533 | struct nouveau_pm_voltage voltage; | 556 | struct nouveau_pm_voltage voltage; |
534 | struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; | 557 | struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; |
535 | int nr_perflvl; | 558 | int nr_perflvl; |
536 | struct nouveau_pm_memtimings memtimings; | ||
537 | struct nouveau_pm_temp_sensor_constants sensor_constants; | 559 | struct nouveau_pm_temp_sensor_constants sensor_constants; |
538 | struct nouveau_pm_threshold_temp threshold_temp; | 560 | struct nouveau_pm_threshold_temp threshold_temp; |
539 | struct nouveau_pm_fan fan; | 561 | struct nouveau_pm_fan fan; |
540 | u32 pwm_divisor; | 562 | |
563 | struct nouveau_pm_profile *profile_ac; | ||
564 | struct nouveau_pm_profile *profile_dc; | ||
565 | struct nouveau_pm_profile *profile; | ||
566 | struct list_head profiles; | ||
541 | 567 | ||
542 | struct nouveau_pm_level boot; | 568 | struct nouveau_pm_level boot; |
543 | struct nouveau_pm_level *cur; | 569 | struct nouveau_pm_level *cur; |
@@ -669,14 +695,14 @@ struct nv04_mode_state { | |||
669 | }; | 695 | }; |
670 | 696 | ||
671 | enum nouveau_card_type { | 697 | enum nouveau_card_type { |
672 | NV_04 = 0x00, | 698 | NV_04 = 0x04, |
673 | NV_10 = 0x10, | 699 | NV_10 = 0x10, |
674 | NV_20 = 0x20, | 700 | NV_20 = 0x20, |
675 | NV_30 = 0x30, | 701 | NV_30 = 0x30, |
676 | NV_40 = 0x40, | 702 | NV_40 = 0x40, |
677 | NV_50 = 0x50, | 703 | NV_50 = 0x50, |
678 | NV_C0 = 0xc0, | 704 | NV_C0 = 0xc0, |
679 | NV_D0 = 0xd0 | 705 | NV_D0 = 0xd0, |
680 | }; | 706 | }; |
681 | 707 | ||
682 | struct drm_nouveau_private { | 708 | struct drm_nouveau_private { |
@@ -772,8 +798,22 @@ struct drm_nouveau_private { | |||
772 | } tile; | 798 | } tile; |
773 | 799 | ||
774 | /* VRAM/fb configuration */ | 800 | /* VRAM/fb configuration */ |
801 | enum { | ||
802 | NV_MEM_TYPE_UNKNOWN = 0, | ||
803 | NV_MEM_TYPE_STOLEN, | ||
804 | NV_MEM_TYPE_SGRAM, | ||
805 | NV_MEM_TYPE_SDRAM, | ||
806 | NV_MEM_TYPE_DDR1, | ||
807 | NV_MEM_TYPE_DDR2, | ||
808 | NV_MEM_TYPE_DDR3, | ||
809 | NV_MEM_TYPE_GDDR2, | ||
810 | NV_MEM_TYPE_GDDR3, | ||
811 | NV_MEM_TYPE_GDDR4, | ||
812 | NV_MEM_TYPE_GDDR5 | ||
813 | } vram_type; | ||
775 | uint64_t vram_size; | 814 | uint64_t vram_size; |
776 | uint64_t vram_sys_base; | 815 | uint64_t vram_sys_base; |
816 | bool vram_rank_B; | ||
777 | 817 | ||
778 | uint64_t fb_available_size; | 818 | uint64_t fb_available_size; |
779 | uint64_t fb_mappable_pages; | 819 | uint64_t fb_mappable_pages; |
@@ -846,6 +886,7 @@ extern int nouveau_uscript_lvds; | |||
846 | extern int nouveau_uscript_tmds; | 886 | extern int nouveau_uscript_tmds; |
847 | extern int nouveau_vram_pushbuf; | 887 | extern int nouveau_vram_pushbuf; |
848 | extern int nouveau_vram_notify; | 888 | extern int nouveau_vram_notify; |
889 | extern char *nouveau_vram_type; | ||
849 | extern int nouveau_fbpercrtc; | 890 | extern int nouveau_fbpercrtc; |
850 | extern int nouveau_tv_disable; | 891 | extern int nouveau_tv_disable; |
851 | extern char *nouveau_tv_norm; | 892 | extern char *nouveau_tv_norm; |
@@ -894,8 +935,12 @@ extern void nouveau_mem_gart_fini(struct drm_device *); | |||
894 | extern int nouveau_mem_init_agp(struct drm_device *); | 935 | extern int nouveau_mem_init_agp(struct drm_device *); |
895 | extern int nouveau_mem_reset_agp(struct drm_device *); | 936 | extern int nouveau_mem_reset_agp(struct drm_device *); |
896 | extern void nouveau_mem_close(struct drm_device *); | 937 | extern void nouveau_mem_close(struct drm_device *); |
897 | extern int nouveau_mem_detect(struct drm_device *); | ||
898 | extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags); | 938 | extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags); |
939 | extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq, | ||
940 | struct nouveau_pm_memtiming *); | ||
941 | extern void nouveau_mem_timing_read(struct drm_device *, | ||
942 | struct nouveau_pm_memtiming *); | ||
943 | extern int nouveau_mem_vbios_type(struct drm_device *); | ||
899 | extern struct nouveau_tile_reg *nv10_mem_set_tiling( | 944 | extern struct nouveau_tile_reg *nv10_mem_set_tiling( |
900 | struct drm_device *dev, uint32_t addr, uint32_t size, | 945 | struct drm_device *dev, uint32_t addr, uint32_t size, |
901 | uint32_t pitch, uint32_t flags); | 946 | uint32_t pitch, uint32_t flags); |
@@ -1117,19 +1162,14 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); | |||
1117 | /* nouveau_hdmi.c */ | 1162 | /* nouveau_hdmi.c */ |
1118 | void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); | 1163 | void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); |
1119 | 1164 | ||
1120 | /* nouveau_dp.c */ | ||
1121 | int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | ||
1122 | uint8_t *data, int data_nr); | ||
1123 | bool nouveau_dp_detect(struct drm_encoder *); | ||
1124 | bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate); | ||
1125 | void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32); | ||
1126 | u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **); | ||
1127 | |||
1128 | /* nv04_fb.c */ | 1165 | /* nv04_fb.c */ |
1166 | extern int nv04_fb_vram_init(struct drm_device *); | ||
1129 | extern int nv04_fb_init(struct drm_device *); | 1167 | extern int nv04_fb_init(struct drm_device *); |
1130 | extern void nv04_fb_takedown(struct drm_device *); | 1168 | extern void nv04_fb_takedown(struct drm_device *); |
1131 | 1169 | ||
1132 | /* nv10_fb.c */ | 1170 | /* nv10_fb.c */ |
1171 | extern int nv10_fb_vram_init(struct drm_device *dev); | ||
1172 | extern int nv1a_fb_vram_init(struct drm_device *dev); | ||
1133 | extern int nv10_fb_init(struct drm_device *); | 1173 | extern int nv10_fb_init(struct drm_device *); |
1134 | extern void nv10_fb_takedown(struct drm_device *); | 1174 | extern void nv10_fb_takedown(struct drm_device *); |
1135 | extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, | 1175 | extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, |
@@ -1138,6 +1178,16 @@ extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, | |||
1138 | extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); | 1178 | extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); |
1139 | extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); | 1179 | extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); |
1140 | 1180 | ||
1181 | /* nv20_fb.c */ | ||
1182 | extern int nv20_fb_vram_init(struct drm_device *dev); | ||
1183 | extern int nv20_fb_init(struct drm_device *); | ||
1184 | extern void nv20_fb_takedown(struct drm_device *); | ||
1185 | extern void nv20_fb_init_tile_region(struct drm_device *dev, int i, | ||
1186 | uint32_t addr, uint32_t size, | ||
1187 | uint32_t pitch, uint32_t flags); | ||
1188 | extern void nv20_fb_set_tile_region(struct drm_device *dev, int i); | ||
1189 | extern void nv20_fb_free_tile_region(struct drm_device *dev, int i); | ||
1190 | |||
1141 | /* nv30_fb.c */ | 1191 | /* nv30_fb.c */ |
1142 | extern int nv30_fb_init(struct drm_device *); | 1192 | extern int nv30_fb_init(struct drm_device *); |
1143 | extern void nv30_fb_takedown(struct drm_device *); | 1193 | extern void nv30_fb_takedown(struct drm_device *); |
@@ -1147,6 +1197,7 @@ extern void nv30_fb_init_tile_region(struct drm_device *dev, int i, | |||
1147 | extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); | 1197 | extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); |
1148 | 1198 | ||
1149 | /* nv40_fb.c */ | 1199 | /* nv40_fb.c */ |
1200 | extern int nv40_fb_vram_init(struct drm_device *dev); | ||
1150 | extern int nv40_fb_init(struct drm_device *); | 1201 | extern int nv40_fb_init(struct drm_device *); |
1151 | extern void nv40_fb_takedown(struct drm_device *); | 1202 | extern void nv40_fb_takedown(struct drm_device *); |
1152 | extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); | 1203 | extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); |
@@ -1703,6 +1754,7 @@ nv44_graph_class(struct drm_device *dev) | |||
1703 | #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) | 1754 | #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) |
1704 | #define NV_MEM_ACCESS_SYS 4 | 1755 | #define NV_MEM_ACCESS_SYS 4 |
1705 | #define NV_MEM_ACCESS_VM 8 | 1756 | #define NV_MEM_ACCESS_VM 8 |
1757 | #define NV_MEM_ACCESS_NOSNOOP 16 | ||
1706 | 1758 | ||
1707 | #define NV_MEM_TARGET_VRAM 0 | 1759 | #define NV_MEM_TARGET_VRAM 0 |
1708 | #define NV_MEM_TARGET_PCI 1 | 1760 | #define NV_MEM_TARGET_PCI 1 |
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index e5d6e3faff3d..3dc14a3dcc4c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h | |||
@@ -32,6 +32,14 @@ | |||
32 | 32 | ||
33 | #define NV_DPMS_CLEARED 0x80 | 33 | #define NV_DPMS_CLEARED 0x80 |
34 | 34 | ||
35 | struct dp_train_func { | ||
36 | void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc, | ||
37 | int nr, u32 bw, bool enhframe); | ||
38 | void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern); | ||
39 | void (*train_adj)(struct drm_device *, struct dcb_entry *, | ||
40 | u8 lane, u8 swing, u8 preem); | ||
41 | }; | ||
42 | |||
35 | struct nouveau_encoder { | 43 | struct nouveau_encoder { |
36 | struct drm_encoder_slave base; | 44 | struct drm_encoder_slave base; |
37 | 45 | ||
@@ -78,9 +86,19 @@ get_slave_funcs(struct drm_encoder *enc) | |||
78 | return to_encoder_slave(enc)->slave_funcs; | 86 | return to_encoder_slave(enc)->slave_funcs; |
79 | } | 87 | } |
80 | 88 | ||
89 | /* nouveau_dp.c */ | ||
90 | int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | ||
91 | uint8_t *data, int data_nr); | ||
92 | bool nouveau_dp_detect(struct drm_encoder *); | ||
93 | void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, | ||
94 | struct dp_train_func *); | ||
95 | u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **); | ||
96 | |||
81 | struct nouveau_connector * | 97 | struct nouveau_connector * |
82 | nouveau_encoder_connector_get(struct nouveau_encoder *encoder); | 98 | nouveau_encoder_connector_get(struct nouveau_encoder *encoder); |
83 | int nv50_sor_create(struct drm_connector *, struct dcb_entry *); | 99 | int nv50_sor_create(struct drm_connector *, struct dcb_entry *); |
100 | void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32); | ||
84 | int nv50_dac_create(struct drm_connector *, struct dcb_entry *); | 101 | int nv50_dac_create(struct drm_connector *, struct dcb_entry *); |
85 | 102 | ||
103 | |||
86 | #endif /* __NOUVEAU_ENCODER_H__ */ | 104 | #endif /* __NOUVEAU_ENCODER_H__ */ |
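dp_train_func is the seam this patch introduces between the generic training loop in nouveau_dp.c and the per-generation output code: link_set() programs rate, lane count and enhanced framing on the source, train_set() selects the training pattern, train_adj() applies one lane's drive settings, and the table is handed to nouveau_dp_link_train()/nouveau_dp_dpms(). The sketch below only shows the shape of such a provider; the example_* names and the printf placeholders standing in for register programming are hypothetical, and only the three callback signatures come from the struct above.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint8_t u8;
typedef uint32_t u32;

struct drm_device;      /* opaque here */
struct dcb_entry;       /* opaque here */

struct dp_train_func {
        void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
                         int nr, u32 bw, bool enhframe);
        void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern);
        void (*train_adj)(struct drm_device *, struct dcb_entry *,
                          u8 lane, u8 swing, u8 preem);
};

static void example_link_set(struct drm_device *dev, struct dcb_entry *dcb,
                             int crtc, int nr, u32 bw, bool enhframe)
{
        printf("link: %d lanes, %u kHz, enhanced=%d\n", nr, (unsigned)bw, enhframe);
}

static void example_train_set(struct drm_device *dev, struct dcb_entry *dcb,
                              u8 pattern)
{
        printf("training pattern %u\n", pattern);
}

static void example_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
                              u8 lane, u8 swing, u8 preem)
{
        printf("lane %u: swing %u, pre-emphasis %u\n", lane, swing, preem);
}

int main(void)
{
        struct dp_train_func func = {
                .link_set  = example_link_set,
                .train_set = example_train_set,
                .train_adj = example_train_adj,
        };

        /* a real caller would pass &func to nouveau_dp_link_train() */
        func.link_set(NULL, NULL, 0, 4, 270000, true);
        func.train_set(NULL, NULL, 1);
        func.train_adj(NULL, NULL, 0, 3, 0);
        return 0;
}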
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9892218d7452..8113e9201ed9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -381,11 +381,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
381 | goto out_unref; | 381 | goto out_unref; |
382 | } | 382 | } |
383 | 383 | ||
384 | info->pixmap.size = 64*1024; | 384 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
385 | info->pixmap.buf_align = 8; | ||
386 | info->pixmap.access_align = 32; | ||
387 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
388 | info->pixmap.scan_align = 1; | ||
389 | 385 | ||
390 | mutex_unlock(&dev->struct_mutex); | 386 | mutex_unlock(&dev->struct_mutex); |
391 | 387 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 820ae7f52044..8f4f914d9eab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
@@ -277,7 +277,7 @@ i2c_bit_func(struct i2c_adapter *adap) | |||
277 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | 277 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; |
278 | } | 278 | } |
279 | 279 | ||
280 | const struct i2c_algorithm i2c_bit_algo = { | 280 | const struct i2c_algorithm nouveau_i2c_bit_algo = { |
281 | .master_xfer = i2c_bit_xfer, | 281 | .master_xfer = i2c_bit_xfer, |
282 | .functionality = i2c_bit_func | 282 | .functionality = i2c_bit_func |
283 | }; | 283 | }; |
@@ -384,12 +384,12 @@ nouveau_i2c_init(struct drm_device *dev) | |||
384 | case 0: /* NV04:NV50 */ | 384 | case 0: /* NV04:NV50 */ |
385 | port->drive = entry[0]; | 385 | port->drive = entry[0]; |
386 | port->sense = entry[1]; | 386 | port->sense = entry[1]; |
387 | port->adapter.algo = &i2c_bit_algo; | 387 | port->adapter.algo = &nouveau_i2c_bit_algo; |
388 | break; | 388 | break; |
389 | case 4: /* NV4E */ | 389 | case 4: /* NV4E */ |
390 | port->drive = 0x600800 + entry[1]; | 390 | port->drive = 0x600800 + entry[1]; |
391 | port->sense = port->drive; | 391 | port->sense = port->drive; |
392 | port->adapter.algo = &i2c_bit_algo; | 392 | port->adapter.algo = &nouveau_i2c_bit_algo; |
393 | break; | 393 | break; |
394 | case 5: /* NV50- */ | 394 | case 5: /* NV50- */ |
395 | port->drive = entry[0] & 0x0f; | 395 | port->drive = entry[0] & 0x0f; |
@@ -402,7 +402,7 @@ nouveau_i2c_init(struct drm_device *dev) | |||
402 | port->drive = 0x00d014 + (port->drive * 0x20); | 402 | port->drive = 0x00d014 + (port->drive * 0x20); |
403 | port->sense = port->drive; | 403 | port->sense = port->drive; |
404 | } | 404 | } |
405 | port->adapter.algo = &i2c_bit_algo; | 405 | port->adapter.algo = &nouveau_i2c_bit_algo; |
406 | break; | 406 | break; |
407 | case 6: /* NV50- DP AUX */ | 407 | case 6: /* NV50- DP AUX */ |
408 | port->drive = entry[0]; | 408 | port->drive = entry[0]; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c3a5745e9c79..b08065f981df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -26,7 +26,8 @@ | |||
26 | * DEALINGS IN THE SOFTWARE. | 26 | * DEALINGS IN THE SOFTWARE. |
27 | * | 27 | * |
28 | * Authors: | 28 | * Authors: |
29 | * Keith Whitwell <keith@tungstengraphics.com> | 29 | * Ben Skeggs <bskeggs@redhat.com> |
30 | * Roy Spliet <r.spliet@student.tudelft.nl> | ||
30 | */ | 31 | */ |
31 | 32 | ||
32 | 33 | ||
@@ -192,75 +193,6 @@ nouveau_mem_gart_fini(struct drm_device *dev) | |||
192 | } | 193 | } |
193 | } | 194 | } |
194 | 195 | ||
195 | static uint32_t | ||
196 | nouveau_mem_detect_nv04(struct drm_device *dev) | ||
197 | { | ||
198 | uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0); | ||
199 | |||
200 | if (boot0 & 0x00000100) | ||
201 | return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; | ||
202 | |||
203 | switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { | ||
204 | case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: | ||
205 | return 32 * 1024 * 1024; | ||
206 | case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: | ||
207 | return 16 * 1024 * 1024; | ||
208 | case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: | ||
209 | return 8 * 1024 * 1024; | ||
210 | case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: | ||
211 | return 4 * 1024 * 1024; | ||
212 | } | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static uint32_t | ||
218 | nouveau_mem_detect_nforce(struct drm_device *dev) | ||
219 | { | ||
220 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
221 | struct pci_dev *bridge; | ||
222 | uint32_t mem; | ||
223 | |||
224 | bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); | ||
225 | if (!bridge) { | ||
226 | NV_ERROR(dev, "no bridge device\n"); | ||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | if (dev_priv->flags & NV_NFORCE) { | ||
231 | pci_read_config_dword(bridge, 0x7C, &mem); | ||
232 | return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; | ||
233 | } else | ||
234 | if (dev_priv->flags & NV_NFORCE2) { | ||
235 | pci_read_config_dword(bridge, 0x84, &mem); | ||
236 | return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; | ||
237 | } | ||
238 | |||
239 | NV_ERROR(dev, "impossible!\n"); | ||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | int | ||
244 | nouveau_mem_detect(struct drm_device *dev) | ||
245 | { | ||
246 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
247 | |||
248 | if (dev_priv->card_type == NV_04) { | ||
249 | dev_priv->vram_size = nouveau_mem_detect_nv04(dev); | ||
250 | } else | ||
251 | if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { | ||
252 | dev_priv->vram_size = nouveau_mem_detect_nforce(dev); | ||
253 | } else | ||
254 | if (dev_priv->card_type < NV_50) { | ||
255 | dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); | ||
256 | dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; | ||
257 | } | ||
258 | |||
259 | if (dev_priv->vram_size) | ||
260 | return 0; | ||
261 | return -ENOMEM; | ||
262 | } | ||
263 | |||
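nouveau_mem_detect() and its helpers are removed here; going by the nv04_fb_vram_init()/nv10_fb_vram_init()/nv20_fb_vram_init()/nv40_fb_vram_init() prototypes added to nouveau_drv.h in this same patch, detection moves into the per-generation PFB code. The NV04 decode is simple enough to show standalone: one BOOT_0 bit selects an extended size field, otherwise a two-bit amount code applies. The sketch keeps the extended-field arithmetic from the removed function and leaves the two-bit codes to the NV04_PFB_BOOT_0_RAM_AMOUNT_* defines, which are not reproduced here.

#include <stdio.h>
#include <stdint.h>

/* "extended" RAM-amount encoding from the removed nouveau_mem_detect_nv04():
 * when bit 8 of BOOT_0 is set, bits 15:12 encode (n * 2 + 2) MiB.
 */
static uint32_t nv04_ext_vram_bytes(uint32_t boot0)
{
        if (!(boot0 & 0x00000100))
                return 0;       /* legacy two-bit encoding, not handled here */
        return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
}

int main(void)
{
        uint32_t boot0 = 0x0000f100;    /* example: field = 0xf -> 32 MiB */
        printf("BOOT_0 %08x -> %u MiB\n", (unsigned)boot0,
               (unsigned)(nv04_ext_vram_bytes(boot0) >> 20));
        return 0;
}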
264 | bool | 196 | bool |
265 | nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) | 197 | nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) |
266 | { | 198 | { |
@@ -385,11 +317,29 @@ nouveau_mem_init_agp(struct drm_device *dev) | |||
385 | return 0; | 317 | return 0; |
386 | } | 318 | } |
387 | 319 | ||
320 | static const struct vram_types { | ||
321 | int value; | ||
322 | const char *name; | ||
323 | } vram_type_map[] = { | ||
324 | { NV_MEM_TYPE_STOLEN , "stolen system memory" }, | ||
325 | { NV_MEM_TYPE_SGRAM , "SGRAM" }, | ||
326 | { NV_MEM_TYPE_SDRAM , "SDRAM" }, | ||
327 | { NV_MEM_TYPE_DDR1 , "DDR1" }, | ||
328 | { NV_MEM_TYPE_DDR2 , "DDR2" }, | ||
329 | { NV_MEM_TYPE_DDR3 , "DDR3" }, | ||
330 | { NV_MEM_TYPE_GDDR2 , "GDDR2" }, | ||
331 | { NV_MEM_TYPE_GDDR3 , "GDDR3" }, | ||
332 | { NV_MEM_TYPE_GDDR4 , "GDDR4" }, | ||
333 | { NV_MEM_TYPE_GDDR5 , "GDDR5" }, | ||
334 | { NV_MEM_TYPE_UNKNOWN, "unknown type" } | ||
335 | }; | ||
336 | |||
388 | int | 337 | int |
389 | nouveau_mem_vram_init(struct drm_device *dev) | 338 | nouveau_mem_vram_init(struct drm_device *dev) |
390 | { | 339 | { |
391 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 340 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
392 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | 341 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; |
342 | const struct vram_types *vram_type; | ||
393 | int ret, dma_bits; | 343 | int ret, dma_bits; |
394 | 344 | ||
395 | dma_bits = 32; | 345 | dma_bits = 32; |
@@ -427,7 +377,21 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
427 | return ret; | 377 | return ret; |
428 | } | 378 | } |
429 | 379 | ||
430 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | 380 | vram_type = vram_type_map; |
381 | while (vram_type->value != NV_MEM_TYPE_UNKNOWN) { | ||
382 | if (nouveau_vram_type) { | ||
383 | if (!strcasecmp(nouveau_vram_type, vram_type->name)) | ||
384 | break; | ||
385 | dev_priv->vram_type = vram_type->value; | ||
386 | } else { | ||
387 | if (vram_type->value == dev_priv->vram_type) | ||
388 | break; | ||
389 | } | ||
390 | vram_type++; | ||
391 | } | ||
392 | |||
393 | NV_INFO(dev, "Detected %dMiB VRAM (%s)\n", | ||
394 | (int)(dev_priv->vram_size >> 20), vram_type->name); | ||
431 | if (dev_priv->vram_sys_base) { | 395 | if (dev_priv->vram_sys_base) { |
432 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | 396 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", |
433 | dev_priv->vram_sys_base); | 397 | dev_priv->vram_sys_base); |
@@ -508,216 +472,617 @@ nouveau_mem_gart_init(struct drm_device *dev) | |||
508 | return 0; | 472 | return 0; |
509 | } | 473 | } |
510 | 474 | ||
511 | /* XXX: For now a dummy. More samples required, possibly even a card | 475 | static int |
512 | * Called from nouveau_perf.c */ | 476 | nv40_mem_timing_calc(struct drm_device *dev, u32 freq, |
513 | void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | 477 | struct nouveau_pm_tbl_entry *e, u8 len, |
514 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | 478 | struct nouveau_pm_memtiming *boot, |
515 | struct nouveau_pm_memtiming *timing) { | 479 | struct nouveau_pm_memtiming *t) |
516 | 480 | { | |
517 | NV_DEBUG(dev,"Timing entry format unknown, please contact nouveau developers"); | 481 | t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC); |
518 | } | ||
519 | |||
520 | void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | ||
521 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | ||
522 | struct nouveau_pm_memtiming *timing) { | ||
523 | |||
524 | timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP); | ||
525 | 482 | ||
526 | /* XXX: I don't trust the -1's and +1's... they must come | 483 | /* XXX: I don't trust the -1's and +1's... they must come |
527 | * from somewhere! */ | 484 | * from somewhere! */ |
528 | timing->reg_1 = (e->tWR + 2 + magic_number) << 24 | | 485 | t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 | |
529 | 1 << 16 | | 486 | 1 << 16 | |
530 | (e->tUNK_1 + 2 + magic_number) << 8 | | 487 | (e->tWTR + 2 + (t->tCWL - 1)) << 8 | |
531 | (e->tCL + 2 - magic_number); | 488 | (e->tCL + 2 - (t->tCWL - 1)); |
532 | timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10); | 489 | |
533 | timing->reg_2 |= 0x20200000; | 490 | t->reg[2] = 0x20200000 | |
534 | 491 | ((t->tCWL - 1) << 24 | | |
535 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id, | 492 | e->tRRD << 16 | |
536 | timing->reg_0, timing->reg_1,timing->reg_2); | 493 | e->tRCDWR << 8 | |
494 | e->tRCDRD); | ||
495 | |||
496 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id, | ||
497 | t->reg[0], t->reg[1], t->reg[2]); | ||
498 | return 0; | ||
537 | } | 499 | } |
538 | 500 | ||
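nv40_mem_timing_calc() above, like the nv50 and nvc0 variants that follow, packs the BIOS timing entry straight into register images: reg[0] is tRP/tRAS/tRFC/tRC one byte per field (the 0x100220 group named in the debug output), while reg[1] and reg[2] fold in tWR, tWTR, tCL and the CAS-write-latency (tCWL) correction. A standalone sketch of the reg[0] packing; the timing values are made up purely for illustration.

#include <stdio.h>

int main(void)
{
        unsigned tRP = 13, tRAS = 31, tRFC = 60, tRC = 44;      /* arbitrary */
        unsigned reg0 = tRP << 24 | tRAS << 16 | tRFC << 8 | tRC;

        printf("0x100220 image: %08x\n", reg0);                 /* 0d1f3c2c */
        return 0;
}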
539 | void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr, | 501 | static int |
540 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) { | 502 | nv50_mem_timing_calc(struct drm_device *dev, u32 freq, |
503 | struct nouveau_pm_tbl_entry *e, u8 len, | ||
504 | struct nouveau_pm_memtiming *boot, | ||
505 | struct nouveau_pm_memtiming *t) | ||
506 | { | ||
541 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 507 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
508 | struct bit_entry P; | ||
509 | uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3; | ||
542 | 510 | ||
543 | uint8_t unk18 = 1, | 511 | if (bit_table(dev, 'P', &P)) |
544 | unk19 = 1, | 512 | return -EINVAL; |
545 | unk20 = 0, | ||
546 | unk21 = 0; | ||
547 | 513 | ||
548 | switch (min(hdr->entry_len, (u8) 22)) { | 514 | switch (min(len, (u8) 22)) { |
549 | case 22: | 515 | case 22: |
550 | unk21 = e->tUNK_21; | 516 | unk21 = e->tUNK_21; |
551 | case 21: | 517 | case 21: |
552 | unk20 = e->tUNK_20; | 518 | unk20 = e->tUNK_20; |
553 | case 20: | 519 | case 20: |
554 | unk19 = e->tUNK_19; | 520 | if (e->tCWL > 0) |
521 | t->tCWL = e->tCWL; | ||
555 | case 19: | 522 | case 19: |
556 | unk18 = e->tUNK_18; | 523 | unk18 = e->tUNK_18; |
557 | break; | 524 | break; |
558 | } | 525 | } |
559 | 526 | ||
560 | timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP); | 527 | t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC); |
561 | 528 | ||
562 | /* XXX: I don't trust the -1's and +1's... they must come | 529 | t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 | |
563 | * from somewhere! */ | 530 | max(unk18, (u8) 1) << 16 | |
564 | timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 | | 531 | (e->tWTR + 2 + (t->tCWL - 1)) << 8; |
565 | max(unk18, (u8) 1) << 16 | | 532 | |
566 | (e->tUNK_1 + unk19 + 1 + magic_number) << 8; | 533 | t->reg[2] = ((t->tCWL - 1) << 24 | |
567 | if (dev_priv->chipset == 0xa8) { | 534 | e->tRRD << 16 | |
568 | timing->reg_1 |= (e->tCL - 1); | 535 | e->tRCDWR << 8 | |
569 | } else { | 536 | e->tRCDRD); |
570 | timing->reg_1 |= (e->tCL + 2 - magic_number); | 537 | |
571 | } | 538 | t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13; |
572 | timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10); | 539 | |
573 | 540 | t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP); | |
574 | timing->reg_5 = (e->tRAS << 24 | e->tRC); | 541 | |
575 | timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16; | 542 | t->reg[8] = boot->reg[8] & 0xffffff00; |
576 | 543 | ||
577 | if (P->version == 1) { | 544 | if (P.version == 1) { |
578 | timing->reg_2 |= magic_number << 24; | 545 | t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1)); |
579 | timing->reg_3 = (0x14 + e->tCL) << 24 | | 546 | |
580 | 0x16 << 16 | | 547 | t->reg[3] = (0x14 + e->tCL) << 24 | |
581 | (e->tCL - 1) << 8 | | 548 | 0x16 << 16 | |
582 | (e->tCL - 1); | 549 | (e->tCL - 1) << 8 | |
583 | timing->reg_4 = (nv_rd32(dev,0x10022c) & 0xffff0000) | e->tUNK_13 << 8 | e->tUNK_13; | 550 | (e->tCL - 1); |
584 | timing->reg_5 |= (e->tCL + 2) << 8; | 551 | |
585 | timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16; | 552 | t->reg[4] |= boot->reg[4] & 0xffff0000; |
553 | |||
554 | t->reg[6] = (0x33 - t->tCWL) << 16 | | ||
555 | t->tCWL << 8 | | ||
556 | (0x2e + e->tCL - t->tCWL); | ||
557 | |||
558 | t->reg[7] = 0x4000202 | (e->tCL - 1) << 16; | ||
559 | |||
560 | /* XXX: P.version == 1 only has DDR2 and GDDR3? */ | ||
561 | if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) { | ||
562 | t->reg[5] |= (e->tCL + 3) << 8; | ||
563 | t->reg[6] |= (t->tCWL - 2) << 8; | ||
564 | t->reg[8] |= (e->tCL - 4); | ||
565 | } else { | ||
566 | t->reg[5] |= (e->tCL + 2) << 8; | ||
567 | t->reg[6] |= t->tCWL << 8; | ||
568 | t->reg[8] |= (e->tCL - 2); | ||
569 | } | ||
586 | } else { | 570 | } else { |
587 | timing->reg_2 |= (unk19 - 1) << 24; | 571 | t->reg[1] |= (5 + e->tCL - (t->tCWL)); |
588 | /* XXX: reg_10022c for recentish cards pretty much unknown*/ | 572 | |
589 | timing->reg_3 = e->tCL - 1; | 573 | /* XXX: 0xb? 0x30? */ |
590 | timing->reg_4 = (unk20 << 24 | unk21 << 16 | | 574 | t->reg[3] = (0x30 + e->tCL) << 24 | |
591 | e->tUNK_13 << 8 | e->tUNK_13); | 575 | (boot->reg[3] & 0x00ff0000)| |
576 | (0xb + e->tCL) << 8 | | ||
577 | (e->tCL - 1); | ||
578 | |||
579 | t->reg[4] |= (unk20 << 24 | unk21 << 16); | ||
580 | |||
592 | /* XXX: +6? */ | 581 | /* XXX: +6? */ |
593 | timing->reg_5 |= (unk19 + 6) << 8; | 582 | t->reg[5] |= (t->tCWL + 6) << 8; |
594 | 583 | ||
595 | /* XXX: reg_10023c currently unknown | 584 | t->reg[6] = (0x5a + e->tCL) << 16 | |
596 | * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ | 585 | (6 - e->tCL + t->tCWL) << 8 | |
597 | timing->reg_7 = 0x202; | 586 | (0x50 + e->tCL - t->tCWL); |
587 | |||
588 | tmp7_3 = (boot->reg[7] & 0xff000000) >> 24; | ||
589 | t->reg[7] = (tmp7_3 << 24) | | ||
590 | ((tmp7_3 - 6 + e->tCL) << 16) | | ||
591 | 0x202; | ||
598 | } | 592 | } |
599 | 593 | ||
600 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id, | 594 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id, |
601 | timing->reg_0, timing->reg_1, | 595 | t->reg[0], t->reg[1], t->reg[2], t->reg[3]); |
602 | timing->reg_2, timing->reg_3); | ||
603 | NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", | 596 | NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", |
604 | timing->reg_4, timing->reg_5, | 597 | t->reg[4], t->reg[5], t->reg[6], t->reg[7]); |
605 | timing->reg_6, timing->reg_7); | 598 | NV_DEBUG(dev, " 240: %08x\n", t->reg[8]); |
606 | NV_DEBUG(dev, " 240: %08x\n", timing->reg_8); | 599 | return 0; |
607 | } | 600 | } |
608 | 601 | ||
609 | void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | 602 | static int |
610 | struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) { | 603 | nvc0_mem_timing_calc(struct drm_device *dev, u32 freq, |
611 | timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP); | 604 | struct nouveau_pm_tbl_entry *e, u8 len, |
612 | timing->reg_1 = (nv_rd32(dev,0x10f294) & 0xff000000) | (e->tUNK_11&0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f); | 605 | struct nouveau_pm_memtiming *boot, |
613 | timing->reg_2 = (nv_rd32(dev,0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8; | 606 | struct nouveau_pm_memtiming *t) |
614 | timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13; | 607 | { |
615 | timing->reg_4 = (nv_rd32(dev,0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15; | 608 | if (e->tCWL > 0) |
616 | NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id, | 609 | t->tCWL = e->tCWL; |
617 | timing->reg_0, timing->reg_1, | 610 | |
618 | timing->reg_2, timing->reg_3); | 611 | t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 | |
619 | NV_DEBUG(dev, " 2a0: %08x %08x %08x %08x\n", | 612 | e->tRFC << 8 | e->tRC); |
620 | timing->reg_4, timing->reg_5, | 613 | |
621 | timing->reg_6, timing->reg_7); | 614 | t->reg[1] = (boot->reg[1] & 0xff000000) | |
615 | (e->tRCDWR & 0x0f) << 20 | | ||
616 | (e->tRCDRD & 0x0f) << 14 | | ||
617 | (t->tCWL << 7) | | ||
618 | (e->tCL & 0x0f); | ||
619 | |||
620 | t->reg[2] = (boot->reg[2] & 0xff0000ff) | | ||
621 | e->tWR << 16 | e->tWTR << 8; | ||
622 | |||
623 | t->reg[3] = (e->tUNK_20 & 0x1f) << 9 | | ||
624 | (e->tUNK_21 & 0xf) << 5 | | ||
625 | (e->tUNK_13 & 0x1f); | ||
626 | |||
627 | t->reg[4] = (boot->reg[4] & 0xfff00fff) | | ||
628 | (e->tRRD&0x1f) << 15; | ||
629 | |||
630 | NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id, | ||
631 | t->reg[0], t->reg[1], t->reg[2], t->reg[3]); | ||
632 | NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]); | ||
633 | return 0; | ||
622 | } | 634 | } |
623 | 635 | ||
624 | /** | 636 | /** |
625 | * Processes the Memory Timing BIOS table, stores generated | 637 | * MR generation methods |
626 | * register values | ||
627 | * @pre init scripts were run, memtiming regs are initialized | ||
628 | */ | 638 | */ |
629 | void | 639 | |
630 | nouveau_mem_timing_init(struct drm_device *dev) | 640 | static int |
641 | nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq, | ||
642 | struct nouveau_pm_tbl_entry *e, u8 len, | ||
643 | struct nouveau_pm_memtiming *boot, | ||
644 | struct nouveau_pm_memtiming *t) | ||
645 | { | ||
646 | t->drive_strength = 0; | ||
647 | if (len < 15) { | ||
648 | t->odt = boot->odt; | ||
649 | } else { | ||
650 | t->odt = e->RAM_FT1 & 0x07; | ||
651 | } | ||
652 | |||
653 | if (e->tCL >= NV_MEM_CL_DDR2_MAX) { | ||
654 | NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); | ||
655 | return -ERANGE; | ||
656 | } | ||
657 | |||
658 | if (e->tWR >= NV_MEM_WR_DDR2_MAX) { | ||
659 | NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); | ||
660 | return -ERANGE; | ||
661 | } | ||
662 | |||
663 | if (t->odt > 3) { | ||
664 | NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x", | ||
665 | t->id, t->odt); | ||
666 | t->odt = 0; | ||
667 | } | ||
668 | |||
669 | t->mr[0] = (boot->mr[0] & 0x100f) | | ||
670 | (e->tCL) << 4 | | ||
671 | (e->tWR - 1) << 9; | ||
672 | t->mr[1] = (boot->mr[1] & 0x101fbb) | | ||
673 | (t->odt & 0x1) << 2 | | ||
674 | (t->odt & 0x2) << 5; | ||
675 | |||
676 | NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]); | ||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = { | ||
681 | 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0}; | ||
682 | |||
683 | static int | ||
684 | nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq, | ||
685 | struct nouveau_pm_tbl_entry *e, u8 len, | ||
686 | struct nouveau_pm_memtiming *boot, | ||
687 | struct nouveau_pm_memtiming *t) | ||
688 | { | ||
689 | u8 cl = e->tCL - 4; | ||
690 | |||
691 | t->drive_strength = 0; | ||
692 | if (len < 15) { | ||
693 | t->odt = boot->odt; | ||
694 | } else { | ||
695 | t->odt = e->RAM_FT1 & 0x07; | ||
696 | } | ||
697 | |||
698 | if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) { | ||
699 | NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); | ||
700 | return -ERANGE; | ||
701 | } | ||
702 | |||
703 | if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) { | ||
704 | NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); | ||
705 | return -ERANGE; | ||
706 | } | ||
707 | |||
708 | if (e->tCWL < 5) { | ||
709 | NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL); | ||
710 | return -ERANGE; | ||
711 | } | ||
712 | |||
713 | t->mr[0] = (boot->mr[0] & 0x180b) | | ||
714 | /* CAS */ | ||
715 | (cl & 0x7) << 4 | | ||
716 | (cl & 0x8) >> 1 | | ||
717 | (nv_mem_wr_lut_ddr3[e->tWR]) << 9; | ||
718 | t->mr[1] = (boot->mr[1] & 0x101dbb) | | ||
719 | (t->odt & 0x1) << 2 | | ||
720 | (t->odt & 0x2) << 5 | | ||
721 | (t->odt & 0x4) << 7; | ||
722 | t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3; | ||
723 | |||
724 | NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]); | ||
725 | return 0; | ||
726 | } | ||
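nouveau_mem_ddr3_mr() above encodes CAS latency as (tCL - 4) split across MR0: the low three bits land in bits 6:4 and the high bit in bit 2, while write recovery is translated through nv_mem_wr_lut_ddr3[] into bits 11:9. A minimal sketch of just that bit arithmetic; the example tCL/tWR values are arbitrary and nothing here touches hardware:

/* ddr3_mr0_bits.c: CAS/WR packing as performed by nouveau_mem_ddr3_mr()
 * above; not a complete MR0 definition. */
#include <stdint.h>
#include <stdio.h>

/* same lookup table as the driver (index = tWR in clocks) */
static const uint8_t wr_lut[] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0 };

static uint32_t ddr3_mr0_cas_wr(unsigned tCL, unsigned tWR)
{
	unsigned cl = tCL - 4;

	return (cl & 0x7) << 4 |		/* CAS low bits -> MR0[6:4] */
	       (cl & 0x8) >> 1 |		/* CAS high bit -> MR0[2]   */
	       (unsigned)wr_lut[tWR] << 9;	/* WR code      -> MR0[11:9] */
}

int main(void)
{
	printf("tCL=11 tWR=12 -> %03x\n", ddr3_mr0_cas_wr(11, 12));
	return 0;
}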
727 | |||
728 | uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = { | ||
729 | 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11}; | ||
730 | uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = { | ||
731 | 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3}; | ||
732 | |||
733 | static int | ||
734 | nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq, | ||
735 | struct nouveau_pm_tbl_entry *e, u8 len, | ||
736 | struct nouveau_pm_memtiming *boot, | ||
737 | struct nouveau_pm_memtiming *t) | ||
738 | { | ||
739 | if (len < 15) { | ||
740 | t->drive_strength = boot->drive_strength; | ||
741 | t->odt = boot->odt; | ||
742 | } else { | ||
743 | t->drive_strength = (e->RAM_FT1 & 0x30) >> 4; | ||
744 | t->odt = e->RAM_FT1 & 0x07; | ||
745 | } | ||
746 | |||
747 | if (e->tCL >= NV_MEM_CL_GDDR3_MAX) { | ||
748 | NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); | ||
749 | return -ERANGE; | ||
750 | } | ||
751 | |||
752 | if (e->tWR >= NV_MEM_WR_GDDR3_MAX) { | ||
753 | NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); | ||
754 | return -ERANGE; | ||
755 | } | ||
756 | |||
757 | if (t->odt > 3) { | ||
758 | NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x", | ||
759 | t->id, t->odt); | ||
760 | t->odt = 0; | ||
761 | } | ||
762 | |||
763 | t->mr[0] = (boot->mr[0] & 0xe0b) | | ||
764 | /* CAS */ | ||
765 | ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) | | ||
766 | ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2); | ||
767 | t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength | | ||
768 | (t->odt << 2) | | ||
769 | (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4; | ||
770 | t->mr[2] = boot->mr[2]; | ||
771 | |||
772 | NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id, | ||
773 | t->mr[0], t->mr[1], t->mr[2]); | ||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static int | ||
778 | nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq, | ||
779 | struct nouveau_pm_tbl_entry *e, u8 len, | ||
780 | struct nouveau_pm_memtiming *boot, | ||
781 | struct nouveau_pm_memtiming *t) | ||
782 | { | ||
783 | if (len < 15) { | ||
784 | t->drive_strength = boot->drive_strength; | ||
785 | t->odt = boot->odt; | ||
786 | } else { | ||
787 | t->drive_strength = (e->RAM_FT1 & 0x30) >> 4; | ||
788 | t->odt = e->RAM_FT1 & 0x03; | ||
789 | } | ||
790 | |||
791 | if (e->tCL >= NV_MEM_CL_GDDR5_MAX) { | ||
792 | NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); | ||
793 | return -ERANGE; | ||
794 | } | ||
795 | |||
796 | if (e->tWR >= NV_MEM_WR_GDDR5_MAX) { | ||
797 | NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); | ||
798 | return -ERANGE; | ||
799 | } | ||
800 | |||
801 | if (t->odt > 3) { | ||
802 | NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x", | ||
803 | t->id, t->odt); | ||
804 | t->odt = 0; | ||
805 | } | ||
806 | |||
807 | t->mr[0] = (boot->mr[0] & 0x007) | | ||
808 | ((e->tCL - 5) << 3) | | ||
809 | ((e->tWR - 4) << 8); | ||
810 | t->mr[1] = (boot->mr[1] & 0x1007f0) | | ||
811 | t->drive_strength | | ||
812 | (t->odt << 2); | ||
813 | |||
814 | NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]); | ||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | int | ||
819 | nouveau_mem_timing_calc(struct drm_device *dev, u32 freq, | ||
820 | struct nouveau_pm_memtiming *t) | ||
631 | { | 821 | { |
632 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 822 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
633 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 823 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
634 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; | 824 | struct nouveau_pm_memtiming *boot = &pm->boot.timing; |
635 | struct nvbios *bios = &dev_priv->vbios; | 825 | struct nouveau_pm_tbl_entry *e; |
636 | struct bit_entry P; | 826 | u8 ver, len, *ptr, *ramcfg; |
637 | struct nouveau_pm_tbl_header *hdr = NULL; | 827 | int ret; |
638 | uint8_t magic_number; | 828 | |
639 | u8 *entry; | 829 | ptr = nouveau_perf_timing(dev, freq, &ver, &len); |
640 | int i; | 830 | if (!ptr || ptr[0] == 0x00) { |
831 | *t = *boot; | ||
832 | return 0; | ||
833 | } | ||
834 | e = (struct nouveau_pm_tbl_entry *)ptr; | ||
835 | |||
836 | t->tCWL = boot->tCWL; | ||
837 | |||
838 | switch (dev_priv->card_type) { | ||
839 | case NV_40: | ||
840 | ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t); | ||
841 | break; | ||
842 | case NV_50: | ||
843 | ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t); | ||
844 | break; | ||
845 | case NV_C0: | ||
846 | ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t); | ||
847 | break; | ||
848 | default: | ||
849 | ret = -ENODEV; | ||
850 | break; | ||
851 | } | ||
641 | 852 | ||
642 | if (bios->type == NVBIOS_BIT) { | 853 | switch (dev_priv->vram_type * !ret) { |
643 | if (bit_table(dev, 'P', &P)) | 854 | case NV_MEM_TYPE_GDDR3: |
644 | return; | 855 | ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t); |
856 | break; | ||
857 | case NV_MEM_TYPE_GDDR5: | ||
858 | ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t); | ||
859 | break; | ||
860 | case NV_MEM_TYPE_DDR2: | ||
861 | ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t); | ||
862 | break; | ||
863 | case NV_MEM_TYPE_DDR3: | ||
864 | ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t); | ||
865 | break; | ||
866 | default: | ||
867 | ret = -EINVAL; | ||
868 | break; | ||
869 | } | ||
870 | |||
871 | ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len); | ||
872 | if (ramcfg) { | ||
873 | int dll_off; | ||
645 | 874 | ||
646 | if (P.version == 1) | 875 | if (ver == 0x00) |
647 | hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]); | 876 | dll_off = !!(ramcfg[3] & 0x04); |
648 | else | 877 | else |
649 | if (P.version == 2) | 878 | dll_off = !!(ramcfg[2] & 0x40); |
650 | hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]); | 879 | |
651 | else { | 880 | switch (dev_priv->vram_type) { |
652 | NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); | 881 | case NV_MEM_TYPE_GDDR3: |
882 | t->mr[1] &= ~0x00000040; | ||
883 | t->mr[1] |= 0x00000040 * dll_off; | ||
884 | break; | ||
885 | default: | ||
886 | t->mr[1] &= ~0x00000001; | ||
887 | t->mr[1] |= 0x00000001 * dll_off; | ||
888 | break; | ||
653 | } | 889 | } |
890 | } | ||
891 | |||
892 | return ret; | ||
893 | } | ||
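nouveau_mem_timing_calc() chains the per-generation timing calculation with the per-memory-type MR generation through the selector switch (dev_priv->vram_type * !ret): if the first step failed, !ret is 0, the selector collapses to 0 (presumably NV_MEM_TYPE_UNKNOWN, which no case handles) and control drops to the default arm instead of generating MRs. A tiny sketch of that idiom with invented numeric type codes:

/* select_skip.c: the "switch (x * !ret)" idiom used above; any earlier
 * error forces the default arm because the selector becomes zero. */
#include <stdio.h>

static const char *pick(int vram_type, int ret)
{
	switch (vram_type * !ret) {
	case 1:  return "DDR2 path";	/* hypothetical type codes */
	case 2:  return "DDR3 path";
	default: return "skipped (unknown type or earlier error)";
	}
}

int main(void)
{
	printf("%s\n", pick(2, 0));	/* ok: DDR3 path      */
	printf("%s\n", pick(2, -1));	/* earlier failure     */
	return 0;
}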
894 | |||
895 | void | ||
896 | nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t) | ||
897 | { | ||
898 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
899 | u32 timing_base, timing_regs, mr_base; | ||
900 | int i; | ||
901 | |||
902 | if (dev_priv->card_type >= 0xC0) { | ||
903 | timing_base = 0x10f290; | ||
904 | mr_base = 0x10f300; | ||
654 | } else { | 905 | } else { |
655 | NV_DEBUG(dev, "BMP version too old for memory\n"); | 906 | timing_base = 0x100220; |
656 | return; | 907 | mr_base = 0x1002c0; |
657 | } | 908 | } |
658 | 909 | ||
659 | if (!hdr) { | 910 | t->id = -1; |
660 | NV_DEBUG(dev, "memory timing table pointer invalid\n"); | 911 | |
912 | switch (dev_priv->card_type) { | ||
913 | case NV_50: | ||
914 | timing_regs = 9; | ||
915 | break; | ||
916 | case NV_C0: | ||
917 | case NV_D0: | ||
918 | timing_regs = 5; | ||
919 | break; | ||
920 | case NV_30: | ||
921 | case NV_40: | ||
922 | timing_regs = 3; | ||
923 | break; | ||
924 | default: | ||
925 | timing_regs = 0; | ||
661 | return; | 926 | return; |
662 | } | 927 | } |
928 | for(i = 0; i < timing_regs; i++) | ||
929 | t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i)); | ||
930 | |||
931 | t->tCWL = 0; | ||
932 | if (dev_priv->card_type < NV_C0) { | ||
933 | t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1; | ||
934 | } else if (dev_priv->card_type <= NV_D0) { | ||
935 | t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7); | ||
936 | } | ||
663 | 937 | ||
664 | if (hdr->version != 0x10) { | 938 | t->mr[0] = nv_rd32(dev, mr_base); |
665 | NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version); | 939 | t->mr[1] = nv_rd32(dev, mr_base + 0x04); |
666 | return; | 940 | t->mr[2] = nv_rd32(dev, mr_base + 0x20); |
941 | t->mr[3] = nv_rd32(dev, mr_base + 0x24); | ||
942 | |||
943 | t->odt = 0; | ||
944 | t->drive_strength = 0; | ||
945 | |||
946 | switch (dev_priv->vram_type) { | ||
947 | case NV_MEM_TYPE_DDR3: | ||
948 | t->odt |= (t->mr[1] & 0x200) >> 7; | ||
949 | case NV_MEM_TYPE_DDR2: | ||
950 | t->odt |= (t->mr[1] & 0x04) >> 2 | | ||
951 | (t->mr[1] & 0x40) >> 5; | ||
952 | break; | ||
953 | case NV_MEM_TYPE_GDDR3: | ||
954 | case NV_MEM_TYPE_GDDR5: | ||
955 | t->drive_strength = t->mr[1] & 0x03; | ||
956 | t->odt = (t->mr[1] & 0x0c) >> 2; | ||
957 | break; | ||
958 | default: | ||
959 | break; | ||
667 | } | 960 | } |
961 | } | ||
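The ODT handling is symmetric: nouveau_mem_ddr3_mr() scatters the 3-bit odt value into MR1 bits 2, 6 and 9, and nouveau_mem_timing_read() above gathers those same bits back out of the boot MR1. A small self-check of that round trip (pure bit manipulation, no driver state):

/* odt_roundtrip.c: MR1 ODT bit shuffling as done by nouveau_mem_ddr3_mr()
 * (encode) and nouveau_mem_timing_read() (decode) above. */
#include <assert.h>
#include <stdio.h>

static unsigned encode(unsigned odt)	/* odt value -> MR1 bits 2,6,9 */
{
	return (odt & 0x1) << 2 | (odt & 0x2) << 5 | (odt & 0x4) << 7;
}

static unsigned decode(unsigned mr1)	/* MR1 bits 2,6,9 -> odt value */
{
	return (mr1 & 0x004) >> 2 | (mr1 & 0x040) >> 5 | (mr1 & 0x200) >> 7;
}

int main(void)
{
	for (unsigned odt = 0; odt < 8; odt++)
		assert(decode(encode(odt)) == odt);
	puts("DDR3 ODT encode/decode round-trips for values 0..7");
	return 0;
}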
668 | 962 | ||
669 | /* validate record length */ | 963 | int |
670 | if (hdr->entry_len < 15) { | 964 | nouveau_mem_exec(struct nouveau_mem_exec_func *exec, |
671 | NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len); | 965 | struct nouveau_pm_level *perflvl) |
672 | return; | 966 | { |
967 | struct drm_nouveau_private *dev_priv = exec->dev->dev_private; | ||
968 | struct nouveau_pm_memtiming *info = &perflvl->timing; | ||
969 | u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0; | ||
970 | u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] }; | ||
971 | u32 mr1_dlloff; | ||
972 | |||
973 | switch (dev_priv->vram_type) { | ||
974 | case NV_MEM_TYPE_DDR2: | ||
975 | tDLLK = 2000; | ||
976 | mr1_dlloff = 0x00000001; | ||
977 | break; | ||
978 | case NV_MEM_TYPE_DDR3: | ||
979 | tDLLK = 12000; | ||
980 | mr1_dlloff = 0x00000001; | ||
981 | break; | ||
982 | case NV_MEM_TYPE_GDDR3: | ||
983 | tDLLK = 40000; | ||
984 | mr1_dlloff = 0x00000040; | ||
985 | break; | ||
986 | default: | ||
987 | NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n"); | ||
988 | return -ENODEV; | ||
673 | } | 989 | } |
674 | 990 | ||
675 | /* parse vbios entries into common format */ | 991 | /* fetch current MRs */ |
676 | memtimings->timing = | 992 | switch (dev_priv->vram_type) { |
677 | kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL); | 993 | case NV_MEM_TYPE_GDDR3: |
678 | if (!memtimings->timing) | 994 | case NV_MEM_TYPE_DDR3: |
679 | return; | 995 | mr[2] = exec->mrg(exec, 2); |
996 | default: | ||
997 | mr[1] = exec->mrg(exec, 1); | ||
998 | mr[0] = exec->mrg(exec, 0); | ||
999 | break; | ||
1000 | } | ||
680 | 1001 | ||
681 | /* Get "some number" from the timing reg for NV_40 and NV_50 | 1002 | /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */ |
682 | * Used in calculations later... source unknown */ | 1003 | if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) { |
683 | magic_number = 0; | 1004 | exec->precharge(exec); |
684 | if (P.version == 1) { | 1005 | exec->mrs (exec, 1, mr[1] | mr1_dlloff); |
685 | magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24; | 1006 | exec->wait(exec, tMRD); |
686 | } | 1007 | } |
687 | 1008 | ||
688 | entry = (u8*) hdr + hdr->header_len; | 1009 | /* enter self-refresh mode */ |
689 | for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) { | 1010 | exec->precharge(exec); |
690 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; | 1011 | exec->refresh(exec); |
691 | if (entry[0] == 0) | 1012 | exec->refresh(exec); |
692 | continue; | 1013 | exec->refresh_auto(exec, false); |
1014 | exec->refresh_self(exec, true); | ||
1015 | exec->wait(exec, tCKSRE); | ||
1016 | |||
1017 | /* modify input clock frequency */ | ||
1018 | exec->clock_set(exec); | ||
1019 | |||
1020 | /* exit self-refresh mode */ | ||
1021 | exec->wait(exec, tCKSRX); | ||
1022 | exec->precharge(exec); | ||
1023 | exec->refresh_self(exec, false); | ||
1024 | exec->refresh_auto(exec, true); | ||
1025 | exec->wait(exec, tXS); | ||
1026 | |||
1027 | /* update MRs */ | ||
1028 | if (mr[2] != info->mr[2]) { | ||
1029 | exec->mrs (exec, 2, info->mr[2]); | ||
1030 | exec->wait(exec, tMRD); | ||
1031 | } | ||
1032 | |||
1033 | if (mr[1] != info->mr[1]) { | ||
1034 | /* need to keep DLL off until later, at least on GDDR3 */ | ||
1035 | exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff)); | ||
1036 | exec->wait(exec, tMRD); | ||
1037 | } | ||
1038 | |||
1039 | if (mr[0] != info->mr[0]) { | ||
1040 | exec->mrs (exec, 0, info->mr[0]); | ||
1041 | exec->wait(exec, tMRD); | ||
1042 | } | ||
693 | 1043 | ||
694 | timing->id = i; | 1044 | /* update PFB timing registers */ |
695 | timing->WR = entry[0]; | 1045 | exec->timing_set(exec); |
696 | timing->CL = entry[2]; | ||
697 | 1046 | ||
698 | if(dev_priv->card_type <= NV_40) { | 1047 | /* DLL (enable + ) reset */ |
699 | nv40_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]); | 1048 | if (!(info->mr[1] & mr1_dlloff)) { |
700 | } else if(dev_priv->card_type == NV_50){ | 1049 | if (mr[1] & mr1_dlloff) { |
701 | nv50_mem_timing_entry(dev,&P,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]); | 1050 | exec->mrs (exec, 1, info->mr[1]); |
702 | } else if(dev_priv->card_type == NV_C0) { | 1051 | exec->wait(exec, tMRD); |
703 | nvc0_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,&pm->memtimings.timing[i]); | ||
704 | } | 1052 | } |
1053 | exec->mrs (exec, 0, info->mr[0] | 0x00000100); | ||
1054 | exec->wait(exec, tMRD); | ||
1055 | exec->mrs (exec, 0, info->mr[0] | 0x00000000); | ||
1056 | exec->wait(exec, tMRD); | ||
1057 | exec->wait(exec, tDLLK); | ||
1058 | if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3) | ||
1059 | exec->precharge(exec); | ||
705 | } | 1060 | } |
706 | 1061 | ||
707 | memtimings->nr_timing = hdr->entry_cnt; | 1062 | return 0; |
708 | memtimings->supported = P.version == 1; | ||
709 | } | 1063 | } |
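nouveau_mem_exec() keeps the reclock sequence generic: chipset code supplies the callbacks declared in the nouveau_pm.h hunk further down, and this function only sequences them: quiesce and enter self-refresh, switch the memory clock, exit self-refresh, rewrite the mode registers, program the new PFB timings, then let the DLL relock. The stub below is a hypothetical userspace mock (printf placeholders, simplified struct, no DLL/MR special cases) meant only to make that ordering visible; it is not the driver interface:

/* mem_exec_order.c: mock of the callback ordering used by
 * nouveau_mem_exec() above; every callback is a printf placeholder. */
#include <stdbool.h>
#include <stdio.h>

struct exec_stub {
	void (*precharge)(void);
	void (*refresh)(void);
	void (*refresh_auto)(bool on);
	void (*refresh_self)(bool on);
	void (*wait)(unsigned nsec);
	void (*mrs)(int mr, unsigned data);
	void (*clock_set)(void);
	void (*timing_set)(void);
};

static void precharge(void)         { puts("precharge all banks"); }
static void refresh(void)           { puts("issue refresh"); }
static void refresh_auto(bool on)   { printf("auto-refresh %s\n", on ? "on" : "off"); }
static void refresh_self(bool on)   { printf("self-refresh %s\n", on ? "enter" : "exit"); }
static void wait_ns(unsigned nsec)  { printf("wait %uns\n", nsec); }
static void mrs(int mr, unsigned d) { printf("MRS: MR%d <- %06x\n", mr, d); }
static void clock_set(void)         { puts("switch memory clock"); }
static void timing_set(void)        { puts("write PFB timing registers"); }

int main(void)
{
	struct exec_stub e = { precharge, refresh, refresh_auto, refresh_self,
			       wait_ns, mrs, clock_set, timing_set };

	e.precharge(); e.refresh(); e.refresh();	/* quiesce DRAM       */
	e.refresh_auto(false); e.refresh_self(true);	/* enter self-refresh */
	e.clock_set();					/* reclock while idle */
	e.precharge(); e.refresh_self(false); e.refresh_auto(true);
	e.mrs(2, 0); e.mrs(1, 0); e.mrs(0, 0);		/* reprogram MRs      */
	e.timing_set();					/* new PFB timings    */
	e.wait(12000);					/* let the DLL relock */
	return 0;
}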
710 | 1064 | ||
711 | void | 1065 | int |
712 | nouveau_mem_timing_fini(struct drm_device *dev) | 1066 | nouveau_mem_vbios_type(struct drm_device *dev) |
713 | { | 1067 | { |
714 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 1068 | struct bit_entry M; |
715 | struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; | 1069 | u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2; |
1070 | if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) { | ||
1071 | u8 *table = ROMPTR(dev, M.data[3]); | ||
1072 | if (table && table[0] == 0x10 && ramcfg < table[3]) { | ||
1073 | u8 *entry = table + table[1] + (ramcfg * table[2]); | ||
1074 | switch (entry[0] & 0x0f) { | ||
1075 | case 0: return NV_MEM_TYPE_DDR2; | ||
1076 | case 1: return NV_MEM_TYPE_DDR3; | ||
1077 | case 2: return NV_MEM_TYPE_GDDR3; | ||
1078 | case 3: return NV_MEM_TYPE_GDDR5; | ||
1079 | default: | ||
1080 | break; | ||
1081 | } | ||
716 | 1082 | ||
717 | if(mem->timing) { | 1083 | } |
718 | kfree(mem->timing); | ||
719 | mem->timing = NULL; | ||
720 | } | 1084 | } |
1085 | return NV_MEM_TYPE_UNKNOWN; | ||
721 | } | 1086 | } |
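nouveau_mem_vbios_type() reads the ramcfg strap from bits 5:2 of register 0x101000 and uses it to index the BIT 'M' v2 table: byte 1 gives the header length, byte 2 the per-entry stride, byte 3 the entry count, and the low nibble of the selected entry names the DRAM type. A sketch of that walk over an invented in-memory table; only the offsets mirror the code:

/* vbios_m_walk.c: illustrative walk of a BIT 'M' v2 table as done by
 * nouveau_mem_vbios_type() above; the table contents are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* [0]=version 0x10, [1]=header len, [2]=entry stride, [3]=entry count */
	uint8_t table[] = { 0x10, 0x05, 0x02, 0x04, 0x00,
			    0x00, 0x00,	  /* entry 0: DDR2  */
			    0x01, 0x00,	  /* entry 1: DDR3  */
			    0x02, 0x00,	  /* entry 2: GDDR3 */
			    0x03, 0x00 }; /* entry 3: GDDR5 */
	uint32_t boot0 = 0x00000008;		     /* pretend nv_rd32(0x101000) */
	uint8_t ramcfg = (boot0 & 0x0000003c) >> 2;  /* strap = 2 */

	if (table[0] == 0x10 && ramcfg < table[3]) {
		uint8_t *entry = table + table[1] + ramcfg * table[2];
		printf("ramcfg %u -> memtype %u\n", ramcfg, entry[0] & 0x0f);
	}
	return 0;
}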
722 | 1087 | ||
723 | static int | 1088 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c index e5a64f0f4cb7..07d0d1e03690 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mxm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c | |||
@@ -582,6 +582,35 @@ mxm_shadow_dsm(struct drm_device *dev, u8 version) | |||
582 | 582 | ||
583 | #define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0" | 583 | #define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0" |
584 | 584 | ||
585 | static u8 | ||
586 | wmi_wmmx_mxmi(struct drm_device *dev, u8 version) | ||
587 | { | ||
588 | u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 }; | ||
589 | struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args }; | ||
590 | struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
591 | union acpi_object *obj; | ||
592 | acpi_status status; | ||
593 | |||
594 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); | ||
595 | if (ACPI_FAILURE(status)) { | ||
596 | MXM_DBG(dev, "WMMX MXMI returned %d\n", status); | ||
597 | return 0x00; | ||
598 | } | ||
599 | |||
600 | obj = retn.pointer; | ||
601 | if (obj->type == ACPI_TYPE_INTEGER) { | ||
602 | version = obj->integer.value; | ||
603 | MXM_DBG(dev, "WMMX MXMI version %d.%d\n", | ||
604 | (version >> 4), version & 0x0f); | ||
605 | } else { | ||
606 | version = 0; | ||
607 | MXM_DBG(dev, "WMMX MXMI returned non-integer\n"); | ||
608 | } | ||
609 | |||
610 | kfree(obj); | ||
611 | return version; | ||
612 | } | ||
613 | |||
585 | static bool | 614 | static bool |
586 | mxm_shadow_wmi(struct drm_device *dev, u8 version) | 615 | mxm_shadow_wmi(struct drm_device *dev, u8 version) |
587 | { | 616 | { |
@@ -592,7 +621,15 @@ mxm_shadow_wmi(struct drm_device *dev, u8 version) | |||
592 | union acpi_object *obj; | 621 | union acpi_object *obj; |
593 | acpi_status status; | 622 | acpi_status status; |
594 | 623 | ||
595 | if (!wmi_has_guid(WMI_WMMX_GUID)) | 624 | if (!wmi_has_guid(WMI_WMMX_GUID)) { |
625 | MXM_DBG(dev, "WMMX GUID not found\n"); | ||
626 | return false; | ||
627 | } | ||
628 | |||
629 | mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00); | ||
630 | if (!mxms_args[1]) | ||
631 | mxms_args[1] = wmi_wmmx_mxmi(dev, version); | ||
632 | if (!mxms_args[1]) | ||
596 | return false; | 633 | return false; |
597 | 634 | ||
598 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); | 635 | status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 58f497343cec..69a528d106e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c | |||
@@ -27,6 +27,178 @@ | |||
27 | #include "nouveau_drv.h" | 27 | #include "nouveau_drv.h" |
28 | #include "nouveau_pm.h" | 28 | #include "nouveau_pm.h" |
29 | 29 | ||
30 | static u8 * | ||
31 | nouveau_perf_table(struct drm_device *dev, u8 *ver) | ||
32 | { | ||
33 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
34 | struct nvbios *bios = &dev_priv->vbios; | ||
35 | struct bit_entry P; | ||
36 | |||
37 | if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) { | ||
38 | u8 *perf = ROMPTR(dev, P.data[0]); | ||
39 | if (perf) { | ||
40 | *ver = perf[0]; | ||
41 | return perf; | ||
42 | } | ||
43 | } | ||
44 | |||
45 | if (bios->type == NVBIOS_BMP) { | ||
46 | if (bios->data[bios->offset + 6] >= 0x25) { | ||
47 | u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]); | ||
48 | if (perf) { | ||
49 | *ver = perf[1]; | ||
50 | return perf; | ||
51 | } | ||
52 | } | ||
53 | } | ||
54 | |||
55 | return NULL; | ||
56 | } | ||
57 | |||
58 | static u8 * | ||
59 | nouveau_perf_entry(struct drm_device *dev, int idx, | ||
60 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | ||
61 | { | ||
62 | u8 *perf = nouveau_perf_table(dev, ver); | ||
63 | if (perf) { | ||
64 | if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) { | ||
65 | *hdr = perf[3]; | ||
66 | *cnt = 0; | ||
67 | *len = 0; | ||
68 | return perf + perf[0] + idx * perf[3]; | ||
69 | } else | ||
70 | if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) { | ||
71 | *hdr = perf[3]; | ||
72 | *cnt = perf[4]; | ||
73 | *len = perf[5]; | ||
74 | return perf + perf[1] + idx * (*hdr + (*cnt * *len)); | ||
75 | } else | ||
76 | if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) { | ||
77 | *hdr = perf[2]; | ||
78 | *cnt = perf[4]; | ||
79 | *len = perf[3]; | ||
80 | return perf + perf[1] + idx * (*hdr + (*cnt * *len)); | ||
81 | } | ||
82 | } | ||
83 | return NULL; | ||
84 | } | ||
85 | |||
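For the 0x20-0x3f performance tables, nouveau_perf_entry() above computes entry addresses from three header bytes: per-entry header length (perf[3]), sub-entry count (perf[4]) and sub-entry length (perf[5]), with entry i starting at perf + perf[1] + i * (hdr + cnt * len). A quick illustration of that stride arithmetic with made-up header values:

/* perf_entry_offset.c: offset arithmetic used by nouveau_perf_entry()
 * for version 0x20..0x3f tables; the header values are invented. */
#include <stdio.h>

int main(void)
{
	unsigned hdr_off = 8;	/* perf[1]: offset of first entry  */
	unsigned hdr     = 10;	/* perf[3]: per-entry header bytes */
	unsigned cnt     = 4;	/* perf[4]: sub-entries per entry  */
	unsigned len     = 4;	/* perf[5]: bytes per sub-entry    */

	for (unsigned idx = 0; idx < 3; idx++)
		printf("entry %u starts at byte %u\n",
		       idx, hdr_off + idx * (hdr + cnt * len));
	return 0;
}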
86 | static u8 * | ||
87 | nouveau_perf_rammap(struct drm_device *dev, u32 freq, | ||
88 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len) | ||
89 | { | ||
90 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
91 | struct bit_entry P; | ||
92 | u8 *perf, i = 0; | ||
93 | |||
94 | if (!bit_table(dev, 'P', &P) && P.version == 2) { | ||
95 | u8 *rammap = ROMPTR(dev, P.data[4]); | ||
96 | if (rammap) { | ||
97 | u8 *ramcfg = rammap + rammap[1]; | ||
98 | |||
99 | *ver = rammap[0]; | ||
100 | *hdr = rammap[2]; | ||
101 | *cnt = rammap[4]; | ||
102 | *len = rammap[3]; | ||
103 | |||
104 | freq /= 1000; | ||
105 | for (i = 0; i < rammap[5]; i++) { | ||
106 | if (freq >= ROM16(ramcfg[0]) && | ||
107 | freq <= ROM16(ramcfg[2])) | ||
108 | return ramcfg; | ||
109 | |||
110 | ramcfg += *hdr + (*cnt * *len); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | return NULL; | ||
115 | } | ||
116 | |||
117 | if (dev_priv->chipset == 0x49 || | ||
118 | dev_priv->chipset == 0x4b) | ||
119 | freq /= 2; | ||
120 | |||
121 | while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) { | ||
122 | if (*ver >= 0x20 && *ver < 0x25) { | ||
123 | if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000) | ||
124 | break; | ||
125 | } else | ||
126 | if (*ver >= 0x25 && *ver < 0x40) { | ||
127 | if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000) | ||
128 | break; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | if (perf) { | ||
133 | u8 *ramcfg = perf + *hdr; | ||
134 | *ver = 0x00; | ||
135 | *hdr = 0; | ||
136 | return ramcfg; | ||
137 | } | ||
138 | |||
139 | return NULL; | ||
140 | } | ||
141 | |||
142 | u8 * | ||
143 | nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) | ||
144 | { | ||
145 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
146 | struct nvbios *bios = &dev_priv->vbios; | ||
147 | u8 strap, hdr, cnt; | ||
148 | u8 *rammap; | ||
149 | |||
150 | strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2; | ||
151 | if (bios->ram_restrict_tbl_ptr) | ||
152 | strap = bios->data[bios->ram_restrict_tbl_ptr + strap]; | ||
153 | |||
154 | rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len); | ||
155 | if (rammap && strap < cnt) | ||
156 | return rammap + hdr + (strap * *len); | ||
157 | |||
158 | return NULL; | ||
159 | } | ||
160 | |||
161 | u8 * | ||
162 | nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) | ||
163 | { | ||
164 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
165 | struct nvbios *bios = &dev_priv->vbios; | ||
166 | struct bit_entry P; | ||
167 | u8 *perf, *timing = NULL; | ||
168 | u8 i = 0, hdr, cnt; | ||
169 | |||
170 | if (bios->type == NVBIOS_BMP) { | ||
171 | while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt, | ||
172 | len)) && *ver == 0x15) { | ||
173 | if (freq <= ROM32(perf[5]) * 20) { | ||
174 | *ver = 0x00; | ||
175 | *len = 14; | ||
176 | return perf + 41; | ||
177 | } | ||
178 | } | ||
179 | return NULL; | ||
180 | } | ||
181 | |||
182 | if (!bit_table(dev, 'P', &P)) { | ||
183 | if (P.version == 1) | ||
184 | timing = ROMPTR(dev, P.data[4]); | ||
185 | else | ||
186 | if (P.version == 2) | ||
187 | timing = ROMPTR(dev, P.data[8]); | ||
188 | } | ||
189 | |||
190 | if (timing && timing[0] == 0x10) { | ||
191 | u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len); | ||
192 | if (ramcfg && ramcfg[1] < timing[2]) { | ||
193 | *ver = timing[0]; | ||
194 | *len = timing[3]; | ||
195 | return timing + timing[1] + (ramcfg[1] * timing[3]); | ||
196 | } | ||
197 | } | ||
198 | |||
199 | return NULL; | ||
200 | } | ||
201 | |||
30 | static void | 202 | static void |
31 | legacy_perf_init(struct drm_device *dev) | 203 | legacy_perf_init(struct drm_device *dev) |
32 | { | 204 | { |
@@ -72,74 +244,11 @@ legacy_perf_init(struct drm_device *dev) | |||
72 | pm->nr_perflvl = 1; | 244 | pm->nr_perflvl = 1; |
73 | } | 245 | } |
74 | 246 | ||
75 | static struct nouveau_pm_memtiming * | ||
76 | nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P, | ||
77 | u16 memclk, u8 *entry, u8 recordlen, u8 entries) | ||
78 | { | ||
79 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
80 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | ||
81 | struct nvbios *bios = &dev_priv->vbios; | ||
82 | u8 ramcfg; | ||
83 | int i; | ||
84 | |||
85 | /* perf v2 has a separate "timing map" table, we have to match | ||
86 | * the target memory clock to a specific entry, *then* use | ||
87 | * ramcfg to select the correct subentry | ||
88 | */ | ||
89 | if (P->version == 2) { | ||
90 | u8 *tmap = ROMPTR(dev, P->data[4]); | ||
91 | if (!tmap) { | ||
92 | NV_DEBUG(dev, "no timing map pointer\n"); | ||
93 | return NULL; | ||
94 | } | ||
95 | |||
96 | if (tmap[0] != 0x10) { | ||
97 | NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]); | ||
98 | return NULL; | ||
99 | } | ||
100 | |||
101 | entry = tmap + tmap[1]; | ||
102 | recordlen = tmap[2] + (tmap[4] * tmap[3]); | ||
103 | for (i = 0; i < tmap[5]; i++, entry += recordlen) { | ||
104 | if (memclk >= ROM16(entry[0]) && | ||
105 | memclk <= ROM16(entry[2])) | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | if (i == tmap[5]) { | ||
110 | NV_WARN(dev, "no match in timing map table\n"); | ||
111 | return NULL; | ||
112 | } | ||
113 | |||
114 | entry += tmap[2]; | ||
115 | recordlen = tmap[3]; | ||
116 | entries = tmap[4]; | ||
117 | } | ||
118 | |||
119 | ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2; | ||
120 | if (bios->ram_restrict_tbl_ptr) | ||
121 | ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg]; | ||
122 | |||
123 | if (ramcfg >= entries) { | ||
124 | NV_WARN(dev, "ramcfg strap out of bounds!\n"); | ||
125 | return NULL; | ||
126 | } | ||
127 | |||
128 | entry += ramcfg * recordlen; | ||
129 | if (entry[1] >= pm->memtimings.nr_timing) { | ||
130 | if (entry[1] != 0xff) | ||
131 | NV_WARN(dev, "timingset %d does not exist\n", entry[1]); | ||
132 | return NULL; | ||
133 | } | ||
134 | |||
135 | return &pm->memtimings.timing[entry[1]]; | ||
136 | } | ||
137 | |||
138 | static void | 247 | static void |
139 | nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P, | 248 | nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl) |
140 | struct nouveau_pm_level *perflvl) | ||
141 | { | 249 | { |
142 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 250 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
251 | struct bit_entry P; | ||
143 | u8 *vmap; | 252 | u8 *vmap; |
144 | int id; | 253 | int id; |
145 | 254 | ||
@@ -158,13 +267,13 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P, | |||
158 | /* on newer ones, the perflvl stores an index into yet another | 267 | /* on newer ones, the perflvl stores an index into yet another |
159 | * vbios table containing a min/max voltage value for the perflvl | 268 | * vbios table containing a min/max voltage value for the perflvl |
160 | */ | 269 | */ |
161 | if (P->version != 2 || P->length < 34) { | 270 | if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) { |
162 | NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n", | 271 | NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n", |
163 | P->version, P->length); | 272 | P.version, P.length); |
164 | return; | 273 | return; |
165 | } | 274 | } |
166 | 275 | ||
167 | vmap = ROMPTR(dev, P->data[32]); | 276 | vmap = ROMPTR(dev, P.data[32]); |
168 | if (!vmap) { | 277 | if (!vmap) { |
169 | NV_DEBUG(dev, "volt map table pointer invalid\n"); | 278 | NV_DEBUG(dev, "volt map table pointer invalid\n"); |
170 | return; | 279 | return; |
@@ -183,130 +292,70 @@ nouveau_perf_init(struct drm_device *dev) | |||
183 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 292 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
184 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 293 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
185 | struct nvbios *bios = &dev_priv->vbios; | 294 | struct nvbios *bios = &dev_priv->vbios; |
186 | struct bit_entry P; | 295 | u8 *perf, ver, hdr, cnt, len; |
187 | struct nouveau_pm_memtimings *memtimings = &pm->memtimings; | 296 | int ret, vid, i = -1; |
188 | struct nouveau_pm_tbl_header mt_hdr; | ||
189 | u8 version, headerlen, recordlen, entries; | ||
190 | u8 *perf, *entry; | ||
191 | int vid, i; | ||
192 | |||
193 | if (bios->type == NVBIOS_BIT) { | ||
194 | if (bit_table(dev, 'P', &P)) | ||
195 | return; | ||
196 | |||
197 | if (P.version != 1 && P.version != 2) { | ||
198 | NV_WARN(dev, "unknown perf for BIT P %d\n", P.version); | ||
199 | return; | ||
200 | } | ||
201 | |||
202 | perf = ROMPTR(dev, P.data[0]); | ||
203 | version = perf[0]; | ||
204 | headerlen = perf[1]; | ||
205 | if (version < 0x40) { | ||
206 | recordlen = perf[3] + (perf[4] * perf[5]); | ||
207 | entries = perf[2]; | ||
208 | |||
209 | pm->pwm_divisor = ROM16(perf[6]); | ||
210 | } else { | ||
211 | recordlen = perf[2] + (perf[3] * perf[4]); | ||
212 | entries = perf[5]; | ||
213 | } | ||
214 | } else { | ||
215 | if (bios->data[bios->offset + 6] < 0x25) { | ||
216 | legacy_perf_init(dev); | ||
217 | return; | ||
218 | } | ||
219 | 297 | ||
220 | perf = ROMPTR(dev, bios->data[bios->offset + 0x94]); | 298 | if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) { |
221 | if (!perf) { | 299 | legacy_perf_init(dev); |
222 | NV_DEBUG(dev, "perf table pointer invalid\n"); | 300 | return; |
223 | return; | ||
224 | } | ||
225 | |||
226 | version = perf[1]; | ||
227 | headerlen = perf[0]; | ||
228 | recordlen = perf[3]; | ||
229 | entries = perf[2]; | ||
230 | } | ||
231 | |||
232 | if (entries > NOUVEAU_PM_MAX_LEVEL) { | ||
233 | NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n"); | ||
234 | entries = NOUVEAU_PM_MAX_LEVEL; | ||
235 | } | 301 | } |
236 | 302 | ||
237 | entry = perf + headerlen; | 303 | perf = nouveau_perf_table(dev, &ver); |
238 | 304 | if (ver >= 0x20 && ver < 0x40) | |
239 | /* For version 0x15, initialize memtiming table */ | 305 | pm->fan.pwm_divisor = ROM16(perf[6]); |
240 | if(version == 0x15) { | ||
241 | memtimings->timing = | ||
242 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); | ||
243 | if (!memtimings->timing) { | ||
244 | NV_WARN(dev,"Could not allocate memtiming table\n"); | ||
245 | return; | ||
246 | } | ||
247 | |||
248 | mt_hdr.entry_cnt = entries; | ||
249 | mt_hdr.entry_len = 14; | ||
250 | mt_hdr.version = version; | ||
251 | mt_hdr.header_len = 4; | ||
252 | } | ||
253 | 306 | ||
254 | for (i = 0; i < entries; i++) { | 307 | while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) { |
255 | struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; | 308 | struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; |
256 | 309 | ||
257 | perflvl->timing = NULL; | 310 | if (perf[0] == 0xff) |
258 | |||
259 | if (entry[0] == 0xff) { | ||
260 | entry += recordlen; | ||
261 | continue; | 311 | continue; |
262 | } | ||
263 | 312 | ||
264 | switch (version) { | 313 | switch (ver) { |
265 | case 0x12: | 314 | case 0x12: |
266 | case 0x13: | 315 | case 0x13: |
267 | case 0x15: | 316 | case 0x15: |
268 | perflvl->fanspeed = entry[55]; | 317 | perflvl->fanspeed = perf[55]; |
269 | if (recordlen > 56) | 318 | if (hdr > 56) |
270 | perflvl->volt_min = entry[56]; | 319 | perflvl->volt_min = perf[56]; |
271 | perflvl->core = ROM32(entry[1]) * 10; | 320 | perflvl->core = ROM32(perf[1]) * 10; |
272 | perflvl->memory = ROM32(entry[5]) * 20; | 321 | perflvl->memory = ROM32(perf[5]) * 20; |
273 | break; | 322 | break; |
274 | case 0x21: | 323 | case 0x21: |
275 | case 0x23: | 324 | case 0x23: |
276 | case 0x24: | 325 | case 0x24: |
277 | perflvl->fanspeed = entry[4]; | 326 | perflvl->fanspeed = perf[4]; |
278 | perflvl->volt_min = entry[5]; | 327 | perflvl->volt_min = perf[5]; |
279 | perflvl->shader = ROM16(entry[6]) * 1000; | 328 | perflvl->shader = ROM16(perf[6]) * 1000; |
280 | perflvl->core = perflvl->shader; | 329 | perflvl->core = perflvl->shader; |
281 | perflvl->core += (signed char)entry[8] * 1000; | 330 | perflvl->core += (signed char)perf[8] * 1000; |
282 | if (dev_priv->chipset == 0x49 || | 331 | if (dev_priv->chipset == 0x49 || |
283 | dev_priv->chipset == 0x4b) | 332 | dev_priv->chipset == 0x4b) |
284 | perflvl->memory = ROM16(entry[11]) * 1000; | 333 | perflvl->memory = ROM16(perf[11]) * 1000; |
285 | else | 334 | else |
286 | perflvl->memory = ROM16(entry[11]) * 2000; | 335 | perflvl->memory = ROM16(perf[11]) * 2000; |
287 | break; | 336 | break; |
288 | case 0x25: | 337 | case 0x25: |
289 | perflvl->fanspeed = entry[4]; | 338 | perflvl->fanspeed = perf[4]; |
290 | perflvl->volt_min = entry[5]; | 339 | perflvl->volt_min = perf[5]; |
291 | perflvl->core = ROM16(entry[6]) * 1000; | 340 | perflvl->core = ROM16(perf[6]) * 1000; |
292 | perflvl->shader = ROM16(entry[10]) * 1000; | 341 | perflvl->shader = ROM16(perf[10]) * 1000; |
293 | perflvl->memory = ROM16(entry[12]) * 1000; | 342 | perflvl->memory = ROM16(perf[12]) * 1000; |
294 | break; | 343 | break; |
295 | case 0x30: | 344 | case 0x30: |
296 | perflvl->memscript = ROM16(entry[2]); | 345 | perflvl->memscript = ROM16(perf[2]); |
297 | case 0x35: | 346 | case 0x35: |
298 | perflvl->fanspeed = entry[6]; | 347 | perflvl->fanspeed = perf[6]; |
299 | perflvl->volt_min = entry[7]; | 348 | perflvl->volt_min = perf[7]; |
300 | perflvl->core = ROM16(entry[8]) * 1000; | 349 | perflvl->core = ROM16(perf[8]) * 1000; |
301 | perflvl->shader = ROM16(entry[10]) * 1000; | 350 | perflvl->shader = ROM16(perf[10]) * 1000; |
302 | perflvl->memory = ROM16(entry[12]) * 1000; | 351 | perflvl->memory = ROM16(perf[12]) * 1000; |
303 | perflvl->vdec = ROM16(entry[16]) * 1000; | 352 | perflvl->vdec = ROM16(perf[16]) * 1000; |
304 | perflvl->dom6 = ROM16(entry[20]) * 1000; | 353 | perflvl->dom6 = ROM16(perf[20]) * 1000; |
305 | break; | 354 | break; |
306 | case 0x40: | 355 | case 0x40: |
307 | #define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000 | 356 | #define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000) |
308 | perflvl->fanspeed = 0; /*XXX*/ | 357 | perflvl->fanspeed = 0; /*XXX*/ |
309 | perflvl->volt_min = entry[2]; | 358 | perflvl->volt_min = perf[2]; |
310 | if (dev_priv->card_type == NV_50) { | 359 | if (dev_priv->card_type == NV_50) { |
311 | perflvl->core = subent(0); | 360 | perflvl->core = subent(0); |
312 | perflvl->shader = subent(1); | 361 | perflvl->shader = subent(1); |
@@ -329,36 +378,34 @@ nouveau_perf_init(struct drm_device *dev) | |||
329 | } | 378 | } |
330 | 379 | ||
331 | /* make sure vid is valid */ | 380 | /* make sure vid is valid */ |
332 | nouveau_perf_voltage(dev, &P, perflvl); | 381 | nouveau_perf_voltage(dev, perflvl); |
333 | if (pm->voltage.supported && perflvl->volt_min) { | 382 | if (pm->voltage.supported && perflvl->volt_min) { |
334 | vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); | 383 | vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); |
335 | if (vid < 0) { | 384 | if (vid < 0) { |
336 | NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); | 385 | NV_DEBUG(dev, "perflvl %d, bad vid\n", i); |
337 | entry += recordlen; | ||
338 | continue; | 386 | continue; |
339 | } | 387 | } |
340 | } | 388 | } |
341 | 389 | ||
342 | /* get the corresponding memory timings */ | 390 | /* get the corresponding memory timings */ |
343 | if (version == 0x15) { | 391 | ret = nouveau_mem_timing_calc(dev, perflvl->memory, |
344 | memtimings->timing[i].id = i; | 392 | &perflvl->timing); |
345 | nv30_mem_timing_entry(dev,&mt_hdr,(struct nouveau_pm_tbl_entry*) &entry[41],0,&memtimings->timing[i]); | 393 | if (ret) { |
346 | perflvl->timing = &memtimings->timing[i]; | 394 | NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret); |
347 | } else if (version > 0x15) { | 395 | continue; |
348 | /* last 3 args are for < 0x40, ignored for >= 0x40 */ | ||
349 | perflvl->timing = | ||
350 | nouveau_perf_timing(dev, &P, | ||
351 | perflvl->memory / 1000, | ||
352 | entry + perf[3], | ||
353 | perf[5], perf[4]); | ||
354 | } | 396 | } |
355 | 397 | ||
356 | snprintf(perflvl->name, sizeof(perflvl->name), | 398 | snprintf(perflvl->name, sizeof(perflvl->name), |
357 | "performance_level_%d", i); | 399 | "performance_level_%d", i); |
358 | perflvl->id = i; | 400 | perflvl->id = i; |
359 | pm->nr_perflvl++; | ||
360 | 401 | ||
361 | entry += recordlen; | 402 | snprintf(perflvl->profile.name, sizeof(perflvl->profile.name), |
403 | "%d", perflvl->id); | ||
404 | perflvl->profile.func = &nouveau_pm_static_profile_func; | ||
405 | list_add_tail(&perflvl->profile.head, &pm->profiles); | ||
406 | |||
407 | |||
408 | pm->nr_perflvl++; | ||
362 | } | 409 | } |
363 | } | 410 | } |
364 | 411 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 9064d7f19794..34d591b7d4ef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -50,7 +50,7 @@ nouveau_pwmfan_get(struct drm_device *dev) | |||
50 | ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); | 50 | ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); |
51 | if (ret == 0) { | 51 | if (ret == 0) { |
52 | ret = pm->pwm_get(dev, gpio.line, &divs, &duty); | 52 | ret = pm->pwm_get(dev, gpio.line, &divs, &duty); |
53 | if (ret == 0) { | 53 | if (ret == 0 && divs) { |
54 | divs = max(divs, duty); | 54 | divs = max(divs, duty); |
55 | if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) | 55 | if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) |
56 | duty = divs - duty; | 56 | duty = divs - duty; |
@@ -77,7 +77,7 @@ nouveau_pwmfan_set(struct drm_device *dev, int percent) | |||
77 | 77 | ||
78 | ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); | 78 | ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); |
79 | if (ret == 0) { | 79 | if (ret == 0) { |
80 | divs = pm->pwm_divisor; | 80 | divs = pm->fan.pwm_divisor; |
81 | if (pm->fan.pwm_freq) { | 81 | if (pm->fan.pwm_freq) { |
82 | /*XXX: PNVIO clock more than likely... */ | 82 | /*XXX: PNVIO clock more than likely... */ |
83 | divs = 135000 / pm->fan.pwm_freq; | 83 | divs = 135000 / pm->fan.pwm_freq; |
@@ -89,7 +89,10 @@ nouveau_pwmfan_set(struct drm_device *dev, int percent) | |||
89 | if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) | 89 | if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1)) |
90 | duty = divs - duty; | 90 | duty = divs - duty; |
91 | 91 | ||
92 | return pm->pwm_set(dev, gpio.line, divs, duty); | 92 | ret = pm->pwm_set(dev, gpio.line, divs, duty); |
93 | if (!ret) | ||
94 | pm->fan.percent = percent; | ||
95 | return ret; | ||
93 | } | 96 | } |
94 | 97 | ||
95 | return -ENODEV; | 98 | return -ENODEV; |
@@ -144,9 +147,13 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
144 | return ret; | 147 | return ret; |
145 | 148 | ||
146 | state = pm->clocks_pre(dev, perflvl); | 149 | state = pm->clocks_pre(dev, perflvl); |
147 | if (IS_ERR(state)) | 150 | if (IS_ERR(state)) { |
148 | return PTR_ERR(state); | 151 | ret = PTR_ERR(state); |
149 | pm->clocks_set(dev, state); | 152 | goto error; |
153 | } | ||
154 | ret = pm->clocks_set(dev, state); | ||
155 | if (ret) | ||
156 | goto error; | ||
150 | 157 | ||
151 | ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); | 158 | ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); |
152 | if (ret) | 159 | if (ret) |
@@ -154,6 +161,65 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
154 | 161 | ||
155 | pm->cur = perflvl; | 162 | pm->cur = perflvl; |
156 | return 0; | 163 | return 0; |
164 | |||
165 | error: | ||
166 | /* restore the fan speed and voltage before leaving */ | ||
167 | nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); | ||
168 | return ret; | ||
169 | } | ||
170 | |||
171 | void | ||
172 | nouveau_pm_trigger(struct drm_device *dev) | ||
173 | { | ||
174 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
175 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | ||
176 | struct nouveau_pm_profile *profile = NULL; | ||
177 | struct nouveau_pm_level *perflvl = NULL; | ||
178 | int ret; | ||
179 | |||
180 | /* select power profile based on current power source */ | ||
181 | if (power_supply_is_system_supplied()) | ||
182 | profile = pm->profile_ac; | ||
183 | else | ||
184 | profile = pm->profile_dc; | ||
185 | |||
186 | if (profile != pm->profile) { | ||
187 | pm->profile->func->fini(pm->profile); | ||
188 | pm->profile = profile; | ||
189 | pm->profile->func->init(pm->profile); | ||
190 | } | ||
191 | |||
192 | /* select performance level based on profile */ | ||
193 | perflvl = profile->func->select(profile); | ||
194 | |||
195 | /* change perflvl, if necessary */ | ||
196 | if (perflvl != pm->cur) { | ||
197 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
198 | u64 time0 = ptimer->read(dev); | ||
199 | |||
200 | NV_INFO(dev, "setting performance level: %d", perflvl->id); | ||
201 | ret = nouveau_pm_perflvl_set(dev, perflvl); | ||
202 | if (ret) | ||
203 | NV_INFO(dev, "> reclocking failed: %d\n\n", ret); | ||
204 | |||
205 | NV_INFO(dev, "> reclocking took %lluns\n\n", | ||
206 | ptimer->read(dev) - time0); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | static struct nouveau_pm_profile * | ||
211 | profile_find(struct drm_device *dev, const char *string) | ||
212 | { | ||
213 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
214 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | ||
215 | struct nouveau_pm_profile *profile; | ||
216 | |||
217 | list_for_each_entry(profile, &pm->profiles, head) { | ||
218 | if (!strncmp(profile->name, string, sizeof(profile->name))) | ||
219 | return profile; | ||
220 | } | ||
221 | |||
222 | return NULL; | ||
157 | } | 223 | } |
158 | 224 | ||
159 | static int | 225 | static int |
@@ -161,33 +227,54 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile) | |||
161 | { | 227 | { |
162 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 228 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
163 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 229 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
164 | struct nouveau_pm_level *perflvl = NULL; | 230 | struct nouveau_pm_profile *ac = NULL, *dc = NULL; |
231 | char string[16], *cur = string, *ptr; | ||
165 | 232 | ||
166 | /* safety precaution, for now */ | 233 | /* safety precaution, for now */ |
167 | if (nouveau_perflvl_wr != 7777) | 234 | if (nouveau_perflvl_wr != 7777) |
168 | return -EPERM; | 235 | return -EPERM; |
169 | 236 | ||
170 | if (!strncmp(profile, "boot", 4)) | 237 | strncpy(string, profile, sizeof(string)); |
171 | perflvl = &pm->boot; | 238 | if ((ptr = strchr(string, '\n'))) |
172 | else { | 239 | *ptr = '\0'; |
173 | int pl = simple_strtol(profile, NULL, 10); | ||
174 | int i; | ||
175 | 240 | ||
176 | for (i = 0; i < pm->nr_perflvl; i++) { | 241 | ptr = strsep(&cur, ","); |
177 | if (pm->perflvl[i].id == pl) { | 242 | if (ptr) |
178 | perflvl = &pm->perflvl[i]; | 243 | ac = profile_find(dev, ptr); |
179 | break; | ||
180 | } | ||
181 | } | ||
182 | 244 | ||
183 | if (!perflvl) | 245 | ptr = strsep(&cur, ","); |
184 | return -EINVAL; | 246 | if (ptr) |
185 | } | 247 | dc = profile_find(dev, ptr); |
248 | else | ||
249 | dc = ac; | ||
250 | |||
251 | if (ac == NULL || dc == NULL) | ||
252 | return -EINVAL; | ||
253 | |||
254 | pm->profile_ac = ac; | ||
255 | pm->profile_dc = dc; | ||
256 | nouveau_pm_trigger(dev); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static void | ||
261 | nouveau_pm_static_dummy(struct nouveau_pm_profile *profile) | ||
262 | { | ||
263 | } | ||
186 | 264 | ||
187 | NV_INFO(dev, "setting performance level: %s\n", profile); | 265 | static struct nouveau_pm_level * |
188 | return nouveau_pm_perflvl_set(dev, perflvl); | 266 | nouveau_pm_static_select(struct nouveau_pm_profile *profile) |
267 | { | ||
268 | return container_of(profile, struct nouveau_pm_level, profile); | ||
189 | } | 269 | } |
190 | 270 | ||
271 | const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = { | ||
272 | .destroy = nouveau_pm_static_dummy, | ||
273 | .init = nouveau_pm_static_dummy, | ||
274 | .fini = nouveau_pm_static_dummy, | ||
275 | .select = nouveau_pm_static_select, | ||
276 | }; | ||
277 | |||
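nouveau_pm_profile_set() above now takes either a single profile name or an "ac,dc" pair: the input is copied into a 16-byte buffer, a trailing newline is stripped, strsep() splits on ',' and the DC profile defaults to the AC one when only one name is given. A userspace sketch of just that string handling; profile lookup and the surrounding driver state are left out:

/* profile_parse.c: mirrors the "ac[,dc]" parsing in
 * nouveau_pm_profile_set() above; no driver state involved. */
#define _DEFAULT_SOURCE	/* for strsep() */
#include <stdio.h>
#include <string.h>

static void parse(const char *input)
{
	char string[16], *cur = string, *ptr;
	const char *ac, *dc;

	strncpy(string, input, sizeof(string) - 1);
	string[sizeof(string) - 1] = '\0';
	if ((ptr = strchr(string, '\n')))
		*ptr = '\0';

	ac = strsep(&cur, ",");
	dc = strsep(&cur, ",");
	if (!dc)
		dc = ac;	/* single name: same profile on AC and DC */

	printf("AC=\"%s\" DC=\"%s\"\n", ac, dc);
}

int main(void)
{
	parse("boot\n");
	parse("3,boot");
	return 0;
}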
191 | static int | 278 | static int |
192 | nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | 279 | nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) |
193 | { | 280 | { |
@@ -197,9 +284,11 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
197 | 284 | ||
198 | memset(perflvl, 0, sizeof(*perflvl)); | 285 | memset(perflvl, 0, sizeof(*perflvl)); |
199 | 286 | ||
200 | ret = pm->clocks_get(dev, perflvl); | 287 | if (pm->clocks_get) { |
201 | if (ret) | 288 | ret = pm->clocks_get(dev, perflvl); |
202 | return ret; | 289 | if (ret) |
290 | return ret; | ||
291 | } | ||
203 | 292 | ||
204 | if (pm->voltage.supported && pm->voltage_get) { | 293 | if (pm->voltage.supported && pm->voltage_get) { |
205 | ret = pm->voltage_get(dev); | 294 | ret = pm->voltage_get(dev); |
@@ -213,13 +302,14 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
213 | if (ret > 0) | 302 | if (ret > 0) |
214 | perflvl->fanspeed = ret; | 303 | perflvl->fanspeed = ret; |
215 | 304 | ||
305 | nouveau_mem_timing_read(dev, &perflvl->timing); | ||
216 | return 0; | 306 | return 0; |
217 | } | 307 | } |
218 | 308 | ||
219 | static void | 309 | static void |
220 | nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) | 310 | nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) |
221 | { | 311 | { |
222 | char c[16], s[16], v[32], f[16], t[16], m[16]; | 312 | char c[16], s[16], v[32], f[16], m[16]; |
223 | 313 | ||
224 | c[0] = '\0'; | 314 | c[0] = '\0'; |
225 | if (perflvl->core) | 315 | if (perflvl->core) |
@@ -247,18 +337,15 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) | |||
247 | if (perflvl->fanspeed) | 337 | if (perflvl->fanspeed) |
248 | snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); | 338 | snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); |
249 | 339 | ||
250 | t[0] = '\0'; | 340 | snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f); |
251 | if (perflvl->timing) | ||
252 | snprintf(t, sizeof(t), " timing %d", perflvl->timing->id); | ||
253 | |||
254 | snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f); | ||
255 | } | 341 | } |
256 | 342 | ||
257 | static ssize_t | 343 | static ssize_t |
258 | nouveau_pm_get_perflvl_info(struct device *d, | 344 | nouveau_pm_get_perflvl_info(struct device *d, |
259 | struct device_attribute *a, char *buf) | 345 | struct device_attribute *a, char *buf) |
260 | { | 346 | { |
261 | struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a; | 347 | struct nouveau_pm_level *perflvl = |
348 | container_of(a, struct nouveau_pm_level, dev_attr); | ||
262 | char *ptr = buf; | 349 | char *ptr = buf; |
263 | int len = PAGE_SIZE; | 350 | int len = PAGE_SIZE; |
264 | 351 | ||
@@ -280,12 +367,8 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) | |||
280 | int len = PAGE_SIZE, ret; | 367 | int len = PAGE_SIZE, ret; |
281 | char *ptr = buf; | 368 | char *ptr = buf; |
282 | 369 | ||
283 | if (!pm->cur) | 370 | snprintf(ptr, len, "profile: %s, %s\nc:", |
284 | snprintf(ptr, len, "setting: boot\n"); | 371 | pm->profile_ac->name, pm->profile_dc->name); |
285 | else if (pm->cur == &pm->boot) | ||
286 | snprintf(ptr, len, "setting: boot\nc:"); | ||
287 | else | ||
288 | snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id); | ||
289 | ptr += strlen(buf); | 372 | ptr += strlen(buf); |
290 | len -= strlen(buf); | 373 | len -= strlen(buf); |
291 | 374 | ||
@@ -397,7 +480,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, | |||
397 | struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; | 480 | struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; |
398 | long value; | 481 | long value; |
399 | 482 | ||
400 | if (strict_strtol(buf, 10, &value) == -EINVAL) | 483 | if (kstrtol(buf, 10, &value) == -EINVAL) |
401 | return count; | 484 | return count; |
402 | 485 | ||
403 | temp->down_clock = value/1000; | 486 | temp->down_clock = value/1000; |
@@ -432,7 +515,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, | |||
432 | struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; | 515 | struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; |
433 | long value; | 516 | long value; |
434 | 517 | ||
435 | if (strict_strtol(buf, 10, &value) == -EINVAL) | 518 | if (kstrtol(buf, 10, &value) == -EINVAL) |
436 | return count; | 519 | return count; |
437 | 520 | ||
438 | temp->critical = value/1000; | 521 | temp->critical = value/1000; |
@@ -529,7 +612,7 @@ nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a, | |||
529 | if (nouveau_perflvl_wr != 7777) | 612 | if (nouveau_perflvl_wr != 7777) |
530 | return -EPERM; | 613 | return -EPERM; |
531 | 614 | ||
532 | if (strict_strtol(buf, 10, &value) == -EINVAL) | 615 | if (kstrtol(buf, 10, &value) == -EINVAL) |
533 | return -EINVAL; | 616 | return -EINVAL; |
534 | 617 | ||
535 | if (value < pm->fan.min_duty) | 618 | if (value < pm->fan.min_duty) |
@@ -568,7 +651,7 @@ nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a, | |||
568 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 651 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
569 | long value; | 652 | long value; |
570 | 653 | ||
571 | if (strict_strtol(buf, 10, &value) == -EINVAL) | 654 | if (kstrtol(buf, 10, &value) == -EINVAL) |
572 | return -EINVAL; | 655 | return -EINVAL; |
573 | 656 | ||
574 | if (value < 0) | 657 | if (value < 0) |
@@ -609,7 +692,7 @@ nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a, | |||
609 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 692 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
610 | long value; | 693 | long value; |
611 | 694 | ||
612 | if (strict_strtol(buf, 10, &value) == -EINVAL) | 695 | if (kstrtol(buf, 10, &value) == -EINVAL) |
613 | return -EINVAL; | 696 | return -EINVAL; |
614 | 697 | ||
615 | if (value < 0) | 698 | if (value < 0) |
@@ -731,8 +814,10 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
731 | 814 | ||
732 | if (pm->hwmon) { | 815 | if (pm->hwmon) { |
733 | sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); | 816 | sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); |
734 | sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup); | 817 | sysfs_remove_group(&dev->pdev->dev.kobj, |
735 | sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup); | 818 | &hwmon_pwm_fan_attrgroup); |
819 | sysfs_remove_group(&dev->pdev->dev.kobj, | ||
820 | &hwmon_fan_rpm_attrgroup); | ||
736 | 821 | ||
737 | hwmon_device_unregister(pm->hwmon); | 822 | hwmon_device_unregister(pm->hwmon); |
738 | } | 823 | } |
@@ -752,6 +837,7 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) | |||
752 | bool ac = power_supply_is_system_supplied(); | 837 | bool ac = power_supply_is_system_supplied(); |
753 | 838 | ||
754 | NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC"); | 839 | NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC"); |
840 | nouveau_pm_trigger(dev); | ||
755 | } | 841 | } |
756 | 842 | ||
757 | return NOTIFY_OK; | 843 | return NOTIFY_OK; |
@@ -766,35 +852,48 @@ nouveau_pm_init(struct drm_device *dev) | |||
766 | char info[256]; | 852 | char info[256]; |
767 | int ret, i; | 853 | int ret, i; |
768 | 854 | ||
769 | nouveau_mem_timing_init(dev); | 855 | /* parse aux tables from vbios */ |
770 | nouveau_volt_init(dev); | 856 | nouveau_volt_init(dev); |
771 | nouveau_perf_init(dev); | ||
772 | nouveau_temp_init(dev); | 857 | nouveau_temp_init(dev); |
773 | 858 | ||
859 | /* determine current ("boot") performance level */ | ||
860 | ret = nouveau_pm_perflvl_get(dev, &pm->boot); | ||
861 | if (ret) { | ||
862 | NV_ERROR(dev, "failed to determine boot perflvl\n"); | ||
863 | return ret; | ||
864 | } | ||
865 | |||
866 | strncpy(pm->boot.name, "boot", 4); | ||
867 | strncpy(pm->boot.profile.name, "boot", 4); | ||
868 | pm->boot.profile.func = &nouveau_pm_static_profile_func; | ||
869 | |||
870 | INIT_LIST_HEAD(&pm->profiles); | ||
871 | list_add(&pm->boot.profile.head, &pm->profiles); | ||
872 | |||
873 | pm->profile_ac = &pm->boot.profile; | ||
874 | pm->profile_dc = &pm->boot.profile; | ||
875 | pm->profile = &pm->boot.profile; | ||
876 | pm->cur = &pm->boot; | ||
877 | |||
878 | /* add performance levels from vbios */ | ||
879 | nouveau_perf_init(dev); | ||
880 | |||
881 | /* display available performance levels */ | ||
774 | NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); | 882 | NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); |
775 | for (i = 0; i < pm->nr_perflvl; i++) { | 883 | for (i = 0; i < pm->nr_perflvl; i++) { |
776 | nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); | 884 | nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); |
777 | NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); | 885 | NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); |
778 | } | 886 | } |
779 | 887 | ||
780 | /* determine current ("boot") performance level */ | 888 | nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); |
781 | ret = nouveau_pm_perflvl_get(dev, &pm->boot); | 889 | NV_INFO(dev, "c:%s", info); |
782 | if (ret == 0) { | ||
783 | strncpy(pm->boot.name, "boot", 4); | ||
784 | pm->cur = &pm->boot; | ||
785 | |||
786 | nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); | ||
787 | NV_INFO(dev, "c:%s", info); | ||
788 | } | ||
789 | 890 | ||
790 | /* switch performance levels now if requested */ | 891 | /* switch performance levels now if requested */ |
791 | if (nouveau_perflvl != NULL) { | 892 | if (nouveau_perflvl != NULL) |
792 | ret = nouveau_pm_profile_set(dev, nouveau_perflvl); | 893 | nouveau_pm_profile_set(dev, nouveau_perflvl); |
793 | if (ret) { | 894 | |
794 | NV_ERROR(dev, "error setting perflvl \"%s\": %d\n", | 895 | /* determine the current fan speed */ |
795 | nouveau_perflvl, ret); | 896 | pm->fan.percent = nouveau_pwmfan_get(dev); |
796 | } | ||
797 | } | ||
798 | 897 | ||
799 | nouveau_sysfs_init(dev); | 898 | nouveau_sysfs_init(dev); |
800 | nouveau_hwmon_init(dev); | 899 | nouveau_hwmon_init(dev); |
@@ -811,6 +910,12 @@ nouveau_pm_fini(struct drm_device *dev) | |||
811 | { | 910 | { |
812 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 911 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
813 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 912 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
913 | struct nouveau_pm_profile *profile, *tmp; | ||
914 | |||
915 | list_for_each_entry_safe(profile, tmp, &pm->profiles, head) { | ||
916 | list_del(&profile->head); | ||
917 | profile->func->destroy(profile); | ||
918 | } | ||
814 | 919 | ||
815 | if (pm->cur != &pm->boot) | 920 | if (pm->cur != &pm->boot) |
816 | nouveau_pm_perflvl_set(dev, &pm->boot); | 921 | nouveau_pm_perflvl_set(dev, &pm->boot); |
@@ -818,7 +923,6 @@ nouveau_pm_fini(struct drm_device *dev) | |||
818 | nouveau_temp_fini(dev); | 923 | nouveau_temp_fini(dev); |
819 | nouveau_perf_fini(dev); | 924 | nouveau_perf_fini(dev); |
820 | nouveau_volt_fini(dev); | 925 | nouveau_volt_fini(dev); |
821 | nouveau_mem_timing_fini(dev); | ||
822 | 926 | ||
823 | #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) | 927 | #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) |
824 | unregister_acpi_notifier(&pm->acpi_nb); | 928 | unregister_acpi_notifier(&pm->acpi_nb); |
@@ -840,4 +944,5 @@ nouveau_pm_resume(struct drm_device *dev) | |||
840 | perflvl = pm->cur; | 944 | perflvl = pm->cur; |
841 | pm->cur = &pm->boot; | 945 | pm->cur = &pm->boot; |
842 | nouveau_pm_perflvl_set(dev, perflvl); | 946 | nouveau_pm_perflvl_set(dev, perflvl); |
947 | nouveau_pwmfan_set(dev, pm->fan.percent); | ||
843 | } | 948 | } |
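
The nouveau_pm_fini() hunk above walks pm->profiles with list_for_each_entry_safe() so each profile can be unlinked and destroyed without invalidating the iterator. A minimal standalone sketch of the same idiom, using a hand-rolled singly linked list instead of the kernel's <linux/list.h> helpers (the struct and names here are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct profile {
	char name[16];
	struct profile *next;
};

/* Delete every node while walking the list: grab the successor before
 * freeing the current node, which is exactly what the kernel's
 * list_for_each_entry_safe() arranges for you. */
static void destroy_all(struct profile **head)
{
	struct profile *p = *head, *tmp;

	while (p) {
		tmp = p->next;          /* remember successor first */
		printf("destroying %s\n", p->name);
		free(p);                /* now the node can go away */
		p = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct profile *head = NULL;
	const char *names[] = { "boot", "auto", "low" };

	for (int i = 2; i >= 0; i--) {
		struct profile *p = calloc(1, sizeof(*p));
		if (!p)
			return 1;
		snprintf(p->name, sizeof(p->name), "%s", names[i]);
		p->next = head;
		head = p;
	}

	destroy_all(&head);
	return 0;
}
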
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h index 2f8e14fbcff8..3f82dfea61dd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.h +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h | |||
@@ -25,10 +25,30 @@ | |||
25 | #ifndef __NOUVEAU_PM_H__ | 25 | #ifndef __NOUVEAU_PM_H__ |
26 | #define __NOUVEAU_PM_H__ | 26 | #define __NOUVEAU_PM_H__ |
27 | 27 | ||
28 | struct nouveau_mem_exec_func { | ||
29 | struct drm_device *dev; | ||
30 | void (*precharge)(struct nouveau_mem_exec_func *); | ||
31 | void (*refresh)(struct nouveau_mem_exec_func *); | ||
32 | void (*refresh_auto)(struct nouveau_mem_exec_func *, bool); | ||
33 | void (*refresh_self)(struct nouveau_mem_exec_func *, bool); | ||
34 | void (*wait)(struct nouveau_mem_exec_func *, u32 nsec); | ||
35 | u32 (*mrg)(struct nouveau_mem_exec_func *, int mr); | ||
36 | void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data); | ||
37 | void (*clock_set)(struct nouveau_mem_exec_func *); | ||
38 | void (*timing_set)(struct nouveau_mem_exec_func *); | ||
39 | void *priv; | ||
40 | }; | ||
41 | |||
42 | /* nouveau_mem.c */ | ||
43 | int nouveau_mem_exec(struct nouveau_mem_exec_func *, | ||
44 | struct nouveau_pm_level *); | ||
45 | |||
28 | /* nouveau_pm.c */ | 46 | /* nouveau_pm.c */ |
29 | int nouveau_pm_init(struct drm_device *dev); | 47 | int nouveau_pm_init(struct drm_device *dev); |
30 | void nouveau_pm_fini(struct drm_device *dev); | 48 | void nouveau_pm_fini(struct drm_device *dev); |
31 | void nouveau_pm_resume(struct drm_device *dev); | 49 | void nouveau_pm_resume(struct drm_device *dev); |
50 | extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func; | ||
51 | void nouveau_pm_trigger(struct drm_device *dev); | ||
32 | 52 | ||
33 | /* nouveau_volt.c */ | 53 | /* nouveau_volt.c */ |
34 | void nouveau_volt_init(struct drm_device *); | 54 | void nouveau_volt_init(struct drm_device *); |
@@ -41,6 +61,8 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage); | |||
41 | /* nouveau_perf.c */ | 61 | /* nouveau_perf.c */ |
42 | void nouveau_perf_init(struct drm_device *); | 62 | void nouveau_perf_init(struct drm_device *); |
43 | void nouveau_perf_fini(struct drm_device *); | 63 | void nouveau_perf_fini(struct drm_device *); |
64 | u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len); | ||
65 | u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len); | ||
44 | 66 | ||
45 | /* nouveau_mem.c */ | 67 | /* nouveau_mem.c */ |
46 | void nouveau_mem_timing_init(struct drm_device *); | 68 | void nouveau_mem_timing_init(struct drm_device *); |
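
The new struct nouveau_mem_exec_func above is an operations table: nouveau_mem_exec() drives the common memory-reclock sequence and reaches back into chipset-specific code through these pointers, with priv carrying backend state. A small standalone sketch of that pattern, with made-up callbacks (demo_precharge, demo_wait) standing in for the real hwsq-emitting ones seen later in nv50_pm.c:

#include <stdio.h>

struct mem_exec_func {
	void (*precharge)(struct mem_exec_func *);
	void (*wait)(struct mem_exec_func *, unsigned nsec);
	void *priv;                     /* backend-private state */
};

/* Generic sequence: knows the order of operations, not the hardware. */
static void mem_exec(struct mem_exec_func *exec)
{
	exec->precharge(exec);
	exec->wait(exec, 12000);
}

/* Backend callbacks; a real implementation would append hwsq opcodes. */
static void demo_precharge(struct mem_exec_func *exec)
{
	printf("%s: precharge all banks\n", (const char *)exec->priv);
}

static void demo_wait(struct mem_exec_func *exec, unsigned nsec)
{
	printf("%s: wait %u ns\n", (const char *)exec->priv, nsec);
}

int main(void)
{
	struct mem_exec_func exec = {
		.precharge = demo_precharge,
		.wait      = demo_wait,
		.priv      = "nv50-demo",
	};

	mem_exec(&exec);
	return 0;
}
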
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f80c5e0762ff..9c144fb8bbba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -87,7 +87,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
87 | engine->pm.clocks_get = nv04_pm_clocks_get; | 87 | engine->pm.clocks_get = nv04_pm_clocks_get; |
88 | engine->pm.clocks_pre = nv04_pm_clocks_pre; | 88 | engine->pm.clocks_pre = nv04_pm_clocks_pre; |
89 | engine->pm.clocks_set = nv04_pm_clocks_set; | 89 | engine->pm.clocks_set = nv04_pm_clocks_set; |
90 | engine->vram.init = nouveau_mem_detect; | 90 | engine->vram.init = nv04_fb_vram_init; |
91 | engine->vram.takedown = nouveau_stub_takedown; | 91 | engine->vram.takedown = nouveau_stub_takedown; |
92 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 92 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
93 | break; | 93 | break; |
@@ -134,7 +134,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
134 | engine->pm.clocks_get = nv04_pm_clocks_get; | 134 | engine->pm.clocks_get = nv04_pm_clocks_get; |
135 | engine->pm.clocks_pre = nv04_pm_clocks_pre; | 135 | engine->pm.clocks_pre = nv04_pm_clocks_pre; |
136 | engine->pm.clocks_set = nv04_pm_clocks_set; | 136 | engine->pm.clocks_set = nv04_pm_clocks_set; |
137 | engine->vram.init = nouveau_mem_detect; | 137 | if (dev_priv->chipset == 0x1a || |
138 | dev_priv->chipset == 0x1f) | ||
139 | engine->vram.init = nv1a_fb_vram_init; | ||
140 | else | ||
141 | engine->vram.init = nv10_fb_vram_init; | ||
138 | engine->vram.takedown = nouveau_stub_takedown; | 142 | engine->vram.takedown = nouveau_stub_takedown; |
139 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 143 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
140 | break; | 144 | break; |
@@ -153,11 +157,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
153 | engine->timer.init = nv04_timer_init; | 157 | engine->timer.init = nv04_timer_init; |
154 | engine->timer.read = nv04_timer_read; | 158 | engine->timer.read = nv04_timer_read; |
155 | engine->timer.takedown = nv04_timer_takedown; | 159 | engine->timer.takedown = nv04_timer_takedown; |
156 | engine->fb.init = nv10_fb_init; | 160 | engine->fb.init = nv20_fb_init; |
157 | engine->fb.takedown = nv10_fb_takedown; | 161 | engine->fb.takedown = nv20_fb_takedown; |
158 | engine->fb.init_tile_region = nv10_fb_init_tile_region; | 162 | engine->fb.init_tile_region = nv20_fb_init_tile_region; |
159 | engine->fb.set_tile_region = nv10_fb_set_tile_region; | 163 | engine->fb.set_tile_region = nv20_fb_set_tile_region; |
160 | engine->fb.free_tile_region = nv10_fb_free_tile_region; | 164 | engine->fb.free_tile_region = nv20_fb_free_tile_region; |
161 | engine->fifo.channels = 32; | 165 | engine->fifo.channels = 32; |
162 | engine->fifo.init = nv10_fifo_init; | 166 | engine->fifo.init = nv10_fifo_init; |
163 | engine->fifo.takedown = nv04_fifo_fini; | 167 | engine->fifo.takedown = nv04_fifo_fini; |
@@ -181,7 +185,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
181 | engine->pm.clocks_get = nv04_pm_clocks_get; | 185 | engine->pm.clocks_get = nv04_pm_clocks_get; |
182 | engine->pm.clocks_pre = nv04_pm_clocks_pre; | 186 | engine->pm.clocks_pre = nv04_pm_clocks_pre; |
183 | engine->pm.clocks_set = nv04_pm_clocks_set; | 187 | engine->pm.clocks_set = nv04_pm_clocks_set; |
184 | engine->vram.init = nouveau_mem_detect; | 188 | engine->vram.init = nv20_fb_vram_init; |
185 | engine->vram.takedown = nouveau_stub_takedown; | 189 | engine->vram.takedown = nouveau_stub_takedown; |
186 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 190 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
187 | break; | 191 | break; |
@@ -230,7 +234,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
230 | engine->pm.clocks_set = nv04_pm_clocks_set; | 234 | engine->pm.clocks_set = nv04_pm_clocks_set; |
231 | engine->pm.voltage_get = nouveau_voltage_gpio_get; | 235 | engine->pm.voltage_get = nouveau_voltage_gpio_get; |
232 | engine->pm.voltage_set = nouveau_voltage_gpio_set; | 236 | engine->pm.voltage_set = nouveau_voltage_gpio_set; |
233 | engine->vram.init = nouveau_mem_detect; | 237 | engine->vram.init = nv20_fb_vram_init; |
234 | engine->vram.takedown = nouveau_stub_takedown; | 238 | engine->vram.takedown = nouveau_stub_takedown; |
235 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 239 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
236 | break; | 240 | break; |
@@ -286,7 +290,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
286 | engine->pm.temp_get = nv40_temp_get; | 290 | engine->pm.temp_get = nv40_temp_get; |
287 | engine->pm.pwm_get = nv40_pm_pwm_get; | 291 | engine->pm.pwm_get = nv40_pm_pwm_get; |
288 | engine->pm.pwm_set = nv40_pm_pwm_set; | 292 | engine->pm.pwm_set = nv40_pm_pwm_set; |
289 | engine->vram.init = nouveau_mem_detect; | 293 | engine->vram.init = nv40_fb_vram_init; |
290 | engine->vram.takedown = nouveau_stub_takedown; | 294 | engine->vram.takedown = nouveau_stub_takedown; |
291 | engine->vram.flags_valid = nouveau_mem_flags_valid; | 295 | engine->vram.flags_valid = nouveau_mem_flags_valid; |
292 | break; | 296 | break; |
@@ -588,47 +592,45 @@ nouveau_card_init(struct drm_device *dev) | |||
588 | nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); | 592 | nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); |
589 | } | 593 | } |
590 | 594 | ||
591 | nouveau_pm_init(dev); | 595 | /* PMC */ |
592 | 596 | ret = engine->mc.init(dev); | |
593 | ret = engine->vram.init(dev); | ||
594 | if (ret) | 597 | if (ret) |
595 | goto out_bios; | 598 | goto out_bios; |
596 | 599 | ||
597 | ret = nouveau_gpuobj_init(dev); | 600 | /* PTIMER */ |
601 | ret = engine->timer.init(dev); | ||
598 | if (ret) | 602 | if (ret) |
599 | goto out_vram; | 603 | goto out_mc; |
600 | 604 | ||
601 | ret = engine->instmem.init(dev); | 605 | /* PFB */ |
606 | ret = engine->fb.init(dev); | ||
602 | if (ret) | 607 | if (ret) |
603 | goto out_gpuobj; | 608 | goto out_timer; |
604 | 609 | ||
605 | ret = nouveau_mem_vram_init(dev); | 610 | ret = engine->vram.init(dev); |
606 | if (ret) | 611 | if (ret) |
607 | goto out_instmem; | 612 | goto out_fb; |
608 | 613 | ||
609 | ret = nouveau_mem_gart_init(dev); | 614 | /* PGPIO */ |
615 | ret = nouveau_gpio_create(dev); | ||
610 | if (ret) | 616 | if (ret) |
611 | goto out_ttmvram; | 617 | goto out_vram; |
612 | 618 | ||
613 | /* PMC */ | 619 | ret = nouveau_gpuobj_init(dev); |
614 | ret = engine->mc.init(dev); | ||
615 | if (ret) | 620 | if (ret) |
616 | goto out_gart; | 621 | goto out_gpio; |
617 | 622 | ||
618 | /* PGPIO */ | 623 | ret = engine->instmem.init(dev); |
619 | ret = nouveau_gpio_create(dev); | ||
620 | if (ret) | 624 | if (ret) |
621 | goto out_mc; | 625 | goto out_gpuobj; |
622 | 626 | ||
623 | /* PTIMER */ | 627 | ret = nouveau_mem_vram_init(dev); |
624 | ret = engine->timer.init(dev); | ||
625 | if (ret) | 628 | if (ret) |
626 | goto out_gpio; | 629 | goto out_instmem; |
627 | 630 | ||
628 | /* PFB */ | 631 | ret = nouveau_mem_gart_init(dev); |
629 | ret = engine->fb.init(dev); | ||
630 | if (ret) | 632 | if (ret) |
631 | goto out_timer; | 633 | goto out_ttmvram; |
632 | 634 | ||
633 | if (!dev_priv->noaccel) { | 635 | if (!dev_priv->noaccel) { |
634 | switch (dev_priv->card_type) { | 636 | switch (dev_priv->card_type) { |
@@ -734,11 +736,12 @@ nouveau_card_init(struct drm_device *dev) | |||
734 | goto out_irq; | 736 | goto out_irq; |
735 | 737 | ||
736 | nouveau_backlight_init(dev); | 738 | nouveau_backlight_init(dev); |
739 | nouveau_pm_init(dev); | ||
737 | 740 | ||
738 | if (dev_priv->eng[NVOBJ_ENGINE_GR]) { | 741 | if (dev_priv->eng[NVOBJ_ENGINE_GR]) { |
739 | ret = nouveau_fence_init(dev); | 742 | ret = nouveau_fence_init(dev); |
740 | if (ret) | 743 | if (ret) |
741 | goto out_disp; | 744 | goto out_pm; |
742 | 745 | ||
743 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, | 746 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL, |
744 | NvDmaFB, NvDmaTT); | 747 | NvDmaFB, NvDmaTT); |
@@ -762,7 +765,8 @@ out_chan: | |||
762 | nouveau_channel_put_unlocked(&dev_priv->channel); | 765 | nouveau_channel_put_unlocked(&dev_priv->channel); |
763 | out_fence: | 766 | out_fence: |
764 | nouveau_fence_fini(dev); | 767 | nouveau_fence_fini(dev); |
765 | out_disp: | 768 | out_pm: |
769 | nouveau_pm_fini(dev); | ||
766 | nouveau_backlight_exit(dev); | 770 | nouveau_backlight_exit(dev); |
767 | nouveau_display_destroy(dev); | 771 | nouveau_display_destroy(dev); |
768 | out_irq: | 772 | out_irq: |
@@ -779,15 +783,6 @@ out_engine: | |||
779 | dev_priv->eng[e]->destroy(dev,e ); | 783 | dev_priv->eng[e]->destroy(dev,e ); |
780 | } | 784 | } |
781 | } | 785 | } |
782 | |||
783 | engine->fb.takedown(dev); | ||
784 | out_timer: | ||
785 | engine->timer.takedown(dev); | ||
786 | out_gpio: | ||
787 | nouveau_gpio_destroy(dev); | ||
788 | out_mc: | ||
789 | engine->mc.takedown(dev); | ||
790 | out_gart: | ||
791 | nouveau_mem_gart_fini(dev); | 786 | nouveau_mem_gart_fini(dev); |
792 | out_ttmvram: | 787 | out_ttmvram: |
793 | nouveau_mem_vram_fini(dev); | 788 | nouveau_mem_vram_fini(dev); |
@@ -795,10 +790,17 @@ out_instmem: | |||
795 | engine->instmem.takedown(dev); | 790 | engine->instmem.takedown(dev); |
796 | out_gpuobj: | 791 | out_gpuobj: |
797 | nouveau_gpuobj_takedown(dev); | 792 | nouveau_gpuobj_takedown(dev); |
793 | out_gpio: | ||
794 | nouveau_gpio_destroy(dev); | ||
798 | out_vram: | 795 | out_vram: |
799 | engine->vram.takedown(dev); | 796 | engine->vram.takedown(dev); |
797 | out_fb: | ||
798 | engine->fb.takedown(dev); | ||
799 | out_timer: | ||
800 | engine->timer.takedown(dev); | ||
801 | out_mc: | ||
802 | engine->mc.takedown(dev); | ||
800 | out_bios: | 803 | out_bios: |
801 | nouveau_pm_fini(dev); | ||
802 | nouveau_bios_takedown(dev); | 804 | nouveau_bios_takedown(dev); |
803 | out_display_early: | 805 | out_display_early: |
804 | engine->display.late_takedown(dev); | 806 | engine->display.late_takedown(dev); |
@@ -823,6 +825,7 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
823 | nouveau_fence_fini(dev); | 825 | nouveau_fence_fini(dev); |
824 | } | 826 | } |
825 | 827 | ||
828 | nouveau_pm_fini(dev); | ||
826 | nouveau_backlight_exit(dev); | 829 | nouveau_backlight_exit(dev); |
827 | nouveau_display_destroy(dev); | 830 | nouveau_display_destroy(dev); |
828 | 831 | ||
@@ -835,11 +838,6 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
835 | } | 838 | } |
836 | } | 839 | } |
837 | } | 840 | } |
838 | engine->fb.takedown(dev); | ||
839 | engine->timer.takedown(dev); | ||
840 | nouveau_gpio_destroy(dev); | ||
841 | engine->mc.takedown(dev); | ||
842 | engine->display.late_takedown(dev); | ||
843 | 841 | ||
844 | if (dev_priv->vga_ram) { | 842 | if (dev_priv->vga_ram) { |
845 | nouveau_bo_unpin(dev_priv->vga_ram); | 843 | nouveau_bo_unpin(dev_priv->vga_ram); |
@@ -855,12 +853,17 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
855 | 853 | ||
856 | engine->instmem.takedown(dev); | 854 | engine->instmem.takedown(dev); |
857 | nouveau_gpuobj_takedown(dev); | 855 | nouveau_gpuobj_takedown(dev); |
858 | engine->vram.takedown(dev); | ||
859 | 856 | ||
860 | nouveau_irq_fini(dev); | 857 | nouveau_gpio_destroy(dev); |
858 | engine->vram.takedown(dev); | ||
859 | engine->fb.takedown(dev); | ||
860 | engine->timer.takedown(dev); | ||
861 | engine->mc.takedown(dev); | ||
861 | 862 | ||
862 | nouveau_pm_fini(dev); | ||
863 | nouveau_bios_takedown(dev); | 863 | nouveau_bios_takedown(dev); |
864 | engine->display.late_takedown(dev); | ||
865 | |||
866 | nouveau_irq_fini(dev); | ||
864 | 867 | ||
865 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 868 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
866 | } | 869 | } |
@@ -990,7 +993,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev) | |||
990 | int nouveau_load(struct drm_device *dev, unsigned long flags) | 993 | int nouveau_load(struct drm_device *dev, unsigned long flags) |
991 | { | 994 | { |
992 | struct drm_nouveau_private *dev_priv; | 995 | struct drm_nouveau_private *dev_priv; |
993 | uint32_t reg0, strap; | 996 | uint32_t reg0 = ~0, strap; |
994 | resource_size_t mmio_start_offs; | 997 | resource_size_t mmio_start_offs; |
995 | int ret; | 998 | int ret; |
996 | 999 | ||
@@ -1002,15 +1005,72 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
1002 | dev->dev_private = dev_priv; | 1005 | dev->dev_private = dev_priv; |
1003 | dev_priv->dev = dev; | 1006 | dev_priv->dev = dev; |
1004 | 1007 | ||
1008 | pci_set_master(dev->pdev); | ||
1009 | |||
1005 | dev_priv->flags = flags & NOUVEAU_FLAGS; | 1010 | dev_priv->flags = flags & NOUVEAU_FLAGS; |
1006 | 1011 | ||
1007 | NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", | 1012 | NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", |
1008 | dev->pci_vendor, dev->pci_device, dev->pdev->class); | 1013 | dev->pci_vendor, dev->pci_device, dev->pdev->class); |
1009 | 1014 | ||
1010 | /* resource 0 is mmio regs */ | 1015 | /* first up, map the start of mmio and determine the chipset */ |
1011 | /* resource 1 is linear FB */ | 1016 | dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE); |
1012 | /* resource 2 is RAMIN (mmio regs + 0x1000000) */ | 1017 | if (dev_priv->mmio) { |
1013 | /* resource 6 is bios */ | 1018 | #ifdef __BIG_ENDIAN |
1019 | /* put the card into big-endian mode if it's not */ | ||
1020 | if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001) | ||
1021 | nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001); | ||
1022 | DRM_MEMORYBARRIER(); | ||
1023 | #endif | ||
1024 | |||
1025 | /* determine chipset and derive architecture from it */ | ||
1026 | reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); | ||
1027 | if ((reg0 & 0x0f000000) > 0) { | ||
1028 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; | ||
1029 | switch (dev_priv->chipset & 0xf0) { | ||
1030 | case 0x10: | ||
1031 | case 0x20: | ||
1032 | case 0x30: | ||
1033 | dev_priv->card_type = dev_priv->chipset & 0xf0; | ||
1034 | break; | ||
1035 | case 0x40: | ||
1036 | case 0x60: | ||
1037 | dev_priv->card_type = NV_40; | ||
1038 | break; | ||
1039 | case 0x50: | ||
1040 | case 0x80: | ||
1041 | case 0x90: | ||
1042 | case 0xa0: | ||
1043 | dev_priv->card_type = NV_50; | ||
1044 | break; | ||
1045 | case 0xc0: | ||
1046 | dev_priv->card_type = NV_C0; | ||
1047 | break; | ||
1048 | case 0xd0: | ||
1049 | dev_priv->card_type = NV_D0; | ||
1050 | break; | ||
1051 | default: | ||
1052 | break; | ||
1053 | } | ||
1054 | } else | ||
1055 | if ((reg0 & 0xff00fff0) == 0x20004000) { | ||
1056 | if (reg0 & 0x00f00000) | ||
1057 | dev_priv->chipset = 0x05; | ||
1058 | else | ||
1059 | dev_priv->chipset = 0x04; | ||
1060 | dev_priv->card_type = NV_04; | ||
1061 | } | ||
1062 | |||
1063 | iounmap(dev_priv->mmio); | ||
1064 | } | ||
1065 | |||
1066 | if (!dev_priv->card_type) { | ||
1067 | NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0); | ||
1068 | ret = -EINVAL; | ||
1069 | goto err_priv; | ||
1070 | } | ||
1071 | |||
1072 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", | ||
1073 | dev_priv->card_type, reg0); | ||
1014 | 1074 | ||
1015 | /* map the mmio regs */ | 1075 | /* map the mmio regs */ |
1016 | mmio_start_offs = pci_resource_start(dev->pdev, 0); | 1076 | mmio_start_offs = pci_resource_start(dev->pdev, 0); |
@@ -1024,62 +1084,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
1024 | NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", | 1084 | NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", |
1025 | (unsigned long long)mmio_start_offs); | 1085 | (unsigned long long)mmio_start_offs); |
1026 | 1086 | ||
1027 | #ifdef __BIG_ENDIAN | ||
1028 | /* Put the card in BE mode if it's not */ | ||
1029 | if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001) | ||
1030 | nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001); | ||
1031 | |||
1032 | DRM_MEMORYBARRIER(); | ||
1033 | #endif | ||
1034 | |||
1035 | /* Time to determine the card architecture */ | ||
1036 | reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); | ||
1037 | |||
1038 | /* We're dealing with >=NV10 */ | ||
1039 | if ((reg0 & 0x0f000000) > 0) { | ||
1040 | /* Bit 27-20 contain the architecture in hex */ | ||
1041 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; | ||
1042 | /* NV04 or NV05 */ | ||
1043 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { | ||
1044 | if (reg0 & 0x00f00000) | ||
1045 | dev_priv->chipset = 0x05; | ||
1046 | else | ||
1047 | dev_priv->chipset = 0x04; | ||
1048 | } else | ||
1049 | dev_priv->chipset = 0xff; | ||
1050 | |||
1051 | switch (dev_priv->chipset & 0xf0) { | ||
1052 | case 0x00: | ||
1053 | case 0x10: | ||
1054 | case 0x20: | ||
1055 | case 0x30: | ||
1056 | dev_priv->card_type = dev_priv->chipset & 0xf0; | ||
1057 | break; | ||
1058 | case 0x40: | ||
1059 | case 0x60: | ||
1060 | dev_priv->card_type = NV_40; | ||
1061 | break; | ||
1062 | case 0x50: | ||
1063 | case 0x80: | ||
1064 | case 0x90: | ||
1065 | case 0xa0: | ||
1066 | dev_priv->card_type = NV_50; | ||
1067 | break; | ||
1068 | case 0xc0: | ||
1069 | dev_priv->card_type = NV_C0; | ||
1070 | break; | ||
1071 | case 0xd0: | ||
1072 | dev_priv->card_type = NV_D0; | ||
1073 | break; | ||
1074 | default: | ||
1075 | NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0); | ||
1076 | ret = -EINVAL; | ||
1077 | goto err_mmio; | ||
1078 | } | ||
1079 | |||
1080 | NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", | ||
1081 | dev_priv->card_type, reg0); | ||
1082 | |||
1083 | /* determine frequency of timing crystal */ | 1087 | /* determine frequency of timing crystal */ |
1084 | strap = nv_rd32(dev, 0x101000); | 1088 | strap = nv_rd32(dev, 0x101000); |
1085 | if ( dev_priv->chipset < 0x17 || | 1089 | if ( dev_priv->chipset < 0x17 || |
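
The early chipset probe above only needs the first page of BAR0: on NV10 and later, bits 27:20 of PMC_BOOT_0 carry the chipset, while NV04/NV05 are recognised by the 0x20004000 signature. A standalone sketch of the same decode, fed an assumed sample register value instead of a real MMIO read:

#include <stdio.h>
#include <stdint.h>

/* Decode chipset and card generation from PMC_BOOT_0, mirroring the
 * nouveau_load() logic above.  Returns 0 for an unrecognised value. */
static unsigned decode_chipset(uint32_t boot0, unsigned *card_type)
{
	unsigned chipset = 0;

	*card_type = 0;
	if (boot0 & 0x0f000000) {                 /* NV10 and newer */
		chipset = (boot0 & 0x0ff00000) >> 20;
		switch (chipset & 0xf0) {
		case 0x10: case 0x20: case 0x30:
			*card_type = chipset & 0xf0; break;
		case 0x40: case 0x60:
			*card_type = 0x40; break;
		case 0x50: case 0x80: case 0x90: case 0xa0:
			*card_type = 0x50; break;
		case 0xc0:
			*card_type = 0xc0; break;
		case 0xd0:
			*card_type = 0xd0; break;
		}
	} else if ((boot0 & 0xff00fff0) == 0x20004000) { /* NV04/NV05 */
		chipset = (boot0 & 0x00f00000) ? 0x05 : 0x04;
		*card_type = 0x04;
	}
	return chipset;
}

int main(void)
{
	unsigned card_type;
	uint32_t boot0 = 0x046700a2;   /* made-up value in the NV46 range */
	unsigned chipset = decode_chipset(boot0, &card_type);

	printf("chipset NV%02X, generation NV%02X\n", chipset, card_type);
	return 0;
}
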
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c index 638cf601c427..d5eedd67afe5 100644 --- a/drivers/gpu/drm/nouveau/nv04_fb.c +++ b/drivers/gpu/drm/nouveau/nv04_fb.c | |||
@@ -4,6 +4,40 @@ | |||
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | int | 6 | int |
7 | nv04_fb_vram_init(struct drm_device *dev) | ||
8 | { | ||
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
10 | u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0); | ||
11 | |||
12 | if (boot0 & 0x00000100) { | ||
13 | dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2; | ||
14 | dev_priv->vram_size *= 1024 * 1024; | ||
15 | } else { | ||
16 | switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { | ||
17 | case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: | ||
18 | dev_priv->vram_size = 32 * 1024 * 1024; | ||
19 | break; | ||
20 | case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: | ||
21 | dev_priv->vram_size = 16 * 1024 * 1024; | ||
22 | break; | ||
23 | case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: | ||
24 | dev_priv->vram_size = 8 * 1024 * 1024; | ||
25 | break; | ||
26 | case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: | ||
27 | dev_priv->vram_size = 4 * 1024 * 1024; | ||
28 | break; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | if ((boot0 & 0x00000038) <= 0x10) | ||
33 | dev_priv->vram_type = NV_MEM_TYPE_SGRAM; | ||
34 | else | ||
35 | dev_priv->vram_type = NV_MEM_TYPE_SDRAM; | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | int | ||
7 | nv04_fb_init(struct drm_device *dev) | 41 | nv04_fb_init(struct drm_device *dev) |
8 | { | 42 | { |
9 | /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows | 43 | /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows |
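
nv04_fb_vram_init() above reads PFB_BOOT_0 once and derives both the amount and the type of VRAM from it. A standalone sketch of the newer-style decode (bit 8 set), with the register value passed in rather than read over MMIO; the legacy NV04_PFB_BOOT_0_RAM_AMOUNT table is only referred to by name, since its numeric values are not part of this diff:

#include <stdio.h>
#include <stdint.h>

/* Decode the PFB_BOOT_0 layout shown above: when bit 8 is set the size
 * is encoded directly, and bits 5:3 distinguish SGRAM from SDRAM. */
static void decode_nv04_boot0(uint32_t boot0)
{
	if (boot0 & 0x00000100) {
		unsigned mib = ((boot0 >> 12) & 0xf) * 2 + 2;
		printf("vram: %u MiB\n", mib);
	} else {
		printf("vram: fixed 4/8/16/32 MiB (RAM_AMOUNT bits)\n");
	}

	if ((boot0 & 0x00000038) <= 0x10)
		printf("type: SGRAM\n");
	else
		printf("type: SDRAM\n");
}

int main(void)
{
	/* made-up register value: size field 7 -> 16 MiB, SDRAM */
	decode_nv04_boot0(0x00007120);
	return 0;
}
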
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index f78181a59b4a..420b1608536d 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c | |||
@@ -3,81 +3,16 @@ | |||
3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
5 | 5 | ||
6 | static struct drm_mm_node * | ||
7 | nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size) | ||
8 | { | ||
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
10 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
11 | struct drm_mm_node *mem; | ||
12 | int ret; | ||
13 | |||
14 | ret = drm_mm_pre_get(&pfb->tag_heap); | ||
15 | if (ret) | ||
16 | return NULL; | ||
17 | |||
18 | spin_lock(&dev_priv->tile.lock); | ||
19 | mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0); | ||
20 | if (mem) | ||
21 | mem = drm_mm_get_block_atomic(mem, size, 0); | ||
22 | spin_unlock(&dev_priv->tile.lock); | ||
23 | |||
24 | return mem; | ||
25 | } | ||
26 | |||
27 | static void | ||
28 | nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem) | ||
29 | { | ||
30 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
31 | |||
32 | spin_lock(&dev_priv->tile.lock); | ||
33 | drm_mm_put_block(mem); | ||
34 | spin_unlock(&dev_priv->tile.lock); | ||
35 | } | ||
36 | |||
37 | void | 6 | void |
38 | nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, | 7 | nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, |
39 | uint32_t size, uint32_t pitch, uint32_t flags) | 8 | uint32_t size, uint32_t pitch, uint32_t flags) |
40 | { | 9 | { |
41 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 10 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
42 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 11 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
43 | int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16); | ||
44 | 12 | ||
45 | tile->addr = addr; | 13 | tile->addr = 0x80000000 | addr; |
46 | tile->limit = max(1u, addr + size) - 1; | 14 | tile->limit = max(1u, addr + size) - 1; |
47 | tile->pitch = pitch; | 15 | tile->pitch = pitch; |
48 | |||
49 | if (dev_priv->card_type == NV_20) { | ||
50 | if (flags & NOUVEAU_GEM_TILE_ZETA) { | ||
51 | /* | ||
52 | * Allocate some of the on-die tag memory, | ||
53 | * used to store Z compression meta-data (most | ||
54 | * likely just a bitmap determining if a given | ||
55 | * tile is compressed or not). | ||
56 | */ | ||
57 | tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256); | ||
58 | |||
59 | if (tile->tag_mem) { | ||
60 | /* Enable Z compression */ | ||
61 | if (dev_priv->chipset >= 0x25) | ||
62 | tile->zcomp = tile->tag_mem->start | | ||
63 | (bpp == 16 ? | ||
64 | NV25_PFB_ZCOMP_MODE_16 : | ||
65 | NV25_PFB_ZCOMP_MODE_32); | ||
66 | else | ||
67 | tile->zcomp = tile->tag_mem->start | | ||
68 | NV20_PFB_ZCOMP_EN | | ||
69 | (bpp == 16 ? 0 : | ||
70 | NV20_PFB_ZCOMP_MODE_32); | ||
71 | } | ||
72 | |||
73 | tile->addr |= 3; | ||
74 | } else { | ||
75 | tile->addr |= 1; | ||
76 | } | ||
77 | |||
78 | } else { | ||
79 | tile->addr |= 1 << 31; | ||
80 | } | ||
81 | } | 16 | } |
82 | 17 | ||
83 | void | 18 | void |
@@ -86,11 +21,6 @@ nv10_fb_free_tile_region(struct drm_device *dev, int i) | |||
86 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 21 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
87 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 22 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
88 | 23 | ||
89 | if (tile->tag_mem) { | ||
90 | nv20_fb_free_tag(dev, tile->tag_mem); | ||
91 | tile->tag_mem = NULL; | ||
92 | } | ||
93 | |||
94 | tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; | 24 | tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; |
95 | } | 25 | } |
96 | 26 | ||
@@ -103,9 +33,48 @@ nv10_fb_set_tile_region(struct drm_device *dev, int i) | |||
103 | nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); | 33 | nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); |
104 | nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); | 34 | nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); |
105 | nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); | 35 | nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); |
36 | } | ||
37 | |||
38 | int | ||
39 | nv1a_fb_vram_init(struct drm_device *dev) | ||
40 | { | ||
41 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
42 | struct pci_dev *bridge; | ||
43 | uint32_t mem, mib; | ||
44 | |||
45 | bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); | ||
46 | if (!bridge) { | ||
47 | NV_ERROR(dev, "no bridge device\n"); | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | if (dev_priv->chipset == 0x1a) { | ||
52 | pci_read_config_dword(bridge, 0x7c, &mem); | ||
53 | mib = ((mem >> 6) & 31) + 1; | ||
54 | } else { | ||
55 | pci_read_config_dword(bridge, 0x84, &mem); | ||
56 | mib = ((mem >> 4) & 127) + 1; | ||
57 | } | ||
58 | |||
59 | dev_priv->vram_size = mib * 1024 * 1024; | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | int | ||
64 | nv10_fb_vram_init(struct drm_device *dev) | ||
65 | { | ||
66 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
67 | u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA); | ||
68 | u32 cfg0 = nv_rd32(dev, 0x100200); | ||
106 | 69 | ||
107 | if (dev_priv->card_type == NV_20) | 70 | dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; |
108 | nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp); | 71 | |
72 | if (cfg0 & 0x00000001) | ||
73 | dev_priv->vram_type = NV_MEM_TYPE_DDR1; | ||
74 | else | ||
75 | dev_priv->vram_type = NV_MEM_TYPE_SDRAM; | ||
76 | |||
77 | return 0; | ||
109 | } | 78 | } |
110 | 79 | ||
111 | int | 80 | int |
@@ -115,14 +84,8 @@ nv10_fb_init(struct drm_device *dev) | |||
115 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | 84 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
116 | int i; | 85 | int i; |
117 | 86 | ||
118 | pfb->num_tiles = NV10_PFB_TILE__SIZE; | ||
119 | |||
120 | if (dev_priv->card_type == NV_20) | ||
121 | drm_mm_init(&pfb->tag_heap, 0, | ||
122 | (dev_priv->chipset >= 0x25 ? | ||
123 | 64 * 1024 : 32 * 1024)); | ||
124 | |||
125 | /* Turn all the tiling regions off. */ | 87 | /* Turn all the tiling regions off. */ |
88 | pfb->num_tiles = NV10_PFB_TILE__SIZE; | ||
126 | for (i = 0; i < pfb->num_tiles; i++) | 89 | for (i = 0; i < pfb->num_tiles; i++) |
127 | pfb->set_tile_region(dev, i); | 90 | pfb->set_tile_region(dev, i); |
128 | 91 | ||
@@ -138,7 +101,4 @@ nv10_fb_takedown(struct drm_device *dev) | |||
138 | 101 | ||
139 | for (i = 0; i < pfb->num_tiles; i++) | 102 | for (i = 0; i < pfb->num_tiles; i++) |
140 | pfb->free_tile_region(dev, i); | 103 | pfb->free_tile_region(dev, i); |
141 | |||
142 | if (dev_priv->card_type == NV_20) | ||
143 | drm_mm_takedown(&pfb->tag_heap); | ||
144 | } | 104 | } |
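
The integrated 0x1a/0x1f chipsets have no VRAM of their own, so nv1a_fb_vram_init() above asks the host bridge how much system memory is carved out: one config-space dword, a shift, a mask, plus one. A worked example of that arithmetic; the config-space values below are made up, only the field extraction is taken from the code above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cfg_7c = 0x000007c0;   /* nv1a path: bits 10:6 hold MiB-1 */
	uint32_t cfg_84 = 0x000003f0;   /* nv1f path: bits 10:4 hold MiB-1 */

	unsigned mib_1a = ((cfg_7c >> 6) & 31) + 1;
	unsigned mib_1f = ((cfg_84 >> 4) & 127) + 1;

	printf("nv1a carve-out: %u MiB\n", mib_1a);   /* 32 MiB */
	printf("nv1f carve-out: %u MiB\n", mib_1f);   /* 64 MiB */

	return 0;
}
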
diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c new file mode 100644 index 000000000000..19bd64059a66 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv20_fb.c | |||
@@ -0,0 +1,148 @@ | |||
1 | #include "drmP.h" | ||
2 | #include "drm.h" | ||
3 | #include "nouveau_drv.h" | ||
4 | #include "nouveau_drm.h" | ||
5 | |||
6 | static struct drm_mm_node * | ||
7 | nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size) | ||
8 | { | ||
9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
10 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
11 | struct drm_mm_node *mem; | ||
12 | int ret; | ||
13 | |||
14 | ret = drm_mm_pre_get(&pfb->tag_heap); | ||
15 | if (ret) | ||
16 | return NULL; | ||
17 | |||
18 | spin_lock(&dev_priv->tile.lock); | ||
19 | mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0); | ||
20 | if (mem) | ||
21 | mem = drm_mm_get_block_atomic(mem, size, 0); | ||
22 | spin_unlock(&dev_priv->tile.lock); | ||
23 | |||
24 | return mem; | ||
25 | } | ||
26 | |||
27 | static void | ||
28 | nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem) | ||
29 | { | ||
30 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
31 | struct drm_mm_node *mem = *pmem; | ||
32 | if (mem) { | ||
33 | spin_lock(&dev_priv->tile.lock); | ||
34 | drm_mm_put_block(mem); | ||
35 | spin_unlock(&dev_priv->tile.lock); | ||
36 | *pmem = NULL; | ||
37 | } | ||
38 | } | ||
39 | |||
40 | void | ||
41 | nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, | ||
42 | uint32_t size, uint32_t pitch, uint32_t flags) | ||
43 | { | ||
44 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
45 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
46 | int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16); | ||
47 | |||
48 | tile->addr = 0x00000001 | addr; | ||
49 | tile->limit = max(1u, addr + size) - 1; | ||
50 | tile->pitch = pitch; | ||
51 | |||
52 | /* Allocate some of the on-die tag memory, used to store Z | ||
53 | * compression meta-data (most likely just a bitmap determining | ||
54 | * if a given tile is compressed or not). | ||
55 | */ | ||
56 | if (flags & NOUVEAU_GEM_TILE_ZETA) { | ||
57 | tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256); | ||
58 | if (tile->tag_mem) { | ||
59 | /* Enable Z compression */ | ||
60 | tile->zcomp = tile->tag_mem->start; | ||
61 | if (dev_priv->chipset >= 0x25) { | ||
62 | if (bpp == 16) | ||
63 | tile->zcomp |= NV25_PFB_ZCOMP_MODE_16; | ||
64 | else | ||
65 | tile->zcomp |= NV25_PFB_ZCOMP_MODE_32; | ||
66 | } else { | ||
67 | tile->zcomp |= NV20_PFB_ZCOMP_EN; | ||
68 | if (bpp != 16) | ||
69 | tile->zcomp |= NV20_PFB_ZCOMP_MODE_32; | ||
70 | } | ||
71 | } | ||
72 | |||
73 | tile->addr |= 2; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void | ||
78 | nv20_fb_free_tile_region(struct drm_device *dev, int i) | ||
79 | { | ||
80 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
81 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
82 | |||
83 | tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; | ||
84 | nv20_fb_free_tag(dev, &tile->tag_mem); | ||
85 | } | ||
86 | |||
87 | void | ||
88 | nv20_fb_set_tile_region(struct drm_device *dev, int i) | ||
89 | { | ||
90 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
91 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
92 | |||
93 | nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); | ||
94 | nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); | ||
95 | nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); | ||
96 | nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp); | ||
97 | } | ||
98 | |||
99 | int | ||
100 | nv20_fb_vram_init(struct drm_device *dev) | ||
101 | { | ||
102 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
103 | u32 mem_size = nv_rd32(dev, 0x10020c); | ||
104 | u32 pbus1218 = nv_rd32(dev, 0x001218); | ||
105 | |||
106 | dev_priv->vram_size = mem_size & 0xff000000; | ||
107 | switch (pbus1218 & 0x00000300) { | ||
108 | case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break; | ||
109 | case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break; | ||
110 | case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break; | ||
111 | case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break; | ||
112 | } | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | int | ||
118 | nv20_fb_init(struct drm_device *dev) | ||
119 | { | ||
120 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
121 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
122 | int i; | ||
123 | |||
124 | if (dev_priv->chipset >= 0x25) | ||
125 | drm_mm_init(&pfb->tag_heap, 0, 64 * 1024); | ||
126 | else | ||
127 | drm_mm_init(&pfb->tag_heap, 0, 32 * 1024); | ||
128 | |||
129 | /* Turn all the tiling regions off. */ | ||
130 | pfb->num_tiles = NV10_PFB_TILE__SIZE; | ||
131 | for (i = 0; i < pfb->num_tiles; i++) | ||
132 | pfb->set_tile_region(dev, i); | ||
133 | |||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | void | ||
138 | nv20_fb_takedown(struct drm_device *dev) | ||
139 | { | ||
140 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
141 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
142 | int i; | ||
143 | |||
144 | for (i = 0; i < pfb->num_tiles; i++) | ||
145 | pfb->free_tile_region(dev, i); | ||
146 | |||
147 | drm_mm_takedown(&pfb->tag_heap); | ||
148 | } | ||
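
For NV20-family tiles, the Z-compression setup above boils down to reserving size/256 units of on-die tag memory and programming ZCOMP with the tag offset plus a mode that depends on chipset and depth format. A compact sketch of how such a register value is composed; the DEMO_* bit values are placeholders chosen for illustration only, the real NV20/NV25_PFB_ZCOMP_* constants live in nouveau_reg.h and are not reproduced here:

#include <stdio.h>
#include <stdint.h>

/* Placeholder mode bits, not the real hardware values. */
#define DEMO_ZCOMP_EN       0x80000000u
#define DEMO_ZCOMP_MODE_32  0x08000000u

/* Compose a zcomp word the way nv20_fb_init_tile_region() does:
 * tag offset in the low bits, mode flags on top. */
static uint32_t demo_zcomp(uint32_t tile_size, uint32_t tag_start, int bpp)
{
	uint32_t tag_units = tile_size / 256;   /* size/256, as in the code above */
	uint32_t zcomp = tag_start | DEMO_ZCOMP_EN;

	if (bpp != 16)
		zcomp |= DEMO_ZCOMP_MODE_32;

	printf("tile %u bytes -> %u tag units, zcomp 0x%08x\n",
	       tile_size, tag_units, zcomp);
	return zcomp;
}

int main(void)
{
	demo_zcomp(4 << 20, 0x1000, 32);   /* 4 MiB Z buffer, 32bpp */
	return 0;
}
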
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c index f0ac2a768c67..7fbcb334c096 100644 --- a/drivers/gpu/drm/nouveau/nv40_fb.c +++ b/drivers/gpu/drm/nouveau/nv40_fb.c | |||
@@ -72,6 +72,51 @@ nv44_fb_init_gart(struct drm_device *dev) | |||
72 | } | 72 | } |
73 | 73 | ||
74 | int | 74 | int |
75 | nv40_fb_vram_init(struct drm_device *dev) | ||
76 | { | ||
77 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
78 | |||
79 | /* 0x001218 is actually present on a few other NV4X I looked at, | ||
80 | * and even contains sane values matching 0x100474. From looking | ||
81 | * at various vbios images however, this isn't the case everywhere. | ||
82 | * So, I chose to use the same regs I've seen NVIDIA reading around | ||
83 | * the memory detection, hopefully that'll get us the right numbers | ||
84 | */ | ||
85 | if (dev_priv->chipset == 0x40) { | ||
86 | u32 pbus1218 = nv_rd32(dev, 0x001218); | ||
87 | switch (pbus1218 & 0x00000300) { | ||
88 | case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break; | ||
89 | case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break; | ||
90 | case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break; | ||
91 | case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break; | ||
92 | } | ||
93 | } else | ||
94 | if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) { | ||
95 | u32 pfb914 = nv_rd32(dev, 0x100914); | ||
96 | switch (pfb914 & 0x00000003) { | ||
97 | case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break; | ||
98 | case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break; | ||
99 | case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break; | ||
100 | case 0x00000003: break; | ||
101 | } | ||
102 | } else | ||
103 | if (dev_priv->chipset != 0x4e) { | ||
104 | u32 pfb474 = nv_rd32(dev, 0x100474); | ||
105 | if (pfb474 & 0x00000004) | ||
106 | dev_priv->vram_type = NV_MEM_TYPE_GDDR3; | ||
107 | if (pfb474 & 0x00000002) | ||
108 | dev_priv->vram_type = NV_MEM_TYPE_DDR2; | ||
109 | if (pfb474 & 0x00000001) | ||
110 | dev_priv->vram_type = NV_MEM_TYPE_DDR1; | ||
111 | } else { | ||
112 | dev_priv->vram_type = NV_MEM_TYPE_STOLEN; | ||
113 | } | ||
114 | |||
115 | dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000; | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | int | ||
75 | nv40_fb_init(struct drm_device *dev) | 120 | nv40_fb_init(struct drm_device *dev) |
76 | { | 121 | { |
77 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 122 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 8f6c2ace3adf..701b927998bf 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -170,6 +170,41 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) | |||
170 | return ret; | 170 | return ret; |
171 | } | 171 | } |
172 | 172 | ||
173 | static int | ||
174 | nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) | ||
175 | { | ||
176 | struct drm_device *dev = nv_crtc->base.dev; | ||
177 | struct nouveau_channel *evo = nv50_display(dev)->master; | ||
178 | int ret; | ||
179 | int adj; | ||
180 | u32 hue, vib; | ||
181 | |||
182 | NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n", | ||
183 | nv_crtc->color_vibrance, nv_crtc->vibrant_hue); | ||
184 | |||
185 | ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); | ||
186 | if (ret) { | ||
187 | NV_ERROR(dev, "no space while setting color vibrance\n"); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; | ||
192 | vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; | ||
193 | |||
194 | hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; | ||
195 | |||
196 | BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1); | ||
197 | OUT_RING (evo, (hue << 20) | (vib << 8)); | ||
198 | |||
199 | if (update) { | ||
200 | BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); | ||
201 | OUT_RING (evo, 0); | ||
202 | FIRE_RING (evo); | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
173 | struct nouveau_connector * | 208 | struct nouveau_connector * |
174 | nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) | 209 | nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) |
175 | { | 210 | { |
@@ -577,8 +612,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
577 | OUT_RING (evo, fb->base.depth == 8 ? | 612 | OUT_RING (evo, fb->base.depth == 8 ? |
578 | NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); | 613 | NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); |
579 | 614 | ||
580 | BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1); | ||
581 | OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR); | ||
582 | BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1); | 615 | BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1); |
583 | OUT_RING (evo, (y << 16) | x); | 616 | OUT_RING (evo, (y << 16) | x); |
584 | 617 | ||
@@ -661,6 +694,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, | |||
661 | 694 | ||
662 | nv_crtc->set_dither(nv_crtc, false); | 695 | nv_crtc->set_dither(nv_crtc, false); |
663 | nv_crtc->set_scale(nv_crtc, false); | 696 | nv_crtc->set_scale(nv_crtc, false); |
697 | nv_crtc->set_color_vibrance(nv_crtc, false); | ||
664 | 698 | ||
665 | return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); | 699 | return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); |
666 | } | 700 | } |
@@ -721,6 +755,9 @@ nv50_crtc_create(struct drm_device *dev, int index) | |||
721 | if (!nv_crtc) | 755 | if (!nv_crtc) |
722 | return -ENOMEM; | 756 | return -ENOMEM; |
723 | 757 | ||
758 | nv_crtc->color_vibrance = 50; | ||
759 | nv_crtc->vibrant_hue = 0; | ||
760 | |||
724 | /* Default CLUT parameters, will be activated on the hw upon | 761 | /* Default CLUT parameters, will be activated on the hw upon |
725 | * first mode set. | 762 | * first mode set. |
726 | */ | 763 | */ |
@@ -751,6 +788,7 @@ nv50_crtc_create(struct drm_device *dev, int index) | |||
751 | /* set function pointers */ | 788 | /* set function pointers */ |
752 | nv_crtc->set_dither = nv50_crtc_set_dither; | 789 | nv_crtc->set_dither = nv50_crtc_set_dither; |
753 | nv_crtc->set_scale = nv50_crtc_set_scale; | 790 | nv_crtc->set_scale = nv50_crtc_set_scale; |
791 | nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance; | ||
754 | 792 | ||
755 | drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs); | 793 | drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs); |
756 | drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); | 794 | drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); |
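
nv50_crtc_set_color_vibrance() above maps the user-visible 0-100 vibrance and hue values onto the two 12-bit fields of the EVO COLOR_CTRL method, applying a +50 rounding bias only to positive vibrance. A worked example of that scaling, using the defaults set in nv50_crtc_create() (vibrance 50, hue 0):

#include <stdio.h>
#include <stdint.h>

/* Scale percentage-style vibrance/hue into the 12-bit COLOR_CTRL
 * fields, following the arithmetic in nv50_crtc_set_color_vibrance(). */
static uint32_t color_ctrl(int color_vibrance, int vibrant_hue)
{
	int adj = (color_vibrance > 0) ? 50 : 0;
	uint32_t vib = ((color_vibrance * 2047 + adj) / 100) & 0xfff;
	uint32_t hue = ((vibrant_hue * 2047) / 100) & 0xfff;

	return (hue << 20) | (vib << 8);
}

int main(void)
{
	/* prints 0x00040000, matching the fixed value the removed
	 * NV50_EVO_CRTC_COLOR_CTRL_COLOR define used to program */
	printf("COLOR_CTRL = 0x%08x\n", color_ctrl(50, 0));
	return 0;
}
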
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index a0f2bebf49e3..55c56330be6d 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c | |||
@@ -190,11 +190,8 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
190 | } | 190 | } |
191 | 191 | ||
192 | if (connector->scaling_mode != DRM_MODE_SCALE_NONE && | 192 | if (connector->scaling_mode != DRM_MODE_SCALE_NONE && |
193 | connector->native_mode) { | 193 | connector->native_mode) |
194 | int id = adjusted_mode->base.id; | 194 | drm_mode_copy(adjusted_mode, connector->native_mode); |
195 | *adjusted_mode = *connector->native_mode; | ||
196 | adjusted_mode->base.id = id; | ||
197 | } | ||
198 | 195 | ||
199 | return true; | 196 | return true; |
200 | } | 197 | } |
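
The nv50_dac change above replaces a hand-rolled "save the id, copy the struct, restore the id" dance with drm_mode_copy(), which copies a display mode without clobbering the destination's identity. A generic sketch of the pattern the helper encapsulates; the struct and field names here are illustrative, not the real drm types:

#include <stdio.h>

struct demo_mode {
	int id;          /* identity owned by the destination object */
	int hdisplay;
	int vdisplay;
	int clock;
};

/* Copy every payload field but keep the destination's own id, which is
 * the behaviour drm_mode_copy() provides for struct drm_display_mode. */
static void demo_mode_copy(struct demo_mode *dst, const struct demo_mode *src)
{
	int id = dst->id;

	*dst = *src;
	dst->id = id;
}

int main(void)
{
	struct demo_mode native = { .id = 7, .hdisplay = 1920,
				    .vdisplay = 1200, .clock = 154000 };
	struct demo_mode adjusted = { .id = 42 };

	demo_mode_copy(&adjusted, &native);
	printf("id %d, %dx%d @ %d kHz\n", adjusted.id,
	       adjusted.hdisplay, adjusted.vdisplay, adjusted.clock);
	return 0;
}
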
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7ba28e08ee31..0e47a898f415 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -50,6 +50,29 @@ nv50_sor_nr(struct drm_device *dev) | |||
50 | return 4; | 50 | return 4; |
51 | } | 51 | } |
52 | 52 | ||
53 | u32 | ||
54 | nv50_display_active_crtcs(struct drm_device *dev) | ||
55 | { | ||
56 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
57 | u32 mask = 0; | ||
58 | int i; | ||
59 | |||
60 | if (dev_priv->chipset < 0x90 || | ||
61 | dev_priv->chipset == 0x92 || | ||
62 | dev_priv->chipset == 0xa0) { | ||
63 | for (i = 0; i < 2; i++) | ||
64 | mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); | ||
65 | } else { | ||
66 | for (i = 0; i < 4; i++) | ||
67 | mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); | ||
68 | } | ||
69 | |||
70 | for (i = 0; i < 3; i++) | ||
71 | mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); | ||
72 | |||
73 | return mask & 3; | ||
74 | } | ||
75 | |||
53 | static int | 76 | static int |
54 | evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data) | 77 | evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data) |
55 | { | 78 | { |
@@ -840,9 +863,9 @@ nv50_display_unk20_handler(struct drm_device *dev) | |||
840 | if (type == OUTPUT_DP) { | 863 | if (type == OUTPUT_DP) { |
841 | int link = !(dcb->dpconf.sor.link & 1); | 864 | int link = !(dcb->dpconf.sor.link & 1); |
842 | if ((mc & 0x000f0000) == 0x00020000) | 865 | if ((mc & 0x000f0000) == 0x00020000) |
843 | nouveau_dp_tu_update(dev, or, link, pclk, 18); | 866 | nv50_sor_dp_calc_tu(dev, or, link, pclk, 18); |
844 | else | 867 | else |
845 | nouveau_dp_tu_update(dev, or, link, pclk, 24); | 868 | nv50_sor_dp_calc_tu(dev, or, link, pclk, 24); |
846 | } | 869 | } |
847 | 870 | ||
848 | if (dcb->type != OUTPUT_ANALOG) { | 871 | if (dcb->type != OUTPUT_ANALOG) { |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index 95874f7c043c..5d3dd14d2837 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h | |||
@@ -74,6 +74,8 @@ void nv50_display_destroy(struct drm_device *dev); | |||
74 | int nv50_crtc_blank(struct nouveau_crtc *, bool blank); | 74 | int nv50_crtc_blank(struct nouveau_crtc *, bool blank); |
75 | int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); | 75 | int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); |
76 | 76 | ||
77 | u32 nv50_display_active_crtcs(struct drm_device *); | ||
78 | |||
77 | int nv50_display_sync(struct drm_device *); | 79 | int nv50_display_sync(struct drm_device *); |
78 | int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, | 80 | int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, |
79 | struct nouveau_channel *chan); | 81 | struct nouveau_channel *chan); |
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h index 3860ca62cb19..771d879bc834 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.h +++ b/drivers/gpu/drm/nouveau/nv50_evo.h | |||
@@ -104,7 +104,8 @@ | |||
104 | #define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000 | 104 | #define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000 |
105 | #define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009 | 105 | #define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009 |
106 | #define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8 | 106 | #define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8 |
107 | #define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000 | 107 | #define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00 |
108 | #define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000 | ||
108 | #define NV50_EVO_CRTC_FB_POS 0x000008c0 | 109 | #define NV50_EVO_CRTC_FB_POS 0x000008c0 |
109 | #define NV50_EVO_CRTC_REAL_RES 0x000008c8 | 110 | #define NV50_EVO_CRTC_REAL_RES 0x000008c8 |
110 | #define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4 | 111 | #define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4 |
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c index ec5481dfcd82..d020ed4979b4 100644 --- a/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/drivers/gpu/drm/nouveau/nv50_pm.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "nouveau_hw.h" | 28 | #include "nouveau_hw.h" |
29 | #include "nouveau_pm.h" | 29 | #include "nouveau_pm.h" |
30 | #include "nouveau_hwsq.h" | 30 | #include "nouveau_hwsq.h" |
31 | #include "nv50_display.h" | ||
31 | 32 | ||
32 | enum clk_src { | 33 | enum clk_src { |
33 | clk_src_crystal, | 34 | clk_src_crystal, |
@@ -352,17 +353,13 @@ nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
352 | } | 353 | } |
353 | 354 | ||
354 | struct nv50_pm_state { | 355 | struct nv50_pm_state { |
356 | struct nouveau_pm_level *perflvl; | ||
357 | struct hwsq_ucode eclk_hwsq; | ||
355 | struct hwsq_ucode mclk_hwsq; | 358 | struct hwsq_ucode mclk_hwsq; |
356 | u32 mscript; | 359 | u32 mscript; |
357 | 360 | u32 mmast; | |
358 | u32 emast; | 361 | u32 mctrl; |
359 | u32 nctrl; | 362 | u32 mcoef; |
360 | u32 ncoef; | ||
361 | u32 sctrl; | ||
362 | u32 scoef; | ||
363 | |||
364 | u32 amast; | ||
365 | u32 pdivs; | ||
366 | }; | 363 | }; |
367 | 364 | ||
368 | static u32 | 365 | static u32 |
@@ -415,40 +412,153 @@ clk_same(u32 a, u32 b) | |||
415 | return ((a / 1000) == (b / 1000)); | 412 | return ((a / 1000) == (b / 1000)); |
416 | } | 413 | } |
417 | 414 | ||
415 | static void | ||
416 | mclk_precharge(struct nouveau_mem_exec_func *exec) | ||
417 | { | ||
418 | struct nv50_pm_state *info = exec->priv; | ||
419 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
420 | |||
421 | hwsq_wr32(hwsq, 0x1002d4, 0x00000001); | ||
422 | } | ||
423 | |||
424 | static void | ||
425 | mclk_refresh(struct nouveau_mem_exec_func *exec) | ||
426 | { | ||
427 | struct nv50_pm_state *info = exec->priv; | ||
428 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
429 | |||
430 | hwsq_wr32(hwsq, 0x1002d0, 0x00000001); | ||
431 | } | ||
432 | |||
433 | static void | ||
434 | mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) | ||
435 | { | ||
436 | struct nv50_pm_state *info = exec->priv; | ||
437 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
438 | |||
439 | hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000); | ||
440 | } | ||
441 | |||
442 | static void | ||
443 | mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable) | ||
444 | { | ||
445 | struct nv50_pm_state *info = exec->priv; | ||
446 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
447 | |||
448 | hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000); | ||
449 | } | ||
450 | |||
451 | static void | ||
452 | mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec) | ||
453 | { | ||
454 | struct nv50_pm_state *info = exec->priv; | ||
455 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
456 | |||
457 | if (nsec > 1000) | ||
458 | hwsq_usec(hwsq, (nsec + 500) / 1000); | ||
459 | } | ||
460 | |||
461 | static u32 | ||
462 | mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) | ||
463 | { | ||
464 | if (mr <= 1) | ||
465 | return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4)); | ||
466 | if (mr <= 3) | ||
467 | return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4)); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static void | ||
472 | mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) | ||
473 | { | ||
474 | struct drm_nouveau_private *dev_priv = exec->dev->dev_private; | ||
475 | struct nv50_pm_state *info = exec->priv; | ||
476 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
477 | |||
478 | if (mr <= 1) { | ||
479 | if (dev_priv->vram_rank_B) | ||
480 | hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data); | ||
481 | hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data); | ||
482 | } else | ||
483 | if (mr <= 3) { | ||
484 | if (dev_priv->vram_rank_B) | ||
485 | hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data); | ||
486 | hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data); | ||
487 | } | ||
488 | } | ||
489 | |||
490 | static void | ||
491 | mclk_clock_set(struct nouveau_mem_exec_func *exec) | ||
492 | { | ||
493 | struct nv50_pm_state *info = exec->priv; | ||
494 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
495 | u32 ctrl = nv_rd32(exec->dev, 0x004008); | ||
496 | |||
497 | info->mmast = nv_rd32(exec->dev, 0x00c040); | ||
498 | info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */ | ||
499 | info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */ | ||
500 | |||
501 | hwsq_wr32(hwsq, 0xc040, info->mmast); | ||
502 | hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */ | ||
503 | if (info->mctrl & 0x80000000) | ||
504 | hwsq_wr32(hwsq, 0x400c, info->mcoef); | ||
505 | hwsq_wr32(hwsq, 0x4008, info->mctrl); | ||
506 | } | ||
507 | |||
508 | static void | ||
509 | mclk_timing_set(struct nouveau_mem_exec_func *exec) | ||
510 | { | ||
511 | struct drm_device *dev = exec->dev; | ||
512 | struct nv50_pm_state *info = exec->priv; | ||
513 | struct nouveau_pm_level *perflvl = info->perflvl; | ||
514 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
515 | int i; | ||
516 | |||
517 | for (i = 0; i < 9; i++) { | ||
518 | u32 reg = 0x100220 + (i * 4); | ||
519 | u32 val = nv_rd32(dev, reg); | ||
520 | if (val != perflvl->timing.reg[i]) | ||
521 | hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]); | ||
522 | } | ||
523 | } | ||
524 | |||
418 | static int | 525 | static int |
419 | calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq) | 526 | calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl, |
527 | struct nv50_pm_state *info) | ||
420 | { | 528 | { |
421 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 529 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
530 | u32 crtc_mask = nv50_display_active_crtcs(dev); | ||
531 | struct nouveau_mem_exec_func exec = { | ||
532 | .dev = dev, | ||
533 | .precharge = mclk_precharge, | ||
534 | .refresh = mclk_refresh, | ||
535 | .refresh_auto = mclk_refresh_auto, | ||
536 | .refresh_self = mclk_refresh_self, | ||
537 | .wait = mclk_wait, | ||
538 | .mrg = mclk_mrg, | ||
539 | .mrs = mclk_mrs, | ||
540 | .clock_set = mclk_clock_set, | ||
541 | .timing_set = mclk_timing_set, | ||
542 | .priv = info | ||
543 | }; | ||
544 | struct hwsq_ucode *hwsq = &info->mclk_hwsq; | ||
422 | struct pll_lims pll; | 545 | struct pll_lims pll; |
423 | u32 mast = nv_rd32(dev, 0x00c040); | ||
424 | u32 ctrl = nv_rd32(dev, 0x004008); | ||
425 | u32 coef = nv_rd32(dev, 0x00400c); | ||
426 | u32 orig = ctrl; | ||
427 | u32 crtc_mask = 0; | ||
428 | int N, M, P; | 546 | int N, M, P; |
429 | int ret, i; | 547 | int ret; |
430 | 548 | ||
431 | /* use pcie refclock if possible, otherwise use mpll */ | 549 | /* use pcie refclock if possible, otherwise use mpll */ |
432 | ctrl &= ~0x81ff0200; | 550 | info->mctrl = nv_rd32(dev, 0x004008); |
433 | if (clk_same(freq, read_clk(dev, clk_src_href))) { | 551 | info->mctrl &= ~0x81ff0200; |
434 | ctrl |= 0x00000200 | (pll.log2p_bias << 19); | 552 | if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) { |
553 | info->mctrl |= 0x00000200 | (pll.log2p_bias << 19); | ||
435 | } else { | 554 | } else { |
436 | ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P); | 555 | ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P); |
437 | if (ret == 0) | 556 | if (ret == 0) |
438 | return -EINVAL; | 557 | return -EINVAL; |
439 | 558 | ||
440 | ctrl |= 0x80000000 | (P << 22) | (P << 16); | 559 | info->mctrl |= 0x80000000 | (P << 22) | (P << 16); |
441 | ctrl |= pll.log2p_bias << 19; | 560 | info->mctrl |= pll.log2p_bias << 19; |
442 | coef = (N << 8) | M; | 561 | info->mcoef = (N << 8) | M; |
443 | } | ||
444 | |||
445 | mast &= ~0xc0000000; /* get MCLK_2 from HREF */ | ||
446 | mast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */ | ||
447 | |||
448 | /* determine active crtcs */ | ||
449 | for (i = 0; i < 2; i++) { | ||
450 | if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK))) | ||
451 | crtc_mask |= (1 << i); | ||
452 | } | 562 | } |
453 | 563 | ||
454 | /* build the ucode which will reclock the memory for us */ | 564 | /* build the ucode which will reclock the memory for us */ |
@@ -462,25 +572,10 @@ calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq) | |||
462 | hwsq_setf(hwsq, 0x10, 0); /* disable bus access */ | 572 | hwsq_setf(hwsq, 0x10, 0); /* disable bus access */ |
463 | hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */ | 573 | hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */ |
464 | 574 | ||
465 | /* prepare memory controller */ | 575 | ret = nouveau_mem_exec(&exec, perflvl); |
466 | hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */ | 576 | if (ret) |
467 | hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */ | 577 | return ret; |
468 | hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */ | 578 | |
469 | hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */ | ||
470 | |||
471 | /* reclock memory */ | ||
472 | hwsq_wr32(hwsq, 0xc040, mast); | ||
473 | hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */ | ||
474 | hwsq_wr32(hwsq, 0x400c, coef); | ||
475 | hwsq_wr32(hwsq, 0x4008, ctrl); | ||
476 | |||
477 | /* restart memory controller */ | ||
478 | hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */ | ||
479 | hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */ | ||
480 | hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */ | ||
481 | hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */ | ||
482 | |||
483 | hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */ | ||
484 | hwsq_setf(hwsq, 0x10, 1); /* enable bus access */ | 579 | hwsq_setf(hwsq, 0x10, 1); /* enable bus access */ |
485 | hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */ | 580 | hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */ |
486 | if (dev_priv->chipset >= 0x92) | 581 | if (dev_priv->chipset >= 0x92) |
@@ -494,10 +589,11 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
494 | { | 589 | { |
495 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 590 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
496 | struct nv50_pm_state *info; | 591 | struct nv50_pm_state *info; |
592 | struct hwsq_ucode *hwsq; | ||
497 | struct pll_lims pll; | 593 | struct pll_lims pll; |
594 | u32 out, mast, divs, ctrl; | ||
498 | int clk, ret = -EINVAL; | 595 | int clk, ret = -EINVAL; |
499 | int N, M, P1, P2; | 596 | int N, M, P1, P2; |
500 | u32 out; | ||
501 | 597 | ||
502 | if (dev_priv->chipset == 0xaa || | 598 | if (dev_priv->chipset == 0xaa || |
503 | dev_priv->chipset == 0xac) | 599 | dev_priv->chipset == 0xac) |
@@ -506,54 +602,44 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
506 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 602 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
507 | if (!info) | 603 | if (!info) |
508 | return ERR_PTR(-ENOMEM); | 604 | return ERR_PTR(-ENOMEM); |
605 | info->perflvl = perflvl; | ||
509 | 606 | ||
510 | /* core: for the moment at least, always use nvpll */ | 607 | /* memory: build hwsq ucode which we'll use to reclock memory. |
511 | clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1); | 608 | * use pcie refclock if possible, otherwise use mpll */ |
512 | if (clk == 0) | 609 | info->mclk_hwsq.len = 0; |
513 | goto error; | 610 | if (perflvl->memory) { |
611 | ret = calc_mclk(dev, perflvl, info); | ||
612 | if (ret) | ||
613 | goto error; | ||
614 | info->mscript = perflvl->memscript; | ||
615 | } | ||
514 | 616 | ||
515 | info->emast = 0x00000003; | 617 | divs = read_div(dev); |
516 | info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16); | 618 | mast = info->mmast; |
517 | info->ncoef = (N << 8) | M; | ||
518 | 619 | ||
519 | /* shader: tie to nvclk if possible, otherwise use spll. have to be | 620 | /* start building HWSQ script for engine reclocking */ |
520 | * very careful that the shader clock is at least twice the core, or | 621 | hwsq = &info->eclk_hwsq; |
521 | * some chipsets will be very unhappy. i expect most or all of these | 622 | hwsq_init(hwsq); |
522 | * cases will be handled by tying to nvclk, but it's possible there's | 623 | hwsq_setf(hwsq, 0x10, 0); /* disable bus access */ |
523 | * corners | 624 | hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */ |
524 | */ | ||
525 | if (P1-- && perflvl->shader == (perflvl->core << 1)) { | ||
526 | info->emast |= 0x00000020; | ||
527 | info->sctrl = 0x00000000 | (P1 << 19) | (P1 << 16); | ||
528 | info->scoef = nv_rd32(dev, 0x004024); | ||
529 | } else { | ||
530 | clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1); | ||
531 | if (clk == 0) | ||
532 | goto error; | ||
533 | 625 | ||
534 | info->emast |= 0x00000030; | 626 | /* vdec/dom6: switch to "safe" clocks temporarily */ |
535 | info->sctrl = 0x80000000 | (P1 << 19) | (P1 << 16); | 627 | if (perflvl->vdec) { |
536 | info->scoef = (N << 8) | M; | 628 | mast &= ~0x00000c00; |
629 | divs &= ~0x00000700; | ||
537 | } | 630 | } |
538 | 631 | ||
539 | /* memory: build hwsq ucode which we'll use to reclock memory */ | 632 | if (perflvl->dom6) { |
540 | info->mclk_hwsq.len = 0; | 633 | mast &= ~0x0c000000; |
541 | if (perflvl->memory) { | 634 | divs &= ~0x00000007; |
542 | clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq); | ||
543 | if (clk < 0) { | ||
544 | ret = clk; | ||
545 | goto error; | ||
546 | } | ||
547 | |||
548 | info->mscript = perflvl->memscript; | ||
549 | } | 635 | } |
550 | 636 | ||
637 | hwsq_wr32(hwsq, 0x00c040, mast); | ||
638 | |||
551 | /* vdec: avoid modifying xpll until we know exactly how the other | 639 | /* vdec: avoid modifying xpll until we know exactly how the other |
552 | * clock domains work, i suspect at least some of them can also be | 640 | * clock domains work, i suspect at least some of them can also be |
553 | * tied to xpll... | 641 | * tied to xpll... |
554 | */ | 642 | */ |
555 | info->amast = nv_rd32(dev, 0x00c040); | ||
556 | info->pdivs = read_div(dev); | ||
557 | if (perflvl->vdec) { | 643 | if (perflvl->vdec) { |
558 | /* see how close we can get using nvclk as a source */ | 644 | /* see how close we can get using nvclk as a source */ |
559 | clk = calc_div(perflvl->core, perflvl->vdec, &P1); | 645 | clk = calc_div(perflvl->core, perflvl->vdec, &P1); |
@@ -566,16 +652,14 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
566 | out = calc_div(out, perflvl->vdec, &P2); | 652 | out = calc_div(out, perflvl->vdec, &P2); |
567 | 653 | ||
568 | /* select whichever gets us closest */ | 654 | /* select whichever gets us closest */ |
569 | info->amast &= ~0x00000c00; | ||
570 | info->pdivs &= ~0x00000700; | ||
571 | if (abs((int)perflvl->vdec - clk) <= | 655 | if (abs((int)perflvl->vdec - clk) <= |
572 | abs((int)perflvl->vdec - out)) { | 656 | abs((int)perflvl->vdec - out)) { |
573 | if (dev_priv->chipset != 0x98) | 657 | if (dev_priv->chipset != 0x98) |
574 | info->amast |= 0x00000c00; | 658 | mast |= 0x00000c00; |
575 | info->pdivs |= P1 << 8; | 659 | divs |= P1 << 8; |
576 | } else { | 660 | } else { |
577 | info->amast |= 0x00000800; | 661 | mast |= 0x00000800; |
578 | info->pdivs |= P2 << 8; | 662 | divs |= P2 << 8; |
579 | } | 663 | } |
580 | } | 664 | } |
581 | 665 | ||
@@ -583,21 +667,82 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
583 | * of the host clock frequency | 667 | * of the host clock frequency |
584 | */ | 668 | */ |
585 | if (perflvl->dom6) { | 669 | if (perflvl->dom6) { |
586 | info->amast &= ~0x0c000000; | ||
587 | if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) { | 670 | if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) { |
588 | info->amast |= 0x00000000; | 671 | mast |= 0x00000000; |
589 | } else | 672 | } else |
590 | if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) { | 673 | if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) { |
591 | info->amast |= 0x08000000; | 674 | mast |= 0x08000000; |
592 | } else { | 675 | } else { |
593 | clk = read_clk(dev, clk_src_hclk) * 3; | 676 | clk = read_clk(dev, clk_src_hclk) * 3; |
594 | clk = calc_div(clk, perflvl->dom6, &P1); | 677 | clk = calc_div(clk, perflvl->dom6, &P1); |
595 | 678 | ||
596 | info->amast |= 0x0c000000; | 679 | mast |= 0x0c000000; |
597 | info->pdivs = (info->pdivs & ~0x00000007) | P1; | 680 | divs |= P1; |
598 | } | 681 | } |
599 | } | 682 | } |
600 | 683 | ||
684 | /* vdec/dom6: complete switch to new clocks */ | ||
685 | switch (dev_priv->chipset) { | ||
686 | case 0x92: | ||
687 | case 0x94: | ||
688 | case 0x96: | ||
689 | hwsq_wr32(hwsq, 0x004800, divs); | ||
690 | break; | ||
691 | default: | ||
692 | hwsq_wr32(hwsq, 0x004700, divs); | ||
693 | break; | ||
694 | } | ||
695 | |||
696 | hwsq_wr32(hwsq, 0x00c040, mast); | ||
697 | |||
698 | /* core/shader: make sure sclk/nvclk are disconnected from their | ||
699 | * PLLs (nvclk to dom6, sclk to hclk) | ||
700 | */ | ||
701 | if (dev_priv->chipset < 0x92) | ||
702 | mast = (mast & ~0x001000b0) | 0x00100080; | ||
703 | else | ||
704 | mast = (mast & ~0x000000b3) | 0x00000081; | ||
705 | |||
706 | hwsq_wr32(hwsq, 0x00c040, mast); | ||
707 | |||
708 | /* core: for the moment at least, always use nvpll */ | ||
709 | clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1); | ||
710 | if (clk == 0) | ||
711 | goto error; | ||
712 | |||
713 | ctrl = nv_rd32(dev, 0x004028) & ~0xc03f0100; | ||
714 | mast &= ~0x00100000; | ||
715 | mast |= 3; | ||
716 | |||
717 | hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl); | ||
718 | hwsq_wr32(hwsq, 0x00402c, (N << 8) | M); | ||
719 | |||
720 | /* shader: tie to nvclk if possible, otherwise use spll. have to be | ||
721 | * very careful that the shader clock is at least twice the core, or | ||
722 | * some chipsets will be very unhappy. i expect most or all of these | ||
723 | * cases will be handled by tying to nvclk, but it's possible there's | ||
724 | * corners | ||
725 | */ | ||
726 | ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100; | ||
727 | |||
728 | if (P1-- && perflvl->shader == (perflvl->core << 1)) { | ||
729 | hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl); | ||
730 | hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast); | ||
731 | } else { | ||
732 | clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1); | ||
733 | if (clk == 0) | ||
734 | goto error; | ||
735 | ctrl |= 0x80000000; | ||
736 | |||
737 | hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl); | ||
738 | hwsq_wr32(hwsq, 0x004024, (N << 8) | M); | ||
739 | hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast); | ||
740 | } | ||
741 | |||
742 | hwsq_setf(hwsq, 0x10, 1); /* enable bus access */ | ||
743 | hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */ | ||
744 | hwsq_fini(hwsq); | ||
745 | |||
601 | return info; | 746 | return info; |
602 | error: | 747 | error: |
603 | kfree(info); | 748 | kfree(info); |
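
For reference, the vdec path above derives one candidate frequency from the core clock and one from an alternate source, then keeps whichever divider lands closer to the requested rate before packing the 3-bit post-divider into divs. A minimal standalone sketch of that closest-candidate selection follows; the divider model (a plain integer divide capped at 7, in a helper named closest_div here) is an assumption for illustration only, since calc_div() itself is not visible in this hunk.

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for calc_div(): best integer divider 1..7, returns the
 * achieved frequency and the chosen divider */
static unsigned int closest_div(unsigned int src, unsigned int target, int *p)
{
	unsigned int best = src, out;
	int d;

	*p = 1;
	for (d = 2; d <= 7; d++) {
		out = src / d;
		if (abs((int)target - (int)out) < abs((int)target - (int)best)) {
			best = out;
			*p = d;
		}
	}
	return best;
}

int main(void)
{
	unsigned int core = 625000, alt = 500000, vdec = 200000; /* kHz, made up */
	unsigned int divs = 0, a, b;
	int p1, p2;

	a = closest_div(core, vdec, &p1);
	b = closest_div(alt, vdec, &p2);

	/* select whichever gets us closest, as the patch does */
	if (abs((int)vdec - (int)a) <= abs((int)vdec - (int)b)) {
		divs |= p1 << 8;	/* the 3-bit field packed at bits 8..10 */
		printf("vdec from core: /%d -> %u kHz, divs=0x%08x\n", p1, a, divs);
	} else {
		divs |= p2 << 8;
		printf("vdec from alt:  /%d -> %u kHz, divs=0x%08x\n", p2, b, divs);
	}
	return 0;
}
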
@@ -605,23 +750,24 @@ error: | |||
605 | } | 750 | } |
606 | 751 | ||
607 | static int | 752 | static int |
608 | prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq) | 753 | prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq) |
609 | { | 754 | { |
610 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 755 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
611 | u32 hwsq_data, hwsq_kick; | 756 | u32 hwsq_data, hwsq_kick; |
612 | int i; | 757 | int i; |
613 | 758 | ||
614 | if (dev_priv->chipset < 0x90) { | 759 | if (dev_priv->chipset < 0x94) { |
615 | hwsq_data = 0x001400; | 760 | hwsq_data = 0x001400; |
616 | hwsq_kick = 0x00000003; | 761 | hwsq_kick = 0x00000003; |
617 | } else { | 762 | } else { |
618 | hwsq_data = 0x080000; | 763 | hwsq_data = 0x080000; |
619 | hwsq_kick = 0x00000001; | 764 | hwsq_kick = 0x00000001; |
620 | } | 765 | } |
621 | |||
622 | /* upload hwsq ucode */ | 766 | /* upload hwsq ucode */ |
623 | nv_mask(dev, 0x001098, 0x00000008, 0x00000000); | 767 | nv_mask(dev, 0x001098, 0x00000008, 0x00000000); |
624 | nv_wr32(dev, 0x001304, 0x00000000); | 768 | nv_wr32(dev, 0x001304, 0x00000000); |
769 | if (dev_priv->chipset >= 0x92) | ||
770 | nv_wr32(dev, 0x001318, 0x00000000); | ||
625 | for (i = 0; i < hwsq->len / 4; i++) | 771 | for (i = 0; i < hwsq->len / 4; i++) |
626 | nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]); | 772 | nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]); |
627 | nv_mask(dev, 0x001098, 0x00000018, 0x00000018); | 773 | nv_mask(dev, 0x001098, 0x00000018, 0x00000018); |
@@ -645,20 +791,19 @@ prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq) | |||
645 | int | 791 | int |
646 | nv50_pm_clocks_set(struct drm_device *dev, void *data) | 792 | nv50_pm_clocks_set(struct drm_device *dev, void *data) |
647 | { | 793 | { |
648 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
649 | struct nv50_pm_state *info = data; | 794 | struct nv50_pm_state *info = data; |
650 | struct bit_entry M; | 795 | struct bit_entry M; |
651 | int ret = 0; | 796 | int ret = -EBUSY; |
652 | 797 | ||
653 | /* halt and idle execution engines */ | 798 | /* halt and idle execution engines */ |
654 | nv_mask(dev, 0x002504, 0x00000001, 0x00000001); | 799 | nv_mask(dev, 0x002504, 0x00000001, 0x00000001); |
655 | if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) | 800 | if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) |
656 | goto error; | 801 | goto resume; |
802 | if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f)) | ||
803 | goto resume; | ||
657 | 804 | ||
658 | /* memory: it is *very* important we change this first, the ucode | 805 | /* program memory clock, if necessary - must come before engine clock |
659 | * we build in pre() now has hardcoded 0xc040 values, which can't | 806 | * reprogramming due to how we construct the hwsq scripts in pre() |
660 | * change before we execute it or the engine clocks may end up | ||
661 | * messed up. | ||
662 | */ | 807 | */ |
663 | if (info->mclk_hwsq.len) { | 808 | if (info->mclk_hwsq.len) { |
664 | /* execute some scripts that do ??? from the vbios.. */ | 809 | /* execute some scripts that do ??? from the vbios.. */ |
@@ -672,42 +817,14 @@ nv50_pm_clocks_set(struct drm_device *dev, void *data) | |||
672 | nouveau_bios_init_exec(dev, info->mscript); | 817 | nouveau_bios_init_exec(dev, info->mscript); |
673 | } | 818 | } |
674 | 819 | ||
675 | ret = prog_mclk(dev, &info->mclk_hwsq); | 820 | ret = prog_hwsq(dev, &info->mclk_hwsq); |
676 | if (ret) | 821 | if (ret) |
677 | goto resume; | 822 | goto resume; |
678 | } | 823 | } |
679 | 824 | ||
680 | /* reclock vdec/dom6 */ | 825 | /* program engine clocks */ |
681 | nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000); | 826 | ret = prog_hwsq(dev, &info->eclk_hwsq); |
682 | switch (dev_priv->chipset) { | ||
683 | case 0x92: | ||
684 | case 0x94: | ||
685 | case 0x96: | ||
686 | nv_mask(dev, 0x004800, 0x00000707, info->pdivs); | ||
687 | break; | ||
688 | default: | ||
689 | nv_mask(dev, 0x004700, 0x00000707, info->pdivs); | ||
690 | break; | ||
691 | } | ||
692 | nv_mask(dev, 0x00c040, 0x0c000c00, info->amast); | ||
693 | 827 | ||
694 | /* core/shader: make sure sclk/nvclk are disconnected from their | ||
695 | * plls (nvclk to dom6, sclk to hclk), modify the plls, and | ||
696 | * reconnect sclk/nvclk to their new clock source | ||
697 | */ | ||
698 | if (dev_priv->chipset < 0x92) | ||
699 | nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */ | ||
700 | else | ||
701 | nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081); | ||
702 | nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl); | ||
703 | nv_wr32(dev, 0x004024, info->scoef); | ||
704 | nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl); | ||
705 | nv_wr32(dev, 0x00402c, info->ncoef); | ||
706 | nv_mask(dev, 0x00c040, 0x00100033, info->emast); | ||
707 | |||
708 | goto resume; | ||
709 | error: | ||
710 | ret = -EBUSY; | ||
711 | resume: | 828 | resume: |
712 | nv_mask(dev, 0x002504, 0x00000001, 0x00000000); | 829 | nv_mask(dev, 0x002504, 0x00000001, 0x00000000); |
713 | kfree(info); | 830 | kfree(info); |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index c4423ba9c9bf..a7844ab6a50c 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
@@ -36,6 +36,193 @@ | |||
36 | #include "nouveau_crtc.h" | 36 | #include "nouveau_crtc.h" |
37 | #include "nv50_display.h" | 37 | #include "nv50_display.h" |
38 | 38 | ||
39 | static u32 | ||
40 | nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane) | ||
41 | { | ||
42 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
43 | static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ | ||
44 | static const u8 nv50[] = { 16, 8, 0, 24 }; | ||
45 | if (dev_priv->chipset == 0xaf) | ||
46 | return nvaf[lane]; | ||
47 | return nv50[lane]; | ||
48 | } | ||
49 | |||
50 | static void | ||
51 | nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern) | ||
52 | { | ||
53 | u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
54 | nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24); | ||
55 | } | ||
56 | |||
57 | static void | ||
58 | nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb, | ||
59 | u8 lane, u8 swing, u8 preem) | ||
60 | { | ||
61 | u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
62 | u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane); | ||
63 | u32 mask = 0x000000ff << shift; | ||
64 | u8 *table, *entry, *config; | ||
65 | |||
66 | table = nouveau_dp_bios_data(dev, dcb, &entry); | ||
67 | if (!table || (table[0] != 0x20 && table[0] != 0x21)) { | ||
68 | NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); | ||
69 | return; | ||
70 | } | ||
71 | |||
72 | config = entry + table[4]; | ||
73 | while (config[0] != swing || config[1] != preem) { | ||
74 | config += table[5]; | ||
75 | if (config >= entry + table[4] + entry[4] * table[5]) | ||
76 | return; | ||
77 | } | ||
78 | |||
79 | nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift); | ||
80 | nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift); | ||
81 | nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8); | ||
82 | } | ||
83 | |||
84 | static void | ||
85 | nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc, | ||
86 | int link_nr, u32 link_bw, bool enhframe) | ||
87 | { | ||
88 | u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
89 | u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000; | ||
90 | u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000; | ||
91 | u8 *table, *entry, mask; | ||
92 | int i; | ||
93 | |||
94 | table = nouveau_dp_bios_data(dev, dcb, &entry); | ||
95 | if (!table || (table[0] != 0x20 && table[0] != 0x21)) { | ||
96 | NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); | ||
97 | return; | ||
98 | } | ||
99 | |||
100 | entry = ROMPTR(dev, entry[10]); | ||
101 | if (entry) { | ||
102 | while (link_bw < ROM16(entry[0]) * 10) | ||
103 | entry += 4; | ||
104 | |||
105 | nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc); | ||
106 | } | ||
107 | |||
108 | dpctrl |= ((1 << link_nr) - 1) << 16; | ||
109 | if (enhframe) | ||
110 | dpctrl |= 0x00004000; | ||
111 | |||
112 | if (link_bw > 162000) | ||
113 | clksor |= 0x00040000; | ||
114 | |||
115 | nv_wr32(dev, 0x614300 + (or * 0x800), clksor); | ||
116 | nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl); | ||
117 | |||
118 | mask = 0; | ||
119 | for (i = 0; i < link_nr; i++) | ||
120 | mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3); | ||
121 | nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask); | ||
122 | } | ||
123 | |||
124 | static void | ||
125 | nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw) | ||
126 | { | ||
127 | u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000; | ||
128 | u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)); | ||
129 | if (clksor & 0x000c0000) | ||
130 | *bw = 270000; | ||
131 | else | ||
132 | *bw = 162000; | ||
133 | |||
134 | if (dpctrl > 0x00030000) *nr = 4; | ||
135 | else if (dpctrl > 0x00010000) *nr = 2; | ||
136 | else *nr = 1; | ||
137 | } | ||
138 | |||
139 | void | ||
140 | nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp) | ||
141 | { | ||
142 | const u32 symbol = 100000; | ||
143 | int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; | ||
144 | int TU, VTUi, VTUf, VTUa; | ||
145 | u64 link_data_rate, link_ratio, unk; | ||
146 | u32 best_diff = 64 * symbol; | ||
147 | u32 link_nr, link_bw, r; | ||
148 | |||
149 | /* calculate packed data rate for each lane */ | ||
150 | nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw); | ||
151 | link_data_rate = (clk * bpp / 8) / link_nr; | ||
152 | |||
153 | /* calculate ratio of packed data rate to link symbol rate */ | ||
154 | link_ratio = link_data_rate * symbol; | ||
155 | r = do_div(link_ratio, link_bw); | ||
156 | |||
157 | for (TU = 64; TU >= 32; TU--) { | ||
158 | /* calculate average number of valid symbols in each TU */ | ||
159 | u32 tu_valid = link_ratio * TU; | ||
160 | u32 calc, diff; | ||
161 | |||
162 | /* find a hw representation for the fraction.. */ | ||
163 | VTUi = tu_valid / symbol; | ||
164 | calc = VTUi * symbol; | ||
165 | diff = tu_valid - calc; | ||
166 | if (diff) { | ||
167 | if (diff >= (symbol / 2)) { | ||
168 | VTUf = symbol / (symbol - diff); | ||
169 | if (symbol - (VTUf * diff)) | ||
170 | VTUf++; | ||
171 | |||
172 | if (VTUf <= 15) { | ||
173 | VTUa = 1; | ||
174 | calc += symbol - (symbol / VTUf); | ||
175 | } else { | ||
176 | VTUa = 0; | ||
177 | VTUf = 1; | ||
178 | calc += symbol; | ||
179 | } | ||
180 | } else { | ||
181 | VTUa = 0; | ||
182 | VTUf = min((int)(symbol / diff), 15); | ||
183 | calc += symbol / VTUf; | ||
184 | } | ||
185 | |||
186 | diff = calc - tu_valid; | ||
187 | } else { | ||
188 | /* no remainder, but the hw doesn't like the fractional | ||
189 | * part to be zero. decrement the integer part and | ||
190 | * have the fraction add a whole symbol back | ||
191 | */ | ||
192 | VTUa = 0; | ||
193 | VTUf = 1; | ||
194 | VTUi--; | ||
195 | } | ||
196 | |||
197 | if (diff < best_diff) { | ||
198 | best_diff = diff; | ||
199 | bestTU = TU; | ||
200 | bestVTUa = VTUa; | ||
201 | bestVTUf = VTUf; | ||
202 | bestVTUi = VTUi; | ||
203 | if (diff == 0) | ||
204 | break; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | if (!bestTU) { | ||
209 | NV_ERROR(dev, "DP: unable to find suitable config\n"); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | /* XXX close to vbios numbers, but not right */ | ||
214 | unk = (symbol - link_ratio) * bestTU; | ||
215 | unk *= link_ratio; | ||
216 | r = do_div(unk, symbol); | ||
217 | r = do_div(unk, symbol); | ||
218 | unk += 6; | ||
219 | |||
220 | nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2); | ||
221 | nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 | | ||
222 | bestVTUf << 16 | | ||
223 | bestVTUi << 8 | | ||
224 | unk); | ||
225 | } | ||
39 | static void | 226 | static void |
40 | nv50_sor_disconnect(struct drm_encoder *encoder) | 227 | nv50_sor_disconnect(struct drm_encoder *encoder) |
41 | { | 228 | { |
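
The TU/VTU search in nv50_sor_dp_calc_tu() above can be exercised on its own; the sketch below restates the same loop in userspace C, dropping the register accesses and replacing do_div() with ordinary 64-bit division. The inputs (pixel clock, bits per pixel, lane count, and a link rate in the same 162000/270000 representation the function reads back from the hardware) are made-up example values, not taken from any particular board.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t symbol = 100000;
	uint32_t clk = 148500, bpp = 24;	/* pixel clock, bits per pixel */
	uint32_t link_nr = 4, link_bw = 270000;	/* lanes, link rate */
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	uint32_t best_diff = 64 * symbol;
	uint64_t link_data_rate, link_ratio;
	int TU, VTUi, VTUf, VTUa;

	/* packed data rate per lane, and its ratio to the link symbol rate */
	link_data_rate = ((uint64_t)clk * bpp / 8) / link_nr;
	link_ratio = link_data_rate * symbol / link_bw;

	for (TU = 64; TU >= 32; TU--) {
		uint32_t tu_valid = link_ratio * TU;
		uint32_t calc, diff;

		/* integer part, then fit a hw-representable fraction */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;
				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				VTUa = 0;
				VTUf = symbol / diff;
				if (VTUf > 15)
					VTUf = 15;
				calc += symbol / VTUf;
			}
			diff = calc - tu_valid;
		} else {
			/* hw dislikes a zero fraction: borrow a whole symbol */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	printf("TU=%d VTUa=%d VTUf=%d VTUi=%d (diff=%u)\n",
	       bestTU, bestVTUa, bestVTUf, bestVTUi, best_diff);
	return 0;
}

Running it prints the TU/VTUi/VTUf/VTUa tuple the loop would pick for those inputs; the driver then folds that tuple into NV50_SOR_DP_CTRL and NV50_SOR_DP_SCFG as shown above.
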
@@ -117,20 +304,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
117 | } | 304 | } |
118 | 305 | ||
119 | if (nv_encoder->dcb->type == OUTPUT_DP) { | 306 | if (nv_encoder->dcb->type == OUTPUT_DP) { |
120 | struct nouveau_i2c_chan *auxch; | 307 | struct dp_train_func func = { |
121 | 308 | .link_set = nv50_sor_dp_link_set, | |
122 | auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | 309 | .train_set = nv50_sor_dp_train_set, |
123 | if (!auxch) | 310 | .train_adj = nv50_sor_dp_train_adj |
124 | return; | 311 | }; |
125 | 312 | ||
126 | if (mode == DRM_MODE_DPMS_ON) { | 313 | nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); |
127 | u8 status = DP_SET_POWER_D0; | ||
128 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); | ||
129 | nouveau_dp_link_train(encoder, nv_encoder->dp.datarate); | ||
130 | } else { | ||
131 | u8 status = DP_SET_POWER_D3; | ||
132 | nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); | ||
133 | } | ||
134 | } | 314 | } |
135 | } | 315 | } |
136 | 316 | ||
@@ -162,11 +342,8 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
162 | } | 342 | } |
163 | 343 | ||
164 | if (connector->scaling_mode != DRM_MODE_SCALE_NONE && | 344 | if (connector->scaling_mode != DRM_MODE_SCALE_NONE && |
165 | connector->native_mode) { | 345 | connector->native_mode) |
166 | int id = adjusted_mode->base.id; | 346 | drm_mode_copy(adjusted_mode, connector->native_mode); |
167 | *adjusted_mode = *connector->native_mode; | ||
168 | adjusted_mode->base.id = id; | ||
169 | } | ||
170 | 347 | ||
171 | return true; | 348 | return true; |
172 | } | 349 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 6f38ceae3aa4..44fbac9c7d93 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -57,27 +57,15 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, | |||
57 | } | 57 | } |
58 | 58 | ||
59 | static inline u64 | 59 | static inline u64 |
60 | nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) | 60 | vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) |
61 | { | 61 | { |
62 | struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private; | ||
63 | |||
64 | phys |= 1; /* present */ | 62 | phys |= 1; /* present */ |
65 | phys |= (u64)memtype << 40; | 63 | phys |= (u64)memtype << 40; |
66 | |||
67 | /* IGPs don't have real VRAM, re-target to stolen system memory */ | ||
68 | if (target == 0 && dev_priv->vram_sys_base) { | ||
69 | phys += dev_priv->vram_sys_base; | ||
70 | target = 3; | ||
71 | } | ||
72 | |||
73 | phys |= target << 4; | 64 | phys |= target << 4; |
74 | |||
75 | if (vma->access & NV_MEM_ACCESS_SYS) | 65 | if (vma->access & NV_MEM_ACCESS_SYS) |
76 | phys |= (1 << 6); | 66 | phys |= (1 << 6); |
77 | |||
78 | if (!(vma->access & NV_MEM_ACCESS_WO)) | 67 | if (!(vma->access & NV_MEM_ACCESS_WO)) |
79 | phys |= (1 << 3); | 68 | phys |= (1 << 3); |
80 | |||
81 | return phys; | 69 | return phys; |
82 | } | 70 | } |
83 | 71 | ||
@@ -85,11 +73,19 @@ void | |||
85 | nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | 73 | nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, |
86 | struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) | 74 | struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) |
87 | { | 75 | { |
76 | struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private; | ||
88 | u32 comp = (mem->memtype & 0x180) >> 7; | 77 | u32 comp = (mem->memtype & 0x180) >> 7; |
89 | u32 block; | 78 | u32 block, target; |
90 | int i; | 79 | int i; |
91 | 80 | ||
92 | phys = nv50_vm_addr(vma, phys, mem->memtype, 0); | 81 | /* IGPs don't have real VRAM, re-target to stolen system memory */ |
82 | target = 0; | ||
83 | if (dev_priv->vram_sys_base) { | ||
84 | phys += dev_priv->vram_sys_base; | ||
85 | target = 3; | ||
86 | } | ||
87 | |||
88 | phys = vm_addr(vma, phys, mem->memtype, target); | ||
93 | pte <<= 3; | 89 | pte <<= 3; |
94 | cnt <<= 3; | 90 | cnt <<= 3; |
95 | 91 | ||
@@ -125,9 +121,10 @@ void | |||
125 | nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | 121 | nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, |
126 | struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) | 122 | struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) |
127 | { | 123 | { |
124 | u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; | ||
128 | pte <<= 3; | 125 | pte <<= 3; |
129 | while (cnt--) { | 126 | while (cnt--) { |
130 | u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2); | 127 | u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target); |
131 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); | 128 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); |
132 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); | 129 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); |
133 | pte += 8; | 130 | pte += 8; |
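
After this change the IGP re-target to stolen system memory happens in the callers (as the nv50_vm_map() hunk shows), leaving vm_addr() as pure bit packing. A standalone model of that packing follows; the bit positions mirror the code, the access-flag names are simplified stand-ins for the driver's NV_MEM_ACCESS_* constants, and target 2 in the example corresponds to the snooped system-memory case nv50_vm_map_sg() now selects.

#include <stdio.h>
#include <stdint.h>

#define ACCESS_SYS (1 << 0)	/* stand-in flags, not the driver's values */
#define ACCESS_WO  (1 << 1)

static uint64_t pack_pte(uint64_t phys, uint32_t memtype, uint32_t target,
			 uint32_t access)
{
	uint64_t pte = phys;

	pte |= 1;			/* bit 0: page present */
	pte |= (uint64_t)memtype << 40;	/* storage/compression type */
	pte |= target << 4;		/* 0: VRAM, 2/3: system memory */
	if (access & ACCESS_SYS)
		pte |= 1 << 6;
	if (!(access & ACCESS_WO))
		pte |= 1 << 3;		/* mapping is not writable */
	return pte;
}

int main(void)
{
	/* snooped system-memory page, read-only mapping */
	uint64_t pte = pack_pte(0x12345000, 0x00, 2, 0);

	printf("pte = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}
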
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index 2e45e57fd869..9ed9ae397d75 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c | |||
@@ -189,8 +189,25 @@ nv50_vram_init(struct drm_device *dev) | |||
189 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | 189 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; |
190 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | 190 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ |
191 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | 191 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ |
192 | u32 pfb714 = nv_rd32(dev, 0x100714); | ||
192 | u32 rblock, length; | 193 | u32 rblock, length; |
193 | 194 | ||
195 | switch (pfb714 & 0x00000007) { | ||
196 | case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break; | ||
197 | case 1: | ||
198 | if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3) | ||
199 | dev_priv->vram_type = NV_MEM_TYPE_DDR3; | ||
200 | else | ||
201 | dev_priv->vram_type = NV_MEM_TYPE_DDR2; | ||
202 | break; | ||
203 | case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break; | ||
204 | case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break; | ||
205 | case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break; | ||
206 | default: | ||
207 | break; | ||
208 | } | ||
209 | |||
210 | dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4); | ||
194 | dev_priv->vram_size = nv_rd32(dev, 0x10020c); | 211 | dev_priv->vram_size = nv_rd32(dev, 0x10020c); |
195 | dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; | 212 | dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; |
196 | dev_priv->vram_size &= 0xffffffff00ULL; | 213 | dev_priv->vram_size &= 0xffffffff00ULL; |
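
The strap decode added above is a small lookup on the low three bits of 0x100714; here it is as a standalone sketch. The enum mirrors the NV_MEM_TYPE_* names used in the hunk, and vbios_says_ddr3 stands in for the nouveau_mem_vbios_type() call that disambiguates DDR2 from DDR3 when the strap reads 1.

#include <stdio.h>

enum mem_type { UNKNOWN, DDR1, DDR2, DDR3, GDDR3, GDDR4, GDDR5 };

static enum mem_type decode_vram_type(unsigned int pfb714, int vbios_says_ddr3)
{
	switch (pfb714 & 0x00000007) {
	case 0: return DDR1;
	case 1: return vbios_says_ddr3 ? DDR3 : DDR2;
	case 2: return GDDR3;
	case 3: return GDDR4;
	case 4: return GDDR5;
	default: return UNKNOWN;	/* the patch leaves vram_type untouched here */
	}
}

int main(void)
{
	static const char *names[] = {
		"unknown", "DDR1", "DDR2", "DDR3", "GDDR3", "GDDR4", "GDDR5"
	};

	printf("strap 0x2 -> %s\n", names[decode_vram_type(0x2, 0)]);
	return 0;
}
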
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c index e9992f62c1c0..ce65f81bb871 100644 --- a/drivers/gpu/drm/nouveau/nvc0_pm.c +++ b/drivers/gpu/drm/nouveau/nvc0_pm.c | |||
@@ -269,7 +269,7 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq) | |||
269 | clk0 = calc_div(dev, clk, clk0, freq, &div1D); | 269 | clk0 = calc_div(dev, clk, clk0, freq, &div1D); |
270 | 270 | ||
271 | /* see if we can get any closer using PLLs */ | 271 | /* see if we can get any closer using PLLs */ |
272 | if (clk0 != freq) { | 272 | if (clk0 != freq && (0x00004387 & (1 << clk))) { |
273 | if (clk < 7) | 273 | if (clk < 7) |
274 | clk1 = calc_pll(dev, clk, freq, &info->coef); | 274 | clk1 = calc_pll(dev, clk, freq, &info->coef); |
275 | else | 275 | else |
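
The new guard means a divider-only result is refined with a PLL only for clock domains whose bit is set in the 0x00004387 mask, i.e. bits 0-2, 7-9 and 14; which engine each bit corresponds to is not spelled out in this hunk. A trivial standalone sketch of the test:

#include <stdio.h>

/* true if domain 'clk' may fall back to a PLL, per the 0x00004387 mask */
static int domain_may_use_pll(int clk)
{
	return (0x00004387 >> clk) & 1;
}

int main(void)
{
	int clk;

	for (clk = 0; clk < 16; clk++)
		printf("domain %2d: %s\n", clk,
		       domain_may_use_pll(clk) ? "PLL allowed" : "divider only");
	return 0;
}
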
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index 9e352944a35a..30d2bd58828f 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c | |||
@@ -77,9 +77,11 @@ void | |||
77 | nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, | 77 | nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, |
78 | struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) | 78 | struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) |
79 | { | 79 | { |
80 | u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; | ||
81 | |||
80 | pte <<= 3; | 82 | pte <<= 3; |
81 | while (cnt--) { | 83 | while (cnt--) { |
82 | u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5); | 84 | u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target); |
83 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); | 85 | nv_wo32(pgt, pte + 0, lower_32_bits(phys)); |
84 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); | 86 | nv_wo32(pgt, pte + 4, upper_32_bits(phys)); |
85 | pte += 8; | 87 | pte += 8; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index ce984d573a51..a7eef8934c07 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c | |||
@@ -106,31 +106,32 @@ nvc0_vram_init(struct drm_device *dev) | |||
106 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; | 106 | struct nouveau_vram_engine *vram = &dev_priv->engine.vram; |
107 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | 107 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ |
108 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | 108 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ |
109 | u32 parts = nv_rd32(dev, 0x121c74); | 109 | u32 parts = nv_rd32(dev, 0x022438); |
110 | u32 pmask = nv_rd32(dev, 0x022554); | ||
110 | u32 bsize = nv_rd32(dev, 0x10f20c); | 111 | u32 bsize = nv_rd32(dev, 0x10f20c); |
111 | u32 offset, length; | 112 | u32 offset, length; |
112 | bool uniform = true; | 113 | bool uniform = true; |
113 | int ret, part; | 114 | int ret, part; |
114 | 115 | ||
115 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); | 116 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); |
116 | NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); | 117 | NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask); |
118 | |||
119 | dev_priv->vram_type = nouveau_mem_vbios_type(dev); | ||
120 | dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004); | ||
117 | 121 | ||
118 | /* read amount of vram attached to each memory controller */ | 122 | /* read amount of vram attached to each memory controller */ |
119 | part = 0; | 123 | for (part = 0; part < parts; part++) { |
120 | while (parts) { | 124 | if (!(pmask & (1 << part))) { |
121 | u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000)); | 125 | u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000)); |
122 | if (psize == 0) | 126 | if (psize != bsize) { |
123 | continue; | 127 | if (psize < bsize) |
124 | parts--; | 128 | bsize = psize; |
125 | 129 | uniform = false; | |
126 | if (psize != bsize) { | 130 | } |
127 | if (psize < bsize) | 131 | |
128 | bsize = psize; | 132 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); |
129 | uniform = false; | 133 | dev_priv->vram_size += (u64)psize << 20; |
130 | } | 134 | } |
131 | |||
132 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); | ||
133 | dev_priv->vram_size += (u64)psize << 20; | ||
134 | } | 135 | } |
135 | 136 | ||
136 | /* if all controllers have the same amount attached, there's no holes */ | 137 | /* if all controllers have the same amount attached, there's no holes */ |
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index d2ba2f07400b..dfb8a951cbbe 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c | |||
@@ -284,6 +284,8 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
284 | u32 *push; | 284 | u32 *push; |
285 | int ret; | 285 | int ret; |
286 | 286 | ||
287 | evo_sync(crtc->dev, EVO_MASTER); | ||
288 | |||
287 | swap_interval <<= 4; | 289 | swap_interval <<= 4; |
288 | if (swap_interval == 0) | 290 | if (swap_interval == 0) |
289 | swap_interval |= 0x100; | 291 | swap_interval |= 0x100; |
@@ -593,7 +595,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc) | |||
593 | evo_kick(push, crtc->dev, EVO_MASTER); | 595 | evo_kick(push, crtc->dev, EVO_MASTER); |
594 | } | 596 | } |
595 | 597 | ||
596 | nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false); | 598 | nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true); |
597 | nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); | 599 | nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); |
598 | } | 600 | } |
599 | 601 | ||
@@ -634,8 +636,7 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, | |||
634 | u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; | 636 | u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; |
635 | u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; | 637 | u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; |
636 | u32 vblan2e = 0, vblan2s = 1; | 638 | u32 vblan2e = 0, vblan2s = 1; |
637 | u32 magic = 0x31ec6000; | 639 | u32 *push; |
638 | u32 syncs, *push; | ||
639 | int ret; | 640 | int ret; |
640 | 641 | ||
641 | hactive = mode->htotal; | 642 | hactive = mode->htotal; |
@@ -655,15 +656,8 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, | |||
655 | vblan2e = vactive + vsynce + vbackp; | 656 | vblan2e = vactive + vsynce + vbackp; |
656 | vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); | 657 | vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); |
657 | vactive = (vactive * 2) + 1; | 658 | vactive = (vactive * 2) + 1; |
658 | magic |= 0x00000001; | ||
659 | } | 659 | } |
660 | 660 | ||
661 | syncs = 0x00000001; | ||
662 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
663 | syncs |= 0x00000008; | ||
664 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
665 | syncs |= 0x00000010; | ||
666 | |||
667 | ret = nvd0_crtc_swap_fbs(crtc, old_fb); | 661 | ret = nvd0_crtc_swap_fbs(crtc, old_fb); |
668 | if (ret) | 662 | if (ret) |
669 | return ret; | 663 | return ret; |
@@ -683,9 +677,6 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, | |||
683 | evo_data(push, mode->clock * 1000); | 677 | evo_data(push, mode->clock * 1000); |
684 | evo_data(push, 0x00200000); /* ??? */ | 678 | evo_data(push, 0x00200000); /* ??? */ |
685 | evo_data(push, mode->clock * 1000); | 679 | evo_data(push, mode->clock * 1000); |
686 | evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); | ||
687 | evo_data(push, syncs); | ||
688 | evo_data(push, magic); | ||
689 | evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); | 680 | evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); |
690 | evo_data(push, 0x00000311); | 681 | evo_data(push, 0x00000311); |
691 | evo_data(push, 0x00000100); | 682 | evo_data(push, 0x00000100); |
@@ -959,11 +950,6 @@ nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
959 | } | 950 | } |
960 | 951 | ||
961 | static void | 952 | static void |
962 | nvd0_dac_prepare(struct drm_encoder *encoder) | ||
963 | { | ||
964 | } | ||
965 | |||
966 | static void | ||
967 | nvd0_dac_commit(struct drm_encoder *encoder) | 953 | nvd0_dac_commit(struct drm_encoder *encoder) |
968 | { | 954 | { |
969 | } | 955 | } |
@@ -974,13 +960,26 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
974 | { | 960 | { |
975 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 961 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
976 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); | 962 | struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); |
977 | u32 *push; | 963 | u32 syncs, magic, *push; |
964 | |||
965 | syncs = 0x00000001; | ||
966 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
967 | syncs |= 0x00000008; | ||
968 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
969 | syncs |= 0x00000010; | ||
970 | |||
971 | magic = 0x31ec6000 | (nv_crtc->index << 25); | ||
972 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
973 | magic |= 0x00000001; | ||
978 | 974 | ||
979 | nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); | 975 | nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); |
980 | 976 | ||
981 | push = evo_wait(encoder->dev, EVO_MASTER, 4); | 977 | push = evo_wait(encoder->dev, EVO_MASTER, 8); |
982 | if (push) { | 978 | if (push) { |
983 | evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2); | 979 | evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); |
980 | evo_data(push, syncs); | ||
981 | evo_data(push, magic); | ||
982 | evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2); | ||
984 | evo_data(push, 1 << nv_crtc->index); | 983 | evo_data(push, 1 << nv_crtc->index); |
985 | evo_data(push, 0x00ff); | 984 | evo_data(push, 0x00ff); |
986 | evo_kick(push, encoder->dev, EVO_MASTER); | 985 | evo_kick(push, encoder->dev, EVO_MASTER); |
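
The syncs/magic pair that encoders now program per head through EVO method 0x0404 + (head * 0x300) is assembled the same way in the DAC and SOR paths; a standalone sketch of that assembly follows, with simplified stand-ins for the DRM_MODE_FLAG_* values (the SOR DP case later adds extra bits to syncs depending on the sink's bpc).

#include <stdio.h>
#include <stdint.h>

#define FLAG_NHSYNC	(1 << 0)	/* stand-ins for DRM_MODE_FLAG_* */
#define FLAG_NVSYNC	(1 << 1)
#define FLAG_INTERLACE	(1 << 2)

static void build_head_words(int head, unsigned int flags,
			     uint32_t *syncs, uint32_t *magic)
{
	*syncs = 0x00000001;
	if (flags & FLAG_NHSYNC)
		*syncs |= 0x00000008;	/* negative hsync */
	if (flags & FLAG_NVSYNC)
		*syncs |= 0x00000010;	/* negative vsync */

	*magic = 0x31ec6000 | (head << 25);
	if (flags & FLAG_INTERLACE)
		*magic |= 0x00000001;
}

int main(void)
{
	uint32_t syncs, magic;

	build_head_words(1, FLAG_NHSYNC | FLAG_NVSYNC, &syncs, &magic);
	printf("head 1: syncs=0x%08x magic=0x%08x\n", syncs, magic);
	return 0;
}
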
@@ -1043,7 +1042,7 @@ nvd0_dac_destroy(struct drm_encoder *encoder) | |||
1043 | static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = { | 1042 | static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = { |
1044 | .dpms = nvd0_dac_dpms, | 1043 | .dpms = nvd0_dac_dpms, |
1045 | .mode_fixup = nvd0_dac_mode_fixup, | 1044 | .mode_fixup = nvd0_dac_mode_fixup, |
1046 | .prepare = nvd0_dac_prepare, | 1045 | .prepare = nvd0_dac_disconnect, |
1047 | .commit = nvd0_dac_commit, | 1046 | .commit = nvd0_dac_commit, |
1048 | .mode_set = nvd0_dac_mode_set, | 1047 | .mode_set = nvd0_dac_mode_set, |
1049 | .disable = nvd0_dac_disconnect, | 1048 | .disable = nvd0_dac_disconnect, |
@@ -1183,6 +1182,143 @@ nvd0_hdmi_disconnect(struct drm_encoder *encoder) | |||
1183 | /****************************************************************************** | 1182 | /****************************************************************************** |
1184 | * SOR | 1183 | * SOR |
1185 | *****************************************************************************/ | 1184 | *****************************************************************************/ |
1185 | static inline u32 | ||
1186 | nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane) | ||
1187 | { | ||
1188 | static const u8 nvd0[] = { 16, 8, 0, 24 }; | ||
1189 | return nvd0[lane]; | ||
1190 | } | ||
1191 | |||
1192 | static void | ||
1193 | nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern) | ||
1194 | { | ||
1195 | const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
1196 | const u32 loff = (or * 0x800) + (link * 0x80); | ||
1197 | nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); | ||
1198 | } | ||
1199 | |||
1200 | static void | ||
1201 | nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb, | ||
1202 | u8 lane, u8 swing, u8 preem) | ||
1203 | { | ||
1204 | const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
1205 | const u32 loff = (or * 0x800) + (link * 0x80); | ||
1206 | u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane); | ||
1207 | u32 mask = 0x000000ff << shift; | ||
1208 | u8 *table, *entry, *config = NULL; | ||
1209 | |||
1210 | switch (swing) { | ||
1211 | case 0: preem += 0; break; | ||
1212 | case 1: preem += 4; break; | ||
1213 | case 2: preem += 7; break; | ||
1214 | case 3: preem += 9; break; | ||
1215 | } | ||
1216 | |||
1217 | table = nouveau_dp_bios_data(dev, dcb, &entry); | ||
1218 | if (table) { | ||
1219 | if (table[0] == 0x30) { | ||
1220 | config = entry + table[4]; | ||
1221 | config += table[5] * preem; | ||
1222 | } | ||
1223 | } | ||
1224 | |||
1225 | if (!config) { | ||
1226 | NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); | ||
1227 | return; | ||
1228 | } | ||
1229 | |||
1230 | nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift); | ||
1231 | nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift); | ||
1232 | nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8); | ||
1233 | nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000); | ||
1234 | } | ||
1235 | |||
1236 | static void | ||
1237 | nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc, | ||
1238 | int link_nr, u32 link_bw, bool enhframe) | ||
1239 | { | ||
1240 | const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
1241 | const u32 loff = (or * 0x800) + (link * 0x80); | ||
1242 | const u32 soff = (or * 0x800); | ||
1243 | u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000; | ||
1244 | u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000; | ||
1245 | u32 script = 0x0000, lane_mask = 0; | ||
1246 | u8 *table, *entry; | ||
1247 | int i; | ||
1248 | |||
1249 | link_bw /= 27000; | ||
1250 | |||
1251 | table = nouveau_dp_bios_data(dev, dcb, &entry); | ||
1252 | if (table) { | ||
1253 | if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]); | ||
1254 | else entry = NULL; | ||
1255 | |||
1256 | while (entry) { | ||
1257 | if (entry[0] >= link_bw) | ||
1258 | break; | ||
1259 | entry += 3; | ||
1260 | } | ||
1261 | |||
1262 | nouveau_bios_run_init_table(dev, script, dcb, crtc); | ||
1263 | } | ||
1264 | |||
1265 | clksor |= link_bw << 18; | ||
1266 | dpctrl |= ((1 << link_nr) - 1) << 16; | ||
1267 | if (enhframe) | ||
1268 | dpctrl |= 0x00004000; | ||
1269 | |||
1270 | for (i = 0; i < link_nr; i++) | ||
1271 | lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3); | ||
1272 | |||
1273 | nv_wr32(dev, 0x612300 + soff, clksor); | ||
1274 | nv_wr32(dev, 0x61c10c + loff, dpctrl); | ||
1275 | nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask); | ||
1276 | } | ||
1277 | |||
1278 | static void | ||
1279 | nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb, | ||
1280 | u32 *link_nr, u32 *link_bw) | ||
1281 | { | ||
1282 | const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); | ||
1283 | const u32 loff = (or * 0x800) + (link * 0x80); | ||
1284 | const u32 soff = (or * 0x800); | ||
1285 | u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000; | ||
1286 | u32 clksor = nv_rd32(dev, 0x612300 + soff); | ||
1287 | |||
1288 | if (dpctrl > 0x00030000) *link_nr = 4; | ||
1289 | else if (dpctrl > 0x00010000) *link_nr = 2; | ||
1290 | else *link_nr = 1; | ||
1291 | |||
1292 | *link_bw = (clksor & 0x007c0000) >> 18; | ||
1293 | *link_bw *= 27000; | ||
1294 | } | ||
1295 | |||
1296 | static void | ||
1297 | nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb, | ||
1298 | u32 crtc, u32 datarate) | ||
1299 | { | ||
1300 | const u32 symbol = 100000; | ||
1301 | const u32 TU = 64; | ||
1302 | u32 link_nr, link_bw; | ||
1303 | u64 ratio, value; | ||
1304 | |||
1305 | nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw); | ||
1306 | |||
1307 | ratio = datarate; | ||
1308 | ratio *= symbol; | ||
1309 | do_div(ratio, link_nr * link_bw); | ||
1310 | |||
1311 | value = (symbol - ratio) * TU; | ||
1312 | value *= ratio; | ||
1313 | do_div(value, symbol); | ||
1314 | do_div(value, symbol); | ||
1315 | |||
1316 | value += 5; | ||
1317 | value |= 0x08000000; | ||
1318 | |||
1319 | nv_wr32(dev, 0x616610 + (crtc * 0x800), value); | ||
1320 | } | ||
1321 | |||
1186 | static void | 1322 | static void |
1187 | nvd0_sor_dpms(struct drm_encoder *encoder, int mode) | 1323 | nvd0_sor_dpms(struct drm_encoder *encoder, int mode) |
1188 | { | 1324 | { |
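
nvd0_sor_dp_calc_tu() above reduces to a short fixed-point computation; the restatement below drops the final write to 0x616610 and replaces do_div() with plain 64-bit division. The example inputs are invented, and datarate follows the pixel-clock * bits-per-pixel / 8 convention the mode_set hunk later in this file uses.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t symbol = 100000;
	const uint32_t TU = 64;
	uint32_t datarate = 148500 * 24 / 8;	/* example: mode->clock * bpp / 8 */
	uint32_t link_nr = 4, link_bw = 270000;
	uint64_t ratio, value;

	/* ratio of payload rate to total link symbol rate, scaled by 'symbol' */
	ratio = (uint64_t)datarate * symbol;
	ratio /= (uint64_t)link_nr * link_bw;

	value = (symbol - ratio) * TU;
	value *= ratio;
	value /= symbol;
	value /= symbol;

	value += 5;
	value |= 0x08000000;

	printf("0x616610 + (crtc * 0x800) <- 0x%08llx\n",
	       (unsigned long long)value);
	return 0;
}
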
@@ -1215,6 +1351,16 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode) | |||
1215 | nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl); | 1351 | nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl); |
1216 | nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); | 1352 | nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); |
1217 | nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000); | 1353 | nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000); |
1354 | |||
1355 | if (nv_encoder->dcb->type == OUTPUT_DP) { | ||
1356 | struct dp_train_func func = { | ||
1357 | .link_set = nvd0_sor_dp_link_set, | ||
1358 | .train_set = nvd0_sor_dp_train_set, | ||
1359 | .train_adj = nvd0_sor_dp_train_adj | ||
1360 | }; | ||
1361 | |||
1362 | nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); | ||
1363 | } | ||
1218 | } | 1364 | } |
1219 | 1365 | ||
1220 | static bool | 1366 | static bool |
@@ -1237,8 +1383,37 @@ nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1237 | } | 1383 | } |
1238 | 1384 | ||
1239 | static void | 1385 | static void |
1386 | nvd0_sor_disconnect(struct drm_encoder *encoder) | ||
1387 | { | ||
1388 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
1389 | struct drm_device *dev = encoder->dev; | ||
1390 | u32 *push; | ||
1391 | |||
1392 | if (nv_encoder->crtc) { | ||
1393 | nvd0_crtc_prepare(nv_encoder->crtc); | ||
1394 | |||
1395 | push = evo_wait(dev, EVO_MASTER, 4); | ||
1396 | if (push) { | ||
1397 | evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); | ||
1398 | evo_data(push, 0x00000000); | ||
1399 | evo_mthd(push, 0x0080, 1); | ||
1400 | evo_data(push, 0x00000000); | ||
1401 | evo_kick(push, dev, EVO_MASTER); | ||
1402 | } | ||
1403 | |||
1404 | nvd0_hdmi_disconnect(encoder); | ||
1405 | |||
1406 | nv_encoder->crtc = NULL; | ||
1407 | nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | static void | ||
1240 | nvd0_sor_prepare(struct drm_encoder *encoder) | 1412 | nvd0_sor_prepare(struct drm_encoder *encoder) |
1241 | { | 1413 | { |
1414 | nvd0_sor_disconnect(encoder); | ||
1415 | if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP) | ||
1416 | evo_sync(encoder->dev, EVO_MASTER); | ||
1242 | } | 1417 | } |
1243 | 1418 | ||
1244 | static void | 1419 | static void |
@@ -1257,7 +1432,18 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | |||
1257 | struct nouveau_connector *nv_connector; | 1432 | struct nouveau_connector *nv_connector; |
1258 | struct nvbios *bios = &dev_priv->vbios; | 1433 | struct nvbios *bios = &dev_priv->vbios; |
1259 | u32 mode_ctrl = (1 << nv_crtc->index); | 1434 | u32 mode_ctrl = (1 << nv_crtc->index); |
1260 | u32 *push, or_config; | 1435 | u32 syncs, magic, *push; |
1436 | u32 or_config; | ||
1437 | |||
1438 | syncs = 0x00000001; | ||
1439 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
1440 | syncs |= 0x00000008; | ||
1441 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
1442 | syncs |= 0x00000010; | ||
1443 | |||
1444 | magic = 0x31ec6000 | (nv_crtc->index << 25); | ||
1445 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1446 | magic |= 0x00000001; | ||
1261 | 1447 | ||
1262 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | 1448 | nv_connector = nouveau_encoder_connector_get(nv_encoder); |
1263 | switch (nv_encoder->dcb->type) { | 1449 | switch (nv_encoder->dcb->type) { |
@@ -1306,6 +1492,22 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | |||
1306 | 1492 | ||
1307 | } | 1493 | } |
1308 | break; | 1494 | break; |
1495 | case OUTPUT_DP: | ||
1496 | if (nv_connector->base.display_info.bpc == 6) { | ||
1497 | nv_encoder->dp.datarate = mode->clock * 18 / 8; | ||
1498 | syncs |= 0x00000140; | ||
1499 | } else { | ||
1500 | nv_encoder->dp.datarate = mode->clock * 24 / 8; | ||
1501 | syncs |= 0x00000180; | ||
1502 | } | ||
1503 | |||
1504 | if (nv_encoder->dcb->sorconf.link & 1) | ||
1505 | mode_ctrl |= 0x00000800; | ||
1506 | else | ||
1507 | mode_ctrl |= 0x00000900; | ||
1508 | |||
1509 | or_config = (mode_ctrl & 0x00000f00) >> 8; | ||
1510 | break; | ||
1309 | default: | 1511 | default: |
1310 | BUG_ON(1); | 1512 | BUG_ON(1); |
1311 | break; | 1513 | break; |
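
A compact restatement of the new OUTPUT_DP case: the DP payload rate and the additional syncs bits come from the sink's bpc, and or_config is derived from the link-select bits placed in mode_ctrl. This is a standalone sketch with example values only; bpc, clock and link_a stand in for display_info.bpc, mode->clock and dcb->sorconf.link & 1.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clock = 148500;	/* example mode->clock */
	int bpc = 8;			/* example display_info.bpc */
	int link_a = 1;			/* example dcb->sorconf.link & 1 */
	uint32_t mode_ctrl = 1 << 0;	/* head 0 */
	uint32_t syncs = 0x00000001;
	uint32_t datarate, or_config;

	if (bpc == 6) {
		datarate = clock * 18 / 8;	/* 6 bpc -> 18 bits per pixel */
		syncs |= 0x00000140;
	} else {
		datarate = clock * 24 / 8;	/* 8 bpc -> 24 bits per pixel */
		syncs |= 0x00000180;
	}

	mode_ctrl |= link_a ? 0x00000800 : 0x00000900;
	or_config = (mode_ctrl & 0x00000f00) >> 8;

	printf("datarate=%u syncs=0x%08x or_config=0x%x\n",
	       datarate, syncs, or_config);
	return 0;
}
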
@@ -1313,9 +1515,17 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | |||
1313 | 1515 | ||
1314 | nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); | 1516 | nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); |
1315 | 1517 | ||
1316 | push = evo_wait(dev, EVO_MASTER, 4); | 1518 | if (nv_encoder->dcb->type == OUTPUT_DP) { |
1519 | nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index, | ||
1520 | nv_encoder->dp.datarate); | ||
1521 | } | ||
1522 | |||
1523 | push = evo_wait(dev, EVO_MASTER, 8); | ||
1317 | if (push) { | 1524 | if (push) { |
1318 | evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2); | 1525 | evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); |
1526 | evo_data(push, syncs); | ||
1527 | evo_data(push, magic); | ||
1528 | evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2); | ||
1319 | evo_data(push, mode_ctrl); | 1529 | evo_data(push, mode_ctrl); |
1320 | evo_data(push, or_config); | 1530 | evo_data(push, or_config); |
1321 | evo_kick(push, dev, EVO_MASTER); | 1531 | evo_kick(push, dev, EVO_MASTER); |
@@ -1325,32 +1535,6 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, | |||
1325 | } | 1535 | } |
1326 | 1536 | ||
1327 | static void | 1537 | static void |
1328 | nvd0_sor_disconnect(struct drm_encoder *encoder) | ||
1329 | { | ||
1330 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
1331 | struct drm_device *dev = encoder->dev; | ||
1332 | u32 *push; | ||
1333 | |||
1334 | if (nv_encoder->crtc) { | ||
1335 | nvd0_crtc_prepare(nv_encoder->crtc); | ||
1336 | |||
1337 | push = evo_wait(dev, EVO_MASTER, 4); | ||
1338 | if (push) { | ||
1339 | evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); | ||
1340 | evo_data(push, 0x00000000); | ||
1341 | evo_mthd(push, 0x0080, 1); | ||
1342 | evo_data(push, 0x00000000); | ||
1343 | evo_kick(push, dev, EVO_MASTER); | ||
1344 | } | ||
1345 | |||
1346 | nvd0_hdmi_disconnect(encoder); | ||
1347 | |||
1348 | nv_encoder->crtc = NULL; | ||
1349 | nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; | ||
1350 | } | ||
1351 | } | ||
1352 | |||
1353 | static void | ||
1354 | nvd0_sor_destroy(struct drm_encoder *encoder) | 1538 | nvd0_sor_destroy(struct drm_encoder *encoder) |
1355 | { | 1539 | { |
1356 | drm_encoder_cleanup(encoder); | 1540 | drm_encoder_cleanup(encoder); |
@@ -1402,17 +1586,19 @@ static struct dcb_entry * | |||
1402 | lookup_dcb(struct drm_device *dev, int id, u32 mc) | 1586 | lookup_dcb(struct drm_device *dev, int id, u32 mc) |
1403 | { | 1587 | { |
1404 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 1588 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
1405 | int type, or, i; | 1589 | int type, or, i, link = -1; |
1406 | 1590 | ||
1407 | if (id < 4) { | 1591 | if (id < 4) { |
1408 | type = OUTPUT_ANALOG; | 1592 | type = OUTPUT_ANALOG; |
1409 | or = id; | 1593 | or = id; |
1410 | } else { | 1594 | } else { |
1411 | switch (mc & 0x00000f00) { | 1595 | switch (mc & 0x00000f00) { |
1412 | case 0x00000000: type = OUTPUT_LVDS; break; | 1596 | case 0x00000000: link = 0; type = OUTPUT_LVDS; break; |
1413 | case 0x00000100: type = OUTPUT_TMDS; break; | 1597 | case 0x00000100: link = 0; type = OUTPUT_TMDS; break; |
1414 | case 0x00000200: type = OUTPUT_TMDS; break; | 1598 | case 0x00000200: link = 1; type = OUTPUT_TMDS; break; |
1415 | case 0x00000500: type = OUTPUT_TMDS; break; | 1599 | case 0x00000500: link = 0; type = OUTPUT_TMDS; break; |
1600 | case 0x00000800: link = 0; type = OUTPUT_DP; break; | ||
1601 | case 0x00000900: link = 1; type = OUTPUT_DP; break; | ||
1416 | default: | 1602 | default: |
1417 | NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc); | 1603 | NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc); |
1418 | return NULL; | 1604 | return NULL; |
@@ -1423,7 +1609,8 @@ lookup_dcb(struct drm_device *dev, int id, u32 mc) | |||
1423 | 1609 | ||
1424 | for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { | 1610 | for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { |
1425 | struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; | 1611 | struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; |
1426 | if (dcb->type == type && (dcb->or & (1 << or))) | 1612 | if (dcb->type == type && (dcb->or & (1 << or)) && |
1613 | (link < 0 || link == !(dcb->sorconf.link & 1))) | ||
1427 | return dcb; | 1614 | return dcb; |
1428 | } | 1615 | } |
1429 | 1616 | ||
@@ -1498,6 +1685,7 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) | |||
1498 | break; | 1685 | break; |
1499 | case OUTPUT_TMDS: | 1686 | case OUTPUT_TMDS: |
1500 | case OUTPUT_LVDS: | 1687 | case OUTPUT_LVDS: |
1688 | case OUTPUT_DP: | ||
1501 | if (cfg & 0x00000100) | 1689 | if (cfg & 0x00000100) |
1502 | tmp = 0x00000101; | 1690 | tmp = 0x00000101; |
1503 | else | 1691 | else |
@@ -1548,7 +1736,7 @@ nvd0_display_bh(unsigned long data) | |||
1548 | { | 1736 | { |
1549 | struct drm_device *dev = (struct drm_device *)data; | 1737 | struct drm_device *dev = (struct drm_device *)data; |
1550 | struct nvd0_display *disp = nvd0_display(dev); | 1738 | struct nvd0_display *disp = nvd0_display(dev); |
1551 | u32 mask, crtc; | 1739 | u32 mask = 0, crtc = ~0; |
1552 | int i; | 1740 | int i; |
1553 | 1741 | ||
1554 | if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { | 1742 | if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { |
@@ -1564,12 +1752,8 @@ nvd0_display_bh(unsigned long data) | |||
1564 | } | 1752 | } |
1565 | } | 1753 | } |
1566 | 1754 | ||
1567 | mask = nv_rd32(dev, 0x6101d4); | 1755 | while (!mask && ++crtc < dev->mode_config.num_crtc) |
1568 | crtc = 0; | 1756 | mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800)); |
1569 | if (!mask) { | ||
1570 | mask = nv_rd32(dev, 0x6109d4); | ||
1571 | crtc = 1; | ||
1572 | } | ||
1573 | 1757 | ||
1574 | if (disp->modeset & 0x00000001) | 1758 | if (disp->modeset & 0x00000001) |
1575 | nvd0_display_unk1_handler(dev, crtc, mask); | 1759 | nvd0_display_unk1_handler(dev, crtc, mask); |
@@ -1584,6 +1768,7 @@ nvd0_display_intr(struct drm_device *dev) | |||
1584 | { | 1768 | { |
1585 | struct nvd0_display *disp = nvd0_display(dev); | 1769 | struct nvd0_display *disp = nvd0_display(dev); |
1586 | u32 intr = nv_rd32(dev, 0x610088); | 1770 | u32 intr = nv_rd32(dev, 0x610088); |
1771 | int i; | ||
1587 | 1772 | ||
1588 | if (intr & 0x00000001) { | 1773 | if (intr & 0x00000001) { |
1589 | u32 stat = nv_rd32(dev, 0x61008c); | 1774 | u32 stat = nv_rd32(dev, 0x61008c); |
@@ -1628,16 +1813,13 @@ nvd0_display_intr(struct drm_device *dev) | |||
1628 | intr &= ~0x00100000; | 1813 | intr &= ~0x00100000; |
1629 | } | 1814 | } |
1630 | 1815 | ||
1631 | if (intr & 0x01000000) { | 1816 | for (i = 0; i < dev->mode_config.num_crtc; i++) { |
1632 | u32 stat = nv_rd32(dev, 0x6100bc); | 1817 | u32 mask = 0x01000000 << i; |
1633 | nv_wr32(dev, 0x6100bc, stat); | 1818 | if (intr & mask) { |
1634 | intr &= ~0x01000000; | 1819 | u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800)); |
1635 | } | 1820 | nv_wr32(dev, 0x6100bc + (i * 0x800), stat); |
1636 | 1821 | intr &= ~mask; | |
1637 | if (intr & 0x02000000) { | 1822 | } |
1638 | u32 stat = nv_rd32(dev, 0x6108bc); | ||
1639 | nv_wr32(dev, 0x6108bc, stat); | ||
1640 | intr &= ~0x02000000; | ||
1641 | } | 1823 | } |
1642 | 1824 | ||
1643 | if (intr) | 1825 | if (intr) |
@@ -1774,7 +1956,7 @@ nvd0_display_create(struct drm_device *dev) | |||
1774 | struct pci_dev *pdev = dev->pdev; | 1956 | struct pci_dev *pdev = dev->pdev; |
1775 | struct nvd0_display *disp; | 1957 | struct nvd0_display *disp; |
1776 | struct dcb_entry *dcbe; | 1958 | struct dcb_entry *dcbe; |
1777 | int ret, i; | 1959 | int crtcs, ret, i; |
1778 | 1960 | ||
1779 | disp = kzalloc(sizeof(*disp), GFP_KERNEL); | 1961 | disp = kzalloc(sizeof(*disp), GFP_KERNEL); |
1780 | if (!disp) | 1962 | if (!disp) |
@@ -1782,7 +1964,8 @@ nvd0_display_create(struct drm_device *dev) | |||
1782 | dev_priv->engine.display.priv = disp; | 1964 | dev_priv->engine.display.priv = disp; |
1783 | 1965 | ||
1784 | /* create crtc objects to represent the hw heads */ | 1966 | /* create crtc objects to represent the hw heads */ |
1785 | for (i = 0; i < 2; i++) { | 1967 | crtcs = nv_rd32(dev, 0x022448); |
1968 | for (i = 0; i < crtcs; i++) { | ||
1786 | ret = nvd0_crtc_create(dev, i); | 1969 | ret = nvd0_crtc_create(dev, i); |
1787 | if (ret) | 1970 | if (ret) |
1788 | goto out; | 1971 | goto out; |
@@ -1803,6 +1986,7 @@ nvd0_display_create(struct drm_device *dev) | |||
1803 | switch (dcbe->type) { | 1986 | switch (dcbe->type) { |
1804 | case OUTPUT_TMDS: | 1987 | case OUTPUT_TMDS: |
1805 | case OUTPUT_LVDS: | 1988 | case OUTPUT_LVDS: |
1989 | case OUTPUT_DP: | ||
1806 | nvd0_sor_create(connector, dcbe); | 1990 | nvd0_sor_create(connector, dcbe); |
1807 | break; | 1991 | break; |
1808 | case OUTPUT_ANALOG: | 1992 | case OUTPUT_ANALOG: |
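
Editor's note: the nvd0 changes above replace the hard-coded two-head register reads with a single base-plus-stride loop (0x6101d4 + crtc * 0x800) over however many CRTCs the hardware reports. Below is a minimal standalone sketch of that scan pattern, not the driver code itself; the mock register read, the head count, and the fake pending status are illustrative stand-ins for nv_rd32() and real hardware state.

#include <stdint.h>
#include <stdio.h>

#define DISP_STAT_BASE   0x6101d4  /* per-head status base, from the diff above */
#define DISP_HEAD_STRIDE 0x800     /* per-head register stride */

/* Mock MMIO read: stands in for nv_rd32(); returns a fake pending
 * status for head 1 so the scan below has something to find. */
static uint32_t mock_rd32(uint32_t reg)
{
        return (reg == DISP_STAT_BASE + 1 * DISP_HEAD_STRIDE) ? 0x1 : 0x0;
}

/* Scan heads 0..num_heads-1 and return the first one with a non-zero
 * status word, mirroring the while() loop added to nvd0_display_bh(). */
static int first_pending_head(int num_heads, uint32_t *mask)
{
        int head = -1;

        *mask = 0;
        while (!*mask && ++head < num_heads)
                *mask = mock_rd32(DISP_STAT_BASE + head * DISP_HEAD_STRIDE);
        return *mask ? head : -1;
}

int main(void)
{
        uint32_t mask;
        int head = first_pending_head(4, &mask);

        printf("pending head %d, mask 0x%08x\n", head, mask);
        return 0;
}
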
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 6a5f4395838f..88718fad5d6d 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c | |||
@@ -85,6 +85,7 @@ static struct drm_driver driver = { | |||
85 | 85 | ||
86 | int r128_driver_load(struct drm_device *dev, unsigned long flags) | 86 | int r128_driver_load(struct drm_device *dev, unsigned long flags) |
87 | { | 87 | { |
88 | pci_set_master(dev->pdev); | ||
88 | return drm_vblank_init(dev, 1); | 89 | return drm_vblank_init(dev, 1); |
89 | } | 90 | } |
90 | 91 | ||
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 2139fe893ec5..84104153a684 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -71,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
71 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 71 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
72 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ | 72 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ |
73 | radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \ | 73 | radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \ |
74 | radeon_semaphore.o radeon_sa.o | 74 | radeon_semaphore.o radeon_sa.o atombios_i2c.o |
75 | 75 | ||
76 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 76 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
77 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 77 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 742f17f009a9..72672ea3f6d3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -1031,6 +1031,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1031 | struct radeon_bo *rbo; | 1031 | struct radeon_bo *rbo; |
1032 | uint64_t fb_location; | 1032 | uint64_t fb_location; |
1033 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1033 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1034 | unsigned bankw, bankh, mtaspect, tile_split; | ||
1034 | u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); | 1035 | u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); |
1035 | u32 tmp, viewport_w, viewport_h; | 1036 | u32 tmp, viewport_w, viewport_h; |
1036 | int r; | 1037 | int r; |
@@ -1121,20 +1122,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1121 | break; | 1122 | break; |
1122 | } | 1123 | } |
1123 | 1124 | ||
1124 | switch ((tmp & 0xf000) >> 12) { | ||
1125 | case 0: /* 1KB rows */ | ||
1126 | default: | ||
1127 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB); | ||
1128 | break; | ||
1129 | case 1: /* 2KB rows */ | ||
1130 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB); | ||
1131 | break; | ||
1132 | case 2: /* 4KB rows */ | ||
1133 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB); | ||
1134 | break; | ||
1135 | } | ||
1136 | |||
1137 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); | 1125 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); |
1126 | |||
1127 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | ||
1128 | fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); | ||
1129 | fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); | ||
1130 | fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); | ||
1131 | fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); | ||
1138 | } else if (tiling_flags & RADEON_TILING_MICRO) | 1132 | } else if (tiling_flags & RADEON_TILING_MICRO) |
1139 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); | 1133 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); |
1140 | 1134 | ||
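
Editor's note: the atombios_crtc.c hunk above stops deriving the tile split from a config-register snapshot and instead decodes bank width, bank height, macro-tile aspect and tile split from the buffer object's tiling flags via evergreen_tiling_fields(). A small sketch of that shift-and-mask unpacking follows; the field positions and widths used here are illustrative only and do not match the real RADEON_TILING_EG_* layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only -- the real RADEON_TILING_EG_* shifts
 * and masks live in the radeon headers and differ from these values. */
#define EG_BANKW_SHIFT       0
#define EG_BANKH_SHIFT       4
#define EG_MT_ASPECT_SHIFT   8
#define EG_TILE_SPLIT_SHIFT  12
#define EG_FIELD_MASK        0xf

struct eg_tiling {
        unsigned bankw, bankh, mtaspect, tile_split;
};

/* Unpack the per-surface tiling parameters from one packed flags word,
 * the same shift-and-mask pattern evergreen_tiling_fields() uses before
 * converting each value to its register encoding. */
static void unpack_tiling(uint32_t flags, struct eg_tiling *t)
{
        t->bankw      = (flags >> EG_BANKW_SHIFT)      & EG_FIELD_MASK;
        t->bankh      = (flags >> EG_BANKH_SHIFT)      & EG_FIELD_MASK;
        t->mtaspect   = (flags >> EG_MT_ASPECT_SHIFT)  & EG_FIELD_MASK;
        t->tile_split = (flags >> EG_TILE_SPLIT_SHIFT) & EG_FIELD_MASK;
}

int main(void)
{
        struct eg_tiling t;

        unpack_tiling(0x2421, &t); /* bankw=1 bankh=2 mtaspect=4 split=2 */
        printf("bankw=%u bankh=%u mtaspect=%u tile_split=%u\n",
               t.bankw, t.bankh, t.mtaspect, t.tile_split);
        return 0;
}
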
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 552b436451fd..191218ad92e7 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -746,7 +746,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) | |||
746 | 746 | ||
747 | /* set the lane count on the sink */ | 747 | /* set the lane count on the sink */ |
748 | tmp = dp_info->dp_lane_count; | 748 | tmp = dp_info->dp_lane_count; |
749 | if (dp_info->dpcd[0] >= 0x11) | 749 | if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 && |
750 | dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP) | ||
750 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 751 | tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
751 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); | 752 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); |
752 | 753 | ||
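
Editor's note: the atombios_dp.c fix only enables enhanced framing when the sink both reports DPCD revision 1.1+ and sets the ENHANCED_FRAME_CAP bit in MAX_LANE_COUNT. A standalone sketch of that decision follows; the DPCD offsets and bit values are written out as in the DisplayPort spec rather than pulled from drm_dp_helper.h, so treat the numeric constants as illustrative.

#include <stdint.h>
#include <stdio.h>

/* DPCD offsets/bits used below; illustrative copies of the usual values. */
#define DP_DPCD_REV                     0x000
#define DP_MAX_LANE_COUNT               0x002
#define DP_ENHANCED_FRAME_CAP           (1 << 7)
#define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)

/* Build the value written to DP_LANE_COUNT_SET: the lane count, plus the
 * enhanced-framing bit only when the sink is DPCD 1.1+ *and* advertises
 * ENHANCED_FRAME_CAP -- the extra condition the patch above adds. */
static uint8_t lane_count_set(const uint8_t *dpcd, uint8_t lane_count)
{
        uint8_t tmp = lane_count;

        if (dpcd[DP_DPCD_REV] >= 0x11 &&
            (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP))
                tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        return tmp;
}

int main(void)
{
        uint8_t dpcd[16] = { [DP_DPCD_REV] = 0x11,
                             [DP_MAX_LANE_COUNT] = 0x84 }; /* 4 lanes + cap */

        printf("LANE_COUNT_SET = 0x%02x\n", lane_count_set(dpcd, 4));
        return 0;
}
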
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c new file mode 100644 index 000000000000..44d87b6b4220 --- /dev/null +++ b/drivers/gpu/drm/radeon/atombios_i2c.c | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | * | ||
24 | */ | ||
25 | #include "drmP.h" | ||
26 | #include "radeon_drm.h" | ||
27 | #include "radeon.h" | ||
28 | #include "atom.h" | ||
29 | |||
30 | #define TARGET_HW_I2C_CLOCK 50 | ||
31 | |||
32 | /* these are limitations of ProcessI2cChannelTransaction, not the hw */ | ||
33 | #define ATOM_MAX_HW_I2C_WRITE 2 | ||
34 | #define ATOM_MAX_HW_I2C_READ 255 | ||
35 | |||
36 | static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, | ||
37 | u8 slave_addr, u8 flags, | ||
38 | u8 *buf, u8 num) | ||
39 | { | ||
40 | struct drm_device *dev = chan->dev; | ||
41 | struct radeon_device *rdev = dev->dev_private; | ||
42 | PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; | ||
43 | int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); | ||
44 | unsigned char *base; | ||
45 | u16 out; | ||
46 | |||
47 | memset(&args, 0, sizeof(args)); | ||
48 | |||
49 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; | ||
50 | |||
51 | if (flags & HW_I2C_WRITE) { | ||
52 | if (num > ATOM_MAX_HW_I2C_WRITE) { | ||
53 | DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num); | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | memcpy(&out, buf, num); | ||
57 | args.lpI2CDataOut = cpu_to_le16(out); | ||
58 | } else { | ||
59 | if (num > ATOM_MAX_HW_I2C_READ) { | ||
60 | DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); | ||
61 | return -EINVAL; | ||
62 | } | ||
63 | } | ||
64 | |||
65 | args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; | ||
66 | args.ucRegIndex = 0; | ||
67 | args.ucTransBytes = num; | ||
68 | args.ucSlaveAddr = slave_addr << 1; | ||
69 | args.ucLineNumber = chan->rec.i2c_id; | ||
70 | |||
71 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
72 | |||
73 | /* error */ | ||
74 | if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { | ||
75 | DRM_DEBUG_KMS("hw_i2c error\n"); | ||
76 | return -EIO; | ||
77 | } | ||
78 | |||
79 | if (!(flags & HW_I2C_WRITE)) | ||
80 | memcpy(buf, base, num); | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
86 | struct i2c_msg *msgs, int num) | ||
87 | { | ||
88 | struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); | ||
89 | struct i2c_msg *p; | ||
90 | int i, remaining, current_count, buffer_offset, max_bytes, ret; | ||
91 | u8 buf = 0, flags; | ||
92 | |||
93 | /* check for bus probe */ | ||
94 | p = &msgs[0]; | ||
95 | if ((num == 1) && (p->len == 0)) { | ||
96 | ret = radeon_process_i2c_ch(i2c, | ||
97 | p->addr, HW_I2C_WRITE, | ||
98 | &buf, 1); | ||
99 | if (ret) | ||
100 | return ret; | ||
101 | else | ||
102 | return num; | ||
103 | } | ||
104 | |||
105 | for (i = 0; i < num; i++) { | ||
106 | p = &msgs[i]; | ||
107 | remaining = p->len; | ||
108 | buffer_offset = 0; | ||
109 | /* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */ | ||
110 | if (p->flags & I2C_M_RD) { | ||
111 | max_bytes = ATOM_MAX_HW_I2C_READ; | ||
112 | flags = HW_I2C_READ; | ||
113 | } else { | ||
114 | max_bytes = ATOM_MAX_HW_I2C_WRITE; | ||
115 | flags = HW_I2C_WRITE; | ||
116 | } | ||
117 | while (remaining) { | ||
118 | if (remaining > max_bytes) | ||
119 | current_count = max_bytes; | ||
120 | else | ||
121 | current_count = remaining; | ||
122 | ret = radeon_process_i2c_ch(i2c, | ||
123 | p->addr, flags, | ||
124 | &p->buf[buffer_offset], current_count); | ||
125 | if (ret) | ||
126 | return ret; | ||
127 | remaining -= current_count; | ||
128 | buffer_offset += current_count; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | return num; | ||
133 | } | ||
134 | |||
135 | u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap) | ||
136 | { | ||
137 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | ||
138 | } | ||
139 | |||
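
Editor's note: radeon_atom_hw_i2c_xfer() above works around the 2-byte write / 255-byte read limit of ProcessI2cChannelTransaction by splitting each i2c message into bounded chunks. Below is a minimal userspace sketch of that chunking loop; the mock transaction function stands in for radeon_process_i2c_ch() and only prints what a real call would hand to the AtomBIOS table.

#include <stdio.h>

/* Transaction limits taken from the new file above. */
#define MAX_HW_I2C_WRITE 2
#define MAX_HW_I2C_READ  255

/* Mock single transaction: stands in for radeon_process_i2c_ch(). */
static int mock_process_ch(unsigned addr, int is_read,
                           unsigned char *buf, unsigned count)
{
        (void)buf;
        printf("  %s addr 0x%02x, %u byte(s)\n",
               is_read ? "read " : "write", addr, count);
        return 0;
}

/* Split one message into chunks no larger than the table allows -- the
 * same remaining/buffer_offset loop radeon_atom_hw_i2c_xfer() uses. */
static int chunked_xfer(unsigned addr, int is_read,
                        unsigned char *buf, unsigned len)
{
        unsigned max = is_read ? MAX_HW_I2C_READ : MAX_HW_I2C_WRITE;
        unsigned off = 0, count;
        int ret;

        while (len) {
                count = len > max ? max : len;
                ret = mock_process_ch(addr, is_read, buf + off, count);
                if (ret)
                        return ret;
                len -= count;
                off += count;
        }
        return 0;
}

int main(void)
{
        unsigned char payload[5] = { 1, 2, 3, 4, 5 };

        puts("5-byte write split into 2-byte chunks:");
        return chunked_xfer(0x50, 0, payload, sizeof(payload));
}
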
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f58254a3fb01..466db4115cd5 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -43,6 +43,37 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | |||
43 | extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, | 43 | extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, |
44 | int ring, u32 cp_int_cntl); | 44 | int ring, u32 cp_int_cntl); |
45 | 45 | ||
46 | void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, | ||
47 | unsigned *bankh, unsigned *mtaspect, | ||
48 | unsigned *tile_split) | ||
49 | { | ||
50 | *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; | ||
51 | *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; | ||
52 | *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; | ||
53 | *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; | ||
54 | switch (*bankw) { | ||
55 | default: | ||
56 | case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break; | ||
57 | case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break; | ||
58 | case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break; | ||
59 | case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break; | ||
60 | } | ||
61 | switch (*bankh) { | ||
62 | default: | ||
63 | case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break; | ||
64 | case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break; | ||
65 | case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break; | ||
66 | case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break; | ||
67 | } | ||
68 | switch (*mtaspect) { | ||
69 | default: | ||
70 | case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break; | ||
71 | case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break; | ||
72 | case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break; | ||
73 | case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break; | ||
74 | } | ||
75 | } | ||
76 | |||
46 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | 77 | void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) |
47 | { | 78 | { |
48 | u16 ctl, v; | 79 | u16 ctl, v; |
@@ -68,6 +99,25 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | |||
68 | } | 99 | } |
69 | } | 100 | } |
70 | 101 | ||
102 | void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) | ||
103 | { | ||
104 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
105 | int i; | ||
106 | |||
107 | if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) { | ||
108 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
109 | if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)) | ||
110 | break; | ||
111 | udelay(1); | ||
112 | } | ||
113 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
114 | if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK) | ||
115 | break; | ||
116 | udelay(1); | ||
117 | } | ||
118 | } | ||
119 | } | ||
120 | |||
71 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) | 121 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) |
72 | { | 122 | { |
73 | /* enable the pflip int */ | 123 | /* enable the pflip int */ |
@@ -1489,7 +1539,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1489 | 1539 | ||
1490 | evergreen_cp_start(rdev); | 1540 | evergreen_cp_start(rdev); |
1491 | ring->ready = true; | 1541 | ring->ready = true; |
1492 | r = radeon_ring_test(rdev, ring); | 1542 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
1493 | if (r) { | 1543 | if (r) { |
1494 | ring->ready = false; | 1544 | ring->ready = false; |
1495 | return r; | 1545 | return r; |
@@ -3147,7 +3197,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
3147 | r = evergreen_blit_init(rdev); | 3197 | r = evergreen_blit_init(rdev); |
3148 | if (r) { | 3198 | if (r) { |
3149 | r600_blit_fini(rdev); | 3199 | r600_blit_fini(rdev); |
3150 | rdev->asic->copy = NULL; | 3200 | rdev->asic->copy.copy = NULL; |
3151 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 3201 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
3152 | } | 3202 | } |
3153 | 3203 | ||
@@ -3187,7 +3237,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
3187 | if (r) | 3237 | if (r) |
3188 | return r; | 3238 | return r; |
3189 | 3239 | ||
3190 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | 3240 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
3191 | if (r) { | 3241 | if (r) { |
3192 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 3242 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
3193 | rdev->accel_working = false; | 3243 | rdev->accel_working = false; |
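
Editor's note: dce4_wait_for_vblank(), added above, waits for the start of a fresh vblank period in two phases: first spin until the CRTC leaves vblank, then spin until it re-enters it, each bounded by usec_timeout polls. A standalone sketch of that pattern follows; the status bit, the timeout, and the mock register read are illustrative, not the EVERGREEN_CRTC_STATUS definitions.

#include <stdio.h>

#define V_BLANK_BIT   0x1     /* illustrative status bit, not the real one */
#define USEC_TIMEOUT  100000

/* Mock CRTC status read: toggles in and out of vblank every 1000 polls
 * so both loops below terminate. */
static unsigned mock_crtc_status(void)
{
        static unsigned ticks;
        return ((ticks++ / 1000) & 1) ? V_BLANK_BIT : 0;
}

/* Wait for the *start* of the next vertical blank: poll until we are
 * outside vblank, then poll until we re-enter it -- the two-phase
 * pattern dce4_wait_for_vblank() uses, with a bounded poll count in
 * place of udelay(1) iterations. */
static void wait_for_next_vblank(void)
{
        int i;

        for (i = 0; i < USEC_TIMEOUT; i++)
                if (!(mock_crtc_status() & V_BLANK_BIT))
                        break;
        for (i = 0; i < USEC_TIMEOUT; i++)
                if (mock_crtc_status() & V_BLANK_BIT)
                        break;
}

int main(void)
{
        wait_for_next_vblank();
        puts("reached start of a new vblank period");
        return 0;
}
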
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2379849515c7..4e83fdcf4bc5 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -32,17 +32,7 @@ | |||
32 | #include "evergreend.h" | 32 | #include "evergreend.h" |
33 | #include "evergreen_blit_shaders.h" | 33 | #include "evergreen_blit_shaders.h" |
34 | #include "cayman_blit_shaders.h" | 34 | #include "cayman_blit_shaders.h" |
35 | 35 | #include "radeon_blit_common.h" | |
36 | #define DI_PT_RECTLIST 0x11 | ||
37 | #define DI_INDEX_SIZE_16_BIT 0x0 | ||
38 | #define DI_SRC_SEL_AUTO_INDEX 0x2 | ||
39 | |||
40 | #define FMT_8 0x1 | ||
41 | #define FMT_5_6_5 0x8 | ||
42 | #define FMT_8_8_8_8 0x1a | ||
43 | #define COLOR_8 0x1 | ||
44 | #define COLOR_5_6_5 0x8 | ||
45 | #define COLOR_8_8_8_8 0x1a | ||
46 | 36 | ||
47 | /* emits 17 */ | 37 | /* emits 17 */ |
48 | static void | 38 | static void |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 8e8cd85e5c00..a58b37a2e65a 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -31,6 +31,9 @@ | |||
31 | #include "evergreen_reg_safe.h" | 31 | #include "evergreen_reg_safe.h" |
32 | #include "cayman_reg_safe.h" | 32 | #include "cayman_reg_safe.h" |
33 | 33 | ||
34 | #define MAX(a,b) (((a)>(b))?(a):(b)) | ||
35 | #define MIN(a,b) (((a)<(b))?(a):(b)) | ||
36 | |||
34 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | 37 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, |
35 | struct radeon_cs_reloc **cs_reloc); | 38 | struct radeon_cs_reloc **cs_reloc); |
36 | 39 | ||
@@ -40,42 +43,43 @@ struct evergreen_cs_track { | |||
40 | u32 npipes; | 43 | u32 npipes; |
41 | u32 row_size; | 44 | u32 row_size; |
42 | /* value we track */ | 45 | /* value we track */ |
43 | u32 nsamples; | 46 | u32 nsamples; /* unused */ |
44 | u32 cb_color_base_last[12]; | ||
45 | struct radeon_bo *cb_color_bo[12]; | 47 | struct radeon_bo *cb_color_bo[12]; |
46 | u32 cb_color_bo_offset[12]; | 48 | u32 cb_color_bo_offset[12]; |
47 | struct radeon_bo *cb_color_fmask_bo[8]; | 49 | struct radeon_bo *cb_color_fmask_bo[8]; /* unused */ |
48 | struct radeon_bo *cb_color_cmask_bo[8]; | 50 | struct radeon_bo *cb_color_cmask_bo[8]; /* unused */ |
49 | u32 cb_color_info[12]; | 51 | u32 cb_color_info[12]; |
50 | u32 cb_color_view[12]; | 52 | u32 cb_color_view[12]; |
51 | u32 cb_color_pitch_idx[12]; | ||
52 | u32 cb_color_slice_idx[12]; | ||
53 | u32 cb_color_dim_idx[12]; | ||
54 | u32 cb_color_dim[12]; | ||
55 | u32 cb_color_pitch[12]; | 53 | u32 cb_color_pitch[12]; |
56 | u32 cb_color_slice[12]; | 54 | u32 cb_color_slice[12]; |
57 | u32 cb_color_cmask_slice[8]; | 55 | u32 cb_color_attrib[12]; |
58 | u32 cb_color_fmask_slice[8]; | 56 | u32 cb_color_cmask_slice[8];/* unused */ |
57 | u32 cb_color_fmask_slice[8];/* unused */ | ||
59 | u32 cb_target_mask; | 58 | u32 cb_target_mask; |
60 | u32 cb_shader_mask; | 59 | u32 cb_shader_mask; /* unused */ |
61 | u32 vgt_strmout_config; | 60 | u32 vgt_strmout_config; |
62 | u32 vgt_strmout_buffer_config; | 61 | u32 vgt_strmout_buffer_config; |
62 | struct radeon_bo *vgt_strmout_bo[4]; | ||
63 | u32 vgt_strmout_bo_offset[4]; | ||
64 | u32 vgt_strmout_size[4]; | ||
63 | u32 db_depth_control; | 65 | u32 db_depth_control; |
64 | u32 db_depth_view; | 66 | u32 db_depth_view; |
67 | u32 db_depth_slice; | ||
65 | u32 db_depth_size; | 68 | u32 db_depth_size; |
66 | u32 db_depth_size_idx; | ||
67 | u32 db_z_info; | 69 | u32 db_z_info; |
68 | u32 db_z_idx; | ||
69 | u32 db_z_read_offset; | 70 | u32 db_z_read_offset; |
70 | u32 db_z_write_offset; | 71 | u32 db_z_write_offset; |
71 | struct radeon_bo *db_z_read_bo; | 72 | struct radeon_bo *db_z_read_bo; |
72 | struct radeon_bo *db_z_write_bo; | 73 | struct radeon_bo *db_z_write_bo; |
73 | u32 db_s_info; | 74 | u32 db_s_info; |
74 | u32 db_s_idx; | ||
75 | u32 db_s_read_offset; | 75 | u32 db_s_read_offset; |
76 | u32 db_s_write_offset; | 76 | u32 db_s_write_offset; |
77 | struct radeon_bo *db_s_read_bo; | 77 | struct radeon_bo *db_s_read_bo; |
78 | struct radeon_bo *db_s_write_bo; | 78 | struct radeon_bo *db_s_write_bo; |
79 | bool sx_misc_kill_all_prims; | ||
80 | bool cb_dirty; | ||
81 | bool db_dirty; | ||
82 | bool streamout_dirty; | ||
79 | }; | 83 | }; |
80 | 84 | ||
81 | static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) | 85 | static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) |
@@ -103,19 +107,6 @@ static u32 evergreen_cs_get_num_banks(u32 nbanks) | |||
103 | } | 107 | } |
104 | } | 108 | } |
105 | 109 | ||
106 | static u32 evergreen_cs_get_tile_split(u32 row_size) | ||
107 | { | ||
108 | switch (row_size) { | ||
109 | case 1: | ||
110 | default: | ||
111 | return ADDR_SURF_TILE_SPLIT_1KB; | ||
112 | case 2: | ||
113 | return ADDR_SURF_TILE_SPLIT_2KB; | ||
114 | case 4: | ||
115 | return ADDR_SURF_TILE_SPLIT_4KB; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | static void evergreen_cs_track_init(struct evergreen_cs_track *track) | 110 | static void evergreen_cs_track_init(struct evergreen_cs_track *track) |
120 | { | 111 | { |
121 | int i; | 112 | int i; |
@@ -128,50 +119,745 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) | |||
128 | } | 119 | } |
129 | 120 | ||
130 | for (i = 0; i < 12; i++) { | 121 | for (i = 0; i < 12; i++) { |
131 | track->cb_color_base_last[i] = 0; | ||
132 | track->cb_color_bo[i] = NULL; | 122 | track->cb_color_bo[i] = NULL; |
133 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | 123 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; |
134 | track->cb_color_info[i] = 0; | 124 | track->cb_color_info[i] = 0; |
135 | track->cb_color_view[i] = 0; | 125 | track->cb_color_view[i] = 0xFFFFFFFF; |
136 | track->cb_color_pitch_idx[i] = 0; | ||
137 | track->cb_color_slice_idx[i] = 0; | ||
138 | track->cb_color_dim[i] = 0; | ||
139 | track->cb_color_pitch[i] = 0; | 126 | track->cb_color_pitch[i] = 0; |
140 | track->cb_color_slice[i] = 0; | 127 | track->cb_color_slice[i] = 0; |
141 | track->cb_color_dim[i] = 0; | ||
142 | } | 128 | } |
143 | track->cb_target_mask = 0xFFFFFFFF; | 129 | track->cb_target_mask = 0xFFFFFFFF; |
144 | track->cb_shader_mask = 0xFFFFFFFF; | 130 | track->cb_shader_mask = 0xFFFFFFFF; |
131 | track->cb_dirty = true; | ||
145 | 132 | ||
146 | track->db_depth_view = 0xFFFFC000; | 133 | track->db_depth_view = 0xFFFFC000; |
147 | track->db_depth_size = 0xFFFFFFFF; | 134 | track->db_depth_size = 0xFFFFFFFF; |
148 | track->db_depth_size_idx = 0; | ||
149 | track->db_depth_control = 0xFFFFFFFF; | 135 | track->db_depth_control = 0xFFFFFFFF; |
150 | track->db_z_info = 0xFFFFFFFF; | 136 | track->db_z_info = 0xFFFFFFFF; |
151 | track->db_z_idx = 0xFFFFFFFF; | ||
152 | track->db_z_read_offset = 0xFFFFFFFF; | 137 | track->db_z_read_offset = 0xFFFFFFFF; |
153 | track->db_z_write_offset = 0xFFFFFFFF; | 138 | track->db_z_write_offset = 0xFFFFFFFF; |
154 | track->db_z_read_bo = NULL; | 139 | track->db_z_read_bo = NULL; |
155 | track->db_z_write_bo = NULL; | 140 | track->db_z_write_bo = NULL; |
156 | track->db_s_info = 0xFFFFFFFF; | 141 | track->db_s_info = 0xFFFFFFFF; |
157 | track->db_s_idx = 0xFFFFFFFF; | ||
158 | track->db_s_read_offset = 0xFFFFFFFF; | 142 | track->db_s_read_offset = 0xFFFFFFFF; |
159 | track->db_s_write_offset = 0xFFFFFFFF; | 143 | track->db_s_write_offset = 0xFFFFFFFF; |
160 | track->db_s_read_bo = NULL; | 144 | track->db_s_read_bo = NULL; |
161 | track->db_s_write_bo = NULL; | 145 | track->db_s_write_bo = NULL; |
146 | track->db_dirty = true; | ||
147 | |||
148 | for (i = 0; i < 4; i++) { | ||
149 | track->vgt_strmout_size[i] = 0; | ||
150 | track->vgt_strmout_bo[i] = NULL; | ||
151 | track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; | ||
152 | } | ||
153 | track->streamout_dirty = true; | ||
154 | track->sx_misc_kill_all_prims = false; | ||
162 | } | 155 | } |
163 | 156 | ||
164 | static int evergreen_cs_track_check(struct radeon_cs_parser *p) | 157 | struct eg_surface { |
158 | /* value gathered from cs */ | ||
159 | unsigned nbx; | ||
160 | unsigned nby; | ||
161 | unsigned format; | ||
162 | unsigned mode; | ||
163 | unsigned nbanks; | ||
164 | unsigned bankw; | ||
165 | unsigned bankh; | ||
166 | unsigned tsplit; | ||
167 | unsigned mtilea; | ||
168 | unsigned nsamples; | ||
169 | /* output value */ | ||
170 | unsigned bpe; | ||
171 | unsigned layer_size; | ||
172 | unsigned palign; | ||
173 | unsigned halign; | ||
174 | unsigned long base_align; | ||
175 | }; | ||
176 | |||
177 | static int evergreen_surface_check_linear(struct radeon_cs_parser *p, | ||
178 | struct eg_surface *surf, | ||
179 | const char *prefix) | ||
180 | { | ||
181 | surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples; | ||
182 | surf->base_align = surf->bpe; | ||
183 | surf->palign = 1; | ||
184 | surf->halign = 1; | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p, | ||
189 | struct eg_surface *surf, | ||
190 | const char *prefix) | ||
165 | { | 191 | { |
166 | struct evergreen_cs_track *track = p->track; | 192 | struct evergreen_cs_track *track = p->track; |
193 | unsigned palign; | ||
167 | 194 | ||
168 | /* we don't support stream out buffer yet */ | 195 | palign = MAX(64, track->group_size / surf->bpe); |
169 | if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { | 196 | surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples; |
170 | dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); | 197 | surf->base_align = track->group_size; |
198 | surf->palign = palign; | ||
199 | surf->halign = 1; | ||
200 | if (surf->nbx & (palign - 1)) { | ||
201 | if (prefix) { | ||
202 | dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n", | ||
203 | __func__, __LINE__, prefix, surf->nbx, palign); | ||
204 | } | ||
205 | return -EINVAL; | ||
206 | } | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static int evergreen_surface_check_1d(struct radeon_cs_parser *p, | ||
211 | struct eg_surface *surf, | ||
212 | const char *prefix) | ||
213 | { | ||
214 | struct evergreen_cs_track *track = p->track; | ||
215 | unsigned palign; | ||
216 | |||
217 | palign = track->group_size / (8 * surf->bpe * surf->nsamples); | ||
218 | palign = MAX(8, palign); | ||
219 | surf->layer_size = surf->nbx * surf->nby * surf->bpe; | ||
220 | surf->base_align = track->group_size; | ||
221 | surf->palign = palign; | ||
222 | surf->halign = 8; | ||
223 | if ((surf->nbx & (palign - 1))) { | ||
224 | if (prefix) { | ||
225 | dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n", | ||
226 | __func__, __LINE__, prefix, surf->nbx, palign, | ||
227 | track->group_size, surf->bpe, surf->nsamples); | ||
228 | } | ||
229 | return -EINVAL; | ||
230 | } | ||
231 | if ((surf->nby & (8 - 1))) { | ||
232 | if (prefix) { | ||
233 | dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n", | ||
234 | __func__, __LINE__, prefix, surf->nby); | ||
235 | } | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | ||
242 | struct eg_surface *surf, | ||
243 | const char *prefix) | ||
244 | { | ||
245 | struct evergreen_cs_track *track = p->track; | ||
246 | unsigned palign, halign, tileb, slice_pt; | ||
247 | |||
248 | tileb = 64 * surf->bpe * surf->nsamples; | ||
249 | palign = track->group_size / (8 * surf->bpe * surf->nsamples); | ||
250 | palign = MAX(8, palign); | ||
251 | slice_pt = 1; | ||
252 | if (tileb > surf->tsplit) { | ||
253 | slice_pt = tileb / surf->tsplit; | ||
254 | } | ||
255 | tileb = tileb / slice_pt; | ||
256 | /* macro tile width & height */ | ||
257 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; | ||
258 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; | ||
259 | surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; | ||
260 | surf->base_align = (palign / 8) * (halign / 8) * tileb; | ||
261 | surf->palign = palign; | ||
262 | surf->halign = halign; | ||
263 | |||
264 | if ((surf->nbx & (palign - 1))) { | ||
265 | if (prefix) { | ||
266 | dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n", | ||
267 | __func__, __LINE__, prefix, surf->nbx, palign); | ||
268 | } | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | if ((surf->nby & (halign - 1))) { | ||
272 | if (prefix) { | ||
273 | dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n", | ||
274 | __func__, __LINE__, prefix, surf->nby, halign); | ||
275 | } | ||
171 | return -EINVAL; | 276 | return -EINVAL; |
172 | } | 277 | } |
173 | 278 | ||
174 | /* XXX fill in */ | 279 | return 0; |
280 | } | ||
281 | |||
282 | static int evergreen_surface_check(struct radeon_cs_parser *p, | ||
283 | struct eg_surface *surf, | ||
284 | const char *prefix) | ||
285 | { | ||
286 | /* some common value computed here */ | ||
287 | surf->bpe = r600_fmt_get_blocksize(surf->format); | ||
288 | |||
289 | switch (surf->mode) { | ||
290 | case ARRAY_LINEAR_GENERAL: | ||
291 | return evergreen_surface_check_linear(p, surf, prefix); | ||
292 | case ARRAY_LINEAR_ALIGNED: | ||
293 | return evergreen_surface_check_linear_aligned(p, surf, prefix); | ||
294 | case ARRAY_1D_TILED_THIN1: | ||
295 | return evergreen_surface_check_1d(p, surf, prefix); | ||
296 | case ARRAY_2D_TILED_THIN1: | ||
297 | return evergreen_surface_check_2d(p, surf, prefix); | ||
298 | default: | ||
299 | dev_warn(p->dev, "%s:%d %s invalid array mode %d\n", | ||
300 | __func__, __LINE__, prefix, surf->mode); | ||
301 | return -EINVAL; | ||
302 | } | ||
303 | return -EINVAL; | ||
304 | } | ||
305 | |||
306 | static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p, | ||
307 | struct eg_surface *surf, | ||
308 | const char *prefix) | ||
309 | { | ||
310 | switch (surf->mode) { | ||
311 | case ARRAY_2D_TILED_THIN1: | ||
312 | break; | ||
313 | case ARRAY_LINEAR_GENERAL: | ||
314 | case ARRAY_LINEAR_ALIGNED: | ||
315 | case ARRAY_1D_TILED_THIN1: | ||
316 | return 0; | ||
317 | default: | ||
318 | dev_warn(p->dev, "%s:%d %s invalid array mode %d\n", | ||
319 | __func__, __LINE__, prefix, surf->mode); | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | |||
323 | switch (surf->nbanks) { | ||
324 | case 0: surf->nbanks = 2; break; | ||
325 | case 1: surf->nbanks = 4; break; | ||
326 | case 2: surf->nbanks = 8; break; | ||
327 | case 3: surf->nbanks = 16; break; | ||
328 | default: | ||
329 | dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n", | ||
330 | __func__, __LINE__, prefix, surf->nbanks); | ||
331 | return -EINVAL; | ||
332 | } | ||
333 | switch (surf->bankw) { | ||
334 | case 0: surf->bankw = 1; break; | ||
335 | case 1: surf->bankw = 2; break; | ||
336 | case 2: surf->bankw = 4; break; | ||
337 | case 3: surf->bankw = 8; break; | ||
338 | default: | ||
339 | dev_warn(p->dev, "%s:%d %s invalid bankw %d\n", | ||
340 | __func__, __LINE__, prefix, surf->bankw); | ||
341 | return -EINVAL; | ||
342 | } | ||
343 | switch (surf->bankh) { | ||
344 | case 0: surf->bankh = 1; break; | ||
345 | case 1: surf->bankh = 2; break; | ||
346 | case 2: surf->bankh = 4; break; | ||
347 | case 3: surf->bankh = 8; break; | ||
348 | default: | ||
349 | dev_warn(p->dev, "%s:%d %s invalid bankh %d\n", | ||
350 | __func__, __LINE__, prefix, surf->bankh); | ||
351 | return -EINVAL; | ||
352 | } | ||
353 | switch (surf->mtilea) { | ||
354 | case 0: surf->mtilea = 1; break; | ||
355 | case 1: surf->mtilea = 2; break; | ||
356 | case 2: surf->mtilea = 4; break; | ||
357 | case 3: surf->mtilea = 8; break; | ||
358 | default: | ||
359 | dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n", | ||
360 | __func__, __LINE__, prefix, surf->mtilea); | ||
361 | return -EINVAL; | ||
362 | } | ||
363 | switch (surf->tsplit) { | ||
364 | case 0: surf->tsplit = 64; break; | ||
365 | case 1: surf->tsplit = 128; break; | ||
366 | case 2: surf->tsplit = 256; break; | ||
367 | case 3: surf->tsplit = 512; break; | ||
368 | case 4: surf->tsplit = 1024; break; | ||
369 | case 5: surf->tsplit = 2048; break; | ||
370 | case 6: surf->tsplit = 4096; break; | ||
371 | default: | ||
372 | dev_warn(p->dev, "%s:%d %s invalid tile split %d\n", | ||
373 | __func__, __LINE__, prefix, surf->tsplit); | ||
374 | return -EINVAL; | ||
375 | } | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id) | ||
380 | { | ||
381 | struct evergreen_cs_track *track = p->track; | ||
382 | struct eg_surface surf; | ||
383 | unsigned pitch, slice, mslice; | ||
384 | unsigned long offset; | ||
385 | int r; | ||
386 | |||
387 | mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1; | ||
388 | pitch = track->cb_color_pitch[id]; | ||
389 | slice = track->cb_color_slice[id]; | ||
390 | surf.nbx = (pitch + 1) * 8; | ||
391 | surf.nby = ((slice + 1) * 64) / surf.nbx; | ||
392 | surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]); | ||
393 | surf.format = G_028C70_FORMAT(track->cb_color_info[id]); | ||
394 | surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]); | ||
395 | surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]); | ||
396 | surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]); | ||
397 | surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]); | ||
398 | surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]); | ||
399 | surf.nsamples = 1; | ||
400 | |||
401 | if (!r600_fmt_is_valid_color(surf.format)) { | ||
402 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n", | ||
403 | __func__, __LINE__, surf.format, | ||
404 | id, track->cb_color_info[id]); | ||
405 | return -EINVAL; | ||
406 | } | ||
407 | |||
408 | r = evergreen_surface_value_conv_check(p, &surf, "cb"); | ||
409 | if (r) { | ||
410 | return r; | ||
411 | } | ||
412 | |||
413 | r = evergreen_surface_check(p, &surf, "cb"); | ||
414 | if (r) { | ||
415 | dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", | ||
416 | __func__, __LINE__, id, track->cb_color_pitch[id], | ||
417 | track->cb_color_slice[id], track->cb_color_attrib[id], | ||
418 | track->cb_color_info[id]); | ||
419 | return r; | ||
420 | } | ||
421 | |||
422 | offset = track->cb_color_bo_offset[id] << 8; | ||
423 | if (offset & (surf.base_align - 1)) { | ||
424 | dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n", | ||
425 | __func__, __LINE__, id, offset, surf.base_align); | ||
426 | return -EINVAL; | ||
427 | } | ||
428 | |||
429 | offset += surf.layer_size * mslice; | ||
430 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { | ||
431 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " | ||
432 | "offset %d, max layer %d, bo size %ld, slice %d)\n", | ||
433 | __func__, __LINE__, id, surf.layer_size, | ||
434 | track->cb_color_bo_offset[id] << 8, mslice, | ||
435 | radeon_bo_size(track->cb_color_bo[id]), slice); | ||
436 | dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", | ||
437 | __func__, __LINE__, surf.nbx, surf.nby, | ||
438 | surf.mode, surf.bpe, surf.nsamples, | ||
439 | surf.bankw, surf.bankh, | ||
440 | surf.tsplit, surf.mtilea); | ||
441 | return -EINVAL; | ||
442 | } | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) | ||
448 | { | ||
449 | struct evergreen_cs_track *track = p->track; | ||
450 | struct eg_surface surf; | ||
451 | unsigned pitch, slice, mslice; | ||
452 | unsigned long offset; | ||
453 | int r; | ||
454 | |||
455 | mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; | ||
456 | pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size); | ||
457 | slice = track->db_depth_slice; | ||
458 | surf.nbx = (pitch + 1) * 8; | ||
459 | surf.nby = ((slice + 1) * 64) / surf.nbx; | ||
460 | surf.mode = G_028040_ARRAY_MODE(track->db_z_info); | ||
461 | surf.format = G_028044_FORMAT(track->db_s_info); | ||
462 | surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info); | ||
463 | surf.nbanks = G_028040_NUM_BANKS(track->db_z_info); | ||
464 | surf.bankw = G_028040_BANK_WIDTH(track->db_z_info); | ||
465 | surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info); | ||
466 | surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info); | ||
467 | surf.nsamples = 1; | ||
468 | |||
469 | if (surf.format != 1) { | ||
470 | dev_warn(p->dev, "%s:%d stencil invalid format %d\n", | ||
471 | __func__, __LINE__, surf.format); | ||
472 | return -EINVAL; | ||
473 | } | ||
474 | /* replace with a color format so we can use the same code */ | ||
475 | surf.format = V_028C70_COLOR_8; | ||
476 | |||
477 | r = evergreen_surface_value_conv_check(p, &surf, "stencil"); | ||
478 | if (r) { | ||
479 | return r; | ||
480 | } | ||
481 | |||
482 | r = evergreen_surface_check(p, &surf, NULL); | ||
483 | if (r) { | ||
484 | /* old userspace doesn't compute proper depth/stencil alignment; | ||
485 | * re-check the alignment against a larger bytes-per-element value | ||
486 | * and only report an error if that alignment is wrong too. | ||
487 | */ | ||
488 | surf.format = V_028C70_COLOR_8_8_8_8; | ||
489 | r = evergreen_surface_check(p, &surf, "stencil"); | ||
490 | if (r) { | ||
491 | dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", | ||
492 | __func__, __LINE__, track->db_depth_size, | ||
493 | track->db_depth_slice, track->db_s_info, track->db_z_info); | ||
494 | } | ||
495 | return r; | ||
496 | } | ||
497 | |||
498 | offset = track->db_s_read_offset << 8; | ||
499 | if (offset & (surf.base_align - 1)) { | ||
500 | dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", | ||
501 | __func__, __LINE__, offset, surf.base_align); | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | offset += surf.layer_size * mslice; | ||
505 | if (offset > radeon_bo_size(track->db_s_read_bo)) { | ||
506 | dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, " | ||
507 | "offset %ld, max layer %d, bo size %ld)\n", | ||
508 | __func__, __LINE__, surf.layer_size, | ||
509 | (unsigned long)track->db_s_read_offset << 8, mslice, | ||
510 | radeon_bo_size(track->db_s_read_bo)); | ||
511 | dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", | ||
512 | __func__, __LINE__, track->db_depth_size, | ||
513 | track->db_depth_slice, track->db_s_info, track->db_z_info); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | offset = track->db_s_write_offset << 8; | ||
518 | if (offset & (surf.base_align - 1)) { | ||
519 | dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", | ||
520 | __func__, __LINE__, offset, surf.base_align); | ||
521 | return -EINVAL; | ||
522 | } | ||
523 | offset += surf.layer_size * mslice; | ||
524 | if (offset > radeon_bo_size(track->db_s_write_bo)) { | ||
525 | dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, " | ||
526 | "offset %ld, max layer %d, bo size %ld)\n", | ||
527 | __func__, __LINE__, surf.layer_size, | ||
528 | (unsigned long)track->db_s_write_offset << 8, mslice, | ||
529 | radeon_bo_size(track->db_s_write_bo)); | ||
530 | return -EINVAL; | ||
531 | } | ||
532 | |||
533 | return 0; | ||
534 | } | ||
535 | |||
536 | static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) | ||
537 | { | ||
538 | struct evergreen_cs_track *track = p->track; | ||
539 | struct eg_surface surf; | ||
540 | unsigned pitch, slice, mslice; | ||
541 | unsigned long offset; | ||
542 | int r; | ||
543 | |||
544 | mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; | ||
545 | pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size); | ||
546 | slice = track->db_depth_slice; | ||
547 | surf.nbx = (pitch + 1) * 8; | ||
548 | surf.nby = ((slice + 1) * 64) / surf.nbx; | ||
549 | surf.mode = G_028040_ARRAY_MODE(track->db_z_info); | ||
550 | surf.format = G_028040_FORMAT(track->db_z_info); | ||
551 | surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info); | ||
552 | surf.nbanks = G_028040_NUM_BANKS(track->db_z_info); | ||
553 | surf.bankw = G_028040_BANK_WIDTH(track->db_z_info); | ||
554 | surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info); | ||
555 | surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info); | ||
556 | surf.nsamples = 1; | ||
557 | |||
558 | switch (surf.format) { | ||
559 | case V_028040_Z_16: | ||
560 | surf.format = V_028C70_COLOR_16; | ||
561 | break; | ||
562 | case V_028040_Z_24: | ||
563 | case V_028040_Z_32_FLOAT: | ||
564 | surf.format = V_028C70_COLOR_8_8_8_8; | ||
565 | break; | ||
566 | default: | ||
567 | dev_warn(p->dev, "%s:%d depth invalid format %d\n", | ||
568 | __func__, __LINE__, surf.format); | ||
569 | return -EINVAL; | ||
570 | } | ||
571 | |||
572 | r = evergreen_surface_value_conv_check(p, &surf, "depth"); | ||
573 | if (r) { | ||
574 | dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n", | ||
575 | __func__, __LINE__, track->db_depth_size, | ||
576 | track->db_depth_slice, track->db_z_info); | ||
577 | return r; | ||
578 | } | ||
579 | |||
580 | r = evergreen_surface_check(p, &surf, "depth"); | ||
581 | if (r) { | ||
582 | dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n", | ||
583 | __func__, __LINE__, track->db_depth_size, | ||
584 | track->db_depth_slice, track->db_z_info); | ||
585 | return r; | ||
586 | } | ||
587 | |||
588 | offset = track->db_z_read_offset << 8; | ||
589 | if (offset & (surf.base_align - 1)) { | ||
590 | dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", | ||
591 | __func__, __LINE__, offset, surf.base_align); | ||
592 | return -EINVAL; | ||
593 | } | ||
594 | offset += surf.layer_size * mslice; | ||
595 | if (offset > radeon_bo_size(track->db_z_read_bo)) { | ||
596 | dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " | ||
597 | "offset %ld, max layer %d, bo size %ld)\n", | ||
598 | __func__, __LINE__, surf.layer_size, | ||
599 | (unsigned long)track->db_z_read_offset << 8, mslice, | ||
600 | radeon_bo_size(track->db_z_read_bo)); | ||
601 | return -EINVAL; | ||
602 | } | ||
603 | |||
604 | offset = track->db_z_write_offset << 8; | ||
605 | if (offset & (surf.base_align - 1)) { | ||
606 | dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", | ||
607 | __func__, __LINE__, offset, surf.base_align); | ||
608 | return -EINVAL; | ||
609 | } | ||
610 | offset += surf.layer_size * mslice; | ||
611 | if (offset > radeon_bo_size(track->db_z_write_bo)) { | ||
612 | dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " | ||
613 | "offset %ld, max layer %d, bo size %ld)\n", | ||
614 | __func__, __LINE__, surf.layer_size, | ||
615 | (unsigned long)track->db_z_write_offset << 8, mslice, | ||
616 | radeon_bo_size(track->db_z_write_bo)); | ||
617 | return -EINVAL; | ||
618 | } | ||
619 | |||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, | ||
624 | struct radeon_bo *texture, | ||
625 | struct radeon_bo *mipmap, | ||
626 | unsigned idx) | ||
627 | { | ||
628 | struct eg_surface surf; | ||
629 | unsigned long toffset, moffset; | ||
630 | unsigned dim, llevel, mslice, width, height, depth, i; | ||
631 | u32 texdw[8]; | ||
632 | int r; | ||
633 | |||
634 | texdw[0] = radeon_get_ib_value(p, idx + 0); | ||
635 | texdw[1] = radeon_get_ib_value(p, idx + 1); | ||
636 | texdw[2] = radeon_get_ib_value(p, idx + 2); | ||
637 | texdw[3] = radeon_get_ib_value(p, idx + 3); | ||
638 | texdw[4] = radeon_get_ib_value(p, idx + 4); | ||
639 | texdw[5] = radeon_get_ib_value(p, idx + 5); | ||
640 | texdw[6] = radeon_get_ib_value(p, idx + 6); | ||
641 | texdw[7] = radeon_get_ib_value(p, idx + 7); | ||
642 | dim = G_030000_DIM(texdw[0]); | ||
643 | llevel = G_030014_LAST_LEVEL(texdw[5]); | ||
644 | mslice = G_030014_LAST_ARRAY(texdw[5]) + 1; | ||
645 | width = G_030000_TEX_WIDTH(texdw[0]) + 1; | ||
646 | height = G_030004_TEX_HEIGHT(texdw[1]) + 1; | ||
647 | depth = G_030004_TEX_DEPTH(texdw[1]) + 1; | ||
648 | surf.format = G_03001C_DATA_FORMAT(texdw[7]); | ||
649 | surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8; | ||
650 | surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx); | ||
651 | surf.nby = r600_fmt_get_nblocksy(surf.format, height); | ||
652 | surf.mode = G_030004_ARRAY_MODE(texdw[1]); | ||
653 | surf.tsplit = G_030018_TILE_SPLIT(texdw[6]); | ||
654 | surf.nbanks = G_03001C_NUM_BANKS(texdw[7]); | ||
655 | surf.bankw = G_03001C_BANK_WIDTH(texdw[7]); | ||
656 | surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]); | ||
657 | surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]); | ||
658 | surf.nsamples = 1; | ||
659 | toffset = texdw[2] << 8; | ||
660 | moffset = texdw[3] << 8; | ||
661 | |||
662 | if (!r600_fmt_is_valid_texture(surf.format, p->family)) { | ||
663 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", | ||
664 | __func__, __LINE__, surf.format); | ||
665 | return -EINVAL; | ||
666 | } | ||
667 | switch (dim) { | ||
668 | case V_030000_SQ_TEX_DIM_1D: | ||
669 | case V_030000_SQ_TEX_DIM_2D: | ||
670 | case V_030000_SQ_TEX_DIM_CUBEMAP: | ||
671 | case V_030000_SQ_TEX_DIM_1D_ARRAY: | ||
672 | case V_030000_SQ_TEX_DIM_2D_ARRAY: | ||
673 | depth = 1; | ||
674 | case V_030000_SQ_TEX_DIM_3D: | ||
675 | break; | ||
676 | default: | ||
677 | dev_warn(p->dev, "%s:%d texture invalid dimension %d\n", | ||
678 | __func__, __LINE__, dim); | ||
679 | return -EINVAL; | ||
680 | } | ||
681 | |||
682 | r = evergreen_surface_value_conv_check(p, &surf, "texture"); | ||
683 | if (r) { | ||
684 | return r; | ||
685 | } | ||
686 | |||
687 | /* align height */ | ||
688 | evergreen_surface_check(p, &surf, NULL); | ||
689 | surf.nby = ALIGN(surf.nby, surf.halign); | ||
690 | |||
691 | r = evergreen_surface_check(p, &surf, "texture"); | ||
692 | if (r) { | ||
693 | dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
694 | __func__, __LINE__, texdw[0], texdw[1], texdw[4], | ||
695 | texdw[5], texdw[6], texdw[7]); | ||
696 | return r; | ||
697 | } | ||
698 | |||
699 | /* check texture size */ | ||
700 | if (toffset & (surf.base_align - 1)) { | ||
701 | dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n", | ||
702 | __func__, __LINE__, toffset, surf.base_align); | ||
703 | return -EINVAL; | ||
704 | } | ||
705 | if (moffset & (surf.base_align - 1)) { | ||
706 | dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n", | ||
707 | __func__, __LINE__, moffset, surf.base_align); | ||
708 | return -EINVAL; | ||
709 | } | ||
710 | if (dim == SQ_TEX_DIM_3D) { | ||
711 | toffset += surf.layer_size * depth; | ||
712 | } else { | ||
713 | toffset += surf.layer_size * mslice; | ||
714 | } | ||
715 | if (toffset > radeon_bo_size(texture)) { | ||
716 | dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, " | ||
717 | "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n", | ||
718 | __func__, __LINE__, surf.layer_size, | ||
719 | (unsigned long)texdw[2] << 8, mslice, | ||
720 | depth, radeon_bo_size(texture), | ||
721 | surf.nbx, surf.nby); | ||
722 | return -EINVAL; | ||
723 | } | ||
724 | |||
725 | /* check mipmap size */ | ||
726 | for (i = 1; i <= llevel; i++) { | ||
727 | unsigned w, h, d; | ||
728 | |||
729 | w = r600_mip_minify(width, i); | ||
730 | h = r600_mip_minify(height, i); | ||
731 | d = r600_mip_minify(depth, i); | ||
732 | surf.nbx = r600_fmt_get_nblocksx(surf.format, w); | ||
733 | surf.nby = r600_fmt_get_nblocksy(surf.format, h); | ||
734 | |||
735 | switch (surf.mode) { | ||
736 | case ARRAY_2D_TILED_THIN1: | ||
737 | if (surf.nbx < surf.palign || surf.nby < surf.halign) { | ||
738 | surf.mode = ARRAY_1D_TILED_THIN1; | ||
739 | } | ||
740 | /* recompute alignment */ | ||
741 | evergreen_surface_check(p, &surf, NULL); | ||
742 | break; | ||
743 | case ARRAY_LINEAR_GENERAL: | ||
744 | case ARRAY_LINEAR_ALIGNED: | ||
745 | case ARRAY_1D_TILED_THIN1: | ||
746 | break; | ||
747 | default: | ||
748 | dev_warn(p->dev, "%s:%d invalid array mode %d\n", | ||
749 | __func__, __LINE__, surf.mode); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | surf.nbx = ALIGN(surf.nbx, surf.palign); | ||
753 | surf.nby = ALIGN(surf.nby, surf.halign); | ||
754 | |||
755 | r = evergreen_surface_check(p, &surf, "mipmap"); | ||
756 | if (r) { | ||
757 | return r; | ||
758 | } | ||
759 | |||
760 | if (dim == SQ_TEX_DIM_3D) { | ||
761 | moffset += surf.layer_size * d; | ||
762 | } else { | ||
763 | moffset += surf.layer_size * mslice; | ||
764 | } | ||
765 | if (moffset > radeon_bo_size(mipmap)) { | ||
766 | dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, " | ||
767 | "offset %ld, coffset %ld, max layer %d, depth %d, " | ||
768 | "bo size %ld) level0 (%d %d %d)\n", | ||
769 | __func__, __LINE__, i, surf.layer_size, | ||
770 | (unsigned long)texdw[3] << 8, moffset, mslice, | ||
771 | d, radeon_bo_size(mipmap), | ||
772 | width, height, depth); | ||
773 | dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", | ||
774 | __func__, __LINE__, surf.nbx, surf.nby, | ||
775 | surf.mode, surf.bpe, surf.nsamples, | ||
776 | surf.bankw, surf.bankh, | ||
777 | surf.tsplit, surf.mtilea); | ||
778 | return -EINVAL; | ||
779 | } | ||
780 | } | ||
781 | |||
782 | return 0; | ||
783 | } | ||
784 | |||
785 | static int evergreen_cs_track_check(struct radeon_cs_parser *p) | ||
786 | { | ||
787 | struct evergreen_cs_track *track = p->track; | ||
788 | unsigned tmp, i; | ||
789 | int r; | ||
790 | unsigned buffer_mask = 0; | ||
791 | |||
792 | /* check streamout */ | ||
793 | if (track->streamout_dirty && track->vgt_strmout_config) { | ||
794 | for (i = 0; i < 4; i++) { | ||
795 | if (track->vgt_strmout_config & (1 << i)) { | ||
796 | buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf; | ||
797 | } | ||
798 | } | ||
799 | |||
800 | for (i = 0; i < 4; i++) { | ||
801 | if (buffer_mask & (1 << i)) { | ||
802 | if (track->vgt_strmout_bo[i]) { | ||
803 | u64 offset = (u64)track->vgt_strmout_bo_offset[i] + | ||
804 | (u64)track->vgt_strmout_size[i]; | ||
805 | if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { | ||
806 | DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", | ||
807 | i, offset, | ||
808 | radeon_bo_size(track->vgt_strmout_bo[i])); | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | } else { | ||
812 | dev_warn(p->dev, "No buffer for streamout %d\n", i); | ||
813 | return -EINVAL; | ||
814 | } | ||
815 | } | ||
816 | } | ||
817 | track->streamout_dirty = false; | ||
818 | } | ||
819 | |||
820 | if (track->sx_misc_kill_all_prims) | ||
821 | return 0; | ||
822 | |||
823 | /* check that we have a cb for each enabled target | ||
824 | */ | ||
825 | if (track->cb_dirty) { | ||
826 | tmp = track->cb_target_mask; | ||
827 | for (i = 0; i < 8; i++) { | ||
828 | if ((tmp >> (i * 4)) & 0xF) { | ||
829 | /* at least one component is enabled */ | ||
830 | if (track->cb_color_bo[i] == NULL) { | ||
831 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", | ||
832 | __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); | ||
833 | return -EINVAL; | ||
834 | } | ||
835 | /* check cb */ | ||
836 | r = evergreen_cs_track_validate_cb(p, i); | ||
837 | if (r) { | ||
838 | return r; | ||
839 | } | ||
840 | } | ||
841 | } | ||
842 | track->cb_dirty = false; | ||
843 | } | ||
844 | |||
845 | if (track->db_dirty) { | ||
846 | /* Check stencil buffer */ | ||
847 | if (G_028800_STENCIL_ENABLE(track->db_depth_control)) { | ||
848 | r = evergreen_cs_track_validate_stencil(p); | ||
849 | if (r) | ||
850 | return r; | ||
851 | } | ||
852 | /* Check depth buffer */ | ||
853 | if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) { | ||
854 | r = evergreen_cs_track_validate_depth(p); | ||
855 | if (r) | ||
856 | return r; | ||
857 | } | ||
858 | track->db_dirty = false; | ||
859 | } | ||
860 | |||
175 | return 0; | 861 | return 0; |
176 | } | 862 | } |
177 | 863 | ||
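
Editor's note: the surface validation added above derives the pitch/height alignment and base alignment of 2D-tiled buffers from the tiling parameters: a macro tile is 8*bankw*npipes*mtilea elements wide and 8*bankh*nbanks/mtilea elements tall, and the base alignment is one macro tile's worth of (possibly tile-split-reduced) 8x8 tiles. A standalone sketch of that arithmetic follows; the example pipe/bank/split values are made up, while the formulas mirror evergreen_surface_check_2d().

#include <stdio.h>

/* Example configuration only; real values come from the chip config
 * and the per-surface tiling flags. */
struct surf_cfg {
        unsigned npipes, nbanks, bankw, bankh, mtilea, tsplit, bpe, nsamples;
};

struct surf_align {
        unsigned palign, halign;
        unsigned long base_align;
};

/* Compute the pitch/height alignment (in elements) and the base address
 * alignment (in bytes) of a 2D-tiled evergreen surface. */
static void compute_2d_alignment(const struct surf_cfg *c, struct surf_align *a)
{
        unsigned tileb = 64 * c->bpe * c->nsamples;   /* bytes per 8x8 tile */
        unsigned slice_pt = 1;

        if (tileb > c->tsplit)                /* tile split spreads one tile */
                slice_pt = tileb / c->tsplit; /* across slice_pt slices      */
        tileb /= slice_pt;

        a->palign = 8 * c->bankw * c->npipes * c->mtilea;  /* macro tile width  */
        a->halign = 8 * c->bankh * c->nbanks / c->mtilea;  /* macro tile height */
        a->base_align = (unsigned long)(a->palign / 8) * (a->halign / 8) * tileb;
}

int main(void)
{
        struct surf_cfg cfg = {
                .npipes = 4, .nbanks = 8, .bankw = 1, .bankh = 2,
                .mtilea = 2, .tsplit = 1024, .bpe = 4, .nsamples = 1,
        };
        struct surf_align a;

        compute_2d_alignment(&cfg, &a);
        printf("pitch align %u, height align %u, base align %lu bytes\n",
               a.palign, a.halign, a.base_align);
        return 0;
}
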
@@ -503,6 +1189,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
503 | break; | 1189 | break; |
504 | case DB_DEPTH_CONTROL: | 1190 | case DB_DEPTH_CONTROL: |
505 | track->db_depth_control = radeon_get_ib_value(p, idx); | 1191 | track->db_depth_control = radeon_get_ib_value(p, idx); |
1192 | track->db_dirty = true; | ||
506 | break; | 1193 | break; |
507 | case CAYMAN_DB_EQAA: | 1194 | case CAYMAN_DB_EQAA: |
508 | if (p->rdev->family < CHIP_CAYMAN) { | 1195 | if (p->rdev->family < CHIP_CAYMAN) { |
@@ -532,20 +1219,35 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
532 | ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1219 | ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
533 | track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1220 | track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
534 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1221 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { |
1222 | unsigned bankw, bankh, mtaspect, tile_split; | ||
1223 | |||
1224 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | ||
1225 | &bankw, &bankh, &mtaspect, | ||
1226 | &tile_split); | ||
535 | ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | 1227 | ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); |
536 | ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); | 1228 | ib[idx] |= DB_TILE_SPLIT(tile_split) | |
1229 | DB_BANK_WIDTH(bankw) | | ||
1230 | DB_BANK_HEIGHT(bankh) | | ||
1231 | DB_MACRO_TILE_ASPECT(mtaspect); | ||
537 | } | 1232 | } |
538 | } | 1233 | } |
1234 | track->db_dirty = true; | ||
539 | break; | 1235 | break; |
540 | case DB_STENCIL_INFO: | 1236 | case DB_STENCIL_INFO: |
541 | track->db_s_info = radeon_get_ib_value(p, idx); | 1237 | track->db_s_info = radeon_get_ib_value(p, idx); |
1238 | track->db_dirty = true; | ||
542 | break; | 1239 | break; |
543 | case DB_DEPTH_VIEW: | 1240 | case DB_DEPTH_VIEW: |
544 | track->db_depth_view = radeon_get_ib_value(p, idx); | 1241 | track->db_depth_view = radeon_get_ib_value(p, idx); |
1242 | track->db_dirty = true; | ||
545 | break; | 1243 | break; |
546 | case DB_DEPTH_SIZE: | 1244 | case DB_DEPTH_SIZE: |
547 | track->db_depth_size = radeon_get_ib_value(p, idx); | 1245 | track->db_depth_size = radeon_get_ib_value(p, idx); |
548 | track->db_depth_size_idx = idx; | 1246 | track->db_dirty = true; |
1247 | break; | ||
1248 | case R_02805C_DB_DEPTH_SLICE: | ||
1249 | track->db_depth_slice = radeon_get_ib_value(p, idx); | ||
1250 | track->db_dirty = true; | ||
549 | break; | 1251 | break; |
550 | case DB_Z_READ_BASE: | 1252 | case DB_Z_READ_BASE: |
551 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 1253 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
@@ -557,6 +1259,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
557 | track->db_z_read_offset = radeon_get_ib_value(p, idx); | 1259 | track->db_z_read_offset = radeon_get_ib_value(p, idx); |
558 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1260 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
559 | track->db_z_read_bo = reloc->robj; | 1261 | track->db_z_read_bo = reloc->robj; |
1262 | track->db_dirty = true; | ||
560 | break; | 1263 | break; |
561 | case DB_Z_WRITE_BASE: | 1264 | case DB_Z_WRITE_BASE: |
562 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 1265 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
@@ -568,6 +1271,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
568 | track->db_z_write_offset = radeon_get_ib_value(p, idx); | 1271 | track->db_z_write_offset = radeon_get_ib_value(p, idx); |
569 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1272 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
570 | track->db_z_write_bo = reloc->robj; | 1273 | track->db_z_write_bo = reloc->robj; |
1274 | track->db_dirty = true; | ||
571 | break; | 1275 | break; |
572 | case DB_STENCIL_READ_BASE: | 1276 | case DB_STENCIL_READ_BASE: |
573 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 1277 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
@@ -579,6 +1283,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
579 | track->db_s_read_offset = radeon_get_ib_value(p, idx); | 1283 | track->db_s_read_offset = radeon_get_ib_value(p, idx); |
580 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1284 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
581 | track->db_s_read_bo = reloc->robj; | 1285 | track->db_s_read_bo = reloc->robj; |
1286 | track->db_dirty = true; | ||
582 | break; | 1287 | break; |
583 | case DB_STENCIL_WRITE_BASE: | 1288 | case DB_STENCIL_WRITE_BASE: |
584 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 1289 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
@@ -590,18 +1295,56 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
590 | track->db_s_write_offset = radeon_get_ib_value(p, idx); | 1295 | track->db_s_write_offset = radeon_get_ib_value(p, idx); |
591 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1296 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
592 | track->db_s_write_bo = reloc->robj; | 1297 | track->db_s_write_bo = reloc->robj; |
1298 | track->db_dirty = true; | ||
593 | break; | 1299 | break; |
594 | case VGT_STRMOUT_CONFIG: | 1300 | case VGT_STRMOUT_CONFIG: |
595 | track->vgt_strmout_config = radeon_get_ib_value(p, idx); | 1301 | track->vgt_strmout_config = radeon_get_ib_value(p, idx); |
1302 | track->streamout_dirty = true; | ||
596 | break; | 1303 | break; |
597 | case VGT_STRMOUT_BUFFER_CONFIG: | 1304 | case VGT_STRMOUT_BUFFER_CONFIG: |
598 | track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); | 1305 | track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); |
1306 | track->streamout_dirty = true; | ||
599 | break; | 1307 | break; |
1308 | case VGT_STRMOUT_BUFFER_BASE_0: | ||
1309 | case VGT_STRMOUT_BUFFER_BASE_1: | ||
1310 | case VGT_STRMOUT_BUFFER_BASE_2: | ||
1311 | case VGT_STRMOUT_BUFFER_BASE_3: | ||
1312 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
1313 | if (r) { | ||
1314 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
1315 | "0x%04X\n", reg); | ||
1316 | return -EINVAL; | ||
1317 | } | ||
1318 | tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; | ||
1319 | track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; | ||
1320 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1321 | track->vgt_strmout_bo[tmp] = reloc->robj; | ||
1322 | track->streamout_dirty = true; | ||
1323 | break; | ||
1324 | case VGT_STRMOUT_BUFFER_SIZE_0: | ||
1325 | case VGT_STRMOUT_BUFFER_SIZE_1: | ||
1326 | case VGT_STRMOUT_BUFFER_SIZE_2: | ||
1327 | case VGT_STRMOUT_BUFFER_SIZE_3: | ||
1328 | tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; | ||
1329 | /* size in register is DWs, convert to bytes */ | ||
1330 | track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; | ||
1331 | track->streamout_dirty = true; | ||
1332 | break; | ||
1333 | case CP_COHER_BASE: | ||
1334 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
1335 | if (r) { | ||
1336 | dev_warn(p->dev, "missing reloc for CP_COHER_BASE " | ||
1337 | "0x%04X\n", reg); | ||
1338 | return -EINVAL; | ||
1339 | } | ||
1340 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| break; | ||
600 | case CB_TARGET_MASK: | 1341 | case CB_TARGET_MASK: |
601 | track->cb_target_mask = radeon_get_ib_value(p, idx); | 1342 | track->cb_target_mask = radeon_get_ib_value(p, idx); |
1343 | track->cb_dirty = true; | ||
602 | break; | 1344 | break; |
603 | case CB_SHADER_MASK: | 1345 | case CB_SHADER_MASK: |
604 | track->cb_shader_mask = radeon_get_ib_value(p, idx); | 1346 | track->cb_shader_mask = radeon_get_ib_value(p, idx); |
1347 | track->cb_dirty = true; | ||
605 | break; | 1348 | break; |
606 | case PA_SC_AA_CONFIG: | 1349 | case PA_SC_AA_CONFIG: |
607 | if (p->rdev->family >= CHIP_CAYMAN) { | 1350 | if (p->rdev->family >= CHIP_CAYMAN) { |
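The VGT_STRMOUT_BUFFER_BASE/SIZE cases above lean on the register layout declared further down in evergreend.h: the per-buffer registers repeat every 0x10 bytes, so the buffer index falls out of (reg - BASE_0) / 16, the BASE value is tracked shifted left by 8 (256-byte granularity, matching the >> 8 relocation patching), and the SIZE registers count dwords, which the tracker converts to bytes. A minimal user-space sketch of that arithmetic, with made-up register writes standing in for real command-stream contents:

#include <stdint.h>
#include <stdio.h>

#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8

int main(void)
{
	/* example writes, as they would appear in the command stream */
	uint32_t base_reg = 0x28AF8;   /* VGT_STRMOUT_BUFFER_BASE_2 */
	uint32_t base_val = 0x00001234;
	uint32_t size_reg = 0x28AE0;   /* VGT_STRMOUT_BUFFER_SIZE_1 */
	uint32_t size_val = 0x100;     /* size in dwords */

	/* the BASE/SIZE registers are 0x10 bytes apart, one slot per buffer */
	unsigned base_idx = (base_reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
	unsigned size_idx = (size_reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;

	uint64_t bo_offset  = (uint64_t)base_val << 8;  /* tracked in bytes */
	uint32_t size_bytes = size_val * 4;             /* dwords -> bytes */

	printf("buffer %u base offset 0x%llx, buffer %u size %u bytes\n",
	       base_idx, (unsigned long long)bo_offset, size_idx, size_bytes);
	return 0;
}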
@@ -631,6 +1374,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
631 | case CB_COLOR7_VIEW: | 1374 | case CB_COLOR7_VIEW: |
632 | tmp = (reg - CB_COLOR0_VIEW) / 0x3c; | 1375 | tmp = (reg - CB_COLOR0_VIEW) / 0x3c; |
633 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | 1376 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); |
1377 | track->cb_dirty = true; | ||
634 | break; | 1378 | break; |
635 | case CB_COLOR8_VIEW: | 1379 | case CB_COLOR8_VIEW: |
636 | case CB_COLOR9_VIEW: | 1380 | case CB_COLOR9_VIEW: |
@@ -638,6 +1382,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
638 | case CB_COLOR11_VIEW: | 1382 | case CB_COLOR11_VIEW: |
639 | tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; | 1383 | tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; |
640 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | 1384 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); |
1385 | track->cb_dirty = true; | ||
641 | break; | 1386 | break; |
642 | case CB_COLOR0_INFO: | 1387 | case CB_COLOR0_INFO: |
643 | case CB_COLOR1_INFO: | 1388 | case CB_COLOR1_INFO: |
@@ -659,6 +1404,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
659 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1404 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
660 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1405 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
661 | } | 1406 | } |
1407 | track->cb_dirty = true; | ||
662 | break; | 1408 | break; |
663 | case CB_COLOR8_INFO: | 1409 | case CB_COLOR8_INFO: |
664 | case CB_COLOR9_INFO: | 1410 | case CB_COLOR9_INFO: |
@@ -676,6 +1422,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
676 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1422 | ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
677 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 1423 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
678 | } | 1424 | } |
1425 | track->cb_dirty = true; | ||
679 | break; | 1426 | break; |
680 | case CB_COLOR0_PITCH: | 1427 | case CB_COLOR0_PITCH: |
681 | case CB_COLOR1_PITCH: | 1428 | case CB_COLOR1_PITCH: |
@@ -687,7 +1434,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
687 | case CB_COLOR7_PITCH: | 1434 | case CB_COLOR7_PITCH: |
688 | tmp = (reg - CB_COLOR0_PITCH) / 0x3c; | 1435 | tmp = (reg - CB_COLOR0_PITCH) / 0x3c; |
689 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); | 1436 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); |
690 | track->cb_color_pitch_idx[tmp] = idx; | 1437 | track->cb_dirty = true; |
691 | break; | 1438 | break; |
692 | case CB_COLOR8_PITCH: | 1439 | case CB_COLOR8_PITCH: |
693 | case CB_COLOR9_PITCH: | 1440 | case CB_COLOR9_PITCH: |
@@ -695,7 +1442,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
695 | case CB_COLOR11_PITCH: | 1442 | case CB_COLOR11_PITCH: |
696 | tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; | 1443 | tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; |
697 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); | 1444 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); |
698 | track->cb_color_pitch_idx[tmp] = idx; | 1445 | track->cb_dirty = true; |
699 | break; | 1446 | break; |
700 | case CB_COLOR0_SLICE: | 1447 | case CB_COLOR0_SLICE: |
701 | case CB_COLOR1_SLICE: | 1448 | case CB_COLOR1_SLICE: |
@@ -707,7 +1454,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
707 | case CB_COLOR7_SLICE: | 1454 | case CB_COLOR7_SLICE: |
708 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; | 1455 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; |
709 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1456 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
710 | track->cb_color_slice_idx[tmp] = idx; | 1457 | track->cb_dirty = true; |
711 | break; | 1458 | break; |
712 | case CB_COLOR8_SLICE: | 1459 | case CB_COLOR8_SLICE: |
713 | case CB_COLOR9_SLICE: | 1460 | case CB_COLOR9_SLICE: |
@@ -715,7 +1462,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
715 | case CB_COLOR11_SLICE: | 1462 | case CB_COLOR11_SLICE: |
716 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; | 1463 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; |
717 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1464 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
718 | track->cb_color_slice_idx[tmp] = idx; | 1465 | track->cb_dirty = true; |
719 | break; | 1466 | break; |
720 | case CB_COLOR0_ATTRIB: | 1467 | case CB_COLOR0_ATTRIB: |
721 | case CB_COLOR1_ATTRIB: | 1468 | case CB_COLOR1_ATTRIB: |
@@ -725,6 +1472,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
725 | case CB_COLOR5_ATTRIB: | 1472 | case CB_COLOR5_ATTRIB: |
726 | case CB_COLOR6_ATTRIB: | 1473 | case CB_COLOR6_ATTRIB: |
727 | case CB_COLOR7_ATTRIB: | 1474 | case CB_COLOR7_ATTRIB: |
1475 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
1476 | if (r) { | ||
1477 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
1478 | "0x%04X\n", reg); | ||
1479 | return -EINVAL; | ||
1480 | } | ||
1481 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | ||
1482 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
1483 | unsigned bankw, bankh, mtaspect, tile_split; | ||
1484 | |||
1485 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | ||
1486 | &bankw, &bankh, &mtaspect, | ||
1487 | &tile_split); | ||
1488 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | ||
1489 | ib[idx] |= CB_TILE_SPLIT(tile_split) | | ||
1490 | CB_BANK_WIDTH(bankw) | | ||
1491 | CB_BANK_HEIGHT(bankh) | | ||
1492 | CB_MACRO_TILE_ASPECT(mtaspect); | ||
1493 | } | ||
1494 | } | ||
1495 | tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c); | ||
1496 | track->cb_color_attrib[tmp] = ib[idx]; | ||
1497 | track->cb_dirty = true; | ||
1498 | break; | ||
728 | case CB_COLOR8_ATTRIB: | 1499 | case CB_COLOR8_ATTRIB: |
729 | case CB_COLOR9_ATTRIB: | 1500 | case CB_COLOR9_ATTRIB: |
730 | case CB_COLOR10_ATTRIB: | 1501 | case CB_COLOR10_ATTRIB: |
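The DB_Z_INFO and CB_COLORn_ATTRIB cases above share one pattern: evergreen_tiling_fields() splits the buffer object's tiling flags into bank width, bank height, macro tile aspect and tile split, and the encoded fields are then ORed into the register image in the IB. The sketch below shows just that packing step, reusing the S_028C74_* field encodings quoted from evergreend.h later in this diff; the decoded field values are made-up stand-ins for what evergreen_tiling_fields() would return:

#include <stdint.h>
#include <stdio.h>

/* CB_COLORn_ATTRIB field encodings, as defined in evergreend.h */
#define S_028C74_TILE_SPLIT(x)        (((x) & 0xf) << 5)
#define S_028C74_NUM_BANKS(x)         (((x) & 0x3) << 10)
#define S_028C74_BANK_WIDTH(x)        (((x) & 0x3) << 13)
#define S_028C74_BANK_HEIGHT(x)       (((x) & 0x3) << 16)
#define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)

int main(void)
{
	/* stand-ins for the values evergreen_tiling_fields() would decode */
	unsigned bankw = 1, bankh = 2, mtaspect = 1, tile_split = 4;
	unsigned num_banks = 2;     /* derived from the chip's tiling config */
	uint32_t attrib = 0;        /* CB_COLORn_ATTRIB dword in the IB */

	attrib |= S_028C74_NUM_BANKS(num_banks);
	attrib |= S_028C74_TILE_SPLIT(tile_split) |
		  S_028C74_BANK_WIDTH(bankw) |
		  S_028C74_BANK_HEIGHT(bankh) |
		  S_028C74_MACRO_TILE_ASPECT(mtaspect);

	printf("patched CB_COLORn_ATTRIB: 0x%08x\n", attrib);
	return 0;
}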
@@ -735,30 +1506,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
735 | "0x%04X\n", reg); | 1506 | "0x%04X\n", reg); |
736 | return -EINVAL; | 1507 | return -EINVAL; |
737 | } | 1508 | } |
738 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 1509 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
739 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | 1510 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { |
740 | ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); | 1511 | unsigned bankw, bankh, mtaspect, tile_split; |
1512 | |||
1513 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | ||
1514 | &bankw, &bankh, &mtaspect, | ||
1515 | &tile_split); | ||
1516 | ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | ||
1517 | ib[idx] |= CB_TILE_SPLIT(tile_split) | | ||
1518 | CB_BANK_WIDTH(bankw) | | ||
1519 | CB_BANK_HEIGHT(bankh) | | ||
1520 | CB_MACRO_TILE_ASPECT(mtaspect); | ||
1521 | } | ||
741 | } | 1522 | } |
742 | break; | 1523 | tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8; |
743 | case CB_COLOR0_DIM: | 1524 | track->cb_color_attrib[tmp] = ib[idx]; |
744 | case CB_COLOR1_DIM: | 1525 | track->cb_dirty = true; |
745 | case CB_COLOR2_DIM: | ||
746 | case CB_COLOR3_DIM: | ||
747 | case CB_COLOR4_DIM: | ||
748 | case CB_COLOR5_DIM: | ||
749 | case CB_COLOR6_DIM: | ||
750 | case CB_COLOR7_DIM: | ||
751 | tmp = (reg - CB_COLOR0_DIM) / 0x3c; | ||
752 | track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); | ||
753 | track->cb_color_dim_idx[tmp] = idx; | ||
754 | break; | ||
755 | case CB_COLOR8_DIM: | ||
756 | case CB_COLOR9_DIM: | ||
757 | case CB_COLOR10_DIM: | ||
758 | case CB_COLOR11_DIM: | ||
759 | tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; | ||
760 | track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); | ||
761 | track->cb_color_dim_idx[tmp] = idx; | ||
762 | break; | 1526 | break; |
763 | case CB_COLOR0_FMASK: | 1527 | case CB_COLOR0_FMASK: |
764 | case CB_COLOR1_FMASK: | 1528 | case CB_COLOR1_FMASK: |
@@ -833,8 +1597,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
833 | tmp = (reg - CB_COLOR0_BASE) / 0x3c; | 1597 | tmp = (reg - CB_COLOR0_BASE) / 0x3c; |
834 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | 1598 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); |
835 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1599 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
836 | track->cb_color_base_last[tmp] = ib[idx]; | ||
837 | track->cb_color_bo[tmp] = reloc->robj; | 1600 | track->cb_color_bo[tmp] = reloc->robj; |
1601 | track->cb_dirty = true; | ||
838 | break; | 1602 | break; |
839 | case CB_COLOR8_BASE: | 1603 | case CB_COLOR8_BASE: |
840 | case CB_COLOR9_BASE: | 1604 | case CB_COLOR9_BASE: |
@@ -849,8 +1613,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
849 | tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; | 1613 | tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; |
850 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | 1614 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); |
851 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1615 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
852 | track->cb_color_base_last[tmp] = ib[idx]; | ||
853 | track->cb_color_bo[tmp] = reloc->robj; | 1616 | track->cb_color_bo[tmp] = reloc->robj; |
1617 | track->cb_dirty = true; | ||
854 | break; | 1618 | break; |
855 | case CB_IMMED0_BASE: | 1619 | case CB_IMMED0_BASE: |
856 | case CB_IMMED1_BASE: | 1620 | case CB_IMMED1_BASE: |
@@ -989,6 +1753,9 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
989 | } | 1753 | } |
990 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1754 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
991 | break; | 1755 | break; |
1756 | case SX_MISC: | ||
1757 | track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; | ||
1758 | break; | ||
992 | default: | 1759 | default: |
993 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 1760 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
994 | return -EINVAL; | 1761 | return -EINVAL; |
@@ -996,22 +1763,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
996 | return 0; | 1763 | return 0; |
997 | } | 1764 | } |
998 | 1765 | ||
999 | /** | 1766 | static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
1000 | * evergreen_check_texture_resource() - check if register is authorized or not | ||
1001 | * @p: parser structure holding parsing context | ||
1002 | * @idx: index into the cs buffer | ||
1003 | * @texture: texture's bo structure | ||
1004 | * @mipmap: mipmap's bo structure | ||
1005 | * | ||
1006 | * This function will check that the resource has valid field and that | ||
1007 | * the texture and mipmap bo object are big enough to cover this resource. | ||
1008 | */ | ||
1009 | static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | ||
1010 | struct radeon_bo *texture, | ||
1011 | struct radeon_bo *mipmap) | ||
1012 | { | 1767 | { |
1013 | /* XXX fill in */ | 1768 | u32 last_reg, m, i; |
1014 | return 0; | 1769 | |
1770 | if (p->rdev->family >= CHIP_CAYMAN) | ||
1771 | last_reg = ARRAY_SIZE(cayman_reg_safe_bm); | ||
1772 | else | ||
1773 | last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | ||
1774 | |||
1775 | i = (reg >> 7); | ||
1776 | if (i >= last_reg) { | ||
1777 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
1778 | return false; | ||
1779 | } | ||
1780 | m = 1 << ((reg >> 2) & 31); | ||
1781 | if (p->rdev->family >= CHIP_CAYMAN) { | ||
1782 | if (!(cayman_reg_safe_bm[i] & m)) | ||
1783 | return true; | ||
1784 | } else { | ||
1785 | if (!(evergreen_reg_safe_bm[i] & m)) | ||
1786 | return true; | ||
1787 | } | ||
1788 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
1789 | return false; | ||
1015 | } | 1790 | } |
1016 | 1791 | ||
1017 | static int evergreen_packet3_check(struct radeon_cs_parser *p, | 1792 | static int evergreen_packet3_check(struct radeon_cs_parser *p, |
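evergreen_is_safe_reg() above is a plain bitmap lookup: registers are 4 bytes apart, so (reg >> 2) numbers them, 32 of them fit in each u32 entry, and the array index is reg >> 7 (128 bytes of register space per entry). A cleared bit means the register may be written without further checking; a set bit, or an out-of-range index, is rejected. A user-space sketch with a hypothetical two-entry bitmap standing in for evergreen_reg_safe_bm/cayman_reg_safe_bm:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical bitmap: a set bit marks a register that is NOT freely writable */
static const uint32_t reg_safe_bm[2] = { 0x00000000, 0x00010000 };

static bool is_safe_reg(uint32_t reg)
{
	uint32_t i = reg >> 7;                /* 32 registers (128 bytes) per entry */
	uint32_t m = 1u << ((reg >> 2) & 31); /* bit for this register within the entry */

	if (i >= sizeof(reg_safe_bm) / sizeof(reg_safe_bm[0]))
		return false;                 /* out of range: reject */
	return !(reg_safe_bm[i] & m);         /* cleared bit -> safe */
}

int main(void)
{
	printf("reg 0x80 safe: %d\n", is_safe_reg(0x80));   /* entry 1, bit 0 -> safe */
	printf("reg 0xC0 safe: %d\n", is_safe_reg(0xC0));   /* entry 1, bit 16 -> rejected */
	return 0;
}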
@@ -1036,6 +1811,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1036 | { | 1811 | { |
1037 | int pred_op; | 1812 | int pred_op; |
1038 | int tmp; | 1813 | int tmp; |
1814 | uint64_t offset; | ||
1815 | |||
1039 | if (pkt->count != 1) { | 1816 | if (pkt->count != 1) { |
1040 | DRM_ERROR("bad SET PREDICATION\n"); | 1817 | DRM_ERROR("bad SET PREDICATION\n"); |
1041 | return -EINVAL; | 1818 | return -EINVAL; |
@@ -1059,8 +1836,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1059 | return -EINVAL; | 1836 | return -EINVAL; |
1060 | } | 1837 | } |
1061 | 1838 | ||
1062 | ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1839 | offset = reloc->lobj.gpu_offset + |
1063 | ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff); | 1840 | (idx_value & 0xfffffff0) + |
1841 | ((u64)(tmp & 0xff) << 32); | ||
1842 | |||
1843 | ib[idx + 0] = offset; | ||
1844 | ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); | ||
1064 | } | 1845 | } |
1065 | break; | 1846 | break; |
1066 | case PACKET3_CONTEXT_CONTROL: | 1847 | case PACKET3_CONTEXT_CONTROL: |
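The relocation patching in PACKET3_SET_PREDICATION (and in the DRAW_INDEX/EVENT_WRITE cases that follow) no longer adds the low and high halves of the GPU offset separately; it rebuilds the whole 64-bit address first: the low dword from the IB masked to its alignment, the low byte of the next dword as bits 32..39, plus the buffer object's GPU offset, and then splits the sum back into the two packet dwords. A stand-alone sketch of that arithmetic with example values (upper_32_bits() is open-coded here, not the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	/* dwords as found in the IB for a SET_PREDICATION-style packet */
	uint32_t lo_dword = 0x00000130;          /* low address bits plus flag bits 0..3 */
	uint32_t hi_dword = 0xABCDEF01;          /* bits 32..39 live in the low byte */
	uint64_t bo_gpu_offset = 0x100000000ull; /* example relocation target */

	uint64_t offset = bo_gpu_offset +
			  (lo_dword & 0xfffffff0) +            /* keep the 16-byte-aligned part */
			  ((uint64_t)(hi_dword & 0xff) << 32);

	uint32_t new_lo = (uint32_t)offset;
	uint32_t new_hi = (hi_dword & 0xffffff00) | (upper_32_bits(offset) & 0xff);

	printf("patched dwords: 0x%08x 0x%08x\n", new_lo, new_hi);
	return 0;
}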
@@ -1088,6 +1869,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1088 | } | 1869 | } |
1089 | break; | 1870 | break; |
1090 | case PACKET3_INDEX_BASE: | 1871 | case PACKET3_INDEX_BASE: |
1872 | { | ||
1873 | uint64_t offset; | ||
1874 | |||
1091 | if (pkt->count != 1) { | 1875 | if (pkt->count != 1) { |
1092 | DRM_ERROR("bad INDEX_BASE\n"); | 1876 | DRM_ERROR("bad INDEX_BASE\n"); |
1093 | return -EINVAL; | 1877 | return -EINVAL; |
@@ -1097,15 +1881,24 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1097 | DRM_ERROR("bad INDEX_BASE\n"); | 1881 | DRM_ERROR("bad INDEX_BASE\n"); |
1098 | return -EINVAL; | 1882 | return -EINVAL; |
1099 | } | 1883 | } |
1100 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1884 | |
1101 | ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1885 | offset = reloc->lobj.gpu_offset + |
1886 | idx_value + | ||
1887 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | ||
1888 | |||
1889 | ib[idx+0] = offset; | ||
1890 | ib[idx+1] = upper_32_bits(offset) & 0xff; | ||
1891 | |||
1102 | r = evergreen_cs_track_check(p); | 1892 | r = evergreen_cs_track_check(p); |
1103 | if (r) { | 1893 | if (r) { |
1104 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | 1894 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); |
1105 | return r; | 1895 | return r; |
1106 | } | 1896 | } |
1107 | break; | 1897 | break; |
1898 | } | ||
1108 | case PACKET3_DRAW_INDEX: | 1899 | case PACKET3_DRAW_INDEX: |
1900 | { | ||
1901 | uint64_t offset; | ||
1109 | if (pkt->count != 3) { | 1902 | if (pkt->count != 3) { |
1110 | DRM_ERROR("bad DRAW_INDEX\n"); | 1903 | DRM_ERROR("bad DRAW_INDEX\n"); |
1111 | return -EINVAL; | 1904 | return -EINVAL; |
@@ -1115,15 +1908,25 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1115 | DRM_ERROR("bad DRAW_INDEX\n"); | 1908 | DRM_ERROR("bad DRAW_INDEX\n"); |
1116 | return -EINVAL; | 1909 | return -EINVAL; |
1117 | } | 1910 | } |
1118 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1911 | |
1119 | ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1912 | offset = reloc->lobj.gpu_offset + |
1913 | idx_value + | ||
1914 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | ||
1915 | |||
1916 | ib[idx+0] = offset; | ||
1917 | ib[idx+1] = upper_32_bits(offset) & 0xff; | ||
1918 | |||
1120 | r = evergreen_cs_track_check(p); | 1919 | r = evergreen_cs_track_check(p); |
1121 | if (r) { | 1920 | if (r) { |
1122 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | 1921 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); |
1123 | return r; | 1922 | return r; |
1124 | } | 1923 | } |
1125 | break; | 1924 | break; |
1925 | } | ||
1126 | case PACKET3_DRAW_INDEX_2: | 1926 | case PACKET3_DRAW_INDEX_2: |
1927 | { | ||
1928 | uint64_t offset; | ||
1929 | |||
1127 | if (pkt->count != 4) { | 1930 | if (pkt->count != 4) { |
1128 | DRM_ERROR("bad DRAW_INDEX_2\n"); | 1931 | DRM_ERROR("bad DRAW_INDEX_2\n"); |
1129 | return -EINVAL; | 1932 | return -EINVAL; |
@@ -1133,14 +1936,21 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1133 | DRM_ERROR("bad DRAW_INDEX_2\n"); | 1936 | DRM_ERROR("bad DRAW_INDEX_2\n"); |
1134 | return -EINVAL; | 1937 | return -EINVAL; |
1135 | } | 1938 | } |
1136 | ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1939 | |
1137 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1940 | offset = reloc->lobj.gpu_offset + |
1941 | radeon_get_ib_value(p, idx+1) + | ||
1942 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
1943 | |||
1944 | ib[idx+1] = offset; | ||
1945 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
1946 | |||
1138 | r = evergreen_cs_track_check(p); | 1947 | r = evergreen_cs_track_check(p); |
1139 | if (r) { | 1948 | if (r) { |
1140 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | 1949 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); |
1141 | return r; | 1950 | return r; |
1142 | } | 1951 | } |
1143 | break; | 1952 | break; |
1953 | } | ||
1144 | case PACKET3_DRAW_INDEX_AUTO: | 1954 | case PACKET3_DRAW_INDEX_AUTO: |
1145 | if (pkt->count != 1) { | 1955 | if (pkt->count != 1) { |
1146 | DRM_ERROR("bad DRAW_INDEX_AUTO\n"); | 1956 | DRM_ERROR("bad DRAW_INDEX_AUTO\n"); |
@@ -1231,13 +2041,20 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1231 | } | 2041 | } |
1232 | /* bit 4 is reg (0) or mem (1) */ | 2042 | /* bit 4 is reg (0) or mem (1) */ |
1233 | if (idx_value & 0x10) { | 2043 | if (idx_value & 0x10) { |
2044 | uint64_t offset; | ||
2045 | |||
1234 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 2046 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
1235 | if (r) { | 2047 | if (r) { |
1236 | DRM_ERROR("bad WAIT_REG_MEM\n"); | 2048 | DRM_ERROR("bad WAIT_REG_MEM\n"); |
1237 | return -EINVAL; | 2049 | return -EINVAL; |
1238 | } | 2050 | } |
1239 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 2051 | |
1240 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 2052 | offset = reloc->lobj.gpu_offset + |
2053 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | ||
2054 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
2055 | |||
2056 | ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc); | ||
2057 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
1241 | } | 2058 | } |
1242 | break; | 2059 | break; |
1243 | case PACKET3_SURFACE_SYNC: | 2060 | case PACKET3_SURFACE_SYNC: |
@@ -1262,16 +2079,25 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1262 | return -EINVAL; | 2079 | return -EINVAL; |
1263 | } | 2080 | } |
1264 | if (pkt->count) { | 2081 | if (pkt->count) { |
2082 | uint64_t offset; | ||
2083 | |||
1265 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 2084 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
1266 | if (r) { | 2085 | if (r) { |
1267 | DRM_ERROR("bad EVENT_WRITE\n"); | 2086 | DRM_ERROR("bad EVENT_WRITE\n"); |
1268 | return -EINVAL; | 2087 | return -EINVAL; |
1269 | } | 2088 | } |
1270 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 2089 | offset = reloc->lobj.gpu_offset + |
1271 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 2090 | (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
2091 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
2092 | |||
2093 | ib[idx+1] = offset & 0xfffffff8; | ||
2094 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
1272 | } | 2095 | } |
1273 | break; | 2096 | break; |
1274 | case PACKET3_EVENT_WRITE_EOP: | 2097 | case PACKET3_EVENT_WRITE_EOP: |
2098 | { | ||
2099 | uint64_t offset; | ||
2100 | |||
1275 | if (pkt->count != 4) { | 2101 | if (pkt->count != 4) { |
1276 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); | 2102 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); |
1277 | return -EINVAL; | 2103 | return -EINVAL; |
@@ -1281,10 +2107,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1281 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); | 2107 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); |
1282 | return -EINVAL; | 2108 | return -EINVAL; |
1283 | } | 2109 | } |
1284 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 2110 | |
1285 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 2111 | offset = reloc->lobj.gpu_offset + |
2112 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | ||
2113 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
2114 | |||
2115 | ib[idx+1] = offset & 0xfffffffc; | ||
2116 | ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); | ||
1286 | break; | 2117 | break; |
2118 | } | ||
1287 | case PACKET3_EVENT_WRITE_EOS: | 2119 | case PACKET3_EVENT_WRITE_EOS: |
2120 | { | ||
2121 | uint64_t offset; | ||
2122 | |||
1288 | if (pkt->count != 3) { | 2123 | if (pkt->count != 3) { |
1289 | DRM_ERROR("bad EVENT_WRITE_EOS\n"); | 2124 | DRM_ERROR("bad EVENT_WRITE_EOS\n"); |
1290 | return -EINVAL; | 2125 | return -EINVAL; |
@@ -1294,9 +2129,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1294 | DRM_ERROR("bad EVENT_WRITE_EOS\n"); | 2129 | DRM_ERROR("bad EVENT_WRITE_EOS\n"); |
1295 | return -EINVAL; | 2130 | return -EINVAL; |
1296 | } | 2131 | } |
1297 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 2132 | |
1298 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 2133 | offset = reloc->lobj.gpu_offset + |
2134 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | ||
2135 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
2136 | |||
2137 | ib[idx+1] = offset & 0xfffffffc; | ||
2138 | ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); | ||
1299 | break; | 2139 | break; |
2140 | } | ||
1300 | case PACKET3_SET_CONFIG_REG: | 2141 | case PACKET3_SET_CONFIG_REG: |
1301 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; | 2142 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; |
1302 | end_reg = 4 * pkt->count + start_reg - 4; | 2143 | end_reg = 4 * pkt->count + start_reg - 4; |
@@ -1344,6 +2185,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1344 | } | 2185 | } |
1345 | for (i = 0; i < (pkt->count / 8); i++) { | 2186 | for (i = 0; i < (pkt->count / 8); i++) { |
1346 | struct radeon_bo *texture, *mipmap; | 2187 | struct radeon_bo *texture, *mipmap; |
2188 | u32 toffset, moffset; | ||
1347 | u32 size, offset; | 2189 | u32 size, offset; |
1348 | 2190 | ||
1349 | switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { | 2191 | switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { |
@@ -1354,32 +2196,42 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1354 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 2196 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
1355 | return -EINVAL; | 2197 | return -EINVAL; |
1356 | } | 2198 | } |
1357 | ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1358 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | 2199 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1359 | ib[idx+1+(i*8)+1] |= | 2200 | ib[idx+1+(i*8)+1] |= |
1360 | TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); | 2201 | TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); |
1361 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | 2202 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { |
1362 | ib[idx+1+(i*8)+6] |= | 2203 | unsigned bankw, bankh, mtaspect, tile_split; |
1363 | TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); | 2204 | |
2205 | evergreen_tiling_fields(reloc->lobj.tiling_flags, | ||
2206 | &bankw, &bankh, &mtaspect, | ||
2207 | &tile_split); | ||
2208 | ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split); | ||
1364 | ib[idx+1+(i*8)+7] |= | 2209 | ib[idx+1+(i*8)+7] |= |
2210 | TEX_BANK_WIDTH(bankw) | | ||
2211 | TEX_BANK_HEIGHT(bankh) | | ||
2212 | MACRO_TILE_ASPECT(mtaspect) | | ||
1365 | TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); | 2213 | TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); |
1366 | } | 2214 | } |
1367 | } | 2215 | } |
1368 | texture = reloc->robj; | 2216 | texture = reloc->robj; |
2217 | toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1369 | /* tex mip base */ | 2218 | /* tex mip base */ |
1370 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 2219 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
1371 | if (r) { | 2220 | if (r) { |
1372 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 2221 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
1373 | return -EINVAL; | 2222 | return -EINVAL; |
1374 | } | 2223 | } |
1375 | ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 2224 | moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1376 | mipmap = reloc->robj; | 2225 | mipmap = reloc->robj; |
1377 | r = evergreen_check_texture_resource(p, idx+1+(i*8), | 2226 | r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8)); |
1378 | texture, mipmap); | ||
1379 | if (r) | 2227 | if (r) |
1380 | return r; | 2228 | return r; |
2229 | ib[idx+1+(i*8)+2] += toffset; | ||
2230 | ib[idx+1+(i*8)+3] += moffset; | ||
1381 | break; | 2231 | break; |
1382 | case SQ_TEX_VTX_VALID_BUFFER: | 2232 | case SQ_TEX_VTX_VALID_BUFFER: |
2233 | { | ||
2234 | uint64_t offset64; | ||
1383 | /* vtx base */ | 2235 | /* vtx base */ |
1384 | r = evergreen_cs_packet_next_reloc(p, &reloc); | 2236 | r = evergreen_cs_packet_next_reloc(p, &reloc); |
1385 | if (r) { | 2237 | if (r) { |
@@ -1391,11 +2243,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1391 | if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { | 2243 | if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { |
1392 | /* force size to size of the buffer */ | 2244 | /* force size to size of the buffer */ |
1393 | dev_warn(p->dev, "vbo resource seems too big for the bo\n"); | 2245 | dev_warn(p->dev, "vbo resource seems too big for the bo\n"); |
1394 | ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj); | 2246 | ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; |
1395 | } | 2247 | } |
1396 | ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); | 2248 | |
1397 | ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 2249 | offset64 = reloc->lobj.gpu_offset + offset; |
2250 | ib[idx+1+(i*8)+0] = offset64; | ||
2251 | ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | | ||
2252 | (upper_32_bits(offset64) & 0xff); | ||
1398 | break; | 2253 | break; |
2254 | } | ||
1399 | case SQ_TEX_VTX_INVALID_TEXTURE: | 2255 | case SQ_TEX_VTX_INVALID_TEXTURE: |
1400 | case SQ_TEX_VTX_INVALID_BUFFER: | 2256 | case SQ_TEX_VTX_INVALID_BUFFER: |
1401 | default: | 2257 | default: |
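For SQ_TEX_VTX_VALID_BUFFER resources, the checker now clamps an oversized fetch to what actually remains in the buffer object after the start offset; the old code clamped to the full buffer size, which could still overflow when the offset was non-zero. A minimal sketch of the clamp, with a hypothetical size standing in for radeon_bo_size():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bo_size = 4096;     /* hypothetical radeon_bo_size() result */
	uint32_t offset  = 1024;     /* start of the vertex data inside the bo */
	uint32_t size    = 4000;     /* size requested by the resource descriptor */

	if ((uint64_t)size + offset > bo_size) {
		/* force size to what is left in the buffer after the offset */
		size = (uint32_t)(bo_size - offset);
	}
	printf("clamped vbo size: %u bytes\n", size);   /* 3072 */
	return 0;
}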
@@ -1451,6 +2307,104 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1451 | return -EINVAL; | 2307 | return -EINVAL; |
1452 | } | 2308 | } |
1453 | break; | 2309 | break; |
2310 | case PACKET3_STRMOUT_BUFFER_UPDATE: | ||
2311 | if (pkt->count != 4) { | ||
2312 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); | ||
2313 | return -EINVAL; | ||
2314 | } | ||
2315 | /* Updating memory at DST_ADDRESS. */ | ||
2316 | if (idx_value & 0x1) { | ||
2317 | u64 offset; | ||
2318 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
2319 | if (r) { | ||
2320 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); | ||
2321 | return -EINVAL; | ||
2322 | } | ||
2323 | offset = radeon_get_ib_value(p, idx+1); | ||
2324 | offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; | ||
2325 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
2326 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", | ||
2327 | offset + 4, radeon_bo_size(reloc->robj)); | ||
2328 | return -EINVAL; | ||
2329 | } | ||
2330 | offset += reloc->lobj.gpu_offset; | ||
2331 | ib[idx+1] = offset; | ||
2332 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
2333 | } | ||
2334 | /* Reading data from SRC_ADDRESS. */ | ||
2335 | if (((idx_value >> 1) & 0x3) == 2) { | ||
2336 | u64 offset; | ||
2337 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
2338 | if (r) { | ||
2339 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); | ||
2340 | return -EINVAL; | ||
2341 | } | ||
2342 | offset = radeon_get_ib_value(p, idx+3); | ||
2343 | offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; | ||
2344 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
2345 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", | ||
2346 | offset + 4, radeon_bo_size(reloc->robj)); | ||
2347 | return -EINVAL; | ||
2348 | } | ||
2349 | offset += reloc->lobj.gpu_offset; | ||
2350 | ib[idx+3] = offset; | ||
2351 | ib[idx+4] = upper_32_bits(offset) & 0xff; | ||
2352 | } | ||
2353 | break; | ||
2354 | case PACKET3_COPY_DW: | ||
2355 | if (pkt->count != 4) { | ||
2356 | DRM_ERROR("bad COPY_DW (invalid count)\n"); | ||
2357 | return -EINVAL; | ||
2358 | } | ||
2359 | if (idx_value & 0x1) { | ||
2360 | u64 offset; | ||
2361 | /* SRC is memory. */ | ||
2362 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
2363 | if (r) { | ||
2364 | DRM_ERROR("bad COPY_DW (missing src reloc)\n"); | ||
2365 | return -EINVAL; | ||
2366 | } | ||
2367 | offset = radeon_get_ib_value(p, idx+1); | ||
2368 | offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; | ||
2369 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
2370 | DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", | ||
2371 | offset + 4, radeon_bo_size(reloc->robj)); | ||
2372 | return -EINVAL; | ||
2373 | } | ||
2374 | offset += reloc->lobj.gpu_offset; | ||
2375 | ib[idx+1] = offset; | ||
2376 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
2377 | } else { | ||
2378 | /* SRC is a reg. */ | ||
2379 | reg = radeon_get_ib_value(p, idx+1) << 2; | ||
2380 | if (!evergreen_is_safe_reg(p, reg, idx+1)) | ||
2381 | return -EINVAL; | ||
2382 | } | ||
2383 | if (idx_value & 0x2) { | ||
2384 | u64 offset; | ||
2385 | /* DST is memory. */ | ||
2386 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
2387 | if (r) { | ||
2388 | DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); | ||
2389 | return -EINVAL; | ||
2390 | } | ||
2391 | offset = radeon_get_ib_value(p, idx+3); | ||
2392 | offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; | ||
2393 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
2394 | DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", | ||
2395 | offset + 4, radeon_bo_size(reloc->robj)); | ||
2396 | return -EINVAL; | ||
2397 | } | ||
2398 | offset += reloc->lobj.gpu_offset; | ||
2399 | ib[idx+3] = offset; | ||
2400 | ib[idx+4] = upper_32_bits(offset) & 0xff; | ||
2401 | } else { | ||
2402 | /* DST is a reg. */ | ||
2403 | reg = radeon_get_ib_value(p, idx+3) << 2; | ||
2404 | if (!evergreen_is_safe_reg(p, reg, idx+3)) | ||
2405 | return -EINVAL; | ||
2406 | } | ||
2407 | break; | ||
1454 | case PACKET3_NOP: | 2408 | case PACKET3_NOP: |
1455 | break; | 2409 | break; |
1456 | default: | 2410 | default: |
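PACKET3_STRMOUT_BUFFER_UPDATE and PACKET3_COPY_DW validate their memory operands the same way: reassemble the 40-bit address from the low dword and the low byte of the following dword, make sure the 4-byte access still fits inside the buffer object, and only then add the relocation's GPU offset and write the dwords back. A compact sketch of that check with made-up packet contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical packet dwords and buffer-object size */
	uint32_t addr_lo = 0x00000ffc;          /* low 32 bits of the address */
	uint32_t addr_hi = 0x00000000;          /* bits 32..39 in the low byte */
	uint64_t bo_size = 0x1000;              /* radeon_bo_size() stand-in */
	uint64_t gpu_offset = 0x80000000ull;    /* relocation target */

	uint64_t offset = (uint64_t)addr_lo + ((uint64_t)(addr_hi & 0xff) << 32);

	if (offset + 4 > bo_size) {
		printf("reject: dword at 0x%llx does not fit in a 0x%llx byte bo\n",
		       (unsigned long long)(offset + 4), (unsigned long long)bo_size);
		return 1;
	}
	offset += gpu_offset;
	printf("patched dwords: 0x%08x 0x%02x\n",
	       (uint32_t)offset, (uint32_t)(offset >> 32) & 0xff);
	return 0;
}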
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 4215de95477e..96c10b3991aa 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
@@ -219,6 +219,7 @@ | |||
219 | # define EVERGREEN_CRTC_MASTER_EN (1 << 0) | 219 | # define EVERGREEN_CRTC_MASTER_EN (1 << 0) |
220 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) | 220 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) |
221 | #define EVERGREEN_CRTC_STATUS 0x6e8c | 221 | #define EVERGREEN_CRTC_STATUS 0x6e8c |
222 | # define EVERGREEN_CRTC_V_BLANK (1 << 0) | ||
222 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 | 223 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 |
223 | #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 | 224 | #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 |
224 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 | 225 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 74713d42df29..eb5708c7159d 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -77,6 +77,7 @@ | |||
77 | 77 | ||
78 | #define CONFIG_MEMSIZE 0x5428 | 78 | #define CONFIG_MEMSIZE 0x5428 |
79 | 79 | ||
80 | #define CP_COHER_BASE 0x85F8 | ||
80 | #define CP_ME_CNTL 0x86D8 | 81 | #define CP_ME_CNTL 0x86D8 |
81 | #define CP_ME_HALT (1 << 28) | 82 | #define CP_ME_HALT (1 << 28) |
82 | #define CP_PFP_HALT (1 << 26) | 83 | #define CP_PFP_HALT (1 << 26) |
@@ -925,7 +926,70 @@ | |||
925 | #define DB_DEBUG4 0x983C | 926 | #define DB_DEBUG4 0x983C |
926 | #define DB_WATERMARKS 0x9854 | 927 | #define DB_WATERMARKS 0x9854 |
927 | #define DB_DEPTH_CONTROL 0x28800 | 928 | #define DB_DEPTH_CONTROL 0x28800 |
929 | #define R_028800_DB_DEPTH_CONTROL 0x028800 | ||
930 | #define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0) | ||
931 | #define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1) | ||
932 | #define C_028800_STENCIL_ENABLE 0xFFFFFFFE | ||
933 | #define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1) | ||
934 | #define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1) | ||
935 | #define C_028800_Z_ENABLE 0xFFFFFFFD | ||
936 | #define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2) | ||
937 | #define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1) | ||
938 | #define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB | ||
939 | #define S_028800_ZFUNC(x) (((x) & 0x7) << 4) | ||
940 | #define G_028800_ZFUNC(x) (((x) >> 4) & 0x7) | ||
941 | #define C_028800_ZFUNC 0xFFFFFF8F | ||
942 | #define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7) | ||
943 | #define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1) | ||
944 | #define C_028800_BACKFACE_ENABLE 0xFFFFFF7F | ||
945 | #define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8) | ||
946 | #define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7) | ||
947 | #define C_028800_STENCILFUNC 0xFFFFF8FF | ||
948 | #define V_028800_STENCILFUNC_NEVER 0x00000000 | ||
949 | #define V_028800_STENCILFUNC_LESS 0x00000001 | ||
950 | #define V_028800_STENCILFUNC_EQUAL 0x00000002 | ||
951 | #define V_028800_STENCILFUNC_LEQUAL 0x00000003 | ||
952 | #define V_028800_STENCILFUNC_GREATER 0x00000004 | ||
953 | #define V_028800_STENCILFUNC_NOTEQUAL 0x00000005 | ||
954 | #define V_028800_STENCILFUNC_GEQUAL 0x00000006 | ||
955 | #define V_028800_STENCILFUNC_ALWAYS 0x00000007 | ||
956 | #define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11) | ||
957 | #define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7) | ||
958 | #define C_028800_STENCILFAIL 0xFFFFC7FF | ||
959 | #define V_028800_STENCIL_KEEP 0x00000000 | ||
960 | #define V_028800_STENCIL_ZERO 0x00000001 | ||
961 | #define V_028800_STENCIL_REPLACE 0x00000002 | ||
962 | #define V_028800_STENCIL_INCR 0x00000003 | ||
963 | #define V_028800_STENCIL_DECR 0x00000004 | ||
964 | #define V_028800_STENCIL_INVERT 0x00000005 | ||
965 | #define V_028800_STENCIL_INCR_WRAP 0x00000006 | ||
966 | #define V_028800_STENCIL_DECR_WRAP 0x00000007 | ||
967 | #define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14) | ||
968 | #define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7) | ||
969 | #define C_028800_STENCILZPASS 0xFFFE3FFF | ||
970 | #define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17) | ||
971 | #define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7) | ||
972 | #define C_028800_STENCILZFAIL 0xFFF1FFFF | ||
973 | #define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20) | ||
974 | #define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7) | ||
975 | #define C_028800_STENCILFUNC_BF 0xFF8FFFFF | ||
976 | #define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23) | ||
977 | #define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7) | ||
978 | #define C_028800_STENCILFAIL_BF 0xFC7FFFFF | ||
979 | #define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26) | ||
980 | #define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7) | ||
981 | #define C_028800_STENCILZPASS_BF 0xE3FFFFFF | ||
982 | #define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29) | ||
983 | #define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7) | ||
984 | #define C_028800_STENCILZFAIL_BF 0x1FFFFFFF | ||
928 | #define DB_DEPTH_VIEW 0x28008 | 985 | #define DB_DEPTH_VIEW 0x28008 |
986 | #define R_028008_DB_DEPTH_VIEW 0x00028008 | ||
987 | #define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0) | ||
988 | #define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF) | ||
989 | #define C_028008_SLICE_START 0xFFFFF800 | ||
990 | #define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13) | ||
991 | #define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF) | ||
992 | #define C_028008_SLICE_MAX 0xFF001FFF | ||
929 | #define DB_HTILE_DATA_BASE 0x28014 | 993 | #define DB_HTILE_DATA_BASE 0x28014 |
930 | #define DB_Z_INFO 0x28040 | 994 | #define DB_Z_INFO 0x28040 |
931 | # define Z_ARRAY_MODE(x) ((x) << 4) | 995 | # define Z_ARRAY_MODE(x) ((x) << 4) |
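The new R_/S_/G_/C_ definitions follow the usual radeon register-description convention: S_ shifts a field value into position, G_ extracts it, and C_ is the register mask with that field cleared, so updating one field is a mask-and-or. A short example using the DB_DEPTH_CONTROL ZFUNC macros exactly as defined above:

#include <stdint.h>
#include <stdio.h>

#define S_028800_ZFUNC(x)    (((x) & 0x7) << 4)
#define G_028800_ZFUNC(x)    (((x) >> 4) & 0x7)
#define C_028800_ZFUNC       0xFFFFFF8F

int main(void)
{
	uint32_t db_depth_control = 0x000000F7;   /* example register value */

	/* replace the ZFUNC field (bits 4..6) without touching the rest */
	db_depth_control = (db_depth_control & C_028800_ZFUNC) | S_028800_ZFUNC(3);

	printf("DB_DEPTH_CONTROL = 0x%08x, ZFUNC = %u\n",
	       db_depth_control, G_028800_ZFUNC(db_depth_control));
	return 0;
}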
@@ -933,12 +997,59 @@ | |||
933 | # define DB_NUM_BANKS(x) (((x) & 0x3) << 12) | 997 | # define DB_NUM_BANKS(x) (((x) & 0x3) << 12) |
934 | # define DB_BANK_WIDTH(x) (((x) & 0x3) << 16) | 998 | # define DB_BANK_WIDTH(x) (((x) & 0x3) << 16) |
935 | # define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20) | 999 | # define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20) |
1000 | # define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24) | ||
1001 | #define R_028040_DB_Z_INFO 0x028040 | ||
1002 | #define S_028040_FORMAT(x) (((x) & 0x3) << 0) | ||
1003 | #define G_028040_FORMAT(x) (((x) >> 0) & 0x3) | ||
1004 | #define C_028040_FORMAT 0xFFFFFFFC | ||
1005 | #define V_028040_Z_INVALID 0x00000000 | ||
1006 | #define V_028040_Z_16 0x00000001 | ||
1007 | #define V_028040_Z_24 0x00000002 | ||
1008 | #define V_028040_Z_32_FLOAT 0x00000003 | ||
1009 | #define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4) | ||
1010 | #define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF) | ||
1011 | #define C_028040_ARRAY_MODE 0xFFFFFF0F | ||
1012 | #define S_028040_READ_SIZE(x) (((x) & 0x1) << 28) | ||
1013 | #define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1) | ||
1014 | #define C_028040_READ_SIZE 0xEFFFFFFF | ||
1015 | #define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29) | ||
1016 | #define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1) | ||
1017 | #define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF | ||
1018 | #define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31) | ||
1019 | #define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1) | ||
1020 | #define C_028040_ZRANGE_PRECISION 0x7FFFFFFF | ||
1021 | #define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8) | ||
1022 | #define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7) | ||
1023 | #define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12) | ||
1024 | #define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3) | ||
1025 | #define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16) | ||
1026 | #define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3) | ||
1027 | #define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20) | ||
1028 | #define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3) | ||
1029 | #define S_028040_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24) | ||
1030 | #define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3) | ||
936 | #define DB_STENCIL_INFO 0x28044 | 1031 | #define DB_STENCIL_INFO 0x28044 |
1032 | #define R_028044_DB_STENCIL_INFO 0x028044 | ||
1033 | #define S_028044_FORMAT(x) (((x) & 0x1) << 0) | ||
1034 | #define G_028044_FORMAT(x) (((x) >> 0) & 0x1) | ||
1035 | #define C_028044_FORMAT 0xFFFFFFFE | ||
1036 | #define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) | ||
937 | #define DB_Z_READ_BASE 0x28048 | 1037 | #define DB_Z_READ_BASE 0x28048 |
938 | #define DB_STENCIL_READ_BASE 0x2804c | 1038 | #define DB_STENCIL_READ_BASE 0x2804c |
939 | #define DB_Z_WRITE_BASE 0x28050 | 1039 | #define DB_Z_WRITE_BASE 0x28050 |
940 | #define DB_STENCIL_WRITE_BASE 0x28054 | 1040 | #define DB_STENCIL_WRITE_BASE 0x28054 |
941 | #define DB_DEPTH_SIZE 0x28058 | 1041 | #define DB_DEPTH_SIZE 0x28058 |
1042 | #define R_028058_DB_DEPTH_SIZE 0x028058 | ||
1043 | #define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0) | ||
1044 | #define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF) | ||
1045 | #define C_028058_PITCH_TILE_MAX 0xFFFFF800 | ||
1046 | #define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11) | ||
1047 | #define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF) | ||
1048 | #define C_028058_HEIGHT_TILE_MAX 0xFFC007FF | ||
1049 | #define R_02805C_DB_DEPTH_SLICE 0x02805C | ||
1050 | #define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0) | ||
1051 | #define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF) | ||
1052 | #define C_02805C_SLICE_TILE_MAX 0xFFC00000 | ||
942 | 1053 | ||
943 | #define SQ_PGM_START_PS 0x28840 | 1054 | #define SQ_PGM_START_PS 0x28840 |
944 | #define SQ_PGM_START_VS 0x2885c | 1055 | #define SQ_PGM_START_VS 0x2885c |
@@ -948,6 +1059,14 @@ | |||
948 | #define SQ_PGM_START_HS 0x288b8 | 1059 | #define SQ_PGM_START_HS 0x288b8 |
949 | #define SQ_PGM_START_LS 0x288d0 | 1060 | #define SQ_PGM_START_LS 0x288d0 |
950 | 1061 | ||
1062 | #define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8 | ||
1063 | #define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8 | ||
1064 | #define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8 | ||
1065 | #define VGT_STRMOUT_BUFFER_BASE_3 0x28B08 | ||
1066 | #define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0 | ||
1067 | #define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0 | ||
1068 | #define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0 | ||
1069 | #define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00 | ||
951 | #define VGT_STRMOUT_CONFIG 0x28b94 | 1070 | #define VGT_STRMOUT_CONFIG 0x28b94 |
952 | #define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 | 1071 | #define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 |
953 | 1072 | ||
@@ -974,6 +1093,114 @@ | |||
974 | #define CB_COLOR0_PITCH 0x28c64 | 1093 | #define CB_COLOR0_PITCH 0x28c64 |
975 | #define CB_COLOR0_SLICE 0x28c68 | 1094 | #define CB_COLOR0_SLICE 0x28c68 |
976 | #define CB_COLOR0_VIEW 0x28c6c | 1095 | #define CB_COLOR0_VIEW 0x28c6c |
1096 | #define R_028C6C_CB_COLOR0_VIEW 0x00028C6C | ||
1097 | #define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0) | ||
1098 | #define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF) | ||
1099 | #define C_028C6C_SLICE_START 0xFFFFF800 | ||
1100 | #define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13) | ||
1101 | #define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF) | ||
1102 | #define C_028C6C_SLICE_MAX 0xFF001FFF | ||
1103 | #define R_028C70_CB_COLOR0_INFO 0x028C70 | ||
1104 | #define S_028C70_ENDIAN(x) (((x) & 0x3) << 0) | ||
1105 | #define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3) | ||
1106 | #define C_028C70_ENDIAN 0xFFFFFFFC | ||
1107 | #define S_028C70_FORMAT(x) (((x) & 0x3F) << 2) | ||
1108 | #define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F) | ||
1109 | #define C_028C70_FORMAT 0xFFFFFF03 | ||
1110 | #define V_028C70_COLOR_INVALID 0x00000000 | ||
1111 | #define V_028C70_COLOR_8 0x00000001 | ||
1112 | #define V_028C70_COLOR_4_4 0x00000002 | ||
1113 | #define V_028C70_COLOR_3_3_2 0x00000003 | ||
1114 | #define V_028C70_COLOR_16 0x00000005 | ||
1115 | #define V_028C70_COLOR_16_FLOAT 0x00000006 | ||
1116 | #define V_028C70_COLOR_8_8 0x00000007 | ||
1117 | #define V_028C70_COLOR_5_6_5 0x00000008 | ||
1118 | #define V_028C70_COLOR_6_5_5 0x00000009 | ||
1119 | #define V_028C70_COLOR_1_5_5_5 0x0000000A | ||
1120 | #define V_028C70_COLOR_4_4_4_4 0x0000000B | ||
1121 | #define V_028C70_COLOR_5_5_5_1 0x0000000C | ||
1122 | #define V_028C70_COLOR_32 0x0000000D | ||
1123 | #define V_028C70_COLOR_32_FLOAT 0x0000000E | ||
1124 | #define V_028C70_COLOR_16_16 0x0000000F | ||
1125 | #define V_028C70_COLOR_16_16_FLOAT 0x00000010 | ||
1126 | #define V_028C70_COLOR_8_24 0x00000011 | ||
1127 | #define V_028C70_COLOR_8_24_FLOAT 0x00000012 | ||
1128 | #define V_028C70_COLOR_24_8 0x00000013 | ||
1129 | #define V_028C70_COLOR_24_8_FLOAT 0x00000014 | ||
1130 | #define V_028C70_COLOR_10_11_11 0x00000015 | ||
1131 | #define V_028C70_COLOR_10_11_11_FLOAT 0x00000016 | ||
1132 | #define V_028C70_COLOR_11_11_10 0x00000017 | ||
1133 | #define V_028C70_COLOR_11_11_10_FLOAT 0x00000018 | ||
1134 | #define V_028C70_COLOR_2_10_10_10 0x00000019 | ||
1135 | #define V_028C70_COLOR_8_8_8_8 0x0000001A | ||
1136 | #define V_028C70_COLOR_10_10_10_2 0x0000001B | ||
1137 | #define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C | ||
1138 | #define V_028C70_COLOR_32_32 0x0000001D | ||
1139 | #define V_028C70_COLOR_32_32_FLOAT 0x0000001E | ||
1140 | #define V_028C70_COLOR_16_16_16_16 0x0000001F | ||
1141 | #define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020 | ||
1142 | #define V_028C70_COLOR_32_32_32_32 0x00000022 | ||
1143 | #define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023 | ||
1144 | #define V_028C70_COLOR_32_32_32_FLOAT 0x00000030 | ||
1145 | #define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8) | ||
1146 | #define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF) | ||
1147 | #define C_028C70_ARRAY_MODE 0xFFFFF0FF | ||
1148 | #define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000 | ||
1149 | #define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001 | ||
1150 | #define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002 | ||
1151 | #define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004 | ||
1152 | #define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12) | ||
1153 | #define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7) | ||
1154 | #define C_028C70_NUMBER_TYPE 0xFFFF8FFF | ||
1155 | #define V_028C70_NUMBER_UNORM 0x00000000 | ||
1156 | #define V_028C70_NUMBER_SNORM 0x00000001 | ||
1157 | #define V_028C70_NUMBER_USCALED 0x00000002 | ||
1158 | #define V_028C70_NUMBER_SSCALED 0x00000003 | ||
1159 | #define V_028C70_NUMBER_UINT 0x00000004 | ||
1160 | #define V_028C70_NUMBER_SINT 0x00000005 | ||
1161 | #define V_028C70_NUMBER_SRGB 0x00000006 | ||
1162 | #define V_028C70_NUMBER_FLOAT 0x00000007 | ||
1163 | #define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15) | ||
1164 | #define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3) | ||
1165 | #define C_028C70_COMP_SWAP 0xFFFE7FFF | ||
1166 | #define V_028C70_SWAP_STD 0x00000000 | ||
1167 | #define V_028C70_SWAP_ALT 0x00000001 | ||
1168 | #define V_028C70_SWAP_STD_REV 0x00000002 | ||
1169 | #define V_028C70_SWAP_ALT_REV 0x00000003 | ||
1170 | #define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17) | ||
1171 | #define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1) | ||
1172 | #define C_028C70_FAST_CLEAR 0xFFFDFFFF | ||
1173 | #define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18) | ||
1174 | #define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3) | ||
1175 | #define C_028C70_COMPRESSION 0xFFF3FFFF | ||
1176 | #define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19) | ||
1177 | #define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1) | ||
1178 | #define C_028C70_BLEND_CLAMP 0xFFF7FFFF | ||
1179 | #define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20) | ||
1180 | #define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1) | ||
1181 | #define C_028C70_BLEND_BYPASS 0xFFEFFFFF | ||
1182 | #define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21) | ||
1183 | #define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1) | ||
1184 | #define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF | ||
1185 | #define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22) | ||
1186 | #define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1) | ||
1187 | #define C_028C70_ROUND_MODE 0xFFBFFFFF | ||
1188 | #define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23) | ||
1189 | #define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1) | ||
1190 | #define C_028C70_TILE_COMPACT 0xFF7FFFFF | ||
1191 | #define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24) | ||
1192 | #define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3) | ||
1193 | #define C_028C70_SOURCE_FORMAT 0xFCFFFFFF | ||
1194 | #define V_028C70_EXPORT_4C_32BPC 0x0 | ||
1195 | #define V_028C70_EXPORT_4C_16BPC 0x1 | ||
1196 | #define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */ | ||
1197 | #define S_028C70_RAT(x) (((x) & 0x1) << 26) | ||
1198 | #define G_028C70_RAT(x) (((x) >> 26) & 0x1) | ||
1199 | #define C_028C70_RAT 0xFBFFFFFF | ||
1200 | #define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27) | ||
1201 | #define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7) | ||
1202 | #define C_028C70_RESOURCE_TYPE 0xC7FFFFFF | ||
1203 | |||
977 | #define CB_COLOR0_INFO 0x28c70 | 1204 | #define CB_COLOR0_INFO 0x28c70 |
978 | # define CB_FORMAT(x) ((x) << 2) | 1205 | # define CB_FORMAT(x) ((x) << 2) |
979 | # define CB_ARRAY_MODE(x) ((x) << 8) | 1206 | # define CB_ARRAY_MODE(x) ((x) << 8) |
@@ -984,6 +1211,20 @@ | |||
984 | # define CB_SOURCE_FORMAT(x) ((x) << 24) | 1211 | # define CB_SOURCE_FORMAT(x) ((x) << 24) |
985 | # define CB_SF_EXPORT_FULL 0 | 1212 | # define CB_SF_EXPORT_FULL 0 |
986 | # define CB_SF_EXPORT_NORM 1 | 1213 | # define CB_SF_EXPORT_NORM 1 |
1214 | #define R_028C74_CB_COLOR0_ATTRIB 0x028C74 | ||
1215 | #define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4) | ||
1216 | #define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1) | ||
1217 | #define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF | ||
1218 | #define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5) | ||
1219 | #define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf) | ||
1220 | #define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10) | ||
1221 | #define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3) | ||
1222 | #define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13) | ||
1223 | #define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3) | ||
1224 | #define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16) | ||
1225 | #define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3) | ||
1226 | #define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19) | ||
1227 | #define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3) | ||
987 | #define CB_COLOR0_ATTRIB 0x28c74 | 1228 | #define CB_COLOR0_ATTRIB 0x28c74 |
988 | # define CB_TILE_SPLIT(x) (((x) & 0x7) << 5) | 1229 | # define CB_TILE_SPLIT(x) (((x) & 0x7) << 5) |
989 | # define ADDR_SURF_TILE_SPLIT_64B 0 | 1230 | # define ADDR_SURF_TILE_SPLIT_64B 0 |
@@ -1008,6 +1249,7 @@ | |||
1008 | # define ADDR_SURF_BANK_HEIGHT_2 1 | 1249 | # define ADDR_SURF_BANK_HEIGHT_2 1 |
1009 | # define ADDR_SURF_BANK_HEIGHT_4 2 | 1250 | # define ADDR_SURF_BANK_HEIGHT_4 2 |
1010 | # define ADDR_SURF_BANK_HEIGHT_8 3 | 1251 | # define ADDR_SURF_BANK_HEIGHT_8 3 |
1252 | # define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19) | ||
1011 | #define CB_COLOR0_DIM 0x28c78 | 1253 | #define CB_COLOR0_DIM 0x28c78 |
1012 | /* only CB0-7 blocks have these regs */ | 1254 | /* only CB0-7 blocks have these regs */ |
1013 | #define CB_COLOR0_CMASK 0x28c7c | 1255 | #define CB_COLOR0_CMASK 0x28c7c |
@@ -1196,9 +1438,144 @@ | |||
1196 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 | 1438 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 |
1197 | # define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29) | 1439 | # define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29) |
1198 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c | 1440 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c |
1441 | # define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6) | ||
1199 | # define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8) | 1442 | # define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8) |
1200 | # define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10) | 1443 | # define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10) |
1201 | # define TEX_NUM_BANKS(x) (((x) & 0x3) << 16) | 1444 | # define TEX_NUM_BANKS(x) (((x) & 0x3) << 16) |
1445 | #define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000 | ||
1446 | #define S_030000_DIM(x) (((x) & 0x7) << 0) | ||
1447 | #define G_030000_DIM(x) (((x) >> 0) & 0x7) | ||
1448 | #define C_030000_DIM 0xFFFFFFF8 | ||
1449 | #define V_030000_SQ_TEX_DIM_1D 0x00000000 | ||
1450 | #define V_030000_SQ_TEX_DIM_2D 0x00000001 | ||
1451 | #define V_030000_SQ_TEX_DIM_3D 0x00000002 | ||
1452 | #define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003 | ||
1453 | #define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004 | ||
1454 | #define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005 | ||
1455 | #define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006 | ||
1456 | #define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007 | ||
1457 | #define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5) | ||
1458 | #define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1) | ||
1459 | #define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF | ||
1460 | #define S_030000_PITCH(x) (((x) & 0xFFF) << 6) | ||
1461 | #define G_030000_PITCH(x) (((x) >> 6) & 0xFFF) | ||
1462 | #define C_030000_PITCH 0xFFFC003F | ||
1463 | #define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18) | ||
1464 | #define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF) | ||
1465 | #define C_030000_TEX_WIDTH 0x0003FFFF | ||
1466 | #define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004 | ||
1467 | #define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0) | ||
1468 | #define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF) | ||
1469 | #define C_030004_TEX_HEIGHT 0xFFFFC000 | ||
1470 | #define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14) | ||
1471 | #define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF) | ||
1472 | #define C_030004_TEX_DEPTH 0xF8003FFF | ||
1473 | #define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28) | ||
1474 | #define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF) | ||
1475 | #define C_030004_ARRAY_MODE 0x0FFFFFFF | ||
1476 | #define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008 | ||
1477 | #define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
1478 | #define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
1479 | #define C_030008_BASE_ADDRESS 0x00000000 | ||
1480 | #define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C | ||
1481 | #define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0) | ||
1482 | #define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF) | ||
1483 | #define C_03000C_MIP_ADDRESS 0x00000000 | ||
1484 | #define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010 | ||
1485 | #define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) | ||
1486 | #define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) | ||
1487 | #define C_030010_FORMAT_COMP_X 0xFFFFFFFC | ||
1488 | #define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000 | ||
1489 | #define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001 | ||
1490 | #define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002 | ||
1491 | #define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2) | ||
1492 | #define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3) | ||
1493 | #define C_030010_FORMAT_COMP_Y 0xFFFFFFF3 | ||
1494 | #define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4) | ||
1495 | #define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3) | ||
1496 | #define C_030010_FORMAT_COMP_Z 0xFFFFFFCF | ||
1497 | #define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6) | ||
1498 | #define G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3) | ||
1499 | #define C_030010_FORMAT_COMP_W 0xFFFFFF3F | ||
1500 | #define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8) | ||
1501 | #define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3) | ||
1502 | #define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF | ||
1503 | #define V_030010_SQ_NUM_FORMAT_NORM 0x00000000 | ||
1504 | #define V_030010_SQ_NUM_FORMAT_INT 0x00000001 | ||
1505 | #define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002 | ||
1506 | #define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10) | ||
1507 | #define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1) | ||
1508 | #define C_030010_SRF_MODE_ALL 0xFFFFFBFF | ||
1509 | #define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000 | ||
1510 | #define V_030010_SRF_MODE_NO_ZERO 0x00000001 | ||
1511 | #define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11) | ||
1512 | #define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1) | ||
1513 | #define C_030010_FORCE_DEGAMMA 0xFFFFF7FF | ||
1514 | #define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12) | ||
1515 | #define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3) | ||
1516 | #define C_030010_ENDIAN_SWAP 0xFFFFCFFF | ||
1517 | #define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16) | ||
1518 | #define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7) | ||
1519 | #define C_030010_DST_SEL_X 0xFFF8FFFF | ||
1520 | #define V_030010_SQ_SEL_X 0x00000000 | ||
1521 | #define V_030010_SQ_SEL_Y 0x00000001 | ||
1522 | #define V_030010_SQ_SEL_Z 0x00000002 | ||
1523 | #define V_030010_SQ_SEL_W 0x00000003 | ||
1524 | #define V_030010_SQ_SEL_0 0x00000004 | ||
1525 | #define V_030010_SQ_SEL_1 0x00000005 | ||
1526 | #define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19) | ||
1527 | #define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7) | ||
1528 | #define C_030010_DST_SEL_Y 0xFFC7FFFF | ||
1529 | #define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22) | ||
1530 | #define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7) | ||
1531 | #define C_030010_DST_SEL_Z 0xFE3FFFFF | ||
1532 | #define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25) | ||
1533 | #define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7) | ||
1534 | #define C_030010_DST_SEL_W 0xF1FFFFFF | ||
1535 | #define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28) | ||
1536 | #define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF) | ||
1537 | #define C_030010_BASE_LEVEL 0x0FFFFFFF | ||
1538 | #define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014 | ||
1539 | #define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0) | ||
1540 | #define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF) | ||
1541 | #define C_030014_LAST_LEVEL 0xFFFFFFF0 | ||
1542 | #define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4) | ||
1543 | #define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF) | ||
1544 | #define C_030014_BASE_ARRAY 0xFFFE000F | ||
1545 | #define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17) | ||
1546 | #define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF) | ||
1547 | #define C_030014_LAST_ARRAY 0xC001FFFF | ||
1548 | #define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018 | ||
1549 | #define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0) | ||
1550 | #define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7) | ||
1551 | #define C_030018_MAX_ANISO 0xFFFFFFF8 | ||
1552 | #define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3) | ||
1553 | #define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7) | ||
1554 | #define C_030018_PERF_MODULATION 0xFFFFFFC7 | ||
1555 | #define S_030018_INTERLACED(x) (((x) & 0x1) << 6) | ||
1556 | #define G_030018_INTERLACED(x) (((x) >> 6) & 0x1) | ||
1557 | #define C_030018_INTERLACED 0xFFFFFFBF | ||
1558 | #define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29) | ||
1559 | #define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7) | ||
1560 | #define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C | ||
1561 | #define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6) | ||
1562 | #define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3) | ||
1563 | #define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8) | ||
1564 | #define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3) | ||
1565 | #define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10) | ||
1566 | #define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3) | ||
1567 | #define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16) | ||
1568 | #define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3) | ||
1569 | #define S_03001C_TYPE(x) (((x) & 0x3) << 30) | ||
1570 | #define G_03001C_TYPE(x) (((x) >> 30) & 0x3) | ||
1571 | #define C_03001C_TYPE 0x3FFFFFFF | ||
1572 | #define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000 | ||
1573 | #define V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001 | ||
1574 | #define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002 | ||
1575 | #define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003 | ||
1576 | #define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0) | ||
1577 | #define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F) | ||
1578 | #define C_03001C_DATA_FORMAT 0xFFFFFFC0 | ||
1202 | 1579 | ||
1203 | #define SQ_VTX_CONSTANT_WORD0_0 0x30000 | 1580 | #define SQ_VTX_CONSTANT_WORD0_0 0x30000 |
1204 | #define SQ_VTX_CONSTANT_WORD1_0 0x30004 | 1581 | #define SQ_VTX_CONSTANT_WORD1_0 0x30004 |
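The R_/S_/G_/C_/V_ definitions added above follow this header's usual three-macro pattern: for each register field, S_ shifts a value into place, G_ extracts it, and C_ (where present) is the AND-mask that clears it, with V_ constants naming the legal field values. The stand-alone C sketch below reuses the PITCH macros copied verbatim from the hunk above to show the read-modify-write idiom these macros exist for; the sample values are invented.

```c
/* Stand-alone userspace sketch; the three macros are copied verbatim from
 * the SQ_TEX_RESOURCE_WORD0_0 definitions added in the hunk above. */
#include <stdio.h>
#include <stdint.h>

#define S_030000_PITCH(x)   (((x) & 0xFFF) << 6)
#define G_030000_PITCH(x)   (((x) >> 6) & 0xFFF)
#define C_030000_PITCH      0xFFFC003F

int main(void)
{
    uint32_t word0 = 0xDEADBEEF;   /* pretend this came from the command stream */

    /* clear the old PITCH field, then shift the new value into place */
    word0 = (word0 & C_030000_PITCH) | S_030000_PITCH(128);

    /* reads back 128 */
    printf("PITCH = %u\n", (unsigned)G_030000_PITCH(word0));
    return 0;
}
```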
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 2509c505acb8..160799c14b91 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -1318,7 +1318,7 @@ int cayman_cp_resume(struct radeon_device *rdev) | |||
1318 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | 1318 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; |
1319 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | 1319 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; |
1320 | /* this only test cp0 */ | 1320 | /* this only test cp0 */ |
1321 | r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); | 1321 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1322 | if (r) { | 1322 | if (r) { |
1323 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1323 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1324 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | 1324 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; |
@@ -1466,7 +1466,7 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1466 | r = evergreen_blit_init(rdev); | 1466 | r = evergreen_blit_init(rdev); |
1467 | if (r) { | 1467 | if (r) { |
1468 | r600_blit_fini(rdev); | 1468 | r600_blit_fini(rdev); |
1469 | rdev->asic->copy = NULL; | 1469 | rdev->asic->copy.copy = NULL; |
1470 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 1470 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
1471 | } | 1471 | } |
1472 | 1472 | ||
@@ -1518,7 +1518,7 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1518 | if (r) | 1518 | if (r) |
1519 | return r; | 1519 | return r; |
1520 | 1520 | ||
1521 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | 1521 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1522 | if (r) { | 1522 | if (r) { |
1523 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 1523 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
1524 | rdev->accel_working = false; | 1524 | rdev->accel_working = false; |
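Both ni.c hunks reflect interface changes threaded through the rest of this series: radeon_ring_test() and radeon_ib_test() now take the ring index alongside the ring pointer, and the asic copy callback has moved into a copy sub-struct (rdev->asic->copy.copy). Below is a simplified, self-contained model of the new index-plus-ring calling convention; the types and the helper body are invented for illustration and are not the real radeon structures.

```c
#include <stdio.h>

/* invented stand-in types, only to show the "index + matching ring" pattern */
#define RADEON_RING_TYPE_GFX_INDEX  0
#define RADEON_NUM_RINGS            3

struct radeon_ring { int ready; };
struct radeon_device { struct radeon_ring ring[RADEON_NUM_RINGS]; };

static int radeon_ring_test(struct radeon_device *rdev, int ring_index,
                            struct radeon_ring *ring)
{
    (void)rdev;
    /* a real test writes a scratch register through the ring and polls it
     * back; here we only flag success */
    ring->ready = 1;
    printf("ring %d ok\n", ring_index);
    return 0;
}

int main(void)
{
    struct radeon_device rdev = { 0 };

    /* callers pass the index and the ring it selects, as in cayman_cp_resume() */
    return radeon_ring_test(&rdev, RADEON_RING_TYPE_GFX_INDEX,
                            &rdev.ring[RADEON_RING_TYPE_GFX_INDEX]);
}
```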
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 333cde9d4e7b..81801c176aa5 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -65,6 +65,40 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
65 | 65 | ||
66 | #include "r100_track.h" | 66 | #include "r100_track.h" |
67 | 67 | ||
68 | void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) | ||
69 | { | ||
70 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
71 | int i; | ||
72 | |||
73 | if (radeon_crtc->crtc_id == 0) { | ||
74 | if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { | ||
75 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
76 | if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)) | ||
77 | break; | ||
78 | udelay(1); | ||
79 | } | ||
80 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
81 | if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) | ||
82 | break; | ||
83 | udelay(1); | ||
84 | } | ||
85 | } | ||
86 | } else { | ||
87 | if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) { | ||
88 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
89 | if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)) | ||
90 | break; | ||
91 | udelay(1); | ||
92 | } | ||
93 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
94 | if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) | ||
95 | break; | ||
96 | udelay(1); | ||
97 | } | ||
98 | } | ||
99 | } | ||
100 | } | ||
101 | |||
68 | /* This files gather functions specifics to: | 102 | /* This files gather functions specifics to: |
69 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 103 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
70 | */ | 104 | */ |
@@ -87,23 +121,27 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, | |||
87 | r100_cs_dump_packet(p, pkt); | 121 | r100_cs_dump_packet(p, pkt); |
88 | return r; | 122 | return r; |
89 | } | 123 | } |
124 | |||
90 | value = radeon_get_ib_value(p, idx); | 125 | value = radeon_get_ib_value(p, idx); |
91 | tmp = value & 0x003fffff; | 126 | tmp = value & 0x003fffff; |
92 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 127 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
93 | 128 | ||
94 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 129 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
95 | tile_flags |= RADEON_DST_TILE_MACRO; | 130 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
96 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | 131 | tile_flags |= RADEON_DST_TILE_MACRO; |
97 | if (reg == RADEON_SRC_PITCH_OFFSET) { | 132 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { |
98 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | 133 | if (reg == RADEON_SRC_PITCH_OFFSET) { |
99 | r100_cs_dump_packet(p, pkt); | 134 | DRM_ERROR("Cannot src blit from microtiled surface\n"); |
100 | return -EINVAL; | 135 | r100_cs_dump_packet(p, pkt); |
136 | return -EINVAL; | ||
137 | } | ||
138 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
101 | } | 139 | } |
102 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
103 | } | ||
104 | 140 | ||
105 | tmp |= tile_flags; | 141 | tmp |= tile_flags; |
106 | p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; | 142 | p->ib->ptr[idx] = (value & 0x3fc00000) | tmp; |
143 | } else | ||
144 | p->ib->ptr[idx] = (value & 0xffc00000) | tmp; | ||
107 | return 0; | 145 | return 0; |
108 | } | 146 | } |
109 | 147 | ||
@@ -412,7 +450,7 @@ void r100_pm_misc(struct radeon_device *rdev) | |||
412 | /* set pcie lanes */ | 450 | /* set pcie lanes */ |
413 | if ((rdev->flags & RADEON_IS_PCIE) && | 451 | if ((rdev->flags & RADEON_IS_PCIE) && |
414 | !(rdev->flags & RADEON_IS_IGP) && | 452 | !(rdev->flags & RADEON_IS_IGP) && |
415 | rdev->asic->set_pcie_lanes && | 453 | rdev->asic->pm.set_pcie_lanes && |
416 | (ps->pcie_lanes != | 454 | (ps->pcie_lanes != |
417 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { | 455 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { |
418 | radeon_set_pcie_lanes(rdev, | 456 | radeon_set_pcie_lanes(rdev, |
@@ -592,8 +630,8 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
592 | if (r) | 630 | if (r) |
593 | return r; | 631 | return r; |
594 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 632 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
595 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | 633 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
596 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | 634 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
597 | return radeon_gart_table_ram_alloc(rdev); | 635 | return radeon_gart_table_ram_alloc(rdev); |
598 | } | 636 | } |
599 | 637 | ||
@@ -930,9 +968,8 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev) | |||
930 | return -1; | 968 | return -1; |
931 | } | 969 | } |
932 | 970 | ||
933 | void r100_ring_start(struct radeon_device *rdev) | 971 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) |
934 | { | 972 | { |
935 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | ||
936 | int r; | 973 | int r; |
937 | 974 | ||
938 | r = radeon_ring_lock(rdev, ring, 2); | 975 | r = radeon_ring_lock(rdev, ring, 2); |
@@ -1143,8 +1180,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1143 | WREG32(RADEON_CP_RB_WPTR_DELAY, 0); | 1180 | WREG32(RADEON_CP_RB_WPTR_DELAY, 0); |
1144 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); | 1181 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
1145 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); | 1182 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
1146 | radeon_ring_start(rdev); | 1183 | radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1147 | r = radeon_ring_test(rdev, ring); | 1184 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
1148 | if (r) { | 1185 | if (r) { |
1149 | DRM_ERROR("radeon: cp isn't working (%d).\n", r); | 1186 | DRM_ERROR("radeon: cp isn't working (%d).\n", r); |
1150 | return r; | 1187 | return r; |
@@ -1552,7 +1589,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1552 | r100_cs_dump_packet(p, pkt); | 1589 | r100_cs_dump_packet(p, pkt); |
1553 | return r; | 1590 | return r; |
1554 | } | 1591 | } |
1555 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1592 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
1593 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1594 | tile_flags |= RADEON_TXO_MACRO_TILE; | ||
1595 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1596 | tile_flags |= RADEON_TXO_MICRO_TILE_X2; | ||
1597 | |||
1598 | tmp = idx_value & ~(0x7 << 2); | ||
1599 | tmp |= tile_flags; | ||
1600 | ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); | ||
1601 | } else | ||
1602 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | ||
1556 | track->textures[i].robj = reloc->robj; | 1603 | track->textures[i].robj = reloc->robj; |
1557 | track->tex_dirty = true; | 1604 | track->tex_dirty = true; |
1558 | break; | 1605 | break; |
@@ -1623,15 +1670,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1623 | r100_cs_dump_packet(p, pkt); | 1670 | r100_cs_dump_packet(p, pkt); |
1624 | return r; | 1671 | return r; |
1625 | } | 1672 | } |
1626 | 1673 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { | |
1627 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1674 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
1628 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | 1675 | tile_flags |= RADEON_COLOR_TILE_ENABLE; |
1629 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1676 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
1630 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 1677 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
1631 | 1678 | ||
1632 | tmp = idx_value & ~(0x7 << 16); | 1679 | tmp = idx_value & ~(0x7 << 16); |
1633 | tmp |= tile_flags; | 1680 | tmp |= tile_flags; |
1634 | ib[idx] = tmp; | 1681 | ib[idx] = tmp; |
1682 | } else | ||
1683 | ib[idx] = idx_value; | ||
1635 | 1684 | ||
1636 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 1685 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1637 | track->cb_dirty = true; | 1686 | track->cb_dirty = true; |
@@ -3691,7 +3740,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
3691 | radeon_ring_write(ring, ib->length_dw); | 3740 | radeon_ring_write(ring, ib->length_dw); |
3692 | } | 3741 | } |
3693 | 3742 | ||
3694 | int r100_ib_test(struct radeon_device *rdev) | 3743 | int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
3695 | { | 3744 | { |
3696 | struct radeon_ib *ib; | 3745 | struct radeon_ib *ib; |
3697 | uint32_t scratch; | 3746 | uint32_t scratch; |
@@ -3916,7 +3965,7 @@ static int r100_startup(struct radeon_device *rdev) | |||
3916 | if (r) | 3965 | if (r) |
3917 | return r; | 3966 | return r; |
3918 | 3967 | ||
3919 | r = r100_ib_test(rdev); | 3968 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
3920 | if (r) { | 3969 | if (r) { |
3921 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 3970 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
3922 | rdev->accel_working = false; | 3971 | rdev->accel_working = false; |
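The recurring change in the r100.c relocation checks (and in the r200.c ones that follow) is the new RADEON_CS_KEEP_TILING_FLAGS handling: when userspace sets that chunk flag, the checker leaves the tiling bits of the submitted dword alone instead of rewriting them from the BO's tiling state. A condensed, self-contained sketch of that branch structure follows; the flag and tile-bit values are invented placeholders, and the microtiled-src-blit error path is omitted.

```c
#include <stdio.h>
#include <stdint.h>

/* invented placeholder values -- the real flags live in the radeon headers */
#define RADEON_CS_KEEP_TILING_FLAGS  (1u << 0)
#define RADEON_TILING_MACRO          (1u << 0)
#define RADEON_TILING_MICRO          (1u << 1)
#define RADEON_DST_TILE_MACRO        (1u << 30)
#define RADEON_DST_TILE_MICRO        (1u << 31)

/* condensed shape of the r100 pitch/offset fixup: either rewrite the tiling
 * bits from the BO's tiling state, or trust the value userspace submitted */
static uint32_t patch_pitch_offset(uint32_t cs_flags, uint32_t value,
                                   uint32_t bo_tiling, uint32_t gpu_offset)
{
    uint32_t tmp = (value & 0x003fffff) + (gpu_offset >> 10);

    if (!(cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
        uint32_t tile_flags = 0;

        if (bo_tiling & RADEON_TILING_MACRO)
            tile_flags |= RADEON_DST_TILE_MACRO;
        if (bo_tiling & RADEON_TILING_MICRO)
            tile_flags |= RADEON_DST_TILE_MICRO;
        return (value & 0x3fc00000) | tmp | tile_flags;
    }
    /* keep whatever tiling bits userspace put in the upper dword bits */
    return (value & 0xffc00000) | tmp;
}

int main(void)
{
    printf("0x%08x\n", (unsigned)patch_pitch_offset(0, 0x00000010,
                                                    RADEON_TILING_MACRO, 0x1000));
    printf("0x%08x\n", (unsigned)patch_pitch_offset(RADEON_CS_KEEP_TILING_FLAGS,
                                                    0xc0000010, 0, 0x1000));
    return 0;
}
```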
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index eba4cbfa78f6..a59cc474d537 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -215,7 +215,17 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
215 | r100_cs_dump_packet(p, pkt); | 215 | r100_cs_dump_packet(p, pkt); |
216 | return r; | 216 | return r; |
217 | } | 217 | } |
218 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 218 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
219 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
220 | tile_flags |= R200_TXO_MACRO_TILE; | ||
221 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
222 | tile_flags |= R200_TXO_MICRO_TILE; | ||
223 | |||
224 | tmp = idx_value & ~(0x7 << 2); | ||
225 | tmp |= tile_flags; | ||
226 | ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); | ||
227 | } else | ||
228 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | ||
219 | track->textures[i].robj = reloc->robj; | 229 | track->textures[i].robj = reloc->robj; |
220 | track->tex_dirty = true; | 230 | track->tex_dirty = true; |
221 | break; | 231 | break; |
@@ -277,14 +287,17 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
277 | return r; | 287 | return r; |
278 | } | 288 | } |
279 | 289 | ||
280 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 290 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
281 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | 291 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
282 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 292 | tile_flags |= RADEON_COLOR_TILE_ENABLE; |
283 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | 293 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
294 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | ||
284 | 295 | ||
285 | tmp = idx_value & ~(0x7 << 16); | 296 | tmp = idx_value & ~(0x7 << 16); |
286 | tmp |= tile_flags; | 297 | tmp |= tile_flags; |
287 | ib[idx] = tmp; | 298 | ib[idx] = tmp; |
299 | } else | ||
300 | ib[idx] = idx_value; | ||
288 | 301 | ||
289 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 302 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
290 | track->cb_dirty = true; | 303 | track->cb_dirty = true; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 6829638cca40..fa14383f9ca0 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -105,8 +105,8 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
105 | if (r) | 105 | if (r) |
106 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); | 106 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); |
107 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 107 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
108 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | 108 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
109 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | 109 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
110 | return radeon_gart_table_vram_alloc(rdev); | 110 | return radeon_gart_table_vram_alloc(rdev); |
111 | } | 111 | } |
112 | 112 | ||
@@ -206,9 +206,8 @@ void r300_fence_ring_emit(struct radeon_device *rdev, | |||
206 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); | 206 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); |
207 | } | 207 | } |
208 | 208 | ||
209 | void r300_ring_start(struct radeon_device *rdev) | 209 | void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) |
210 | { | 210 | { |
211 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | ||
212 | unsigned gb_tile_config; | 211 | unsigned gb_tile_config; |
213 | int r; | 212 | int r; |
214 | 213 | ||
@@ -1419,7 +1418,7 @@ static int r300_startup(struct radeon_device *rdev) | |||
1419 | if (r) | 1418 | if (r) |
1420 | return r; | 1419 | return r; |
1421 | 1420 | ||
1422 | r = r100_ib_test(rdev); | 1421 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1423 | if (r) { | 1422 | if (r) { |
1424 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 1423 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
1425 | rdev->accel_working = false; | 1424 | rdev->accel_working = false; |
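As in the ni.c and r100.c hunks earlier, rv370_pcie_gart_init() now assigns through rdev->asic->gart.tlb_flush and gart.set_page: the asic function table appears to have been regrouped into per-subsystem sub-structs (gart, pm, copy). A toy illustration of that grouping is below; the struct layout and names are invented stand-ins, only the grouping idea comes from the diff.

```c
#include <stdio.h>

struct fake_device;   /* stand-in for struct radeon_device */

/* before: one flat table (gart_tlb_flush, gart_set_page, set_pcie_lanes, ...)
 * after:  callbacks grouped per subsystem, as the hunks above assign them   */
struct fake_asic {
    struct {
        void (*tlb_flush)(struct fake_device *dev);
        int  (*set_page)(struct fake_device *dev, int i, unsigned long addr);
    } gart;
    struct {
        void (*set_pcie_lanes)(struct fake_device *dev, int lanes);
    } pm;
    struct {
        int (*copy)(struct fake_device *dev, unsigned num_pages);
    } copy;
};

static void demo_tlb_flush(struct fake_device *dev) { (void)dev; puts("flush"); }

int main(void)
{
    struct fake_asic asic = { 0 };

    asic.gart.tlb_flush = demo_tlb_flush;   /* cf. rdev->asic->gart.tlb_flush */
    asic.gart.tlb_flush(NULL);
    return 0;
}
```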
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index b14323053bad..f3fcaacfea01 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -279,7 +279,7 @@ static int r420_startup(struct radeon_device *rdev) | |||
279 | if (r) | 279 | if (r) |
280 | return r; | 280 | return r; |
281 | 281 | ||
282 | r = r100_ib_test(rdev); | 282 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
283 | if (r) { | 283 | if (r) { |
284 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 284 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
285 | rdev->accel_working = false; | 285 | rdev->accel_working = false; |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 3bd8f1b1c606..ec576aaafb73 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -351,6 +351,8 @@ | |||
351 | #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 | 351 | #define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 |
352 | #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 | 352 | #define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 |
353 | #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c | 353 | #define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c |
354 | #define AVIVO_D1CRTC_STATUS 0x609c | ||
355 | # define AVIVO_D1CRTC_V_BLANK (1 << 0) | ||
354 | #define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 | 356 | #define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 |
355 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 | 357 | #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 |
356 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 | 358 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
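The new AVIVO_D1CRTC_STATUS register and its V_BLANK bit support the same kind of vblank wait that r100.c gains above: poll a CRTC status bit, bounded by a microsecond timeout. A self-contained sketch of that polling shape follows, with a dummy register read standing in for RREG32() and an arbitrary timeout; the real r100 helper additionally waits for the bit to clear first so it catches the next vblank edge.

```c
#include <stdio.h>
#include <stdint.h>

#define AVIVO_D1CRTC_STATUS   0x609c      /* copied from the hunk above */
#define AVIVO_D1CRTC_V_BLANK  (1 << 0)

#define USEC_TIMEOUT          100000      /* arbitrary bound for the sketch */

/* dummy MMIO read so the example runs anywhere; a real driver uses RREG32() */
static uint32_t fake_rreg32(uint32_t reg)
{
    static int calls;
    (void)reg;
    return (++calls > 3) ? AVIVO_D1CRTC_V_BLANK : 0;
}

static void udelay(unsigned usec) { (void)usec; /* no-op in the sketch */ }

/* poll until the vblank bit is set, or give up after the timeout */
static void wait_for_vblank(void)
{
    int i;

    for (i = 0; i < USEC_TIMEOUT; i++) {
        if (fake_rreg32(AVIVO_D1CRTC_STATUS) & AVIVO_D1CRTC_V_BLANK)
            break;
        udelay(1);
    }
    printf("vblank seen after %d polls\n", i);
}

int main(void)
{
    wait_for_vblank();
    return 0;
}
```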
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 25084e824dbc..ebcc15b03c9f 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | /* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */ | 34 | /* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */ |
35 | 35 | ||
36 | static int r520_mc_wait_for_idle(struct radeon_device *rdev) | 36 | int r520_mc_wait_for_idle(struct radeon_device *rdev) |
37 | { | 37 | { |
38 | unsigned i; | 38 | unsigned i; |
39 | uint32_t tmp; | 39 | uint32_t tmp; |
@@ -207,7 +207,7 @@ static int r520_startup(struct radeon_device *rdev) | |||
207 | if (r) | 207 | if (r) |
208 | return r; | 208 | return r; |
209 | 209 | ||
210 | r = r100_ib_test(rdev); | 210 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
211 | if (r) { | 211 | if (r) { |
212 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 212 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
213 | rdev->accel_working = false; | 213 | rdev->accel_working = false; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 17ca72ce3027..5eb23829353f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2226,7 +2226,7 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2226 | 2226 | ||
2227 | r600_cp_start(rdev); | 2227 | r600_cp_start(rdev); |
2228 | ring->ready = true; | 2228 | ring->ready = true; |
2229 | r = radeon_ring_test(rdev, ring); | 2229 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
2230 | if (r) { | 2230 | if (r) { |
2231 | ring->ready = false; | 2231 | ring->ready = false; |
2232 | return r; | 2232 | return r; |
@@ -2452,7 +2452,7 @@ int r600_startup(struct radeon_device *rdev) | |||
2452 | r = r600_blit_init(rdev); | 2452 | r = r600_blit_init(rdev); |
2453 | if (r) { | 2453 | if (r) { |
2454 | r600_blit_fini(rdev); | 2454 | r600_blit_fini(rdev); |
2455 | rdev->asic->copy = NULL; | 2455 | rdev->asic->copy.copy = NULL; |
2456 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 2456 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
2457 | } | 2457 | } |
2458 | 2458 | ||
@@ -2493,7 +2493,7 @@ int r600_startup(struct radeon_device *rdev) | |||
2493 | if (r) | 2493 | if (r) |
2494 | return r; | 2494 | return r; |
2495 | 2495 | ||
2496 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | 2496 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
2497 | if (r) { | 2497 | if (r) { |
2498 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | 2498 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); |
2499 | rdev->accel_working = false; | 2499 | rdev->accel_working = false; |
@@ -2701,13 +2701,14 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
2701 | radeon_ring_write(ring, ib->length_dw); | 2701 | radeon_ring_write(ring, ib->length_dw); |
2702 | } | 2702 | } |
2703 | 2703 | ||
2704 | int r600_ib_test(struct radeon_device *rdev, int ring) | 2704 | int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2705 | { | 2705 | { |
2706 | struct radeon_ib *ib; | 2706 | struct radeon_ib *ib; |
2707 | uint32_t scratch; | 2707 | uint32_t scratch; |
2708 | uint32_t tmp = 0; | 2708 | uint32_t tmp = 0; |
2709 | unsigned i; | 2709 | unsigned i; |
2710 | int r; | 2710 | int r; |
2711 | int ring_index = radeon_ring_index(rdev, ring); | ||
2711 | 2712 | ||
2712 | r = radeon_scratch_get(rdev, &scratch); | 2713 | r = radeon_scratch_get(rdev, &scratch); |
2713 | if (r) { | 2714 | if (r) { |
@@ -2715,7 +2716,7 @@ int r600_ib_test(struct radeon_device *rdev, int ring) | |||
2715 | return r; | 2716 | return r; |
2716 | } | 2717 | } |
2717 | WREG32(scratch, 0xCAFEDEAD); | 2718 | WREG32(scratch, 0xCAFEDEAD); |
2718 | r = radeon_ib_get(rdev, ring, &ib, 256); | 2719 | r = radeon_ib_get(rdev, ring_index, &ib, 256); |
2719 | if (r) { | 2720 | if (r) { |
2720 | DRM_ERROR("radeon: failed to get ib (%d).\n", r); | 2721 | DRM_ERROR("radeon: failed to get ib (%d).\n", r); |
2721 | return r; | 2722 | return r; |
@@ -2723,20 +2724,7 @@ int r600_ib_test(struct radeon_device *rdev, int ring) | |||
2723 | ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); | 2724 | ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); |
2724 | ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 2725 | ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
2725 | ib->ptr[2] = 0xDEADBEEF; | 2726 | ib->ptr[2] = 0xDEADBEEF; |
2726 | ib->ptr[3] = PACKET2(0); | 2727 | ib->length_dw = 3; |
2727 | ib->ptr[4] = PACKET2(0); | ||
2728 | ib->ptr[5] = PACKET2(0); | ||
2729 | ib->ptr[6] = PACKET2(0); | ||
2730 | ib->ptr[7] = PACKET2(0); | ||
2731 | ib->ptr[8] = PACKET2(0); | ||
2732 | ib->ptr[9] = PACKET2(0); | ||
2733 | ib->ptr[10] = PACKET2(0); | ||
2734 | ib->ptr[11] = PACKET2(0); | ||
2735 | ib->ptr[12] = PACKET2(0); | ||
2736 | ib->ptr[13] = PACKET2(0); | ||
2737 | ib->ptr[14] = PACKET2(0); | ||
2738 | ib->ptr[15] = PACKET2(0); | ||
2739 | ib->length_dw = 16; | ||
2740 | r = radeon_ib_schedule(rdev, ib); | 2728 | r = radeon_ib_schedule(rdev, ib); |
2741 | if (r) { | 2729 | if (r) { |
2742 | radeon_scratch_free(rdev, scratch); | 2730 | radeon_scratch_free(rdev, scratch); |
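The r600_ib_test() hunk above shrinks the test IB from sixteen padded dwords to the three that matter: a SET_CONFIG_REG packet writing 0xDEADBEEF into a scratch register primed with 0xCAFEDEAD, which the driver then polls to confirm the IB executed. The sketch below models only that sentinel-and-poll idea in plain C; the scratch register and packet encoding are simulated, not real hardware access.

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t scratch;                /* stands in for the GPU scratch reg */

/* pretend to schedule the 3-dword IB; "executing" it performs the write */
static void fake_ib_schedule(const uint32_t *ib, unsigned len)
{
    (void)ib;
    (void)len;
    scratch = 0xDEADBEEF;               /* what the SET_CONFIG_REG packet does */
}

int main(void)
{
    uint32_t ib[3];
    unsigned i;

    scratch = 0xCAFEDEAD;               /* prime with the "not yet" value */

    ib[0] = 0;                          /* PACKET3(PACKET3_SET_CONFIG_REG, 1) in the real test */
    ib[1] = 0;                          /* (scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2       */
    ib[2] = 0xDEADBEEF;
    fake_ib_schedule(ib, 3);

    /* poll until the sentinel flips, as the driver does with a timeout */
    for (i = 0; i < 100; i++) {
        if (scratch == 0xDEADBEEF)
            break;
    }
    printf(i < 100 ? "ib test ok\n" : "ib test failed\n");
    return 0;
}
```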
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index accc032c103f..db38f587f27a 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -30,20 +30,7 @@ | |||
30 | 30 | ||
31 | #include "r600d.h" | 31 | #include "r600d.h" |
32 | #include "r600_blit_shaders.h" | 32 | #include "r600_blit_shaders.h" |
33 | 33 | #include "radeon_blit_common.h" | |
34 | #define DI_PT_RECTLIST 0x11 | ||
35 | #define DI_INDEX_SIZE_16_BIT 0x0 | ||
36 | #define DI_SRC_SEL_AUTO_INDEX 0x2 | ||
37 | |||
38 | #define FMT_8 0x1 | ||
39 | #define FMT_5_6_5 0x8 | ||
40 | #define FMT_8_8_8_8 0x1a | ||
41 | #define COLOR_8 0x1 | ||
42 | #define COLOR_5_6_5 0x8 | ||
43 | #define COLOR_8_8_8_8 0x1a | ||
44 | |||
45 | #define RECT_UNIT_H 32 | ||
46 | #define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H) | ||
47 | 34 | ||
48 | /* emits 21 on rv770+, 23 on r600 */ | 35 | /* emits 21 on rv770+, 23 on r600 */ |
49 | static void | 36 | static void |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 387fcc9f03ef..0ec3f205f9c4 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -52,15 +52,20 @@ struct r600_cs_track { | |||
52 | struct radeon_bo *cb_color_bo[8]; | 52 | struct radeon_bo *cb_color_bo[8]; |
53 | u64 cb_color_bo_mc[8]; | 53 | u64 cb_color_bo_mc[8]; |
54 | u32 cb_color_bo_offset[8]; | 54 | u32 cb_color_bo_offset[8]; |
55 | struct radeon_bo *cb_color_frag_bo[8]; | 55 | struct radeon_bo *cb_color_frag_bo[8]; /* unused */ |
56 | struct radeon_bo *cb_color_tile_bo[8]; | 56 | struct radeon_bo *cb_color_tile_bo[8]; /* unused */ |
57 | u32 cb_color_info[8]; | 57 | u32 cb_color_info[8]; |
58 | u32 cb_color_size_idx[8]; | 58 | u32 cb_color_view[8]; |
59 | u32 cb_color_size_idx[8]; /* unused */ | ||
59 | u32 cb_target_mask; | 60 | u32 cb_target_mask; |
60 | u32 cb_shader_mask; | 61 | u32 cb_shader_mask; /* unused */ |
61 | u32 cb_color_size[8]; | 62 | u32 cb_color_size[8]; |
62 | u32 vgt_strmout_en; | 63 | u32 vgt_strmout_en; |
63 | u32 vgt_strmout_buffer_en; | 64 | u32 vgt_strmout_buffer_en; |
65 | struct radeon_bo *vgt_strmout_bo[4]; | ||
66 | u64 vgt_strmout_bo_mc[4]; /* unused */ | ||
67 | u32 vgt_strmout_bo_offset[4]; | ||
68 | u32 vgt_strmout_size[4]; | ||
64 | u32 db_depth_control; | 69 | u32 db_depth_control; |
65 | u32 db_depth_info; | 70 | u32 db_depth_info; |
66 | u32 db_depth_size_idx; | 71 | u32 db_depth_size_idx; |
@@ -69,13 +74,17 @@ struct r600_cs_track { | |||
69 | u32 db_offset; | 74 | u32 db_offset; |
70 | struct radeon_bo *db_bo; | 75 | struct radeon_bo *db_bo; |
71 | u64 db_bo_mc; | 76 | u64 db_bo_mc; |
77 | bool sx_misc_kill_all_prims; | ||
78 | bool cb_dirty; | ||
79 | bool db_dirty; | ||
80 | bool streamout_dirty; | ||
72 | }; | 81 | }; |
73 | 82 | ||
74 | #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } | 83 | #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } |
75 | #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } | 84 | #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } |
76 | #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 } | 85 | #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 } |
77 | #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } | 86 | #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } |
78 | #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 } | 87 | #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 } |
79 | #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } | 88 | #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } |
80 | #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } | 89 | #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } |
81 | #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } | 90 | #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } |
@@ -107,7 +116,7 @@ static const struct gpu_formats color_formats_table[] = { | |||
107 | 116 | ||
108 | /* 24-bit */ | 117 | /* 24-bit */ |
109 | FMT_24_BIT(V_038004_FMT_8_8_8), | 118 | FMT_24_BIT(V_038004_FMT_8_8_8), |
110 | 119 | ||
111 | /* 32-bit */ | 120 | /* 32-bit */ |
112 | FMT_32_BIT(V_038004_COLOR_32, 1), | 121 | FMT_32_BIT(V_038004_COLOR_32, 1), |
113 | FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), | 122 | FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), |
@@ -162,22 +171,22 @@ static const struct gpu_formats color_formats_table[] = { | |||
162 | [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, | 171 | [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, |
163 | }; | 172 | }; |
164 | 173 | ||
165 | static bool fmt_is_valid_color(u32 format) | 174 | bool r600_fmt_is_valid_color(u32 format) |
166 | { | 175 | { |
167 | if (format >= ARRAY_SIZE(color_formats_table)) | 176 | if (format >= ARRAY_SIZE(color_formats_table)) |
168 | return false; | 177 | return false; |
169 | 178 | ||
170 | if (color_formats_table[format].valid_color) | 179 | if (color_formats_table[format].valid_color) |
171 | return true; | 180 | return true; |
172 | 181 | ||
173 | return false; | 182 | return false; |
174 | } | 183 | } |
175 | 184 | ||
176 | static bool fmt_is_valid_texture(u32 format, enum radeon_family family) | 185 | bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family) |
177 | { | 186 | { |
178 | if (format >= ARRAY_SIZE(color_formats_table)) | 187 | if (format >= ARRAY_SIZE(color_formats_table)) |
179 | return false; | 188 | return false; |
180 | 189 | ||
181 | if (family < color_formats_table[format].min_family) | 190 | if (family < color_formats_table[format].min_family) |
182 | return false; | 191 | return false; |
183 | 192 | ||
@@ -187,7 +196,7 @@ static bool fmt_is_valid_texture(u32 format, enum radeon_family family) | |||
187 | return false; | 196 | return false; |
188 | } | 197 | } |
189 | 198 | ||
190 | static int fmt_get_blocksize(u32 format) | 199 | int r600_fmt_get_blocksize(u32 format) |
191 | { | 200 | { |
192 | if (format >= ARRAY_SIZE(color_formats_table)) | 201 | if (format >= ARRAY_SIZE(color_formats_table)) |
193 | return 0; | 202 | return 0; |
@@ -195,7 +204,7 @@ static int fmt_get_blocksize(u32 format) | |||
195 | return color_formats_table[format].blocksize; | 204 | return color_formats_table[format].blocksize; |
196 | } | 205 | } |
197 | 206 | ||
198 | static int fmt_get_nblocksx(u32 format, u32 w) | 207 | int r600_fmt_get_nblocksx(u32 format, u32 w) |
199 | { | 208 | { |
200 | unsigned bw; | 209 | unsigned bw; |
201 | 210 | ||
@@ -209,7 +218,7 @@ static int fmt_get_nblocksx(u32 format, u32 w) | |||
209 | return (w + bw - 1) / bw; | 218 | return (w + bw - 1) / bw; |
210 | } | 219 | } |
211 | 220 | ||
212 | static int fmt_get_nblocksy(u32 format, u32 h) | 221 | int r600_fmt_get_nblocksy(u32 format, u32 h) |
213 | { | 222 | { |
214 | unsigned bh; | 223 | unsigned bh; |
215 | 224 | ||
@@ -256,7 +265,7 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values, | |||
256 | break; | 265 | break; |
257 | case ARRAY_LINEAR_ALIGNED: | 266 | case ARRAY_LINEAR_ALIGNED: |
258 | *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); | 267 | *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); |
259 | *height_align = tile_height; | 268 | *height_align = 1; |
260 | *depth_align = 1; | 269 | *depth_align = 1; |
261 | *base_align = values->group_size; | 270 | *base_align = values->group_size; |
262 | break; | 271 | break; |
@@ -269,10 +278,9 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values, | |||
269 | *base_align = values->group_size; | 278 | *base_align = values->group_size; |
270 | break; | 279 | break; |
271 | case ARRAY_2D_TILED_THIN1: | 280 | case ARRAY_2D_TILED_THIN1: |
272 | *pitch_align = max((u32)macro_tile_width, | 281 | *pitch_align = max((u32)macro_tile_width * tile_width, |
273 | (u32)(((values->group_size / tile_height) / | 282 | (u32)((values->group_size * values->nbanks) / |
274 | (values->blocksize * values->nsamples)) * | 283 | (values->blocksize * values->nsamples * tile_width))); |
275 | values->nbanks)) * tile_width; | ||
276 | *height_align = macro_tile_height * tile_height; | 284 | *height_align = macro_tile_height * tile_height; |
277 | *depth_align = 1; | 285 | *depth_align = 1; |
278 | *base_align = max(macro_tile_bytes, | 286 | *base_align = max(macro_tile_bytes, |
@@ -296,12 +304,14 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
296 | track->cb_color_size[i] = 0; | 304 | track->cb_color_size[i] = 0; |
297 | track->cb_color_size_idx[i] = 0; | 305 | track->cb_color_size_idx[i] = 0; |
298 | track->cb_color_info[i] = 0; | 306 | track->cb_color_info[i] = 0; |
307 | track->cb_color_view[i] = 0xFFFFFFFF; | ||
299 | track->cb_color_bo[i] = NULL; | 308 | track->cb_color_bo[i] = NULL; |
300 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | 309 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; |
301 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; | 310 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; |
302 | } | 311 | } |
303 | track->cb_target_mask = 0xFFFFFFFF; | 312 | track->cb_target_mask = 0xFFFFFFFF; |
304 | track->cb_shader_mask = 0xFFFFFFFF; | 313 | track->cb_shader_mask = 0xFFFFFFFF; |
314 | track->cb_dirty = true; | ||
305 | track->db_bo = NULL; | 315 | track->db_bo = NULL; |
306 | track->db_bo_mc = 0xFFFFFFFF; | 316 | track->db_bo_mc = 0xFFFFFFFF; |
307 | /* assume the biggest format and that htile is enabled */ | 317 | /* assume the biggest format and that htile is enabled */ |
@@ -310,6 +320,16 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
310 | track->db_depth_size = 0xFFFFFFFF; | 320 | track->db_depth_size = 0xFFFFFFFF; |
311 | track->db_depth_size_idx = 0; | 321 | track->db_depth_size_idx = 0; |
312 | track->db_depth_control = 0xFFFFFFFF; | 322 | track->db_depth_control = 0xFFFFFFFF; |
323 | track->db_dirty = true; | ||
324 | |||
325 | for (i = 0; i < 4; i++) { | ||
326 | track->vgt_strmout_size[i] = 0; | ||
327 | track->vgt_strmout_bo[i] = NULL; | ||
328 | track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; | ||
329 | track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF; | ||
330 | } | ||
331 | track->streamout_dirty = true; | ||
332 | track->sx_misc_kill_all_prims = false; | ||
313 | } | 333 | } |
314 | 334 | ||
315 | static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | 335 | static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) |
@@ -322,13 +342,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
322 | volatile u32 *ib = p->ib->ptr; | 342 | volatile u32 *ib = p->ib->ptr; |
323 | unsigned array_mode; | 343 | unsigned array_mode; |
324 | u32 format; | 344 | u32 format; |
345 | |||
325 | if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { | 346 | if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { |
326 | dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); | 347 | dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); |
327 | return -EINVAL; | 348 | return -EINVAL; |
328 | } | 349 | } |
329 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; | 350 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; |
330 | format = G_0280A0_FORMAT(track->cb_color_info[i]); | 351 | format = G_0280A0_FORMAT(track->cb_color_info[i]); |
331 | if (!fmt_is_valid_color(format)) { | 352 | if (!r600_fmt_is_valid_color(format)) { |
332 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", | 353 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", |
333 | __func__, __LINE__, format, | 354 | __func__, __LINE__, format, |
334 | i, track->cb_color_info[i]); | 355 | i, track->cb_color_info[i]); |
@@ -349,7 +370,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
349 | array_check.nbanks = track->nbanks; | 370 | array_check.nbanks = track->nbanks; |
350 | array_check.npipes = track->npipes; | 371 | array_check.npipes = track->npipes; |
351 | array_check.nsamples = track->nsamples; | 372 | array_check.nsamples = track->nsamples; |
352 | array_check.blocksize = fmt_get_blocksize(format); | 373 | array_check.blocksize = r600_fmt_get_blocksize(format); |
353 | if (r600_get_array_mode_alignment(&array_check, | 374 | if (r600_get_array_mode_alignment(&array_check, |
354 | &pitch_align, &height_align, &depth_align, &base_align)) { | 375 | &pitch_align, &height_align, &depth_align, &base_align)) { |
355 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | 376 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, |
@@ -393,7 +414,18 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
393 | } | 414 | } |
394 | 415 | ||
395 | /* check offset */ | 416 | /* check offset */ |
396 | tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format); | 417 | tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format); |
418 | switch (array_mode) { | ||
419 | default: | ||
420 | case V_0280A0_ARRAY_LINEAR_GENERAL: | ||
421 | case V_0280A0_ARRAY_LINEAR_ALIGNED: | ||
422 | tmp += track->cb_color_view[i] & 0xFF; | ||
423 | break; | ||
424 | case V_0280A0_ARRAY_1D_TILED_THIN1: | ||
425 | case V_0280A0_ARRAY_2D_TILED_THIN1: | ||
426 | tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp; | ||
427 | break; | ||
428 | } | ||
397 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 429 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { |
398 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { | 430 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { |
399 | /* the initial DDX does bad things with the CB size occasionally */ | 431 | /* the initial DDX does bad things with the CB size occasionally */ |
@@ -403,10 +435,13 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
403 | * broken userspace. | 435 | * broken userspace. |
404 | */ | 436 | */ |
405 | } else { | 437 | } else { |
406 | dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i, | 438 | dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n", |
407 | array_mode, | 439 | __func__, i, array_mode, |
408 | track->cb_color_bo_offset[i], tmp, | 440 | track->cb_color_bo_offset[i], tmp, |
409 | radeon_bo_size(track->cb_color_bo[i])); | 441 | radeon_bo_size(track->cb_color_bo[i]), |
442 | pitch, height, r600_fmt_get_nblocksx(format, pitch), | ||
443 | r600_fmt_get_nblocksy(format, height), | ||
444 | r600_fmt_get_blocksize(format)); | ||
410 | return -EINVAL; | 445 | return -EINVAL; |
411 | } | 446 | } |
412 | } | 447 | } |
@@ -430,143 +465,171 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
430 | /* on legacy kernel we don't perform advanced check */ | 465 | /* on legacy kernel we don't perform advanced check */ |
431 | if (p->rdev == NULL) | 466 | if (p->rdev == NULL) |
432 | return 0; | 467 | return 0; |
433 | /* we don't support out buffer yet */ | 468 | |
434 | if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) { | 469 | /* check streamout */ |
435 | dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); | 470 | if (track->streamout_dirty && track->vgt_strmout_en) { |
436 | return -EINVAL; | 471 | for (i = 0; i < 4; i++) { |
472 | if (track->vgt_strmout_buffer_en & (1 << i)) { | ||
473 | if (track->vgt_strmout_bo[i]) { | ||
474 | u64 offset = (u64)track->vgt_strmout_bo_offset[i] + | ||
475 | (u64)track->vgt_strmout_size[i]; | ||
476 | if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { | ||
477 | DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", | ||
478 | i, offset, | ||
479 | radeon_bo_size(track->vgt_strmout_bo[i])); | ||
480 | return -EINVAL; | ||
481 | } | ||
482 | } else { | ||
483 | dev_warn(p->dev, "No buffer for streamout %d\n", i); | ||
484 | return -EINVAL; | ||
485 | } | ||
486 | } | ||
487 | } | ||
488 | track->streamout_dirty = false; | ||
437 | } | 489 | } |
490 | |||
491 | if (track->sx_misc_kill_all_prims) | ||
492 | return 0; | ||
493 | |||
438 | /* check that we have a cb for each enabled target, we don't check | 494 | /* check that we have a cb for each enabled target, we don't check |
439 | * shader_mask because it seems mesa isn't always setting it :( | 495 | * shader_mask because it seems mesa isn't always setting it :( |
440 | */ | 496 | */ |
441 | tmp = track->cb_target_mask; | 497 | if (track->cb_dirty) { |
442 | for (i = 0; i < 8; i++) { | 498 | tmp = track->cb_target_mask; |
443 | if ((tmp >> (i * 4)) & 0xF) { | 499 | for (i = 0; i < 8; i++) { |
444 | /* at least one component is enabled */ | 500 | if ((tmp >> (i * 4)) & 0xF) { |
445 | if (track->cb_color_bo[i] == NULL) { | 501 | /* at least one component is enabled */ |
446 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", | 502 | if (track->cb_color_bo[i] == NULL) { |
447 | __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); | 503 | dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", |
448 | return -EINVAL; | 504 | __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); |
505 | return -EINVAL; | ||
506 | } | ||
507 | /* perform rewrite of CB_COLOR[0-7]_SIZE */ | ||
508 | r = r600_cs_track_validate_cb(p, i); | ||
509 | if (r) | ||
510 | return r; | ||
449 | } | 511 | } |
450 | /* perform rewrite of CB_COLOR[0-7]_SIZE */ | ||
451 | r = r600_cs_track_validate_cb(p, i); | ||
452 | if (r) | ||
453 | return r; | ||
454 | } | 512 | } |
513 | track->cb_dirty = false; | ||
455 | } | 514 | } |
456 | /* Check depth buffer */ | 515 | |
457 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || | 516 | if (track->db_dirty) { |
458 | G_028800_Z_ENABLE(track->db_depth_control)) { | 517 | /* Check depth buffer */ |
459 | u32 nviews, bpe, ntiles, size, slice_tile_max; | 518 | if (G_028800_STENCIL_ENABLE(track->db_depth_control) || |
460 | u32 height, height_align, pitch, pitch_align, depth_align; | 519 | G_028800_Z_ENABLE(track->db_depth_control)) { |
461 | u64 base_offset, base_align; | 520 | u32 nviews, bpe, ntiles, size, slice_tile_max; |
462 | struct array_mode_checker array_check; | 521 | u32 height, height_align, pitch, pitch_align, depth_align; |
463 | int array_mode; | 522 | u64 base_offset, base_align; |
464 | 523 | struct array_mode_checker array_check; | |
465 | if (track->db_bo == NULL) { | 524 | int array_mode; |
466 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); | 525 | |
467 | return -EINVAL; | 526 | if (track->db_bo == NULL) { |
468 | } | 527 | dev_warn(p->dev, "z/stencil with no depth buffer\n"); |
469 | if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { | ||
470 | dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n"); | ||
471 | return -EINVAL; | ||
472 | } | ||
473 | switch (G_028010_FORMAT(track->db_depth_info)) { | ||
474 | case V_028010_DEPTH_16: | ||
475 | bpe = 2; | ||
476 | break; | ||
477 | case V_028010_DEPTH_X8_24: | ||
478 | case V_028010_DEPTH_8_24: | ||
479 | case V_028010_DEPTH_X8_24_FLOAT: | ||
480 | case V_028010_DEPTH_8_24_FLOAT: | ||
481 | case V_028010_DEPTH_32_FLOAT: | ||
482 | bpe = 4; | ||
483 | break; | ||
484 | case V_028010_DEPTH_X24_8_32_FLOAT: | ||
485 | bpe = 8; | ||
486 | break; | ||
487 | default: | ||
488 | dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); | ||
489 | return -EINVAL; | ||
490 | } | ||
491 | if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { | ||
492 | if (!track->db_depth_size_idx) { | ||
493 | dev_warn(p->dev, "z/stencil buffer size not set\n"); | ||
494 | return -EINVAL; | ||
495 | } | ||
496 | tmp = radeon_bo_size(track->db_bo) - track->db_offset; | ||
497 | tmp = (tmp / bpe) >> 6; | ||
498 | if (!tmp) { | ||
499 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", | ||
500 | track->db_depth_size, bpe, track->db_offset, | ||
501 | radeon_bo_size(track->db_bo)); | ||
502 | return -EINVAL; | 528 | return -EINVAL; |
503 | } | 529 | } |
504 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | 530 | if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { |
505 | } else { | 531 | dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n"); |
506 | size = radeon_bo_size(track->db_bo); | ||
507 | /* pitch in pixels */ | ||
508 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; | ||
509 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | ||
510 | slice_tile_max *= 64; | ||
511 | height = slice_tile_max / pitch; | ||
512 | if (height > 8192) | ||
513 | height = 8192; | ||
514 | base_offset = track->db_bo_mc + track->db_offset; | ||
515 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
516 | array_check.array_mode = array_mode; | ||
517 | array_check.group_size = track->group_size; | ||
518 | array_check.nbanks = track->nbanks; | ||
519 | array_check.npipes = track->npipes; | ||
520 | array_check.nsamples = track->nsamples; | ||
521 | array_check.blocksize = bpe; | ||
522 | if (r600_get_array_mode_alignment(&array_check, | ||
523 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
524 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
525 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
526 | track->db_depth_info); | ||
527 | return -EINVAL; | 532 | return -EINVAL; |
528 | } | 533 | } |
529 | switch (array_mode) { | 534 | switch (G_028010_FORMAT(track->db_depth_info)) { |
530 | case V_028010_ARRAY_1D_TILED_THIN1: | 535 | case V_028010_DEPTH_16: |
531 | /* don't break userspace */ | 536 | bpe = 2; |
532 | height &= ~0x7; | 537 | break; |
538 | case V_028010_DEPTH_X8_24: | ||
539 | case V_028010_DEPTH_8_24: | ||
540 | case V_028010_DEPTH_X8_24_FLOAT: | ||
541 | case V_028010_DEPTH_8_24_FLOAT: | ||
542 | case V_028010_DEPTH_32_FLOAT: | ||
543 | bpe = 4; | ||
533 | break; | 544 | break; |
534 | case V_028010_ARRAY_2D_TILED_THIN1: | 545 | case V_028010_DEPTH_X24_8_32_FLOAT: |
546 | bpe = 8; | ||
535 | break; | 547 | break; |
536 | default: | 548 | default: |
537 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | 549 | dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); |
538 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
539 | track->db_depth_info); | ||
540 | return -EINVAL; | 550 | return -EINVAL; |
541 | } | 551 | } |
552 | if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { | ||
553 | if (!track->db_depth_size_idx) { | ||
554 | dev_warn(p->dev, "z/stencil buffer size not set\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | tmp = radeon_bo_size(track->db_bo) - track->db_offset; | ||
558 | tmp = (tmp / bpe) >> 6; | ||
559 | if (!tmp) { | ||
560 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", | ||
561 | track->db_depth_size, bpe, track->db_offset, | ||
562 | radeon_bo_size(track->db_bo)); | ||
563 | return -EINVAL; | ||
564 | } | ||
565 | ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); | ||
566 | } else { | ||
567 | size = radeon_bo_size(track->db_bo); | ||
568 | /* pitch in pixels */ | ||
569 | pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; | ||
570 | slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | ||
571 | slice_tile_max *= 64; | ||
572 | height = slice_tile_max / pitch; | ||
573 | if (height > 8192) | ||
574 | height = 8192; | ||
575 | base_offset = track->db_bo_mc + track->db_offset; | ||
576 | array_mode = G_028010_ARRAY_MODE(track->db_depth_info); | ||
577 | array_check.array_mode = array_mode; | ||
578 | array_check.group_size = track->group_size; | ||
579 | array_check.nbanks = track->nbanks; | ||
580 | array_check.npipes = track->npipes; | ||
581 | array_check.nsamples = track->nsamples; | ||
582 | array_check.blocksize = bpe; | ||
583 | if (r600_get_array_mode_alignment(&array_check, | ||
584 | &pitch_align, &height_align, &depth_align, &base_align)) { | ||
585 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
586 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
587 | track->db_depth_info); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | switch (array_mode) { | ||
591 | case V_028010_ARRAY_1D_TILED_THIN1: | ||
592 | /* don't break userspace */ | ||
593 | height &= ~0x7; | ||
594 | break; | ||
595 | case V_028010_ARRAY_2D_TILED_THIN1: | ||
596 | break; | ||
597 | default: | ||
598 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | ||
599 | G_028010_ARRAY_MODE(track->db_depth_info), | ||
600 | track->db_depth_info); | ||
601 | return -EINVAL; | ||
602 | } | ||
542 | 603 | ||
543 | if (!IS_ALIGNED(pitch, pitch_align)) { | 604 | if (!IS_ALIGNED(pitch, pitch_align)) { |
544 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", | 605 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", |
545 | __func__, __LINE__, pitch, pitch_align, array_mode); | 606 | __func__, __LINE__, pitch, pitch_align, array_mode); |
546 | return -EINVAL; | 607 | return -EINVAL; |
547 | } | 608 | } |
548 | if (!IS_ALIGNED(height, height_align)) { | 609 | if (!IS_ALIGNED(height, height_align)) { |
549 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", | 610 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", |
550 | __func__, __LINE__, height, height_align, array_mode); | 611 | __func__, __LINE__, height, height_align, array_mode); |
551 | return -EINVAL; | 612 | return -EINVAL; |
552 | } | 613 | } |
553 | if (!IS_ALIGNED(base_offset, base_align)) { | 614 | if (!IS_ALIGNED(base_offset, base_align)) { |
554 | dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, | 615 | dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, |
555 | base_offset, base_align, array_mode); | 616 | base_offset, base_align, array_mode); |
556 | return -EINVAL; | 617 | return -EINVAL; |
557 | } | 618 | } |
558 | 619 | ||
559 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; | 620 | ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; |
560 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | 621 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; |
561 | tmp = ntiles * bpe * 64 * nviews; | 622 | tmp = ntiles * bpe * 64 * nviews; |
562 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | 623 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { |
563 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", | 624 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", |
564 | array_mode, | 625 | array_mode, |
565 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | 626 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, |
566 | radeon_bo_size(track->db_bo)); | 627 | radeon_bo_size(track->db_bo)); |
567 | return -EINVAL; | 628 | return -EINVAL; |
629 | } | ||
568 | } | 630 | } |
569 | } | 631 | } |
632 | track->db_dirty = false; | ||
570 | } | 633 | } |
571 | return 0; | 634 | return 0; |
572 | } | 635 | } |
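The depth-buffer block above is the new db_dirty-gated validation: SLICE_TILE_MAX is turned into a tile count, the footprint ntiles * 64 * bpe * nviews is compared against the backing BO, and db_dirty is cleared afterwards so the check only reruns when depth state is rewritten. A minimal standalone sketch of the size arithmetic (helper name and types are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the bounds check in r600_cs_track_check(); 64 = pixels per 8x8 tile. */
static bool depth_buffer_fits(uint32_t slice_tile_max, uint32_t nviews,
                              uint32_t bpe, uint64_t db_offset, uint64_t bo_size)
{
	uint64_t ntiles = (uint64_t)slice_tile_max + 1;  /* register stores the max index */
	uint64_t bytes  = ntiles * 64 * bpe * nviews;

	return bytes + db_offset <= bo_size;
}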
@@ -939,6 +1002,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
939 | break; | 1002 | break; |
940 | case R_028800_DB_DEPTH_CONTROL: | 1003 | case R_028800_DB_DEPTH_CONTROL: |
941 | track->db_depth_control = radeon_get_ib_value(p, idx); | 1004 | track->db_depth_control = radeon_get_ib_value(p, idx); |
1005 | track->db_dirty = true; | ||
942 | break; | 1006 | break; |
943 | case R_028010_DB_DEPTH_INFO: | 1007 | case R_028010_DB_DEPTH_INFO: |
944 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && | 1008 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && |
@@ -959,24 +1023,66 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
959 | ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); | 1023 | ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); |
960 | track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); | 1024 | track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); |
961 | } | 1025 | } |
962 | } else | 1026 | } else { |
963 | track->db_depth_info = radeon_get_ib_value(p, idx); | 1027 | track->db_depth_info = radeon_get_ib_value(p, idx); |
1028 | } | ||
1029 | track->db_dirty = true; | ||
964 | break; | 1030 | break; |
965 | case R_028004_DB_DEPTH_VIEW: | 1031 | case R_028004_DB_DEPTH_VIEW: |
966 | track->db_depth_view = radeon_get_ib_value(p, idx); | 1032 | track->db_depth_view = radeon_get_ib_value(p, idx); |
1033 | track->db_dirty = true; | ||
967 | break; | 1034 | break; |
968 | case R_028000_DB_DEPTH_SIZE: | 1035 | case R_028000_DB_DEPTH_SIZE: |
969 | track->db_depth_size = radeon_get_ib_value(p, idx); | 1036 | track->db_depth_size = radeon_get_ib_value(p, idx); |
970 | track->db_depth_size_idx = idx; | 1037 | track->db_depth_size_idx = idx; |
1038 | track->db_dirty = true; | ||
971 | break; | 1039 | break; |
972 | case R_028AB0_VGT_STRMOUT_EN: | 1040 | case R_028AB0_VGT_STRMOUT_EN: |
973 | track->vgt_strmout_en = radeon_get_ib_value(p, idx); | 1041 | track->vgt_strmout_en = radeon_get_ib_value(p, idx); |
1042 | track->streamout_dirty = true; | ||
974 | break; | 1043 | break; |
975 | case R_028B20_VGT_STRMOUT_BUFFER_EN: | 1044 | case R_028B20_VGT_STRMOUT_BUFFER_EN: |
976 | track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); | 1045 | track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); |
1046 | track->streamout_dirty = true; | ||
1047 | break; | ||
1048 | case VGT_STRMOUT_BUFFER_BASE_0: | ||
1049 | case VGT_STRMOUT_BUFFER_BASE_1: | ||
1050 | case VGT_STRMOUT_BUFFER_BASE_2: | ||
1051 | case VGT_STRMOUT_BUFFER_BASE_3: | ||
1052 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1053 | if (r) { | ||
1054 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
1055 | "0x%04X\n", reg); | ||
1056 | return -EINVAL; | ||
1057 | } | ||
1058 | tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; | ||
1059 | track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; | ||
1060 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
1061 | track->vgt_strmout_bo[tmp] = reloc->robj; | ||
1062 | track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; | ||
1063 | track->streamout_dirty = true; | ||
1064 | break; | ||
1065 | case VGT_STRMOUT_BUFFER_SIZE_0: | ||
1066 | case VGT_STRMOUT_BUFFER_SIZE_1: | ||
1067 | case VGT_STRMOUT_BUFFER_SIZE_2: | ||
1068 | case VGT_STRMOUT_BUFFER_SIZE_3: | ||
1069 | tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; | ||
1070 | /* size in register is DWs, convert to bytes */ | ||
1071 | track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; | ||
1072 | track->streamout_dirty = true; | ||
1073 | break; | ||
1074 | case CP_COHER_BASE: | ||
1075 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1076 | if (r) { | ||
1077 | dev_warn(p->dev, "missing reloc for CP_COHER_BASE " | ||
1078 | "0x%04X\n", reg); | ||
1079 | return -EINVAL; | ||
1080 | } | ||
1081 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
977 | break; | 1082 | break; |
978 | case R_028238_CB_TARGET_MASK: | 1083 | case R_028238_CB_TARGET_MASK: |
979 | track->cb_target_mask = radeon_get_ib_value(p, idx); | 1084 | track->cb_target_mask = radeon_get_ib_value(p, idx); |
1085 | track->cb_dirty = true; | ||
980 | break; | 1086 | break; |
981 | case R_02823C_CB_SHADER_MASK: | 1087 | case R_02823C_CB_SHADER_MASK: |
982 | track->cb_shader_mask = radeon_get_ib_value(p, idx); | 1088 | track->cb_shader_mask = radeon_get_ib_value(p, idx); |
@@ -984,6 +1090,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
984 | case R_028C04_PA_SC_AA_CONFIG: | 1090 | case R_028C04_PA_SC_AA_CONFIG: |
985 | tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); | 1091 | tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); |
986 | track->nsamples = 1 << tmp; | 1092 | track->nsamples = 1 << tmp; |
1093 | track->cb_dirty = true; | ||
987 | break; | 1094 | break; |
988 | case R_0280A0_CB_COLOR0_INFO: | 1095 | case R_0280A0_CB_COLOR0_INFO: |
989 | case R_0280A4_CB_COLOR1_INFO: | 1096 | case R_0280A4_CB_COLOR1_INFO: |
@@ -1013,6 +1120,19 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1013 | tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; | 1120 | tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; |
1014 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | 1121 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); |
1015 | } | 1122 | } |
1123 | track->cb_dirty = true; | ||
1124 | break; | ||
1125 | case R_028080_CB_COLOR0_VIEW: | ||
1126 | case R_028084_CB_COLOR1_VIEW: | ||
1127 | case R_028088_CB_COLOR2_VIEW: | ||
1128 | case R_02808C_CB_COLOR3_VIEW: | ||
1129 | case R_028090_CB_COLOR4_VIEW: | ||
1130 | case R_028094_CB_COLOR5_VIEW: | ||
1131 | case R_028098_CB_COLOR6_VIEW: | ||
1132 | case R_02809C_CB_COLOR7_VIEW: | ||
1133 | tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; | ||
1134 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | ||
1135 | track->cb_dirty = true; | ||
1016 | break; | 1136 | break; |
1017 | case R_028060_CB_COLOR0_SIZE: | 1137 | case R_028060_CB_COLOR0_SIZE: |
1018 | case R_028064_CB_COLOR1_SIZE: | 1138 | case R_028064_CB_COLOR1_SIZE: |
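The cb_dirty/db_dirty/streamout_dirty assignments sprinkled through these hunks gate the expensive re-validation in r600_cs_track_check(): a state block is only rechecked when one of its registers was written since the last draw, and the flag is cleared afterwards (as the db_dirty handling in the first hunk shows). An illustrative sketch of the shape, not the driver's actual structure:

#include <stdbool.h>

struct track_state {
	bool cb_dirty;
	bool db_dirty;
};

static int check_track(struct track_state *t)
{
	if (t->cb_dirty) {
		/* ...re-validate the colour buffers... */
		t->cb_dirty = false;
	}
	if (t->db_dirty) {
		/* ...re-validate the depth/stencil buffer... */
		t->db_dirty = false;
	}
	return 0;
}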
@@ -1025,6 +1145,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1025 | tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; | 1145 | tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; |
1026 | track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); | 1146 | track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); |
1027 | track->cb_color_size_idx[tmp] = idx; | 1147 | track->cb_color_size_idx[tmp] = idx; |
1148 | track->cb_dirty = true; | ||
1028 | break; | 1149 | break; |
1029 | /* These registers were added late; there is userspace | 1150 | /* These registers were added late; there is userspace |
1030 | * which does provide relocation for those but sets | 1151 | * which does provide relocation for those but sets |
@@ -1107,6 +1228,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1107 | track->cb_color_base_last[tmp] = ib[idx]; | 1228 | track->cb_color_base_last[tmp] = ib[idx]; |
1108 | track->cb_color_bo[tmp] = reloc->robj; | 1229 | track->cb_color_bo[tmp] = reloc->robj; |
1109 | track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; | 1230 | track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; |
1231 | track->cb_dirty = true; | ||
1110 | break; | 1232 | break; |
1111 | case DB_DEPTH_BASE: | 1233 | case DB_DEPTH_BASE: |
1112 | r = r600_cs_packet_next_reloc(p, &reloc); | 1234 | r = r600_cs_packet_next_reloc(p, &reloc); |
@@ -1119,6 +1241,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1119 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1241 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1120 | track->db_bo = reloc->robj; | 1242 | track->db_bo = reloc->robj; |
1121 | track->db_bo_mc = reloc->lobj.gpu_offset; | 1243 | track->db_bo_mc = reloc->lobj.gpu_offset; |
1244 | track->db_dirty = true; | ||
1122 | break; | 1245 | break; |
1123 | case DB_HTILE_DATA_BASE: | 1246 | case DB_HTILE_DATA_BASE: |
1124 | case SQ_PGM_START_FS: | 1247 | case SQ_PGM_START_FS: |
@@ -1191,6 +1314,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1191 | } | 1314 | } |
1192 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1315 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1193 | break; | 1316 | break; |
1317 | case SX_MISC: | ||
1318 | track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; | ||
1319 | break; | ||
1194 | default: | 1320 | default: |
1195 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 1321 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
1196 | return -EINVAL; | 1322 | return -EINVAL; |
@@ -1198,7 +1324,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1198 | return 0; | 1324 | return 0; |
1199 | } | 1325 | } |
1200 | 1326 | ||
1201 | static unsigned mip_minify(unsigned size, unsigned level) | 1327 | unsigned r600_mip_minify(unsigned size, unsigned level) |
1202 | { | 1328 | { |
1203 | unsigned val; | 1329 | unsigned val; |
1204 | 1330 | ||
@@ -1220,22 +1346,22 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, | |||
1220 | unsigned nlevels = llevel - blevel + 1; | 1346 | unsigned nlevels = llevel - blevel + 1; |
1221 | 1347 | ||
1222 | *l0_size = -1; | 1348 | *l0_size = -1; |
1223 | blocksize = fmt_get_blocksize(format); | 1349 | blocksize = r600_fmt_get_blocksize(format); |
1224 | 1350 | ||
1225 | w0 = mip_minify(w0, 0); | 1351 | w0 = r600_mip_minify(w0, 0); |
1226 | h0 = mip_minify(h0, 0); | 1352 | h0 = r600_mip_minify(h0, 0); |
1227 | d0 = mip_minify(d0, 0); | 1353 | d0 = r600_mip_minify(d0, 0); |
1228 | for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { | 1354 | for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { |
1229 | width = mip_minify(w0, i); | 1355 | width = r600_mip_minify(w0, i); |
1230 | nbx = fmt_get_nblocksx(format, width); | 1356 | nbx = r600_fmt_get_nblocksx(format, width); |
1231 | 1357 | ||
1232 | nbx = round_up(nbx, block_align); | 1358 | nbx = round_up(nbx, block_align); |
1233 | 1359 | ||
1234 | height = mip_minify(h0, i); | 1360 | height = r600_mip_minify(h0, i); |
1235 | nby = fmt_get_nblocksy(format, height); | 1361 | nby = r600_fmt_get_nblocksy(format, height); |
1236 | nby = round_up(nby, height_align); | 1362 | nby = round_up(nby, height_align); |
1237 | 1363 | ||
1238 | depth = mip_minify(d0, i); | 1364 | depth = r600_mip_minify(d0, i); |
1239 | 1365 | ||
1240 | size = nbx * nby * blocksize; | 1366 | size = nbx * nby * blocksize; |
1241 | if (nfaces) | 1367 | if (nfaces) |
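r600_texture_size() (now using the exported r600_mip_minify()/r600_fmt_* helpers) walks the mip chain: each level's width and height are minified, converted to block counts for the format, rounded up to the tiling alignment and multiplied by the block size, accumulating the total offset. A hedged sketch of the per-level arithmetic for an uncompressed (1x1-block) format, ignoring faces and depth:

#include <stddef.h>

/* Never minify below one texel. */
static unsigned minify(unsigned size, unsigned level)
{
	unsigned v = size >> level;
	return v ? v : 1;
}

static unsigned align_up(unsigned x, unsigned align)
{
	return (x + align - 1) / align * align;
}

/* Bytes needed for mip levels [blevel, llevel] of a w0 x h0 texture. */
static size_t texture_bytes(unsigned w0, unsigned h0,
			    unsigned blevel, unsigned llevel,
			    unsigned blocksize,
			    unsigned pitch_align, unsigned height_align)
{
	size_t total = 0;
	unsigned level;

	for (level = blevel; level <= llevel; level++) {
		unsigned nbx = align_up(minify(w0, level), pitch_align);
		unsigned nby = align_up(minify(h0, level), height_align);
		total += (size_t)nbx * nby * blocksize;
	}
	return total;
}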
@@ -1327,7 +1453,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | |||
1327 | return -EINVAL; | 1453 | return -EINVAL; |
1328 | } | 1454 | } |
1329 | format = G_038004_DATA_FORMAT(word1); | 1455 | format = G_038004_DATA_FORMAT(word1); |
1330 | if (!fmt_is_valid_texture(format, p->family)) { | 1456 | if (!r600_fmt_is_valid_texture(format, p->family)) { |
1331 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", | 1457 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", |
1332 | __func__, __LINE__, format); | 1458 | __func__, __LINE__, format); |
1333 | return -EINVAL; | 1459 | return -EINVAL; |
@@ -1340,7 +1466,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | |||
1340 | array_check.nbanks = track->nbanks; | 1466 | array_check.nbanks = track->nbanks; |
1341 | array_check.npipes = track->npipes; | 1467 | array_check.npipes = track->npipes; |
1342 | array_check.nsamples = 1; | 1468 | array_check.nsamples = 1; |
1343 | array_check.blocksize = fmt_get_blocksize(format); | 1469 | array_check.blocksize = r600_fmt_get_blocksize(format); |
1344 | if (r600_get_array_mode_alignment(&array_check, | 1470 | if (r600_get_array_mode_alignment(&array_check, |
1345 | &pitch_align, &height_align, &depth_align, &base_align)) { | 1471 | &pitch_align, &height_align, &depth_align, &base_align)) { |
1346 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", | 1472 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", |
@@ -1373,6 +1499,10 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | |||
1373 | word1 = radeon_get_ib_value(p, idx + 5); | 1499 | word1 = radeon_get_ib_value(p, idx + 5); |
1374 | blevel = G_038010_BASE_LEVEL(word0); | 1500 | blevel = G_038010_BASE_LEVEL(word0); |
1375 | llevel = G_038014_LAST_LEVEL(word1); | 1501 | llevel = G_038014_LAST_LEVEL(word1); |
1502 | if (blevel > llevel) { | ||
1503 | dev_warn(p->dev, "texture blevel %d > llevel %d\n", | ||
1504 | blevel, llevel); | ||
1505 | } | ||
1376 | if (array == 1) { | 1506 | if (array == 1) { |
1377 | barray = G_038014_BASE_ARRAY(word1); | 1507 | barray = G_038014_BASE_ARRAY(word1); |
1378 | larray = G_038014_LAST_ARRAY(word1); | 1508 | larray = G_038014_LAST_ARRAY(word1); |
@@ -1384,8 +1514,10 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | |||
1384 | &l0_size, &mipmap_size); | 1514 | &l0_size, &mipmap_size); |
1385 | /* using get ib will give us the offset into the texture bo */ | 1515 | /* using get ib will give us the offset into the texture bo */ |
1386 | if ((l0_size + word2) > radeon_bo_size(texture)) { | 1516 | if ((l0_size + word2) > radeon_bo_size(texture)) { |
1387 | dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", | 1517 | dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", |
1388 | w0, h0, format, word2, l0_size, radeon_bo_size(texture)); | 1518 | w0, h0, pitch_align, height_align, |
1519 | array_check.array_mode, format, word2, | ||
1520 | l0_size, radeon_bo_size(texture)); | ||
1389 | dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); | 1521 | dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); |
1390 | return -EINVAL; | 1522 | return -EINVAL; |
1391 | } | 1523 | } |
@@ -1398,6 +1530,22 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, | |||
1398 | return 0; | 1530 | return 0; |
1399 | } | 1531 | } |
1400 | 1532 | ||
1533 | static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | ||
1534 | { | ||
1535 | u32 m, i; | ||
1536 | |||
1537 | i = (reg >> 7); | ||
1538 | if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { | ||
1539 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
1540 | return false; | ||
1541 | } | ||
1542 | m = 1 << ((reg >> 2) & 31); | ||
1543 | if (!(r600_reg_safe_bm[i] & m)) | ||
1544 | return true; | ||
1545 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
1546 | return false; | ||
1547 | } | ||
1548 | |||
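r600_is_safe_reg() above indexes a bitmap with one bit per register dword: reg >> 7 picks the 32-bit word (each word covers 32 registers, i.e. 128 bytes of register space) and (reg >> 2) & 31 picks the bit within it. The sense is inverted: a set bit marks a register COPY_DW may not touch, so a clear bit means safe. A tiny usage sketch against a stand-in bitmap:

#include <stdbool.h>
#include <stdint.h>

#define NUM_WORDS 1024                           /* illustrative size */
static const uint32_t reg_unsafe_bm[NUM_WORDS];  /* set bit => forbidden */

static bool reg_is_safe(uint32_t reg)
{
	uint32_t word = reg >> 7;                /* 128 bytes of register space per word */
	uint32_t bit  = 1u << ((reg >> 2) & 31);

	if (word >= NUM_WORDS)
		return false;
	return !(reg_unsafe_bm[word] & bit);
}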
1401 | static int r600_packet3_check(struct radeon_cs_parser *p, | 1549 | static int r600_packet3_check(struct radeon_cs_parser *p, |
1402 | struct radeon_cs_packet *pkt) | 1550 | struct radeon_cs_packet *pkt) |
1403 | { | 1551 | { |
@@ -1420,6 +1568,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1420 | { | 1568 | { |
1421 | int pred_op; | 1569 | int pred_op; |
1422 | int tmp; | 1570 | int tmp; |
1571 | uint64_t offset; | ||
1572 | |||
1423 | if (pkt->count != 1) { | 1573 | if (pkt->count != 1) { |
1424 | DRM_ERROR("bad SET PREDICATION\n"); | 1574 | DRM_ERROR("bad SET PREDICATION\n"); |
1425 | return -EINVAL; | 1575 | return -EINVAL; |
@@ -1443,8 +1593,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1443 | return -EINVAL; | 1593 | return -EINVAL; |
1444 | } | 1594 | } |
1445 | 1595 | ||
1446 | ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1596 | offset = reloc->lobj.gpu_offset + |
1447 | ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff); | 1597 | (idx_value & 0xfffffff0) + |
1598 | ((u64)(tmp & 0xff) << 32); | ||
1599 | |||
1600 | ib[idx + 0] = offset; | ||
1601 | ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); | ||
1448 | } | 1602 | } |
1449 | break; | 1603 | break; |
1450 | 1604 | ||
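The SET_PREDICATION rewrite above is the pattern this patch applies to every address-carrying packet (DRAW_INDEX, WAIT_REG_MEM, EVENT_WRITE and EVENT_WRITE_EOP below): rather than adding the relocation's low and high halves to the two dwords independently, the full 64-bit address is rebuilt first and then split back into the packet's low dword (with any command bits below the alignment mask preserved) and the 8 address bits in the high dword. A sketch of that split; the mask is per packet (0xfffffff0, 0xfffffff8 or 0xfffffffc in these hunks):

#include <stdint.h>

static inline uint32_t upper_32(uint64_t v)
{
	return (uint32_t)(v >> 32);
}

/* Rebuild the GPU address from reloc + packet dwords, then split it again. */
static void patch_address(uint32_t *lo_dw, uint32_t *hi_dw,
			  uint64_t reloc_gpu_offset, uint32_t lo_mask)
{
	uint64_t addr = reloc_gpu_offset +
			(*lo_dw & lo_mask) +
			((uint64_t)(*hi_dw & 0xff) << 32);

	*lo_dw = (*lo_dw & ~lo_mask) | ((uint32_t)addr & lo_mask);
	*hi_dw = (*hi_dw & 0xffffff00) | (upper_32(addr) & 0xff);
}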
@@ -1468,6 +1622,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1468 | } | 1622 | } |
1469 | break; | 1623 | break; |
1470 | case PACKET3_DRAW_INDEX: | 1624 | case PACKET3_DRAW_INDEX: |
1625 | { | ||
1626 | uint64_t offset; | ||
1471 | if (pkt->count != 3) { | 1627 | if (pkt->count != 3) { |
1472 | DRM_ERROR("bad DRAW_INDEX\n"); | 1628 | DRM_ERROR("bad DRAW_INDEX\n"); |
1473 | return -EINVAL; | 1629 | return -EINVAL; |
@@ -1477,14 +1633,21 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1477 | DRM_ERROR("bad DRAW_INDEX\n"); | 1633 | DRM_ERROR("bad DRAW_INDEX\n"); |
1478 | return -EINVAL; | 1634 | return -EINVAL; |
1479 | } | 1635 | } |
1480 | ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1636 | |
1481 | ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1637 | offset = reloc->lobj.gpu_offset + |
1638 | idx_value + | ||
1639 | ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); | ||
1640 | |||
1641 | ib[idx+0] = offset; | ||
1642 | ib[idx+1] = upper_32_bits(offset) & 0xff; | ||
1643 | |||
1482 | r = r600_cs_track_check(p); | 1644 | r = r600_cs_track_check(p); |
1483 | if (r) { | 1645 | if (r) { |
1484 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); | 1646 | dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); |
1485 | return r; | 1647 | return r; |
1486 | } | 1648 | } |
1487 | break; | 1649 | break; |
1650 | } | ||
1488 | case PACKET3_DRAW_INDEX_AUTO: | 1651 | case PACKET3_DRAW_INDEX_AUTO: |
1489 | if (pkt->count != 1) { | 1652 | if (pkt->count != 1) { |
1490 | DRM_ERROR("bad DRAW_INDEX_AUTO\n"); | 1653 | DRM_ERROR("bad DRAW_INDEX_AUTO\n"); |
@@ -1515,13 +1678,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1515 | } | 1678 | } |
1516 | /* bit 4 is reg (0) or mem (1) */ | 1679 | /* bit 4 is reg (0) or mem (1) */ |
1517 | if (idx_value & 0x10) { | 1680 | if (idx_value & 0x10) { |
1681 | uint64_t offset; | ||
1682 | |||
1518 | r = r600_cs_packet_next_reloc(p, &reloc); | 1683 | r = r600_cs_packet_next_reloc(p, &reloc); |
1519 | if (r) { | 1684 | if (r) { |
1520 | DRM_ERROR("bad WAIT_REG_MEM\n"); | 1685 | DRM_ERROR("bad WAIT_REG_MEM\n"); |
1521 | return -EINVAL; | 1686 | return -EINVAL; |
1522 | } | 1687 | } |
1523 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1688 | |
1524 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1689 | offset = reloc->lobj.gpu_offset + |
1690 | (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + | ||
1691 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
1692 | |||
1693 | ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); | ||
1694 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
1525 | } | 1695 | } |
1526 | break; | 1696 | break; |
1527 | case PACKET3_SURFACE_SYNC: | 1697 | case PACKET3_SURFACE_SYNC: |
@@ -1546,16 +1716,25 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1546 | return -EINVAL; | 1716 | return -EINVAL; |
1547 | } | 1717 | } |
1548 | if (pkt->count) { | 1718 | if (pkt->count) { |
1719 | uint64_t offset; | ||
1720 | |||
1549 | r = r600_cs_packet_next_reloc(p, &reloc); | 1721 | r = r600_cs_packet_next_reloc(p, &reloc); |
1550 | if (r) { | 1722 | if (r) { |
1551 | DRM_ERROR("bad EVENT_WRITE\n"); | 1723 | DRM_ERROR("bad EVENT_WRITE\n"); |
1552 | return -EINVAL; | 1724 | return -EINVAL; |
1553 | } | 1725 | } |
1554 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1726 | offset = reloc->lobj.gpu_offset + |
1555 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1727 | (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
1728 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
1729 | |||
1730 | ib[idx+1] = offset & 0xfffffff8; | ||
1731 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
1556 | } | 1732 | } |
1557 | break; | 1733 | break; |
1558 | case PACKET3_EVENT_WRITE_EOP: | 1734 | case PACKET3_EVENT_WRITE_EOP: |
1735 | { | ||
1736 | uint64_t offset; | ||
1737 | |||
1559 | if (pkt->count != 4) { | 1738 | if (pkt->count != 4) { |
1560 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); | 1739 | DRM_ERROR("bad EVENT_WRITE_EOP\n"); |
1561 | return -EINVAL; | 1740 | return -EINVAL; |
@@ -1565,9 +1744,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1565 | DRM_ERROR("bad EVENT_WRITE\n"); | 1744 | DRM_ERROR("bad EVENT_WRITE\n"); |
1566 | return -EINVAL; | 1745 | return -EINVAL; |
1567 | } | 1746 | } |
1568 | ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); | 1747 | |
1569 | ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1748 | offset = reloc->lobj.gpu_offset + |
1749 | (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + | ||
1750 | ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); | ||
1751 | |||
1752 | ib[idx+1] = offset & 0xfffffffc; | ||
1753 | ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); | ||
1570 | break; | 1754 | break; |
1755 | } | ||
1571 | case PACKET3_SET_CONFIG_REG: | 1756 | case PACKET3_SET_CONFIG_REG: |
1572 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; | 1757 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET; |
1573 | end_reg = 4 * pkt->count + start_reg - 4; | 1758 | end_reg = 4 * pkt->count + start_reg - 4; |
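For the SET_CONFIG_REG context at the end of this hunk: the first payload dword is a dword offset into the config-register window, and pkt->count consecutive registers follow, so the checker derives the inclusive register range from those two values. A sketch of the range computation; the window base constant is assumed from r600d.h:

#include <stdint.h>

#define SET_CONFIG_REG_OFFSET 0x00008000  /* assumed; PACKET3_SET_CONFIG_REG_OFFSET in r600d.h */

static void config_reg_range(uint32_t idx_value, uint32_t count,
			     uint32_t *start_reg, uint32_t *end_reg)
{
	*start_reg = (idx_value << 2) + SET_CONFIG_REG_OFFSET;  /* dword offset -> byte address */
	*end_reg   = *start_reg + 4 * count - 4;                /* last register, inclusive */
}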
@@ -1652,6 +1837,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1652 | ib[idx+1+(i*7)+3] += mip_offset; | 1837 | ib[idx+1+(i*7)+3] += mip_offset; |
1653 | break; | 1838 | break; |
1654 | case SQ_TEX_VTX_VALID_BUFFER: | 1839 | case SQ_TEX_VTX_VALID_BUFFER: |
1840 | { | ||
1841 | uint64_t offset64; | ||
1655 | /* vtx base */ | 1842 | /* vtx base */ |
1656 | r = r600_cs_packet_next_reloc(p, &reloc); | 1843 | r = r600_cs_packet_next_reloc(p, &reloc); |
1657 | if (r) { | 1844 | if (r) { |
@@ -1664,11 +1851,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1664 | /* force size to size of the buffer */ | 1851 | /* force size to size of the buffer */ |
1665 | dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", | 1852 | dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", |
1666 | size + offset, radeon_bo_size(reloc->robj)); | 1853 | size + offset, radeon_bo_size(reloc->robj)); |
1667 | ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj); | 1854 | ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; |
1668 | } | 1855 | } |
1669 | ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); | 1856 | |
1670 | ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; | 1857 | offset64 = reloc->lobj.gpu_offset + offset; |
1858 | ib[idx+1+(i*8)+0] = offset64; | ||
1859 | ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | | ||
1860 | (upper_32_bits(offset64) & 0xff); | ||
1671 | break; | 1861 | break; |
1862 | } | ||
1672 | case SQ_TEX_VTX_INVALID_TEXTURE: | 1863 | case SQ_TEX_VTX_INVALID_TEXTURE: |
1673 | case SQ_TEX_VTX_INVALID_BUFFER: | 1864 | case SQ_TEX_VTX_INVALID_BUFFER: |
1674 | default: | 1865 | default: |
@@ -1743,6 +1934,104 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
1743 | return -EINVAL; | 1934 | return -EINVAL; |
1744 | } | 1935 | } |
1745 | break; | 1936 | break; |
1937 | case PACKET3_STRMOUT_BUFFER_UPDATE: | ||
1938 | if (pkt->count != 4) { | ||
1939 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); | ||
1940 | return -EINVAL; | ||
1941 | } | ||
1942 | /* Updating memory at DST_ADDRESS. */ | ||
1943 | if (idx_value & 0x1) { | ||
1944 | u64 offset; | ||
1945 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1946 | if (r) { | ||
1947 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); | ||
1948 | return -EINVAL; | ||
1949 | } | ||
1950 | offset = radeon_get_ib_value(p, idx+1); | ||
1951 | offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; | ||
1952 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
1953 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", | ||
1954 | offset + 4, radeon_bo_size(reloc->robj)); | ||
1955 | return -EINVAL; | ||
1956 | } | ||
1957 | offset += reloc->lobj.gpu_offset; | ||
1958 | ib[idx+1] = offset; | ||
1959 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
1960 | } | ||
1961 | /* Reading data from SRC_ADDRESS. */ | ||
1962 | if (((idx_value >> 1) & 0x3) == 2) { | ||
1963 | u64 offset; | ||
1964 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1965 | if (r) { | ||
1966 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); | ||
1967 | return -EINVAL; | ||
1968 | } | ||
1969 | offset = radeon_get_ib_value(p, idx+3); | ||
1970 | offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; | ||
1971 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
1972 | DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", | ||
1973 | offset + 4, radeon_bo_size(reloc->robj)); | ||
1974 | return -EINVAL; | ||
1975 | } | ||
1976 | offset += reloc->lobj.gpu_offset; | ||
1977 | ib[idx+3] = offset; | ||
1978 | ib[idx+4] = upper_32_bits(offset) & 0xff; | ||
1979 | } | ||
1980 | break; | ||
1981 | case PACKET3_COPY_DW: | ||
1982 | if (pkt->count != 4) { | ||
1983 | DRM_ERROR("bad COPY_DW (invalid count)\n"); | ||
1984 | return -EINVAL; | ||
1985 | } | ||
1986 | if (idx_value & 0x1) { | ||
1987 | u64 offset; | ||
1988 | /* SRC is memory. */ | ||
1989 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
1990 | if (r) { | ||
1991 | DRM_ERROR("bad COPY_DW (missing src reloc)\n"); | ||
1992 | return -EINVAL; | ||
1993 | } | ||
1994 | offset = radeon_get_ib_value(p, idx+1); | ||
1995 | offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; | ||
1996 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
1997 | DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", | ||
1998 | offset + 4, radeon_bo_size(reloc->robj)); | ||
1999 | return -EINVAL; | ||
2000 | } | ||
2001 | offset += reloc->lobj.gpu_offset; | ||
2002 | ib[idx+1] = offset; | ||
2003 | ib[idx+2] = upper_32_bits(offset) & 0xff; | ||
2004 | } else { | ||
2005 | /* SRC is a reg. */ | ||
2006 | reg = radeon_get_ib_value(p, idx+1) << 2; | ||
2007 | if (!r600_is_safe_reg(p, reg, idx+1)) | ||
2008 | return -EINVAL; | ||
2009 | } | ||
2010 | if (idx_value & 0x2) { | ||
2011 | u64 offset; | ||
2012 | /* DST is memory. */ | ||
2013 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
2014 | if (r) { | ||
2015 | DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); | ||
2016 | return -EINVAL; | ||
2017 | } | ||
2018 | offset = radeon_get_ib_value(p, idx+3); | ||
2019 | offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; | ||
2020 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
2021 | DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", | ||
2022 | offset + 4, radeon_bo_size(reloc->robj)); | ||
2023 | return -EINVAL; | ||
2024 | } | ||
2025 | offset += reloc->lobj.gpu_offset; | ||
2026 | ib[idx+3] = offset; | ||
2027 | ib[idx+4] = upper_32_bits(offset) & 0xff; | ||
2028 | } else { | ||
2029 | /* DST is a reg. */ | ||
2030 | reg = radeon_get_ib_value(p, idx+3) << 2; | ||
2031 | if (!r600_is_safe_reg(p, reg, idx+3)) | ||
2032 | return -EINVAL; | ||
2033 | } | ||
2034 | break; | ||
1746 | case PACKET3_NOP: | 2035 | case PACKET3_NOP: |
1747 | break; | 2036 | break; |
1748 | default: | 2037 | default: |
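The two new packets above share one validation shape: every operand flagged as memory must come with a relocation, the offset taken from the packet is checked so the 4-byte access stays inside the BO before the address is rewritten, and operands flagged as registers are vetted through r600_is_safe_reg() instead. A compact sketch of the COPY_DW control-word decode and the bounds test:

#include <stdbool.h>
#include <stdint.h>

/* COPY_DW control word: bit 0 = source is memory, bit 1 = destination is memory. */
static bool copy_dw_src_is_mem(uint32_t control) { return control & 0x1; }
static bool copy_dw_dst_is_mem(uint32_t control) { return control & 0x2; }

/* A memory operand is a single dword access; it must fit in the buffer object. */
static bool dword_in_bounds(uint64_t offset, uint64_t bo_size)
{
	return offset + 4 <= bo_size;
}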
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 9b23670716f1..8ae328ff5fdd 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -78,6 +78,20 @@ | |||
78 | 78 | ||
79 | #define CB_COLOR0_SIZE 0x28060 | 79 | #define CB_COLOR0_SIZE 0x28060 |
80 | #define CB_COLOR0_VIEW 0x28080 | 80 | #define CB_COLOR0_VIEW 0x28080 |
81 | #define R_028080_CB_COLOR0_VIEW 0x028080 | ||
82 | #define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0) | ||
83 | #define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF) | ||
84 | #define C_028080_SLICE_START 0xFFFFF800 | ||
85 | #define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13) | ||
86 | #define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF) | ||
87 | #define C_028080_SLICE_MAX 0xFF001FFF | ||
88 | #define R_028084_CB_COLOR1_VIEW 0x028084 | ||
89 | #define R_028088_CB_COLOR2_VIEW 0x028088 | ||
90 | #define R_02808C_CB_COLOR3_VIEW 0x02808C | ||
91 | #define R_028090_CB_COLOR4_VIEW 0x028090 | ||
92 | #define R_028094_CB_COLOR5_VIEW 0x028094 | ||
93 | #define R_028098_CB_COLOR6_VIEW 0x028098 | ||
94 | #define R_02809C_CB_COLOR7_VIEW 0x02809C | ||
81 | #define CB_COLOR0_INFO 0x280a0 | 95 | #define CB_COLOR0_INFO 0x280a0 |
82 | # define CB_FORMAT(x) ((x) << 2) | 96 | # define CB_FORMAT(x) ((x) << 2) |
83 | # define CB_ARRAY_MODE(x) ((x) << 8) | 97 | # define CB_ARRAY_MODE(x) ((x) << 8) |
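The new R_028080_CB_COLOR0_VIEW block follows the usual r600d.h field-macro convention: S_*() shifts a value into the field, G_*() extracts it, and C_* is the mask that clears it before setting. For the two 11-bit fields defined above (SLICE_START at bit 0, SLICE_MAX at bit 13), a usage sketch:

#include <stdint.h>

#define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0)
#define C_028080_SLICE_START    0xFFFFF800
#define G_028080_SLICE_MAX(x)   (((x) >> 13) & 0x7FF)

/* Replace SLICE_START in a CB_COLOR0_VIEW value, keeping the other fields. */
static uint32_t cb_view_set_slice_start(uint32_t view, uint32_t start)
{
	return (view & C_028080_SLICE_START) | S_028080_SLICE_START(start);
}

/* Read SLICE_MAX back out of the same register value. */
static uint32_t cb_view_slice_max(uint32_t view)
{
	return G_028080_SLICE_MAX(view);
}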
@@ -493,6 +507,11 @@ | |||
493 | #define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC | 507 | #define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC |
494 | #define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC | 508 | #define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC |
495 | #define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C | 509 | #define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C |
510 | #define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0 | ||
511 | #define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0 | ||
512 | #define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0 | ||
513 | #define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00 | ||
514 | |||
496 | #define VGT_STRMOUT_EN 0x28AB0 | 515 | #define VGT_STRMOUT_EN 0x28AB0 |
497 | #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 | 516 | #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 |
498 | #define VTX_REUSE_DEPTH_MASK 0x000000FF | 517 | #define VTX_REUSE_DEPTH_MASK 0x000000FF |
@@ -835,6 +854,7 @@ | |||
835 | # define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) | 854 | # define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) |
836 | # define PACKET3_SEM_SEL_WAIT (0x7 << 29) | 855 | # define PACKET3_SEM_SEL_WAIT (0x7 << 29) |
837 | #define PACKET3_MPEG_INDEX 0x3A | 856 | #define PACKET3_MPEG_INDEX 0x3A |
857 | #define PACKET3_COPY_DW 0x3B | ||
838 | #define PACKET3_WAIT_REG_MEM 0x3C | 858 | #define PACKET3_WAIT_REG_MEM 0x3C |
839 | #define PACKET3_MEM_WRITE 0x3D | 859 | #define PACKET3_MEM_WRITE 0x3D |
840 | #define PACKET3_INDIRECT_BUFFER 0x32 | 860 | #define PACKET3_INDIRECT_BUFFER 0x32 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1668ec1ee770..d2870a014ec0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -242,6 +242,9 @@ extern int rv6xx_get_temp(struct radeon_device *rdev); | |||
242 | extern int rv770_get_temp(struct radeon_device *rdev); | 242 | extern int rv770_get_temp(struct radeon_device *rdev); |
243 | extern int evergreen_get_temp(struct radeon_device *rdev); | 243 | extern int evergreen_get_temp(struct radeon_device *rdev); |
244 | extern int sumo_get_temp(struct radeon_device *rdev); | 244 | extern int sumo_get_temp(struct radeon_device *rdev); |
245 | extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, | ||
246 | unsigned *bankh, unsigned *mtaspect, | ||
247 | unsigned *tile_split); | ||
245 | 248 | ||
246 | /* | 249 | /* |
247 | * Fences. | 250 | * Fences. |
@@ -411,9 +414,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
411 | int alignment, int initial_domain, | 414 | int alignment, int initial_domain, |
412 | bool discardable, bool kernel, | 415 | bool discardable, bool kernel, |
413 | struct drm_gem_object **obj); | 416 | struct drm_gem_object **obj); |
414 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | ||
415 | uint64_t *gpu_addr); | ||
416 | void radeon_gem_object_unpin(struct drm_gem_object *obj); | ||
417 | 417 | ||
418 | int radeon_mode_dumb_create(struct drm_file *file_priv, | 418 | int radeon_mode_dumb_create(struct drm_file *file_priv, |
419 | struct drm_device *dev, | 419 | struct drm_device *dev, |
@@ -780,7 +780,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev); | |||
780 | void radeon_ib_pool_fini(struct radeon_device *rdev); | 780 | void radeon_ib_pool_fini(struct radeon_device *rdev); |
781 | int radeon_ib_pool_start(struct radeon_device *rdev); | 781 | int radeon_ib_pool_start(struct radeon_device *rdev); |
782 | int radeon_ib_pool_suspend(struct radeon_device *rdev); | 782 | int radeon_ib_pool_suspend(struct radeon_device *rdev); |
783 | int radeon_ib_test(struct radeon_device *rdev); | ||
784 | /* Ring access between begin & end cannot sleep */ | 783 | /* Ring access between begin & end cannot sleep */ |
785 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); | 784 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); |
786 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); | 785 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); |
@@ -833,7 +832,6 @@ struct radeon_cs_parser { | |||
833 | struct radeon_cs_reloc *relocs; | 832 | struct radeon_cs_reloc *relocs; |
834 | struct radeon_cs_reloc **relocs_ptr; | 833 | struct radeon_cs_reloc **relocs_ptr; |
835 | struct list_head validated; | 834 | struct list_head validated; |
836 | bool sync_to_ring[RADEON_NUM_RINGS]; | ||
837 | /* indices of various chunks */ | 835 | /* indices of various chunks */ |
838 | int chunk_ib_idx; | 836 | int chunk_ib_idx; |
839 | int chunk_relocs_idx; | 837 | int chunk_relocs_idx; |
@@ -1132,57 +1130,6 @@ struct radeon_asic { | |||
1132 | void (*vga_set_state)(struct radeon_device *rdev, bool state); | 1130 | void (*vga_set_state)(struct radeon_device *rdev, bool state); |
1133 | bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); | 1131 | bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); |
1134 | int (*asic_reset)(struct radeon_device *rdev); | 1132 | int (*asic_reset)(struct radeon_device *rdev); |
1135 | void (*gart_tlb_flush)(struct radeon_device *rdev); | ||
1136 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); | ||
1137 | int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); | ||
1138 | void (*cp_fini)(struct radeon_device *rdev); | ||
1139 | void (*cp_disable)(struct radeon_device *rdev); | ||
1140 | void (*ring_start)(struct radeon_device *rdev); | ||
1141 | |||
1142 | struct { | ||
1143 | void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); | ||
1144 | int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); | ||
1145 | void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); | ||
1146 | void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, | ||
1147 | struct radeon_semaphore *semaphore, bool emit_wait); | ||
1148 | } ring[RADEON_NUM_RINGS]; | ||
1149 | |||
1150 | int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); | ||
1151 | int (*irq_set)(struct radeon_device *rdev); | ||
1152 | int (*irq_process)(struct radeon_device *rdev); | ||
1153 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | ||
1154 | int (*cs_parse)(struct radeon_cs_parser *p); | ||
1155 | int (*copy_blit)(struct radeon_device *rdev, | ||
1156 | uint64_t src_offset, | ||
1157 | uint64_t dst_offset, | ||
1158 | unsigned num_gpu_pages, | ||
1159 | struct radeon_fence *fence); | ||
1160 | int (*copy_dma)(struct radeon_device *rdev, | ||
1161 | uint64_t src_offset, | ||
1162 | uint64_t dst_offset, | ||
1163 | unsigned num_gpu_pages, | ||
1164 | struct radeon_fence *fence); | ||
1165 | int (*copy)(struct radeon_device *rdev, | ||
1166 | uint64_t src_offset, | ||
1167 | uint64_t dst_offset, | ||
1168 | unsigned num_gpu_pages, | ||
1169 | struct radeon_fence *fence); | ||
1170 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); | ||
1171 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); | ||
1172 | uint32_t (*get_memory_clock)(struct radeon_device *rdev); | ||
1173 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); | ||
1174 | int (*get_pcie_lanes)(struct radeon_device *rdev); | ||
1175 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | ||
1176 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | ||
1177 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, | ||
1178 | uint32_t tiling_flags, uint32_t pitch, | ||
1179 | uint32_t offset, uint32_t obj_size); | ||
1180 | void (*clear_surface_reg)(struct radeon_device *rdev, int reg); | ||
1181 | void (*bandwidth_update)(struct radeon_device *rdev); | ||
1182 | void (*hpd_init)(struct radeon_device *rdev); | ||
1183 | void (*hpd_fini)(struct radeon_device *rdev); | ||
1184 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
1185 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
1186 | /* ioctl hw specific callback. Some hw might want to perform special | 1133 | /* ioctl hw specific callback. Some hw might want to perform special |
1187 | * operation on specific ioctl. For instance on wait idle some hw | 1134 | * operation on specific ioctl. For instance on wait idle some hw |
1188 | * might want to perform an HDP flush through MMIO as it seems that | 1135 | * might want to perform an HDP flush through MMIO as it seems that |
@@ -1190,17 +1137,99 @@ struct radeon_asic { | |||
1190 | * through ring. | 1137 | * through ring. |
1191 | */ | 1138 | */ |
1192 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); | 1139 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); |
1140 | /* check if 3D engine is idle */ | ||
1193 | bool (*gui_idle)(struct radeon_device *rdev); | 1141 | bool (*gui_idle)(struct radeon_device *rdev); |
1142 | /* wait for mc_idle */ | ||
1143 | int (*mc_wait_for_idle)(struct radeon_device *rdev); | ||
1144 | /* gart */ | ||
1145 | struct { | ||
1146 | void (*tlb_flush)(struct radeon_device *rdev); | ||
1147 | int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); | ||
1148 | } gart; | ||
1149 | /* ring specific callbacks */ | ||
1150 | struct { | ||
1151 | void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); | ||
1152 | int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); | ||
1153 | void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); | ||
1154 | void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, | ||
1155 | struct radeon_semaphore *semaphore, bool emit_wait); | ||
1156 | int (*cs_parse)(struct radeon_cs_parser *p); | ||
1157 | void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp); | ||
1158 | int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); | ||
1159 | int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); | ||
1160 | } ring[RADEON_NUM_RINGS]; | ||
1161 | /* irqs */ | ||
1162 | struct { | ||
1163 | int (*set)(struct radeon_device *rdev); | ||
1164 | int (*process)(struct radeon_device *rdev); | ||
1165 | } irq; | ||
1166 | /* displays */ | ||
1167 | struct { | ||
1168 | /* display watermarks */ | ||
1169 | void (*bandwidth_update)(struct radeon_device *rdev); | ||
1170 | /* get frame count */ | ||
1171 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | ||
1172 | /* wait for vblank */ | ||
1173 | void (*wait_for_vblank)(struct radeon_device *rdev, int crtc); | ||
1174 | } display; | ||
1175 | /* copy functions for bo handling */ | ||
1176 | struct { | ||
1177 | int (*blit)(struct radeon_device *rdev, | ||
1178 | uint64_t src_offset, | ||
1179 | uint64_t dst_offset, | ||
1180 | unsigned num_gpu_pages, | ||
1181 | struct radeon_fence *fence); | ||
1182 | u32 blit_ring_index; | ||
1183 | int (*dma)(struct radeon_device *rdev, | ||
1184 | uint64_t src_offset, | ||
1185 | uint64_t dst_offset, | ||
1186 | unsigned num_gpu_pages, | ||
1187 | struct radeon_fence *fence); | ||
1188 | u32 dma_ring_index; | ||
1189 | /* method used for bo copy */ | ||
1190 | int (*copy)(struct radeon_device *rdev, | ||
1191 | uint64_t src_offset, | ||
1192 | uint64_t dst_offset, | ||
1193 | unsigned num_gpu_pages, | ||
1194 | struct radeon_fence *fence); | ||
1195 | /* ring used for bo copies */ | ||
1196 | u32 copy_ring_index; | ||
1197 | } copy; | ||
1198 | /* surfaces */ | ||
1199 | struct { | ||
1200 | int (*set_reg)(struct radeon_device *rdev, int reg, | ||
1201 | uint32_t tiling_flags, uint32_t pitch, | ||
1202 | uint32_t offset, uint32_t obj_size); | ||
1203 | void (*clear_reg)(struct radeon_device *rdev, int reg); | ||
1204 | } surface; | ||
1205 | /* hotplug detect */ | ||
1206 | struct { | ||
1207 | void (*init)(struct radeon_device *rdev); | ||
1208 | void (*fini)(struct radeon_device *rdev); | ||
1209 | bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
1210 | void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
1211 | } hpd; | ||
1194 | /* power management */ | 1212 | /* power management */ |
1195 | void (*pm_misc)(struct radeon_device *rdev); | 1213 | struct { |
1196 | void (*pm_prepare)(struct radeon_device *rdev); | 1214 | void (*misc)(struct radeon_device *rdev); |
1197 | void (*pm_finish)(struct radeon_device *rdev); | 1215 | void (*prepare)(struct radeon_device *rdev); |
1198 | void (*pm_init_profile)(struct radeon_device *rdev); | 1216 | void (*finish)(struct radeon_device *rdev); |
1199 | void (*pm_get_dynpm_state)(struct radeon_device *rdev); | 1217 | void (*init_profile)(struct radeon_device *rdev); |
1218 | void (*get_dynpm_state)(struct radeon_device *rdev); | ||
1219 | uint32_t (*get_engine_clock)(struct radeon_device *rdev); | ||
1220 | void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); | ||
1221 | uint32_t (*get_memory_clock)(struct radeon_device *rdev); | ||
1222 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); | ||
1223 | int (*get_pcie_lanes)(struct radeon_device *rdev); | ||
1224 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | ||
1225 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | ||
1226 | } pm; | ||
1200 | /* pageflipping */ | 1227 | /* pageflipping */ |
1201 | void (*pre_page_flip)(struct radeon_device *rdev, int crtc); | 1228 | struct { |
1202 | u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); | 1229 | void (*pre_page_flip)(struct radeon_device *rdev, int crtc); |
1203 | void (*post_page_flip)(struct radeon_device *rdev, int crtc); | 1230 | u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); |
1231 | void (*post_page_flip)(struct radeon_device *rdev, int crtc); | ||
1232 | } pflip; | ||
1204 | }; | 1233 | }; |
1205 | 1234 | ||
1206 | /* | 1235 | /* |
@@ -1491,8 +1520,6 @@ struct radeon_device { | |||
1491 | unsigned debugfs_count; | 1520 | unsigned debugfs_count; |
1492 | /* virtual memory */ | 1521 | /* virtual memory */ |
1493 | struct radeon_vm_manager vm_manager; | 1522 | struct radeon_vm_manager vm_manager; |
1494 | /* ring used for bo copies */ | ||
1495 | u32 copy_ring; | ||
1496 | }; | 1523 | }; |
1497 | 1524 | ||
1498 | int radeon_device_init(struct radeon_device *rdev, | 1525 | int radeon_device_init(struct radeon_device *rdev, |
@@ -1648,47 +1675,53 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); | |||
1648 | #define radeon_fini(rdev) (rdev)->asic->fini((rdev)) | 1675 | #define radeon_fini(rdev) (rdev)->asic->fini((rdev)) |
1649 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) | 1676 | #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) |
1650 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) | 1677 | #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) |
1651 | #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) | 1678 | #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) |
1652 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 1679 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
1653 | #define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp)) | 1680 | #define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp)) |
1654 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) | 1681 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
1655 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) | 1682 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
1656 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) | 1683 | #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) |
1657 | #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) | 1684 | #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) |
1658 | #define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp)) | 1685 | #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) |
1686 | #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) | ||
1659 | #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) | 1687 | #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) |
1660 | #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) | 1688 | #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) |
1661 | #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) | 1689 | #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) |
1662 | #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) | 1690 | #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) |
1663 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) | 1691 | #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) |
1664 | #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) | 1692 | #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) |
1665 | #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) | 1693 | #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) |
1666 | #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) | 1694 | #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) |
1667 | #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) | 1695 | #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) |
1668 | #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) | 1696 | #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) |
1669 | #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) | 1697 | #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index |
1670 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 1698 | #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index |
1671 | #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) | 1699 | #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index |
1672 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) | 1700 | #define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev)) |
1673 | #define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev)) | 1701 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e)) |
1674 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 1702 | #define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev)) |
1675 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 1703 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e)) |
1676 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | 1704 | #define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev)) |
1677 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | 1705 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) |
1678 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | 1706 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) |
1679 | #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) | 1707 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) |
1680 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) | 1708 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) |
1681 | #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) | 1709 | #define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) |
1682 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) | 1710 | #define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev)) |
1711 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev)) | ||
1712 | #define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h)) | ||
1713 | #define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h)) | ||
1683 | #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) | 1714 | #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) |
1684 | #define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) | 1715 | #define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev)) |
1685 | #define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) | 1716 | #define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev)) |
1686 | #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) | 1717 | #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) |
1687 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) | 1718 | #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) |
1688 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) | 1719 | #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) |
1689 | #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) | 1720 | #define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) |
1690 | #define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) | 1721 | #define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) |
1691 | #define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) | 1722 | #define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) |
1723 | #define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) | ||
1724 | #define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) | ||
1692 | 1725 | ||
1693 | /* Common functions */ | 1726 | /* Common functions */ |
1694 | /* AGP */ | 1727 | /* AGP */ |
@@ -1750,6 +1783,16 @@ int r600_vram_scratch_init(struct radeon_device *rdev); | |||
1750 | void r600_vram_scratch_fini(struct radeon_device *rdev); | 1783 | void r600_vram_scratch_fini(struct radeon_device *rdev); |
1751 | 1784 | ||
1752 | /* | 1785 | /* |
1786 | * r600 cs checking helper | ||
1787 | */ | ||
1788 | unsigned r600_mip_minify(unsigned size, unsigned level); | ||
1789 | bool r600_fmt_is_valid_color(u32 format); | ||
1790 | bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family); | ||
1791 | int r600_fmt_get_blocksize(u32 format); | ||
1792 | int r600_fmt_get_nblocksx(u32 format, u32 w); | ||
1793 | int r600_fmt_get_nblocksy(u32 format, u32 h); | ||
1794 | |||
1795 | /* | ||
1753 | * r600 functions used by radeon_encoder.c | 1796 | * r600 functions used by radeon_encoder.c |
1754 | */ | 1797 | */ |
1755 | extern void r600_hdmi_enable(struct drm_encoder *encoder); | 1798 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
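The radeon.h changes regroup the flat radeon_asic callback list into per-subsystem structs (gart, ring[], irq, display, copy, surface, hpd, pm, pflip) and repoint the radeon_*() wrapper macros at the new paths; the per-device copy_ring field is replaced by copy.copy_ring_index read through radeon_copy_ring_index(). A reduced sketch of the same dispatch pattern with stand-in types, not the driver's actual definitions:

#include <stdint.h>

struct dev;  /* stand-in for struct radeon_device */

struct asic_ops {
	struct {
		int (*copy)(struct dev *d, uint64_t src, uint64_t dst, unsigned pages);
		uint32_t copy_ring_index;
	} copy;
	struct {
		int (*set)(struct dev *d);
		int (*process)(struct dev *d);
	} irq;
};

struct dev {
	const struct asic_ops *asic;
};

/* Same shape as the radeon_copy()/radeon_irq_set() macros after this patch. */
#define dev_copy(d, s, t, np)     ((d)->asic->copy.copy((d), (s), (t), (np)))
#define dev_copy_ring_index(d)    ((d)->asic->copy.copy_ring_index)
#define dev_irq_set(d)            ((d)->asic->irq.set((d)))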
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 36a6192ce862..479c89e0af17 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -114,13 +114,13 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
114 | rdev->family == CHIP_R423) { | 114 | rdev->family == CHIP_R423) { |
115 | DRM_INFO("Forcing AGP to PCIE mode\n"); | 115 | DRM_INFO("Forcing AGP to PCIE mode\n"); |
116 | rdev->flags |= RADEON_IS_PCIE; | 116 | rdev->flags |= RADEON_IS_PCIE; |
117 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; | 117 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
118 | rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; | 118 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
119 | } else { | 119 | } else { |
120 | DRM_INFO("Forcing AGP to PCI mode\n"); | 120 | DRM_INFO("Forcing AGP to PCI mode\n"); |
121 | rdev->flags |= RADEON_IS_PCI; | 121 | rdev->flags |= RADEON_IS_PCI; |
122 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | 122 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
123 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | 123 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
124 | } | 124 | } |
125 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 125 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
126 | } | 126 | } |
@@ -136,48 +136,70 @@ static struct radeon_asic r100_asic = { | |||
136 | .vga_set_state = &r100_vga_set_state, | 136 | .vga_set_state = &r100_vga_set_state, |
137 | .gpu_is_lockup = &r100_gpu_is_lockup, | 137 | .gpu_is_lockup = &r100_gpu_is_lockup, |
138 | .asic_reset = &r100_asic_reset, | 138 | .asic_reset = &r100_asic_reset, |
139 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 139 | .ioctl_wait_idle = NULL, |
140 | .gart_set_page = &r100_pci_gart_set_page, | 140 | .gui_idle = &r100_gui_idle, |
141 | .ring_start = &r100_ring_start, | 141 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
142 | .ring_test = &r100_ring_test, | 142 | .gart = { |
143 | .tlb_flush = &r100_pci_gart_tlb_flush, | ||
144 | .set_page = &r100_pci_gart_set_page, | ||
145 | }, | ||
143 | .ring = { | 146 | .ring = { |
144 | [RADEON_RING_TYPE_GFX_INDEX] = { | 147 | [RADEON_RING_TYPE_GFX_INDEX] = { |
145 | .ib_execute = &r100_ring_ib_execute, | 148 | .ib_execute = &r100_ring_ib_execute, |
146 | .emit_fence = &r100_fence_ring_emit, | 149 | .emit_fence = &r100_fence_ring_emit, |
147 | .emit_semaphore = &r100_semaphore_ring_emit, | 150 | .emit_semaphore = &r100_semaphore_ring_emit, |
151 | .cs_parse = &r100_cs_parse, | ||
152 | .ring_start = &r100_ring_start, | ||
153 | .ring_test = &r100_ring_test, | ||
154 | .ib_test = &r100_ib_test, | ||
148 | } | 155 | } |
149 | }, | 156 | }, |
150 | .irq_set = &r100_irq_set, | 157 | .irq = { |
151 | .irq_process = &r100_irq_process, | 158 | .set = &r100_irq_set, |
152 | .get_vblank_counter = &r100_get_vblank_counter, | 159 | .process = &r100_irq_process, |
153 | .cs_parse = &r100_cs_parse, | 160 | }, |
154 | .copy_blit = &r100_copy_blit, | 161 | .display = { |
155 | .copy_dma = NULL, | 162 | .bandwidth_update = &r100_bandwidth_update, |
156 | .copy = &r100_copy_blit, | 163 | .get_vblank_counter = &r100_get_vblank_counter, |
157 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 164 | .wait_for_vblank = &r100_wait_for_vblank, |
158 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 165 | }, |
159 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 166 | .copy = { |
160 | .set_memory_clock = NULL, | 167 | .blit = &r100_copy_blit, |
161 | .get_pcie_lanes = NULL, | 168 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
162 | .set_pcie_lanes = NULL, | 169 | .dma = NULL, |
163 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 170 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
164 | .set_surface_reg = r100_set_surface_reg, | 171 | .copy = &r100_copy_blit, |
165 | .clear_surface_reg = r100_clear_surface_reg, | 172 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
166 | .bandwidth_update = &r100_bandwidth_update, | 173 | }, |
167 | .hpd_init = &r100_hpd_init, | 174 | .surface = { |
168 | .hpd_fini = &r100_hpd_fini, | 175 | .set_reg = r100_set_surface_reg, |
169 | .hpd_sense = &r100_hpd_sense, | 176 | .clear_reg = r100_clear_surface_reg, |
170 | .hpd_set_polarity = &r100_hpd_set_polarity, | 177 | }, |
171 | .ioctl_wait_idle = NULL, | 178 | .hpd = { |
172 | .gui_idle = &r100_gui_idle, | 179 | .init = &r100_hpd_init, |
173 | .pm_misc = &r100_pm_misc, | 180 | .fini = &r100_hpd_fini, |
174 | .pm_prepare = &r100_pm_prepare, | 181 | .sense = &r100_hpd_sense, |
175 | .pm_finish = &r100_pm_finish, | 182 | .set_polarity = &r100_hpd_set_polarity, |
176 | .pm_init_profile = &r100_pm_init_profile, | 183 | }, |
177 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 184 | .pm = { |
178 | .pre_page_flip = &r100_pre_page_flip, | 185 | .misc = &r100_pm_misc, |
179 | .page_flip = &r100_page_flip, | 186 | .prepare = &r100_pm_prepare, |
180 | .post_page_flip = &r100_post_page_flip, | 187 | .finish = &r100_pm_finish, |
188 | .init_profile = &r100_pm_init_profile, | ||
189 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
190 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
191 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
192 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
193 | .set_memory_clock = NULL, | ||
194 | .get_pcie_lanes = NULL, | ||
195 | .set_pcie_lanes = NULL, | ||
196 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
197 | }, | ||
198 | .pflip = { | ||
199 | .pre_page_flip = &r100_pre_page_flip, | ||
200 | .page_flip = &r100_page_flip, | ||
201 | .post_page_flip = &r100_post_page_flip, | ||
202 | }, | ||
181 | }; | 203 | }; |
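The r100 entry above shows the full shape of the regrouped table: a handful of top-level callbacks plus one sub-structure per functional area. A compact sketch reconstructed from that initializer (argument lists trimmed to the device pointer; radeon.h is authoritative for the real members and types):

    struct radeon_device;

    struct example_radeon_asic {
            /* top level: vga_set_state, gpu_is_lockup, asic_reset,
             * ioctl_wait_idle, gui_idle and the new mc_wait_for_idle */
            int (*mc_wait_for_idle)(struct radeon_device *rdev);

            struct {
                    void (*init)(struct radeon_device *rdev);
                    void (*fini)(struct radeon_device *rdev);
                    int  (*sense)(struct radeon_device *rdev);
                    void (*set_polarity)(struct radeon_device *rdev);
            } hpd;

            struct {
                    int (*set)(struct radeon_device *rdev);
                    int (*process)(struct radeon_device *rdev);
            } irq;

            /* and likewise: .gart (tlb_flush, set_page), .ring[] (ib_execute,
             * emit_fence, emit_semaphore, cs_parse, ring_start, ring_test,
             * ib_test), .display, .copy, .surface, .pm and .pflip --
             * grouped exactly as in the initializer above */
    };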
182 | 204 | ||
183 | static struct radeon_asic r200_asic = { | 205 | static struct radeon_asic r200_asic = { |
@@ -188,47 +210,70 @@ static struct radeon_asic r200_asic = { | |||
188 | .vga_set_state = &r100_vga_set_state, | 210 | .vga_set_state = &r100_vga_set_state, |
189 | .gpu_is_lockup = &r100_gpu_is_lockup, | 211 | .gpu_is_lockup = &r100_gpu_is_lockup, |
190 | .asic_reset = &r100_asic_reset, | 212 | .asic_reset = &r100_asic_reset, |
191 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 213 | .ioctl_wait_idle = NULL, |
192 | .gart_set_page = &r100_pci_gart_set_page, | 214 | .gui_idle = &r100_gui_idle, |
193 | .ring_start = &r100_ring_start, | 215 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
194 | .ring_test = &r100_ring_test, | 216 | .gart = { |
217 | .tlb_flush = &r100_pci_gart_tlb_flush, | ||
218 | .set_page = &r100_pci_gart_set_page, | ||
219 | }, | ||
195 | .ring = { | 220 | .ring = { |
196 | [RADEON_RING_TYPE_GFX_INDEX] = { | 221 | [RADEON_RING_TYPE_GFX_INDEX] = { |
197 | .ib_execute = &r100_ring_ib_execute, | 222 | .ib_execute = &r100_ring_ib_execute, |
198 | .emit_fence = &r100_fence_ring_emit, | 223 | .emit_fence = &r100_fence_ring_emit, |
199 | .emit_semaphore = &r100_semaphore_ring_emit, | 224 | .emit_semaphore = &r100_semaphore_ring_emit, |
225 | .cs_parse = &r100_cs_parse, | ||
226 | .ring_start = &r100_ring_start, | ||
227 | .ring_test = &r100_ring_test, | ||
228 | .ib_test = &r100_ib_test, | ||
200 | } | 229 | } |
201 | }, | 230 | }, |
202 | .irq_set = &r100_irq_set, | 231 | .irq = { |
203 | .irq_process = &r100_irq_process, | 232 | .set = &r100_irq_set, |
204 | .get_vblank_counter = &r100_get_vblank_counter, | 233 | .process = &r100_irq_process, |
205 | .cs_parse = &r100_cs_parse, | 234 | }, |
206 | .copy_blit = &r100_copy_blit, | 235 | .display = { |
207 | .copy_dma = &r200_copy_dma, | 236 | .bandwidth_update = &r100_bandwidth_update, |
208 | .copy = &r100_copy_blit, | 237 | .get_vblank_counter = &r100_get_vblank_counter, |
209 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 238 | .wait_for_vblank = &r100_wait_for_vblank, |
210 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 239 | }, |
211 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 240 | .copy = { |
212 | .set_memory_clock = NULL, | 241 | .blit = &r100_copy_blit, |
213 | .set_pcie_lanes = NULL, | 242 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
214 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 243 | .dma = &r200_copy_dma, |
215 | .set_surface_reg = r100_set_surface_reg, | 244 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
216 | .clear_surface_reg = r100_clear_surface_reg, | 245 | .copy = &r100_copy_blit, |
217 | .bandwidth_update = &r100_bandwidth_update, | 246 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
218 | .hpd_init = &r100_hpd_init, | 247 | }, |
219 | .hpd_fini = &r100_hpd_fini, | 248 | .surface = { |
220 | .hpd_sense = &r100_hpd_sense, | 249 | .set_reg = r100_set_surface_reg, |
221 | .hpd_set_polarity = &r100_hpd_set_polarity, | 250 | .clear_reg = r100_clear_surface_reg, |
222 | .ioctl_wait_idle = NULL, | 251 | }, |
223 | .gui_idle = &r100_gui_idle, | 252 | .hpd = { |
224 | .pm_misc = &r100_pm_misc, | 253 | .init = &r100_hpd_init, |
225 | .pm_prepare = &r100_pm_prepare, | 254 | .fini = &r100_hpd_fini, |
226 | .pm_finish = &r100_pm_finish, | 255 | .sense = &r100_hpd_sense, |
227 | .pm_init_profile = &r100_pm_init_profile, | 256 | .set_polarity = &r100_hpd_set_polarity, |
228 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 257 | }, |
229 | .pre_page_flip = &r100_pre_page_flip, | 258 | .pm = { |
230 | .page_flip = &r100_page_flip, | 259 | .misc = &r100_pm_misc, |
231 | .post_page_flip = &r100_post_page_flip, | 260 | .prepare = &r100_pm_prepare, |
261 | .finish = &r100_pm_finish, | ||
262 | .init_profile = &r100_pm_init_profile, | ||
263 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
264 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
265 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
266 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
267 | .set_memory_clock = NULL, | ||
268 | .get_pcie_lanes = NULL, | ||
269 | .set_pcie_lanes = NULL, | ||
270 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
271 | }, | ||
272 | .pflip = { | ||
273 | .pre_page_flip = &r100_pre_page_flip, | ||
274 | .page_flip = &r100_page_flip, | ||
275 | .post_page_flip = &r100_post_page_flip, | ||
276 | }, | ||
232 | }; | 277 | }; |
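These tables lean on C99 designated initializers: nested aggregates are filled by name, and the per-ring block uses an array designator, so any ring that is not listed stays zero-initialized (i.e. NULL callbacks). A self-contained illustration of the same pattern, with names invented for the example:

    #include <stdio.h>

    enum { EX_RING_GFX = 0, EX_NUM_RINGS = 3 };

    struct ex_ring_funcs {
            int (*ring_test)(void);
    };

    struct ex_asic {
            struct {
                    void (*tlb_flush)(void);
            } gart;
            struct ex_ring_funcs ring[EX_NUM_RINGS];
    };

    static int  ex_gfx_ring_test(void) { return 0; }
    static void ex_gart_flush(void)    { }

    /* only the designated members are set; ring[1] and ring[2] stay zeroed,
     * just like the unused rings in the tables above */
    static const struct ex_asic ex_r200_like = {
            .gart = {
                    .tlb_flush = &ex_gart_flush,
            },
            .ring = {
                    [EX_RING_GFX] = {
                            .ring_test = &ex_gfx_ring_test,
                    }
            },
    };

    int main(void)
    {
            printf("ring[1].ring_test is %s\n",
                   ex_r200_like.ring[1].ring_test ? "set" : "NULL");
            return 0;
    }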
233 | 278 | ||
234 | static struct radeon_asic r300_asic = { | 279 | static struct radeon_asic r300_asic = { |
@@ -239,48 +284,70 @@ static struct radeon_asic r300_asic = { | |||
239 | .vga_set_state = &r100_vga_set_state, | 284 | .vga_set_state = &r100_vga_set_state, |
240 | .gpu_is_lockup = &r300_gpu_is_lockup, | 285 | .gpu_is_lockup = &r300_gpu_is_lockup, |
241 | .asic_reset = &r300_asic_reset, | 286 | .asic_reset = &r300_asic_reset, |
242 | .gart_tlb_flush = &r100_pci_gart_tlb_flush, | 287 | .ioctl_wait_idle = NULL, |
243 | .gart_set_page = &r100_pci_gart_set_page, | 288 | .gui_idle = &r100_gui_idle, |
244 | .ring_start = &r300_ring_start, | 289 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
245 | .ring_test = &r100_ring_test, | 290 | .gart = { |
291 | .tlb_flush = &r100_pci_gart_tlb_flush, | ||
292 | .set_page = &r100_pci_gart_set_page, | ||
293 | }, | ||
246 | .ring = { | 294 | .ring = { |
247 | [RADEON_RING_TYPE_GFX_INDEX] = { | 295 | [RADEON_RING_TYPE_GFX_INDEX] = { |
248 | .ib_execute = &r100_ring_ib_execute, | 296 | .ib_execute = &r100_ring_ib_execute, |
249 | .emit_fence = &r300_fence_ring_emit, | 297 | .emit_fence = &r300_fence_ring_emit, |
250 | .emit_semaphore = &r100_semaphore_ring_emit, | 298 | .emit_semaphore = &r100_semaphore_ring_emit, |
299 | .cs_parse = &r300_cs_parse, | ||
300 | .ring_start = &r300_ring_start, | ||
301 | .ring_test = &r100_ring_test, | ||
302 | .ib_test = &r100_ib_test, | ||
251 | } | 303 | } |
252 | }, | 304 | }, |
253 | .irq_set = &r100_irq_set, | 305 | .irq = { |
254 | .irq_process = &r100_irq_process, | 306 | .set = &r100_irq_set, |
255 | .get_vblank_counter = &r100_get_vblank_counter, | 307 | .process = &r100_irq_process, |
256 | .cs_parse = &r300_cs_parse, | 308 | }, |
257 | .copy_blit = &r100_copy_blit, | 309 | .display = { |
258 | .copy_dma = &r200_copy_dma, | 310 | .bandwidth_update = &r100_bandwidth_update, |
259 | .copy = &r100_copy_blit, | 311 | .get_vblank_counter = &r100_get_vblank_counter, |
260 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 312 | .wait_for_vblank = &r100_wait_for_vblank, |
261 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 313 | }, |
262 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 314 | .copy = { |
263 | .set_memory_clock = NULL, | 315 | .blit = &r100_copy_blit, |
264 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 316 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
265 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 317 | .dma = &r200_copy_dma, |
266 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 318 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
267 | .set_surface_reg = r100_set_surface_reg, | 319 | .copy = &r100_copy_blit, |
268 | .clear_surface_reg = r100_clear_surface_reg, | 320 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
269 | .bandwidth_update = &r100_bandwidth_update, | 321 | }, |
270 | .hpd_init = &r100_hpd_init, | 322 | .surface = { |
271 | .hpd_fini = &r100_hpd_fini, | 323 | .set_reg = r100_set_surface_reg, |
272 | .hpd_sense = &r100_hpd_sense, | 324 | .clear_reg = r100_clear_surface_reg, |
273 | .hpd_set_polarity = &r100_hpd_set_polarity, | 325 | }, |
274 | .ioctl_wait_idle = NULL, | 326 | .hpd = { |
275 | .gui_idle = &r100_gui_idle, | 327 | .init = &r100_hpd_init, |
276 | .pm_misc = &r100_pm_misc, | 328 | .fini = &r100_hpd_fini, |
277 | .pm_prepare = &r100_pm_prepare, | 329 | .sense = &r100_hpd_sense, |
278 | .pm_finish = &r100_pm_finish, | 330 | .set_polarity = &r100_hpd_set_polarity, |
279 | .pm_init_profile = &r100_pm_init_profile, | 331 | }, |
280 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 332 | .pm = { |
281 | .pre_page_flip = &r100_pre_page_flip, | 333 | .misc = &r100_pm_misc, |
282 | .page_flip = &r100_page_flip, | 334 | .prepare = &r100_pm_prepare, |
283 | .post_page_flip = &r100_post_page_flip, | 335 | .finish = &r100_pm_finish, |
336 | .init_profile = &r100_pm_init_profile, | ||
337 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
338 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
339 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
340 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
341 | .set_memory_clock = NULL, | ||
342 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
343 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
344 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
345 | }, | ||
346 | .pflip = { | ||
347 | .pre_page_flip = &r100_pre_page_flip, | ||
348 | .page_flip = &r100_page_flip, | ||
349 | .post_page_flip = &r100_post_page_flip, | ||
350 | }, | ||
284 | }; | 351 | }; |
285 | 352 | ||
286 | static struct radeon_asic r300_asic_pcie = { | 353 | static struct radeon_asic r300_asic_pcie = { |
@@ -291,47 +358,70 @@ static struct radeon_asic r300_asic_pcie = { | |||
291 | .vga_set_state = &r100_vga_set_state, | 358 | .vga_set_state = &r100_vga_set_state, |
292 | .gpu_is_lockup = &r300_gpu_is_lockup, | 359 | .gpu_is_lockup = &r300_gpu_is_lockup, |
293 | .asic_reset = &r300_asic_reset, | 360 | .asic_reset = &r300_asic_reset, |
294 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 361 | .ioctl_wait_idle = NULL, |
295 | .gart_set_page = &rv370_pcie_gart_set_page, | 362 | .gui_idle = &r100_gui_idle, |
296 | .ring_start = &r300_ring_start, | 363 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
297 | .ring_test = &r100_ring_test, | 364 | .gart = { |
365 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
366 | .set_page = &rv370_pcie_gart_set_page, | ||
367 | }, | ||
298 | .ring = { | 368 | .ring = { |
299 | [RADEON_RING_TYPE_GFX_INDEX] = { | 369 | [RADEON_RING_TYPE_GFX_INDEX] = { |
300 | .ib_execute = &r100_ring_ib_execute, | 370 | .ib_execute = &r100_ring_ib_execute, |
301 | .emit_fence = &r300_fence_ring_emit, | 371 | .emit_fence = &r300_fence_ring_emit, |
302 | .emit_semaphore = &r100_semaphore_ring_emit, | 372 | .emit_semaphore = &r100_semaphore_ring_emit, |
373 | .cs_parse = &r300_cs_parse, | ||
374 | .ring_start = &r300_ring_start, | ||
375 | .ring_test = &r100_ring_test, | ||
376 | .ib_test = &r100_ib_test, | ||
303 | } | 377 | } |
304 | }, | 378 | }, |
305 | .irq_set = &r100_irq_set, | 379 | .irq = { |
306 | .irq_process = &r100_irq_process, | 380 | .set = &r100_irq_set, |
307 | .get_vblank_counter = &r100_get_vblank_counter, | 381 | .process = &r100_irq_process, |
308 | .cs_parse = &r300_cs_parse, | 382 | }, |
309 | .copy_blit = &r100_copy_blit, | 383 | .display = { |
310 | .copy_dma = &r200_copy_dma, | 384 | .bandwidth_update = &r100_bandwidth_update, |
311 | .copy = &r100_copy_blit, | 385 | .get_vblank_counter = &r100_get_vblank_counter, |
312 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 386 | .wait_for_vblank = &r100_wait_for_vblank, |
313 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 387 | }, |
314 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 388 | .copy = { |
315 | .set_memory_clock = NULL, | 389 | .blit = &r100_copy_blit, |
316 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 390 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
317 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 391 | .dma = &r200_copy_dma, |
318 | .set_surface_reg = r100_set_surface_reg, | 392 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
319 | .clear_surface_reg = r100_clear_surface_reg, | 393 | .copy = &r100_copy_blit, |
320 | .bandwidth_update = &r100_bandwidth_update, | 394 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
321 | .hpd_init = &r100_hpd_init, | 395 | }, |
322 | .hpd_fini = &r100_hpd_fini, | 396 | .surface = { |
323 | .hpd_sense = &r100_hpd_sense, | 397 | .set_reg = r100_set_surface_reg, |
324 | .hpd_set_polarity = &r100_hpd_set_polarity, | 398 | .clear_reg = r100_clear_surface_reg, |
325 | .ioctl_wait_idle = NULL, | 399 | }, |
326 | .gui_idle = &r100_gui_idle, | 400 | .hpd = { |
327 | .pm_misc = &r100_pm_misc, | 401 | .init = &r100_hpd_init, |
328 | .pm_prepare = &r100_pm_prepare, | 402 | .fini = &r100_hpd_fini, |
329 | .pm_finish = &r100_pm_finish, | 403 | .sense = &r100_hpd_sense, |
330 | .pm_init_profile = &r100_pm_init_profile, | 404 | .set_polarity = &r100_hpd_set_polarity, |
331 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 405 | }, |
332 | .pre_page_flip = &r100_pre_page_flip, | 406 | .pm = { |
333 | .page_flip = &r100_page_flip, | 407 | .misc = &r100_pm_misc, |
334 | .post_page_flip = &r100_post_page_flip, | 408 | .prepare = &r100_pm_prepare, |
409 | .finish = &r100_pm_finish, | ||
410 | .init_profile = &r100_pm_init_profile, | ||
411 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
412 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
413 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
414 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
415 | .set_memory_clock = NULL, | ||
416 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
417 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
418 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
419 | }, | ||
420 | .pflip = { | ||
421 | .pre_page_flip = &r100_pre_page_flip, | ||
422 | .page_flip = &r100_page_flip, | ||
423 | .post_page_flip = &r100_post_page_flip, | ||
424 | }, | ||
335 | }; | 425 | }; |
336 | 426 | ||
337 | static struct radeon_asic r420_asic = { | 427 | static struct radeon_asic r420_asic = { |
@@ -342,48 +432,70 @@ static struct radeon_asic r420_asic = { | |||
342 | .vga_set_state = &r100_vga_set_state, | 432 | .vga_set_state = &r100_vga_set_state, |
343 | .gpu_is_lockup = &r300_gpu_is_lockup, | 433 | .gpu_is_lockup = &r300_gpu_is_lockup, |
344 | .asic_reset = &r300_asic_reset, | 434 | .asic_reset = &r300_asic_reset, |
345 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 435 | .ioctl_wait_idle = NULL, |
346 | .gart_set_page = &rv370_pcie_gart_set_page, | 436 | .gui_idle = &r100_gui_idle, |
347 | .ring_start = &r300_ring_start, | 437 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
348 | .ring_test = &r100_ring_test, | 438 | .gart = { |
439 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
440 | .set_page = &rv370_pcie_gart_set_page, | ||
441 | }, | ||
349 | .ring = { | 442 | .ring = { |
350 | [RADEON_RING_TYPE_GFX_INDEX] = { | 443 | [RADEON_RING_TYPE_GFX_INDEX] = { |
351 | .ib_execute = &r100_ring_ib_execute, | 444 | .ib_execute = &r100_ring_ib_execute, |
352 | .emit_fence = &r300_fence_ring_emit, | 445 | .emit_fence = &r300_fence_ring_emit, |
353 | .emit_semaphore = &r100_semaphore_ring_emit, | 446 | .emit_semaphore = &r100_semaphore_ring_emit, |
447 | .cs_parse = &r300_cs_parse, | ||
448 | .ring_start = &r300_ring_start, | ||
449 | .ring_test = &r100_ring_test, | ||
450 | .ib_test = &r100_ib_test, | ||
354 | } | 451 | } |
355 | }, | 452 | }, |
356 | .irq_set = &r100_irq_set, | 453 | .irq = { |
357 | .irq_process = &r100_irq_process, | 454 | .set = &r100_irq_set, |
358 | .get_vblank_counter = &r100_get_vblank_counter, | 455 | .process = &r100_irq_process, |
359 | .cs_parse = &r300_cs_parse, | 456 | }, |
360 | .copy_blit = &r100_copy_blit, | 457 | .display = { |
361 | .copy_dma = &r200_copy_dma, | 458 | .bandwidth_update = &r100_bandwidth_update, |
362 | .copy = &r100_copy_blit, | 459 | .get_vblank_counter = &r100_get_vblank_counter, |
363 | .get_engine_clock = &radeon_atom_get_engine_clock, | 460 | .wait_for_vblank = &r100_wait_for_vblank, |
364 | .set_engine_clock = &radeon_atom_set_engine_clock, | 461 | }, |
365 | .get_memory_clock = &radeon_atom_get_memory_clock, | 462 | .copy = { |
366 | .set_memory_clock = &radeon_atom_set_memory_clock, | 463 | .blit = &r100_copy_blit, |
367 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 464 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
368 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 465 | .dma = &r200_copy_dma, |
369 | .set_clock_gating = &radeon_atom_set_clock_gating, | 466 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
370 | .set_surface_reg = r100_set_surface_reg, | 467 | .copy = &r100_copy_blit, |
371 | .clear_surface_reg = r100_clear_surface_reg, | 468 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
372 | .bandwidth_update = &r100_bandwidth_update, | 469 | }, |
373 | .hpd_init = &r100_hpd_init, | 470 | .surface = { |
374 | .hpd_fini = &r100_hpd_fini, | 471 | .set_reg = r100_set_surface_reg, |
375 | .hpd_sense = &r100_hpd_sense, | 472 | .clear_reg = r100_clear_surface_reg, |
376 | .hpd_set_polarity = &r100_hpd_set_polarity, | 473 | }, |
377 | .ioctl_wait_idle = NULL, | 474 | .hpd = { |
378 | .gui_idle = &r100_gui_idle, | 475 | .init = &r100_hpd_init, |
379 | .pm_misc = &r100_pm_misc, | 476 | .fini = &r100_hpd_fini, |
380 | .pm_prepare = &r100_pm_prepare, | 477 | .sense = &r100_hpd_sense, |
381 | .pm_finish = &r100_pm_finish, | 478 | .set_polarity = &r100_hpd_set_polarity, |
382 | .pm_init_profile = &r420_pm_init_profile, | 479 | }, |
383 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 480 | .pm = { |
384 | .pre_page_flip = &r100_pre_page_flip, | 481 | .misc = &r100_pm_misc, |
385 | .page_flip = &r100_page_flip, | 482 | .prepare = &r100_pm_prepare, |
386 | .post_page_flip = &r100_post_page_flip, | 483 | .finish = &r100_pm_finish, |
484 | .init_profile = &r420_pm_init_profile, | ||
485 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
486 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
487 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
488 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
489 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
490 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
491 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
492 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
493 | }, | ||
494 | .pflip = { | ||
495 | .pre_page_flip = &r100_pre_page_flip, | ||
496 | .page_flip = &r100_page_flip, | ||
497 | .post_page_flip = &r100_post_page_flip, | ||
498 | }, | ||
387 | }; | 499 | }; |
388 | 500 | ||
389 | static struct radeon_asic rs400_asic = { | 501 | static struct radeon_asic rs400_asic = { |
@@ -394,48 +506,70 @@ static struct radeon_asic rs400_asic = { | |||
394 | .vga_set_state = &r100_vga_set_state, | 506 | .vga_set_state = &r100_vga_set_state, |
395 | .gpu_is_lockup = &r300_gpu_is_lockup, | 507 | .gpu_is_lockup = &r300_gpu_is_lockup, |
396 | .asic_reset = &r300_asic_reset, | 508 | .asic_reset = &r300_asic_reset, |
397 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 509 | .ioctl_wait_idle = NULL, |
398 | .gart_set_page = &rs400_gart_set_page, | 510 | .gui_idle = &r100_gui_idle, |
399 | .ring_start = &r300_ring_start, | 511 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, |
400 | .ring_test = &r100_ring_test, | 512 | .gart = { |
513 | .tlb_flush = &rs400_gart_tlb_flush, | ||
514 | .set_page = &rs400_gart_set_page, | ||
515 | }, | ||
401 | .ring = { | 516 | .ring = { |
402 | [RADEON_RING_TYPE_GFX_INDEX] = { | 517 | [RADEON_RING_TYPE_GFX_INDEX] = { |
403 | .ib_execute = &r100_ring_ib_execute, | 518 | .ib_execute = &r100_ring_ib_execute, |
404 | .emit_fence = &r300_fence_ring_emit, | 519 | .emit_fence = &r300_fence_ring_emit, |
405 | .emit_semaphore = &r100_semaphore_ring_emit, | 520 | .emit_semaphore = &r100_semaphore_ring_emit, |
521 | .cs_parse = &r300_cs_parse, | ||
522 | .ring_start = &r300_ring_start, | ||
523 | .ring_test = &r100_ring_test, | ||
524 | .ib_test = &r100_ib_test, | ||
406 | } | 525 | } |
407 | }, | 526 | }, |
408 | .irq_set = &r100_irq_set, | 527 | .irq = { |
409 | .irq_process = &r100_irq_process, | 528 | .set = &r100_irq_set, |
410 | .get_vblank_counter = &r100_get_vblank_counter, | 529 | .process = &r100_irq_process, |
411 | .cs_parse = &r300_cs_parse, | 530 | }, |
412 | .copy_blit = &r100_copy_blit, | 531 | .display = { |
413 | .copy_dma = &r200_copy_dma, | 532 | .bandwidth_update = &r100_bandwidth_update, |
414 | .copy = &r100_copy_blit, | 533 | .get_vblank_counter = &r100_get_vblank_counter, |
415 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 534 | .wait_for_vblank = &r100_wait_for_vblank, |
416 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 535 | }, |
417 | .get_memory_clock = &radeon_legacy_get_memory_clock, | 536 | .copy = { |
418 | .set_memory_clock = NULL, | 537 | .blit = &r100_copy_blit, |
419 | .get_pcie_lanes = NULL, | 538 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
420 | .set_pcie_lanes = NULL, | 539 | .dma = &r200_copy_dma, |
421 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 540 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
422 | .set_surface_reg = r100_set_surface_reg, | 541 | .copy = &r100_copy_blit, |
423 | .clear_surface_reg = r100_clear_surface_reg, | 542 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
424 | .bandwidth_update = &r100_bandwidth_update, | 543 | }, |
425 | .hpd_init = &r100_hpd_init, | 544 | .surface = { |
426 | .hpd_fini = &r100_hpd_fini, | 545 | .set_reg = r100_set_surface_reg, |
427 | .hpd_sense = &r100_hpd_sense, | 546 | .clear_reg = r100_clear_surface_reg, |
428 | .hpd_set_polarity = &r100_hpd_set_polarity, | 547 | }, |
429 | .ioctl_wait_idle = NULL, | 548 | .hpd = { |
430 | .gui_idle = &r100_gui_idle, | 549 | .init = &r100_hpd_init, |
431 | .pm_misc = &r100_pm_misc, | 550 | .fini = &r100_hpd_fini, |
432 | .pm_prepare = &r100_pm_prepare, | 551 | .sense = &r100_hpd_sense, |
433 | .pm_finish = &r100_pm_finish, | 552 | .set_polarity = &r100_hpd_set_polarity, |
434 | .pm_init_profile = &r100_pm_init_profile, | 553 | }, |
435 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 554 | .pm = { |
436 | .pre_page_flip = &r100_pre_page_flip, | 555 | .misc = &r100_pm_misc, |
437 | .page_flip = &r100_page_flip, | 556 | .prepare = &r100_pm_prepare, |
438 | .post_page_flip = &r100_post_page_flip, | 557 | .finish = &r100_pm_finish, |
558 | .init_profile = &r100_pm_init_profile, | ||
559 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
560 | .get_engine_clock = &radeon_legacy_get_engine_clock, | ||
561 | .set_engine_clock = &radeon_legacy_set_engine_clock, | ||
562 | .get_memory_clock = &radeon_legacy_get_memory_clock, | ||
563 | .set_memory_clock = NULL, | ||
564 | .get_pcie_lanes = NULL, | ||
565 | .set_pcie_lanes = NULL, | ||
566 | .set_clock_gating = &radeon_legacy_set_clock_gating, | ||
567 | }, | ||
568 | .pflip = { | ||
569 | .pre_page_flip = &r100_pre_page_flip, | ||
570 | .page_flip = &r100_page_flip, | ||
571 | .post_page_flip = &r100_post_page_flip, | ||
572 | }, | ||
439 | }; | 573 | }; |
440 | 574 | ||
441 | static struct radeon_asic rs600_asic = { | 575 | static struct radeon_asic rs600_asic = { |
@@ -446,48 +580,70 @@ static struct radeon_asic rs600_asic = { | |||
446 | .vga_set_state = &r100_vga_set_state, | 580 | .vga_set_state = &r100_vga_set_state, |
447 | .gpu_is_lockup = &r300_gpu_is_lockup, | 581 | .gpu_is_lockup = &r300_gpu_is_lockup, |
448 | .asic_reset = &rs600_asic_reset, | 582 | .asic_reset = &rs600_asic_reset, |
449 | .gart_tlb_flush = &rs600_gart_tlb_flush, | 583 | .ioctl_wait_idle = NULL, |
450 | .gart_set_page = &rs600_gart_set_page, | 584 | .gui_idle = &r100_gui_idle, |
451 | .ring_start = &r300_ring_start, | 585 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, |
452 | .ring_test = &r100_ring_test, | 586 | .gart = { |
587 | .tlb_flush = &rs600_gart_tlb_flush, | ||
588 | .set_page = &rs600_gart_set_page, | ||
589 | }, | ||
453 | .ring = { | 590 | .ring = { |
454 | [RADEON_RING_TYPE_GFX_INDEX] = { | 591 | [RADEON_RING_TYPE_GFX_INDEX] = { |
455 | .ib_execute = &r100_ring_ib_execute, | 592 | .ib_execute = &r100_ring_ib_execute, |
456 | .emit_fence = &r300_fence_ring_emit, | 593 | .emit_fence = &r300_fence_ring_emit, |
457 | .emit_semaphore = &r100_semaphore_ring_emit, | 594 | .emit_semaphore = &r100_semaphore_ring_emit, |
595 | .cs_parse = &r300_cs_parse, | ||
596 | .ring_start = &r300_ring_start, | ||
597 | .ring_test = &r100_ring_test, | ||
598 | .ib_test = &r100_ib_test, | ||
458 | } | 599 | } |
459 | }, | 600 | }, |
460 | .irq_set = &rs600_irq_set, | 601 | .irq = { |
461 | .irq_process = &rs600_irq_process, | 602 | .set = &rs600_irq_set, |
462 | .get_vblank_counter = &rs600_get_vblank_counter, | 603 | .process = &rs600_irq_process, |
463 | .cs_parse = &r300_cs_parse, | 604 | }, |
464 | .copy_blit = &r100_copy_blit, | 605 | .display = { |
465 | .copy_dma = &r200_copy_dma, | 606 | .bandwidth_update = &rs600_bandwidth_update, |
466 | .copy = &r100_copy_blit, | 607 | .get_vblank_counter = &rs600_get_vblank_counter, |
467 | .get_engine_clock = &radeon_atom_get_engine_clock, | 608 | .wait_for_vblank = &avivo_wait_for_vblank, |
468 | .set_engine_clock = &radeon_atom_set_engine_clock, | 609 | }, |
469 | .get_memory_clock = &radeon_atom_get_memory_clock, | 610 | .copy = { |
470 | .set_memory_clock = &radeon_atom_set_memory_clock, | 611 | .blit = &r100_copy_blit, |
471 | .get_pcie_lanes = NULL, | 612 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
472 | .set_pcie_lanes = NULL, | 613 | .dma = &r200_copy_dma, |
473 | .set_clock_gating = &radeon_atom_set_clock_gating, | 614 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
474 | .set_surface_reg = r100_set_surface_reg, | 615 | .copy = &r100_copy_blit, |
475 | .clear_surface_reg = r100_clear_surface_reg, | 616 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
476 | .bandwidth_update = &rs600_bandwidth_update, | 617 | }, |
477 | .hpd_init = &rs600_hpd_init, | 618 | .surface = { |
478 | .hpd_fini = &rs600_hpd_fini, | 619 | .set_reg = r100_set_surface_reg, |
479 | .hpd_sense = &rs600_hpd_sense, | 620 | .clear_reg = r100_clear_surface_reg, |
480 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 621 | }, |
481 | .ioctl_wait_idle = NULL, | 622 | .hpd = { |
482 | .gui_idle = &r100_gui_idle, | 623 | .init = &rs600_hpd_init, |
483 | .pm_misc = &rs600_pm_misc, | 624 | .fini = &rs600_hpd_fini, |
484 | .pm_prepare = &rs600_pm_prepare, | 625 | .sense = &rs600_hpd_sense, |
485 | .pm_finish = &rs600_pm_finish, | 626 | .set_polarity = &rs600_hpd_set_polarity, |
486 | .pm_init_profile = &r420_pm_init_profile, | 627 | }, |
487 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 628 | .pm = { |
488 | .pre_page_flip = &rs600_pre_page_flip, | 629 | .misc = &rs600_pm_misc, |
489 | .page_flip = &rs600_page_flip, | 630 | .prepare = &rs600_pm_prepare, |
490 | .post_page_flip = &rs600_post_page_flip, | 631 | .finish = &rs600_pm_finish, |
632 | .init_profile = &r420_pm_init_profile, | ||
633 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
634 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
635 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
636 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
637 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
638 | .get_pcie_lanes = NULL, | ||
639 | .set_pcie_lanes = NULL, | ||
640 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
641 | }, | ||
642 | .pflip = { | ||
643 | .pre_page_flip = &rs600_pre_page_flip, | ||
644 | .page_flip = &rs600_page_flip, | ||
645 | .post_page_flip = &rs600_post_page_flip, | ||
646 | }, | ||
491 | }; | 647 | }; |
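From rs600 onward the irq, display and hpd slots switch to the AVIVO-era rs600_*/avivo_* helpers while most other entries are still shared with the older parts. Core code only ever calls through the table; elsewhere the driver wraps these slots in thin radeon_xxx() helpers (not part of this hunk). An invented equivalent, just to show the dispatch pattern:

    struct radeon_device;                      /* opaque */

    struct ex_irq_funcs {
            int (*set)(struct radeon_device *rdev);
            int (*process)(struct radeon_device *rdev);
    };

    static inline int example_irq_process(const struct ex_irq_funcs *irq,
                                          struct radeon_device *rdev)
    {
            /* rs600-class parts get rs600_irq_process here, the older parts
             * keep r100_irq_process; the caller is identical either way */
            return irq->process(rdev);
    }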
492 | 648 | ||
493 | static struct radeon_asic rs690_asic = { | 649 | static struct radeon_asic rs690_asic = { |
@@ -498,48 +654,70 @@ static struct radeon_asic rs690_asic = { | |||
498 | .vga_set_state = &r100_vga_set_state, | 654 | .vga_set_state = &r100_vga_set_state, |
499 | .gpu_is_lockup = &r300_gpu_is_lockup, | 655 | .gpu_is_lockup = &r300_gpu_is_lockup, |
500 | .asic_reset = &rs600_asic_reset, | 656 | .asic_reset = &rs600_asic_reset, |
501 | .gart_tlb_flush = &rs400_gart_tlb_flush, | 657 | .ioctl_wait_idle = NULL, |
502 | .gart_set_page = &rs400_gart_set_page, | 658 | .gui_idle = &r100_gui_idle, |
503 | .ring_start = &r300_ring_start, | 659 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, |
504 | .ring_test = &r100_ring_test, | 660 | .gart = { |
661 | .tlb_flush = &rs400_gart_tlb_flush, | ||
662 | .set_page = &rs400_gart_set_page, | ||
663 | }, | ||
505 | .ring = { | 664 | .ring = { |
506 | [RADEON_RING_TYPE_GFX_INDEX] = { | 665 | [RADEON_RING_TYPE_GFX_INDEX] = { |
507 | .ib_execute = &r100_ring_ib_execute, | 666 | .ib_execute = &r100_ring_ib_execute, |
508 | .emit_fence = &r300_fence_ring_emit, | 667 | .emit_fence = &r300_fence_ring_emit, |
509 | .emit_semaphore = &r100_semaphore_ring_emit, | 668 | .emit_semaphore = &r100_semaphore_ring_emit, |
669 | .cs_parse = &r300_cs_parse, | ||
670 | .ring_start = &r300_ring_start, | ||
671 | .ring_test = &r100_ring_test, | ||
672 | .ib_test = &r100_ib_test, | ||
510 | } | 673 | } |
511 | }, | 674 | }, |
512 | .irq_set = &rs600_irq_set, | 675 | .irq = { |
513 | .irq_process = &rs600_irq_process, | 676 | .set = &rs600_irq_set, |
514 | .get_vblank_counter = &rs600_get_vblank_counter, | 677 | .process = &rs600_irq_process, |
515 | .cs_parse = &r300_cs_parse, | 678 | }, |
516 | .copy_blit = &r100_copy_blit, | 679 | .display = { |
517 | .copy_dma = &r200_copy_dma, | 680 | .get_vblank_counter = &rs600_get_vblank_counter, |
518 | .copy = &r200_copy_dma, | 681 | .bandwidth_update = &rs690_bandwidth_update, |
519 | .get_engine_clock = &radeon_atom_get_engine_clock, | 682 | .wait_for_vblank = &avivo_wait_for_vblank, |
520 | .set_engine_clock = &radeon_atom_set_engine_clock, | 683 | }, |
521 | .get_memory_clock = &radeon_atom_get_memory_clock, | 684 | .copy = { |
522 | .set_memory_clock = &radeon_atom_set_memory_clock, | 685 | .blit = &r100_copy_blit, |
523 | .get_pcie_lanes = NULL, | 686 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
524 | .set_pcie_lanes = NULL, | 687 | .dma = &r200_copy_dma, |
525 | .set_clock_gating = &radeon_atom_set_clock_gating, | 688 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
526 | .set_surface_reg = r100_set_surface_reg, | 689 | .copy = &r200_copy_dma, |
527 | .clear_surface_reg = r100_clear_surface_reg, | 690 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
528 | .bandwidth_update = &rs690_bandwidth_update, | 691 | }, |
529 | .hpd_init = &rs600_hpd_init, | 692 | .surface = { |
530 | .hpd_fini = &rs600_hpd_fini, | 693 | .set_reg = r100_set_surface_reg, |
531 | .hpd_sense = &rs600_hpd_sense, | 694 | .clear_reg = r100_clear_surface_reg, |
532 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 695 | }, |
533 | .ioctl_wait_idle = NULL, | 696 | .hpd = { |
534 | .gui_idle = &r100_gui_idle, | 697 | .init = &rs600_hpd_init, |
535 | .pm_misc = &rs600_pm_misc, | 698 | .fini = &rs600_hpd_fini, |
536 | .pm_prepare = &rs600_pm_prepare, | 699 | .sense = &rs600_hpd_sense, |
537 | .pm_finish = &rs600_pm_finish, | 700 | .set_polarity = &rs600_hpd_set_polarity, |
538 | .pm_init_profile = &r420_pm_init_profile, | 701 | }, |
539 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 702 | .pm = { |
540 | .pre_page_flip = &rs600_pre_page_flip, | 703 | .misc = &rs600_pm_misc, |
541 | .page_flip = &rs600_page_flip, | 704 | .prepare = &rs600_pm_prepare, |
542 | .post_page_flip = &rs600_post_page_flip, | 705 | .finish = &rs600_pm_finish, |
706 | .init_profile = &r420_pm_init_profile, | ||
707 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
708 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
709 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
710 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
711 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
712 | .get_pcie_lanes = NULL, | ||
713 | .set_pcie_lanes = NULL, | ||
714 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
715 | }, | ||
716 | .pflip = { | ||
717 | .pre_page_flip = &rs600_pre_page_flip, | ||
718 | .page_flip = &rs600_page_flip, | ||
719 | .post_page_flip = &rs600_post_page_flip, | ||
720 | }, | ||
543 | }; | 721 | }; |
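Note that rs690 is the first table above whose preferred copy method is the DMA path (.copy = &r200_copy_dma) rather than the blitter, and that each method carries its own ring index. A hedged sketch of how a caller could dispatch through the new copy group (signatures simplified; the real hooks also take a fence and live outside this diff):

    struct radeon_device;

    typedef int (*ex_copy_func)(struct radeon_device *rdev,
                                unsigned long long src, unsigned long long dst,
                                unsigned num_pages);

    struct ex_copy_funcs {
            ex_copy_func blit;  int blit_ring_index;
            ex_copy_func dma;   int dma_ring_index;
            ex_copy_func copy;  int copy_ring_index;   /* preferred method */
    };

    static int example_do_copy(const struct ex_copy_funcs *c,
                               struct radeon_device *rdev,
                               unsigned long long src, unsigned long long dst,
                               unsigned num_pages)
    {
            /* prefer the .copy hook; the individual .dma/.blit pointers may be
             * NULL, as .dma is for r100 and r600 in the tables above */
            ex_copy_func f = c->copy ? c->copy : c->blit;

            if (!f)
                    return -1;          /* -EINVAL in kernel terms */
            return f(rdev, src, dst, num_pages);
    }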
544 | 722 | ||
545 | static struct radeon_asic rv515_asic = { | 723 | static struct radeon_asic rv515_asic = { |
@@ -550,48 +728,70 @@ static struct radeon_asic rv515_asic = { | |||
550 | .vga_set_state = &r100_vga_set_state, | 728 | .vga_set_state = &r100_vga_set_state, |
551 | .gpu_is_lockup = &r300_gpu_is_lockup, | 729 | .gpu_is_lockup = &r300_gpu_is_lockup, |
552 | .asic_reset = &rs600_asic_reset, | 730 | .asic_reset = &rs600_asic_reset, |
553 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 731 | .ioctl_wait_idle = NULL, |
554 | .gart_set_page = &rv370_pcie_gart_set_page, | 732 | .gui_idle = &r100_gui_idle, |
555 | .ring_start = &rv515_ring_start, | 733 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, |
556 | .ring_test = &r100_ring_test, | 734 | .gart = { |
735 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
736 | .set_page = &rv370_pcie_gart_set_page, | ||
737 | }, | ||
557 | .ring = { | 738 | .ring = { |
558 | [RADEON_RING_TYPE_GFX_INDEX] = { | 739 | [RADEON_RING_TYPE_GFX_INDEX] = { |
559 | .ib_execute = &r100_ring_ib_execute, | 740 | .ib_execute = &r100_ring_ib_execute, |
560 | .emit_fence = &r300_fence_ring_emit, | 741 | .emit_fence = &r300_fence_ring_emit, |
561 | .emit_semaphore = &r100_semaphore_ring_emit, | 742 | .emit_semaphore = &r100_semaphore_ring_emit, |
743 | .cs_parse = &r300_cs_parse, | ||
744 | .ring_start = &rv515_ring_start, | ||
745 | .ring_test = &r100_ring_test, | ||
746 | .ib_test = &r100_ib_test, | ||
562 | } | 747 | } |
563 | }, | 748 | }, |
564 | .irq_set = &rs600_irq_set, | 749 | .irq = { |
565 | .irq_process = &rs600_irq_process, | 750 | .set = &rs600_irq_set, |
566 | .get_vblank_counter = &rs600_get_vblank_counter, | 751 | .process = &rs600_irq_process, |
567 | .cs_parse = &r300_cs_parse, | 752 | }, |
568 | .copy_blit = &r100_copy_blit, | 753 | .display = { |
569 | .copy_dma = &r200_copy_dma, | 754 | .get_vblank_counter = &rs600_get_vblank_counter, |
570 | .copy = &r100_copy_blit, | 755 | .bandwidth_update = &rv515_bandwidth_update, |
571 | .get_engine_clock = &radeon_atom_get_engine_clock, | 756 | .wait_for_vblank = &avivo_wait_for_vblank, |
572 | .set_engine_clock = &radeon_atom_set_engine_clock, | 757 | }, |
573 | .get_memory_clock = &radeon_atom_get_memory_clock, | 758 | .copy = { |
574 | .set_memory_clock = &radeon_atom_set_memory_clock, | 759 | .blit = &r100_copy_blit, |
575 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 760 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
576 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 761 | .dma = &r200_copy_dma, |
577 | .set_clock_gating = &radeon_atom_set_clock_gating, | 762 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
578 | .set_surface_reg = r100_set_surface_reg, | 763 | .copy = &r100_copy_blit, |
579 | .clear_surface_reg = r100_clear_surface_reg, | 764 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
580 | .bandwidth_update = &rv515_bandwidth_update, | 765 | }, |
581 | .hpd_init = &rs600_hpd_init, | 766 | .surface = { |
582 | .hpd_fini = &rs600_hpd_fini, | 767 | .set_reg = r100_set_surface_reg, |
583 | .hpd_sense = &rs600_hpd_sense, | 768 | .clear_reg = r100_clear_surface_reg, |
584 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 769 | }, |
585 | .ioctl_wait_idle = NULL, | 770 | .hpd = { |
586 | .gui_idle = &r100_gui_idle, | 771 | .init = &rs600_hpd_init, |
587 | .pm_misc = &rs600_pm_misc, | 772 | .fini = &rs600_hpd_fini, |
588 | .pm_prepare = &rs600_pm_prepare, | 773 | .sense = &rs600_hpd_sense, |
589 | .pm_finish = &rs600_pm_finish, | 774 | .set_polarity = &rs600_hpd_set_polarity, |
590 | .pm_init_profile = &r420_pm_init_profile, | 775 | }, |
591 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 776 | .pm = { |
592 | .pre_page_flip = &rs600_pre_page_flip, | 777 | .misc = &rs600_pm_misc, |
593 | .page_flip = &rs600_page_flip, | 778 | .prepare = &rs600_pm_prepare, |
594 | .post_page_flip = &rs600_post_page_flip, | 779 | .finish = &rs600_pm_finish, |
780 | .init_profile = &r420_pm_init_profile, | ||
781 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
782 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
783 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
784 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
785 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
786 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
787 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
788 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
789 | }, | ||
790 | .pflip = { | ||
791 | .pre_page_flip = &rs600_pre_page_flip, | ||
792 | .page_flip = &rs600_page_flip, | ||
793 | .post_page_flip = &rs600_post_page_flip, | ||
794 | }, | ||
595 | }; | 795 | }; |
596 | 796 | ||
597 | static struct radeon_asic r520_asic = { | 797 | static struct radeon_asic r520_asic = { |
@@ -602,48 +802,70 @@ static struct radeon_asic r520_asic = { | |||
602 | .vga_set_state = &r100_vga_set_state, | 802 | .vga_set_state = &r100_vga_set_state, |
603 | .gpu_is_lockup = &r300_gpu_is_lockup, | 803 | .gpu_is_lockup = &r300_gpu_is_lockup, |
604 | .asic_reset = &rs600_asic_reset, | 804 | .asic_reset = &rs600_asic_reset, |
605 | .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, | 805 | .ioctl_wait_idle = NULL, |
606 | .gart_set_page = &rv370_pcie_gart_set_page, | 806 | .gui_idle = &r100_gui_idle, |
607 | .ring_start = &rv515_ring_start, | 807 | .mc_wait_for_idle = &r520_mc_wait_for_idle, |
608 | .ring_test = &r100_ring_test, | 808 | .gart = { |
809 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | ||
810 | .set_page = &rv370_pcie_gart_set_page, | ||
811 | }, | ||
609 | .ring = { | 812 | .ring = { |
610 | [RADEON_RING_TYPE_GFX_INDEX] = { | 813 | [RADEON_RING_TYPE_GFX_INDEX] = { |
611 | .ib_execute = &r100_ring_ib_execute, | 814 | .ib_execute = &r100_ring_ib_execute, |
612 | .emit_fence = &r300_fence_ring_emit, | 815 | .emit_fence = &r300_fence_ring_emit, |
613 | .emit_semaphore = &r100_semaphore_ring_emit, | 816 | .emit_semaphore = &r100_semaphore_ring_emit, |
817 | .cs_parse = &r300_cs_parse, | ||
818 | .ring_start = &rv515_ring_start, | ||
819 | .ring_test = &r100_ring_test, | ||
820 | .ib_test = &r100_ib_test, | ||
614 | } | 821 | } |
615 | }, | 822 | }, |
616 | .irq_set = &rs600_irq_set, | 823 | .irq = { |
617 | .irq_process = &rs600_irq_process, | 824 | .set = &rs600_irq_set, |
618 | .get_vblank_counter = &rs600_get_vblank_counter, | 825 | .process = &rs600_irq_process, |
619 | .cs_parse = &r300_cs_parse, | 826 | }, |
620 | .copy_blit = &r100_copy_blit, | 827 | .display = { |
621 | .copy_dma = &r200_copy_dma, | 828 | .bandwidth_update = &rv515_bandwidth_update, |
622 | .copy = &r100_copy_blit, | 829 | .get_vblank_counter = &rs600_get_vblank_counter, |
623 | .get_engine_clock = &radeon_atom_get_engine_clock, | 830 | .wait_for_vblank = &avivo_wait_for_vblank, |
624 | .set_engine_clock = &radeon_atom_set_engine_clock, | 831 | }, |
625 | .get_memory_clock = &radeon_atom_get_memory_clock, | 832 | .copy = { |
626 | .set_memory_clock = &radeon_atom_set_memory_clock, | 833 | .blit = &r100_copy_blit, |
627 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 834 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
628 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 835 | .dma = &r200_copy_dma, |
629 | .set_clock_gating = &radeon_atom_set_clock_gating, | 836 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
630 | .set_surface_reg = r100_set_surface_reg, | 837 | .copy = &r100_copy_blit, |
631 | .clear_surface_reg = r100_clear_surface_reg, | 838 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
632 | .bandwidth_update = &rv515_bandwidth_update, | 839 | }, |
633 | .hpd_init = &rs600_hpd_init, | 840 | .surface = { |
634 | .hpd_fini = &rs600_hpd_fini, | 841 | .set_reg = r100_set_surface_reg, |
635 | .hpd_sense = &rs600_hpd_sense, | 842 | .clear_reg = r100_clear_surface_reg, |
636 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 843 | }, |
637 | .ioctl_wait_idle = NULL, | 844 | .hpd = { |
638 | .gui_idle = &r100_gui_idle, | 845 | .init = &rs600_hpd_init, |
639 | .pm_misc = &rs600_pm_misc, | 846 | .fini = &rs600_hpd_fini, |
640 | .pm_prepare = &rs600_pm_prepare, | 847 | .sense = &rs600_hpd_sense, |
641 | .pm_finish = &rs600_pm_finish, | 848 | .set_polarity = &rs600_hpd_set_polarity, |
642 | .pm_init_profile = &r420_pm_init_profile, | 849 | }, |
643 | .pm_get_dynpm_state = &r100_pm_get_dynpm_state, | 850 | .pm = { |
644 | .pre_page_flip = &rs600_pre_page_flip, | 851 | .misc = &rs600_pm_misc, |
645 | .page_flip = &rs600_page_flip, | 852 | .prepare = &rs600_pm_prepare, |
646 | .post_page_flip = &rs600_post_page_flip, | 853 | .finish = &rs600_pm_finish, |
854 | .init_profile = &r420_pm_init_profile, | ||
855 | .get_dynpm_state = &r100_pm_get_dynpm_state, | ||
856 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
857 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
858 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
859 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
860 | .get_pcie_lanes = &rv370_get_pcie_lanes, | ||
861 | .set_pcie_lanes = &rv370_set_pcie_lanes, | ||
862 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
863 | }, | ||
864 | .pflip = { | ||
865 | .pre_page_flip = &rs600_pre_page_flip, | ||
866 | .page_flip = &rs600_page_flip, | ||
867 | .post_page_flip = &rs600_post_page_flip, | ||
868 | }, | ||
647 | }; | 869 | }; |
648 | 870 | ||
649 | static struct radeon_asic r600_asic = { | 871 | static struct radeon_asic r600_asic = { |
@@ -654,47 +876,69 @@ static struct radeon_asic r600_asic = { | |||
654 | .vga_set_state = &r600_vga_set_state, | 876 | .vga_set_state = &r600_vga_set_state, |
655 | .gpu_is_lockup = &r600_gpu_is_lockup, | 877 | .gpu_is_lockup = &r600_gpu_is_lockup, |
656 | .asic_reset = &r600_asic_reset, | 878 | .asic_reset = &r600_asic_reset, |
657 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 879 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
658 | .gart_set_page = &rs600_gart_set_page, | 880 | .gui_idle = &r600_gui_idle, |
659 | .ring_test = &r600_ring_test, | 881 | .mc_wait_for_idle = &r600_mc_wait_for_idle, |
882 | .gart = { | ||
883 | .tlb_flush = &r600_pcie_gart_tlb_flush, | ||
884 | .set_page = &rs600_gart_set_page, | ||
885 | }, | ||
660 | .ring = { | 886 | .ring = { |
661 | [RADEON_RING_TYPE_GFX_INDEX] = { | 887 | [RADEON_RING_TYPE_GFX_INDEX] = { |
662 | .ib_execute = &r600_ring_ib_execute, | 888 | .ib_execute = &r600_ring_ib_execute, |
663 | .emit_fence = &r600_fence_ring_emit, | 889 | .emit_fence = &r600_fence_ring_emit, |
664 | .emit_semaphore = &r600_semaphore_ring_emit, | 890 | .emit_semaphore = &r600_semaphore_ring_emit, |
891 | .cs_parse = &r600_cs_parse, | ||
892 | .ring_test = &r600_ring_test, | ||
893 | .ib_test = &r600_ib_test, | ||
665 | } | 894 | } |
666 | }, | 895 | }, |
667 | .irq_set = &r600_irq_set, | 896 | .irq = { |
668 | .irq_process = &r600_irq_process, | 897 | .set = &r600_irq_set, |
669 | .get_vblank_counter = &rs600_get_vblank_counter, | 898 | .process = &r600_irq_process, |
670 | .cs_parse = &r600_cs_parse, | 899 | }, |
671 | .copy_blit = &r600_copy_blit, | 900 | .display = { |
672 | .copy_dma = NULL, | 901 | .bandwidth_update = &rv515_bandwidth_update, |
673 | .copy = &r600_copy_blit, | 902 | .get_vblank_counter = &rs600_get_vblank_counter, |
674 | .get_engine_clock = &radeon_atom_get_engine_clock, | 903 | .wait_for_vblank = &avivo_wait_for_vblank, |
675 | .set_engine_clock = &radeon_atom_set_engine_clock, | 904 | }, |
676 | .get_memory_clock = &radeon_atom_get_memory_clock, | 905 | .copy = { |
677 | .set_memory_clock = &radeon_atom_set_memory_clock, | 906 | .blit = &r600_copy_blit, |
678 | .get_pcie_lanes = &r600_get_pcie_lanes, | 907 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
679 | .set_pcie_lanes = &r600_set_pcie_lanes, | 908 | .dma = NULL, |
680 | .set_clock_gating = NULL, | 909 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
681 | .set_surface_reg = r600_set_surface_reg, | 910 | .copy = &r600_copy_blit, |
682 | .clear_surface_reg = r600_clear_surface_reg, | 911 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
683 | .bandwidth_update = &rv515_bandwidth_update, | 912 | }, |
684 | .hpd_init = &r600_hpd_init, | 913 | .surface = { |
685 | .hpd_fini = &r600_hpd_fini, | 914 | .set_reg = r600_set_surface_reg, |
686 | .hpd_sense = &r600_hpd_sense, | 915 | .clear_reg = r600_clear_surface_reg, |
687 | .hpd_set_polarity = &r600_hpd_set_polarity, | 916 | }, |
688 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 917 | .hpd = { |
689 | .gui_idle = &r600_gui_idle, | 918 | .init = &r600_hpd_init, |
690 | .pm_misc = &r600_pm_misc, | 919 | .fini = &r600_hpd_fini, |
691 | .pm_prepare = &rs600_pm_prepare, | 920 | .sense = &r600_hpd_sense, |
692 | .pm_finish = &rs600_pm_finish, | 921 | .set_polarity = &r600_hpd_set_polarity, |
693 | .pm_init_profile = &r600_pm_init_profile, | 922 | }, |
694 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 923 | .pm = { |
695 | .pre_page_flip = &rs600_pre_page_flip, | 924 | .misc = &r600_pm_misc, |
696 | .page_flip = &rs600_page_flip, | 925 | .prepare = &rs600_pm_prepare, |
697 | .post_page_flip = &rs600_post_page_flip, | 926 | .finish = &rs600_pm_finish, |
927 | .init_profile = &r600_pm_init_profile, | ||
928 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
929 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
930 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
931 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
932 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
933 | .get_pcie_lanes = &r600_get_pcie_lanes, | ||
934 | .set_pcie_lanes = &r600_set_pcie_lanes, | ||
935 | .set_clock_gating = NULL, | ||
936 | }, | ||
937 | .pflip = { | ||
938 | .pre_page_flip = &rs600_pre_page_flip, | ||
939 | .page_flip = &rs600_page_flip, | ||
940 | .post_page_flip = &rs600_post_page_flip, | ||
941 | }, | ||
698 | }; | 942 | }; |
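r600 is the first table here with a non-NULL ioctl_wait_idle, and several other slots are deliberately NULL (copy.dma, pm.set_clock_gating), so callers of the optional hooks have to guard them. A sketch of that pattern, with the callback signature simplified for illustration:

    struct radeon_device;
    struct radeon_bo;

    static void example_ioctl_wait_idle(void (*wait_idle)(struct radeon_device *rdev,
                                                          struct radeon_bo *bo),
                                        struct radeon_device *rdev,
                                        struct radeon_bo *bo)
    {
            if (wait_idle)                  /* r600-class parts provide it */
                    wait_idle(rdev, bo);    /* older parts simply skip the wait */
    }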
699 | 943 | ||
700 | static struct radeon_asic rs780_asic = { | 944 | static struct radeon_asic rs780_asic = { |
@@ -705,47 +949,69 @@ static struct radeon_asic rs780_asic = { | |||
705 | .gpu_is_lockup = &r600_gpu_is_lockup, | 949 | .gpu_is_lockup = &r600_gpu_is_lockup, |
706 | .vga_set_state = &r600_vga_set_state, | 950 | .vga_set_state = &r600_vga_set_state, |
707 | .asic_reset = &r600_asic_reset, | 951 | .asic_reset = &r600_asic_reset, |
708 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 952 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
709 | .gart_set_page = &rs600_gart_set_page, | 953 | .gui_idle = &r600_gui_idle, |
710 | .ring_test = &r600_ring_test, | 954 | .mc_wait_for_idle = &r600_mc_wait_for_idle, |
955 | .gart = { | ||
956 | .tlb_flush = &r600_pcie_gart_tlb_flush, | ||
957 | .set_page = &rs600_gart_set_page, | ||
958 | }, | ||
711 | .ring = { | 959 | .ring = { |
712 | [RADEON_RING_TYPE_GFX_INDEX] = { | 960 | [RADEON_RING_TYPE_GFX_INDEX] = { |
713 | .ib_execute = &r600_ring_ib_execute, | 961 | .ib_execute = &r600_ring_ib_execute, |
714 | .emit_fence = &r600_fence_ring_emit, | 962 | .emit_fence = &r600_fence_ring_emit, |
715 | .emit_semaphore = &r600_semaphore_ring_emit, | 963 | .emit_semaphore = &r600_semaphore_ring_emit, |
964 | .cs_parse = &r600_cs_parse, | ||
965 | .ring_test = &r600_ring_test, | ||
966 | .ib_test = &r600_ib_test, | ||
716 | } | 967 | } |
717 | }, | 968 | }, |
718 | .irq_set = &r600_irq_set, | 969 | .irq = { |
719 | .irq_process = &r600_irq_process, | 970 | .set = &r600_irq_set, |
720 | .get_vblank_counter = &rs600_get_vblank_counter, | 971 | .process = &r600_irq_process, |
721 | .cs_parse = &r600_cs_parse, | 972 | }, |
722 | .copy_blit = &r600_copy_blit, | 973 | .display = { |
723 | .copy_dma = NULL, | 974 | .bandwidth_update = &rs690_bandwidth_update, |
724 | .copy = &r600_copy_blit, | 975 | .get_vblank_counter = &rs600_get_vblank_counter, |
725 | .get_engine_clock = &radeon_atom_get_engine_clock, | 976 | .wait_for_vblank = &avivo_wait_for_vblank, |
726 | .set_engine_clock = &radeon_atom_set_engine_clock, | 977 | }, |
727 | .get_memory_clock = NULL, | 978 | .copy = { |
728 | .set_memory_clock = NULL, | 979 | .blit = &r600_copy_blit, |
729 | .get_pcie_lanes = NULL, | 980 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
730 | .set_pcie_lanes = NULL, | 981 | .dma = NULL, |
731 | .set_clock_gating = NULL, | 982 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
732 | .set_surface_reg = r600_set_surface_reg, | 983 | .copy = &r600_copy_blit, |
733 | .clear_surface_reg = r600_clear_surface_reg, | 984 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
734 | .bandwidth_update = &rs690_bandwidth_update, | 985 | }, |
735 | .hpd_init = &r600_hpd_init, | 986 | .surface = { |
736 | .hpd_fini = &r600_hpd_fini, | 987 | .set_reg = r600_set_surface_reg, |
737 | .hpd_sense = &r600_hpd_sense, | 988 | .clear_reg = r600_clear_surface_reg, |
738 | .hpd_set_polarity = &r600_hpd_set_polarity, | 989 | }, |
739 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 990 | .hpd = { |
740 | .gui_idle = &r600_gui_idle, | 991 | .init = &r600_hpd_init, |
741 | .pm_misc = &r600_pm_misc, | 992 | .fini = &r600_hpd_fini, |
742 | .pm_prepare = &rs600_pm_prepare, | 993 | .sense = &r600_hpd_sense, |
743 | .pm_finish = &rs600_pm_finish, | 994 | .set_polarity = &r600_hpd_set_polarity, |
744 | .pm_init_profile = &rs780_pm_init_profile, | 995 | }, |
745 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 996 | .pm = { |
746 | .pre_page_flip = &rs600_pre_page_flip, | 997 | .misc = &r600_pm_misc, |
747 | .page_flip = &rs600_page_flip, | 998 | .prepare = &rs600_pm_prepare, |
748 | .post_page_flip = &rs600_post_page_flip, | 999 | .finish = &rs600_pm_finish, |
1000 | .init_profile = &rs780_pm_init_profile, | ||
1001 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
1002 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
1003 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
1004 | .get_memory_clock = NULL, | ||
1005 | .set_memory_clock = NULL, | ||
1006 | .get_pcie_lanes = NULL, | ||
1007 | .set_pcie_lanes = NULL, | ||
1008 | .set_clock_gating = NULL, | ||
1009 | }, | ||
1010 | .pflip = { | ||
1011 | .pre_page_flip = &rs600_pre_page_flip, | ||
1012 | .page_flip = &rs600_page_flip, | ||
1013 | .post_page_flip = &rs600_post_page_flip, | ||
1014 | }, | ||
749 | }; | 1015 | }; |
750 | 1016 | ||
751 | static struct radeon_asic rv770_asic = { | 1017 | static struct radeon_asic rv770_asic = { |
@@ -756,47 +1022,69 @@ static struct radeon_asic rv770_asic = { | |||
756 | .asic_reset = &r600_asic_reset, | 1022 | .asic_reset = &r600_asic_reset, |
757 | .gpu_is_lockup = &r600_gpu_is_lockup, | 1023 | .gpu_is_lockup = &r600_gpu_is_lockup, |
758 | .vga_set_state = &r600_vga_set_state, | 1024 | .vga_set_state = &r600_vga_set_state, |
759 | .gart_tlb_flush = &r600_pcie_gart_tlb_flush, | 1025 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
760 | .gart_set_page = &rs600_gart_set_page, | 1026 | .gui_idle = &r600_gui_idle, |
761 | .ring_test = &r600_ring_test, | 1027 | .mc_wait_for_idle = &r600_mc_wait_for_idle, |
1028 | .gart = { | ||
1029 | .tlb_flush = &r600_pcie_gart_tlb_flush, | ||
1030 | .set_page = &rs600_gart_set_page, | ||
1031 | }, | ||
762 | .ring = { | 1032 | .ring = { |
763 | [RADEON_RING_TYPE_GFX_INDEX] = { | 1033 | [RADEON_RING_TYPE_GFX_INDEX] = { |
764 | .ib_execute = &r600_ring_ib_execute, | 1034 | .ib_execute = &r600_ring_ib_execute, |
765 | .emit_fence = &r600_fence_ring_emit, | 1035 | .emit_fence = &r600_fence_ring_emit, |
766 | .emit_semaphore = &r600_semaphore_ring_emit, | 1036 | .emit_semaphore = &r600_semaphore_ring_emit, |
1037 | .cs_parse = &r600_cs_parse, | ||
1038 | .ring_test = &r600_ring_test, | ||
1039 | .ib_test = &r600_ib_test, | ||
767 | } | 1040 | } |
768 | }, | 1041 | }, |
769 | .irq_set = &r600_irq_set, | 1042 | .irq = { |
770 | .irq_process = &r600_irq_process, | 1043 | .set = &r600_irq_set, |
771 | .get_vblank_counter = &rs600_get_vblank_counter, | 1044 | .process = &r600_irq_process, |
772 | .cs_parse = &r600_cs_parse, | 1045 | }, |
773 | .copy_blit = &r600_copy_blit, | 1046 | .display = { |
774 | .copy_dma = NULL, | 1047 | .bandwidth_update = &rv515_bandwidth_update, |
775 | .copy = &r600_copy_blit, | 1048 | .get_vblank_counter = &rs600_get_vblank_counter, |
776 | .get_engine_clock = &radeon_atom_get_engine_clock, | 1049 | .wait_for_vblank = &avivo_wait_for_vblank, |
777 | .set_engine_clock = &radeon_atom_set_engine_clock, | 1050 | }, |
778 | .get_memory_clock = &radeon_atom_get_memory_clock, | 1051 | .copy = { |
779 | .set_memory_clock = &radeon_atom_set_memory_clock, | 1052 | .blit = &r600_copy_blit, |
780 | .get_pcie_lanes = &r600_get_pcie_lanes, | 1053 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
781 | .set_pcie_lanes = &r600_set_pcie_lanes, | 1054 | .dma = NULL, |
782 | .set_clock_gating = &radeon_atom_set_clock_gating, | 1055 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
783 | .set_surface_reg = r600_set_surface_reg, | 1056 | .copy = &r600_copy_blit, |
784 | .clear_surface_reg = r600_clear_surface_reg, | 1057 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
785 | .bandwidth_update = &rv515_bandwidth_update, | 1058 | }, |
786 | .hpd_init = &r600_hpd_init, | 1059 | .surface = { |
787 | .hpd_fini = &r600_hpd_fini, | 1060 | .set_reg = r600_set_surface_reg, |
788 | .hpd_sense = &r600_hpd_sense, | 1061 | .clear_reg = r600_clear_surface_reg, |
789 | .hpd_set_polarity = &r600_hpd_set_polarity, | 1062 | }, |
790 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 1063 | .hpd = { |
791 | .gui_idle = &r600_gui_idle, | 1064 | .init = &r600_hpd_init, |
792 | .pm_misc = &rv770_pm_misc, | 1065 | .fini = &r600_hpd_fini, |
793 | .pm_prepare = &rs600_pm_prepare, | 1066 | .sense = &r600_hpd_sense, |
794 | .pm_finish = &rs600_pm_finish, | 1067 | .set_polarity = &r600_hpd_set_polarity, |
795 | .pm_init_profile = &r600_pm_init_profile, | 1068 | }, |
796 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 1069 | .pm = { |
797 | .pre_page_flip = &rs600_pre_page_flip, | 1070 | .misc = &rv770_pm_misc, |
798 | .page_flip = &rv770_page_flip, | 1071 | .prepare = &rs600_pm_prepare, |
799 | .post_page_flip = &rs600_post_page_flip, | 1072 | .finish = &rs600_pm_finish, |
1073 | .init_profile = &r600_pm_init_profile, | ||
1074 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
1075 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
1076 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
1077 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
1078 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
1079 | .get_pcie_lanes = &r600_get_pcie_lanes, | ||
1080 | .set_pcie_lanes = &r600_set_pcie_lanes, | ||
1081 | .set_clock_gating = &radeon_atom_set_clock_gating, | ||
1082 | }, | ||
1083 | .pflip = { | ||
1084 | .pre_page_flip = &rs600_pre_page_flip, | ||
1085 | .page_flip = &rv770_page_flip, | ||
1086 | .post_page_flip = &rs600_post_page_flip, | ||
1087 | }, | ||
800 | }; | 1088 | }; |
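rv770 reuses the rs600 pre/post page-flip hooks and overrides only the middle step with rv770_page_flip. A sketch of the call order the .pflip grouping implies (types simplified; in the driver the three steps are driven from the display code and are not necessarily back-to-back):

    struct radeon_device;

    struct ex_pflip_funcs {
            void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
            unsigned int (*page_flip)(struct radeon_device *rdev, int crtc,
                                      unsigned long long crtc_base);
            void (*post_page_flip)(struct radeon_device *rdev, int crtc);
    };

    static unsigned int example_flip(const struct ex_pflip_funcs *p,
                                     struct radeon_device *rdev, int crtc,
                                     unsigned long long base)
    {
            unsigned int stat;

            p->pre_page_flip(rdev, crtc);           /* shared rs600 hook      */
            stat = p->page_flip(rdev, crtc, base);  /* rv770-specific on rv770 */
            p->post_page_flip(rdev, crtc);          /* shared rs600 hook      */
            return stat;
    }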
801 | 1089 | ||
802 | static struct radeon_asic evergreen_asic = { | 1090 | static struct radeon_asic evergreen_asic = { |
@@ -807,47 +1095,69 @@ static struct radeon_asic evergreen_asic = { | |||
807 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | 1095 | .gpu_is_lockup = &evergreen_gpu_is_lockup, |
808 | .asic_reset = &evergreen_asic_reset, | 1096 | .asic_reset = &evergreen_asic_reset, |
809 | .vga_set_state = &r600_vga_set_state, | 1097 | .vga_set_state = &r600_vga_set_state, |
810 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1098 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
811 | .gart_set_page = &rs600_gart_set_page, | 1099 | .gui_idle = &r600_gui_idle, |
812 | .ring_test = &r600_ring_test, | 1100 | .mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
1101 | .gart = { | ||
1102 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
1103 | .set_page = &rs600_gart_set_page, | ||
1104 | }, | ||
813 | .ring = { | 1105 | .ring = { |
814 | [RADEON_RING_TYPE_GFX_INDEX] = { | 1106 | [RADEON_RING_TYPE_GFX_INDEX] = { |
815 | .ib_execute = &evergreen_ring_ib_execute, | 1107 | .ib_execute = &evergreen_ring_ib_execute, |
816 | .emit_fence = &r600_fence_ring_emit, | 1108 | .emit_fence = &r600_fence_ring_emit, |
817 | .emit_semaphore = &r600_semaphore_ring_emit, | 1109 | .emit_semaphore = &r600_semaphore_ring_emit, |
1110 | .cs_parse = &evergreen_cs_parse, | ||
1111 | .ring_test = &r600_ring_test, | ||
1112 | .ib_test = &r600_ib_test, | ||
818 | } | 1113 | } |
819 | }, | 1114 | }, |
820 | .irq_set = &evergreen_irq_set, | 1115 | .irq = { |
821 | .irq_process = &evergreen_irq_process, | 1116 | .set = &evergreen_irq_set, |
822 | .get_vblank_counter = &evergreen_get_vblank_counter, | 1117 | .process = &evergreen_irq_process, |
823 | .cs_parse = &evergreen_cs_parse, | 1118 | }, |
824 | .copy_blit = &r600_copy_blit, | 1119 | .display = { |
825 | .copy_dma = NULL, | 1120 | .bandwidth_update = &evergreen_bandwidth_update, |
826 | .copy = &r600_copy_blit, | 1121 | .get_vblank_counter = &evergreen_get_vblank_counter, |
827 | .get_engine_clock = &radeon_atom_get_engine_clock, | 1122 | .wait_for_vblank = &dce4_wait_for_vblank, |
828 | .set_engine_clock = &radeon_atom_set_engine_clock, | 1123 | }, |
829 | .get_memory_clock = &radeon_atom_get_memory_clock, | 1124 | .copy = { |
830 | .set_memory_clock = &radeon_atom_set_memory_clock, | 1125 | .blit = &r600_copy_blit, |
831 | .get_pcie_lanes = &r600_get_pcie_lanes, | 1126 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
832 | .set_pcie_lanes = &r600_set_pcie_lanes, | 1127 | .dma = NULL, |
833 | .set_clock_gating = NULL, | 1128 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
834 | .set_surface_reg = r600_set_surface_reg, | 1129 | .copy = &r600_copy_blit, |
835 | .clear_surface_reg = r600_clear_surface_reg, | 1130 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
836 | .bandwidth_update = &evergreen_bandwidth_update, | 1131 | }, |
837 | .hpd_init = &evergreen_hpd_init, | 1132 | .surface = { |
838 | .hpd_fini = &evergreen_hpd_fini, | 1133 | .set_reg = r600_set_surface_reg, |
839 | .hpd_sense = &evergreen_hpd_sense, | 1134 | .clear_reg = r600_clear_surface_reg, |
840 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | 1135 | }, |
841 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 1136 | .hpd = { |
842 | .gui_idle = &r600_gui_idle, | 1137 | .init = &evergreen_hpd_init, |
843 | .pm_misc = &evergreen_pm_misc, | 1138 | .fini = &evergreen_hpd_fini, |
844 | .pm_prepare = &evergreen_pm_prepare, | 1139 | .sense = &evergreen_hpd_sense, |
845 | .pm_finish = &evergreen_pm_finish, | 1140 | .set_polarity = &evergreen_hpd_set_polarity, |
846 | .pm_init_profile = &r600_pm_init_profile, | 1141 | }, |
847 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 1142 | .pm = { |
848 | .pre_page_flip = &evergreen_pre_page_flip, | 1143 | .misc = &evergreen_pm_misc, |
849 | .page_flip = &evergreen_page_flip, | 1144 | .prepare = &evergreen_pm_prepare, |
850 | .post_page_flip = &evergreen_post_page_flip, | 1145 | .finish = &evergreen_pm_finish, |
1146 | .init_profile = &r600_pm_init_profile, | ||
1147 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
1148 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
1149 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
1150 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
1151 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
1152 | .get_pcie_lanes = &r600_get_pcie_lanes, | ||
1153 | .set_pcie_lanes = &r600_set_pcie_lanes, | ||
1154 | .set_clock_gating = NULL, | ||
1155 | }, | ||
1156 | .pflip = { | ||
1157 | .pre_page_flip = &evergreen_pre_page_flip, | ||
1158 | .page_flip = &evergreen_page_flip, | ||
1159 | .post_page_flip = &evergreen_post_page_flip, | ||
1160 | }, | ||
851 | }; | 1161 | }; |
852 | 1162 | ||
853 | static struct radeon_asic sumo_asic = { | 1163 | static struct radeon_asic sumo_asic = { |
@@ -858,47 +1168,69 @@ static struct radeon_asic sumo_asic = { | |||
858 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | 1168 | .gpu_is_lockup = &evergreen_gpu_is_lockup, |
859 | .asic_reset = &evergreen_asic_reset, | 1169 | .asic_reset = &evergreen_asic_reset, |
860 | .vga_set_state = &r600_vga_set_state, | 1170 | .vga_set_state = &r600_vga_set_state, |
861 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1171 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
862 | .gart_set_page = &rs600_gart_set_page, | 1172 | .gui_idle = &r600_gui_idle, |
863 | .ring_test = &r600_ring_test, | 1173 | .mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
1174 | .gart = { | ||
1175 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
1176 | .set_page = &rs600_gart_set_page, | ||
1177 | }, | ||
864 | .ring = { | 1178 | .ring = { |
865 | [RADEON_RING_TYPE_GFX_INDEX] = { | 1179 | [RADEON_RING_TYPE_GFX_INDEX] = { |
866 | .ib_execute = &evergreen_ring_ib_execute, | 1180 | .ib_execute = &evergreen_ring_ib_execute, |
867 | .emit_fence = &r600_fence_ring_emit, | 1181 | .emit_fence = &r600_fence_ring_emit, |
868 | .emit_semaphore = &r600_semaphore_ring_emit, | 1182 | .emit_semaphore = &r600_semaphore_ring_emit, |
869 | } | 1183 | .cs_parse = &evergreen_cs_parse, |
1184 | .ring_test = &r600_ring_test, | ||
1185 | .ib_test = &r600_ib_test, | ||
1186 | }, | ||
1187 | }, | ||
1188 | .irq = { | ||
1189 | .set = &evergreen_irq_set, | ||
1190 | .process = &evergreen_irq_process, | ||
1191 | }, | ||
1192 | .display = { | ||
1193 | .bandwidth_update = &evergreen_bandwidth_update, | ||
1194 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
1195 | .wait_for_vblank = &dce4_wait_for_vblank, | ||
1196 | }, | ||
1197 | .copy = { | ||
1198 | .blit = &r600_copy_blit, | ||
1199 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | ||
1200 | .dma = NULL, | ||
1201 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, | ||
1202 | .copy = &r600_copy_blit, | ||
1203 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, | ||
1204 | }, | ||
1205 | .surface = { | ||
1206 | .set_reg = r600_set_surface_reg, | ||
1207 | .clear_reg = r600_clear_surface_reg, | ||
1208 | }, | ||
1209 | .hpd = { | ||
1210 | .init = &evergreen_hpd_init, | ||
1211 | .fini = &evergreen_hpd_fini, | ||
1212 | .sense = &evergreen_hpd_sense, | ||
1213 | .set_polarity = &evergreen_hpd_set_polarity, | ||
1214 | }, | ||
1215 | .pm = { | ||
1216 | .misc = &evergreen_pm_misc, | ||
1217 | .prepare = &evergreen_pm_prepare, | ||
1218 | .finish = &evergreen_pm_finish, | ||
1219 | .init_profile = &sumo_pm_init_profile, | ||
1220 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
1221 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
1222 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
1223 | .get_memory_clock = NULL, | ||
1224 | .set_memory_clock = NULL, | ||
1225 | .get_pcie_lanes = NULL, | ||
1226 | .set_pcie_lanes = NULL, | ||
1227 | .set_clock_gating = NULL, | ||
1228 | }, | ||
1229 | .pflip = { | ||
1230 | .pre_page_flip = &evergreen_pre_page_flip, | ||
1231 | .page_flip = &evergreen_page_flip, | ||
1232 | .post_page_flip = &evergreen_post_page_flip, | ||
870 | }, | 1233 | }, |
871 | .irq_set = &evergreen_irq_set, | ||
872 | .irq_process = &evergreen_irq_process, | ||
873 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
874 | .cs_parse = &evergreen_cs_parse, | ||
875 | .copy_blit = &r600_copy_blit, | ||
876 | .copy_dma = NULL, | ||
877 | .copy = &r600_copy_blit, | ||
878 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
879 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
880 | .get_memory_clock = NULL, | ||
881 | .set_memory_clock = NULL, | ||
882 | .get_pcie_lanes = NULL, | ||
883 | .set_pcie_lanes = NULL, | ||
884 | .set_clock_gating = NULL, | ||
885 | .set_surface_reg = r600_set_surface_reg, | ||
886 | .clear_surface_reg = r600_clear_surface_reg, | ||
887 | .bandwidth_update = &evergreen_bandwidth_update, | ||
888 | .hpd_init = &evergreen_hpd_init, | ||
889 | .hpd_fini = &evergreen_hpd_fini, | ||
890 | .hpd_sense = &evergreen_hpd_sense, | ||
891 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
892 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
893 | .gui_idle = &r600_gui_idle, | ||
894 | .pm_misc = &evergreen_pm_misc, | ||
895 | .pm_prepare = &evergreen_pm_prepare, | ||
896 | .pm_finish = &evergreen_pm_finish, | ||
897 | .pm_init_profile = &sumo_pm_init_profile, | ||
898 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
899 | .pre_page_flip = &evergreen_pre_page_flip, | ||
900 | .page_flip = &evergreen_page_flip, | ||
901 | .post_page_flip = &evergreen_post_page_flip, | ||
902 | }; | 1234 | }; |
903 | 1235 | ||
904 | static struct radeon_asic btc_asic = { | 1236 | static struct radeon_asic btc_asic = { |
@@ -909,47 +1241,69 @@ static struct radeon_asic btc_asic = { | |||
909 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | 1241 | .gpu_is_lockup = &evergreen_gpu_is_lockup, |
910 | .asic_reset = &evergreen_asic_reset, | 1242 | .asic_reset = &evergreen_asic_reset, |
911 | .vga_set_state = &r600_vga_set_state, | 1243 | .vga_set_state = &r600_vga_set_state, |
912 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1244 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
913 | .gart_set_page = &rs600_gart_set_page, | 1245 | .gui_idle = &r600_gui_idle, |
914 | .ring_test = &r600_ring_test, | 1246 | .mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
1247 | .gart = { | ||
1248 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
1249 | .set_page = &rs600_gart_set_page, | ||
1250 | }, | ||
915 | .ring = { | 1251 | .ring = { |
916 | [RADEON_RING_TYPE_GFX_INDEX] = { | 1252 | [RADEON_RING_TYPE_GFX_INDEX] = { |
917 | .ib_execute = &evergreen_ring_ib_execute, | 1253 | .ib_execute = &evergreen_ring_ib_execute, |
918 | .emit_fence = &r600_fence_ring_emit, | 1254 | .emit_fence = &r600_fence_ring_emit, |
919 | .emit_semaphore = &r600_semaphore_ring_emit, | 1255 | .emit_semaphore = &r600_semaphore_ring_emit, |
1256 | .cs_parse = &evergreen_cs_parse, | ||
1257 | .ring_test = &r600_ring_test, | ||
1258 | .ib_test = &r600_ib_test, | ||
920 | } | 1259 | } |
921 | }, | 1260 | }, |
922 | .irq_set = &evergreen_irq_set, | 1261 | .irq = { |
923 | .irq_process = &evergreen_irq_process, | 1262 | .set = &evergreen_irq_set, |
924 | .get_vblank_counter = &evergreen_get_vblank_counter, | 1263 | .process = &evergreen_irq_process, |
925 | .cs_parse = &evergreen_cs_parse, | 1264 | }, |
926 | .copy_blit = &r600_copy_blit, | 1265 | .display = { |
927 | .copy_dma = NULL, | 1266 | .bandwidth_update = &evergreen_bandwidth_update, |
928 | .copy = &r600_copy_blit, | 1267 | .get_vblank_counter = &evergreen_get_vblank_counter, |
929 | .get_engine_clock = &radeon_atom_get_engine_clock, | 1268 | .wait_for_vblank = &dce4_wait_for_vblank, |
930 | .set_engine_clock = &radeon_atom_set_engine_clock, | 1269 | }, |
931 | .get_memory_clock = &radeon_atom_get_memory_clock, | 1270 | .copy = { |
932 | .set_memory_clock = &radeon_atom_set_memory_clock, | 1271 | .blit = &r600_copy_blit, |
933 | .get_pcie_lanes = NULL, | 1272 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
934 | .set_pcie_lanes = NULL, | 1273 | .dma = NULL, |
935 | .set_clock_gating = NULL, | 1274 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
936 | .set_surface_reg = r600_set_surface_reg, | 1275 | .copy = &r600_copy_blit, |
937 | .clear_surface_reg = r600_clear_surface_reg, | 1276 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
938 | .bandwidth_update = &evergreen_bandwidth_update, | 1277 | }, |
939 | .hpd_init = &evergreen_hpd_init, | 1278 | .surface = { |
940 | .hpd_fini = &evergreen_hpd_fini, | 1279 | .set_reg = r600_set_surface_reg, |
941 | .hpd_sense = &evergreen_hpd_sense, | 1280 | .clear_reg = r600_clear_surface_reg, |
942 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | 1281 | }, |
943 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 1282 | .hpd = { |
944 | .gui_idle = &r600_gui_idle, | 1283 | .init = &evergreen_hpd_init, |
945 | .pm_misc = &evergreen_pm_misc, | 1284 | .fini = &evergreen_hpd_fini, |
946 | .pm_prepare = &evergreen_pm_prepare, | 1285 | .sense = &evergreen_hpd_sense, |
947 | .pm_finish = &evergreen_pm_finish, | 1286 | .set_polarity = &evergreen_hpd_set_polarity, |
948 | .pm_init_profile = &r600_pm_init_profile, | 1287 | }, |
949 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 1288 | .pm = { |
950 | .pre_page_flip = &evergreen_pre_page_flip, | 1289 | .misc = &evergreen_pm_misc, |
951 | .page_flip = &evergreen_page_flip, | 1290 | .prepare = &evergreen_pm_prepare, |
952 | .post_page_flip = &evergreen_post_page_flip, | 1291 | .finish = &evergreen_pm_finish, |
1292 | .init_profile = &r600_pm_init_profile, | ||
1293 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
1294 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
1295 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
1296 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
1297 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
1298 | .get_pcie_lanes = NULL, | ||
1299 | .set_pcie_lanes = NULL, | ||
1300 | .set_clock_gating = NULL, | ||
1301 | }, | ||
1302 | .pflip = { | ||
1303 | .pre_page_flip = &evergreen_pre_page_flip, | ||
1304 | .page_flip = &evergreen_page_flip, | ||
1305 | .post_page_flip = &evergreen_post_page_flip, | ||
1306 | }, | ||
953 | }; | 1307 | }; |
954 | 1308 | ||
955 | static const struct radeon_vm_funcs cayman_vm_funcs = { | 1309 | static const struct radeon_vm_funcs cayman_vm_funcs = { |
@@ -970,60 +1324,88 @@ static struct radeon_asic cayman_asic = { | |||
970 | .gpu_is_lockup = &cayman_gpu_is_lockup, | 1324 | .gpu_is_lockup = &cayman_gpu_is_lockup, |
971 | .asic_reset = &cayman_asic_reset, | 1325 | .asic_reset = &cayman_asic_reset, |
972 | .vga_set_state = &r600_vga_set_state, | 1326 | .vga_set_state = &r600_vga_set_state, |
973 | .gart_tlb_flush = &cayman_pcie_gart_tlb_flush, | 1327 | .ioctl_wait_idle = r600_ioctl_wait_idle, |
974 | .gart_set_page = &rs600_gart_set_page, | 1328 | .gui_idle = &r600_gui_idle, |
975 | .ring_test = &r600_ring_test, | 1329 | .mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
1330 | .gart = { | ||
1331 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | ||
1332 | .set_page = &rs600_gart_set_page, | ||
1333 | }, | ||
976 | .ring = { | 1334 | .ring = { |
977 | [RADEON_RING_TYPE_GFX_INDEX] = { | 1335 | [RADEON_RING_TYPE_GFX_INDEX] = { |
978 | .ib_execute = &cayman_ring_ib_execute, | 1336 | .ib_execute = &cayman_ring_ib_execute, |
979 | .ib_parse = &evergreen_ib_parse, | 1337 | .ib_parse = &evergreen_ib_parse, |
980 | .emit_fence = &cayman_fence_ring_emit, | 1338 | .emit_fence = &cayman_fence_ring_emit, |
981 | .emit_semaphore = &r600_semaphore_ring_emit, | 1339 | .emit_semaphore = &r600_semaphore_ring_emit, |
1340 | .cs_parse = &evergreen_cs_parse, | ||
1341 | .ring_test = &r600_ring_test, | ||
1342 | .ib_test = &r600_ib_test, | ||
982 | }, | 1343 | }, |
983 | [CAYMAN_RING_TYPE_CP1_INDEX] = { | 1344 | [CAYMAN_RING_TYPE_CP1_INDEX] = { |
984 | .ib_execute = &cayman_ring_ib_execute, | 1345 | .ib_execute = &cayman_ring_ib_execute, |
985 | .ib_parse = &evergreen_ib_parse, | 1346 | .ib_parse = &evergreen_ib_parse, |
986 | .emit_fence = &cayman_fence_ring_emit, | 1347 | .emit_fence = &cayman_fence_ring_emit, |
987 | .emit_semaphore = &r600_semaphore_ring_emit, | 1348 | .emit_semaphore = &r600_semaphore_ring_emit, |
1349 | .cs_parse = &evergreen_cs_parse, | ||
1350 | .ring_test = &r600_ring_test, | ||
1351 | .ib_test = &r600_ib_test, | ||
988 | }, | 1352 | }, |
989 | [CAYMAN_RING_TYPE_CP2_INDEX] = { | 1353 | [CAYMAN_RING_TYPE_CP2_INDEX] = { |
990 | .ib_execute = &cayman_ring_ib_execute, | 1354 | .ib_execute = &cayman_ring_ib_execute, |
991 | .ib_parse = &evergreen_ib_parse, | 1355 | .ib_parse = &evergreen_ib_parse, |
992 | .emit_fence = &cayman_fence_ring_emit, | 1356 | .emit_fence = &cayman_fence_ring_emit, |
993 | .emit_semaphore = &r600_semaphore_ring_emit, | 1357 | .emit_semaphore = &r600_semaphore_ring_emit, |
1358 | .cs_parse = &evergreen_cs_parse, | ||
1359 | .ring_test = &r600_ring_test, | ||
1360 | .ib_test = &r600_ib_test, | ||
994 | } | 1361 | } |
995 | }, | 1362 | }, |
996 | .irq_set = &evergreen_irq_set, | 1363 | .irq = { |
997 | .irq_process = &evergreen_irq_process, | 1364 | .set = &evergreen_irq_set, |
998 | .get_vblank_counter = &evergreen_get_vblank_counter, | 1365 | .process = &evergreen_irq_process, |
999 | .cs_parse = &evergreen_cs_parse, | 1366 | }, |
1000 | .copy_blit = &r600_copy_blit, | 1367 | .display = { |
1001 | .copy_dma = NULL, | 1368 | .bandwidth_update = &evergreen_bandwidth_update, |
1002 | .copy = &r600_copy_blit, | 1369 | .get_vblank_counter = &evergreen_get_vblank_counter, |
1003 | .get_engine_clock = &radeon_atom_get_engine_clock, | 1370 | .wait_for_vblank = &dce4_wait_for_vblank, |
1004 | .set_engine_clock = &radeon_atom_set_engine_clock, | 1371 | }, |
1005 | .get_memory_clock = &radeon_atom_get_memory_clock, | 1372 | .copy = { |
1006 | .set_memory_clock = &radeon_atom_set_memory_clock, | 1373 | .blit = &r600_copy_blit, |
1007 | .get_pcie_lanes = NULL, | 1374 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
1008 | .set_pcie_lanes = NULL, | 1375 | .dma = NULL, |
1009 | .set_clock_gating = NULL, | 1376 | .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
1010 | .set_surface_reg = r600_set_surface_reg, | 1377 | .copy = &r600_copy_blit, |
1011 | .clear_surface_reg = r600_clear_surface_reg, | 1378 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
1012 | .bandwidth_update = &evergreen_bandwidth_update, | 1379 | }, |
1013 | .hpd_init = &evergreen_hpd_init, | 1380 | .surface = { |
1014 | .hpd_fini = &evergreen_hpd_fini, | 1381 | .set_reg = r600_set_surface_reg, |
1015 | .hpd_sense = &evergreen_hpd_sense, | 1382 | .clear_reg = r600_clear_surface_reg, |
1016 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | 1383 | }, |
1017 | .ioctl_wait_idle = r600_ioctl_wait_idle, | 1384 | .hpd = { |
1018 | .gui_idle = &r600_gui_idle, | 1385 | .init = &evergreen_hpd_init, |
1019 | .pm_misc = &evergreen_pm_misc, | 1386 | .fini = &evergreen_hpd_fini, |
1020 | .pm_prepare = &evergreen_pm_prepare, | 1387 | .sense = &evergreen_hpd_sense, |
1021 | .pm_finish = &evergreen_pm_finish, | 1388 | .set_polarity = &evergreen_hpd_set_polarity, |
1022 | .pm_init_profile = &r600_pm_init_profile, | 1389 | }, |
1023 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 1390 | .pm = { |
1024 | .pre_page_flip = &evergreen_pre_page_flip, | 1391 | .misc = &evergreen_pm_misc, |
1025 | .page_flip = &evergreen_page_flip, | 1392 | .prepare = &evergreen_pm_prepare, |
1026 | .post_page_flip = &evergreen_post_page_flip, | 1393 | .finish = &evergreen_pm_finish, |
1394 | .init_profile = &r600_pm_init_profile, | ||
1395 | .get_dynpm_state = &r600_pm_get_dynpm_state, | ||
1396 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
1397 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
1398 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
1399 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
1400 | .get_pcie_lanes = NULL, | ||
1401 | .set_pcie_lanes = NULL, | ||
1402 | .set_clock_gating = NULL, | ||
1403 | }, | ||
1404 | .pflip = { | ||
1405 | .pre_page_flip = &evergreen_pre_page_flip, | ||
1406 | .page_flip = &evergreen_page_flip, | ||
1407 | .post_page_flip = &evergreen_post_page_flip, | ||
1408 | }, | ||
1027 | }; | 1409 | }; |
1028 | 1410 | ||
1029 | int radeon_asic_init(struct radeon_device *rdev) | 1411 | int radeon_asic_init(struct radeon_device *rdev) |
@@ -1036,9 +1418,6 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
1036 | else | 1418 | else |
1037 | rdev->num_crtc = 2; | 1419 | rdev->num_crtc = 2; |
1038 | 1420 | ||
1039 | /* set the ring used for bo copies */ | ||
1040 | rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX; | ||
1041 | |||
1042 | switch (rdev->family) { | 1421 | switch (rdev->family) { |
1043 | case CHIP_R100: | 1422 | case CHIP_R100: |
1044 | case CHIP_RV100: | 1423 | case CHIP_RV100: |
@@ -1068,10 +1447,10 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
1068 | rdev->asic = &r420_asic; | 1447 | rdev->asic = &r420_asic; |
1069 | /* handle macs */ | 1448 | /* handle macs */ |
1070 | if (rdev->bios == NULL) { | 1449 | if (rdev->bios == NULL) { |
1071 | rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; | 1450 | rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock; |
1072 | rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; | 1451 | rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock; |
1073 | rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; | 1452 | rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock; |
1074 | rdev->asic->set_memory_clock = NULL; | 1453 | rdev->asic->pm.set_memory_clock = NULL; |
1075 | } | 1454 | } |
1076 | break; | 1455 | break; |
1077 | case CHIP_RS400: | 1456 | case CHIP_RS400: |
@@ -1152,8 +1531,8 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
1152 | } | 1531 | } |
1153 | 1532 | ||
1154 | if (rdev->flags & RADEON_IS_IGP) { | 1533 | if (rdev->flags & RADEON_IS_IGP) { |
1155 | rdev->asic->get_memory_clock = NULL; | 1534 | rdev->asic->pm.get_memory_clock = NULL; |
1156 | rdev->asic->set_memory_clock = NULL; | 1535 | rdev->asic->pm.set_memory_clock = NULL; |
1157 | } | 1536 | } |
1158 | 1537 | ||
1159 | return 0; | 1538 | return 0; |
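The radeon_asic.c hunks above finish converting the flat per-ASIC callback table into grouped sub-structures (gart, ring, irq, display, copy, surface, hpd, pm, pflip), so call sites dereference e.g. rdev->asic->pm.get_memory_clock instead of rdev->asic->get_memory_clock. A minimal, self-contained sketch of that designated-initializer pattern follows; the struct and function names are simplified stand-ins, not the driver's real definitions.

#include <stdio.h>

/* Simplified stand-ins for two of the driver's callback groups. */
struct asic_pm_funcs {
	int  (*get_engine_clock)(void);
	void (*set_engine_clock)(int khz);
};

struct asic_pflip_funcs {
	void (*pre_page_flip)(int crtc);
	void (*post_page_flip)(int crtc);
};

struct asic_funcs {
	struct asic_pm_funcs    pm;
	struct asic_pflip_funcs pflip;
};

static int  stub_get_engine_clock(void)    { return 850000; }
static void stub_set_engine_clock(int khz) { printf("sclk -> %d kHz\n", khz); }
static void stub_pre_page_flip(int crtc)   { printf("pre flip, crtc %d\n", crtc); }
static void stub_post_page_flip(int crtc)  { printf("post flip, crtc %d\n", crtc); }

/* Designated initializers group the callbacks per functional block,
 * mirroring the .pm = { ... } / .pflip = { ... } layout in the diff. */
static struct asic_funcs example_asic = {
	.pm = {
		.get_engine_clock = stub_get_engine_clock,
		.set_engine_clock = stub_set_engine_clock,
	},
	.pflip = {
		.pre_page_flip  = stub_pre_page_flip,
		.post_page_flip = stub_post_page_flip,
	},
};

int main(void)
{
	/* Call sites now go through the sub-struct. */
	printf("engine clock: %d kHz\n", example_asic.pm.get_engine_clock());
	example_asic.pm.set_engine_clock(900000);
	example_asic.pflip.pre_page_flip(0);
	example_asic.pflip.post_page_flip(0);
	return 0;
}

The grouping also keeps quirk overrides local to one block, as in the Mac handling later in radeon_asic_init(), which now only touches rdev->asic->pm.* pointers.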
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 6304aef0d9b2..b8f0a16bf65f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -63,7 +63,7 @@ int r100_asic_reset(struct radeon_device *rdev); | |||
63 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 63 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
64 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 64 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
65 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 65 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
66 | void r100_ring_start(struct radeon_device *rdev); | 66 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
67 | int r100_irq_set(struct radeon_device *rdev); | 67 | int r100_irq_set(struct radeon_device *rdev); |
68 | int r100_irq_process(struct radeon_device *rdev); | 68 | int r100_irq_process(struct radeon_device *rdev); |
69 | void r100_fence_ring_emit(struct radeon_device *rdev, | 69 | void r100_fence_ring_emit(struct radeon_device *rdev, |
@@ -109,7 +109,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, | |||
109 | struct r100_gpu_lockup *lockup, | 109 | struct r100_gpu_lockup *lockup, |
110 | struct radeon_ring *cp); | 110 | struct radeon_ring *cp); |
111 | void r100_ib_fini(struct radeon_device *rdev); | 111 | void r100_ib_fini(struct radeon_device *rdev); |
112 | int r100_ib_test(struct radeon_device *rdev); | 112 | int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
113 | void r100_irq_disable(struct radeon_device *rdev); | 113 | void r100_irq_disable(struct radeon_device *rdev); |
114 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); | 114 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
115 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); | 115 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
@@ -139,6 +139,8 @@ extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); | |||
139 | extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); | 139 | extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); |
140 | extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 140 | extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
141 | extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | 141 | extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); |
142 | extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); | ||
143 | extern int r100_mc_wait_for_idle(struct radeon_device *rdev); | ||
142 | 144 | ||
143 | /* | 145 | /* |
144 | * r200,rv250,rs300,rv280 | 146 | * r200,rv250,rs300,rv280 |
@@ -159,7 +161,7 @@ extern int r300_suspend(struct radeon_device *rdev); | |||
159 | extern int r300_resume(struct radeon_device *rdev); | 161 | extern int r300_resume(struct radeon_device *rdev); |
160 | extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); | 162 | extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
161 | extern int r300_asic_reset(struct radeon_device *rdev); | 163 | extern int r300_asic_reset(struct radeon_device *rdev); |
162 | extern void r300_ring_start(struct radeon_device *rdev); | 164 | extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
163 | extern void r300_fence_ring_emit(struct radeon_device *rdev, | 165 | extern void r300_fence_ring_emit(struct radeon_device *rdev, |
164 | struct radeon_fence *fence); | 166 | struct radeon_fence *fence); |
165 | extern int r300_cs_parse(struct radeon_cs_parser *p); | 167 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
@@ -176,6 +178,7 @@ extern int rv370_pcie_gart_init(struct radeon_device *rdev); | |||
176 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | 178 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); |
177 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | 179 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); |
178 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); | 180 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); |
181 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
179 | 182 | ||
180 | /* | 183 | /* |
181 | * r420,r423,rv410 | 184 | * r420,r423,rv410 |
@@ -206,6 +209,7 @@ int rs400_gart_enable(struct radeon_device *rdev); | |||
206 | void rs400_gart_adjust_size(struct radeon_device *rdev); | 209 | void rs400_gart_adjust_size(struct radeon_device *rdev); |
207 | void rs400_gart_disable(struct radeon_device *rdev); | 210 | void rs400_gart_disable(struct radeon_device *rdev); |
208 | void rs400_gart_fini(struct radeon_device *rdev); | 211 | void rs400_gart_fini(struct radeon_device *rdev); |
212 | extern int rs400_mc_wait_for_idle(struct radeon_device *rdev); | ||
209 | 213 | ||
210 | /* | 214 | /* |
211 | * rs600. | 215 | * rs600. |
@@ -236,7 +240,8 @@ extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); | |||
236 | extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 240 | extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
237 | extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); | 241 | extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); |
238 | void rs600_set_safe_registers(struct radeon_device *rdev); | 242 | void rs600_set_safe_registers(struct radeon_device *rdev); |
239 | 243 | extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc); | |
244 | extern int rs600_mc_wait_for_idle(struct radeon_device *rdev); | ||
240 | 245 | ||
241 | /* | 246 | /* |
242 | * rs690,rs740 | 247 | * rs690,rs740 |
@@ -251,6 +256,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev); | |||
251 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | 256 | void rs690_line_buffer_adjust(struct radeon_device *rdev, |
252 | struct drm_display_mode *mode1, | 257 | struct drm_display_mode *mode1, |
253 | struct drm_display_mode *mode2); | 258 | struct drm_display_mode *mode2); |
259 | extern int rs690_mc_wait_for_idle(struct radeon_device *rdev); | ||
254 | 260 | ||
255 | /* | 261 | /* |
256 | * rv515 | 262 | * rv515 |
@@ -267,7 +273,7 @@ int rv515_init(struct radeon_device *rdev); | |||
267 | void rv515_fini(struct radeon_device *rdev); | 273 | void rv515_fini(struct radeon_device *rdev); |
268 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 274 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
269 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 275 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
270 | void rv515_ring_start(struct radeon_device *rdev); | 276 | void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
271 | void rv515_bandwidth_update(struct radeon_device *rdev); | 277 | void rv515_bandwidth_update(struct radeon_device *rdev); |
272 | int rv515_resume(struct radeon_device *rdev); | 278 | int rv515_resume(struct radeon_device *rdev); |
273 | int rv515_suspend(struct radeon_device *rdev); | 279 | int rv515_suspend(struct radeon_device *rdev); |
@@ -278,13 +284,14 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); | |||
278 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); | 284 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); |
279 | void rv515_clock_startup(struct radeon_device *rdev); | 285 | void rv515_clock_startup(struct radeon_device *rdev); |
280 | void rv515_debugfs(struct radeon_device *rdev); | 286 | void rv515_debugfs(struct radeon_device *rdev); |
281 | 287 | int rv515_mc_wait_for_idle(struct radeon_device *rdev); | |
282 | 288 | ||
283 | /* | 289 | /* |
284 | * r520,rv530,rv560,rv570,r580 | 290 | * r520,rv530,rv560,rv570,r580 |
285 | */ | 291 | */ |
286 | int r520_init(struct radeon_device *rdev); | 292 | int r520_init(struct radeon_device *rdev); |
287 | int r520_resume(struct radeon_device *rdev); | 293 | int r520_resume(struct radeon_device *rdev); |
294 | int r520_mc_wait_for_idle(struct radeon_device *rdev); | ||
288 | 295 | ||
289 | /* | 296 | /* |
290 | * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 | 297 | * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 |
@@ -312,7 +319,7 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, | |||
312 | uint32_t tiling_flags, uint32_t pitch, | 319 | uint32_t tiling_flags, uint32_t pitch, |
313 | uint32_t offset, uint32_t obj_size); | 320 | uint32_t offset, uint32_t obj_size); |
314 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 321 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
315 | int r600_ib_test(struct radeon_device *rdev, int ring); | 322 | int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
316 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 323 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
317 | int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); | 324 | int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
318 | int r600_copy_blit(struct radeon_device *rdev, | 325 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -375,6 +382,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | |||
375 | void r600_kms_blit_copy(struct radeon_device *rdev, | 382 | void r600_kms_blit_copy(struct radeon_device *rdev, |
376 | u64 src_gpu_addr, u64 dst_gpu_addr, | 383 | u64 src_gpu_addr, u64 dst_gpu_addr, |
377 | unsigned num_gpu_pages); | 384 | unsigned num_gpu_pages); |
385 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | ||
378 | 386 | ||
379 | /* | 387 | /* |
380 | * rv770,rv730,rv710,rv740 | 388 | * rv770,rv730,rv710,rv740 |
@@ -423,8 +431,10 @@ extern void sumo_pm_init_profile(struct radeon_device *rdev); | |||
423 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); | 431 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); |
424 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 432 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
425 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | 433 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); |
434 | extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc); | ||
426 | void evergreen_disable_interrupt_state(struct radeon_device *rdev); | 435 | void evergreen_disable_interrupt_state(struct radeon_device *rdev); |
427 | int evergreen_blit_init(struct radeon_device *rdev); | 436 | int evergreen_blit_init(struct radeon_device *rdev); |
437 | int evergreen_mc_wait_for_idle(struct radeon_device *rdev); | ||
428 | 438 | ||
429 | /* | 439 | /* |
430 | * cayman | 440 | * cayman |
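The header hunks add per-family mc_wait_for_idle() prototypes (r100, r300, rs400, rs600, rs690, rv515, r520, r600, evergreen) backing the new top-level .mc_wait_for_idle callback, and thread struct radeon_ring * through ring_start and ib_test. A rough sketch of the usual shape of such a wait helper, assuming a status register with an idle bit and a microsecond timeout loop; the constant and bit position here are illustrative only.

#include <stdio.h>

#define USEC_TIMEOUT 100000       /* illustrative; the driver uses rdev->usec_timeout */
#define MC_IDLE_BIT  (1u << 0)    /* assumption: bit 0 of the status register means idle */

/* Stub register read; the real helpers poll a family-specific MC status
 * register via RREG32(). */
static unsigned int read_mc_status(void)
{
	return MC_IDLE_BIT;
}

/* Typical shape of an mc_wait_for_idle() helper: poll, wait ~1 us per
 * iteration, give up with an error after the timeout. */
static int mc_wait_for_idle(void)
{
	unsigned int i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (read_mc_status() & MC_IDLE_BIT)
			return 0;
		/* udelay(1) in the kernel; omitted in this user-space sketch */
	}
	return -1;
}

int main(void)
{
	printf("mc_wait_for_idle -> %d\n", mc_wait_for_idle());
	return 0;
}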
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1f53ae74ada1..73541373bf56 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -442,6 +442,20 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
442 | struct radeon_device *rdev = dev->dev_private; | 442 | struct radeon_device *rdev = dev->dev_private; |
443 | *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); | 443 | *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); |
444 | } | 444 | } |
445 | |||
446 | /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ | ||
447 | if ((dev->pdev->device == 0x9802) && | ||
448 | (dev->pdev->subsystem_vendor == 0x1734) && | ||
449 | (dev->pdev->subsystem_device == 0x11bd)) { | ||
450 | if (*connector_type == DRM_MODE_CONNECTOR_VGA) { | ||
451 | *connector_type = DRM_MODE_CONNECTOR_DVII; | ||
452 | *line_mux = 0x3103; | ||
453 | } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) { | ||
454 | *connector_type = DRM_MODE_CONNECTOR_DVII; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | |||
445 | return true; | 459 | return true; |
446 | } | 460 | } |
447 | 461 | ||
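The radeon_atombios.c hunk adds a board-specific quirk keyed on PCI device and subsystem IDs: on the Fujitsu D3003-S2 the BIOS tables describe one physical DVI-I port as separate DVI-D and VGA connectors, so both are folded back into DVI-I (and the VGA entry gets a corrected line_mux). A compact sketch of the ID-matching pattern, with stand-in types instead of the DRM/PCI structures; the line_mux override is omitted here.

#include <stdio.h>

/* Stand-ins for dev->pdev fields and the connector type constants. */
struct pci_ids {
	unsigned short device;
	unsigned short subsystem_vendor;
	unsigned short subsystem_device;
};

enum connector_type { CONN_VGA, CONN_DVID, CONN_DVII };

/* Fold the two bogus connectors back into DVI-I on the affected board. */
static void apply_fujitsu_d3003_quirk(const struct pci_ids *ids,
				      enum connector_type *type)
{
	if (ids->device != 0x9802 ||
	    ids->subsystem_vendor != 0x1734 ||
	    ids->subsystem_device != 0x11bd)
		return;

	if (*type == CONN_VGA || *type == CONN_DVID)
		*type = CONN_DVII;
}

int main(void)
{
	struct pci_ids board = { 0x9802, 0x1734, 0x11bd };
	enum connector_type type = CONN_DVID;

	apply_fujitsu_d3003_quirk(&board, &type);
	printf("connector type: %d (2 == DVI-I)\n", (int)type);
	return 0;
}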
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 815f2341ab94..fef7b722b05d 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -43,17 +43,19 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, | |||
43 | 43 | ||
44 | start_jiffies = jiffies; | 44 | start_jiffies = jiffies; |
45 | for (i = 0; i < n; i++) { | 45 | for (i = 0; i < n; i++) { |
46 | r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); | ||
47 | if (r) | ||
48 | return r; | ||
49 | |||
50 | switch (flag) { | 46 | switch (flag) { |
51 | case RADEON_BENCHMARK_COPY_DMA: | 47 | case RADEON_BENCHMARK_COPY_DMA: |
48 | r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev)); | ||
49 | if (r) | ||
50 | return r; | ||
52 | r = radeon_copy_dma(rdev, saddr, daddr, | 51 | r = radeon_copy_dma(rdev, saddr, daddr, |
53 | size / RADEON_GPU_PAGE_SIZE, | 52 | size / RADEON_GPU_PAGE_SIZE, |
54 | fence); | 53 | fence); |
55 | break; | 54 | break; |
56 | case RADEON_BENCHMARK_COPY_BLIT: | 55 | case RADEON_BENCHMARK_COPY_BLIT: |
56 | r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev)); | ||
57 | if (r) | ||
58 | return r; | ||
57 | r = radeon_copy_blit(rdev, saddr, daddr, | 59 | r = radeon_copy_blit(rdev, saddr, daddr, |
58 | size / RADEON_GPU_PAGE_SIZE, | 60 | size / RADEON_GPU_PAGE_SIZE, |
59 | fence); | 61 | fence); |
@@ -129,7 +131,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
129 | /* r100 doesn't have dma engine so skip the test */ | 131 | /* r100 doesn't have dma engine so skip the test */ |
130 | /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ | 132 | /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ |
131 | /* skip it as well if domains are the same */ | 133 | /* skip it as well if domains are the same */ |
132 | if ((rdev->asic->copy_dma) && (sdomain != ddomain)) { | 134 | if ((rdev->asic->copy.dma) && (sdomain != ddomain)) { |
133 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 135 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
134 | RADEON_BENCHMARK_COPY_DMA, n); | 136 | RADEON_BENCHMARK_COPY_DMA, n); |
135 | if (time < 0) | 137 | if (time < 0) |
@@ -208,22 +210,22 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number) | |||
208 | break; | 210 | break; |
209 | case 3: | 211 | case 3: |
210 | /* GTT to VRAM, buffer size sweep, powers of 2 */ | 212 | /* GTT to VRAM, buffer size sweep, powers of 2 */ |
211 | for (i = 1; i <= 65536; i <<= 1) | 213 | for (i = 1; i <= 16384; i <<= 1) |
212 | radeon_benchmark_move(rdev, i*1024, | 214 | radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE, |
213 | RADEON_GEM_DOMAIN_GTT, | 215 | RADEON_GEM_DOMAIN_GTT, |
214 | RADEON_GEM_DOMAIN_VRAM); | 216 | RADEON_GEM_DOMAIN_VRAM); |
215 | break; | 217 | break; |
216 | case 4: | 218 | case 4: |
217 | /* VRAM to GTT, buffer size sweep, powers of 2 */ | 219 | /* VRAM to GTT, buffer size sweep, powers of 2 */ |
218 | for (i = 1; i <= 65536; i <<= 1) | 220 | for (i = 1; i <= 16384; i <<= 1) |
219 | radeon_benchmark_move(rdev, i*1024, | 221 | radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE, |
220 | RADEON_GEM_DOMAIN_VRAM, | 222 | RADEON_GEM_DOMAIN_VRAM, |
221 | RADEON_GEM_DOMAIN_GTT); | 223 | RADEON_GEM_DOMAIN_GTT); |
222 | break; | 224 | break; |
223 | case 5: | 225 | case 5: |
224 | /* VRAM to VRAM, buffer size sweep, powers of 2 */ | 226 | /* VRAM to VRAM, buffer size sweep, powers of 2 */ |
225 | for (i = 1; i <= 65536; i <<= 1) | 227 | for (i = 1; i <= 16384; i <<= 1) |
226 | radeon_benchmark_move(rdev, i*1024, | 228 | radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE, |
227 | RADEON_GEM_DOMAIN_VRAM, | 229 | RADEON_GEM_DOMAIN_VRAM, |
228 | RADEON_GEM_DOMAIN_VRAM); | 230 | RADEON_GEM_DOMAIN_VRAM); |
229 | break; | 231 | break; |
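The benchmark hunks move fence creation into the per-engine branches so the fence is created on the ring that will actually signal it (radeon_copy_dma_ring_index() vs radeon_copy_blit_ring_index()), and change the size sweeps from 1 KiB multiples up to 64 MiB to GPU-page multiples capped at 16384 pages. A small sketch of the reworked sweep, assuming a 4 KiB RADEON_GPU_PAGE_SIZE:

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096    /* assumption for this sketch */

int main(void)
{
	unsigned int i;

	/* Powers-of-two sweep in GPU-page units, as in benchmark cases 3-5:
	 * 1 page, 2 pages, ... 16384 pages (64 MiB at 4 KiB per page). */
	for (i = 1; i <= 16384; i <<= 1)
		printf("copy size: %u bytes (%u pages)\n",
		       i * RADEON_GPU_PAGE_SIZE, i);
	return 0;
}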
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h new file mode 100644 index 000000000000..4ecbe72c9d2d --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_blit_common.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2009 Red Hat Inc. | ||
4 | * Copyright 2012 Alcatel-Lucent, Inc. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef __RADEON_BLIT_COMMON_H__ | ||
28 | |||
29 | #define DI_PT_RECTLIST 0x11 | ||
30 | #define DI_INDEX_SIZE_16_BIT 0x0 | ||
31 | #define DI_SRC_SEL_AUTO_INDEX 0x2 | ||
32 | |||
33 | #define FMT_8 0x1 | ||
34 | #define FMT_5_6_5 0x8 | ||
35 | #define FMT_8_8_8_8 0x1a | ||
36 | #define COLOR_8 0x1 | ||
37 | #define COLOR_5_6_5 0x8 | ||
38 | #define COLOR_8_8_8_8 0x1a | ||
39 | |||
40 | #define RECT_UNIT_H 32 | ||
41 | #define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H) | ||
42 | |||
43 | #define __RADEON_BLIT_COMMON_H__ | ||
44 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index b6e18c8db9f5..6ae0c75f016a 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -334,7 +334,7 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
334 | 334 | ||
335 | if (!rdev->clock.default_sclk) | 335 | if (!rdev->clock.default_sclk) |
336 | rdev->clock.default_sclk = radeon_get_engine_clock(rdev); | 336 | rdev->clock.default_sclk = radeon_get_engine_clock(rdev); |
337 | if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock) | 337 | if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock) |
338 | rdev->clock.default_mclk = radeon_get_memory_clock(rdev); | 338 | rdev->clock.default_mclk = radeon_get_memory_clock(rdev); |
339 | 339 | ||
340 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 340 | rdev->pm.current_sclk = rdev->clock.default_sclk; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 8c9a8115b632..64774ac94449 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -827,6 +827,27 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) | |||
827 | return ret; | 827 | return ret; |
828 | } | 828 | } |
829 | 829 | ||
830 | static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector) | ||
831 | { | ||
832 | struct drm_device *dev = connector->dev; | ||
833 | struct radeon_device *rdev = dev->dev_private; | ||
834 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
835 | enum drm_connector_status status; | ||
836 | |||
837 | /* We only trust HPD on R600 and newer ASICS. */ | ||
838 | if (rdev->family >= CHIP_R600 | ||
839 | && radeon_connector->hpd.hpd != RADEON_HPD_NONE) { | ||
840 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
841 | status = connector_status_connected; | ||
842 | else | ||
843 | status = connector_status_disconnected; | ||
844 | if (connector->status == status) | ||
845 | return true; | ||
846 | } | ||
847 | |||
848 | return false; | ||
849 | } | ||
850 | |||
830 | /* | 851 | /* |
831 | * DVI is complicated | 852 | * DVI is complicated |
832 | * Do a DDC probe, if DDC probe passes, get the full EDID so | 853 | * Do a DDC probe, if DDC probe passes, get the full EDID so |
@@ -851,6 +872,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
851 | enum drm_connector_status ret = connector_status_disconnected; | 872 | enum drm_connector_status ret = connector_status_disconnected; |
852 | bool dret = false; | 873 | bool dret = false; |
853 | 874 | ||
875 | if (!force && radeon_check_hpd_status_unchanged(connector)) | ||
876 | return connector->status; | ||
877 | |||
854 | if (radeon_connector->ddc_bus) | 878 | if (radeon_connector->ddc_bus) |
855 | dret = radeon_ddc_probe(radeon_connector); | 879 | dret = radeon_ddc_probe(radeon_connector); |
856 | if (dret) { | 880 | if (dret) { |
@@ -946,6 +970,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
946 | 970 | ||
947 | encoder = obj_to_encoder(obj); | 971 | encoder = obj_to_encoder(obj); |
948 | 972 | ||
973 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && | ||
974 | encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) | ||
975 | continue; | ||
976 | |||
949 | encoder_funcs = encoder->helper_private; | 977 | encoder_funcs = encoder->helper_private; |
950 | if (encoder_funcs->detect) { | 978 | if (encoder_funcs->detect) { |
951 | if (ret != connector_status_connected) { | 979 | if (ret != connector_status_connected) { |
@@ -1250,6 +1278,9 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1250 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | 1278 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; |
1251 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | 1279 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
1252 | 1280 | ||
1281 | if (!force && radeon_check_hpd_status_unchanged(connector)) | ||
1282 | return connector->status; | ||
1283 | |||
1253 | if (radeon_connector->edid) { | 1284 | if (radeon_connector->edid) { |
1254 | kfree(radeon_connector->edid); | 1285 | kfree(radeon_connector->edid); |
1255 | radeon_connector->edid = NULL; | 1286 | radeon_connector->edid = NULL; |
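The connector hunks add radeon_check_hpd_status_unchanged() and call it at the top of the DVI and DP detect paths: when detection is not forced and the hot-plug pin (trusted only on R600 and newer) still reports the state the connector already has, the cached status is returned and the DDC/EDID probe is skipped. They also restrict the analog load-detection loop to DAC/TVDAC encoders. A compact sketch of the early-out control flow, with stubbed types rather than the DRM structures:

#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED };

struct connector {
	enum status status;   /* last reported status */
	bool has_hpd_pin;     /* hot-plug detect pin wired up and trusted */
};

/* Stub for the hardware sense; the real driver reads an HPD register. */
static bool hpd_sense(const struct connector *c)
{
	return c->status == CONNECTED;   /* pretend nothing changed */
}

/* True when the cached status can be trusted and the DDC probe skipped. */
static bool hpd_status_unchanged(const struct connector *c)
{
	if (!c->has_hpd_pin)
		return false;
	return (hpd_sense(c) ? CONNECTED : DISCONNECTED) == c->status;
}

static enum status detect(struct connector *c, bool force)
{
	if (!force && hpd_status_unchanged(c))
		return c->status;        /* early out, no DDC/EDID probe */
	/* ... full DDC probe and analog load detection would run here ... */
	return c->status;
}

int main(void)
{
	struct connector dvi = { .status = CONNECTED, .has_hpd_pin = true };

	printf("detect -> %d (1 == connected)\n", (int)detect(&dvi, false));
	return 0;
}

The early-out mainly benefits output polling, where detect() runs periodically with force cleared.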
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 72ae8266b8e9..0ebb7d4796fa 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -2115,6 +2115,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
2115 | break; | 2115 | break; |
2116 | } | 2116 | } |
2117 | 2117 | ||
2118 | pci_set_master(dev->pdev); | ||
2119 | |||
2118 | if (drm_pci_device_is_agp(dev)) | 2120 | if (drm_pci_device_is_agp(dev)) |
2119 | dev_priv->flags |= RADEON_IS_AGP; | 2121 | dev_priv->flags |= RADEON_IS_AGP; |
2120 | else if (pci_is_pcie(dev->pdev)) | 2122 | else if (pci_is_pcie(dev->pdev)) |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index e64bec488ed8..d9d9f5a59c42 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -85,12 +85,6 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
85 | radeon_bo_list_add_object(&p->relocs[i].lobj, | 85 | radeon_bo_list_add_object(&p->relocs[i].lobj, |
86 | &p->validated); | 86 | &p->validated); |
87 | 87 | ||
88 | if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) { | ||
89 | struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj; | ||
90 | if (!radeon_fence_signaled(fence)) { | ||
91 | p->sync_to_ring[fence->ring] = true; | ||
92 | } | ||
93 | } | ||
94 | } else | 88 | } else |
95 | p->relocs[i].handle = 0; | 89 | p->relocs[i].handle = 0; |
96 | } | 90 | } |
@@ -118,11 +112,24 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority | |||
118 | 112 | ||
119 | static int radeon_cs_sync_rings(struct radeon_cs_parser *p) | 113 | static int radeon_cs_sync_rings(struct radeon_cs_parser *p) |
120 | { | 114 | { |
115 | bool sync_to_ring[RADEON_NUM_RINGS] = { }; | ||
121 | int i, r; | 116 | int i, r; |
122 | 117 | ||
118 | for (i = 0; i < p->nrelocs; i++) { | ||
119 | if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj) | ||
120 | continue; | ||
121 | |||
122 | if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) { | ||
123 | struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj; | ||
124 | if (!radeon_fence_signaled(fence)) { | ||
125 | sync_to_ring[fence->ring] = true; | ||
126 | } | ||
127 | } | ||
128 | } | ||
129 | |||
123 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 130 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
124 | /* no need to sync to our own or unused rings */ | 131 | /* no need to sync to our own or unused rings */ |
125 | if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready) | 132 | if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready) |
126 | continue; | 133 | continue; |
127 | 134 | ||
128 | if (!p->ib->fence->semaphore) { | 135 | if (!p->ib->fence->semaphore) { |
@@ -236,20 +243,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
236 | if ((p->cs_flags & RADEON_CS_USE_VM) && | 243 | if ((p->cs_flags & RADEON_CS_USE_VM) && |
237 | !p->rdev->vm_manager.enabled) { | 244 | !p->rdev->vm_manager.enabled) { |
238 | DRM_ERROR("VM not active on asic!\n"); | 245 | DRM_ERROR("VM not active on asic!\n"); |
239 | if (p->chunk_relocs_idx != -1) | ||
240 | kfree(p->chunks[p->chunk_relocs_idx].kdata); | ||
241 | if (p->chunk_flags_idx != -1) | ||
242 | kfree(p->chunks[p->chunk_flags_idx].kdata); | ||
243 | return -EINVAL; | 246 | return -EINVAL; |
244 | } | 247 | } |
245 | 248 | ||
246 | if (radeon_cs_get_ring(p, ring, priority)) { | 249 | if (radeon_cs_get_ring(p, ring, priority)) |
247 | if (p->chunk_relocs_idx != -1) | ||
248 | kfree(p->chunks[p->chunk_relocs_idx].kdata); | ||
249 | if (p->chunk_flags_idx != -1) | ||
250 | kfree(p->chunks[p->chunk_flags_idx].kdata); | ||
251 | return -EINVAL; | 250 | return -EINVAL; |
252 | } | ||
253 | 251 | ||
254 | 252 | ||
255 | /* deal with non-vm */ | 253 | /* deal with non-vm */ |
@@ -264,11 +262,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
264 | p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); | 262 | p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); |
265 | p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); | 263 | p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); |
266 | if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || | 264 | if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || |
267 | p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { | 265 | p->chunks[p->chunk_ib_idx].kpage[1] == NULL) |
268 | kfree(p->chunks[p->chunk_ib_idx].kpage[0]); | ||
269 | kfree(p->chunks[p->chunk_ib_idx].kpage[1]); | ||
270 | return -ENOMEM; | 266 | return -ENOMEM; |
271 | } | ||
272 | p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1; | 267 | p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1; |
273 | p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1; | 268 | p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1; |
274 | p->chunks[p->chunk_ib_idx].last_copied_page = -1; | 269 | p->chunks[p->chunk_ib_idx].last_copied_page = -1; |
@@ -341,7 +336,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, | |||
341 | return r; | 336 | return r; |
342 | } | 337 | } |
343 | parser->ib->length_dw = ib_chunk->length_dw; | 338 | parser->ib->length_dw = ib_chunk->length_dw; |
344 | r = radeon_cs_parse(parser); | 339 | r = radeon_cs_parse(rdev, parser->ring, parser); |
345 | if (r || parser->parser_error) { | 340 | if (r || parser->parser_error) { |
346 | DRM_ERROR("Invalid command stream !\n"); | 341 | DRM_ERROR("Invalid command stream !\n"); |
347 | return r; | 342 | return r; |
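The radeon_cs.c hunks move the "which rings must we wait on" bookkeeping out of relocation parsing and into radeon_cs_sync_rings(), which now walks the relocations itself, collects the rings with unsignaled fences (honouring RADEON_RELOC_DONT_SYNC), and then emits a wait for each remaining ring other than its own; the early-error paths also stop freeing chunk data by hand. A simplified, self-contained sketch of that two-pass flow; the types and ring count are placeholders.

#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3

struct reloc {
	bool valid;           /* reloc has a buffer object attached */
	bool dont_sync;       /* RADEON_RELOC_DONT_SYNC analogue */
	int  fence_ring;      /* ring the buffer's fence belongs to */
	bool fence_signaled;  /* fence already signaled? */
};

/* Two-pass flow of the reworked sync step: first collect the rings we
 * still owe a wait to, then emit one semaphore wait per such ring. */
static void sync_rings(const struct reloc *relocs, int nrelocs, int own_ring)
{
	bool sync_to_ring[NUM_RINGS] = { false };
	int i;

	for (i = 0; i < nrelocs; i++) {
		if (!relocs[i].valid || relocs[i].dont_sync)
			continue;
		if (!relocs[i].fence_signaled)
			sync_to_ring[relocs[i].fence_ring] = true;
	}

	for (i = 0; i < NUM_RINGS; i++) {
		if (i == own_ring || !sync_to_ring[i])
			continue;
		printf("emit semaphore wait for ring %d\n", i);
	}
}

int main(void)
{
	struct reloc relocs[] = {
		{ .valid = true, .fence_ring = 1, .fence_signaled = false },
		{ .valid = true, .fence_ring = 0, .fence_signaled = true  },
	};

	sync_rings(relocs, 2, 0);
	return 0;
}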
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index fde25c0d65a0..42acc6449dd6 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -151,7 +151,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
151 | uint32_t height) | 151 | uint32_t height) |
152 | { | 152 | { |
153 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 153 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
154 | struct radeon_device *rdev = crtc->dev->dev_private; | ||
154 | struct drm_gem_object *obj; | 155 | struct drm_gem_object *obj; |
156 | struct radeon_bo *robj; | ||
155 | uint64_t gpu_addr; | 157 | uint64_t gpu_addr; |
156 | int ret; | 158 | int ret; |
157 | 159 | ||
@@ -173,7 +175,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
173 | return -ENOENT; | 175 | return -ENOENT; |
174 | } | 176 | } |
175 | 177 | ||
176 | ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | 178 | robj = gem_to_radeon_bo(obj); |
179 | ret = radeon_bo_reserve(robj, false); | ||
180 | if (unlikely(ret != 0)) | ||
181 | goto fail; | ||
182 | /* Only 27 bit offset for legacy cursor */ | ||
183 | ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, | ||
184 | ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, | ||
185 | &gpu_addr); | ||
186 | radeon_bo_unreserve(robj); | ||
177 | if (ret) | 187 | if (ret) |
178 | goto fail; | 188 | goto fail; |
179 | 189 | ||
@@ -181,14 +191,18 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
181 | radeon_crtc->cursor_height = height; | 191 | radeon_crtc->cursor_height = height; |
182 | 192 | ||
183 | radeon_lock_cursor(crtc, true); | 193 | radeon_lock_cursor(crtc, true); |
184 | /* XXX only 27 bit offset for legacy cursor */ | ||
185 | radeon_set_cursor(crtc, obj, gpu_addr); | 194 | radeon_set_cursor(crtc, obj, gpu_addr); |
186 | radeon_show_cursor(crtc); | 195 | radeon_show_cursor(crtc); |
187 | radeon_lock_cursor(crtc, false); | 196 | radeon_lock_cursor(crtc, false); |
188 | 197 | ||
189 | unpin: | 198 | unpin: |
190 | if (radeon_crtc->cursor_bo) { | 199 | if (radeon_crtc->cursor_bo) { |
191 | radeon_gem_object_unpin(radeon_crtc->cursor_bo); | 200 | robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); |
201 | ret = radeon_bo_reserve(robj, false); | ||
202 | if (likely(ret == 0)) { | ||
203 | radeon_bo_unpin(robj); | ||
204 | radeon_bo_unreserve(robj); | ||
205 | } | ||
192 | drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); | 206 | drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); |
193 | } | 207 | } |
194 | 208 | ||
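The cursor hunk replaces radeon_gem_object_pin() with an explicit reserve / radeon_bo_pin_restricted() / unreserve sequence, passing a 1 << 27 byte cap on pre-AVIVO parts because legacy CRTCs only carry a 27-bit offset (the same cap is applied to the page-flip pin in radeon_display.c below). A sketch of the offset-limited pin idea, with stand-in types rather than the real radeon_bo API:

#include <stdint.h>
#include <stdio.h>

struct bo { uint64_t gpu_addr; };

/* Stand-in for radeon_bo_pin_restricted(): pin and reject placements the
 * CRTC could not address.  The real function instead constrains the
 * placement so the buffer lands below max_offset. */
static int pin_restricted(struct bo *bo, uint64_t max_offset)
{
	bo->gpu_addr = 16ull << 20;   /* pretend VRAM placement at 16 MiB */
	if (max_offset && bo->gpu_addr >= max_offset)
		return -1;
	return 0;
}

int main(void)
{
	struct bo cursor = { 0 };
	int is_avivo = 0;                              /* legacy part */
	uint64_t limit = is_avivo ? 0 : 1ull << 27;    /* 27-bit cap */

	if (pin_restricted(&cursor, limit) == 0)
		printf("cursor pinned at 0x%llx (limit 0x%llx)\n",
		       (unsigned long long)cursor.gpu_addr,
		       (unsigned long long)limit);
	return 0;
}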
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3d314338d843..1ebcef25b915 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -303,8 +303,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) | |||
303 | if (update_pending && | 303 | if (update_pending && |
304 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, | 304 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, |
305 | &vpos, &hpos)) && | 305 | &vpos, &hpos)) && |
306 | (vpos >=0) && | 306 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || |
307 | (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) { | 307 | (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { |
308 | /* crtc didn't flip in this target vblank interval, | ||
309 | * but flip is pending in crtc. Based on the current | ||
310 | * scanout position we know that the current frame is | ||
311 | * (nearly) complete and the flip will (likely) | ||
312 | * complete before the start of the next frame. | ||
313 | */ | ||
314 | update_pending = 0; | ||
315 | } | ||
316 | if (update_pending) { | ||
308 | /* crtc didn't flip in this target vblank interval, | 317 | /* crtc didn't flip in this target vblank interval, |
309 | * but flip is pending in crtc. It will complete it | 318 | * but flip is pending in crtc. It will complete it |
310 | * in next vblank interval, so complete the flip at | 319 | * in next vblank interval, so complete the flip at |
@@ -393,7 +402,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
393 | DRM_ERROR("failed to reserve new rbo buffer before flip\n"); | 402 | DRM_ERROR("failed to reserve new rbo buffer before flip\n"); |
394 | goto pflip_cleanup; | 403 | goto pflip_cleanup; |
395 | } | 404 | } |
396 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); | 405 | /* Only 27 bit offset for legacy CRTC */ |
406 | r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, | ||
407 | ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); | ||
397 | if (unlikely(r != 0)) { | 408 | if (unlikely(r != 0)) { |
398 | radeon_bo_unreserve(rbo); | 409 | radeon_bo_unreserve(rbo); |
399 | r = -EINVAL; | 410 | r = -EINVAL; |
@@ -1136,11 +1147,6 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = { | |||
1136 | .output_poll_changed = radeon_output_poll_changed | 1147 | .output_poll_changed = radeon_output_poll_changed |
1137 | }; | 1148 | }; |
1138 | 1149 | ||
1139 | struct drm_prop_enum_list { | ||
1140 | int type; | ||
1141 | char *name; | ||
1142 | }; | ||
1143 | |||
1144 | static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = | 1150 | static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = |
1145 | { { 0, "driver" }, | 1151 | { { 0, "driver" }, |
1146 | { 1, "bios" }, | 1152 | { 1, "bios" }, |
@@ -1165,86 +1171,53 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] = | |||
1165 | 1171 | ||
1166 | static int radeon_modeset_create_props(struct radeon_device *rdev) | 1172 | static int radeon_modeset_create_props(struct radeon_device *rdev) |
1167 | { | 1173 | { |
1168 | int i, sz; | 1174 | int sz; |
1169 | 1175 | ||
1170 | if (rdev->is_atom_bios) { | 1176 | if (rdev->is_atom_bios) { |
1171 | rdev->mode_info.coherent_mode_property = | 1177 | rdev->mode_info.coherent_mode_property = |
1172 | drm_property_create(rdev->ddev, | 1178 | drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1); |
1173 | DRM_MODE_PROP_RANGE, | ||
1174 | "coherent", 2); | ||
1175 | if (!rdev->mode_info.coherent_mode_property) | 1179 | if (!rdev->mode_info.coherent_mode_property) |
1176 | return -ENOMEM; | 1180 | return -ENOMEM; |
1177 | |||
1178 | rdev->mode_info.coherent_mode_property->values[0] = 0; | ||
1179 | rdev->mode_info.coherent_mode_property->values[1] = 1; | ||
1180 | } | 1181 | } |
1181 | 1182 | ||
1182 | if (!ASIC_IS_AVIVO(rdev)) { | 1183 | if (!ASIC_IS_AVIVO(rdev)) { |
1183 | sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); | 1184 | sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); |
1184 | rdev->mode_info.tmds_pll_property = | 1185 | rdev->mode_info.tmds_pll_property = |
1185 | drm_property_create(rdev->ddev, | 1186 | drm_property_create_enum(rdev->ddev, 0, |
1186 | DRM_MODE_PROP_ENUM, | 1187 | "tmds_pll", |
1187 | "tmds_pll", sz); | 1188 | radeon_tmds_pll_enum_list, sz); |
1188 | for (i = 0; i < sz; i++) { | ||
1189 | drm_property_add_enum(rdev->mode_info.tmds_pll_property, | ||
1190 | i, | ||
1191 | radeon_tmds_pll_enum_list[i].type, | ||
1192 | radeon_tmds_pll_enum_list[i].name); | ||
1193 | } | ||
1194 | } | 1189 | } |
1195 | 1190 | ||
1196 | rdev->mode_info.load_detect_property = | 1191 | rdev->mode_info.load_detect_property = |
1197 | drm_property_create(rdev->ddev, | 1192 | drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1); |
1198 | DRM_MODE_PROP_RANGE, | ||
1199 | "load detection", 2); | ||
1200 | if (!rdev->mode_info.load_detect_property) | 1193 | if (!rdev->mode_info.load_detect_property) |
1201 | return -ENOMEM; | 1194 | return -ENOMEM; |
1202 | rdev->mode_info.load_detect_property->values[0] = 0; | ||
1203 | rdev->mode_info.load_detect_property->values[1] = 1; | ||
1204 | 1195 | ||
1205 | drm_mode_create_scaling_mode_property(rdev->ddev); | 1196 | drm_mode_create_scaling_mode_property(rdev->ddev); |
1206 | 1197 | ||
1207 | sz = ARRAY_SIZE(radeon_tv_std_enum_list); | 1198 | sz = ARRAY_SIZE(radeon_tv_std_enum_list); |
1208 | rdev->mode_info.tv_std_property = | 1199 | rdev->mode_info.tv_std_property = |
1209 | drm_property_create(rdev->ddev, | 1200 | drm_property_create_enum(rdev->ddev, 0, |
1210 | DRM_MODE_PROP_ENUM, | 1201 | "tv standard", |
1211 | "tv standard", sz); | 1202 | radeon_tv_std_enum_list, sz); |
1212 | for (i = 0; i < sz; i++) { | ||
1213 | drm_property_add_enum(rdev->mode_info.tv_std_property, | ||
1214 | i, | ||
1215 | radeon_tv_std_enum_list[i].type, | ||
1216 | radeon_tv_std_enum_list[i].name); | ||
1217 | } | ||
1218 | 1203 | ||
1219 | sz = ARRAY_SIZE(radeon_underscan_enum_list); | 1204 | sz = ARRAY_SIZE(radeon_underscan_enum_list); |
1220 | rdev->mode_info.underscan_property = | 1205 | rdev->mode_info.underscan_property = |
1221 | drm_property_create(rdev->ddev, | 1206 | drm_property_create_enum(rdev->ddev, 0, |
1222 | DRM_MODE_PROP_ENUM, | 1207 | "underscan", |
1223 | "underscan", sz); | 1208 | radeon_underscan_enum_list, sz); |
1224 | for (i = 0; i < sz; i++) { | ||
1225 | drm_property_add_enum(rdev->mode_info.underscan_property, | ||
1226 | i, | ||
1227 | radeon_underscan_enum_list[i].type, | ||
1228 | radeon_underscan_enum_list[i].name); | ||
1229 | } | ||
1230 | 1209 | ||
1231 | rdev->mode_info.underscan_hborder_property = | 1210 | rdev->mode_info.underscan_hborder_property = |
1232 | drm_property_create(rdev->ddev, | 1211 | drm_property_create_range(rdev->ddev, 0, |
1233 | DRM_MODE_PROP_RANGE, | 1212 | "underscan hborder", 0, 128); |
1234 | "underscan hborder", 2); | ||
1235 | if (!rdev->mode_info.underscan_hborder_property) | 1213 | if (!rdev->mode_info.underscan_hborder_property) |
1236 | return -ENOMEM; | 1214 | return -ENOMEM; |
1237 | rdev->mode_info.underscan_hborder_property->values[0] = 0; | ||
1238 | rdev->mode_info.underscan_hborder_property->values[1] = 128; | ||
1239 | 1215 | ||
1240 | rdev->mode_info.underscan_vborder_property = | 1216 | rdev->mode_info.underscan_vborder_property = |
1241 | drm_property_create(rdev->ddev, | 1217 | drm_property_create_range(rdev->ddev, 0, |
1242 | DRM_MODE_PROP_RANGE, | 1218 | "underscan vborder", 0, 128); |
1243 | "underscan vborder", 2); | ||
1244 | if (!rdev->mode_info.underscan_vborder_property) | 1219 | if (!rdev->mode_info.underscan_vborder_property) |
1245 | return -ENOMEM; | 1220 | return -ENOMEM; |
1246 | rdev->mode_info.underscan_vborder_property->values[0] = 0; | ||
1247 | rdev->mode_info.underscan_vborder_property->values[1] = 128; | ||
1248 | 1221 | ||
1249 | return 0; | 1222 | return 0; |
1250 | } | 1223 | } |
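
Note: the property hunks are mechanical conversions to the DRM helpers: drm_property_create_range() folds the min/max values into the call, and drm_property_create_enum() walks the name/value list itself, so the manual values[] assignments and the drm_property_add_enum() loops go away. A hedged before/after fragment, not compilable on its own; "dev" stands for the driver's struct drm_device pointer:

struct drm_property *prop;

/* Before: create the property, then fill in the two range values by hand. */
prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, "underscan hborder", 2);
prop->values[0] = 0;
prop->values[1] = 128;

/* After: one call, the range is part of the arguments. */
prop = drm_property_create_range(dev, 0, "underscan hborder", 0, 128);

/* Enums follow the same pattern: the helper iterates the list itself. */
prop = drm_property_create_enum(dev, 0, "underscan",
				radeon_underscan_enum_list,
				ARRAY_SIZE(radeon_underscan_enum_list));
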
@@ -1290,6 +1263,9 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
1290 | rdev->ddev->mode_config.max_height = 4096; | 1263 | rdev->ddev->mode_config.max_height = 4096; |
1291 | } | 1264 | } |
1292 | 1265 | ||
1266 | rdev->ddev->mode_config.preferred_depth = 24; | ||
1267 | rdev->ddev->mode_config.prefer_shadow = 1; | ||
1268 | |||
1293 | rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; | 1269 | rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; |
1294 | 1270 | ||
1295 | ret = radeon_modeset_create_props(rdev); | 1271 | ret = radeon_modeset_create_props(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 8032f1fedb11..498d21d50ba3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -54,10 +54,11 @@ | |||
54 | * 2.10.0 - fusion 2D tiling | 54 | * 2.10.0 - fusion 2D tiling |
55 | * 2.11.0 - backend map, initial compute support for the CS checker | 55 | * 2.11.0 - backend map, initial compute support for the CS checker |
56 | * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS | 56 | * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS |
57 | * 2.13.0 - virtual memory support | 57 | * 2.13.0 - virtual memory support, streamout |
58 | * 2.14.0 - add evergreen tiling informations | ||
58 | * 2.14.0 - add evergreen tiling information | ||
58 | */ | 59 | */ |
59 | #define KMS_DRIVER_MAJOR 2 | 60 | #define KMS_DRIVER_MAJOR 2 |
60 | #define KMS_DRIVER_MINOR 13 | 61 | #define KMS_DRIVER_MINOR 14 |
61 | #define KMS_DRIVER_PATCHLEVEL 0 | 62 | #define KMS_DRIVER_PATCHLEVEL 0 |
62 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 63 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
63 | int radeon_driver_unload_kms(struct drm_device *dev); | 64 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 195471cf65d3..5906914a78bc 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -164,7 +164,10 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
164 | ret = radeon_bo_reserve(rbo, false); | 164 | ret = radeon_bo_reserve(rbo, false); |
165 | if (unlikely(ret != 0)) | 165 | if (unlikely(ret != 0)) |
166 | goto out_unref; | 166 | goto out_unref; |
167 | ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL); | 167 | /* Only 27 bit offset for legacy CRTC */ |
168 | ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, | ||
169 | ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, | ||
170 | NULL); | ||
168 | if (ret) { | 171 | if (ret) { |
169 | radeon_bo_unreserve(rbo); | 172 | radeon_bo_unreserve(rbo); |
170 | goto out_unref; | 173 | goto out_unref; |
@@ -263,11 +266,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, | |||
263 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; | 266 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; |
264 | info->apertures->ranges[0].size = rdev->mc.aper_size; | 267 | info->apertures->ranges[0].size = rdev->mc.aper_size; |
265 | 268 | ||
266 | info->pixmap.size = 64*1024; | 269 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
267 | info->pixmap.buf_align = 8; | ||
268 | info->pixmap.access_align = 32; | ||
269 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
270 | info->pixmap.scan_align = 1; | ||
271 | 270 | ||
272 | if (info->screen_base == NULL) { | 271 | if (info->screen_base == NULL) { |
273 | ret = -ENOSPC; | 272 | ret = -ENOSPC; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 7337850af2fa..c7008b5210f7 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -75,32 +75,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | ||
79 | uint64_t *gpu_addr) | ||
80 | { | ||
81 | struct radeon_bo *robj = gem_to_radeon_bo(obj); | ||
82 | int r; | ||
83 | |||
84 | r = radeon_bo_reserve(robj, false); | ||
85 | if (unlikely(r != 0)) | ||
86 | return r; | ||
87 | r = radeon_bo_pin(robj, pin_domain, gpu_addr); | ||
88 | radeon_bo_unreserve(robj); | ||
89 | return r; | ||
90 | } | ||
91 | |||
92 | void radeon_gem_object_unpin(struct drm_gem_object *obj) | ||
93 | { | ||
94 | struct radeon_bo *robj = gem_to_radeon_bo(obj); | ||
95 | int r; | ||
96 | |||
97 | r = radeon_bo_reserve(robj, false); | ||
98 | if (likely(r == 0)) { | ||
99 | radeon_bo_unpin(robj); | ||
100 | radeon_bo_unreserve(robj); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | int radeon_gem_set_domain(struct drm_gem_object *gobj, | 78 | int radeon_gem_set_domain(struct drm_gem_object *gobj, |
105 | uint32_t rdomain, uint32_t wdomain) | 79 | uint32_t rdomain, uint32_t wdomain) |
106 | { | 80 | { |
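
Note: the removed radeon_gem_object_pin()/unpin() wrappers had no remaining callers; code that needs to pin a GEM object now uses the radeon_bo_* API directly, following the same reserve/pin/unreserve sequence the deleted helpers encoded. A fragment of that call-site pattern, taken from the shape of the removed code (error handling trimmed):

/* Sketch of the pattern formerly wrapped by radeon_gem_object_pin(). */
struct radeon_bo *rbo = gem_to_radeon_bo(obj);
u64 gpu_addr;
int r;

r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
	return r;
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
radeon_bo_unreserve(rbo);
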
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 98a8ad680109..85bcfc8923a7 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -26,10 +26,15 @@ | |||
26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
27 | 27 | ||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "drm_edid.h" | ||
29 | #include "radeon_drm.h" | 30 | #include "radeon_drm.h" |
30 | #include "radeon.h" | 31 | #include "radeon.h" |
31 | #include "atom.h" | 32 | #include "atom.h" |
32 | 33 | ||
34 | extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, | ||
35 | struct i2c_msg *msgs, int num); | ||
36 | extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap); | ||
37 | |||
33 | /** | 38 | /** |
34 | * radeon_ddc_probe | 39 | * radeon_ddc_probe |
35 | * | 40 | * |
@@ -41,13 +46,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
41 | int ret; | 46 | int ret; |
42 | struct i2c_msg msgs[] = { | 47 | struct i2c_msg msgs[] = { |
43 | { | 48 | { |
44 | .addr = 0x50, | 49 | .addr = DDC_ADDR, |
45 | .flags = 0, | 50 | .flags = 0, |
46 | .len = 1, | 51 | .len = 1, |
47 | .buf = &out, | 52 | .buf = &out, |
48 | }, | 53 | }, |
49 | { | 54 | { |
50 | .addr = 0x50, | 55 | .addr = DDC_ADDR, |
51 | .flags = I2C_M_RD, | 56 | .flags = I2C_M_RD, |
52 | .len = 8, | 57 | .len = 8, |
53 | .buf = buf, | 58 | .buf = buf, |
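
Note: replacing the literal 0x50 with DDC_ADDR (defined in drm_edid.h, hence the new include) makes it explicit that the probe is a dummy transaction against the standard EDID slave address. A hedged sketch of the same two-message probe, with names matching the hunk; the adapter pointer is assumed to come from the connector:

/* A one-byte write of offset 0 followed by a short read at the DDC/EDID
 * address; if both messages complete, a monitor is answering at 0x50. */
u8 out = 0x0, buf[8];
struct i2c_msg msgs[] = {
	{ .addr = DDC_ADDR, .flags = 0,        .len = 1, .buf = &out },
	{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = 8, .buf = buf  },
};

int ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
bool monitor_present = (ret == 2);
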
@@ -882,6 +887,11 @@ static const struct i2c_algorithm radeon_i2c_algo = { | |||
882 | .functionality = radeon_hw_i2c_func, | 887 | .functionality = radeon_hw_i2c_func, |
883 | }; | 888 | }; |
884 | 889 | ||
890 | static const struct i2c_algorithm radeon_atom_i2c_algo = { | ||
891 | .master_xfer = radeon_atom_hw_i2c_xfer, | ||
892 | .functionality = radeon_atom_hw_i2c_func, | ||
893 | }; | ||
894 | |||
885 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | 895 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, |
886 | struct radeon_i2c_bus_rec *rec, | 896 | struct radeon_i2c_bus_rec *rec, |
887 | const char *name) | 897 | const char *name) |
@@ -914,6 +924,18 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
914 | DRM_ERROR("Failed to register hw i2c %s\n", name); | 924 | DRM_ERROR("Failed to register hw i2c %s\n", name); |
915 | goto out_free; | 925 | goto out_free; |
916 | } | 926 | } |
927 | } else if (rec->hw_capable && | ||
928 | radeon_hw_i2c && | ||
929 | ASIC_IS_DCE3(rdev)) { | ||
930 | /* hw i2c using atom */ | ||
931 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | ||
932 | "Radeon i2c hw bus %s", name); | ||
933 | i2c->adapter.algo = &radeon_atom_i2c_algo; | ||
934 | ret = i2c_add_adapter(&i2c->adapter); | ||
935 | if (ret) { | ||
936 | DRM_ERROR("Failed to register hw i2c %s\n", name); | ||
937 | goto out_free; | ||
938 | } | ||
917 | } else { | 939 | } else { |
918 | /* set the radeon bit adapter */ | 940 | /* set the radeon bit adapter */ |
919 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), | 941 | snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
@@ -925,10 +947,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
925 | i2c->algo.bit.setscl = set_clock; | 947 | i2c->algo.bit.setscl = set_clock; |
926 | i2c->algo.bit.getsda = get_data; | 948 | i2c->algo.bit.getsda = get_data; |
927 | i2c->algo.bit.getscl = get_clock; | 949 | i2c->algo.bit.getscl = get_clock; |
928 | i2c->algo.bit.udelay = 20; | 950 | i2c->algo.bit.udelay = 10; |
929 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always | 951 | i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */ |
930 | * make this, 2 jiffies is a lot more reliable */ | ||
931 | i2c->algo.bit.timeout = 2; | ||
932 | i2c->algo.bit.data = i2c; | 952 | i2c->algo.bit.data = i2c; |
933 | ret = i2c_bit_add_bus(&i2c->adapter); | 953 | ret = i2c_bit_add_bus(&i2c->adapter); |
934 | if (ret) { | 954 | if (ret) { |
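
Note: for the bit-banged fallback the half-cycle delay drops from 20 us to 10 us (roughly 25 kHz -> 50 kHz SCL, still well under the 100 kHz DDC ceiling), and the timeout is now derived from the 2.2 ms figure in the VESA DDC spec instead of a hard-coded 2 jiffies, whose real length depends on HZ. A standalone sketch of that arithmetic; the rounding mirrors what usecs_to_jiffies() does, and the HZ values are illustrative:

#include <stdio.h>

/* Round-up conversion of a microsecond timeout to scheduler ticks,
 * the same idea as the kernel's usecs_to_jiffies(). */
static unsigned long us_to_jiffies(unsigned long us, unsigned long hz)
{
	return (us * hz + 999999UL) / 1000000UL;
}

int main(void)
{
	/* Approximate SCL rate for an i2c-algo-bit half-cycle delay in us. */
	printf("udelay 20 -> ~%lu kHz\n", 1000UL / (2 * 20)); /* ~25 kHz */
	printf("udelay 10 -> ~%lu kHz\n", 1000UL / (2 * 10)); /* ~50 kHz */

	/* 2.2 ms (VESA) expressed in jiffies for a few HZ settings. */
	printf("2200 us at HZ=100  -> %lu jiffies\n", us_to_jiffies(2200, 100));  /* 1 */
	printf("2200 us at HZ=250  -> %lu jiffies\n", us_to_jiffies(2200, 250));  /* 1 */
	printf("2200 us at HZ=1000 -> %lu jiffies\n", us_to_jiffies(2200, 1000)); /* 3 */
	return 0;
}
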
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index d3352889a870..1986ebae1ef2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -57,6 +57,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
57 | } | 57 | } |
58 | dev->dev_private = (void *)rdev; | 58 | dev->dev_private = (void *)rdev; |
59 | 59 | ||
60 | pci_set_master(dev->pdev); | ||
61 | |||
60 | /* update BUS flag */ | 62 | /* update BUS flag */ |
61 | if (drm_pci_device_is_agp(dev)) { | 63 | if (drm_pci_device_is_agp(dev)) { |
62 | flags |= RADEON_IS_AGP; | 64 | flags |= RADEON_IS_AGP; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 25a19c483075..210317c7045e 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -419,7 +419,9 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, | |||
419 | r = radeon_bo_reserve(rbo, false); | 419 | r = radeon_bo_reserve(rbo, false); |
420 | if (unlikely(r != 0)) | 420 | if (unlikely(r != 0)) |
421 | return r; | 421 | return r; |
422 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); | 422 | /* Only 27 bit offset for legacy CRTC */ |
423 | r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27, | ||
424 | &base); | ||
423 | if (unlikely(r != 0)) { | 425 | if (unlikely(r != 0)) { |
424 | radeon_bo_unreserve(rbo); | 426 | radeon_bo_unreserve(rbo); |
425 | return -EINVAL; | 427 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d45df1763598..91541e63d582 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -224,7 +224,8 @@ void radeon_bo_unref(struct radeon_bo **bo) | |||
224 | *bo = NULL; | 224 | *bo = NULL; |
225 | } | 225 | } |
226 | 226 | ||
227 | int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | 227 | int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, |
228 | u64 *gpu_addr) | ||
228 | { | 229 | { |
229 | int r, i; | 230 | int r, i; |
230 | 231 | ||
@@ -232,6 +233,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | |||
232 | bo->pin_count++; | 233 | bo->pin_count++; |
233 | if (gpu_addr) | 234 | if (gpu_addr) |
234 | *gpu_addr = radeon_bo_gpu_offset(bo); | 235 | *gpu_addr = radeon_bo_gpu_offset(bo); |
236 | WARN_ON_ONCE(max_offset != 0); | ||
235 | return 0; | 237 | return 0; |
236 | } | 238 | } |
237 | radeon_ttm_placement_from_domain(bo, domain); | 239 | radeon_ttm_placement_from_domain(bo, domain); |
@@ -239,6 +241,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | |||
239 | /* force to pin into visible video ram */ | 241 | /* force to pin into visible video ram */ |
240 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; | 242 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
241 | } | 243 | } |
244 | if (max_offset) { | ||
245 | u64 lpfn = max_offset >> PAGE_SHIFT; | ||
246 | |||
247 | if (!bo->placement.lpfn) | ||
248 | bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; | ||
249 | |||
250 | if (lpfn < bo->placement.lpfn) | ||
251 | bo->placement.lpfn = lpfn; | ||
252 | } | ||
242 | for (i = 0; i < bo->placement.num_placement; i++) | 253 | for (i = 0; i < bo->placement.num_placement; i++) |
243 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | 254 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; |
244 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); | 255 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); |
@@ -252,6 +263,11 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | |||
252 | return r; | 263 | return r; |
253 | } | 264 | } |
254 | 265 | ||
266 | int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) | ||
267 | { | ||
268 | return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); | ||
269 | } | ||
270 | |||
255 | int radeon_bo_unpin(struct radeon_bo *bo) | 271 | int radeon_bo_unpin(struct radeon_bo *bo) |
256 | { | 272 | { |
257 | int r, i; | 273 | int r, i; |
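
Note: radeon_bo_pin_restricted() turns the byte limit into a last-page-frame-number (lpfn) clamp on the TTM placement: if a max_offset is given and is tighter than whatever limit is already set, it wins, and the old radeon_bo_pin() becomes a thin wrapper passing max_offset = 0. A standalone sketch of just the clamp arithmetic (a PAGE_SHIFT of 12 is assumed for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KiB pages, illustrative */

/* Tighten an existing "last page frame" limit by a byte offset cap,
 * mirroring the lpfn handling in radeon_bo_pin_restricted(). */
static unsigned long clamp_lpfn(unsigned long current_lpfn,
				unsigned long long max_offset,
				unsigned long long fallback_size)
{
	unsigned long lpfn;

	if (!max_offset)
		return current_lpfn;            /* no extra restriction */

	lpfn = max_offset >> PAGE_SHIFT;
	if (!current_lpfn)                      /* 0 means "no limit yet" */
		current_lpfn = fallback_size >> PAGE_SHIFT;

	return lpfn < current_lpfn ? lpfn : current_lpfn;
}

int main(void)
{
	/* 512 MiB aperture already set, 128 MiB legacy-CRTC cap -> 32768 pages. */
	printf("%lu\n", clamp_lpfn(512ULL << 20 >> PAGE_SHIFT, 1ULL << 27, 0));
	/* No cap requested: the existing limit is kept. */
	printf("%lu\n", clamp_lpfn(131072, 0, 0));
	return 0;
}
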
@@ -445,8 +461,54 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) | |||
445 | int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | 461 | int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
446 | uint32_t tiling_flags, uint32_t pitch) | 462 | uint32_t tiling_flags, uint32_t pitch) |
447 | { | 463 | { |
464 | struct radeon_device *rdev = bo->rdev; | ||
448 | int r; | 465 | int r; |
449 | 466 | ||
467 | if (rdev->family >= CHIP_CEDAR) { | ||
468 | unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; | ||
469 | |||
470 | bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; | ||
471 | bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; | ||
472 | mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; | ||
473 | tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; | ||
474 | stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK; | ||
475 | switch (bankw) { | ||
476 | case 0: | ||
477 | case 1: | ||
478 | case 2: | ||
479 | case 4: | ||
480 | case 8: | ||
481 | break; | ||
482 | default: | ||
483 | return -EINVAL; | ||
484 | } | ||
485 | switch (bankh) { | ||
486 | case 0: | ||
487 | case 1: | ||
488 | case 2: | ||
489 | case 4: | ||
490 | case 8: | ||
491 | break; | ||
492 | default: | ||
493 | return -EINVAL; | ||
494 | } | ||
495 | switch (mtaspect) { | ||
496 | case 0: | ||
497 | case 1: | ||
498 | case 2: | ||
499 | case 4: | ||
500 | case 8: | ||
501 | break; | ||
502 | default: | ||
503 | return -EINVAL; | ||
504 | } | ||
505 | if (tilesplit > 6) { | ||
506 | return -EINVAL; | ||
507 | } | ||
508 | if (stilesplit > 6) { | ||
509 | return -EINVAL; | ||
510 | } | ||
511 | } | ||
450 | r = radeon_bo_reserve(bo, false); | 512 | r = radeon_bo_reserve(bo, false); |
451 | if (unlikely(r != 0)) | 513 | if (unlikely(r != 0)) |
452 | return r; | 514 | return r; |
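
Note: on Evergreen and newer, the added checks reject tiling parameters the hardware cannot express before they are stored: bank width, bank height and macro-tile aspect must each be 0, 1, 2, 4 or 8, and the (stencil) tile-split selector must be in the 0-6 range. A standalone sketch of the field extraction and validation, using made-up shift/mask values in place of the RADEON_TILING_EG_* constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout for illustration only; the real shifts and masks are
 * the RADEON_TILING_EG_* definitions in the radeon headers. */
#define BANKW_SHIFT 0
#define BANKH_SHIFT 4
#define FIELD_MASK  0xf

static bool valid_bank_field(unsigned v)
{
	/* accepted values: 0, 1, 2, 4, 8 */
	return v == 0 || v == 1 || v == 2 || v == 4 || v == 8;
}

static bool eg_tiling_flags_ok(uint32_t tiling_flags, unsigned tilesplit)
{
	unsigned bankw = (tiling_flags >> BANKW_SHIFT) & FIELD_MASK;
	unsigned bankh = (tiling_flags >> BANKH_SHIFT) & FIELD_MASK;

	return valid_bank_field(bankw) && valid_bank_field(bankh) &&
	       tilesplit <= 6;
}

int main(void)
{
	printf("%d\n", eg_tiling_flags_ok((4 << BANKH_SHIFT) | 2, 3)); /* 1: 2x4 banks, split 3 */
	printf("%d\n", eg_tiling_flags_ok(3, 0));                      /* 0: bank width 3 rejected */
	printf("%d\n", eg_tiling_flags_ok(1, 7));                      /* 0: tile split out of range */
	return 0;
}
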
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index cde430308870..f9104be88d7c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -118,6 +118,8 @@ extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | |||
118 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | 118 | extern void radeon_bo_kunmap(struct radeon_bo *bo); |
119 | extern void radeon_bo_unref(struct radeon_bo **bo); | 119 | extern void radeon_bo_unref(struct radeon_bo **bo); |
120 | extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); | 120 | extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); |
121 | extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, | ||
122 | u64 max_offset, u64 *gpu_addr); | ||
121 | extern int radeon_bo_unpin(struct radeon_bo *bo); | 123 | extern int radeon_bo_unpin(struct radeon_bo *bo); |
122 | extern int radeon_bo_evict_vram(struct radeon_device *rdev); | 124 | extern int radeon_bo_evict_vram(struct radeon_device *rdev); |
123 | extern void radeon_bo_force_delete(struct radeon_device *rdev); | 125 | extern void radeon_bo_force_delete(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 095148e29a1f..3575129c1940 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -221,7 +221,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | /* set memory clock */ | 223 | /* set memory clock */ |
224 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | 224 | if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) { |
225 | radeon_pm_debug_check_in_vbl(rdev, false); | 225 | radeon_pm_debug_check_in_vbl(rdev, false); |
226 | radeon_set_memory_clock(rdev, mclk); | 226 | radeon_set_memory_clock(rdev, mclk); |
227 | radeon_pm_debug_check_in_vbl(rdev, true); | 227 | radeon_pm_debug_check_in_vbl(rdev, true); |
@@ -863,11 +863,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
863 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); | 863 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); |
864 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); | 864 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
865 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); | 865 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); |
866 | if (rdev->asic->get_memory_clock) | 866 | if (rdev->asic->pm.get_memory_clock) |
867 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); | 867 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
868 | if (rdev->pm.current_vddc) | 868 | if (rdev->pm.current_vddc) |
869 | seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); | 869 | seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); |
870 | if (rdev->asic->get_pcie_lanes) | 870 | if (rdev->asic->pm.get_pcie_lanes) |
871 | seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); | 871 | seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); |
872 | 872 | ||
873 | return 0; | 873 | return 0; |
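
Note: rdev->asic->set_memory_clock style fields moving to rdev->asic->pm.* reflects a reorganisation of the per-ASIC callback table into functional groups (pm, copy, ...), so related hooks sit together and a NULL check reads as "does this ASIC implement this group member". A standalone toy sketch of that grouping; names and members are illustrative, not the real struct radeon_asic layout:

#include <stdio.h>

/* Toy callback table grouped by function, loosely modelled on the idea
 * behind the change; the real structure has many more members. */
struct toy_device;

struct toy_asic {
	struct {
		void (*set_memory_clock)(struct toy_device *dev, int mclk);
		int  (*get_pcie_lanes)(struct toy_device *dev);
	} pm;
	struct {
		int (*copy)(struct toy_device *dev, int pages);
		int ring_index;
	} copy;
};

struct toy_device {
	const struct toy_asic *asic;
};

static void toy_set_mclk(struct toy_device *dev, int mclk)
{
	(void)dev;
	printf("mclk -> %d\n", mclk);
}

static const struct toy_asic toy_asic_table = {
	.pm   = { .set_memory_clock = toy_set_mclk }, /* no get_pcie_lanes */
	.copy = { .ring_index = 0 },                  /* no blit copy hook */
};

int main(void)
{
	struct toy_device dev = { .asic = &toy_asic_table };

	if (dev.asic->pm.set_memory_clock)      /* grouped NULL check */
		dev.asic->pm.set_memory_clock(&dev, 500);
	if (!dev.asic->copy.copy)
		printf("no blit copy, fall back to memcpy\n");
	return 0;
}
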
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index b4ce86455707..509863411285 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -539,9 +539,11 @@ | |||
539 | 539 | ||
540 | #define RADEON_CRTC2_PITCH 0x032c | 540 | #define RADEON_CRTC2_PITCH 0x032c |
541 | #define RADEON_CRTC_STATUS 0x005c | 541 | #define RADEON_CRTC_STATUS 0x005c |
542 | # define RADEON_CRTC_VBLANK_CUR (1 << 0) | ||
542 | # define RADEON_CRTC_VBLANK_SAVE (1 << 1) | 543 | # define RADEON_CRTC_VBLANK_SAVE (1 << 1) |
543 | # define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1) | 544 | # define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1) |
544 | #define RADEON_CRTC2_STATUS 0x03fc | 545 | #define RADEON_CRTC2_STATUS 0x03fc |
546 | # define RADEON_CRTC2_VBLANK_CUR (1 << 0) | ||
545 | # define RADEON_CRTC2_VBLANK_SAVE (1 << 1) | 547 | # define RADEON_CRTC2_VBLANK_SAVE (1 << 1) |
546 | # define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1) | 548 | # define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1) |
547 | #define RADEON_CRTC_V_SYNC_STRT_WID 0x020c | 549 | #define RADEON_CRTC_V_SYNC_STRT_WID 0x020c |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 92c9ea4751fb..30566201dffb 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -478,7 +478,9 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = { | |||
478 | static int radeon_debugfs_ib_info(struct seq_file *m, void *data) | 478 | static int radeon_debugfs_ib_info(struct seq_file *m, void *data) |
479 | { | 479 | { |
480 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 480 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
481 | struct radeon_ib *ib = node->info_ent->data; | 481 | struct drm_device *dev = node->minor->dev; |
482 | struct radeon_device *rdev = dev->dev_private; | ||
483 | struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)]; | ||
482 | unsigned i; | 484 | unsigned i; |
483 | 485 | ||
484 | if (ib == NULL) { | 486 | if (ib == NULL) { |
@@ -495,6 +497,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) | |||
495 | 497 | ||
496 | static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; | 498 | static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; |
497 | static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; | 499 | static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; |
500 | static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE]; | ||
498 | #endif | 501 | #endif |
499 | 502 | ||
500 | int radeon_debugfs_ring_init(struct radeon_device *rdev) | 503 | int radeon_debugfs_ring_init(struct radeon_device *rdev) |
@@ -517,10 +520,11 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev) | |||
517 | 520 | ||
518 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { | 521 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { |
519 | sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); | 522 | sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); |
523 | radeon_debugfs_ib_idx[i] = i; | ||
520 | radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; | 524 | radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; |
521 | radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info; | 525 | radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info; |
522 | radeon_debugfs_ib_list[i].driver_features = 0; | 526 | radeon_debugfs_ib_list[i].driver_features = 0; |
523 | radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i]; | 527 | radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i]; |
524 | } | 528 | } |
525 | return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list, | 529 | return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list, |
526 | RADEON_IB_POOL_SIZE); | 530 | RADEON_IB_POOL_SIZE); |
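
Note: the debugfs change stops stashing a pointer to an IB pool entry in the drm_info_list data field and stores a small index instead, resolving it against the device's current pool at read time; the file then never dereferences a pointer into storage that may since have been torn down or re-initialised. A standalone toy illustration of the index-versus-pointer idea:

#include <stdio.h>

#define POOL_SIZE 4

struct item { int value; };

static struct item pool[POOL_SIZE];        /* stands in for the IB pool */
static unsigned    idx_table[POOL_SIZE];   /* what debugfs now stores */

/* Resolve the index against the *current* pool every time it is read,
 * as radeon_debugfs_ib_info() now does. */
static int show(const unsigned *idx)
{
	return pool[*idx].value;
}

int main(void)
{
	for (unsigned i = 0; i < POOL_SIZE; i++) {
		pool[i].value = (int)i * 10;
		idx_table[i]  = i;
	}

	printf("%d\n", show(&idx_table[2]));   /* 20 */

	/* Even after the pool contents are re-initialised, the stored index
	 * still resolves to live data rather than a stale pointer. */
	pool[2].value = 999;
	printf("%d\n", show(&idx_table[2]));   /* 999 */
	return 0;
}
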
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c421e77ace71..f493c6403af5 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -226,7 +226,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
226 | int r, i; | 226 | int r, i; |
227 | 227 | ||
228 | rdev = radeon_get_rdev(bo->bdev); | 228 | rdev = radeon_get_rdev(bo->bdev); |
229 | r = radeon_fence_create(rdev, &fence, rdev->copy_ring); | 229 | r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev)); |
230 | if (unlikely(r)) { | 230 | if (unlikely(r)) { |
231 | return r; | 231 | return r; |
232 | } | 232 | } |
@@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
255 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | 255 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); |
256 | return -EINVAL; | 256 | return -EINVAL; |
257 | } | 257 | } |
258 | if (!rdev->ring[rdev->copy_ring].ready) { | 258 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) { |
259 | DRM_ERROR("Trying to move memory with ring turned off.\n"); | 259 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
260 | return -EINVAL; | 260 | return -EINVAL; |
261 | } | 261 | } |
@@ -266,7 +266,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
266 | if (rdev->family >= CHIP_R600) { | 266 | if (rdev->family >= CHIP_R600) { |
267 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 267 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
268 | /* no need to sync to our own or unused rings */ | 268 | /* no need to sync to our own or unused rings */ |
269 | if (i == rdev->copy_ring || !rdev->ring[i].ready) | 269 | if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready) |
270 | continue; | 270 | continue; |
271 | 271 | ||
272 | if (!fence->semaphore) { | 272 | if (!fence->semaphore) { |
@@ -283,12 +283,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
283 | radeon_semaphore_emit_signal(rdev, i, fence->semaphore); | 283 | radeon_semaphore_emit_signal(rdev, i, fence->semaphore); |
284 | radeon_ring_unlock_commit(rdev, &rdev->ring[i]); | 284 | radeon_ring_unlock_commit(rdev, &rdev->ring[i]); |
285 | 285 | ||
286 | r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3); | 286 | r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3); |
287 | /* FIXME: handle ring lock error */ | 287 | /* FIXME: handle ring lock error */ |
288 | if (r) | 288 | if (r) |
289 | continue; | 289 | continue; |
290 | radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore); | 290 | radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore); |
291 | radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]); | 291 | radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]); |
292 | } | 292 | } |
293 | } | 293 | } |
294 | 294 | ||
@@ -410,7 +410,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
410 | radeon_move_null(bo, new_mem); | 410 | radeon_move_null(bo, new_mem); |
411 | return 0; | 411 | return 0; |
412 | } | 412 | } |
413 | if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) { | 413 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || |
414 | rdev->asic->copy.copy == NULL) { | ||
414 | /* use memcpy */ | 415 | /* use memcpy */ |
415 | goto memcpy; | 416 | goto memcpy; |
416 | } | 417 | } |
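
Note: the cached rdev->copy_ring field gives way to radeon_copy_ring_index(rdev), which, like the other radeon_*() wrappers in this series, reads the value from the per-ASIC callback table, so the TTM move path follows whatever ring the current ASIC uses for copies. A hedged one-line sketch of what such an accessor typically expands to; the exact member path is an assumption here, not taken from the diff:

/* Assumed shape, mirroring the other radeon_*() asic wrappers. */
#define radeon_copy_ring_index(rdev) ((rdev)->asic->copy.ring_index)

Used as in the hunks above, e.g. rdev->ring[radeon_copy_ring_index(rdev)].ready.
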
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 2316977eb924..aea63c415852 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman | |||
@@ -1,5 +1,8 @@ | |||
1 | cayman 0x9400 | 1 | cayman 0x9400 |
2 | 0x0000802C GRBM_GFX_INDEX | 2 | 0x0000802C GRBM_GFX_INDEX |
3 | 0x000084FC CP_STRMOUT_CNTL | ||
4 | 0x000085F0 CP_COHER_CNTL | ||
5 | 0x000085F4 CP_COHER_SIZE | ||
3 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | 6 | 0x000088B0 VGT_VTX_VECT_EJECT_REG |
4 | 0x000088C4 VGT_CACHE_INVALIDATION | 7 | 0x000088C4 VGT_CACHE_INVALIDATION |
5 | 0x000088D4 VGT_GS_VERTEX_REUSE | 8 | 0x000088D4 VGT_GS_VERTEX_REUSE |
@@ -77,7 +80,6 @@ cayman 0x9400 | |||
77 | 0x0002802C DB_DEPTH_CLEAR | 80 | 0x0002802C DB_DEPTH_CLEAR |
78 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | 81 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL |
79 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | 82 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR |
80 | 0x0002805C DB_DEPTH_SLICE | ||
81 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | 83 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 |
82 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | 84 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 |
83 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 | 85 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 |
@@ -206,7 +208,6 @@ cayman 0x9400 | |||
206 | 0x00028344 PA_SC_VPORT_ZMAX_14 | 208 | 0x00028344 PA_SC_VPORT_ZMAX_14 |
207 | 0x00028348 PA_SC_VPORT_ZMIN_15 | 209 | 0x00028348 PA_SC_VPORT_ZMIN_15 |
208 | 0x0002834C PA_SC_VPORT_ZMAX_15 | 210 | 0x0002834C PA_SC_VPORT_ZMAX_15 |
209 | 0x00028350 SX_MISC | ||
210 | 0x00028354 SX_SURFACE_SYNC | 211 | 0x00028354 SX_SURFACE_SYNC |
211 | 0x0002835C SX_SCATTER_EXPORT_SIZE | 212 | 0x0002835C SX_SCATTER_EXPORT_SIZE |
212 | 0x00028380 SQ_VTX_SEMANTIC_0 | 213 | 0x00028380 SQ_VTX_SEMANTIC_0 |
@@ -512,6 +513,13 @@ cayman 0x9400 | |||
512 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | 513 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 |
513 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | 514 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 |
514 | 0x00028AC8 DB_PRELOAD_CONTROL | 515 | 0x00028AC8 DB_PRELOAD_CONTROL |
516 | 0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0 | ||
517 | 0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1 | ||
518 | 0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2 | ||
519 | 0x00028B04 VGT_STRMOUT_VTX_STRIDE_3 | ||
520 | 0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET | ||
521 | 0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE | ||
522 | 0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE | ||
515 | 0x00028B38 VGT_GS_MAX_VERT_OUT | 523 | 0x00028B38 VGT_GS_MAX_VERT_OUT |
516 | 0x00028B54 VGT_SHADER_STAGES_EN | 524 | 0x00028B54 VGT_SHADER_STAGES_EN |
517 | 0x00028B58 VGT_LS_HS_CONFIG | 525 | 0x00028B58 VGT_LS_HS_CONFIG |
@@ -551,6 +559,18 @@ cayman 0x9400 | |||
551 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3 | 559 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3 |
552 | 0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0 | 560 | 0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0 |
553 | 0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1 | 561 | 0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1 |
562 | 0x00028C78 CB_COLOR0_DIM | ||
563 | 0x00028CB4 CB_COLOR1_DIM | ||
564 | 0x00028CF0 CB_COLOR2_DIM | ||
565 | 0x00028D2C CB_COLOR3_DIM | ||
566 | 0x00028D68 CB_COLOR4_DIM | ||
567 | 0x00028DA4 CB_COLOR5_DIM | ||
568 | 0x00028DE0 CB_COLOR6_DIM | ||
569 | 0x00028E1C CB_COLOR7_DIM | ||
570 | 0x00028E58 CB_COLOR8_DIM | ||
571 | 0x00028E74 CB_COLOR9_DIM | ||
572 | 0x00028E90 CB_COLOR10_DIM | ||
573 | 0x00028EAC CB_COLOR11_DIM | ||
554 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 | 574 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 |
555 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 | 575 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 |
556 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 | 576 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 161737a28c23..77c37202376f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -4,6 +4,9 @@ evergreen 0x9400 | |||
4 | 0x00008044 WAIT_UNTIL_POLL_CNTL | 4 | 0x00008044 WAIT_UNTIL_POLL_CNTL |
5 | 0x00008048 WAIT_UNTIL_POLL_MASK | 5 | 0x00008048 WAIT_UNTIL_POLL_MASK |
6 | 0x0000804c WAIT_UNTIL_POLL_REFDATA | 6 | 0x0000804c WAIT_UNTIL_POLL_REFDATA |
7 | 0x000084FC CP_STRMOUT_CNTL | ||
8 | 0x000085F0 CP_COHER_CNTL | ||
9 | 0x000085F4 CP_COHER_SIZE | ||
7 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | 10 | 0x000088B0 VGT_VTX_VECT_EJECT_REG |
8 | 0x000088C4 VGT_CACHE_INVALIDATION | 11 | 0x000088C4 VGT_CACHE_INVALIDATION |
9 | 0x000088D4 VGT_GS_VERTEX_REUSE | 12 | 0x000088D4 VGT_GS_VERTEX_REUSE |
@@ -93,7 +96,6 @@ evergreen 0x9400 | |||
93 | 0x0002802C DB_DEPTH_CLEAR | 96 | 0x0002802C DB_DEPTH_CLEAR |
94 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | 97 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL |
95 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | 98 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR |
96 | 0x0002805C DB_DEPTH_SLICE | ||
97 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | 99 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 |
98 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | 100 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 |
99 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 | 101 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 |
@@ -222,7 +224,6 @@ evergreen 0x9400 | |||
222 | 0x00028344 PA_SC_VPORT_ZMAX_14 | 224 | 0x00028344 PA_SC_VPORT_ZMAX_14 |
223 | 0x00028348 PA_SC_VPORT_ZMIN_15 | 225 | 0x00028348 PA_SC_VPORT_ZMIN_15 |
224 | 0x0002834C PA_SC_VPORT_ZMAX_15 | 226 | 0x0002834C PA_SC_VPORT_ZMAX_15 |
225 | 0x00028350 SX_MISC | ||
226 | 0x00028354 SX_SURFACE_SYNC | 227 | 0x00028354 SX_SURFACE_SYNC |
227 | 0x00028380 SQ_VTX_SEMANTIC_0 | 228 | 0x00028380 SQ_VTX_SEMANTIC_0 |
228 | 0x00028384 SQ_VTX_SEMANTIC_1 | 229 | 0x00028384 SQ_VTX_SEMANTIC_1 |
@@ -522,6 +523,13 @@ evergreen 0x9400 | |||
522 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | 523 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 |
523 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | 524 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 |
524 | 0x00028AC8 DB_PRELOAD_CONTROL | 525 | 0x00028AC8 DB_PRELOAD_CONTROL |
526 | 0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0 | ||
527 | 0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1 | ||
528 | 0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2 | ||
529 | 0x00028B04 VGT_STRMOUT_VTX_STRIDE_3 | ||
530 | 0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET | ||
531 | 0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE | ||
532 | 0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE | ||
525 | 0x00028B38 VGT_GS_MAX_VERT_OUT | 533 | 0x00028B38 VGT_GS_MAX_VERT_OUT |
526 | 0x00028B54 VGT_SHADER_STAGES_EN | 534 | 0x00028B54 VGT_SHADER_STAGES_EN |
527 | 0x00028B58 VGT_LS_HS_CONFIG | 535 | 0x00028B58 VGT_LS_HS_CONFIG |
@@ -554,6 +562,18 @@ evergreen 0x9400 | |||
554 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 | 562 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 |
555 | 0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 | 563 | 0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 |
556 | 0x00028C3C PA_SC_AA_MASK | 564 | 0x00028C3C PA_SC_AA_MASK |
565 | 0x00028C78 CB_COLOR0_DIM | ||
566 | 0x00028CB4 CB_COLOR1_DIM | ||
567 | 0x00028CF0 CB_COLOR2_DIM | ||
568 | 0x00028D2C CB_COLOR3_DIM | ||
569 | 0x00028D68 CB_COLOR4_DIM | ||
570 | 0x00028DA4 CB_COLOR5_DIM | ||
571 | 0x00028DE0 CB_COLOR6_DIM | ||
572 | 0x00028E1C CB_COLOR7_DIM | ||
573 | 0x00028E58 CB_COLOR8_DIM | ||
574 | 0x00028E74 CB_COLOR9_DIM | ||
575 | 0x00028E90 CB_COLOR10_DIM | ||
576 | 0x00028EAC CB_COLOR11_DIM | ||
557 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 | 577 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 |
558 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 | 578 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 |
559 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 | 579 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 0380c5c15f80..626c24ea0b56 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -3,6 +3,9 @@ r600 0x9400 | |||
3 | 0x00028230 R7xx_PA_SC_EDGERULE | 3 | 0x00028230 R7xx_PA_SC_EDGERULE |
4 | 0x000286C8 R7xx_SPI_THREAD_GROUPING | 4 | 0x000286C8 R7xx_SPI_THREAD_GROUPING |
5 | 0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | 5 | 0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ |
6 | 0x00008490 CP_STRMOUT_CNTL | ||
7 | 0x000085F0 CP_COHER_CNTL | ||
8 | 0x000085F4 CP_COHER_SIZE | ||
6 | 0x000088C4 VGT_CACHE_INVALIDATION | 9 | 0x000088C4 VGT_CACHE_INVALIDATION |
7 | 0x00028A50 VGT_ENHANCE | 10 | 0x00028A50 VGT_ENHANCE |
8 | 0x000088CC VGT_ES_PER_GS | 11 | 0x000088CC VGT_ES_PER_GS |
@@ -38,6 +41,13 @@ r600 0x9400 | |||
38 | 0x00028AB4 VGT_REUSE_OFF | 41 | 0x00028AB4 VGT_REUSE_OFF |
39 | 0x00028AB8 VGT_VTX_CNT_EN | 42 | 0x00028AB8 VGT_VTX_CNT_EN |
40 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | 43 | 0x000088B0 VGT_VTX_VECT_EJECT_REG |
44 | 0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0 | ||
45 | 0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1 | ||
46 | 0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2 | ||
47 | 0x00028B04 VGT_STRMOUT_VTX_STRIDE_3 | ||
48 | 0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET | ||
49 | 0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE | ||
50 | 0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE | ||
41 | 0x00028810 PA_CL_CLIP_CNTL | 51 | 0x00028810 PA_CL_CLIP_CNTL |
42 | 0x00008A14 PA_CL_ENHANCE | 52 | 0x00008A14 PA_CL_ENHANCE |
43 | 0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ | 53 | 0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ |
@@ -428,7 +438,7 @@ r600 0x9400 | |||
428 | 0x00028638 SPI_VS_OUT_ID_9 | 438 | 0x00028638 SPI_VS_OUT_ID_9 |
429 | 0x00028438 SX_ALPHA_REF | 439 | 0x00028438 SX_ALPHA_REF |
430 | 0x00028410 SX_ALPHA_TEST_CONTROL | 440 | 0x00028410 SX_ALPHA_TEST_CONTROL |
431 | 0x00028350 SX_MISC | 441 | 0x00028354 SX_SURFACE_SYNC |
432 | 0x00009014 SX_MEMORY_EXPORT_SIZE | 442 | 0x00009014 SX_MEMORY_EXPORT_SIZE |
433 | 0x00009604 TC_INVALIDATE | 443 | 0x00009604 TC_INVALIDATE |
434 | 0x00009400 TD_FILTER4 | 444 | 0x00009400 TD_FILTER4 |
@@ -743,14 +753,6 @@ r600 0x9400 | |||
743 | 0x00028114 CB_COLOR5_MASK | 753 | 0x00028114 CB_COLOR5_MASK |
744 | 0x00028118 CB_COLOR6_MASK | 754 | 0x00028118 CB_COLOR6_MASK |
745 | 0x0002811C CB_COLOR7_MASK | 755 | 0x0002811C CB_COLOR7_MASK |
746 | 0x00028080 CB_COLOR0_VIEW | ||
747 | 0x00028084 CB_COLOR1_VIEW | ||
748 | 0x00028088 CB_COLOR2_VIEW | ||
749 | 0x0002808C CB_COLOR3_VIEW | ||
750 | 0x00028090 CB_COLOR4_VIEW | ||
751 | 0x00028094 CB_COLOR5_VIEW | ||
752 | 0x00028098 CB_COLOR6_VIEW | ||
753 | 0x0002809C CB_COLOR7_VIEW | ||
754 | 0x00028808 CB_COLOR_CONTROL | 756 | 0x00028808 CB_COLOR_CONTROL |
755 | 0x0002842C CB_FOG_BLUE | 757 | 0x0002842C CB_FOG_BLUE |
756 | 0x00028428 CB_FOG_GREEN | 758 | 0x00028428 CB_FOG_GREEN |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 866a05be75f2..4cf381b3a6d8 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -430,7 +430,7 @@ static int rs400_startup(struct radeon_device *rdev) | |||
430 | if (r) | 430 | if (r) |
431 | return r; | 431 | return r; |
432 | 432 | ||
433 | r = r100_ib_test(rdev); | 433 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
434 | if (r) { | 434 | if (r) { |
435 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 435 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
436 | rdev->accel_working = false; | 436 | rdev->accel_working = false; |
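
Note: the per-family IB tests (r100_ib_test(), r600_ib_test(), ...) are funneled through a single radeon_ib_test(rdev, ring_index, ring) entry point, which is why the rs400/rs600/rs690/rv515/rv770 startup paths below all switch to the same call with the GFX ring. A hedged guess at the dispatcher's shape, assuming it forwards through the per-ring entry of the ASIC table; the real definition may differ:

/* Assumed dispatcher shape; illustrative only. */
#define radeon_ib_test(rdev, ring_idx, ring) \
	((rdev)->asic->ring[(ring_idx)].ib_test((rdev), (ring)))
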
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4fc700684dcd..d25cf869d08d 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -46,6 +46,25 @@ | |||
46 | void rs600_gpu_init(struct radeon_device *rdev); | 46 | void rs600_gpu_init(struct radeon_device *rdev); |
47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 47 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
48 | 48 | ||
49 | void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) | ||
50 | { | ||
51 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
52 | int i; | ||
53 | |||
54 | if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) { | ||
55 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
56 | if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)) | ||
57 | break; | ||
58 | udelay(1); | ||
59 | } | ||
60 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
61 | if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK) | ||
62 | break; | ||
63 | udelay(1); | ||
64 | } | ||
65 | } | ||
66 | } | ||
67 | |||
49 | void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) | 68 | void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) |
50 | { | 69 | { |
51 | /* enable the pflip int */ | 70 | /* enable the pflip int */ |
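
Note: avivo_wait_for_vblank() deliberately waits in two phases: first for the CRTC to leave any vblank it is currently in, then for the next vblank to begin, so the caller is guaranteed to proceed at the start of a fresh vblank rather than partway through one, with rdev->usec_timeout bounding each phase. A standalone toy version of that edge-waiting pattern against a fake status source:

#include <stdbool.h>
#include <stdio.h>

/* Fake "in vblank" status: 200-line frame, lines 180..199 are vblank. */
static int scanline;
static bool in_vblank(void) { return scanline >= 180; }
static void tick(void)      { scanline = (scanline + 1) % 200; }

/* Two-phase wait: leave the current vblank (if any), then catch the
 * start of the next one, like avivo_wait_for_vblank() above. */
static void wait_for_fresh_vblank(int timeout)
{
	int i;

	for (i = 0; i < timeout && in_vblank(); i++)
		tick();
	for (i = 0; i < timeout && !in_vblank(); i++)
		tick();
}

int main(void)
{
	scanline = 190;                       /* start in the middle of a vblank */
	wait_for_fresh_vblank(1000);
	printf("scanline = %d\n", scanline);  /* 180: start of the next vblank */
	return 0;
}
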
@@ -175,7 +194,7 @@ void rs600_pm_misc(struct radeon_device *rdev) | |||
175 | /* set pcie lanes */ | 194 | /* set pcie lanes */ |
176 | if ((rdev->flags & RADEON_IS_PCIE) && | 195 | if ((rdev->flags & RADEON_IS_PCIE) && |
177 | !(rdev->flags & RADEON_IS_IGP) && | 196 | !(rdev->flags & RADEON_IS_IGP) && |
178 | rdev->asic->set_pcie_lanes && | 197 | rdev->asic->pm.set_pcie_lanes && |
179 | (ps->pcie_lanes != | 198 | (ps->pcie_lanes != |
180 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { | 199 | rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { |
181 | radeon_set_pcie_lanes(rdev, | 200 | radeon_set_pcie_lanes(rdev, |
@@ -864,7 +883,7 @@ static int rs600_startup(struct radeon_device *rdev) | |||
864 | if (r) | 883 | if (r) |
865 | return r; | 884 | return r; |
866 | 885 | ||
867 | r = r100_ib_test(rdev); | 886 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
868 | if (r) { | 887 | if (r) { |
869 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 888 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
870 | rdev->accel_working = false; | 889 | rdev->accel_working = false; |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index f68dff2fadcb..f2c3b9d75f18 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "rs690d.h" | 32 | #include "rs690d.h" |
33 | 33 | ||
34 | static int rs690_mc_wait_for_idle(struct radeon_device *rdev) | 34 | int rs690_mc_wait_for_idle(struct radeon_device *rdev) |
35 | { | 35 | { |
36 | unsigned i; | 36 | unsigned i; |
37 | uint32_t tmp; | 37 | uint32_t tmp; |
@@ -647,7 +647,7 @@ static int rs690_startup(struct radeon_device *rdev) | |||
647 | if (r) | 647 | if (r) |
648 | return r; | 648 | return r; |
649 | 649 | ||
650 | r = r100_ib_test(rdev); | 650 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
651 | if (r) { | 651 | if (r) { |
652 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 652 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
653 | rdev->accel_working = false; | 653 | rdev->accel_working = false; |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index c520d06a930c..d8d78fe17946 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -53,9 +53,8 @@ void rv515_debugfs(struct radeon_device *rdev) | |||
53 | } | 53 | } |
54 | } | 54 | } |
55 | 55 | ||
56 | void rv515_ring_start(struct radeon_device *rdev) | 56 | void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) |
57 | { | 57 | { |
58 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | ||
59 | int r; | 58 | int r; |
60 | 59 | ||
61 | r = radeon_ring_lock(rdev, ring, 64); | 60 | r = radeon_ring_lock(rdev, ring, 64); |
@@ -413,7 +412,7 @@ static int rv515_startup(struct radeon_device *rdev) | |||
413 | if (r) | 412 | if (r) |
414 | return r; | 413 | return r; |
415 | 414 | ||
416 | r = r100_ib_test(rdev); | 415 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
417 | if (r) { | 416 | if (r) { |
418 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | 417 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
419 | rdev->accel_working = false; | 418 | rdev->accel_working = false; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c049c0c51841..c62ae4be3845 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -1074,7 +1074,7 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1074 | r = r600_blit_init(rdev); | 1074 | r = r600_blit_init(rdev); |
1075 | if (r) { | 1075 | if (r) { |
1076 | r600_blit_fini(rdev); | 1076 | r600_blit_fini(rdev); |
1077 | rdev->asic->copy = NULL; | 1077 | rdev->asic->copy.copy = NULL; |
1078 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 1078 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
1079 | } | 1079 | } |
1080 | 1080 | ||
@@ -1114,7 +1114,7 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1114 | if (r) | 1114 | if (r) |
1115 | return r; | 1115 | return r; |
1116 | 1116 | ||
1117 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | 1117 | r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1118 | if (r) { | 1118 | if (r) { |
1119 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | 1119 | dev_err(rdev->dev, "IB test failed (%d).\n", r); |
1120 | rdev->accel_working = false; | 1120 | rdev->accel_working = false; |
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index 8a3e31599c94..031aaaf79ac2 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c | |||
@@ -1057,7 +1057,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ | |||
1057 | DRM_ERROR("indexed drawing command extends " | 1057 | DRM_ERROR("indexed drawing command extends " |
1058 | "beyond end of command buffer\n"); | 1058 | "beyond end of command buffer\n"); |
1059 | DMA_FLUSH(); | 1059 | DMA_FLUSH(); |
1060 | return -EINVAL; | 1060 | ret = -EINVAL; |
1061 | goto done; | ||
1061 | } | 1062 | } |
1062 | /* fall through */ | 1063 | /* fall through */ |
1063 | case SAVAGE_CMD_DMA_PRIM: | 1064 | case SAVAGE_CMD_DMA_PRIM: |
@@ -1076,7 +1077,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ | |||
1076 | cmdbuf->vb_stride, | 1077 | cmdbuf->vb_stride, |
1077 | cmdbuf->nbox, cmdbuf->box_addr); | 1078 | cmdbuf->nbox, cmdbuf->box_addr); |
1078 | if (ret != 0) | 1079 | if (ret != 0) |
1079 | return ret; | 1080 | goto done; |
1080 | first_draw_cmd = NULL; | 1081 | first_draw_cmd = NULL; |
1081 | } | 1082 | } |
1082 | } | 1083 | } |
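
Note: the savage fix routes the two error legs through the function's existing "done" label instead of returning directly, so the clean-up that label performs (and the single return of ret) is never skipped. A standalone sketch of that goto-unwind convention:

#include <stdio.h>
#include <stdlib.h>

/* Single-exit error handling: every failure sets ret and jumps to the
 * label that releases whatever was acquired, instead of returning early. */
static int process(int fail_early)
{
	int ret = 0;
	char *buf = malloc(64);

	if (!buf)
		return -1;                /* nothing to unwind yet */

	if (fail_early) {
		ret = -22;                /* cf. ret = -EINVAL; goto done; */
		goto done;
	}

	/* ... main work using buf ... */

done:
	free(buf);                        /* clean-up runs on every path */
	return ret;
}

int main(void)
{
	printf("%d %d\n", process(0), process(1)); /* 0 -22 */
	return 0;
}
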
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 573220cc5269..30d98d14b5c5 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c | |||
@@ -41,6 +41,8 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) | |||
41 | { | 41 | { |
42 | drm_sis_private_t *dev_priv; | 42 | drm_sis_private_t *dev_priv; |
43 | 43 | ||
44 | pci_set_master(dev->pdev); | ||
45 | |||
44 | dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL); | 46 | dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL); |
45 | if (dev_priv == NULL) | 47 | if (dev_priv == NULL) |
46 | return -ENOMEM; | 48 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 747c1413fc95..4a8728291361 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c | |||
@@ -29,6 +29,8 @@ | |||
29 | * Keith Packard. | 29 | * Keith Packard. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #define pr_fmt(fmt) "[TTM] " fmt | ||
33 | |||
32 | #include "ttm/ttm_module.h" | 34 | #include "ttm/ttm_module.h" |
33 | #include "ttm/ttm_bo_driver.h" | 35 | #include "ttm/ttm_bo_driver.h" |
34 | #include "ttm/ttm_page_alloc.h" | 36 | #include "ttm/ttm_page_alloc.h" |
@@ -74,7 +76,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
74 | 76 | ||
75 | ret = agp_bind_memory(mem, node->start); | 77 | ret = agp_bind_memory(mem, node->start); |
76 | if (ret) | 78 | if (ret) |
77 | printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); | 79 | pr_err("AGP Bind memory failed\n"); |
78 | 80 | ||
79 | return ret; | 81 | return ret; |
80 | } | 82 | } |
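
Note: defining pr_fmt before any includes lets every pr_err()/pr_info() in the file pick up the "[TTM] " prefix at compile time, which is what allows the long "KERN_ERR TTM_PFX ..." printk lines in the following hunks to shrink to plain pr_err() calls. A userspace mock of the mechanism (the kernel's pr_err is built on printk, not printf, and ##__VA_ARGS__ is a GNU/clang extension):

#include <stdio.h>

/* Must come before the first use, like the kernel convention of
 * defining pr_fmt above the #includes. */
#define pr_fmt(fmt) "[TTM] " fmt

/* Simplified stand-in for the kernel macro, which expands to
 * printk(KERN_ERR pr_fmt(fmt), ...). */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("AGP Bind memory failed\n");       /* "[TTM] AGP Bind memory failed" */
	pr_err("Illegal memory type %d\n", 42);   /* "[TTM] Illegal memory type 42" */
	return 0;
}
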
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7c3a57de8187..1f5c67c579cf 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -28,6 +28,8 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define pr_fmt(fmt) "[TTM] " fmt | ||
32 | |||
31 | #include "ttm/ttm_module.h" | 33 | #include "ttm/ttm_module.h" |
32 | #include "ttm/ttm_bo_driver.h" | 34 | #include "ttm/ttm_bo_driver.h" |
33 | #include "ttm/ttm_placement.h" | 35 | #include "ttm/ttm_placement.h" |
@@ -68,15 +70,13 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) | |||
68 | { | 70 | { |
69 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 71 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
70 | 72 | ||
71 | printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type); | 73 | pr_err(" has_type: %d\n", man->has_type); |
72 | printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); | 74 | pr_err(" use_type: %d\n", man->use_type); |
73 | printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); | 75 | pr_err(" flags: 0x%08X\n", man->flags); |
74 | printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); | 76 | pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); |
75 | printk(KERN_ERR TTM_PFX " size: %llu\n", man->size); | 77 | pr_err(" size: %llu\n", man->size); |
76 | printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", | 78 | pr_err(" available_caching: 0x%08X\n", man->available_caching); |
77 | man->available_caching); | 79 | pr_err(" default_caching: 0x%08X\n", man->default_caching); |
78 | printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", | ||
79 | man->default_caching); | ||
80 | if (mem_type != TTM_PL_SYSTEM) | 80 | if (mem_type != TTM_PL_SYSTEM) |
81 | (*man->func->debug)(man, TTM_PFX); | 81 | (*man->func->debug)(man, TTM_PFX); |
82 | } | 82 | } |
@@ -86,16 +86,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, | |||
86 | { | 86 | { |
87 | int i, ret, mem_type; | 87 | int i, ret, mem_type; |
88 | 88 | ||
89 | printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n", | 89 | pr_err("No space for %p (%lu pages, %luK, %luM)\n", |
90 | bo, bo->mem.num_pages, bo->mem.size >> 10, | 90 | bo, bo->mem.num_pages, bo->mem.size >> 10, |
91 | bo->mem.size >> 20); | 91 | bo->mem.size >> 20); |
92 | for (i = 0; i < placement->num_placement; i++) { | 92 | for (i = 0; i < placement->num_placement; i++) { |
93 | ret = ttm_mem_type_from_flags(placement->placement[i], | 93 | ret = ttm_mem_type_from_flags(placement->placement[i], |
94 | &mem_type); | 94 | &mem_type); |
95 | if (ret) | 95 | if (ret) |
96 | return; | 96 | return; |
97 | printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n", | 97 | pr_err(" placement[%d]=0x%08X (%d)\n", |
98 | i, placement->placement[i], mem_type); | 98 | i, placement->placement[i], mem_type); |
99 | ttm_mem_type_debug(bo->bdev, mem_type); | 99 | ttm_mem_type_debug(bo->bdev, mem_type); |
100 | } | 100 | } |
101 | } | 101 | } |
@@ -344,7 +344,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
344 | ret = -ENOMEM; | 344 | ret = -ENOMEM; |
345 | break; | 345 | break; |
346 | default: | 346 | default: |
347 | printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); | 347 | pr_err("Illegal buffer object type\n"); |
348 | ret = -EINVAL; | 348 | ret = -EINVAL; |
349 | break; | 349 | break; |
350 | } | 350 | } |
@@ -432,7 +432,7 @@ moved: | |||
432 | if (bo->evicted) { | 432 | if (bo->evicted) { |
433 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); | 433 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); |
434 | if (ret) | 434 | if (ret) |
435 | printk(KERN_ERR TTM_PFX "Can not flush read caches\n"); | 435 | pr_err("Can not flush read caches\n"); |
436 | bo->evicted = false; | 436 | bo->evicted = false; |
437 | } | 437 | } |
438 | 438 | ||
@@ -734,9 +734,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
734 | 734 | ||
735 | if (unlikely(ret != 0)) { | 735 | if (unlikely(ret != 0)) { |
736 | if (ret != -ERESTARTSYS) { | 736 | if (ret != -ERESTARTSYS) { |
737 | printk(KERN_ERR TTM_PFX | 737 | pr_err("Failed to expire sync object before buffer eviction\n"); |
738 | "Failed to expire sync object before " | ||
739 | "buffer eviction.\n"); | ||
740 | } | 738 | } |
741 | goto out; | 739 | goto out; |
742 | } | 740 | } |
@@ -757,9 +755,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
757 | no_wait_reserve, no_wait_gpu); | 755 | no_wait_reserve, no_wait_gpu); |
758 | if (ret) { | 756 | if (ret) { |
759 | if (ret != -ERESTARTSYS) { | 757 | if (ret != -ERESTARTSYS) { |
760 | printk(KERN_ERR TTM_PFX | 758 | pr_err("Failed to find memory space for buffer 0x%p eviction\n", |
761 | "Failed to find memory space for " | 759 | bo); |
762 | "buffer 0x%p eviction.\n", bo); | ||
763 | ttm_bo_mem_space_debug(bo, &placement); | 760 | ttm_bo_mem_space_debug(bo, &placement); |
764 | } | 761 | } |
765 | goto out; | 762 | goto out; |
@@ -769,7 +766,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
769 | no_wait_reserve, no_wait_gpu); | 766 | no_wait_reserve, no_wait_gpu); |
770 | if (ret) { | 767 | if (ret) { |
771 | if (ret != -ERESTARTSYS) | 768 | if (ret != -ERESTARTSYS) |
772 | printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); | 769 | pr_err("Buffer eviction failed\n"); |
773 | ttm_bo_mem_put(bo, &evict_mem); | 770 | ttm_bo_mem_put(bo, &evict_mem); |
774 | goto out; | 771 | goto out; |
775 | } | 772 | } |
@@ -1180,7 +1177,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1180 | 1177 | ||
1181 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); | 1178 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); |
1182 | if (ret) { | 1179 | if (ret) { |
1183 | printk(KERN_ERR TTM_PFX "Out of kernel memory.\n"); | 1180 | pr_err("Out of kernel memory\n"); |
1184 | if (destroy) | 1181 | if (destroy) |
1185 | (*destroy)(bo); | 1182 | (*destroy)(bo); |
1186 | else | 1183 | else |
@@ -1191,7 +1188,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1191 | size += buffer_start & ~PAGE_MASK; | 1188 | size += buffer_start & ~PAGE_MASK; |
1192 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1189 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1193 | if (num_pages == 0) { | 1190 | if (num_pages == 0) { |
1194 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); | 1191 | pr_err("Illegal buffer object size\n"); |
1195 | if (destroy) | 1192 | if (destroy) |
1196 | (*destroy)(bo); | 1193 | (*destroy)(bo); |
1197 | else | 1194 | else |
@@ -1342,8 +1339,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1342 | if (allow_errors) { | 1339 | if (allow_errors) { |
1343 | return ret; | 1340 | return ret; |
1344 | } else { | 1341 | } else { |
1345 | printk(KERN_ERR TTM_PFX | 1342 | pr_err("Cleanup eviction failed\n"); |
1346 | "Cleanup eviction failed\n"); | ||
1347 | } | 1343 | } |
1348 | } | 1344 | } |
1349 | spin_lock(&glob->lru_lock); | 1345 | spin_lock(&glob->lru_lock); |
@@ -1358,14 +1354,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1358 | int ret = -EINVAL; | 1354 | int ret = -EINVAL; |
1359 | 1355 | ||
1360 | if (mem_type >= TTM_NUM_MEM_TYPES) { | 1356 | if (mem_type >= TTM_NUM_MEM_TYPES) { |
1361 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); | 1357 | pr_err("Illegal memory type %d\n", mem_type); |
1362 | return ret; | 1358 | return ret; |
1363 | } | 1359 | } |
1364 | man = &bdev->man[mem_type]; | 1360 | man = &bdev->man[mem_type]; |
1365 | 1361 | ||
1366 | if (!man->has_type) { | 1362 | if (!man->has_type) { |
1367 | printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " | 1363 | pr_err("Trying to take down uninitialized memory manager type %u\n", |
1368 | "memory manager type %u\n", mem_type); | 1364 | mem_type); |
1369 | return ret; | 1365 | return ret; |
1370 | } | 1366 | } |
1371 | 1367 | ||
@@ -1388,16 +1384,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1388 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 1384 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
1389 | 1385 | ||
1390 | if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { | 1386 | if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { |
1391 | printk(KERN_ERR TTM_PFX | 1387 | pr_err("Illegal memory manager memory type %u\n", mem_type); |
1392 | "Illegal memory manager memory type %u.\n", | ||
1393 | mem_type); | ||
1394 | return -EINVAL; | 1388 | return -EINVAL; |
1395 | } | 1389 | } |
1396 | 1390 | ||
1397 | if (!man->has_type) { | 1391 | if (!man->has_type) { |
1398 | printk(KERN_ERR TTM_PFX | 1392 | pr_err("Memory type %u has not been initialized\n", mem_type); |
1399 | "Memory type %u has not been initialized.\n", | ||
1400 | mem_type); | ||
1401 | return 0; | 1393 | return 0; |
1402 | } | 1394 | } |
1403 | 1395 | ||
@@ -1482,8 +1474,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref) | |||
1482 | ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); | 1474 | ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); |
1483 | ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); | 1475 | ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); |
1484 | if (unlikely(ret != 0)) { | 1476 | if (unlikely(ret != 0)) { |
1485 | printk(KERN_ERR TTM_PFX | 1477 | pr_err("Could not register buffer object swapout\n"); |
1486 | "Could not register buffer object swapout.\n"); | ||
1487 | goto out_no_shrink; | 1478 | goto out_no_shrink; |
1488 | } | 1479 | } |
1489 | 1480 | ||
@@ -1516,9 +1507,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) | |||
1516 | man->use_type = false; | 1507 | man->use_type = false; |
1517 | if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { | 1508 | if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { |
1518 | ret = -EBUSY; | 1509 | ret = -EBUSY; |
1519 | printk(KERN_ERR TTM_PFX | 1510 | pr_err("DRM memory manager type %d is not clean\n", |
1520 | "DRM memory manager type %d " | 1511 | i); |
1521 | "is not clean.\n", i); | ||
1522 | } | 1512 | } |
1523 | man->has_type = false; | 1513 | man->has_type = false; |
1524 | } | 1514 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 54412848de88..a877813571a4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -28,6 +28,8 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define pr_fmt(fmt) "[TTM] " fmt | ||
32 | |||
31 | #include <ttm/ttm_module.h> | 33 | #include <ttm/ttm_module.h> |
32 | #include <ttm/ttm_bo_driver.h> | 34 | #include <ttm/ttm_bo_driver.h> |
33 | #include <ttm/ttm_placement.h> | 35 | #include <ttm/ttm_placement.h> |
@@ -262,8 +264,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
262 | read_unlock(&bdev->vm_lock); | 264 | read_unlock(&bdev->vm_lock); |
263 | 265 | ||
264 | if (unlikely(bo == NULL)) { | 266 | if (unlikely(bo == NULL)) { |
265 | printk(KERN_ERR TTM_PFX | 267 | pr_err("Could not find buffer object to map\n"); |
266 | "Could not find buffer object to map.\n"); | ||
267 | return -EINVAL; | 268 | return -EINVAL; |
268 | } | 269 | } |
269 | 270 | ||
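For readers unfamiliar with the pattern these hunks introduce, here is a minimal sketch (not part of the patch) of how a per-file pr_fmt prefix works; example_report() is an illustrative helper, not a TTM function:

#define pr_fmt(fmt) "[TTM] " fmt   /* must be defined before any printk header */

#include <linux/printk.h>

/* pr_err()/pr_warn()/pr_info() expand to printk(KERN_<LEVEL> pr_fmt(fmt), ...),
 * so every message from this file gets the "[TTM] " prefix exactly once. */
static void example_report(int mem_type)
{
        /* Logs: "[TTM] Illegal memory type 7" at KERN_ERR level. */
        pr_err("Illegal memory type %d\n", mem_type);
}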
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 9eba8e9a4e9c..23d2ecbaed59 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -25,6 +25,8 @@ | |||
25 | * | 25 | * |
26 | **************************************************************************/ | 26 | **************************************************************************/ |
27 | 27 | ||
28 | #define pr_fmt(fmt) "[TTM] " fmt | ||
29 | |||
28 | #include "ttm/ttm_memory.h" | 30 | #include "ttm/ttm_memory.h" |
29 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
30 | #include "ttm/ttm_page_alloc.h" | 32 | #include "ttm/ttm_page_alloc.h" |
@@ -74,9 +76,8 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj) | |||
74 | struct ttm_mem_zone *zone = | 76 | struct ttm_mem_zone *zone = |
75 | container_of(kobj, struct ttm_mem_zone, kobj); | 77 | container_of(kobj, struct ttm_mem_zone, kobj); |
76 | 78 | ||
77 | printk(KERN_INFO TTM_PFX | 79 | pr_info("Zone %7s: Used memory at exit: %llu kiB\n", |
78 | "Zone %7s: Used memory at exit: %llu kiB.\n", | 80 | zone->name, (unsigned long long)zone->used_mem >> 10); |
79 | zone->name, (unsigned long long) zone->used_mem >> 10); | ||
80 | kfree(zone); | 81 | kfree(zone); |
81 | } | 82 | } |
82 | 83 | ||
@@ -390,9 +391,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) | |||
390 | #endif | 391 | #endif |
391 | for (i = 0; i < glob->num_zones; ++i) { | 392 | for (i = 0; i < glob->num_zones; ++i) { |
392 | zone = glob->zones[i]; | 393 | zone = glob->zones[i]; |
393 | printk(KERN_INFO TTM_PFX | 394 | pr_info("Zone %7s: Available graphics memory: %llu kiB\n", |
394 | "Zone %7s: Available graphics memory: %llu kiB.\n", | 395 | zone->name, (unsigned long long)zone->max_mem >> 10); |
395 | zone->name, (unsigned long long) zone->max_mem >> 10); | ||
396 | } | 396 | } |
397 | ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); | 397 | ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); |
398 | ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); | 398 | ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); |
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 93577f2e2954..68daca412cbd 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -49,6 +49,8 @@ | |||
49 | * for fast lookup of ref objects given a base object. | 49 | * for fast lookup of ref objects given a base object. |
50 | */ | 50 | */ |
51 | 51 | ||
52 | #define pr_fmt(fmt) "[TTM] " fmt | ||
53 | |||
52 | #include "ttm/ttm_object.h" | 54 | #include "ttm/ttm_object.h" |
53 | #include "ttm/ttm_module.h" | 55 | #include "ttm/ttm_module.h" |
54 | #include <linux/list.h> | 56 | #include <linux/list.h> |
@@ -232,8 +234,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, | |||
232 | return NULL; | 234 | return NULL; |
233 | 235 | ||
234 | if (tfile != base->tfile && !base->shareable) { | 236 | if (tfile != base->tfile && !base->shareable) { |
235 | printk(KERN_ERR TTM_PFX | 237 | pr_err("Attempted access of non-shareable object\n"); |
236 | "Attempted access of non-shareable object.\n"); | ||
237 | ttm_base_object_unref(&base); | 238 | ttm_base_object_unref(&base); |
238 | return NULL; | 239 | return NULL; |
239 | } | 240 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 499debda791e..ebc6fac96e36 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -30,6 +30,9 @@ | |||
30 | * - Use page->lru to keep a free list | 30 | * - Use page->lru to keep a free list |
31 | * - doesn't track currently in use pages | 31 | * - doesn't track currently in use pages |
32 | */ | 32 | */ |
33 | |||
34 | #define pr_fmt(fmt) "[TTM] " fmt | ||
35 | |||
33 | #include <linux/list.h> | 36 | #include <linux/list.h> |
34 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
35 | #include <linux/highmem.h> | 38 | #include <linux/highmem.h> |
@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, | |||
167 | m->options.small = val; | 170 | m->options.small = val; |
168 | else if (attr == &ttm_page_pool_alloc_size) { | 171 | else if (attr == &ttm_page_pool_alloc_size) { |
169 | if (val > NUM_PAGES_TO_ALLOC*8) { | 172 | if (val > NUM_PAGES_TO_ALLOC*8) { |
170 | printk(KERN_ERR TTM_PFX | 173 | pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", |
171 | "Setting allocation size to %lu " | ||
172 | "is not allowed. Recommended size is " | ||
173 | "%lu\n", | ||
174 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), | 174 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), |
175 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | 175 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); |
176 | return size; | 176 | return size; |
177 | } else if (val > NUM_PAGES_TO_ALLOC) { | 177 | } else if (val > NUM_PAGES_TO_ALLOC) { |
178 | printk(KERN_WARNING TTM_PFX | 178 | pr_warn("Setting allocation size to larger than %lu is not recommended\n", |
179 | "Setting allocation size to " | 179 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); |
180 | "larger than %lu is not recommended.\n", | ||
181 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
182 | } | 180 | } |
183 | m->options.alloc_size = val; | 181 | m->options.alloc_size = val; |
184 | } | 182 | } |
@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages) | |||
279 | { | 277 | { |
280 | unsigned i; | 278 | unsigned i; |
281 | if (set_pages_array_wb(pages, npages)) | 279 | if (set_pages_array_wb(pages, npages)) |
282 | printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", | 280 | pr_err("Failed to set %d pages to wb!\n", npages); |
283 | npages); | ||
284 | for (i = 0; i < npages; ++i) | 281 | for (i = 0; i < npages; ++i) |
285 | __free_page(pages[i]); | 282 | __free_page(pages[i]); |
286 | } | 283 | } |
@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) | |||
315 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), | 312 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), |
316 | GFP_KERNEL); | 313 | GFP_KERNEL); |
317 | if (!pages_to_free) { | 314 | if (!pages_to_free) { |
318 | printk(KERN_ERR TTM_PFX | 315 | pr_err("Failed to allocate memory for pool free operation\n"); |
319 | "Failed to allocate memory for pool free operation.\n"); | ||
320 | return 0; | 316 | return 0; |
321 | } | 317 | } |
322 | 318 | ||
@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages, | |||
438 | case tt_uncached: | 434 | case tt_uncached: |
439 | r = set_pages_array_uc(pages, cpages); | 435 | r = set_pages_array_uc(pages, cpages); |
440 | if (r) | 436 | if (r) |
441 | printk(KERN_ERR TTM_PFX | 437 | pr_err("Failed to set %d pages to uc!\n", cpages); |
442 | "Failed to set %d pages to uc!\n", | ||
443 | cpages); | ||
444 | break; | 438 | break; |
445 | case tt_wc: | 439 | case tt_wc: |
446 | r = set_pages_array_wc(pages, cpages); | 440 | r = set_pages_array_wc(pages, cpages); |
447 | if (r) | 441 | if (r) |
448 | printk(KERN_ERR TTM_PFX | 442 | pr_err("Failed to set %d pages to wc!\n", cpages); |
449 | "Failed to set %d pages to wc!\n", | ||
450 | cpages); | ||
451 | break; | 443 | break; |
452 | default: | 444 | default: |
453 | break; | 445 | break; |
@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, | |||
492 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); | 484 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); |
493 | 485 | ||
494 | if (!caching_array) { | 486 | if (!caching_array) { |
495 | printk(KERN_ERR TTM_PFX | 487 | pr_err("Unable to allocate table for new pages\n"); |
496 | "Unable to allocate table for new pages."); | ||
497 | return -ENOMEM; | 488 | return -ENOMEM; |
498 | } | 489 | } |
499 | 490 | ||
@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, | |||
501 | p = alloc_page(gfp_flags); | 492 | p = alloc_page(gfp_flags); |
502 | 493 | ||
503 | if (!p) { | 494 | if (!p) { |
504 | printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); | 495 | pr_err("Unable to get page %u\n", i); |
505 | 496 | ||
506 | /* store already allocated pages in the pool after | 497 | /* store already allocated pages in the pool after |
507 | * setting the caching state */ | 498 | * setting the caching state */ |
@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
599 | ++pool->nrefills; | 590 | ++pool->nrefills; |
600 | pool->npages += alloc_size; | 591 | pool->npages += alloc_size; |
601 | } else { | 592 | } else { |
602 | printk(KERN_ERR TTM_PFX | 593 | pr_err("Failed to fill pool (%p)\n", pool); |
603 | "Failed to fill pool (%p).", pool); | ||
604 | /* If we have any pages left put them to the pool. */ | 594 | /* If we have any pages left put them to the pool. */ |
605 | list_for_each_entry(p, &pool->list, lru) { | 595 | list_for_each_entry(p, &pool->list, lru) { |
606 | ++cpages; | 596 | ++cpages; |
@@ -675,9 +665,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, | |||
675 | for (i = 0; i < npages; i++) { | 665 | for (i = 0; i < npages; i++) { |
676 | if (pages[i]) { | 666 | if (pages[i]) { |
677 | if (page_count(pages[i]) != 1) | 667 | if (page_count(pages[i]) != 1) |
678 | printk(KERN_ERR TTM_PFX | 668 | pr_err("Erroneous page count. Leaking pages.\n"); |
679 | "Erroneous page count. " | ||
680 | "Leaking pages.\n"); | ||
681 | __free_page(pages[i]); | 669 | __free_page(pages[i]); |
682 | pages[i] = NULL; | 670 | pages[i] = NULL; |
683 | } | 671 | } |
@@ -689,9 +677,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, | |||
689 | for (i = 0; i < npages; i++) { | 677 | for (i = 0; i < npages; i++) { |
690 | if (pages[i]) { | 678 | if (pages[i]) { |
691 | if (page_count(pages[i]) != 1) | 679 | if (page_count(pages[i]) != 1) |
692 | printk(KERN_ERR TTM_PFX | 680 | pr_err("Erroneous page count. Leaking pages.\n"); |
693 | "Erroneous page count. " | ||
694 | "Leaking pages.\n"); | ||
695 | list_add_tail(&pages[i]->lru, &pool->list); | 681 | list_add_tail(&pages[i]->lru, &pool->list); |
696 | pages[i] = NULL; | 682 | pages[i] = NULL; |
697 | pool->npages++; | 683 | pool->npages++; |
@@ -740,8 +726,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, | |||
740 | p = alloc_page(gfp_flags); | 726 | p = alloc_page(gfp_flags); |
741 | if (!p) { | 727 | if (!p) { |
742 | 728 | ||
743 | printk(KERN_ERR TTM_PFX | 729 | pr_err("Unable to allocate page\n"); |
744 | "Unable to allocate page."); | ||
745 | return -ENOMEM; | 730 | return -ENOMEM; |
746 | } | 731 | } |
747 | 732 | ||
@@ -781,9 +766,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, | |||
781 | if (r) { | 766 | if (r) { |
782 | /* If there is any pages in the list put them back to | 767 | /* If there is any pages in the list put them back to |
783 | * the pool. */ | 768 | * the pool. */ |
784 | printk(KERN_ERR TTM_PFX | 769 | pr_err("Failed to allocate extra pages for large request\n"); |
785 | "Failed to allocate extra pages " | ||
786 | "for large request."); | ||
787 | ttm_put_pages(pages, count, flags, cstate); | 770 | ttm_put_pages(pages, count, flags, cstate); |
788 | return r; | 771 | return r; |
789 | } | 772 | } |
@@ -809,7 +792,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
809 | 792 | ||
810 | WARN_ON(_manager); | 793 | WARN_ON(_manager); |
811 | 794 | ||
812 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); | 795 | pr_info("Initializing pool allocator\n"); |
813 | 796 | ||
814 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); | 797 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); |
815 | 798 | ||
@@ -844,7 +827,7 @@ void ttm_page_alloc_fini(void) | |||
844 | { | 827 | { |
845 | int i; | 828 | int i; |
846 | 829 | ||
847 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); | 830 | pr_info("Finalizing pool allocator\n"); |
848 | ttm_pool_mm_shrink_fini(_manager); | 831 | ttm_pool_mm_shrink_fini(_manager); |
849 | 832 | ||
850 | for (i = 0; i < NUM_POOLS; ++i) | 833 | for (i = 0; i < NUM_POOLS; ++i) |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 0c46d8cdc6ea..4f9e548b2eec 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | |||
@@ -33,6 +33,8 @@ | |||
33 | * when freed). | 33 | * when freed). |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #define pr_fmt(fmt) "[TTM] " fmt | ||
37 | |||
36 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
37 | #include <linux/list.h> | 39 | #include <linux/list.h> |
38 | #include <linux/seq_file.h> /* for seq_printf */ | 40 | #include <linux/seq_file.h> /* for seq_printf */ |
@@ -221,18 +223,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, | |||
221 | m->options.small = val; | 223 | m->options.small = val; |
222 | else if (attr == &ttm_page_pool_alloc_size) { | 224 | else if (attr == &ttm_page_pool_alloc_size) { |
223 | if (val > NUM_PAGES_TO_ALLOC*8) { | 225 | if (val > NUM_PAGES_TO_ALLOC*8) { |
224 | printk(KERN_ERR TTM_PFX | 226 | pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", |
225 | "Setting allocation size to %lu " | ||
226 | "is not allowed. Recommended size is " | ||
227 | "%lu\n", | ||
228 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), | 227 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), |
229 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | 228 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); |
230 | return size; | 229 | return size; |
231 | } else if (val > NUM_PAGES_TO_ALLOC) { | 230 | } else if (val > NUM_PAGES_TO_ALLOC) { |
232 | printk(KERN_WARNING TTM_PFX | 231 | pr_warn("Setting allocation size to larger than %lu is not recommended\n", |
233 | "Setting allocation size to " | 232 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); |
234 | "larger than %lu is not recommended.\n", | ||
235 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
236 | } | 233 | } |
237 | m->options.alloc_size = val; | 234 | m->options.alloc_size = val; |
238 | } | 235 | } |
@@ -313,15 +310,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, | |||
313 | if (pool->type & IS_UC) { | 310 | if (pool->type & IS_UC) { |
314 | r = set_pages_array_uc(pages, cpages); | 311 | r = set_pages_array_uc(pages, cpages); |
315 | if (r) | 312 | if (r) |
316 | pr_err(TTM_PFX | 313 | pr_err("%s: Failed to set %d pages to uc!\n", |
317 | "%s: Failed to set %d pages to uc!\n", | ||
318 | pool->dev_name, cpages); | 314 | pool->dev_name, cpages); |
319 | } | 315 | } |
320 | if (pool->type & IS_WC) { | 316 | if (pool->type & IS_WC) { |
321 | r = set_pages_array_wc(pages, cpages); | 317 | r = set_pages_array_wc(pages, cpages); |
322 | if (r) | 318 | if (r) |
323 | pr_err(TTM_PFX | 319 | pr_err("%s: Failed to set %d pages to wc!\n", |
324 | "%s: Failed to set %d pages to wc!\n", | ||
325 | pool->dev_name, cpages); | 320 | pool->dev_name, cpages); |
326 | } | 321 | } |
327 | return r; | 322 | return r; |
@@ -387,8 +382,8 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, | |||
387 | /* Don't set WB on WB page pool. */ | 382 | /* Don't set WB on WB page pool. */ |
388 | if (npages && !(pool->type & IS_CACHED) && | 383 | if (npages && !(pool->type & IS_CACHED) && |
389 | set_pages_array_wb(pages, npages)) | 384 | set_pages_array_wb(pages, npages)) |
390 | pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n", | 385 | pr_err("%s: Failed to set %d pages to wb!\n", |
391 | pool->dev_name, npages); | 386 | pool->dev_name, npages); |
392 | 387 | ||
393 | list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { | 388 | list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { |
394 | list_del(&d_page->page_list); | 389 | list_del(&d_page->page_list); |
@@ -400,8 +395,8 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) | |||
400 | { | 395 | { |
401 | /* Don't set WB on WB page pool. */ | 396 | /* Don't set WB on WB page pool. */ |
402 | if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1)) | 397 | if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1)) |
403 | pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n", | 398 | pr_err("%s: Failed to set %d pages to wb!\n", |
404 | pool->dev_name, 1); | 399 | pool->dev_name, 1); |
405 | 400 | ||
406 | list_del(&d_page->page_list); | 401 | list_del(&d_page->page_list); |
407 | __ttm_dma_free_page(pool, d_page); | 402 | __ttm_dma_free_page(pool, d_page); |
@@ -430,17 +425,16 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) | |||
430 | #if 0 | 425 | #if 0 |
431 | if (nr_free > 1) { | 426 | if (nr_free > 1) { |
432 | pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n", | 427 | pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n", |
433 | pool->dev_name, pool->name, current->pid, | 428 | pool->dev_name, pool->name, current->pid, |
434 | npages_to_free, nr_free); | 429 | npages_to_free, nr_free); |
435 | } | 430 | } |
436 | #endif | 431 | #endif |
437 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), | 432 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), |
438 | GFP_KERNEL); | 433 | GFP_KERNEL); |
439 | 434 | ||
440 | if (!pages_to_free) { | 435 | if (!pages_to_free) { |
441 | pr_err(TTM_PFX | 436 | pr_err("%s: Failed to allocate memory for pool free operation\n", |
442 | "%s: Failed to allocate memory for pool free operation.\n", | 437 | pool->dev_name); |
443 | pool->dev_name); | ||
444 | return 0; | 438 | return 0; |
445 | } | 439 | } |
446 | INIT_LIST_HEAD(&d_pages); | 440 | INIT_LIST_HEAD(&d_pages); |
@@ -723,23 +717,21 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, | |||
723 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); | 717 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); |
724 | 718 | ||
725 | if (!caching_array) { | 719 | if (!caching_array) { |
726 | pr_err(TTM_PFX | 720 | pr_err("%s: Unable to allocate table for new pages\n", |
727 | "%s: Unable to allocate table for new pages.", | 721 | pool->dev_name); |
728 | pool->dev_name); | ||
729 | return -ENOMEM; | 722 | return -ENOMEM; |
730 | } | 723 | } |
731 | 724 | ||
732 | if (count > 1) { | 725 | if (count > 1) { |
733 | pr_debug("%s: (%s:%d) Getting %d pages\n", | 726 | pr_debug("%s: (%s:%d) Getting %d pages\n", |
734 | pool->dev_name, pool->name, current->pid, | 727 | pool->dev_name, pool->name, current->pid, count); |
735 | count); | ||
736 | } | 728 | } |
737 | 729 | ||
738 | for (i = 0, cpages = 0; i < count; ++i) { | 730 | for (i = 0, cpages = 0; i < count; ++i) { |
739 | dma_p = __ttm_dma_alloc_page(pool); | 731 | dma_p = __ttm_dma_alloc_page(pool); |
740 | if (!dma_p) { | 732 | if (!dma_p) { |
741 | pr_err(TTM_PFX "%s: Unable to get page %u.\n", | 733 | pr_err("%s: Unable to get page %u\n", |
742 | pool->dev_name, i); | 734 | pool->dev_name, i); |
743 | 735 | ||
744 | /* store already allocated pages in the pool after | 736 | /* store already allocated pages in the pool after |
745 | * setting the caching state */ | 737 | * setting the caching state */ |
@@ -821,8 +813,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, | |||
821 | struct dma_page *d_page; | 813 | struct dma_page *d_page; |
822 | unsigned cpages = 0; | 814 | unsigned cpages = 0; |
823 | 815 | ||
824 | pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n", | 816 | pr_err("%s: Failed to fill %s pool (r:%d)!\n", |
825 | pool->dev_name, pool->name, r); | 817 | pool->dev_name, pool->name, r); |
826 | 818 | ||
827 | list_for_each_entry(d_page, &d_pages, page_list) { | 819 | list_for_each_entry(d_page, &d_pages, page_list) { |
828 | cpages++; | 820 | cpages++; |
@@ -1038,8 +1030,8 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink, | |||
1038 | nr_free = shrink_pages; | 1030 | nr_free = shrink_pages; |
1039 | shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); | 1031 | shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); |
1040 | pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", | 1032 | pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", |
1041 | p->pool->dev_name, p->pool->name, current->pid, nr_free, | 1033 | p->pool->dev_name, p->pool->name, current->pid, |
1042 | shrink_pages); | 1034 | nr_free, shrink_pages); |
1043 | } | 1035 | } |
1044 | mutex_unlock(&_manager->lock); | 1036 | mutex_unlock(&_manager->lock); |
1045 | /* return estimated number of unused pages in pool */ | 1037 | /* return estimated number of unused pages in pool */ |
@@ -1064,7 +1056,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
1064 | 1056 | ||
1065 | WARN_ON(_manager); | 1057 | WARN_ON(_manager); |
1066 | 1058 | ||
1067 | printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n"); | 1059 | pr_info("Initializing DMA pool allocator\n"); |
1068 | 1060 | ||
1069 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); | 1061 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); |
1070 | if (!_manager) | 1062 | if (!_manager) |
@@ -1097,7 +1089,7 @@ void ttm_dma_page_alloc_fini(void) | |||
1097 | { | 1089 | { |
1098 | struct device_pools *p, *t; | 1090 | struct device_pools *p, *t; |
1099 | 1091 | ||
1100 | printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n"); | 1092 | pr_info("Finalizing DMA pool allocator\n"); |
1101 | ttm_dma_pool_mm_shrink_fini(_manager); | 1093 | ttm_dma_pool_mm_shrink_fini(_manager); |
1102 | 1094 | ||
1103 | list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { | 1095 | list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index c10cf5e2443a..fa09daf9a50c 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -28,6 +28,8 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define pr_fmt(fmt) "[TTM] " fmt | ||
32 | |||
31 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
32 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
33 | #include <linux/pagemap.h> | 35 | #include <linux/pagemap.h> |
@@ -196,7 +198,7 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, | |||
196 | ttm_tt_alloc_page_directory(ttm); | 198 | ttm_tt_alloc_page_directory(ttm); |
197 | if (!ttm->pages) { | 199 | if (!ttm->pages) { |
198 | ttm_tt_destroy(ttm); | 200 | ttm_tt_destroy(ttm); |
199 | printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); | 201 | pr_err("Failed allocating page table\n"); |
200 | return -ENOMEM; | 202 | return -ENOMEM; |
201 | } | 203 | } |
202 | return 0; | 204 | return 0; |
@@ -229,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, | |||
229 | ttm_dma_tt_alloc_page_directory(ttm_dma); | 231 | ttm_dma_tt_alloc_page_directory(ttm_dma); |
230 | if (!ttm->pages || !ttm_dma->dma_address) { | 232 | if (!ttm->pages || !ttm_dma->dma_address) { |
231 | ttm_tt_destroy(ttm); | 233 | ttm_tt_destroy(ttm); |
232 | printk(KERN_ERR TTM_PFX "Failed allocating page table\n"); | 234 | pr_err("Failed allocating page table\n"); |
233 | return -ENOMEM; | 235 | return -ENOMEM; |
234 | } | 236 | } |
235 | return 0; | 237 | return 0; |
@@ -347,7 +349,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) | |||
347 | ttm->num_pages << PAGE_SHIFT, | 349 | ttm->num_pages << PAGE_SHIFT, |
348 | 0); | 350 | 0); |
349 | if (unlikely(IS_ERR(swap_storage))) { | 351 | if (unlikely(IS_ERR(swap_storage))) { |
350 | printk(KERN_ERR "Failed allocating swap storage.\n"); | 352 | pr_err("Failed allocating swap storage\n"); |
351 | return PTR_ERR(swap_storage); | 353 | return PTR_ERR(swap_storage); |
352 | } | 354 | } |
353 | } else | 355 | } else |
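The ttm_tt_swapout() hunk above keeps the kernel's IS_ERR()/PTR_ERR() error-pointer convention. A self-contained sketch of that convention follows; example_setup_swap_file() and example_swapout() are hypothetical stand-ins, not functions from this patch:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Hypothetical allocator: returns a file pointer on success or an
 * ERR_PTR()-encoded errno on failure, never NULL. */
static struct file *example_setup_swap_file(loff_t size)
{
        return ERR_PTR(-ENOMEM);        /* always fails in this sketch */
}

static int example_swapout(loff_t size)
{
        struct file *swap_storage = example_setup_swap_file(size);

        if (IS_ERR(swap_storage)) {
                pr_err("Failed allocating swap storage\n");
                return PTR_ERR(swap_storage);   /* decode the errno */
        }
        return 0;
}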
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig new file mode 100644 index 000000000000..0b5e096d39a6 --- /dev/null +++ b/drivers/gpu/drm/udl/Kconfig | |||
@@ -0,0 +1,12 @@ | |||
1 | config DRM_UDL | ||
2 | tristate "DisplayLink" | ||
3 | depends on DRM && EXPERIMENTAL | ||
4 | select DRM_USB | ||
5 | select FB_SYS_FILLRECT | ||
6 | select FB_SYS_COPYAREA | ||
7 | select FB_SYS_IMAGEBLIT | ||
8 | select FB_DEFERRED_IO | ||
9 | select DRM_KMS_HELPER | ||
10 | help | ||
11 | This is a KMS driver for DisplayLink USB video adapters. | ||
12 | Say M/Y to add support for these devices via drm/kms interfaces. | ||
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile new file mode 100644 index 000000000000..05c7481bfd40 --- /dev/null +++ b/drivers/gpu/drm/udl/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | |||
2 | ccflags-y := -Iinclude/drm | ||
3 | |||
4 | udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o | ||
5 | |||
6 | obj-$(CONFIG_DRM_UDL) := udl.o | ||
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c new file mode 100644 index 000000000000..ba055e9ca007 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_connector.c | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * based in parts on udlfb.c: | ||
4 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
5 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
6 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License v2. See the file COPYING in the main directory of this archive for | ||
10 | * more details. | ||
11 | */ | ||
12 | |||
13 | #include "drmP.h" | ||
14 | #include "drm_crtc.h" | ||
15 | #include "drm_edid.h" | ||
16 | #include "drm_crtc_helper.h" | ||
17 | #include "udl_drv.h" | ||
18 | |||
19 | /* dummy connector used only to fetch EDID; | ||
20 | all UDL devices appear to have a DVI-D output */ | ||
21 | |||
22 | static u8 *udl_get_edid(struct udl_device *udl) | ||
23 | { | ||
24 | u8 *block; | ||
25 | char rbuf[3]; | ||
26 | int ret, i; | ||
27 | |||
28 | block = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
29 | if (block == NULL) | ||
30 | return NULL; | ||
31 | |||
32 | for (i = 0; i < EDID_LENGTH; i++) { | ||
33 | ret = usb_control_msg(udl->ddev->usbdev, | ||
34 | usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02), | ||
35 | (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, | ||
36 | HZ); | ||
37 | if (ret < 1) { | ||
38 | DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); | ||
39 | i--; | ||
40 | goto error; | ||
41 | } | ||
42 | block[i] = rbuf[1]; | ||
43 | } | ||
44 | |||
45 | return block; | ||
46 | |||
47 | error: | ||
48 | kfree(block); | ||
49 | return NULL; | ||
50 | } | ||
51 | |||
52 | static int udl_get_modes(struct drm_connector *connector) | ||
53 | { | ||
54 | struct udl_device *udl = connector->dev->dev_private; | ||
55 | struct edid *edid; | ||
56 | int ret; | ||
57 | |||
58 | edid = (struct edid *)udl_get_edid(udl); | ||
59 | |||
60 | connector->display_info.raw_edid = (char *)edid; | ||
61 | |||
62 | drm_mode_connector_update_edid_property(connector, edid); | ||
63 | ret = drm_add_edid_modes(connector, edid); | ||
64 | connector->display_info.raw_edid = NULL; | ||
65 | kfree(edid); | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | static int udl_mode_valid(struct drm_connector *connector, | ||
70 | struct drm_display_mode *mode) | ||
71 | { | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static enum drm_connector_status | ||
76 | udl_detect(struct drm_connector *connector, bool force) | ||
77 | { | ||
78 | if (drm_device_is_unplugged(connector->dev)) | ||
79 | return connector_status_disconnected; | ||
80 | return connector_status_connected; | ||
81 | } | ||
82 | |||
83 | struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector) | ||
84 | { | ||
85 | int enc_id = connector->encoder_ids[0]; | ||
86 | struct drm_mode_object *obj; | ||
87 | struct drm_encoder *encoder; | ||
88 | |||
89 | obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); | ||
90 | if (!obj) | ||
91 | return NULL; | ||
92 | encoder = obj_to_encoder(obj); | ||
93 | return encoder; | ||
94 | } | ||
95 | |||
96 | int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property, | ||
97 | uint64_t val) | ||
98 | { | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static void udl_connector_destroy(struct drm_connector *connector) | ||
103 | { | ||
104 | drm_sysfs_connector_remove(connector); | ||
105 | drm_connector_cleanup(connector); | ||
106 | kfree(connector); | ||
107 | } | ||
108 | |||
109 | struct drm_connector_helper_funcs udl_connector_helper_funcs = { | ||
110 | .get_modes = udl_get_modes, | ||
111 | .mode_valid = udl_mode_valid, | ||
112 | .best_encoder = udl_best_single_encoder, | ||
113 | }; | ||
114 | |||
115 | struct drm_connector_funcs udl_connector_funcs = { | ||
116 | .dpms = drm_helper_connector_dpms, | ||
117 | .detect = udl_detect, | ||
118 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
119 | .destroy = udl_connector_destroy, | ||
120 | .set_property = udl_connector_set_property, | ||
121 | }; | ||
122 | |||
123 | int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) | ||
124 | { | ||
125 | struct drm_connector *connector; | ||
126 | |||
127 | connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL); | ||
128 | if (!connector) | ||
129 | return -ENOMEM; | ||
130 | |||
131 | drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII); | ||
132 | drm_connector_helper_add(connector, &udl_connector_helper_funcs); | ||
133 | |||
134 | drm_sysfs_connector_add(connector); | ||
135 | drm_mode_connector_attach_encoder(connector, encoder); | ||
136 | |||
137 | drm_connector_attach_property(connector, | ||
138 | dev->mode_config.dirty_info_property, | ||
139 | 1); | ||
140 | return 0; | ||
141 | } | ||
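A hedged sketch of how this connector is expected to be paired with the dummy encoder; the actual wiring lives in udl_modeset.c, which is not part of the hunks shown here, so treat the call order below as an assumption rather than the driver's exact code:

#include "drmP.h"
#include "udl_drv.h"

static int example_output_init(struct drm_device *dev)
{
        struct drm_encoder *encoder;

        encoder = udl_encoder_init(dev);        /* dummy TMDS encoder */
        if (!encoder)
                return -ENOMEM;

        /* DVI-D connector that pulls EDID over USB control messages */
        return udl_connector_init(dev, encoder);
}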
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c new file mode 100644 index 000000000000..5340c5f3987b --- /dev/null +++ b/drivers/gpu/drm/udl/udl_drv.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License v2. See the file COPYING in the main directory of this archive for | ||
6 | * more details. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include "drm_usb.h" | ||
11 | #include "drm_crtc_helper.h" | ||
12 | #include "udl_drv.h" | ||
13 | |||
14 | static struct drm_driver driver; | ||
15 | |||
16 | static struct usb_device_id id_table[] = { | ||
17 | {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,}, | ||
18 | {}, | ||
19 | }; | ||
20 | MODULE_DEVICE_TABLE(usb, id_table); | ||
21 | |||
22 | MODULE_LICENSE("GPL"); | ||
23 | |||
24 | static int udl_usb_probe(struct usb_interface *interface, | ||
25 | const struct usb_device_id *id) | ||
26 | { | ||
27 | return drm_get_usb_dev(interface, id, &driver); | ||
28 | } | ||
29 | |||
30 | static void udl_usb_disconnect(struct usb_interface *interface) | ||
31 | { | ||
32 | struct drm_device *dev = usb_get_intfdata(interface); | ||
33 | |||
34 | drm_kms_helper_poll_disable(dev); | ||
35 | drm_connector_unplug_all(dev); | ||
36 | udl_fbdev_unplug(dev); | ||
37 | udl_drop_usb(dev); | ||
38 | drm_unplug_dev(dev); | ||
39 | } | ||
40 | |||
41 | static struct vm_operations_struct udl_gem_vm_ops = { | ||
42 | .fault = udl_gem_fault, | ||
43 | .open = drm_gem_vm_open, | ||
44 | .close = drm_gem_vm_close, | ||
45 | }; | ||
46 | |||
47 | static const struct file_operations udl_driver_fops = { | ||
48 | .owner = THIS_MODULE, | ||
49 | .open = drm_open, | ||
50 | .mmap = drm_gem_mmap, | ||
51 | .poll = drm_poll, | ||
52 | .read = drm_read, | ||
53 | .unlocked_ioctl = drm_ioctl, | ||
54 | .release = drm_release, | ||
55 | .fasync = drm_fasync, | ||
56 | .llseek = noop_llseek, | ||
57 | }; | ||
58 | |||
59 | static struct drm_driver driver = { | ||
60 | .driver_features = DRIVER_MODESET | DRIVER_GEM, | ||
61 | .load = udl_driver_load, | ||
62 | .unload = udl_driver_unload, | ||
63 | |||
64 | /* gem hooks */ | ||
65 | .gem_init_object = udl_gem_init_object, | ||
66 | .gem_free_object = udl_gem_free_object, | ||
67 | .gem_vm_ops = &udl_gem_vm_ops, | ||
68 | |||
69 | .dumb_create = udl_dumb_create, | ||
70 | .dumb_map_offset = udl_gem_mmap, | ||
71 | .dumb_destroy = udl_dumb_destroy, | ||
72 | .fops = &udl_driver_fops, | ||
73 | .name = DRIVER_NAME, | ||
74 | .desc = DRIVER_DESC, | ||
75 | .date = DRIVER_DATE, | ||
76 | .major = DRIVER_MAJOR, | ||
77 | .minor = DRIVER_MINOR, | ||
78 | .patchlevel = DRIVER_PATCHLEVEL, | ||
79 | }; | ||
80 | |||
81 | static struct usb_driver udl_driver = { | ||
82 | .name = "udl", | ||
83 | .probe = udl_usb_probe, | ||
84 | .disconnect = udl_usb_disconnect, | ||
85 | .id_table = id_table, | ||
86 | }; | ||
87 | |||
88 | static int __init udl_init(void) | ||
89 | { | ||
90 | return drm_usb_init(&driver, &udl_driver); | ||
91 | } | ||
92 | |||
93 | static void __exit udl_exit(void) | ||
94 | { | ||
95 | drm_usb_exit(&driver, &udl_driver); | ||
96 | } | ||
97 | |||
98 | module_init(udl_init); | ||
99 | module_exit(udl_exit); | ||
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h new file mode 100644 index 000000000000..1612954a5bc4 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_drv.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * | ||
4 | * based in parts on udlfb.c: | ||
5 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
6 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
7 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License v2. See the file COPYING in the main directory of this archive for | ||
11 | * more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef UDL_DRV_H | ||
15 | #define UDL_DRV_H | ||
16 | |||
17 | #include <linux/usb.h> | ||
18 | |||
19 | #define DRIVER_NAME "udl" | ||
20 | #define DRIVER_DESC "DisplayLink" | ||
21 | #define DRIVER_DATE "20120220" | ||
22 | |||
23 | #define DRIVER_MAJOR 0 | ||
24 | #define DRIVER_MINOR 0 | ||
25 | #define DRIVER_PATCHLEVEL 1 | ||
26 | |||
27 | struct udl_device; | ||
28 | |||
29 | struct urb_node { | ||
30 | struct list_head entry; | ||
31 | struct udl_device *dev; | ||
32 | struct delayed_work release_urb_work; | ||
33 | struct urb *urb; | ||
34 | }; | ||
35 | |||
36 | struct urb_list { | ||
37 | struct list_head list; | ||
38 | spinlock_t lock; | ||
39 | struct semaphore limit_sem; | ||
40 | int available; | ||
41 | int count; | ||
42 | size_t size; | ||
43 | }; | ||
44 | |||
45 | struct udl_fbdev; | ||
46 | |||
47 | struct udl_device { | ||
48 | struct device *dev; | ||
49 | struct drm_device *ddev; | ||
50 | |||
51 | int sku_pixel_limit; | ||
52 | |||
53 | struct urb_list urbs; | ||
54 | atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */ | ||
55 | |||
56 | struct udl_fbdev *fbdev; | ||
57 | char mode_buf[1024]; | ||
58 | uint32_t mode_buf_len; | ||
59 | atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */ | ||
60 | atomic_t bytes_identical; /* saved effort with backbuffer comparison */ | ||
61 | atomic_t bytes_sent; /* to usb, after compression including overhead */ | ||
62 | atomic_t cpu_kcycles_used; /* transpired during pixel processing */ | ||
63 | }; | ||
64 | |||
65 | struct udl_gem_object { | ||
66 | struct drm_gem_object base; | ||
67 | struct page **pages; | ||
68 | void *vmapping; | ||
69 | }; | ||
70 | |||
71 | #define to_udl_bo(x) container_of(x, struct udl_gem_object, base) | ||
72 | |||
73 | struct udl_framebuffer { | ||
74 | struct drm_framebuffer base; | ||
75 | struct udl_gem_object *obj; | ||
76 | bool active_16; /* active on the 16-bit channel */ | ||
77 | }; | ||
78 | |||
79 | #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base) | ||
80 | |||
81 | /* modeset */ | ||
82 | int udl_modeset_init(struct drm_device *dev); | ||
83 | void udl_modeset_cleanup(struct drm_device *dev); | ||
84 | int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder); | ||
85 | |||
86 | struct drm_encoder *udl_encoder_init(struct drm_device *dev); | ||
87 | |||
88 | struct urb *udl_get_urb(struct drm_device *dev); | ||
89 | |||
90 | int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); | ||
91 | void udl_urb_completion(struct urb *urb); | ||
92 | |||
93 | int udl_driver_load(struct drm_device *dev, unsigned long flags); | ||
94 | int udl_driver_unload(struct drm_device *dev); | ||
95 | |||
96 | int udl_fbdev_init(struct drm_device *dev); | ||
97 | void udl_fbdev_cleanup(struct drm_device *dev); | ||
98 | void udl_fbdev_unplug(struct drm_device *dev); | ||
99 | struct drm_framebuffer * | ||
100 | udl_fb_user_fb_create(struct drm_device *dev, | ||
101 | struct drm_file *file, | ||
102 | struct drm_mode_fb_cmd2 *mode_cmd); | ||
103 | |||
104 | int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, | ||
105 | const char *front, char **urb_buf_ptr, | ||
106 | u32 byte_offset, u32 byte_width, | ||
107 | int *ident_ptr, int *sent_ptr); | ||
108 | |||
109 | int udl_dumb_create(struct drm_file *file_priv, | ||
110 | struct drm_device *dev, | ||
111 | struct drm_mode_create_dumb *args); | ||
112 | int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev, | ||
113 | uint32_t handle, uint64_t *offset); | ||
114 | int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, | ||
115 | uint32_t handle); | ||
116 | |||
117 | int udl_gem_init_object(struct drm_gem_object *obj); | ||
118 | void udl_gem_free_object(struct drm_gem_object *gem_obj); | ||
119 | struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, | ||
120 | size_t size); | ||
121 | |||
122 | int udl_gem_vmap(struct udl_gem_object *obj); | ||
123 | void udl_gem_vunmap(struct udl_gem_object *obj); | ||
124 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
125 | |||
126 | int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | ||
127 | int width, int height); | ||
128 | |||
129 | int udl_drop_usb(struct drm_device *dev); | ||
130 | |||
131 | #define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */ | ||
132 | #define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */ | ||
133 | #define CMD_WRITE_COPY8 "\xAF\x62" /**< 8 bit copy command. */ | ||
134 | #define CMD_WRITE_RLX8 "\xAF\x63" /**< 8 bit extended run length command. */ | ||
135 | |||
136 | #define CMD_WRITE_RAW16 "\xAF\x68" /**< 16 bit raw write command. */ | ||
137 | #define CMD_WRITE_RL16 "\xAF\x69" /**< 16 bit run length command. */ | ||
138 | #define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */ | ||
139 | #define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */ | ||
140 | |||
141 | #endif | ||
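The to_udl_bo()/to_udl_fb() macros above are the usual container_of() accessors. A small illustration (the example_* helpers are not part of the driver):

#include "udl_drv.h"

/* Recover the wrapping udl object from its embedded 'base' member. */
static struct udl_framebuffer *example_to_ufb(struct drm_framebuffer *fb)
{
        return to_udl_fb(fb);
}

static struct udl_gem_object *example_to_ubo(struct drm_gem_object *obj)
{
        return to_udl_bo(obj);
}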
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c new file mode 100644 index 000000000000..56e75f0f1df5 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_encoder.c | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * based in parts on udlfb.c: | ||
4 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
5 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
6 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License v2. See the file COPYING in the main directory of this archive for | ||
10 | * more details. | ||
11 | */ | ||
12 | |||
13 | #include "drmP.h" | ||
14 | #include "drm_crtc.h" | ||
15 | #include "drm_crtc_helper.h" | ||
16 | #include "udl_drv.h" | ||
17 | |||
18 | /* dummy encoder */ | ||
19 | void udl_enc_destroy(struct drm_encoder *encoder) | ||
20 | { | ||
21 | drm_encoder_cleanup(encoder); | ||
22 | kfree(encoder); | ||
23 | } | ||
24 | |||
25 | static void udl_encoder_disable(struct drm_encoder *encoder) | ||
26 | { | ||
27 | } | ||
28 | |||
29 | static bool udl_mode_fixup(struct drm_encoder *encoder, | ||
30 | struct drm_display_mode *mode, | ||
31 | struct drm_display_mode *adjusted_mode) | ||
32 | { | ||
33 | return true; | ||
34 | } | ||
35 | |||
36 | static void udl_encoder_prepare(struct drm_encoder *encoder) | ||
37 | { | ||
38 | } | ||
39 | |||
40 | static void udl_encoder_commit(struct drm_encoder *encoder) | ||
41 | { | ||
42 | } | ||
43 | |||
44 | static void udl_encoder_mode_set(struct drm_encoder *encoder, | ||
45 | struct drm_display_mode *mode, | ||
46 | struct drm_display_mode *adjusted_mode) | ||
47 | { | ||
48 | } | ||
49 | |||
50 | static void | ||
51 | udl_encoder_dpms(struct drm_encoder *encoder, int mode) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | static const struct drm_encoder_helper_funcs udl_helper_funcs = { | ||
56 | .dpms = udl_encoder_dpms, | ||
57 | .mode_fixup = udl_mode_fixup, | ||
58 | .prepare = udl_encoder_prepare, | ||
59 | .mode_set = udl_encoder_mode_set, | ||
60 | .commit = udl_encoder_commit, | ||
61 | .disable = udl_encoder_disable, | ||
62 | }; | ||
63 | |||
64 | static const struct drm_encoder_funcs udl_enc_funcs = { | ||
65 | .destroy = udl_enc_destroy, | ||
66 | }; | ||
67 | |||
68 | struct drm_encoder *udl_encoder_init(struct drm_device *dev) | ||
69 | { | ||
70 | struct drm_encoder *encoder; | ||
71 | |||
72 | encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL); | ||
73 | if (!encoder) | ||
74 | return NULL; | ||
75 | |||
76 | drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
77 | drm_encoder_helper_add(encoder, &udl_helper_funcs); | ||
78 | encoder->possible_crtcs = 1; | ||
79 | return encoder; | ||
80 | } | ||
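The encoder is deliberately a stub; the only field that matters is possible_crtcs, a bitmask in which bit N means "this encoder can drive CRTC N". udl sets it to 1 (bit 0 only). The two-CRTC case in this sketch is hypothetical, shown only to illustrate the bitmask:

#include "drm_crtc.h"

static void example_possible_crtcs(struct drm_encoder *enc, int two_crtcs)
{
        /* bit 0 -> CRTC 0, bit 1 -> CRTC 1, and so on */
        enc->possible_crtcs = two_crtcs ? ((1 << 0) | (1 << 1)) : (1 << 0);
}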
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c new file mode 100644 index 000000000000..4d9c3a5d8a45 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -0,0 +1,611 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * | ||
4 | * based in parts on udlfb.c: | ||
5 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
6 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
7 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License v2. See the file COPYING in the main directory of this archive for | ||
11 | * more details. | ||
12 | */ | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/fb.h> | ||
16 | |||
17 | #include "drmP.h" | ||
18 | #include "drm.h" | ||
19 | #include "drm_crtc.h" | ||
20 | #include "drm_crtc_helper.h" | ||
21 | #include "udl_drv.h" | ||
22 | |||
23 | #include "drm_fb_helper.h" | ||
24 | |||
25 | #define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ | ||
26 | |||
27 | static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */ | ||
28 | static int fb_bpp = 16; | ||
29 | |||
30 | module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | ||
31 | module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); | ||
32 | |||
33 | struct udl_fbdev { | ||
34 | struct drm_fb_helper helper; | ||
35 | struct udl_framebuffer ufb; | ||
36 | struct list_head fbdev_list; | ||
37 | int fb_count; | ||
38 | }; | ||
39 | |||
40 | #define DL_ALIGN_UP(x, a) ALIGN(x, a) | ||
41 | #define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a) | ||
42 | |||
43 | /** Read the red component (0..255) of a 32 bpp colour. */ | ||
44 | #define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF) | ||
45 | |||
46 | /** Read the green component (0..255) of a 32 bpp colour. */ | ||
47 | #define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF) | ||
48 | |||
49 | /** Read the blue component (0..255) of a 32 bpp colour. */ | ||
50 | #define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF) | ||
51 | |||
52 | /** Return red/green component of a 16 bpp colour number. */ | ||
53 | #define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF) | ||
54 | |||
55 | /** Return green/blue component of a 16 bpp colour number. */ | ||
56 | #define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF) | ||
57 | |||
58 | /** Return 8 bpp colour number from red, green and blue components. */ | ||
59 | #define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF) | ||
60 | |||
61 | #if 0 | ||
62 | static uint8_t rgb8(uint32_t col) | ||
63 | { | ||
64 | uint8_t red = DLO_RGB_GETRED(col); | ||
65 | uint8_t grn = DLO_RGB_GETGRN(col); | ||
66 | uint8_t blu = DLO_RGB_GETBLU(col); | ||
67 | |||
68 | return DLO_RGB8(red, grn, blu); | ||
69 | } | ||
70 | |||
71 | static uint16_t rgb16(uint32_t col) | ||
72 | { | ||
73 | uint8_t red = DLO_RGB_GETRED(col); | ||
74 | uint8_t grn = DLO_RGB_GETGRN(col); | ||
75 | uint8_t blu = DLO_RGB_GETBLU(col); | ||
76 | |||
77 | return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu); | ||
78 | } | ||
79 | #endif | ||
80 | |||
81 | /* | ||
82 | * NOTE: fb_defio.c is holding info->fbdefio.mutex | ||
83 | * Touching ANY framebuffer memory that triggers a page fault | ||
84 | * in fb_defio will cause a deadlock, when it also tries to | ||
85 | * grab the same mutex. | ||
86 | */ | ||
87 | static void udlfb_dpy_deferred_io(struct fb_info *info, | ||
88 | struct list_head *pagelist) | ||
89 | { | ||
90 | struct page *cur; | ||
91 | struct fb_deferred_io *fbdefio = info->fbdefio; | ||
92 | struct udl_fbdev *ufbdev = info->par; | ||
93 | struct drm_device *dev = ufbdev->ufb.base.dev; | ||
94 | struct udl_device *udl = dev->dev_private; | ||
95 | struct urb *urb; | ||
96 | char *cmd; | ||
97 | cycles_t start_cycles, end_cycles; | ||
98 | int bytes_sent = 0; | ||
99 | int bytes_identical = 0; | ||
100 | int bytes_rendered = 0; | ||
101 | |||
102 | if (!fb_defio) | ||
103 | return; | ||
104 | |||
105 | start_cycles = get_cycles(); | ||
106 | |||
107 | urb = udl_get_urb(dev); | ||
108 | if (!urb) | ||
109 | return; | ||
110 | |||
111 | cmd = urb->transfer_buffer; | ||
112 | |||
113 | /* walk the written page list and render each to device */ | ||
114 | list_for_each_entry(cur, &fbdefio->pagelist, lru) { | ||
115 | |||
116 | if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8), | ||
117 | &urb, (char *) info->fix.smem_start, | ||
118 | &cmd, cur->index << PAGE_SHIFT, | ||
119 | PAGE_SIZE, &bytes_identical, &bytes_sent)) | ||
120 | goto error; | ||
121 | bytes_rendered += PAGE_SIZE; | ||
122 | } | ||
123 | |||
124 | if (cmd > (char *) urb->transfer_buffer) { | ||
125 | /* Send partial buffer remaining before exiting */ | ||
126 | int len = cmd - (char *) urb->transfer_buffer; | ||
127 | udl_submit_urb(dev, urb, len); | ||
128 | bytes_sent += len; | ||
129 | } else | ||
130 | udl_urb_completion(urb); | ||
131 | |||
132 | error: | ||
133 | atomic_add(bytes_sent, &udl->bytes_sent); | ||
134 | atomic_add(bytes_identical, &udl->bytes_identical); | ||
135 | atomic_add(bytes_rendered, &udl->bytes_rendered); | ||
136 | end_cycles = get_cycles(); | ||
137 | atomic_add(((unsigned int) ((end_cycles - start_cycles) | ||
138 | >> 10)), /* Kcycles */ | ||
139 | &udl->cpu_kcycles_used); | ||
140 | } | ||
141 | |||
142 | int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | ||
143 | int width, int height) | ||
144 | { | ||
145 | struct drm_device *dev = fb->base.dev; | ||
146 | struct udl_device *udl = dev->dev_private; | ||
147 | int i, ret; | ||
148 | char *cmd; | ||
149 | cycles_t start_cycles, end_cycles; | ||
150 | int bytes_sent = 0; | ||
151 | int bytes_identical = 0; | ||
152 | struct urb *urb; | ||
153 | int aligned_x; | ||
154 | int bpp = (fb->base.bits_per_pixel / 8); | ||
155 | |||
156 | if (!fb->active_16) | ||
157 | return 0; | ||
158 | |||
159 | if (!fb->obj->vmapping) | ||
160 | udl_gem_vmap(fb->obj); | ||
161 | |||
162 | start_cycles = get_cycles(); | ||
163 | |||
164 | aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); | ||
165 | width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); | ||
166 | x = aligned_x; | ||
167 | |||
168 | if ((width <= 0) || | ||
169 | (x + width > fb->base.width) || | ||
170 | (y + height > fb->base.height)) | ||
171 | return -EINVAL; | ||
172 | |||
173 | urb = udl_get_urb(dev); | ||
174 | if (!urb) | ||
175 | return 0; | ||
176 | cmd = urb->transfer_buffer; | ||
177 | |||
178 | for (i = y; i < y + height ; i++) { | ||
179 | const int line_offset = fb->base.pitches[0] * i; | ||
180 | const int byte_offset = line_offset + (x * bpp); | ||
181 | |||
182 | if (udl_render_hline(dev, bpp, &urb, | ||
183 | (char *) fb->obj->vmapping, | ||
184 | &cmd, byte_offset, width * bpp, | ||
185 | &bytes_identical, &bytes_sent)) | ||
186 | goto error; | ||
187 | } | ||
188 | |||
189 | if (cmd > (char *) urb->transfer_buffer) { | ||
190 | /* Send partial buffer remaining before exiting */ | ||
191 | int len = cmd - (char *) urb->transfer_buffer; | ||
192 | ret = udl_submit_urb(dev, urb, len); | ||
193 | bytes_sent += len; | ||
194 | } else | ||
195 | udl_urb_completion(urb); | ||
196 | |||
197 | error: | ||
198 | atomic_add(bytes_sent, &udl->bytes_sent); | ||
199 | atomic_add(bytes_identical, &udl->bytes_identical); | ||
200 | atomic_add(width*height*bpp, &udl->bytes_rendered); | ||
201 | end_cycles = get_cycles(); | ||
202 | atomic_add(((unsigned int) ((end_cycles - start_cycles) | ||
203 | >> 10)), /* Kcycles */ | ||
204 | &udl->cpu_kcycles_used); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
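For clarity, a worked example of the damage-rectangle alignment performed by udl_handle_damage() above, assuming sizeof(unsigned long) == 8; the numbers are illustrative only:

/*
 *   x = 13, width = 20
 *   aligned_x = DL_ALIGN_DOWN(13, 8) = ALIGN(13 - 7, 8) = 8
 *   width     = DL_ALIGN_UP(20 + (13 - 8), 8) = ALIGN(25, 8) = 32
 *
 * The rectangle is widened so it starts and ends on unsigned-long
 * boundaries before udl_render_hline() walks each scanline.
 */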
209 | static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) | ||
210 | { | ||
211 | unsigned long start = vma->vm_start; | ||
212 | unsigned long size = vma->vm_end - vma->vm_start; | ||
213 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
214 | unsigned long page, pos; | ||
215 | |||
216 | if (offset + size > info->fix.smem_len) | ||
217 | return -EINVAL; | ||
218 | |||
219 | pos = (unsigned long)info->fix.smem_start + offset; | ||
220 | |||
221 | pr_notice("mmap() framebuffer addr:%lu size:%lu\n", | ||
222 | pos, size); | ||
223 | |||
224 | while (size > 0) { | ||
225 | page = vmalloc_to_pfn((void *)pos); | ||
226 | if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) | ||
227 | return -EAGAIN; | ||
228 | |||
229 | start += PAGE_SIZE; | ||
230 | pos += PAGE_SIZE; | ||
231 | if (size > PAGE_SIZE) | ||
232 | size -= PAGE_SIZE; | ||
233 | else | ||
234 | size = 0; | ||
235 | } | ||
236 | |||
237 | vma->vm_flags |= VM_RESERVED; /* prevent this VMA from being swapped out */ | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | ||
242 | { | ||
243 | struct udl_fbdev *ufbdev = info->par; | ||
244 | |||
245 | sys_fillrect(info, rect); | ||
246 | |||
247 | udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width, | ||
248 | rect->height); | ||
249 | } | ||
250 | |||
251 | static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) | ||
252 | { | ||
253 | struct udl_fbdev *ufbdev = info->par; | ||
254 | |||
255 | sys_copyarea(info, region); | ||
256 | |||
257 | udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width, | ||
258 | region->height); | ||
259 | } | ||
260 | |||
261 | static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image) | ||
262 | { | ||
263 | struct udl_fbdev *ufbdev = info->par; | ||
264 | |||
265 | sys_imageblit(info, image); | ||
266 | |||
267 | udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width, | ||
268 | image->height); | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * It's common for several clients to have the framebuffer open simultaneously, | ||
273 | * e.g. both fbcon and X, which makes things interesting. | ||
274 | * Assumes caller is holding info->lock (for open and release at least) | ||
275 | */ | ||
276 | static int udl_fb_open(struct fb_info *info, int user) | ||
277 | { | ||
278 | struct udl_fbdev *ufbdev = info->par; | ||
279 | struct drm_device *dev = ufbdev->ufb.base.dev; | ||
280 | struct udl_device *udl = dev->dev_private; | ||
281 | |||
282 | /* If the USB device is gone, we don't accept new opens */ | ||
283 | if (drm_device_is_unplugged(udl->ddev)) | ||
284 | return -ENODEV; | ||
285 | |||
286 | ufbdev->fb_count++; | ||
287 | |||
288 | if (fb_defio && (info->fbdefio == NULL)) { | ||
289 | /* enable defio at last moment if not disabled by client */ | ||
290 | |||
291 | struct fb_deferred_io *fbdefio; | ||
292 | |||
293 | fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); | ||
294 | |||
295 | if (fbdefio) { | ||
296 | fbdefio->delay = DL_DEFIO_WRITE_DELAY; | ||
297 | fbdefio->deferred_io = udlfb_dpy_deferred_io; | ||
298 | } | ||
299 | |||
300 | info->fbdefio = fbdefio; | ||
301 | fb_deferred_io_init(info); | ||
302 | } | ||
303 | |||
304 | pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", | ||
305 | info->node, user, info, ufbdev->fb_count); | ||
306 | |||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | |||
311 | /* | ||
312 | * Assumes caller is holding info->lock mutex (for open and release at least) | ||
313 | */ | ||
314 | static int udl_fb_release(struct fb_info *info, int user) | ||
315 | { | ||
316 | struct udl_fbdev *ufbdev = info->par; | ||
317 | |||
318 | ufbdev->fb_count--; | ||
319 | |||
320 | if ((ufbdev->fb_count == 0) && (info->fbdefio)) { | ||
321 | fb_deferred_io_cleanup(info); | ||
322 | kfree(info->fbdefio); | ||
323 | info->fbdefio = NULL; | ||
324 | info->fbops->fb_mmap = udl_fb_mmap; | ||
325 | } | ||
326 | |||
327 | pr_warn("released /dev/fb%d user=%d count=%d\n", | ||
328 | info->node, user, ufbdev->fb_count); | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static struct fb_ops udlfb_ops = { | ||
334 | .owner = THIS_MODULE, | ||
335 | .fb_check_var = drm_fb_helper_check_var, | ||
336 | .fb_set_par = drm_fb_helper_set_par, | ||
337 | .fb_fillrect = udl_fb_fillrect, | ||
338 | .fb_copyarea = udl_fb_copyarea, | ||
339 | .fb_imageblit = udl_fb_imageblit, | ||
340 | .fb_pan_display = drm_fb_helper_pan_display, | ||
341 | .fb_blank = drm_fb_helper_blank, | ||
342 | .fb_setcmap = drm_fb_helper_setcmap, | ||
343 | .fb_debug_enter = drm_fb_helper_debug_enter, | ||
344 | .fb_debug_leave = drm_fb_helper_debug_leave, | ||
345 | .fb_mmap = udl_fb_mmap, | ||
346 | .fb_open = udl_fb_open, | ||
347 | .fb_release = udl_fb_release, | ||
348 | }; | ||
349 | |||
350 | void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | ||
351 | u16 blue, int regno) | ||
352 | { | ||
353 | } | ||
354 | |||
355 | void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
356 | u16 *blue, int regno) | ||
357 | { | ||
358 | *red = 0; | ||
359 | *green = 0; | ||
360 | *blue = 0; | ||
361 | } | ||
362 | |||
363 | static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, | ||
364 | struct drm_file *file, | ||
365 | unsigned flags, unsigned color, | ||
366 | struct drm_clip_rect *clips, | ||
367 | unsigned num_clips) | ||
368 | { | ||
369 | struct udl_framebuffer *ufb = to_udl_fb(fb); | ||
370 | int i; | ||
371 | |||
372 | if (!ufb->active_16) | ||
373 | return 0; | ||
374 | |||
375 | for (i = 0; i < num_clips; i++) { | ||
376 | udl_handle_damage(ufb, clips[i].x1, clips[i].y1, | ||
377 | clips[i].x2 - clips[i].x1, | ||
378 | clips[i].y2 - clips[i].y1); | ||
379 | } | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
384 | { | ||
385 | struct udl_framebuffer *ufb = to_udl_fb(fb); | ||
386 | |||
387 | if (ufb->obj) | ||
388 | drm_gem_object_unreference_unlocked(&ufb->obj->base); | ||
389 | |||
390 | drm_framebuffer_cleanup(fb); | ||
391 | kfree(ufb); | ||
392 | } | ||
393 | |||
394 | static const struct drm_framebuffer_funcs udlfb_funcs = { | ||
395 | .destroy = udl_user_framebuffer_destroy, | ||
396 | .dirty = udl_user_framebuffer_dirty, | ||
397 | .create_handle = NULL, | ||
398 | }; | ||
399 | |||
400 | |||
401 | static int | ||
402 | udl_framebuffer_init(struct drm_device *dev, | ||
403 | struct udl_framebuffer *ufb, | ||
404 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
405 | struct udl_gem_object *obj) | ||
406 | { | ||
407 | int ret; | ||
408 | |||
409 | ufb->obj = obj; | ||
410 | ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs); | ||
411 | drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd); | ||
412 | return ret; | ||
413 | } | ||
414 | |||
415 | |||
416 | static int udlfb_create(struct udl_fbdev *ufbdev, | ||
417 | struct drm_fb_helper_surface_size *sizes) | ||
418 | { | ||
419 | struct drm_device *dev = ufbdev->helper.dev; | ||
420 | struct fb_info *info; | ||
421 | struct device *device = &dev->usbdev->dev; | ||
422 | struct drm_framebuffer *fb; | ||
423 | struct drm_mode_fb_cmd2 mode_cmd; | ||
424 | struct udl_gem_object *obj; | ||
425 | uint32_t size; | ||
426 | int ret = 0; | ||
427 | |||
428 | if (sizes->surface_bpp == 24) | ||
429 | sizes->surface_bpp = 32; | ||
430 | |||
431 | mode_cmd.width = sizes->surface_width; | ||
432 | mode_cmd.height = sizes->surface_height; | ||
433 | mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); | ||
434 | |||
435 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, | ||
436 | sizes->surface_depth); | ||
437 | |||
438 | size = mode_cmd.pitches[0] * mode_cmd.height; | ||
439 | size = ALIGN(size, PAGE_SIZE); | ||
440 | |||
441 | obj = udl_gem_alloc_object(dev, size); | ||
442 | if (!obj) | ||
443 | goto out; | ||
444 | |||
445 | ret = udl_gem_vmap(obj); | ||
446 | if (ret) { | ||
447 | DRM_ERROR("failed to vmap fb\n"); | ||
448 | goto out_gfree; | ||
449 | } | ||
450 | |||
451 | info = framebuffer_alloc(0, device); | ||
452 | if (!info) { | ||
453 | ret = -ENOMEM; | ||
454 | goto out_gfree; | ||
455 | } | ||
456 | info->par = ufbdev; | ||
457 | |||
458 | ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj); | ||
459 | if (ret) | ||
460 | goto out_gfree; | ||
461 | |||
462 | fb = &ufbdev->ufb.base; | ||
463 | |||
464 | ufbdev->helper.fb = fb; | ||
465 | ufbdev->helper.fbdev = info; | ||
466 | |||
467 | strcpy(info->fix.id, "udldrmfb"); | ||
468 | |||
469 | info->screen_base = ufbdev->ufb.obj->vmapping; | ||
470 | info->fix.smem_len = size; | ||
471 | info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping; | ||
472 | |||
473 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | ||
474 | info->fbops = &udlfb_ops; | ||
475 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
476 | drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height); | ||
477 | |||
478 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
479 | if (ret) { | ||
480 | ret = -ENOMEM; | ||
481 | goto out_gfree; | ||
482 | } | ||
483 | |||
484 | |||
485 | DRM_DEBUG_KMS("allocated %dx%d vmal %p\n", | ||
486 | fb->width, fb->height, | ||
487 | ufbdev->ufb.obj->vmapping); | ||
488 | |||
489 | return ret; | ||
490 | out_gfree: | ||
491 | drm_gem_object_unreference(&ufbdev->ufb.obj->base); | ||
492 | out: | ||
493 | return ret; | ||
494 | } | ||
495 | |||
496 | static int udl_fb_find_or_create_single(struct drm_fb_helper *helper, | ||
497 | struct drm_fb_helper_surface_size *sizes) | ||
498 | { | ||
499 | struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper; | ||
500 | int new_fb = 0; | ||
501 | int ret; | ||
502 | |||
503 | if (!helper->fb) { | ||
504 | ret = udlfb_create(ufbdev, sizes); | ||
505 | if (ret) | ||
506 | return ret; | ||
507 | |||
508 | new_fb = 1; | ||
509 | } | ||
510 | return new_fb; | ||
511 | } | ||
512 | |||
513 | static struct drm_fb_helper_funcs udl_fb_helper_funcs = { | ||
514 | .gamma_set = udl_crtc_fb_gamma_set, | ||
515 | .gamma_get = udl_crtc_fb_gamma_get, | ||
516 | .fb_probe = udl_fb_find_or_create_single, | ||
517 | }; | ||
518 | |||
519 | static void udl_fbdev_destroy(struct drm_device *dev, | ||
520 | struct udl_fbdev *ufbdev) | ||
521 | { | ||
522 | struct fb_info *info; | ||
523 | if (ufbdev->helper.fbdev) { | ||
524 | info = ufbdev->helper.fbdev; | ||
525 | unregister_framebuffer(info); | ||
526 | if (info->cmap.len) | ||
527 | fb_dealloc_cmap(&info->cmap); | ||
528 | framebuffer_release(info); | ||
529 | } | ||
530 | drm_fb_helper_fini(&ufbdev->helper); | ||
531 | drm_framebuffer_cleanup(&ufbdev->ufb.base); | ||
532 | drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); | ||
533 | } | ||
534 | |||
535 | int udl_fbdev_init(struct drm_device *dev) | ||
536 | { | ||
537 | struct udl_device *udl = dev->dev_private; | ||
538 | int bpp_sel = fb_bpp; | ||
539 | struct udl_fbdev *ufbdev; | ||
540 | int ret; | ||
541 | |||
542 | ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL); | ||
543 | if (!ufbdev) | ||
544 | return -ENOMEM; | ||
545 | |||
546 | udl->fbdev = ufbdev; | ||
547 | ufbdev->helper.funcs = &udl_fb_helper_funcs; | ||
548 | |||
549 | ret = drm_fb_helper_init(dev, &ufbdev->helper, | ||
550 | 1, 1); | ||
551 | if (ret) { | ||
552 | kfree(ufbdev); | ||
553 | return ret; | ||
554 | |||
555 | } | ||
556 | |||
557 | drm_fb_helper_single_add_all_connectors(&ufbdev->helper); | ||
558 | drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel); | ||
559 | return 0; | ||
560 | } | ||
561 | |||
562 | void udl_fbdev_cleanup(struct drm_device *dev) | ||
563 | { | ||
564 | struct udl_device *udl = dev->dev_private; | ||
565 | if (!udl->fbdev) | ||
566 | return; | ||
567 | |||
568 | udl_fbdev_destroy(dev, udl->fbdev); | ||
569 | kfree(udl->fbdev); | ||
570 | udl->fbdev = NULL; | ||
571 | } | ||
572 | |||
573 | void udl_fbdev_unplug(struct drm_device *dev) | ||
574 | { | ||
575 | struct udl_device *udl = dev->dev_private; | ||
576 | struct udl_fbdev *ufbdev; | ||
577 | if (!udl->fbdev) | ||
578 | return; | ||
579 | |||
580 | ufbdev = udl->fbdev; | ||
581 | if (ufbdev->helper.fbdev) { | ||
582 | struct fb_info *info; | ||
583 | info = ufbdev->helper.fbdev; | ||
584 | unlink_framebuffer(info); | ||
585 | } | ||
586 | } | ||
587 | |||
588 | struct drm_framebuffer * | ||
589 | udl_fb_user_fb_create(struct drm_device *dev, | ||
590 | struct drm_file *file, | ||
591 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
592 | { | ||
593 | struct drm_gem_object *obj; | ||
594 | struct udl_framebuffer *ufb; | ||
595 | int ret; | ||
596 | |||
597 | obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]); | ||
598 | if (obj == NULL) | ||
599 | return ERR_PTR(-ENOENT); | ||
600 | |||
601 | ufb = kzalloc(sizeof(*ufb), GFP_KERNEL); | ||
602 | if (ufb == NULL) | ||
603 | return ERR_PTR(-ENOMEM); | ||
604 | |||
605 | ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj)); | ||
606 | if (ret) { | ||
607 | kfree(ufb); | ||
608 | return ERR_PTR(-EINVAL); | ||
609 | } | ||
610 | return &ufb->base; | ||
611 | } | ||
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c new file mode 100644 index 000000000000..852642dc1187 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License v2. See the file COPYING in the main directory of this archive for | ||
6 | * more details. | ||
7 | */ | ||
8 | |||
9 | #include "drmP.h" | ||
10 | #include "udl_drv.h" | ||
11 | #include <linux/shmem_fs.h> | ||
12 | |||
13 | struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, | ||
14 | size_t size) | ||
15 | { | ||
16 | struct udl_gem_object *obj; | ||
17 | |||
18 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
19 | if (obj == NULL) | ||
20 | return NULL; | ||
21 | |||
22 | if (drm_gem_object_init(dev, &obj->base, size) != 0) { | ||
23 | kfree(obj); | ||
24 | return NULL; | ||
25 | } | ||
26 | |||
27 | return obj; | ||
28 | } | ||
29 | |||
30 | static int | ||
31 | udl_gem_create(struct drm_file *file, | ||
32 | struct drm_device *dev, | ||
33 | uint64_t size, | ||
34 | uint32_t *handle_p) | ||
35 | { | ||
36 | struct udl_gem_object *obj; | ||
37 | int ret; | ||
38 | u32 handle; | ||
39 | |||
40 | size = roundup(size, PAGE_SIZE); | ||
41 | |||
42 | obj = udl_gem_alloc_object(dev, size); | ||
43 | if (obj == NULL) | ||
44 | return -ENOMEM; | ||
45 | |||
46 | ret = drm_gem_handle_create(file, &obj->base, &handle); | ||
47 | if (ret) { | ||
48 | drm_gem_object_release(&obj->base); | ||
49 | kfree(obj); | ||
50 | return ret; | ||
51 | } | ||
52 | |||
53 | drm_gem_object_unreference(&obj->base); | ||
54 | *handle_p = handle; | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | int udl_dumb_create(struct drm_file *file, | ||
59 | struct drm_device *dev, | ||
60 | struct drm_mode_create_dumb *args) | ||
61 | { | ||
62 | args->pitch = args->width * ((args->bpp + 1) / 8); | ||
63 | args->size = args->pitch * args->height; | ||
64 | return udl_gem_create(file, dev, | ||
65 | args->size, &args->handle); | ||
66 | } | ||
67 | |||
68 | int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev, | ||
69 | uint32_t handle) | ||
70 | { | ||
71 | return drm_gem_handle_delete(file, handle); | ||
72 | } | ||
73 | |||
74 | int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
75 | { | ||
76 | struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); | ||
77 | struct page *page; | ||
78 | unsigned int page_offset; | ||
79 | int ret = 0; | ||
80 | |||
81 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | ||
82 | PAGE_SHIFT; | ||
83 | |||
84 | if (!obj->pages) | ||
85 | return VM_FAULT_SIGBUS; | ||
86 | |||
87 | page = obj->pages[page_offset]; | ||
88 | ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); | ||
89 | switch (ret) { | ||
90 | case -EAGAIN: | ||
91 | set_need_resched(); | ||
92 | case 0: | ||
93 | case -ERESTARTSYS: | ||
94 | return VM_FAULT_NOPAGE; | ||
95 | case -ENOMEM: | ||
96 | return VM_FAULT_OOM; | ||
97 | default: | ||
98 | return VM_FAULT_SIGBUS; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | int udl_gem_init_object(struct drm_gem_object *obj) | ||
103 | { | ||
104 | BUG(); | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) | ||
110 | { | ||
111 | int page_count, i; | ||
112 | struct page *page; | ||
113 | struct inode *inode; | ||
114 | struct address_space *mapping; | ||
115 | |||
116 | if (obj->pages) | ||
117 | return 0; | ||
118 | |||
119 | page_count = obj->base.size / PAGE_SIZE; | ||
120 | BUG_ON(obj->pages != NULL); | ||
121 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | ||
122 | if (obj->pages == NULL) | ||
123 | return -ENOMEM; | ||
124 | |||
125 | inode = obj->base.filp->f_path.dentry->d_inode; | ||
126 | mapping = inode->i_mapping; | ||
127 | gfpmask |= mapping_gfp_mask(mapping); | ||
128 | |||
129 | for (i = 0; i < page_count; i++) { | ||
130 | page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); | ||
131 | if (IS_ERR(page)) | ||
132 | goto err_pages; | ||
133 | obj->pages[i] = page; | ||
134 | } | ||
135 | |||
136 | return 0; | ||
137 | err_pages: | ||
138 | while (i--) | ||
139 | page_cache_release(obj->pages[i]); | ||
140 | drm_free_large(obj->pages); | ||
141 | obj->pages = NULL; | ||
142 | return PTR_ERR(page); | ||
143 | } | ||
144 | |||
145 | static void udl_gem_put_pages(struct udl_gem_object *obj) | ||
146 | { | ||
147 | int page_count = obj->base.size / PAGE_SIZE; | ||
148 | int i; | ||
149 | |||
150 | for (i = 0; i < page_count; i++) | ||
151 | page_cache_release(obj->pages[i]); | ||
152 | |||
153 | drm_free_large(obj->pages); | ||
154 | obj->pages = NULL; | ||
155 | } | ||
156 | |||
157 | int udl_gem_vmap(struct udl_gem_object *obj) | ||
158 | { | ||
159 | int page_count = obj->base.size / PAGE_SIZE; | ||
160 | int ret; | ||
161 | |||
162 | ret = udl_gem_get_pages(obj, GFP_KERNEL); | ||
163 | if (ret) | ||
164 | return ret; | ||
165 | |||
166 | obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL); | ||
167 | if (!obj->vmapping) | ||
168 | return -ENOMEM; | ||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | void udl_gem_vunmap(struct udl_gem_object *obj) | ||
173 | { | ||
174 | if (obj->vmapping) | ||
175 | vunmap(obj->vmapping); | ||
176 | |||
177 | udl_gem_put_pages(obj); | ||
178 | } | ||
179 | |||
180 | void udl_gem_free_object(struct drm_gem_object *gem_obj) | ||
181 | { | ||
182 | struct udl_gem_object *obj = to_udl_bo(gem_obj); | ||
183 | |||
184 | if (obj->vmapping) | ||
185 | udl_gem_vunmap(obj); | ||
186 | |||
187 | if (obj->pages) | ||
188 | udl_gem_put_pages(obj); | ||
189 | |||
190 | if (gem_obj->map_list.map) | ||
191 | drm_gem_free_mmap_offset(gem_obj); | ||
192 | } | ||
193 | |||
194 | /* The dumb interface doesn't work with the straight GEM mmap | ||
195 | interface; it expects to mmap on the DRM fd, as usual */ | ||
196 | int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, | ||
197 | uint32_t handle, uint64_t *offset) | ||
198 | { | ||
199 | struct udl_gem_object *gobj; | ||
200 | struct drm_gem_object *obj; | ||
201 | int ret = 0; | ||
202 | |||
203 | mutex_lock(&dev->struct_mutex); | ||
204 | obj = drm_gem_object_lookup(dev, file, handle); | ||
205 | if (obj == NULL) { | ||
206 | ret = -ENOENT; | ||
207 | goto unlock; | ||
208 | } | ||
209 | gobj = to_udl_bo(obj); | ||
210 | |||
211 | ret = udl_gem_get_pages(gobj, GFP_KERNEL); | ||
212 | if (ret) | ||
213 | return ret; | ||
214 | if (!gobj->base.map_list.map) { | ||
215 | ret = drm_gem_create_mmap_offset(obj); | ||
216 | if (ret) | ||
217 | goto out; | ||
218 | } | ||
219 | |||
220 | *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT; | ||
221 | |||
222 | out: | ||
223 | drm_gem_object_unreference(&gobj->base); | ||
224 | unlock: | ||
225 | mutex_unlock(&dev->struct_mutex); | ||
226 | return ret; | ||
227 | } | ||
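
The comment above udl_gem_mmap() is the key point for userspace: dumb buffers are mapped through the DRM fd, not through a GEM-specific node. A hedged sketch of that flow, assuming this helper is wired up as the driver's dumb_map_offset hook (the ioctl plumbing itself is not part of this file):

    /* Userspace sketch: create a dumb BO, ask the kernel for its fake mmap
     * offset, then mmap() the DRM fd itself.  Error handling is trimmed;
     * mmap() returns MAP_FAILED on error. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    static void *map_dumb_fb(int drm_fd, uint32_t width, uint32_t height)
    {
            struct drm_mode_create_dumb create = {
                    .width = width, .height = height, .bpp = 32,
            };
            struct drm_mode_map_dumb map = { 0 };

            if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return MAP_FAILED;

            map.handle = create.handle;
            if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    return MAP_FAILED;

            /* note: the mapping is made on the DRM fd itself */
            return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, drm_fd, map.offset);
    }
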
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c new file mode 100644 index 000000000000..a8d5f09428c7 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_main.c | |||
@@ -0,0 +1,338 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * | ||
4 | * based in parts on udlfb.c: | ||
5 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
6 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
7 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License v2. See the file COPYING in the main directory of this archive for | ||
11 | * more details. | ||
12 | */ | ||
13 | #include "drmP.h" | ||
14 | #include "udl_drv.h" | ||
15 | |||
16 | /* BULK_SIZE as per usb-skeleton. Can we get a full page and avoid overhead? */ | ||
17 | #define BULK_SIZE 512 | ||
18 | |||
19 | #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE) | ||
20 | #define WRITES_IN_FLIGHT (4) | ||
21 | #define MAX_VENDOR_DESCRIPTOR_SIZE 256 | ||
22 | |||
23 | #define GET_URB_TIMEOUT HZ | ||
24 | #define FREE_URB_TIMEOUT (HZ*2) | ||
25 | |||
26 | static int udl_parse_vendor_descriptor(struct drm_device *dev, | ||
27 | struct usb_device *usbdev) | ||
28 | { | ||
29 | struct udl_device *udl = dev->dev_private; | ||
30 | char *desc; | ||
31 | char *buf; | ||
32 | char *desc_end; | ||
33 | |||
34 | u8 total_len = 0; | ||
35 | |||
36 | buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL); | ||
37 | if (!buf) | ||
38 | return false; | ||
39 | desc = buf; | ||
40 | |||
41 | total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ | ||
42 | 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); | ||
43 | if (total_len > 5) { | ||
44 | DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \ | ||
45 | "%02x %02x %02x %02x %02x %02x %02x\n", | ||
46 | total_len, desc[0], | ||
47 | desc[1], desc[2], desc[3], desc[4], desc[5], desc[6], | ||
48 | desc[7], desc[8], desc[9], desc[10]); | ||
49 | |||
50 | if ((desc[0] != total_len) || /* descriptor length */ | ||
51 | (desc[1] != 0x5f) || /* vendor descriptor type */ | ||
52 | (desc[2] != 0x01) || /* version (2 bytes) */ | ||
53 | (desc[3] != 0x00) || | ||
54 | (desc[4] != total_len - 2)) /* length after type */ | ||
55 | goto unrecognized; | ||
56 | |||
57 | desc_end = desc + total_len; | ||
58 | desc += 5; /* the fixed header we've already parsed */ | ||
59 | |||
60 | while (desc < desc_end) { | ||
61 | u8 length; | ||
62 | u16 key; | ||
63 | |||
64 | key = *((u16 *) desc); | ||
65 | desc += sizeof(u16); | ||
66 | length = *desc; | ||
67 | desc++; | ||
68 | |||
69 | switch (key) { | ||
70 | case 0x0200: { /* max_area */ | ||
71 | u32 max_area; | ||
72 | max_area = le32_to_cpu(*((u32 *)desc)); | ||
73 | DRM_DEBUG("DL chip limited to %d pixel modes\n", | ||
74 | max_area); | ||
75 | udl->sku_pixel_limit = max_area; | ||
76 | break; | ||
77 | } | ||
78 | default: | ||
79 | break; | ||
80 | } | ||
81 | desc += length; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | goto success; | ||
86 | |||
87 | unrecognized: | ||
88 | /* allow udlfb to load for now even if firmware unrecognized */ | ||
89 | DRM_ERROR("Unrecognized vendor firmware descriptor\n"); | ||
90 | |||
91 | success: | ||
92 | kfree(buf); | ||
93 | return true; | ||
94 | } | ||
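
The checks in udl_parse_vendor_descriptor() imply the following on-the-wire layout; the structure below is only a reconstruction from those checks, and the field names are illustrative rather than taken from DisplayLink documentation:

    #include <stdint.h>

    struct udl_vendor_descriptor_hdr {
            uint8_t total_len;    /* desc[0]: length of the whole descriptor */
            uint8_t type;         /* desc[1]: 0x5f, vendor specific          */
            uint8_t version[2];   /* desc[2..3]: 0x01 0x00                   */
            uint8_t payload_len;  /* desc[4]: total_len - 2                  */
            /* Followed by records of the form:
             *   little-endian 16-bit key, 8-bit length, 'length' value bytes.
             * Key 0x0200 carries a 32-bit little-endian max_area, stored by
             * the parser as the SKU pixel limit. */
    } __attribute__((packed));
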
95 | |||
96 | static void udl_release_urb_work(struct work_struct *work) | ||
97 | { | ||
98 | struct urb_node *unode = container_of(work, struct urb_node, | ||
99 | release_urb_work.work); | ||
100 | |||
101 | up(&unode->dev->urbs.limit_sem); | ||
102 | } | ||
103 | |||
104 | void udl_urb_completion(struct urb *urb) | ||
105 | { | ||
106 | struct urb_node *unode = urb->context; | ||
107 | struct udl_device *udl = unode->dev; | ||
108 | unsigned long flags; | ||
109 | |||
110 | /* sync/async unlink faults aren't errors */ | ||
111 | if (urb->status) { | ||
112 | if (!(urb->status == -ENOENT || | ||
113 | urb->status == -ECONNRESET || | ||
114 | urb->status == -ESHUTDOWN)) { | ||
115 | DRM_ERROR("%s - nonzero write bulk status received: %d\n", | ||
116 | __func__, urb->status); | ||
117 | atomic_set(&udl->lost_pixels, 1); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */ | ||
122 | |||
123 | spin_lock_irqsave(&udl->urbs.lock, flags); | ||
124 | list_add_tail(&unode->entry, &udl->urbs.list); | ||
125 | udl->urbs.available++; | ||
126 | spin_unlock_irqrestore(&udl->urbs.lock, flags); | ||
127 | |||
128 | #if 0 | ||
129 | /* | ||
130 | * When using fb_defio, we deadlock if up() is called | ||
131 | * while another is waiting. So queue to another process. | ||
132 | */ | ||
133 | if (fb_defio) | ||
134 | schedule_delayed_work(&unode->release_urb_work, 0); | ||
135 | else | ||
136 | #endif | ||
137 | up(&udl->urbs.limit_sem); | ||
138 | } | ||
139 | |||
140 | static void udl_free_urb_list(struct drm_device *dev) | ||
141 | { | ||
142 | struct udl_device *udl = dev->dev_private; | ||
143 | int count = udl->urbs.count; | ||
144 | struct list_head *node; | ||
145 | struct urb_node *unode; | ||
146 | struct urb *urb; | ||
147 | int ret; | ||
148 | unsigned long flags; | ||
149 | |||
150 | DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); | ||
151 | |||
152 | /* keep waiting and freeing, until we've got 'em all */ | ||
153 | while (count--) { | ||
154 | |||
155 | /* Getting interrupted means a leak, but ok at shutdown */ | ||
156 | ret = down_interruptible(&udl->urbs.limit_sem); | ||
157 | if (ret) | ||
158 | break; | ||
159 | |||
160 | spin_lock_irqsave(&udl->urbs.lock, flags); | ||
161 | |||
162 | node = udl->urbs.list.next; /* have reserved one with sem */ | ||
163 | list_del_init(node); | ||
164 | |||
165 | spin_unlock_irqrestore(&udl->urbs.lock, flags); | ||
166 | |||
167 | unode = list_entry(node, struct urb_node, entry); | ||
168 | urb = unode->urb; | ||
169 | |||
170 | /* Free each separately allocated piece */ | ||
171 | usb_free_coherent(urb->dev, udl->urbs.size, | ||
172 | urb->transfer_buffer, urb->transfer_dma); | ||
173 | usb_free_urb(urb); | ||
174 | kfree(node); | ||
175 | } | ||
176 | udl->urbs.count = 0; | ||
177 | } | ||
178 | |||
179 | static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) | ||
180 | { | ||
181 | struct udl_device *udl = dev->dev_private; | ||
182 | int i = 0; | ||
183 | struct urb *urb; | ||
184 | struct urb_node *unode; | ||
185 | char *buf; | ||
186 | |||
187 | spin_lock_init(&udl->urbs.lock); | ||
188 | |||
189 | udl->urbs.size = size; | ||
190 | INIT_LIST_HEAD(&udl->urbs.list); | ||
191 | |||
192 | while (i < count) { | ||
193 | unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); | ||
194 | if (!unode) | ||
195 | break; | ||
196 | unode->dev = udl; | ||
197 | |||
198 | INIT_DELAYED_WORK(&unode->release_urb_work, | ||
199 | udl_release_urb_work); | ||
200 | |||
201 | urb = usb_alloc_urb(0, GFP_KERNEL); | ||
202 | if (!urb) { | ||
203 | kfree(unode); | ||
204 | break; | ||
205 | } | ||
206 | unode->urb = urb; | ||
207 | |||
208 | buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL, | ||
209 | &urb->transfer_dma); | ||
210 | if (!buf) { | ||
211 | kfree(unode); | ||
212 | usb_free_urb(urb); | ||
213 | break; | ||
214 | } | ||
215 | |||
216 | /* urb->transfer_buffer_length set to actual before submit */ | ||
217 | usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1), | ||
218 | buf, size, udl_urb_completion, unode); | ||
219 | urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
220 | |||
221 | list_add_tail(&unode->entry, &udl->urbs.list); | ||
222 | |||
223 | i++; | ||
224 | } | ||
225 | |||
226 | sema_init(&udl->urbs.limit_sem, i); | ||
227 | udl->urbs.count = i; | ||
228 | udl->urbs.available = i; | ||
229 | |||
230 | DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size); | ||
231 | |||
232 | return i; | ||
233 | } | ||
234 | |||
235 | struct urb *udl_get_urb(struct drm_device *dev) | ||
236 | { | ||
237 | struct udl_device *udl = dev->dev_private; | ||
238 | int ret = 0; | ||
239 | struct list_head *entry; | ||
240 | struct urb_node *unode; | ||
241 | struct urb *urb = NULL; | ||
242 | unsigned long flags; | ||
243 | |||
244 | /* Wait for an in-flight buffer to complete and get re-queued */ | ||
245 | ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT); | ||
246 | if (ret) { | ||
247 | atomic_set(&udl->lost_pixels, 1); | ||
248 | DRM_INFO("wait for urb interrupted: %x available: %d\n", | ||
249 | ret, udl->urbs.available); | ||
250 | goto error; | ||
251 | } | ||
252 | |||
253 | spin_lock_irqsave(&udl->urbs.lock, flags); | ||
254 | |||
255 | BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */ | ||
256 | entry = udl->urbs.list.next; | ||
257 | list_del_init(entry); | ||
258 | udl->urbs.available--; | ||
259 | |||
260 | spin_unlock_irqrestore(&udl->urbs.lock, flags); | ||
261 | |||
262 | unode = list_entry(entry, struct urb_node, entry); | ||
263 | urb = unode->urb; | ||
264 | |||
265 | error: | ||
266 | return urb; | ||
267 | } | ||
268 | |||
269 | int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) | ||
270 | { | ||
271 | struct udl_device *udl = dev->dev_private; | ||
272 | int ret; | ||
273 | |||
274 | BUG_ON(len > udl->urbs.size); | ||
275 | |||
276 | urb->transfer_buffer_length = len; /* set to actual payload len */ | ||
277 | ret = usb_submit_urb(urb, GFP_ATOMIC); | ||
278 | if (ret) { | ||
279 | udl_urb_completion(urb); /* because no one else will */ | ||
280 | atomic_set(&udl->lost_pixels, 1); | ||
281 | DRM_ERROR("usb_submit_urb error %x\n", ret); | ||
282 | } | ||
283 | return ret; | ||
284 | } | ||
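
udl_get_urb() and udl_submit_urb() together form a small pool API. The sketch below shows the producer pattern the rest of this series follows (mirroring udl_handle_damage() and the DPMS path); the function name is illustrative only:

    /* Illustrative only: drain one urb from the pool, fill it with 0xAF
     * command bytes and hand it back one way or the other. */
    static int udl_send_example(struct drm_device *dev)
    {
            struct urb *urb;
            char *wrptr;
            int len;

            urb = udl_get_urb(dev);          /* may block up to GET_URB_TIMEOUT */
            if (!urb)
                    return -ENOMEM;          /* pool exhausted, lost_pixels set */

            wrptr = urb->transfer_buffer;
            /* ... append command bytes here, advancing wrptr ... */

            len = wrptr - (char *)urb->transfer_buffer;
            if (!len) {
                    udl_urb_completion(urb); /* nothing written: hand it back */
                    return 0;
            }

            /* submit only the bytes actually written; on failure
             * udl_submit_urb() recycles the urb itself */
            return udl_submit_urb(dev, urb, len);
    }
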
285 | |||
286 | int udl_driver_load(struct drm_device *dev, unsigned long flags) | ||
287 | { | ||
288 | struct udl_device *udl; | ||
289 | int ret; | ||
290 | |||
291 | DRM_DEBUG("\n"); | ||
292 | udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL); | ||
293 | if (!udl) | ||
294 | return -ENOMEM; | ||
295 | |||
296 | udl->ddev = dev; | ||
297 | dev->dev_private = udl; | ||
298 | |||
299 | if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) { | ||
300 | DRM_ERROR("firmware not recognized. Assume incompatible device\n"); | ||
301 | goto err; | ||
302 | } | ||
303 | |||
304 | if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { | ||
305 | ret = -ENOMEM; | ||
306 | DRM_ERROR("udl_alloc_urb_list failed\n"); | ||
307 | goto err; | ||
308 | } | ||
309 | |||
310 | DRM_DEBUG("\n"); | ||
311 | ret = udl_modeset_init(dev); | ||
312 | |||
313 | ret = udl_fbdev_init(dev); | ||
314 | return 0; | ||
315 | err: | ||
316 | kfree(udl); | ||
317 | DRM_ERROR("%d\n", ret); | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | int udl_drop_usb(struct drm_device *dev) | ||
322 | { | ||
323 | udl_free_urb_list(dev); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | int udl_driver_unload(struct drm_device *dev) | ||
328 | { | ||
329 | struct udl_device *udl = dev->dev_private; | ||
330 | |||
331 | if (udl->urbs.count) | ||
332 | udl_free_urb_list(dev); | ||
333 | |||
334 | udl_fbdev_cleanup(dev); | ||
335 | udl_modeset_cleanup(dev); | ||
336 | kfree(udl); | ||
337 | return 0; | ||
338 | } | ||
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c new file mode 100644 index 000000000000..b3ecb3d12a1d --- /dev/null +++ b/drivers/gpu/drm/udl/udl_modeset.c | |||
@@ -0,0 +1,414 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * | ||
4 | * based in parts on udlfb.c: | ||
5 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
6 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
7 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
8 | |||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License v2. See the file COPYING in the main directory of this archive for | ||
11 | * more details. | ||
12 | */ | ||
13 | |||
14 | #include "drmP.h" | ||
15 | #include "drm_crtc.h" | ||
16 | #include "drm_crtc_helper.h" | ||
17 | #include "udl_drv.h" | ||
18 | |||
19 | /* | ||
20 | * All DisplayLink bulk operations start with 0xAF, followed by a specific opcode. | ||
21 | * All operations are written to buffers which are later sent to the device. | ||
22 | */ | ||
23 | static char *udl_set_register(char *buf, u8 reg, u8 val) | ||
24 | { | ||
25 | *buf++ = 0xAF; | ||
26 | *buf++ = 0x20; | ||
27 | *buf++ = reg; | ||
28 | *buf++ = val; | ||
29 | return buf; | ||
30 | } | ||
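
All the helpers that follow funnel through udl_set_register(); as a concrete example of the byte stream it produces, the DPMS-off path later in this file builds:

    /* Worked example of the command bytes appended, in order:
     *
     *   udl_vidreg_lock()          -> 0xAF 0x20 0xFF 0x00
     *   udl_enable_hvsync(false)   -> 0xAF 0x20 0x1F 0x07
     *   udl_vidreg_unlock()        -> 0xAF 0x20 0xFF 0xFF
     *
     * i.e. every operation is 0xAF, then the 0x20 opcode (register write),
     * then the register index and value. */
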
31 | |||
32 | static char *udl_vidreg_lock(char *buf) | ||
33 | { | ||
34 | return udl_set_register(buf, 0xFF, 0x00); | ||
35 | } | ||
36 | |||
37 | static char *udl_vidreg_unlock(char *buf) | ||
38 | { | ||
39 | return udl_set_register(buf, 0xFF, 0xFF); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * On/Off for driving the DisplayLink framebuffer to the display | ||
44 | * 0x00 H and V sync on | ||
45 | * 0x01 H and V sync off (screen blank but powered) | ||
46 | * 0x07 DPMS powerdown (requires modeset to come back) | ||
47 | */ | ||
48 | static char *udl_enable_hvsync(char *buf, bool enable) | ||
49 | { | ||
50 | if (enable) | ||
51 | return udl_set_register(buf, 0x1F, 0x00); | ||
52 | else | ||
53 | return udl_set_register(buf, 0x1F, 0x07); | ||
54 | } | ||
55 | |||
56 | static char *udl_set_color_depth(char *buf, u8 selection) | ||
57 | { | ||
58 | return udl_set_register(buf, 0x00, selection); | ||
59 | } | ||
60 | |||
61 | static char *udl_set_base16bpp(char *wrptr, u32 base) | ||
62 | { | ||
63 | /* the base pointer is 16 bits wide, 0x20 is hi byte. */ | ||
64 | wrptr = udl_set_register(wrptr, 0x20, base >> 16); | ||
65 | wrptr = udl_set_register(wrptr, 0x21, base >> 8); | ||
66 | return udl_set_register(wrptr, 0x22, base); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * DisplayLink HW has separate 16bpp and 8bpp framebuffers. | ||
71 | * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer | ||
72 | */ | ||
73 | static char *udl_set_base8bpp(char *wrptr, u32 base) | ||
74 | { | ||
75 | wrptr = udl_set_register(wrptr, 0x26, base >> 16); | ||
76 | wrptr = udl_set_register(wrptr, 0x27, base >> 8); | ||
77 | return udl_set_register(wrptr, 0x28, base); | ||
78 | } | ||
79 | |||
80 | static char *udl_set_register_16(char *wrptr, u8 reg, u16 value) | ||
81 | { | ||
82 | wrptr = udl_set_register(wrptr, reg, value >> 8); | ||
83 | return udl_set_register(wrptr, reg+1, value); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * This is kind of weird because the controller takes some | ||
88 | * register values in a different byte order than other registers. | ||
89 | */ | ||
90 | static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value) | ||
91 | { | ||
92 | wrptr = udl_set_register(wrptr, reg, value); | ||
93 | return udl_set_register(wrptr, reg+1, value >> 8); | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * LFSR is linear feedback shift register. The reason we have this is | ||
98 | * because the display controller needs to minimize the clock depth of | ||
99 | * various counters used in the display path. So this code reverses the | ||
100 | * provided value into the lfsr16 value by counting backwards to get | ||
101 | * the value that needs to be set in the hardware comparator to get the | ||
102 | * same actual count. This makes sense once you read the above a couple of | ||
103 | * times and think about it from a hardware perspective. | ||
104 | */ | ||
105 | static u16 udl_lfsr16(u16 actual_count) | ||
106 | { | ||
107 | u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */ | ||
108 | |||
109 | while (actual_count--) { | ||
110 | lv = ((lv << 1) | | ||
111 | (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1)) | ||
112 | & 0xFFFF; | ||
113 | } | ||
114 | |||
115 | return (u16) lv; | ||
116 | } | ||
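
A quick host-side way to convince yourself of what udl_lfsr16() computes is to run the same polynomial in the opposite direction: step from the reset value 0xFFFF until the programmed comparator value reappears, which recovers the original count. This is only an illustrative sanity check, not driver code:

    #include <stdint.h>

    static uint16_t lfsr16_step(uint16_t lv)
    {
            return ((lv << 1) |
                    (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1)) & 0xFFFF;
    }

    static int lfsr16_to_count(uint16_t reg_value)
    {
            uint16_t lv = 0xFFFF;
            int count;

            for (count = 0; count < 0xFFFF; count++) {
                    if (lv == reg_value)
                            return count;   /* udl_lfsr16(count) == reg_value */
                    lv = lfsr16_step(lv);
            }
            return -1;                      /* value not reachable from 0xFFFF */
    }
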
117 | |||
118 | /* | ||
119 | * This does LFSR conversion on the value that is to be written. | ||
120 | * See LFSR explanation above for more detail. | ||
121 | */ | ||
122 | static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value) | ||
123 | { | ||
124 | return udl_set_register_16(wrptr, reg, udl_lfsr16(value)); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * This takes a standard fbdev screeninfo struct and all of its monitor mode | ||
129 | * details and converts them into the DisplayLink equivalent register commands. | ||
130 | ERR(vreg(dev, 0x00, (color_depth == 16) ? 0 : 1)); | ||
131 | ERR(vreg_lfsr16(dev, 0x01, xDisplayStart)); | ||
132 | ERR(vreg_lfsr16(dev, 0x03, xDisplayEnd)); | ||
133 | ERR(vreg_lfsr16(dev, 0x05, yDisplayStart)); | ||
134 | ERR(vreg_lfsr16(dev, 0x07, yDisplayEnd)); | ||
135 | ERR(vreg_lfsr16(dev, 0x09, xEndCount)); | ||
136 | ERR(vreg_lfsr16(dev, 0x0B, hSyncStart)); | ||
137 | ERR(vreg_lfsr16(dev, 0x0D, hSyncEnd)); | ||
138 | ERR(vreg_big_endian(dev, 0x0F, hPixels)); | ||
139 | ERR(vreg_lfsr16(dev, 0x11, yEndCount)); | ||
140 | ERR(vreg_lfsr16(dev, 0x13, vSyncStart)); | ||
141 | ERR(vreg_lfsr16(dev, 0x15, vSyncEnd)); | ||
142 | ERR(vreg_big_endian(dev, 0x17, vPixels)); | ||
143 | ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz)); | ||
144 | |||
145 | ERR(vreg(dev, 0x1F, 0)); | ||
146 | |||
147 | ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK))); | ||
148 | */ | ||
149 | static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode) | ||
150 | { | ||
151 | u16 xds, yds; | ||
152 | u16 xde, yde; | ||
153 | u16 yec; | ||
154 | |||
155 | /* x display start */ | ||
156 | xds = mode->crtc_htotal - mode->crtc_hsync_start; | ||
157 | wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds); | ||
158 | /* x display end */ | ||
159 | xde = xds + mode->crtc_hdisplay; | ||
160 | wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde); | ||
161 | |||
162 | /* y display start */ | ||
163 | yds = mode->crtc_vtotal - mode->crtc_vsync_start; | ||
164 | wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds); | ||
165 | /* y display end */ | ||
166 | yde = yds + mode->crtc_vdisplay; | ||
167 | wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde); | ||
168 | |||
169 | /* x end count is active + blanking - 1 */ | ||
170 | wrptr = udl_set_register_lfsr16(wrptr, 0x09, | ||
171 | mode->crtc_htotal - 1); | ||
172 | |||
173 | /* libdlo hardcodes hsync start to 1 */ | ||
174 | wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1); | ||
175 | |||
176 | /* hsync end is width of sync pulse + 1 */ | ||
177 | wrptr = udl_set_register_lfsr16(wrptr, 0x0D, | ||
178 | mode->crtc_hsync_end - mode->crtc_hsync_start + 1); | ||
179 | |||
180 | /* hpixels is active pixels */ | ||
181 | wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay); | ||
182 | |||
183 | /* yendcount is vertical active + vertical blanking */ | ||
184 | yec = mode->crtc_vtotal; | ||
185 | wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec); | ||
186 | |||
187 | /* libdlo hardcodes vsync start to 0 */ | ||
188 | wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0); | ||
189 | |||
190 | /* vsync end is width of vsync pulse */ | ||
191 | wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start); | ||
192 | |||
193 | /* vpixels is active pixels */ | ||
194 | wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay); | ||
195 | |||
196 | wrptr = udl_set_register_16be(wrptr, 0x1B, | ||
197 | mode->clock / 5); | ||
198 | |||
199 | return wrptr; | ||
200 | } | ||
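
As a worked example of udl_set_vid_cmds(), assume the standard VESA 1024x768@60 timings (65.000 MHz pixel clock, hsync 1048-1184, htotal 1344, vsync 771-777, vtotal 806) and a mode whose crtc_* fields equal the base values:

    /* Register counts handed to the helpers, before any LFSR conversion:
     *   0x01 x display start = 1344 - 1048     = 296
     *   0x03 x display end   = 296 + 1024      = 1320
     *   0x05 y display start = 806 - 771       = 35
     *   0x07 y display end   = 35 + 768        = 803
     *   0x09 x end count     = 1344 - 1        = 1343
     *   0x0B hsync start     = 1               (hardcoded)
     *   0x0D hsync end       = 1184 - 1048 + 1 = 137
     *   0x0F hpixels         = 1024
     *   0x11 y end count     = 806
     *   0x13 vsync start     = 0               (hardcoded)
     *   0x15 vsync end       = 777 - 771       = 6
     *   0x17 vpixels         = 768
     *   0x1B pixel clock     = 65000 / 5       = 13000  (5 kHz units)
     */
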
201 | |||
202 | static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc) | ||
203 | { | ||
204 | struct drm_device *dev = crtc->dev; | ||
205 | struct udl_device *udl = dev->dev_private; | ||
206 | struct urb *urb; | ||
207 | char *buf; | ||
208 | int retval; | ||
209 | |||
210 | urb = udl_get_urb(dev); | ||
211 | if (!urb) | ||
212 | return -ENOMEM; | ||
213 | |||
214 | buf = (char *)urb->transfer_buffer; | ||
215 | |||
216 | memcpy(buf, udl->mode_buf, udl->mode_buf_len); | ||
217 | retval = udl_submit_urb(dev, urb, udl->mode_buf_len); | ||
218 | DRM_INFO("write mode info %d\n", udl->mode_buf_len); | ||
219 | return retval; | ||
220 | } | ||
221 | |||
222 | |||
223 | static void udl_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
224 | { | ||
225 | struct drm_device *dev = crtc->dev; | ||
226 | struct udl_device *udl = dev->dev_private; | ||
227 | int retval; | ||
228 | |||
229 | if (mode == DRM_MODE_DPMS_OFF) { | ||
230 | char *buf; | ||
231 | struct urb *urb; | ||
232 | urb = udl_get_urb(dev); | ||
233 | if (!urb) | ||
234 | return; | ||
235 | |||
236 | buf = (char *)urb->transfer_buffer; | ||
237 | buf = udl_vidreg_lock(buf); | ||
238 | buf = udl_enable_hvsync(buf, false); | ||
239 | buf = udl_vidreg_unlock(buf); | ||
240 | |||
241 | retval = udl_submit_urb(dev, urb, buf - (char *) | ||
242 | urb->transfer_buffer); | ||
243 | } else { | ||
244 | if (udl->mode_buf_len == 0) { | ||
245 | DRM_ERROR("Trying to enable DPMS with no mode\n"); | ||
246 | return; | ||
247 | } | ||
248 | udl_crtc_write_mode_to_hw(crtc); | ||
249 | } | ||
250 | |||
251 | } | ||
252 | |||
253 | static bool udl_crtc_mode_fixup(struct drm_crtc *crtc, | ||
254 | struct drm_display_mode *mode, | ||
255 | struct drm_display_mode *adjusted_mode) | ||
256 | |||
257 | { | ||
258 | return true; | ||
259 | } | ||
260 | |||
261 | #if 0 | ||
262 | static int | ||
263 | udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | ||
264 | int x, int y, enum mode_set_atomic state) | ||
265 | { | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int | ||
270 | udl_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
271 | struct drm_framebuffer *old_fb) | ||
272 | { | ||
273 | return 0; | ||
274 | } | ||
275 | #endif | ||
276 | |||
277 | static int udl_crtc_mode_set(struct drm_crtc *crtc, | ||
278 | struct drm_display_mode *mode, | ||
279 | struct drm_display_mode *adjusted_mode, | ||
280 | int x, int y, | ||
281 | struct drm_framebuffer *old_fb) | ||
282 | |||
283 | { | ||
284 | struct drm_device *dev = crtc->dev; | ||
285 | struct udl_framebuffer *ufb = to_udl_fb(crtc->fb); | ||
286 | struct udl_device *udl = dev->dev_private; | ||
287 | char *buf; | ||
288 | char *wrptr; | ||
289 | int color_depth = 0; | ||
290 | |||
291 | buf = (char *)udl->mode_buf; | ||
292 | |||
293 | /* for now we just clip 24 -> 16 - if we fix that fix this */ | ||
294 | /*if (crtc->fb->bits_per_pixel != 16) | ||
295 | color_depth = 1; */ | ||
296 | |||
297 | /* This first section has to do with setting the base address on the | ||
298 | * controller associated with the display. There are 2 base | ||
299 | * pointers; currently, we only use the 16 bpp segment. | ||
300 | */ | ||
301 | wrptr = udl_vidreg_lock(buf); | ||
302 | wrptr = udl_set_color_depth(wrptr, color_depth); | ||
303 | /* set base for 16bpp segment to 0 */ | ||
304 | wrptr = udl_set_base16bpp(wrptr, 0); | ||
305 | /* set base for 8bpp segment to end of fb */ | ||
306 | wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay); | ||
307 | |||
308 | wrptr = udl_set_vid_cmds(wrptr, adjusted_mode); | ||
309 | wrptr = udl_enable_hvsync(wrptr, true); | ||
310 | wrptr = udl_vidreg_unlock(wrptr); | ||
311 | |||
312 | ufb->active_16 = true; | ||
313 | if (old_fb) { | ||
314 | struct udl_framebuffer *uold_fb = to_udl_fb(old_fb); | ||
315 | uold_fb->active_16 = false; | ||
316 | } | ||
317 | udl->mode_buf_len = wrptr - buf; | ||
318 | |||
319 | /* damage all of it */ | ||
320 | udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height); | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | |||
325 | static void udl_crtc_disable(struct drm_crtc *crtc) | ||
326 | { | ||
327 | |||
328 | |||
329 | } | ||
330 | |||
331 | static void udl_crtc_destroy(struct drm_crtc *crtc) | ||
332 | { | ||
333 | drm_crtc_cleanup(crtc); | ||
334 | kfree(crtc); | ||
335 | } | ||
336 | |||
337 | static void udl_load_lut(struct drm_crtc *crtc) | ||
338 | { | ||
339 | } | ||
340 | |||
341 | static void udl_crtc_prepare(struct drm_crtc *crtc) | ||
342 | { | ||
343 | } | ||
344 | |||
345 | static void udl_crtc_commit(struct drm_crtc *crtc) | ||
346 | { | ||
347 | udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON); | ||
348 | } | ||
349 | |||
350 | static struct drm_crtc_helper_funcs udl_helper_funcs = { | ||
351 | .dpms = udl_crtc_dpms, | ||
352 | .mode_fixup = udl_crtc_mode_fixup, | ||
353 | .mode_set = udl_crtc_mode_set, | ||
354 | .prepare = udl_crtc_prepare, | ||
355 | .commit = udl_crtc_commit, | ||
356 | .disable = udl_crtc_disable, | ||
357 | .load_lut = udl_load_lut, | ||
358 | }; | ||
359 | |||
360 | static const struct drm_crtc_funcs udl_crtc_funcs = { | ||
361 | .set_config = drm_crtc_helper_set_config, | ||
362 | .destroy = udl_crtc_destroy, | ||
363 | }; | ||
364 | |||
365 | int udl_crtc_init(struct drm_device *dev) | ||
366 | { | ||
367 | struct drm_crtc *crtc; | ||
368 | |||
369 | crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL); | ||
370 | if (crtc == NULL) | ||
371 | return -ENOMEM; | ||
372 | |||
373 | drm_crtc_init(dev, crtc, &udl_crtc_funcs); | ||
374 | drm_crtc_helper_add(crtc, &udl_helper_funcs); | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static const struct drm_mode_config_funcs udl_mode_funcs = { | ||
380 | .fb_create = udl_fb_user_fb_create, | ||
381 | .output_poll_changed = NULL, | ||
382 | }; | ||
383 | |||
384 | int udl_modeset_init(struct drm_device *dev) | ||
385 | { | ||
386 | struct drm_encoder *encoder; | ||
387 | drm_mode_config_init(dev); | ||
388 | |||
389 | dev->mode_config.min_width = 640; | ||
390 | dev->mode_config.min_height = 480; | ||
391 | |||
392 | dev->mode_config.max_width = 2048; | ||
393 | dev->mode_config.max_height = 2048; | ||
394 | |||
395 | dev->mode_config.prefer_shadow = 0; | ||
396 | dev->mode_config.preferred_depth = 24; | ||
397 | |||
398 | dev->mode_config.funcs = (void *)&udl_mode_funcs; | ||
399 | |||
400 | drm_mode_create_dirty_info_property(dev); | ||
401 | |||
402 | udl_crtc_init(dev); | ||
403 | |||
404 | encoder = udl_encoder_init(dev); | ||
405 | |||
406 | udl_connector_init(dev, encoder); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | void udl_modeset_cleanup(struct drm_device *dev) | ||
412 | { | ||
413 | drm_mode_config_cleanup(dev); | ||
414 | } | ||
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c new file mode 100644 index 000000000000..b9320e2608dd --- /dev/null +++ b/drivers/gpu/drm/udl/udl_transfer.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat | ||
3 | * based in parts on udlfb.c: | ||
4 | * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> | ||
5 | * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> | ||
6 | * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License v2. See the file COPYING in the main directory of this archive for | ||
10 | * more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/fb.h> | ||
16 | #include <linux/prefetch.h> | ||
17 | |||
18 | #include "drmP.h" | ||
19 | #include "udl_drv.h" | ||
20 | |||
21 | #define MAX_CMD_PIXELS 255 | ||
22 | |||
23 | #define RLX_HEADER_BYTES 7 | ||
24 | #define MIN_RLX_PIX_BYTES 4 | ||
25 | #define MIN_RLX_CMD_BYTES (RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES) | ||
26 | |||
27 | #define RLE_HEADER_BYTES 6 | ||
28 | #define MIN_RLE_PIX_BYTES 3 | ||
29 | #define MIN_RLE_CMD_BYTES (RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES) | ||
30 | |||
31 | #define RAW_HEADER_BYTES 6 | ||
32 | #define MIN_RAW_PIX_BYTES 2 | ||
33 | #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES) | ||
34 | |||
35 | /* | ||
36 | * Trims identical data from front and back of line | ||
37 | * Sets new front buffer address and width | ||
38 | * And returns byte count of identical pixels | ||
39 | * Assumes CPU natural alignment (unsigned long) | ||
40 | * for back and front buffer ptrs and width | ||
41 | */ | ||
42 | #if 0 | ||
43 | static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes) | ||
44 | { | ||
45 | int j, k; | ||
46 | const unsigned long *back = (const unsigned long *) bback; | ||
47 | const unsigned long *front = (const unsigned long *) *bfront; | ||
48 | const int width = *width_bytes / sizeof(unsigned long); | ||
49 | int identical = width; | ||
50 | int start = width; | ||
51 | int end = width; | ||
52 | |||
53 | prefetch((void *) front); | ||
54 | prefetch((void *) back); | ||
55 | |||
56 | for (j = 0; j < width; j++) { | ||
57 | if (back[j] != front[j]) { | ||
58 | start = j; | ||
59 | break; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | for (k = width - 1; k > j; k--) { | ||
64 | if (back[k] != front[k]) { | ||
65 | end = k+1; | ||
66 | break; | ||
67 | } | ||
68 | } | ||
69 | |||
70 | identical = start + (width - end); | ||
71 | *bfront = (u8 *) &front[start]; | ||
72 | *width_bytes = (end - start) * sizeof(unsigned long); | ||
73 | |||
74 | return identical * sizeof(unsigned long); | ||
75 | } | ||
76 | #endif | ||
77 | |||
78 | static inline u16 pixel32_to_be16p(const uint8_t *pixel) | ||
79 | { | ||
80 | uint32_t pix = *(uint32_t *)pixel; | ||
81 | u16 retval; | ||
82 | |||
83 | retval = (((pix >> 3) & 0x001f) | | ||
84 | ((pix >> 5) & 0x07e0) | | ||
85 | ((pix >> 8) & 0xf800)); | ||
86 | return retval; | ||
87 | } | ||
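
To make the truncation concrete, here is the arithmetic for one arbitrarily chosen XRGB8888 pixel:

    /* Worked example for pixel32_to_be16p() with the value 0x00FF8040
     * (R=0xFF, G=0x80, B=0x40):
     *
     *   blue : (0x00FF8040 >> 3) & 0x001f = 0x0008   (0x40 >> 3)
     *   green: (0x00FF8040 >> 5) & 0x07e0 = 0x0400   (0x80 >> 2, at bits 5-10)
     *   red  : (0x00FF8040 >> 8) & 0xf800 = 0xf800   (0xFF >> 3, at bits 11-15)
     *
     *   result = 0xf800 | 0x0400 | 0x0008 = 0xfc08 (RGB565)
     *
     * The caller byte-swaps this with cpu_to_be16(), so the bytes 0xfc 0x08
     * end up in the command stream. */
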
88 | |||
89 | /* | ||
90 | * Render a command stream for an encoded horizontal line segment of pixels. | ||
91 | * | ||
92 | * A command buffer holds several commands. | ||
93 | * It always begins with a fresh command header | ||
94 | * (the protocol doesn't require this, but we enforce it to allow | ||
95 | * multiple buffers to be potentially encoded and sent in parallel). | ||
96 | * A single command encodes one contiguous horizontal line of pixels | ||
97 | * | ||
98 | * The function relies on the client to do all allocation, so that | ||
99 | * rendering can be done directly to output buffers (e.g. USB URBs). | ||
100 | * The function fills the supplied command buffer, providing information | ||
101 | * on where it left off, so the client may call in again with additional | ||
102 | * buffers if the line will take several buffers to complete. | ||
103 | * | ||
104 | * A single command can transmit a maximum of 256 pixels, | ||
105 | * regardless of the compression ratio (protocol design limit). | ||
106 | * To the hardware, 0 for a size byte means 256 | ||
107 | * | ||
108 | * Rather than 256 pixel commands which are either rl or raw encoded, | ||
109 | * the rlx command simply assumes alternating raw and rl spans within one cmd. | ||
110 | * This has a slightly larger header overhead, but produces more even results. | ||
111 | * It also processes all data (read and write) in a single pass. | ||
112 | * Performance benchmarks of common cases show it having just slightly better | ||
113 | * compression than 256 pixel raw or rle commands, with similar CPU consumption. | ||
114 | * But for very RL-friendly data, it will not compress quite as well. | ||
115 | */ | ||
116 | static void udl_compress_hline16( | ||
117 | const u8 **pixel_start_ptr, | ||
118 | const u8 *const pixel_end, | ||
119 | uint32_t *device_address_ptr, | ||
120 | uint8_t **command_buffer_ptr, | ||
121 | const uint8_t *const cmd_buffer_end, int bpp) | ||
122 | { | ||
123 | const u8 *pixel = *pixel_start_ptr; | ||
124 | uint32_t dev_addr = *device_address_ptr; | ||
125 | uint8_t *cmd = *command_buffer_ptr; | ||
126 | |||
127 | while ((pixel_end > pixel) && | ||
128 | (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) { | ||
129 | uint8_t *raw_pixels_count_byte = 0; | ||
130 | uint8_t *cmd_pixels_count_byte = 0; | ||
131 | const u8 *raw_pixel_start = 0; | ||
132 | const u8 *cmd_pixel_start, *cmd_pixel_end = 0; | ||
133 | |||
134 | prefetchw((void *) cmd); /* pull in one cache line at least */ | ||
135 | |||
136 | *cmd++ = 0xaf; | ||
137 | *cmd++ = 0x6b; | ||
138 | *cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF); | ||
139 | *cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF); | ||
140 | *cmd++ = (uint8_t) ((dev_addr) & 0xFF); | ||
141 | |||
142 | cmd_pixels_count_byte = cmd++; /* we'll know this later */ | ||
143 | cmd_pixel_start = pixel; | ||
144 | |||
145 | raw_pixels_count_byte = cmd++; /* we'll know this later */ | ||
146 | raw_pixel_start = pixel; | ||
147 | |||
148 | cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, | ||
149 | min((int)(pixel_end - pixel) / bpp, | ||
150 | (int)(cmd_buffer_end - cmd) / 2))) * bpp; | ||
151 | |||
152 | prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); | ||
153 | |||
154 | while (pixel < cmd_pixel_end) { | ||
155 | const u8 * const repeating_pixel = pixel; | ||
156 | |||
157 | if (bpp == 2) | ||
158 | *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel); | ||
159 | else if (bpp == 4) | ||
160 | *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel)); | ||
161 | |||
162 | cmd += 2; | ||
163 | pixel += bpp; | ||
164 | |||
165 | if (unlikely((pixel < cmd_pixel_end) && | ||
166 | (!memcmp(pixel, repeating_pixel, bpp)))) { | ||
167 | /* go back and fill in raw pixel count */ | ||
168 | *raw_pixels_count_byte = (((repeating_pixel - | ||
169 | raw_pixel_start) / bpp) + 1) & 0xFF; | ||
170 | |||
171 | while ((pixel < cmd_pixel_end) | ||
172 | && (!memcmp(pixel, repeating_pixel, bpp))) { | ||
173 | pixel += bpp; | ||
174 | } | ||
175 | |||
176 | /* immediately after raw data is repeat byte */ | ||
177 | *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF; | ||
178 | |||
179 | /* Then start another raw pixel span */ | ||
180 | raw_pixel_start = pixel; | ||
181 | raw_pixels_count_byte = cmd++; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | if (pixel > raw_pixel_start) { | ||
186 | /* finalize last RAW span */ | ||
187 | *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; | ||
188 | } | ||
189 | |||
190 | *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; | ||
191 | dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2; | ||
192 | } | ||
193 | |||
194 | if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) { | ||
195 | /* Fill leftover bytes with no-ops */ | ||
196 | if (cmd_buffer_end > cmd) | ||
197 | memset(cmd, 0xAF, cmd_buffer_end - cmd); | ||
198 | cmd = (uint8_t *) cmd_buffer_end; | ||
199 | } | ||
200 | |||
201 | *command_buffer_ptr = cmd; | ||
202 | *pixel_start_ptr = pixel; | ||
203 | *device_address_ptr = dev_addr; | ||
204 | |||
205 | return; | ||
206 | } | ||
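
Putting the pieces together, the byte layout of one RLX command as emitted above is (reconstructed from the writes in this function, not from a DisplayLink specification):

    /*   0xAF 0x6B  addr[23:16] addr[15:8] addr[7:0]  total_pixel_count
     *   raw_count  <raw_count big-endian RGB565 pixels>  repeat_count
     *   raw_count  <raw pixels>  repeat_count  ...        (spans alternate)
     *
     * Counts are single bytes, so 0 means 256 pixels; a repeat_count of N
     * means the last raw pixel is repeated N further times by the hardware.
     * Leftover buffer space too small for another command is padded with
     * 0xAF no-ops. */
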
207 | |||
208 | /* | ||
209 | * There are 3 copies of every pixel: The front buffer that the fbdev | ||
210 | * client renders to, the actual framebuffer across the USB bus in hardware | ||
211 | * (that we can only write to, slowly, and can never read), and (optionally) | ||
212 | * our shadow copy that tracks what's been sent to that hardware buffer. | ||
213 | */ | ||
214 | int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, | ||
215 | const char *front, char **urb_buf_ptr, | ||
216 | u32 byte_offset, u32 byte_width, | ||
217 | int *ident_ptr, int *sent_ptr) | ||
218 | { | ||
219 | const u8 *line_start, *line_end, *next_pixel; | ||
220 | u32 base16 = 0 + (byte_offset / bpp) * 2; | ||
221 | struct urb *urb = *urb_ptr; | ||
222 | u8 *cmd = *urb_buf_ptr; | ||
223 | u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; | ||
224 | |||
225 | line_start = (u8 *) (front + byte_offset); | ||
226 | next_pixel = line_start; | ||
227 | line_end = next_pixel + byte_width; | ||
228 | |||
229 | while (next_pixel < line_end) { | ||
230 | |||
231 | udl_compress_hline16(&next_pixel, | ||
232 | line_end, &base16, | ||
233 | (u8 **) &cmd, (u8 *) cmd_end, bpp); | ||
234 | |||
235 | if (cmd >= cmd_end) { | ||
236 | int len = cmd - (u8 *) urb->transfer_buffer; | ||
237 | if (udl_submit_urb(dev, urb, len)) | ||
238 | return 1; /* lost pixels is set */ | ||
239 | *sent_ptr += len; | ||
240 | urb = udl_get_urb(dev); | ||
241 | if (!urb) | ||
242 | return 1; /* lost_pixels is set */ | ||
243 | *urb_ptr = urb; | ||
244 | cmd = urb->transfer_buffer; | ||
245 | cmd_end = &cmd[urb->transfer_buffer_length]; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | *urb_buf_ptr = cmd; | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c index a2ab34365151..1f182254e81e 100644 --- a/drivers/gpu/drm/via/via_map.c +++ b/drivers/gpu/drm/via/via_map.c | |||
@@ -106,6 +106,8 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) | |||
106 | 106 | ||
107 | idr_init(&dev->object_name_idr); | 107 | idr_init(&dev->object_name_idr); |
108 | 108 | ||
109 | pci_set_master(dev->pdev); | ||
110 | |||
109 | ret = drm_vblank_init(dev, 1); | 111 | ret = drm_vblank_init(dev, 1); |
110 | if (ret) { | 112 | if (ret) { |
111 | kfree(dev_priv); | 113 | kfree(dev_priv); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 2d6f573bfff2..ee24d216aa85 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -38,6 +38,10 @@ | |||
38 | #define VMWGFX_CHIP_SVGAII 0 | 38 | #define VMWGFX_CHIP_SVGAII 0 |
39 | #define VMW_FB_RESERVATION 0 | 39 | #define VMW_FB_RESERVATION 0 |
40 | 40 | ||
41 | #define VMW_MIN_INITIAL_WIDTH 800 | ||
42 | #define VMW_MIN_INITIAL_HEIGHT 600 | ||
43 | |||
44 | |||
41 | /** | 45 | /** |
42 | * Fully encoded drm commands. Might move to vmw_drm.h | 46 | * Fully encoded drm commands. Might move to vmw_drm.h |
43 | */ | 47 | */ |
@@ -387,6 +391,41 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, | |||
387 | BUG_ON(n3d < 0); | 391 | BUG_ON(n3d < 0); |
388 | } | 392 | } |
389 | 393 | ||
394 | /** | ||
395 | * Sets the initial_[width|height] fields on the given vmw_private. | ||
396 | * | ||
397 | * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then | ||
398 | * clamping the value to fb_max_[width|height] fields and the | ||
399 | * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. | ||
400 | * If the values appear to be invalid, set them to | ||
401 | * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. | ||
402 | */ | ||
403 | static void vmw_get_initial_size(struct vmw_private *dev_priv) | ||
404 | { | ||
405 | uint32_t width; | ||
406 | uint32_t height; | ||
407 | |||
408 | width = vmw_read(dev_priv, SVGA_REG_WIDTH); | ||
409 | height = vmw_read(dev_priv, SVGA_REG_HEIGHT); | ||
410 | |||
411 | width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); | ||
412 | height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); | ||
413 | |||
414 | if (width > dev_priv->fb_max_width || | ||
415 | height > dev_priv->fb_max_height) { | ||
416 | |||
417 | /* | ||
418 | * This is a host error and shouldn't occur. | ||
419 | */ | ||
420 | |||
421 | width = VMW_MIN_INITIAL_WIDTH; | ||
422 | height = VMW_MIN_INITIAL_HEIGHT; | ||
423 | } | ||
424 | |||
425 | dev_priv->initial_width = width; | ||
426 | dev_priv->initial_height = height; | ||
427 | } | ||
428 | |||
390 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 429 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
391 | { | 430 | { |
392 | struct vmw_private *dev_priv; | 431 | struct vmw_private *dev_priv; |
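vmw_get_initial_size() above first raises the host-reported size to the 800x600 floor, then falls back to that floor entirely if the result exceeds what the framebuffer can hold. A small standalone illustration of that clamping order with made-up register values; clamp_initial() is a hypothetical mirror of the function, not driver code:

#include <stdio.h>

#define MIN_W 800
#define MIN_H 600

/* Raise to the floor, then fall back to the floor if the host-reported
 * value is implausible (mirrors vmw_get_initial_size()). */
static void clamp_initial(unsigned int *w, unsigned int *h,
			  unsigned int max_w, unsigned int max_h)
{
	if (*w < MIN_W) *w = MIN_W;
	if (*h < MIN_H) *h = MIN_H;
	if (*w > max_w || *h > max_h) {
		*w = MIN_W;
		*h = MIN_H;
	}
}

int main(void)
{
	unsigned int w = 640, h = 480;		/* e.g. stale SVGA_REG_WIDTH/HEIGHT */

	clamp_initial(&w, &h, 8192, 8192);
	printf("%ux%u\n", w, h);		/* prints 800x600 */

	w = 1920; h = 1200;			/* sane host value within fb_max */
	clamp_initial(&w, &h, 8192, 8192);
	printf("%ux%u\n", w, h);		/* prints 1920x1200 */
	return 0;
}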
@@ -400,6 +439,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
400 | } | 439 | } |
401 | memset(dev_priv, 0, sizeof(*dev_priv)); | 440 | memset(dev_priv, 0, sizeof(*dev_priv)); |
402 | 441 | ||
442 | pci_set_master(dev->pdev); | ||
443 | |||
403 | dev_priv->dev = dev; | 444 | dev_priv->dev = dev; |
404 | dev_priv->vmw_chipset = chipset; | 445 | dev_priv->vmw_chipset = chipset; |
405 | dev_priv->last_read_seqno = (uint32_t) -100; | 446 | dev_priv->last_read_seqno = (uint32_t) -100; |
@@ -441,6 +482,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
441 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | 482 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); |
442 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | 483 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); |
443 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | 484 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); |
485 | |||
486 | vmw_get_initial_size(dev_priv); | ||
487 | |||
444 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 488 | if (dev_priv->capabilities & SVGA_CAP_GMR) { |
445 | dev_priv->max_gmr_descriptors = | 489 | dev_priv->max_gmr_descriptors = |
446 | vmw_read(dev_priv, | 490 | vmw_read(dev_priv, |
@@ -688,6 +732,15 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
688 | return 0; | 732 | return 0; |
689 | } | 733 | } |
690 | 734 | ||
735 | static void vmw_preclose(struct drm_device *dev, | ||
736 | struct drm_file *file_priv) | ||
737 | { | ||
738 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
739 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
740 | |||
741 | vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); | ||
742 | } | ||
743 | |||
691 | static void vmw_postclose(struct drm_device *dev, | 744 | static void vmw_postclose(struct drm_device *dev, |
692 | struct drm_file *file_priv) | 745 | struct drm_file *file_priv) |
693 | { | 746 | { |
@@ -710,6 +763,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |||
710 | if (unlikely(vmw_fp == NULL)) | 763 | if (unlikely(vmw_fp == NULL)) |
711 | return ret; | 764 | return ret; |
712 | 765 | ||
766 | INIT_LIST_HEAD(&vmw_fp->fence_events); | ||
713 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); | 767 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); |
714 | if (unlikely(vmw_fp->tfile == NULL)) | 768 | if (unlikely(vmw_fp->tfile == NULL)) |
715 | goto out_no_tfile; | 769 | goto out_no_tfile; |
@@ -1102,6 +1156,7 @@ static struct drm_driver driver = { | |||
1102 | .master_set = vmw_master_set, | 1156 | .master_set = vmw_master_set, |
1103 | .master_drop = vmw_master_drop, | 1157 | .master_drop = vmw_master_drop, |
1104 | .open = vmw_driver_open, | 1158 | .open = vmw_driver_open, |
1159 | .preclose = vmw_preclose, | ||
1105 | .postclose = vmw_postclose, | 1160 | .postclose = vmw_postclose, |
1106 | .fops = &vmwgfx_driver_fops, | 1161 | .fops = &vmwgfx_driver_fops, |
1107 | .name = VMWGFX_DRIVER_NAME, | 1162 | .name = VMWGFX_DRIVER_NAME, |
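The driver hunks above add per-open-file bookkeeping: vmw_driver_open() initializes vmw_fp->fence_events and the new .preclose hook drains whatever is still queued before the DRM core tears the file down. The shape of that pattern, reduced to a toy client with a plain singly linked list (all names here are illustrative, not the vmwgfx API):

#include <stdio.h>
#include <stdlib.h>

struct pending { struct pending *next; int id; };
struct client  { struct pending *events; };

static void client_open(struct client *c)		/* cf. vmw_driver_open() */
{
	c->events = NULL;
}

static void client_queue(struct client *c, int id)	/* cf. action_queue() */
{
	struct pending *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->id = id;
	p->next = c->events;
	c->events = p;
}

static void client_preclose(struct client *c)		/* cf. vmw_preclose() */
{
	while (c->events) {
		struct pending *p = c->events;

		c->events = p->next;
		printf("dropping undelivered event %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	struct client c;

	client_open(&c);
	client_queue(&c, 1);
	client_queue(&c, 2);
	client_preclose(&c);	/* nothing may reference the client afterwards */
	return 0;
}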
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index dc279706ca70..d0f2c079ee27 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -40,9 +40,9 @@ | |||
40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
42 | 42 | ||
43 | #define VMWGFX_DRIVER_DATE "20111025" | 43 | #define VMWGFX_DRIVER_DATE "20120209" |
44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
45 | #define VMWGFX_DRIVER_MINOR 3 | 45 | #define VMWGFX_DRIVER_MINOR 4 |
46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -62,6 +62,7 @@ | |||
62 | struct vmw_fpriv { | 62 | struct vmw_fpriv { |
63 | struct drm_master *locked_master; | 63 | struct drm_master *locked_master; |
64 | struct ttm_object_file *tfile; | 64 | struct ttm_object_file *tfile; |
65 | struct list_head fence_events; | ||
65 | }; | 66 | }; |
66 | 67 | ||
67 | struct vmw_dma_buffer { | 68 | struct vmw_dma_buffer { |
@@ -202,6 +203,8 @@ struct vmw_private { | |||
202 | uint32_t mmio_size; | 203 | uint32_t mmio_size; |
203 | uint32_t fb_max_width; | 204 | uint32_t fb_max_width; |
204 | uint32_t fb_max_height; | 205 | uint32_t fb_max_height; |
206 | uint32_t initial_width; | ||
207 | uint32_t initial_height; | ||
205 | __le32 __iomem *mmio_virt; | 208 | __le32 __iomem *mmio_virt; |
206 | int mmio_mtrr; | 209 | int mmio_mtrr; |
207 | uint32_t capabilities; | 210 | uint32_t capabilities; |
@@ -533,7 +536,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv, | |||
533 | uint32_t command_size, | 536 | uint32_t command_size, |
534 | uint64_t throttle_us, | 537 | uint64_t throttle_us, |
535 | struct drm_vmw_fence_rep __user | 538 | struct drm_vmw_fence_rep __user |
536 | *user_fence_rep); | 539 | *user_fence_rep, |
540 | struct vmw_fence_obj **out_fence); | ||
537 | 541 | ||
538 | extern void | 542 | extern void |
539 | vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | 543 | vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 40932fbdac0f..4acced44a623 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -1109,10 +1109,11 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1109 | void *kernel_commands, | 1109 | void *kernel_commands, |
1110 | uint32_t command_size, | 1110 | uint32_t command_size, |
1111 | uint64_t throttle_us, | 1111 | uint64_t throttle_us, |
1112 | struct drm_vmw_fence_rep __user *user_fence_rep) | 1112 | struct drm_vmw_fence_rep __user *user_fence_rep, |
1113 | struct vmw_fence_obj **out_fence) | ||
1113 | { | 1114 | { |
1114 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 1115 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
1115 | struct vmw_fence_obj *fence; | 1116 | struct vmw_fence_obj *fence = NULL; |
1116 | uint32_t handle; | 1117 | uint32_t handle; |
1117 | void *cmd; | 1118 | void *cmd; |
1118 | int ret; | 1119 | int ret; |
@@ -1208,8 +1209,13 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1208 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, | 1209 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, |
1209 | user_fence_rep, fence, handle); | 1210 | user_fence_rep, fence, handle); |
1210 | 1211 | ||
1211 | if (likely(fence != NULL)) | 1212 | /* Don't unreference when handing fence out */ |
1213 | if (unlikely(out_fence != NULL)) { | ||
1214 | *out_fence = fence; | ||
1215 | fence = NULL; | ||
1216 | } else if (likely(fence != NULL)) { | ||
1212 | vmw_fence_obj_unreference(&fence); | 1217 | vmw_fence_obj_unreference(&fence); |
1218 | } | ||
1213 | 1219 | ||
1214 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1220 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1215 | return 0; | 1221 | return 0; |
@@ -1362,7 +1368,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
1362 | ret = vmw_execbuf_process(file_priv, dev_priv, | 1368 | ret = vmw_execbuf_process(file_priv, dev_priv, |
1363 | (void __user *)(unsigned long)arg->commands, | 1369 | (void __user *)(unsigned long)arg->commands, |
1364 | NULL, arg->command_size, arg->throttle_us, | 1370 | NULL, arg->command_size, arg->throttle_us, |
1365 | (void __user *)(unsigned long)arg->fence_rep); | 1371 | (void __user *)(unsigned long)arg->fence_rep, |
1372 | NULL); | ||
1366 | 1373 | ||
1367 | if (unlikely(ret != 0)) | 1374 | if (unlikely(ret != 0)) |
1368 | goto out_unlock; | 1375 | goto out_unlock; |
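The new out_fence argument to vmw_execbuf_process() implements a reference handoff: when the caller passes a non-NULL pointer it takes over the fence reference, otherwise the function drops it as before, so exactly one unreference happens either way. A toy illustration of that transfer-or-drop idiom (obj_get()/obj_put()/finish() are invented stand-ins for the fence refcounting):

#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }

static void obj_put(struct obj **po)
{
	struct obj *o = *po;

	*po = NULL;
	if (--o->refs == 0)
		free(o);
}

/* Either hand the reference to the caller (out != NULL) or drop it here,
 * mirroring the out_fence handling added to vmw_execbuf_process(). */
static void finish(struct obj *o, struct obj **out)
{
	if (out) {
		*out = o;	/* caller now owns the reference */
		o = NULL;
	} else if (o) {
		obj_put(&o);	/* nobody wants it: drop it */
	}
}

int main(void)
{
	struct obj *kept = NULL;
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	obj_get(o);		/* reference created by the "submission" */
	finish(o, &kept);	/* caller takes ownership */
	obj_put(&kept);		/* and releases it when done */
	return 0;
}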
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 34e51a1695b8..3c447bf317cb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -414,10 +414,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
414 | unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size; | 414 | unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size; |
415 | int ret; | 415 | int ret; |
416 | 416 | ||
417 | /* XXX These shouldn't be hardcoded. */ | ||
418 | initial_width = 800; | ||
419 | initial_height = 600; | ||
420 | |||
421 | fb_bpp = 32; | 417 | fb_bpp = 32; |
422 | fb_depth = 24; | 418 | fb_depth = 24; |
423 | 419 | ||
@@ -425,8 +421,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
425 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); | 421 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); |
426 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); | 422 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); |
427 | 423 | ||
428 | initial_width = min(fb_width, initial_width); | 424 | initial_width = min(vmw_priv->initial_width, fb_width); |
429 | initial_height = min(fb_height, initial_height); | 425 | initial_height = min(vmw_priv->initial_height, fb_height); |
430 | 426 | ||
431 | fb_pitch = fb_width * fb_bpp / 8; | 427 | fb_pitch = fb_width * fb_bpp / 8; |
432 | fb_size = fb_pitch * fb_height; | 428 | fb_size = fb_pitch * fb_height; |
@@ -515,19 +511,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
515 | info->var.xres = initial_width; | 511 | info->var.xres = initial_width; |
516 | info->var.yres = initial_height; | 512 | info->var.yres = initial_height; |
517 | 513 | ||
518 | #if 0 | 514 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
519 | info->pixmap.size = 64*1024; | ||
520 | info->pixmap.buf_align = 8; | ||
521 | info->pixmap.access_align = 32; | ||
522 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
523 | info->pixmap.scan_align = 1; | ||
524 | #else | ||
525 | info->pixmap.size = 0; | ||
526 | info->pixmap.buf_align = 8; | ||
527 | info->pixmap.access_align = 32; | ||
528 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
529 | info->pixmap.scan_align = 1; | ||
530 | #endif | ||
531 | 515 | ||
532 | info->apertures = alloc_apertures(1); | 516 | info->apertures = alloc_apertures(1); |
533 | if (!info->apertures) { | 517 | if (!info->apertures) { |
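With the hardcoded 800x600 removed, the fbdev buffer is still sized for the clamped fb_width/fb_height rather than the initial mode, so the arithmetic a few lines up is the one that matters: fb_pitch = fb_width * fb_bpp / 8 and fb_size = fb_pitch * fb_height, i.e. 2048 * 32 / 8 = 8192 bytes per line and 8192 * 2048 = 16 MiB at the 2048x2048 cap. A one-line check of those numbers:

#include <stdio.h>

int main(void)
{
	unsigned int fb_width = 2048, fb_height = 2048, fb_bpp = 32;
	unsigned int fb_pitch = fb_width * fb_bpp / 8;		/* 8192 bytes */
	unsigned long fb_size = (unsigned long)fb_pitch * fb_height;

	printf("pitch=%u size=%lu (%lu MiB)\n",
	       fb_pitch, fb_size, fb_size >> 20);		/* 16 MiB */
	return 0;
}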
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 15fb26088d68..f2fb8f15e2f1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -69,12 +69,13 @@ struct vmw_user_fence { | |||
69 | * be assigned the current time tv_usec val when the fence signals. | 69 | * be assigned the current time tv_usec val when the fence signals. |
70 | */ | 70 | */ |
71 | struct vmw_event_fence_action { | 71 | struct vmw_event_fence_action { |
72 | struct drm_pending_event e; | ||
73 | struct vmw_fence_action action; | 72 | struct vmw_fence_action action; |
73 | struct list_head fpriv_head; | ||
74 | |||
75 | struct drm_pending_event *event; | ||
74 | struct vmw_fence_obj *fence; | 76 | struct vmw_fence_obj *fence; |
75 | struct drm_device *dev; | 77 | struct drm_device *dev; |
76 | struct kref kref; | 78 | |
77 | uint32_t size; | ||
78 | uint32_t *tv_sec; | 79 | uint32_t *tv_sec; |
79 | uint32_t *tv_usec; | 80 | uint32_t *tv_usec; |
80 | }; | 81 | }; |
@@ -784,46 +785,40 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, | |||
784 | } | 785 | } |
785 | 786 | ||
786 | /** | 787 | /** |
787 | * vmw_event_fence_action_destroy | 788 | * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects |
788 | * | ||
789 | * @kref: The struct kref embedded in a struct vmw_event_fence_action. | ||
790 | * | ||
791 | * The vmw_event_fence_action destructor that may be called either after | ||
792 | * the fence action cleanup, or when the event is delivered. | ||
793 | * It frees both the vmw_event_fence_action struct and the actual | ||
794 | * event structure copied to user-space. | ||
795 | */ | ||
796 | static void vmw_event_fence_action_destroy(struct kref *kref) | ||
797 | { | ||
798 | struct vmw_event_fence_action *eaction = | ||
799 | container_of(kref, struct vmw_event_fence_action, kref); | ||
800 | struct ttm_mem_global *mem_glob = | ||
801 | vmw_mem_glob(vmw_priv(eaction->dev)); | ||
802 | uint32_t size = eaction->size; | ||
803 | |||
804 | kfree(eaction->e.event); | ||
805 | kfree(eaction); | ||
806 | ttm_mem_global_free(mem_glob, size); | ||
807 | } | ||
808 | |||
809 | |||
810 | /** | ||
811 | * vmw_event_fence_action_delivered | ||
812 | * | 789 | * |
813 | * @e: The struct drm_pending_event embedded in a struct | 790 | * @fman: Pointer to a struct vmw_fence_manager |
814 | * vmw_event_fence_action. | 791 | * @event_list: Pointer to linked list of struct vmw_event_fence_action objects |
792 | * with pointers to a struct drm_file object about to be closed. | ||
815 | * | 793 | * |
816 | * The struct drm_pending_event destructor that is called by drm | 794 | * This function removes all pending fence events with references to a |
817 | * once the event is delivered. Since we don't know whether this function | 795 | * specific struct drm_file object about to be closed. The caller is required |
818 | * will be called before or after the fence action destructor, we | 796 | * to pass a list of all struct vmw_event_fence_action objects with such |
819 | * free a refcount and destroy if it becomes zero. | 797 | * events attached. This function is typically called before the |
798 | * struct drm_file object's event management is taken down. | ||
820 | */ | 799 | */ |
821 | static void vmw_event_fence_action_delivered(struct drm_pending_event *e) | 800 | void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman, |
801 | struct list_head *event_list) | ||
822 | { | 802 | { |
823 | struct vmw_event_fence_action *eaction = | 803 | struct vmw_event_fence_action *eaction; |
824 | container_of(e, struct vmw_event_fence_action, e); | 804 | struct drm_pending_event *event; |
805 | unsigned long irq_flags; | ||
825 | 806 | ||
826 | kref_put(&eaction->kref, vmw_event_fence_action_destroy); | 807 | while (1) { |
808 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
809 | if (list_empty(event_list)) | ||
810 | goto out_unlock; | ||
811 | eaction = list_first_entry(event_list, | ||
812 | struct vmw_event_fence_action, | ||
813 | fpriv_head); | ||
814 | list_del_init(&eaction->fpriv_head); | ||
815 | event = eaction->event; | ||
816 | eaction->event = NULL; | ||
817 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
818 | event->destroy(event); | ||
819 | } | ||
820 | out_unlock: | ||
821 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
827 | } | 822 | } |
828 | 823 | ||
829 | 824 | ||
@@ -836,18 +831,21 @@ static void vmw_event_fence_action_delivered(struct drm_pending_event *e) | |||
836 | * This function is called when the seqno of the fence where @action is | 831 | * This function is called when the seqno of the fence where @action is |
837 | * attached has passed. It queues the event on the submitter's event list. | 832 | * attached has passed. It queues the event on the submitter's event list. |
838 | * This function is always called from atomic context, and may be called | 833 | * This function is always called from atomic context, and may be called |
839 | * from irq context. It ups a refcount reflecting that we now have two | 834 | * from irq context. |
840 | * destructors. | ||
841 | */ | 835 | */ |
842 | static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) | 836 | static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) |
843 | { | 837 | { |
844 | struct vmw_event_fence_action *eaction = | 838 | struct vmw_event_fence_action *eaction = |
845 | container_of(action, struct vmw_event_fence_action, action); | 839 | container_of(action, struct vmw_event_fence_action, action); |
846 | struct drm_device *dev = eaction->dev; | 840 | struct drm_device *dev = eaction->dev; |
847 | struct drm_file *file_priv = eaction->e.file_priv; | 841 | struct drm_pending_event *event = eaction->event; |
842 | struct drm_file *file_priv; | ||
848 | unsigned long irq_flags; | 843 | unsigned long irq_flags; |
849 | 844 | ||
850 | kref_get(&eaction->kref); | 845 | if (unlikely(event == NULL)) |
846 | return; | ||
847 | |||
848 | file_priv = event->file_priv; | ||
851 | spin_lock_irqsave(&dev->event_lock, irq_flags); | 849 | spin_lock_irqsave(&dev->event_lock, irq_flags); |
852 | 850 | ||
853 | if (likely(eaction->tv_sec != NULL)) { | 851 | if (likely(eaction->tv_sec != NULL)) { |
@@ -858,7 +856,9 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) | |||
858 | *eaction->tv_usec = tv.tv_usec; | 856 | *eaction->tv_usec = tv.tv_usec; |
859 | } | 857 | } |
860 | 858 | ||
861 | list_add_tail(&eaction->e.link, &file_priv->event_list); | 859 | list_del_init(&eaction->fpriv_head); |
860 | list_add_tail(&eaction->event->link, &file_priv->event_list); | ||
861 | eaction->event = NULL; | ||
862 | wake_up_all(&file_priv->event_wait); | 862 | wake_up_all(&file_priv->event_wait); |
863 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | 863 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); |
864 | } | 864 | } |
@@ -876,9 +876,15 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) | |||
876 | { | 876 | { |
877 | struct vmw_event_fence_action *eaction = | 877 | struct vmw_event_fence_action *eaction = |
878 | container_of(action, struct vmw_event_fence_action, action); | 878 | container_of(action, struct vmw_event_fence_action, action); |
879 | struct vmw_fence_manager *fman = eaction->fence->fman; | ||
880 | unsigned long irq_flags; | ||
881 | |||
882 | spin_lock_irqsave(&fman->lock, irq_flags); | ||
883 | list_del(&eaction->fpriv_head); | ||
884 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
879 | 885 | ||
880 | vmw_fence_obj_unreference(&eaction->fence); | 886 | vmw_fence_obj_unreference(&eaction->fence); |
881 | kref_put(&eaction->kref, vmw_event_fence_action_destroy); | 887 | kfree(eaction); |
882 | } | 888 | } |
883 | 889 | ||
884 | 890 | ||
@@ -946,39 +952,23 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | |||
946 | * an error code, the caller needs to free that object. | 952 | * an error code, the caller needs to free that object. |
947 | */ | 953 | */ |
948 | 954 | ||
949 | int vmw_event_fence_action_create(struct drm_file *file_priv, | 955 | int vmw_event_fence_action_queue(struct drm_file *file_priv, |
950 | struct vmw_fence_obj *fence, | 956 | struct vmw_fence_obj *fence, |
951 | struct drm_event *event, | 957 | struct drm_pending_event *event, |
952 | uint32_t *tv_sec, | 958 | uint32_t *tv_sec, |
953 | uint32_t *tv_usec, | 959 | uint32_t *tv_usec, |
954 | bool interruptible) | 960 | bool interruptible) |
955 | { | 961 | { |
956 | struct vmw_event_fence_action *eaction; | 962 | struct vmw_event_fence_action *eaction; |
957 | struct ttm_mem_global *mem_glob = | ||
958 | vmw_mem_glob(fence->fman->dev_priv); | ||
959 | struct vmw_fence_manager *fman = fence->fman; | 963 | struct vmw_fence_manager *fman = fence->fman; |
960 | uint32_t size = fman->event_fence_action_size + | 964 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); |
961 | ttm_round_pot(event->length); | 965 | unsigned long irq_flags; |
962 | int ret; | ||
963 | |||
964 | /* | ||
965 | * Account for internal structure size as well as the | ||
966 | * event size itself. | ||
967 | */ | ||
968 | |||
969 | ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible); | ||
970 | if (unlikely(ret != 0)) | ||
971 | return ret; | ||
972 | 966 | ||
973 | eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); | 967 | eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); |
974 | if (unlikely(eaction == NULL)) { | 968 | if (unlikely(eaction == NULL)) |
975 | ttm_mem_global_free(mem_glob, size); | ||
976 | return -ENOMEM; | 969 | return -ENOMEM; |
977 | } | ||
978 | 970 | ||
979 | eaction->e.event = event; | 971 | eaction->event = event; |
980 | eaction->e.file_priv = file_priv; | ||
981 | eaction->e.destroy = vmw_event_fence_action_delivered; | ||
982 | 972 | ||
983 | eaction->action.seq_passed = vmw_event_fence_action_seq_passed; | 973 | eaction->action.seq_passed = vmw_event_fence_action_seq_passed; |
984 | eaction->action.cleanup = vmw_event_fence_action_cleanup; | 974 | eaction->action.cleanup = vmw_event_fence_action_cleanup; |
@@ -986,16 +976,89 @@ int vmw_event_fence_action_create(struct drm_file *file_priv, | |||
986 | 976 | ||
987 | eaction->fence = vmw_fence_obj_reference(fence); | 977 | eaction->fence = vmw_fence_obj_reference(fence); |
988 | eaction->dev = fman->dev_priv->dev; | 978 | eaction->dev = fman->dev_priv->dev; |
989 | eaction->size = size; | ||
990 | eaction->tv_sec = tv_sec; | 979 | eaction->tv_sec = tv_sec; |
991 | eaction->tv_usec = tv_usec; | 980 | eaction->tv_usec = tv_usec; |
992 | 981 | ||
993 | kref_init(&eaction->kref); | 982 | spin_lock_irqsave(&fman->lock, irq_flags); |
983 | list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events); | ||
984 | spin_unlock_irqrestore(&fman->lock, irq_flags); | ||
985 | |||
994 | vmw_fence_obj_add_action(fence, &eaction->action); | 986 | vmw_fence_obj_add_action(fence, &eaction->action); |
995 | 987 | ||
996 | return 0; | 988 | return 0; |
997 | } | 989 | } |
998 | 990 | ||
991 | struct vmw_event_fence_pending { | ||
992 | struct drm_pending_event base; | ||
993 | struct drm_vmw_event_fence event; | ||
994 | }; | ||
995 | |||
996 | int vmw_event_fence_action_create(struct drm_file *file_priv, | ||
997 | struct vmw_fence_obj *fence, | ||
998 | uint32_t flags, | ||
999 | uint64_t user_data, | ||
1000 | bool interruptible) | ||
1001 | { | ||
1002 | struct vmw_event_fence_pending *event; | ||
1003 | struct drm_device *dev = fence->fman->dev_priv->dev; | ||
1004 | unsigned long irq_flags; | ||
1005 | int ret; | ||
1006 | |||
1007 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
1008 | |||
1009 | ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0; | ||
1010 | if (likely(ret == 0)) | ||
1011 | file_priv->event_space -= sizeof(event->event); | ||
1012 | |||
1013 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
1014 | |||
1015 | if (unlikely(ret != 0)) { | ||
1016 | DRM_ERROR("Failed to allocate event space for this file.\n"); | ||
1017 | goto out_no_space; | ||
1018 | } | ||
1019 | |||
1020 | |||
1021 | event = kzalloc(sizeof(*event), GFP_KERNEL); | ||
1022 | if (unlikely(event == NULL)) { | ||
1023 | DRM_ERROR("Failed to allocate an event.\n"); | ||
1024 | ret = -ENOMEM; | ||
1025 | goto out_no_event; | ||
1026 | } | ||
1027 | |||
1028 | event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; | ||
1029 | event->event.base.length = sizeof(event->event); | ||
1030 | event->event.user_data = user_data; | ||
1031 | |||
1032 | event->base.event = &event->event.base; | ||
1033 | event->base.file_priv = file_priv; | ||
1034 | event->base.destroy = (void (*) (struct drm_pending_event *)) kfree; | ||
1035 | |||
1036 | |||
1037 | if (flags & DRM_VMW_FE_FLAG_REQ_TIME) | ||
1038 | ret = vmw_event_fence_action_queue(file_priv, fence, | ||
1039 | &event->base, | ||
1040 | &event->event.tv_sec, | ||
1041 | &event->event.tv_usec, | ||
1042 | interruptible); | ||
1043 | else | ||
1044 | ret = vmw_event_fence_action_queue(file_priv, fence, | ||
1045 | &event->base, | ||
1046 | NULL, | ||
1047 | NULL, | ||
1048 | interruptible); | ||
1049 | if (ret != 0) | ||
1050 | goto out_no_queue; | ||
1051 | return 0; | ||
1052 | out_no_queue: | ||
1053 | event->base.destroy(&event->base); | ||
1054 | out_no_event: | ||
1055 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
1056 | file_priv->event_space += sizeof(event->event); | ||
1057 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
1058 | out_no_space: | ||
1059 | return ret; | ||
1060 | } | ||
1061 | |||
999 | int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | 1062 | int vmw_fence_event_ioctl(struct drm_device *dev, void *data, |
1000 | struct drm_file *file_priv) | 1063 | struct drm_file *file_priv) |
1001 | { | 1064 | { |
@@ -1008,8 +1071,6 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | |||
1008 | (struct drm_vmw_fence_rep __user *)(unsigned long) | 1071 | (struct drm_vmw_fence_rep __user *)(unsigned long) |
1009 | arg->fence_rep; | 1072 | arg->fence_rep; |
1010 | uint32_t handle; | 1073 | uint32_t handle; |
1011 | unsigned long irq_flags; | ||
1012 | struct drm_vmw_event_fence *event; | ||
1013 | int ret; | 1074 | int ret; |
1014 | 1075 | ||
1015 | /* | 1076 | /* |
@@ -1062,59 +1123,28 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | |||
1062 | 1123 | ||
1063 | BUG_ON(fence == NULL); | 1124 | BUG_ON(fence == NULL); |
1064 | 1125 | ||
1065 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
1066 | |||
1067 | ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0; | ||
1068 | if (likely(ret == 0)) | ||
1069 | file_priv->event_space -= sizeof(*event); | ||
1070 | |||
1071 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
1072 | |||
1073 | if (unlikely(ret != 0)) { | ||
1074 | DRM_ERROR("Failed to allocate event space for this file.\n"); | ||
1075 | goto out_no_event_space; | ||
1076 | } | ||
1077 | |||
1078 | event = kzalloc(sizeof(*event), GFP_KERNEL); | ||
1079 | if (unlikely(event == NULL)) { | ||
1080 | DRM_ERROR("Failed to allocate an event.\n"); | ||
1081 | goto out_no_event; | ||
1082 | } | ||
1083 | |||
1084 | event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED; | ||
1085 | event->base.length = sizeof(*event); | ||
1086 | event->user_data = arg->user_data; | ||
1087 | |||
1088 | if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) | 1126 | if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) |
1089 | ret = vmw_event_fence_action_create(file_priv, fence, | 1127 | ret = vmw_event_fence_action_create(file_priv, fence, |
1090 | &event->base, | 1128 | arg->flags, |
1091 | &event->tv_sec, | 1129 | arg->user_data, |
1092 | &event->tv_usec, | ||
1093 | true); | 1130 | true); |
1094 | else | 1131 | else |
1095 | ret = vmw_event_fence_action_create(file_priv, fence, | 1132 | ret = vmw_event_fence_action_create(file_priv, fence, |
1096 | &event->base, | 1133 | arg->flags, |
1097 | NULL, | 1134 | arg->user_data, |
1098 | NULL, | ||
1099 | true); | 1135 | true); |
1100 | 1136 | ||
1101 | if (unlikely(ret != 0)) { | 1137 | if (unlikely(ret != 0)) { |
1102 | if (ret != -ERESTARTSYS) | 1138 | if (ret != -ERESTARTSYS) |
1103 | DRM_ERROR("Failed to attach event to fence.\n"); | 1139 | DRM_ERROR("Failed to attach event to fence.\n"); |
1104 | goto out_no_attach; | 1140 | goto out_no_create; |
1105 | } | 1141 | } |
1106 | 1142 | ||
1107 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, | 1143 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, |
1108 | handle); | 1144 | handle); |
1109 | vmw_fence_obj_unreference(&fence); | 1145 | vmw_fence_obj_unreference(&fence); |
1110 | return 0; | 1146 | return 0; |
1111 | out_no_attach: | 1147 | out_no_create: |
1112 | kfree(event); | ||
1113 | out_no_event: | ||
1114 | spin_lock_irqsave(&dev->event_lock, irq_flags); | ||
1115 | file_priv->event_space += sizeof(*event); | ||
1116 | spin_unlock_irqrestore(&dev->event_lock, irq_flags); | ||
1117 | out_no_event_space: | ||
1118 | if (user_fence_rep != NULL) | 1148 | if (user_fence_rep != NULL) |
1119 | ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | 1149 | ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, |
1120 | handle, TTM_REF_USAGE); | 1150 | handle, TTM_REF_USAGE); |
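The rework above replaces the kref/double-destructor scheme with explicit single ownership of the pending event: the action owns eaction->event until either the fence signals (the event moves to the file's delivery list and the DRM core destroys it after it is read) or the file goes away first (vmw_event_fence_fpriv_gone() destroys it), and the pointer is NULLed at the handoff so the other path becomes a no-op. A toy model of those two outcomes, using plain malloc/free instead of drm_pending_event; the function names only echo the driver's:

#include <stdlib.h>

struct action {
	char *event;			/* stands in for the drm_pending_event */
};

static void seq_passed(struct action *a, char **delivery_queue)
{
	if (!a->event)
		return;			/* file already went away */
	*delivery_queue = a->event;	/* delivery now owns the event */
	a->event = NULL;
}

static void fpriv_gone(struct action *a)
{
	free(a->event);			/* undeliverable: destroy it */
	a->event = NULL;
}

int main(void)
{
	char *queue = NULL;
	struct action a = { .event = malloc(16) };

	if (!a.event)
		return 1;
	seq_passed(&a, &queue);		/* fence signalled before close */
	free(queue);			/* "delivered", then destroyed  */

	a.event = malloc(16);
	if (!a.event)
		return 1;
	fpriv_gone(&a);			/* file closed before the signal */
	seq_passed(&a, &queue);		/* now a harmless no-op          */
	return 0;
}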
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 0854a2096b55..faf2e7873860 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | |||
@@ -109,5 +109,12 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, | |||
109 | struct drm_file *file_priv); | 109 | struct drm_file *file_priv); |
110 | extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | 110 | extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data, |
111 | struct drm_file *file_priv); | 111 | struct drm_file *file_priv); |
112 | 112 | extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman, | |
113 | struct list_head *event_list); | ||
114 | extern int vmw_event_fence_action_queue(struct drm_file *file_priv, | ||
115 | struct vmw_fence_obj *fence, | ||
116 | struct drm_pending_event *event, | ||
117 | uint32_t *tv_sec, | ||
118 | uint32_t *tv_usec, | ||
119 | bool interruptible); | ||
113 | #endif /* _VMWGFX_FENCE_H_ */ | 120 | #endif /* _VMWGFX_FENCE_H_ */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b66ef0e3cde1..2286d47e5022 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -422,7 +422,8 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv, | |||
422 | struct vmw_framebuffer *framebuffer, | 422 | struct vmw_framebuffer *framebuffer, |
423 | unsigned flags, unsigned color, | 423 | unsigned flags, unsigned color, |
424 | struct drm_clip_rect *clips, | 424 | struct drm_clip_rect *clips, |
425 | unsigned num_clips, int inc) | 425 | unsigned num_clips, int inc, |
426 | struct vmw_fence_obj **out_fence) | ||
426 | { | 427 | { |
427 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; | 428 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; |
428 | struct drm_clip_rect *clips_ptr; | 429 | struct drm_clip_rect *clips_ptr; |
@@ -542,12 +543,15 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv, | |||
542 | if (num == 0) | 543 | if (num == 0) |
543 | continue; | 544 | continue; |
544 | 545 | ||
546 | /* only return the last fence */ | ||
547 | if (out_fence && *out_fence) | ||
548 | vmw_fence_obj_unreference(out_fence); | ||
545 | 549 | ||
546 | /* recalculate package length */ | 550 | /* recalculate package length */ |
547 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; | 551 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; |
548 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); | 552 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); |
549 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | 553 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, |
550 | fifo_size, 0, NULL); | 554 | fifo_size, 0, NULL, out_fence); |
551 | 555 | ||
552 | if (unlikely(ret != 0)) | 556 | if (unlikely(ret != 0)) |
553 | break; | 557 | break; |
@@ -598,7 +602,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
598 | 602 | ||
599 | ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, | 603 | ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, |
600 | flags, color, | 604 | flags, color, |
601 | clips, num_clips, inc); | 605 | clips, num_clips, inc, NULL); |
602 | 606 | ||
603 | ttm_read_unlock(&vmaster->lock); | 607 | ttm_read_unlock(&vmaster->lock); |
604 | return 0; | 608 | return 0; |
@@ -809,7 +813,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv, | |||
809 | cmd->body.ptr.offset = 0; | 813 | cmd->body.ptr.offset = 0; |
810 | 814 | ||
811 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | 815 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, |
812 | fifo_size, 0, NULL); | 816 | fifo_size, 0, NULL, NULL); |
813 | 817 | ||
814 | kfree(cmd); | 818 | kfree(cmd); |
815 | 819 | ||
@@ -821,7 +825,8 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv, | |||
821 | struct vmw_framebuffer *framebuffer, | 825 | struct vmw_framebuffer *framebuffer, |
822 | unsigned flags, unsigned color, | 826 | unsigned flags, unsigned color, |
823 | struct drm_clip_rect *clips, | 827 | struct drm_clip_rect *clips, |
824 | unsigned num_clips, int increment) | 828 | unsigned num_clips, int increment, |
829 | struct vmw_fence_obj **out_fence) | ||
825 | { | 830 | { |
826 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; | 831 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; |
827 | struct drm_clip_rect *clips_ptr; | 832 | struct drm_clip_rect *clips_ptr; |
@@ -894,9 +899,13 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv, | |||
894 | if (hit_num == 0) | 899 | if (hit_num == 0) |
895 | continue; | 900 | continue; |
896 | 901 | ||
902 | /* only return the last fence */ | ||
903 | if (out_fence && *out_fence) | ||
904 | vmw_fence_obj_unreference(out_fence); | ||
905 | |||
897 | fifo_size = sizeof(*blits) * hit_num; | 906 | fifo_size = sizeof(*blits) * hit_num; |
898 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits, | 907 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits, |
899 | fifo_size, 0, NULL); | 908 | fifo_size, 0, NULL, out_fence); |
900 | 909 | ||
901 | if (unlikely(ret != 0)) | 910 | if (unlikely(ret != 0)) |
902 | break; | 911 | break; |
@@ -942,7 +951,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
942 | } else { | 951 | } else { |
943 | ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, | 952 | ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, |
944 | flags, color, | 953 | flags, color, |
945 | clips, num_clips, increment); | 954 | clips, num_clips, increment, NULL); |
946 | } | 955 | } |
947 | 956 | ||
948 | ttm_read_unlock(&vmaster->lock); | 957 | ttm_read_unlock(&vmaster->lock); |
@@ -1296,7 +1305,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, | |||
1296 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; | 1305 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; |
1297 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); | 1306 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); |
1298 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | 1307 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, |
1299 | fifo_size, 0, NULL); | 1308 | fifo_size, 0, NULL, NULL); |
1300 | 1309 | ||
1301 | if (unlikely(ret != 0)) | 1310 | if (unlikely(ret != 0)) |
1302 | break; | 1311 | break; |
@@ -1409,7 +1418,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv, | |||
1409 | fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; | 1418 | fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; |
1410 | 1419 | ||
1411 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size, | 1420 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size, |
1412 | 0, user_fence_rep); | 1421 | 0, user_fence_rep, NULL); |
1413 | 1422 | ||
1414 | kfree(cmd); | 1423 | kfree(cmd); |
1415 | 1424 | ||
@@ -1672,6 +1681,70 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | |||
1672 | return 0; | 1681 | return 0; |
1673 | } | 1682 | } |
1674 | 1683 | ||
1684 | int vmw_du_page_flip(struct drm_crtc *crtc, | ||
1685 | struct drm_framebuffer *fb, | ||
1686 | struct drm_pending_vblank_event *event) | ||
1687 | { | ||
1688 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
1689 | struct drm_framebuffer *old_fb = crtc->fb; | ||
1690 | struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); | ||
1691 | struct drm_file *file_priv = event->base.file_priv; | ||
1692 | struct vmw_fence_obj *fence = NULL; | ||
1693 | struct drm_clip_rect clips; | ||
1694 | int ret; | ||
1695 | |||
1696 | /* require ScreenObject support for page flipping */ | ||
1697 | if (!dev_priv->sou_priv) | ||
1698 | return -ENOSYS; | ||
1699 | |||
1700 | if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) | ||
1701 | return -EINVAL; | ||
1702 | |||
1703 | crtc->fb = fb; | ||
1704 | |||
1705 | /* do a full screen dirty update */ | ||
1706 | clips.x1 = clips.y1 = 0; | ||
1707 | clips.x2 = fb->width; | ||
1708 | clips.y2 = fb->height; | ||
1709 | |||
1710 | if (vfb->dmabuf) | ||
1711 | ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb, | ||
1712 | 0, 0, &clips, 1, 1, &fence); | ||
1713 | else | ||
1714 | ret = do_surface_dirty_sou(dev_priv, file_priv, vfb, | ||
1715 | 0, 0, &clips, 1, 1, &fence); | ||
1716 | |||
1717 | |||
1718 | if (ret != 0) | ||
1719 | goto out_no_fence; | ||
1720 | if (!fence) { | ||
1721 | ret = -EINVAL; | ||
1722 | goto out_no_fence; | ||
1723 | } | ||
1724 | |||
1725 | ret = vmw_event_fence_action_queue(file_priv, fence, | ||
1726 | &event->base, | ||
1727 | &event->event.tv_sec, | ||
1728 | &event->event.tv_usec, | ||
1729 | true); | ||
1730 | |||
1731 | /* | ||
1732 | * No need to hold on to this now. The only cleanup | ||
1733 | * we need to do if we fail is unref the fence. | ||
1734 | */ | ||
1735 | vmw_fence_obj_unreference(&fence); | ||
1736 | |||
1737 | if (vmw_crtc_to_du(crtc)->is_implicit) | ||
1738 | vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc); | ||
1739 | |||
1740 | return ret; | ||
1741 | |||
1742 | out_no_fence: | ||
1743 | crtc->fb = old_fb; | ||
1744 | return ret; | ||
1745 | } | ||
1746 | |||
1747 | |||
1675 | void vmw_du_crtc_save(struct drm_crtc *crtc) | 1748 | void vmw_du_crtc_save(struct drm_crtc *crtc) |
1676 | { | 1749 | { |
1677 | } | 1750 | } |
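vmw_du_page_flip() above implements the flip as a full-screen dirty submission and then queues the caller's vblank event on the resulting fence, so user space still sees the usual DRM flip-complete event once the host has consumed the update. A hedged sketch of the standard libdrm call sequence (build with -ldrm; the device path is a placeholder, and real crtc/fb ids would come from drmModeGetResources()/drmModeAddFB(), omitted here so the ioctl in this form simply fails):

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	uint64_t buf[128];			/* aligned event read buffer */
	uint32_t crtc_id = 0, fb_id = 0;	/* placeholders, see lead-in */
	int fd = open("/dev/dri/card0", O_RDWR);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;

	/* Ask the driver to signal completion through the DRM event queue. */
	if (drmModePageFlip(fd, crtc_id, fb_id,
			    DRM_MODE_PAGE_FLIP_EVENT, NULL /* user_data */)) {
		perror("drmModePageFlip");
		close(fd);
		return 1;
	}

	/* Wait for the flip-complete event and read it off the DRM fd. */
	if (poll(&pfd, 1, 1000) == 1) {
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n >= (ssize_t)sizeof(struct drm_event)) {
			struct drm_event *e = (struct drm_event *)buf;

			printf("event type %u, length %u\n", e->type, e->length);
		}
	}

	close(fd);
	return 0;
}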
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index a4f7f034996a..8184bc5b1730 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -121,6 +121,9 @@ struct vmw_display_unit { | |||
121 | * Shared display unit functions - vmwgfx_kms.c | 121 | * Shared display unit functions - vmwgfx_kms.c |
122 | */ | 122 | */ |
123 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); | 123 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); |
124 | int vmw_du_page_flip(struct drm_crtc *crtc, | ||
125 | struct drm_framebuffer *fb, | ||
126 | struct drm_pending_vblank_event *event); | ||
124 | void vmw_du_crtc_save(struct drm_crtc *crtc); | 127 | void vmw_du_crtc_save(struct drm_crtc *crtc); |
125 | void vmw_du_crtc_restore(struct drm_crtc *crtc); | 128 | void vmw_du_crtc_restore(struct drm_crtc *crtc); |
126 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | 129 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, |
@@ -154,5 +157,10 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv); | |||
154 | int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); | 157 | int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); |
155 | int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, | 158 | int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, |
156 | struct drm_vmw_rect *rects); | 159 | struct drm_vmw_rect *rects); |
160 | bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, | ||
161 | struct drm_crtc *crtc); | ||
162 | void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, | ||
163 | struct drm_crtc *crtc); | ||
164 | |||
157 | 165 | ||
158 | #endif | 166 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index f77b184be807..070fb239c5af 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -354,8 +354,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
354 | INIT_LIST_HEAD(&ldu->active); | 354 | INIT_LIST_HEAD(&ldu->active); |
355 | 355 | ||
356 | ldu->base.pref_active = (unit == 0); | 356 | ldu->base.pref_active = (unit == 0); |
357 | ldu->base.pref_width = 800; | 357 | ldu->base.pref_width = dev_priv->initial_width; |
358 | ldu->base.pref_height = 600; | 358 | ldu->base.pref_height = dev_priv->initial_height; |
359 | ldu->base.pref_mode = NULL; | 359 | ldu->base.pref_mode = NULL; |
360 | ldu->base.is_implicit = true; | 360 | ldu->base.is_implicit = true; |
361 | 361 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 4defdcf1c72e..6deaf2f8bab1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -394,6 +394,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { | |||
394 | .gamma_set = vmw_du_crtc_gamma_set, | 394 | .gamma_set = vmw_du_crtc_gamma_set, |
395 | .destroy = vmw_sou_crtc_destroy, | 395 | .destroy = vmw_sou_crtc_destroy, |
396 | .set_config = vmw_sou_crtc_set_config, | 396 | .set_config = vmw_sou_crtc_set_config, |
397 | .page_flip = vmw_du_page_flip, | ||
397 | }; | 398 | }; |
398 | 399 | ||
399 | /* | 400 | /* |
@@ -448,8 +449,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) | |||
448 | sou->active_implicit = false; | 449 | sou->active_implicit = false; |
449 | 450 | ||
450 | sou->base.pref_active = (unit == 0); | 451 | sou->base.pref_active = (unit == 0); |
451 | sou->base.pref_width = 800; | 452 | sou->base.pref_width = dev_priv->initial_width; |
452 | sou->base.pref_height = 600; | 453 | sou->base.pref_height = dev_priv->initial_height; |
453 | sou->base.pref_mode = NULL; | 454 | sou->base.pref_mode = NULL; |
454 | sou->base.is_implicit = true; | 455 | sou->base.is_implicit = true; |
455 | 456 | ||
@@ -535,3 +536,36 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) | |||
535 | 536 | ||
536 | return 0; | 537 | return 0; |
537 | } | 538 | } |
539 | |||
540 | /** | ||
541 | * Returns whether this unit can be page flipped. | ||
542 | * Must be called with the mode_config mutex held. | ||
543 | */ | ||
544 | bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv, | ||
545 | struct drm_crtc *crtc) | ||
546 | { | ||
547 | struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); | ||
548 | |||
549 | if (!sou->base.is_implicit) | ||
550 | return true; | ||
551 | |||
552 | if (dev_priv->sou_priv->num_implicit != 1) | ||
553 | return false; | ||
554 | |||
555 | return true; | ||
556 | } | ||
557 | |||
558 | /** | ||
559 | * Update the implicit fb to the current fb of this crtc. | ||
560 | * Must be called with the mode_config mutex held. | ||
561 | */ | ||
562 | void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv, | ||
563 | struct drm_crtc *crtc) | ||
564 | { | ||
565 | struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc); | ||
566 | |||
567 | BUG_ON(!sou->base.is_implicit); | ||
568 | |||
569 | dev_priv->sou_priv->implicit_fb = | ||
570 | vmw_framebuffer_to_vfb(sou->base.crtc.fb); | ||
571 | } | ||
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 24f94f4ae395..acba1c686c65 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
@@ -616,10 +616,11 @@ static u32 bit_func(struct i2c_adapter *adap) | |||
616 | 616 | ||
617 | /* -----exported algorithm data: ------------------------------------- */ | 617 | /* -----exported algorithm data: ------------------------------------- */ |
618 | 618 | ||
619 | static const struct i2c_algorithm i2c_bit_algo = { | 619 | const struct i2c_algorithm i2c_bit_algo = { |
620 | .master_xfer = bit_xfer, | 620 | .master_xfer = bit_xfer, |
621 | .functionality = bit_func, | 621 | .functionality = bit_func, |
622 | }; | 622 | }; |
623 | EXPORT_SYMBOL(i2c_bit_algo); | ||
623 | 624 | ||
624 | /* | 625 | /* |
625 | * registering functions to load algorithms at runtime | 626 | * registering functions to load algorithms at runtime |
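Exporting i2c_bit_algo lets a driver that manages its own struct i2c_adapter (for example for DDC/EDID probing) point it straight at the bit-banging algorithm instead of going through i2c_bit_add_bus(). A hedged kernel-style sketch of the wiring; the my_ddc structure and the GPIO callbacks are invented, only the i2c_algo_bit_data and i2c_adapter fields are the real API:

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/jiffies.h>
#include <linux/module.h>

struct my_ddc {
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data bit;
};

/* Hypothetical GPIO accessors for the SDA/SCL lines. */
static void my_set_sda(void *data, int state) { /* drive SDA */ }
static void my_set_scl(void *data, int state) { /* drive SCL */ }
static int  my_get_sda(void *data) { return 1; /* read SDA  */ }
static int  my_get_scl(void *data) { return 1; /* read SCL  */ }

static int my_ddc_init(struct my_ddc *ddc, struct device *parent)
{
	ddc->bit.setsda = my_set_sda;
	ddc->bit.setscl = my_set_scl;
	ddc->bit.getsda = my_get_sda;
	ddc->bit.getscl = my_get_scl;
	ddc->bit.udelay = 20;
	ddc->bit.timeout = usecs_to_jiffies(2200);
	ddc->bit.data = ddc;

	ddc->adapter.owner = THIS_MODULE;
	ddc->adapter.class = I2C_CLASS_DDC;
	ddc->adapter.algo = &i2c_bit_algo;	/* possible now that it is exported */
	ddc->adapter.algo_data = &ddc->bit;
	ddc->adapter.dev.parent = parent;
	snprintf(ddc->adapter.name, sizeof(ddc->adapter.name), "my ddc bus");

	return i2c_add_adapter(&ddc->adapter);
}

Note that i2c_bit_add_bus() additionally sanity-checks the bus lines and fills in defaults; assigning the algorithm directly skips that step.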
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index ac9141b85356..c6ce416ab587 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -1665,6 +1665,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) | |||
1665 | if (ret) | 1665 | if (ret) |
1666 | return -EINVAL; | 1666 | return -EINVAL; |
1667 | 1667 | ||
1668 | unlink_framebuffer(fb_info); | ||
1668 | if (fb_info->pixmap.addr && | 1669 | if (fb_info->pixmap.addr && |
1669 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) | 1670 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) |
1670 | kfree(fb_info->pixmap.addr); | 1671 | kfree(fb_info->pixmap.addr); |
@@ -1672,7 +1673,6 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) | |||
1672 | registered_fb[i] = NULL; | 1673 | registered_fb[i] = NULL; |
1673 | num_registered_fb--; | 1674 | num_registered_fb--; |
1674 | fb_cleanup_device(fb_info); | 1675 | fb_cleanup_device(fb_info); |
1675 | device_destroy(fb_class, MKDEV(FB_MAJOR, i)); | ||
1676 | event.info = fb_info; | 1676 | event.info = fb_info; |
1677 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); | 1677 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); |
1678 | 1678 | ||
@@ -1681,6 +1681,22 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) | |||
1681 | return 0; | 1681 | return 0; |
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | int unlink_framebuffer(struct fb_info *fb_info) | ||
1685 | { | ||
1686 | int i; | ||
1687 | |||
1688 | i = fb_info->node; | ||
1689 | if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) | ||
1690 | return -EINVAL; | ||
1691 | |||
1692 | if (fb_info->dev) { | ||
1693 | device_destroy(fb_class, MKDEV(FB_MAJOR, i)); | ||
1694 | fb_info->dev = NULL; | ||
1695 | } | ||
1696 | return 0; | ||
1697 | } | ||
1698 | EXPORT_SYMBOL(unlink_framebuffer); | ||
1699 | |||
1684 | void remove_conflicting_framebuffers(struct apertures_struct *a, | 1700 | void remove_conflicting_framebuffers(struct apertures_struct *a, |
1685 | const char *name, bool primary) | 1701 | const char *name, bool primary) |
1686 | { | 1702 | { |
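unlink_framebuffer() splits "remove the /dev/fbN device node" out of do_unregister_framebuffer(), so a hot-unpluggable driver can make the device disappear at disconnect time while deferring the full unregister (and the freeing of fb_info) until the last user is done, as the udlfb hunk below does. A hedged sketch of such a disconnect handler; everything except unlink_framebuffer() and unregister_framebuffer() is hypothetical:

#include <linux/fb.h>
#include <linux/usb.h>

/* Hypothetical hot-unplug path for a USB framebuffer driver. */
static void my_usbfb_disconnect(struct usb_interface *interface)
{
	struct fb_info *info = usb_get_intfdata(interface);

	/* Make /dev/fbN and its device node disappear right away ... */
	unlink_framebuffer(info);
	usb_set_intfdata(interface, NULL);

	/*
	 * ... but keep fb_info itself alive: clients that still have the
	 * device open or mmapped are served from memory, and the final
	 * unregister_framebuffer()/framebuffer_release() runs from the
	 * driver's last fb_release() in this hypothetical scheme.
	 */
}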
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c index a19773149bd7..a40c05ebbdc2 100644 --- a/drivers/video/udlfb.c +++ b/drivers/video/udlfb.c | |||
@@ -1739,7 +1739,7 @@ static void dlfb_usb_disconnect(struct usb_interface *interface) | |||
1739 | for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) | 1739 | for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) |
1740 | device_remove_file(info->dev, &fb_device_attrs[i]); | 1740 | device_remove_file(info->dev, &fb_device_attrs[i]); |
1741 | device_remove_bin_file(info->dev, &edid_attr); | 1741 | device_remove_bin_file(info->dev, &edid_attr); |
1742 | 1742 | unlink_framebuffer(info); | |
1743 | usb_set_intfdata(interface, NULL); | 1743 | usb_set_intfdata(interface, NULL); |
1744 | 1744 | ||
1745 | /* if clients still have us open, will be freed on last close */ | 1745 | /* if clients still have us open, will be freed on last close */ |
diff --git a/include/drm/drm.h b/include/drm/drm.h index 49d94ede2ec2..34a7b89fd006 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h | |||
@@ -761,6 +761,8 @@ struct drm_event_vblank { | |||
761 | 761 | ||
762 | #define DRM_CAP_DUMB_BUFFER 0x1 | 762 | #define DRM_CAP_DUMB_BUFFER 0x1 |
763 | #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 | 763 | #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 |
764 | #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 | ||
765 | #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 | ||
764 | 766 | ||
765 | /* typedef area */ | 767 | /* typedef area */ |
766 | #ifndef __KERNEL__ | 768 | #ifndef __KERNEL__ |
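The two new capability ids let a KMS driver advertise which pixel depth dumb-buffer users should allocate and whether a shadow (system-memory) framebuffer is preferable. User space reads them through the existing GET_CAP ioctl; a hedged libdrm sketch (build with -ldrm, device path is a placeholder, and drivers or kernels without the capability simply fail the query):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	uint64_t depth = 0, shadow = 0;
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder node */

	if (fd < 0)
		return 1;

	/* Treat a failed query as "no preference". */
	if (drmGetCap(fd, DRM_CAP_DUMB_PREFERRED_DEPTH, &depth) == 0)
		printf("preferred dumb depth: %llu\n", (unsigned long long)depth);
	if (drmGetCap(fd, DRM_CAP_DUMB_PREFER_SHADOW, &shadow) == 0)
		printf("prefer shadow fb: %llu\n", (unsigned long long)shadow);

	close(fd);
	return 0;
}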
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 92f0981b5fb8..574bd1c81ebd 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1170,6 +1170,8 @@ struct drm_device { | |||
1170 | struct idr object_name_idr; | 1170 | struct idr object_name_idr; |
1171 | /*@} */ | 1171 | /*@} */ |
1172 | int switch_power_state; | 1172 | int switch_power_state; |
1173 | |||
1174 | atomic_t unplugged; /* device has been unplugged or gone away */ | ||
1173 | }; | 1175 | }; |
1174 | 1176 | ||
1175 | #define DRM_SWITCH_POWER_ON 0 | 1177 | #define DRM_SWITCH_POWER_ON 0 |
@@ -1235,6 +1237,19 @@ static inline int drm_mtrr_del(int handle, unsigned long offset, | |||
1235 | } | 1237 | } |
1236 | #endif | 1238 | #endif |
1237 | 1239 | ||
1240 | static inline void drm_device_set_unplugged(struct drm_device *dev) | ||
1241 | { | ||
1242 | smp_wmb(); | ||
1243 | atomic_set(&dev->unplugged, 1); | ||
1244 | } | ||
1245 | |||
1246 | static inline int drm_device_is_unplugged(struct drm_device *dev) | ||
1247 | { | ||
1248 | int ret = atomic_read(&dev->unplugged); | ||
1249 | smp_rmb(); | ||
1250 | return ret; | ||
1251 | } | ||
1252 | |||
1238 | /******************************************************************/ | 1253 | /******************************************************************/ |
1239 | /** \name Internal function definitions */ | 1254 | /** \name Internal function definitions */ |
1240 | /*@{*/ | 1255 | /*@{*/ |
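drm_device_set_unplugged() and drm_device_is_unplugged() give hot-unpluggable hardware a simple flag with a paired write/read barrier: the disconnect path sets it once, and entry points test it so they can fail fast instead of touching a device that is gone. A hedged sketch of the consumer side; the ioctl handler and my_hw_submit() below are hypothetical, only the two helpers are from this header:

#include "drmP.h"

static int my_hw_submit(struct drm_device *dev, void *data)
{
	return 0;		/* stand-in for real command submission */
}

/* Hypothetical ioctl handler: bail out early once the device is gone. */
static int my_driver_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	return my_hw_submit(dev, data);
}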
@@ -1264,11 +1279,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); | |||
1264 | 1279 | ||
1265 | /* Memory management support (drm_memory.h) */ | 1280 | /* Memory management support (drm_memory.h) */ |
1266 | #include "drm_memory.h" | 1281 | #include "drm_memory.h" |
1267 | extern void drm_mem_init(void); | ||
1268 | extern int drm_mem_info(char *buf, char **start, off_t offset, | ||
1269 | int request, int *eof, void *data); | ||
1270 | extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); | ||
1271 | |||
1272 | extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); | 1282 | extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); |
1273 | extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); | 1283 | extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); |
1274 | extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, | 1284 | extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, |
@@ -1383,12 +1393,8 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev, | |||
1383 | /* IRQ support (drm_irq.h) */ | 1393 | /* IRQ support (drm_irq.h) */ |
1384 | extern int drm_control(struct drm_device *dev, void *data, | 1394 | extern int drm_control(struct drm_device *dev, void *data, |
1385 | struct drm_file *file_priv); | 1395 | struct drm_file *file_priv); |
1386 | extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); | ||
1387 | extern int drm_irq_install(struct drm_device *dev); | 1396 | extern int drm_irq_install(struct drm_device *dev); |
1388 | extern int drm_irq_uninstall(struct drm_device *dev); | 1397 | extern int drm_irq_uninstall(struct drm_device *dev); |
1389 | extern void drm_driver_irq_preinstall(struct drm_device *dev); | ||
1390 | extern void drm_driver_irq_postinstall(struct drm_device *dev); | ||
1391 | extern void drm_driver_irq_uninstall(struct drm_device *dev); | ||
1392 | 1398 | ||
1393 | extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); | 1399 | extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); |
1394 | extern int drm_wait_vblank(struct drm_device *dev, void *data, | 1400 | extern int drm_wait_vblank(struct drm_device *dev, void *data, |
@@ -1464,6 +1470,7 @@ extern void drm_master_put(struct drm_master **master); | |||
1464 | 1470 | ||
1465 | extern void drm_put_dev(struct drm_device *dev); | 1471 | extern void drm_put_dev(struct drm_device *dev); |
1466 | extern int drm_put_minor(struct drm_minor **minor); | 1472 | extern int drm_put_minor(struct drm_minor **minor); |
1473 | extern void drm_unplug_dev(struct drm_device *dev); | ||
1467 | extern unsigned int drm_debug; | 1474 | extern unsigned int drm_debug; |
1468 | 1475 | ||
1469 | extern unsigned int drm_vblank_offdelay; | 1476 | extern unsigned int drm_vblank_offdelay; |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 4cd4be26722c..e250eda4e3a8 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -121,7 +121,7 @@ struct drm_display_mode { | |||
121 | char name[DRM_DISPLAY_MODE_LEN]; | 121 | char name[DRM_DISPLAY_MODE_LEN]; |
122 | 122 | ||
123 | enum drm_mode_status status; | 123 | enum drm_mode_status status; |
124 | int type; | 124 | unsigned int type; |
125 | 125 | ||
126 | /* Proposed mode values */ | 126 | /* Proposed mode values */ |
127 | int clock; /* in kHz */ | 127 | int clock; /* in kHz */ |
@@ -257,7 +257,7 @@ struct drm_property_blob { | |||
257 | struct drm_mode_object base; | 257 | struct drm_mode_object base; |
258 | struct list_head head; | 258 | struct list_head head; |
259 | unsigned int length; | 259 | unsigned int length; |
260 | void *data; | 260 | unsigned char data[]; |
261 | }; | 261 | }; |
262 | 262 | ||
263 | struct drm_property_enum { | 263 | struct drm_property_enum { |
@@ -796,6 +796,9 @@ struct drm_mode_config { | |||
796 | struct drm_property *scaling_mode_property; | 796 | struct drm_property *scaling_mode_property; |
797 | struct drm_property *dithering_mode_property; | 797 | struct drm_property *dithering_mode_property; |
798 | struct drm_property *dirty_info_property; | 798 | struct drm_property *dirty_info_property; |
799 | |||
800 | /* dumb ioctl parameters */ | ||
801 | uint32_t preferred_depth, prefer_shadow; | ||
799 | }; | 802 | }; |
800 | 803 | ||
801 | #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) | 804 | #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
@@ -807,23 +810,29 @@ struct drm_mode_config { | |||
807 | #define obj_to_blob(x) container_of(x, struct drm_property_blob, base) | 810 | #define obj_to_blob(x) container_of(x, struct drm_property_blob, base) |
808 | #define obj_to_plane(x) container_of(x, struct drm_plane, base) | 811 | #define obj_to_plane(x) container_of(x, struct drm_plane, base) |
809 | 812 | ||
813 | struct drm_prop_enum_list { | ||
814 | int type; | ||
815 | char *name; | ||
816 | }; | ||
810 | 817 | ||
811 | extern void drm_crtc_init(struct drm_device *dev, | 818 | extern int drm_crtc_init(struct drm_device *dev, |
812 | struct drm_crtc *crtc, | 819 | struct drm_crtc *crtc, |
813 | const struct drm_crtc_funcs *funcs); | 820 | const struct drm_crtc_funcs *funcs); |
814 | extern void drm_crtc_cleanup(struct drm_crtc *crtc); | 821 | extern void drm_crtc_cleanup(struct drm_crtc *crtc); |
815 | 822 | ||
816 | extern void drm_connector_init(struct drm_device *dev, | 823 | extern int drm_connector_init(struct drm_device *dev, |
817 | struct drm_connector *connector, | 824 | struct drm_connector *connector, |
818 | const struct drm_connector_funcs *funcs, | 825 | const struct drm_connector_funcs *funcs, |
819 | int connector_type); | 826 | int connector_type); |
820 | 827 | ||
821 | extern void drm_connector_cleanup(struct drm_connector *connector); | 828 | extern void drm_connector_cleanup(struct drm_connector *connector); |
829 | /* helper to unplug all connectors from sysfs for device */ | ||
830 | extern void drm_connector_unplug_all(struct drm_device *dev); | ||
822 | 831 | ||
823 | extern void drm_encoder_init(struct drm_device *dev, | 832 | extern int drm_encoder_init(struct drm_device *dev, |
824 | struct drm_encoder *encoder, | 833 | struct drm_encoder *encoder, |
825 | const struct drm_encoder_funcs *funcs, | 834 | const struct drm_encoder_funcs *funcs, |
826 | int encoder_type); | 835 | int encoder_type); |
827 | 836 | ||
828 | extern int drm_plane_init(struct drm_device *dev, | 837 | extern int drm_plane_init(struct drm_device *dev, |
829 | struct drm_plane *plane, | 838 | struct drm_plane *plane, |
@@ -848,6 +857,7 @@ extern struct edid *drm_get_edid(struct drm_connector *connector, | |||
848 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); | 857 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
849 | extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); | 858 | extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); |
850 | extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); | 859 | extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); |
860 | extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); | ||
851 | extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | 861 | extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
852 | const struct drm_display_mode *mode); | 862 | const struct drm_display_mode *mode); |
853 | extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); | 863 | extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); |
@@ -862,7 +872,7 @@ extern int drm_mode_height(struct drm_display_mode *mode); | |||
862 | /* for us by fb module */ | 872 | /* for us by fb module */ |
863 | extern int drm_mode_attachmode_crtc(struct drm_device *dev, | 873 | extern int drm_mode_attachmode_crtc(struct drm_device *dev, |
864 | struct drm_crtc *crtc, | 874 | struct drm_crtc *crtc, |
865 | struct drm_display_mode *mode); | 875 | const struct drm_display_mode *mode); |
866 | extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode); | 876 | extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode); |
867 | 877 | ||
868 | extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); | 878 | extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); |
@@ -904,6 +914,13 @@ extern int drm_connector_attach_property(struct drm_connector *connector, | |||
904 | struct drm_property *property, uint64_t init_val); | 914 | struct drm_property *property, uint64_t init_val); |
905 | extern struct drm_property *drm_property_create(struct drm_device *dev, int flags, | 915 | extern struct drm_property *drm_property_create(struct drm_device *dev, int flags, |
906 | const char *name, int num_values); | 916 | const char *name, int num_values); |
917 | extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, | ||
918 | const char *name, | ||
919 | const struct drm_prop_enum_list *props, | ||
920 | int num_values); | ||
921 | struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, | ||
922 | const char *name, | ||
923 | uint64_t min, uint64_t max); | ||
907 | extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); | 924 | extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); |
908 | extern int drm_property_add_enum(struct drm_property *property, int index, | 925 | extern int drm_property_add_enum(struct drm_property *property, int index, |
909 | uint64_t value, const char *name); | 926 | uint64_t value, const char *name); |
@@ -919,7 +936,7 @@ extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, | |||
919 | struct drm_encoder *encoder); | 936 | struct drm_encoder *encoder); |
920 | extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, | 937 | extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, |
921 | struct drm_encoder *encoder); | 938 | struct drm_encoder *encoder); |
922 | extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, | 939 | extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
923 | int gamma_size); | 940 | int gamma_size); |
924 | extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, | 941 | extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
925 | uint32_t id, uint32_t type); | 942 | uint32_t id, uint32_t type); |
@@ -995,6 +1012,7 @@ extern int drm_add_modes_noedid(struct drm_connector *connector, | |||
995 | int hdisplay, int vdisplay); | 1012 | int hdisplay, int vdisplay); |
996 | 1013 | ||
997 | extern int drm_edid_header_is_valid(const u8 *raw_edid); | 1014 | extern int drm_edid_header_is_valid(const u8 *raw_edid); |
1015 | extern bool drm_edid_block_valid(u8 *raw_edid); | ||
998 | extern bool drm_edid_is_valid(struct edid *edid); | 1016 | extern bool drm_edid_is_valid(struct edid *edid); |
999 | struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, | 1017 | struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, |
1000 | int hsize, int vsize, int fresh); | 1018 | int hsize, int vsize, int fresh); |
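A minimal sketch of how a driver might use the property helpers and int-returning init functions added above. The "scaling mode" property name, its enum values, and the example_output_init() wrapper are illustrative only; the helper signatures come from the declarations in this hunk.

static const struct drm_prop_enum_list scaling_modes[] = {
	{ 0, "None" },
	{ 1, "Full" },
	{ 2, "Aspect" },
};

static int example_output_init(struct drm_device *dev,
			       struct drm_connector *connector,
			       const struct drm_connector_funcs *funcs)
{
	struct drm_property *prop;
	int ret;

	ret = drm_connector_init(dev, connector, funcs,
				 DRM_MODE_CONNECTOR_Unknown);
	if (ret)	/* connector/crtc/encoder init can now fail */
		return ret;

	prop = drm_property_create_enum(dev, 0, "scaling mode",
					scaling_modes,
					ARRAY_SIZE(scaling_modes));
	if (prop)
		drm_connector_attach_property(connector, prop, 0);

	return 0;
}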
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 74ce91684629..bcb9a66baa8c 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
@@ -238,5 +238,6 @@ int drm_av_sync_delay(struct drm_connector *connector, | |||
238 | struct drm_display_mode *mode); | 238 | struct drm_display_mode *mode); |
239 | struct drm_connector *drm_select_eld(struct drm_encoder *encoder, | 239 | struct drm_connector *drm_select_eld(struct drm_encoder *encoder, |
240 | struct drm_display_mode *mode); | 240 | struct drm_display_mode *mode); |
241 | int drm_load_edid_firmware(struct drm_connector *connector); | ||
241 | 242 | ||
242 | #endif /* __DRM_EDID_H__ */ | 243 | #endif /* __DRM_EDID_H__ */ |
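Sketch of a connector ->get_modes() hook that tries a firmware-provided EDID first. The diff only shows the declaration, so the sketch assumes drm_load_edid_firmware() returns the number of modes it added (0 when no override is configured); example_ddc_adapter() is a made-up helper returning the connector's DDC i2c adapter.

static int example_get_modes(struct drm_connector *connector)
{
	struct edid *edid;
	int count;

	count = drm_load_edid_firmware(connector);
	if (count)		/* firmware EDID override was used */
		return count;

	edid = drm_get_edid(connector, example_ddc_adapter(connector));
	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return count;
}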
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 6e3076ad646e..5120b01c2eeb 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h | |||
@@ -35,7 +35,6 @@ struct drm_fb_helper; | |||
35 | #include <linux/kgdb.h> | 35 | #include <linux/kgdb.h> |
36 | 36 | ||
37 | struct drm_fb_helper_crtc { | 37 | struct drm_fb_helper_crtc { |
38 | uint32_t crtc_id; | ||
39 | struct drm_mode_set mode_set; | 38 | struct drm_mode_set mode_set; |
40 | struct drm_display_mode *desired_mode; | 39 | struct drm_display_mode *desired_mode; |
41 | }; | 40 | }; |
@@ -74,7 +73,6 @@ struct drm_fb_helper { | |||
74 | int connector_count; | 73 | int connector_count; |
75 | struct drm_fb_helper_connector **connector_info; | 74 | struct drm_fb_helper_connector **connector_info; |
76 | struct drm_fb_helper_funcs *funcs; | 75 | struct drm_fb_helper_funcs *funcs; |
77 | int conn_limit; | ||
78 | struct fb_info *fbdev; | 76 | struct fb_info *fbdev; |
79 | u32 pseudo_palette[17]; | 77 | u32 pseudo_palette[17]; |
80 | struct list_head kernel_fb_list; | 78 | struct list_head kernel_fb_list; |
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index 1ed3aae893a5..3963116083ae 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h | |||
@@ -74,16 +74,37 @@ struct drm_exynos_gem_mmap { | |||
74 | uint64_t mapped; | 74 | uint64_t mapped; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | /** | ||
78 | * A structure for user connection request of virtual display. | ||
79 | * | ||
80 | * @connection: indicates whether the user requests connection or disconnection. | ||
81 | * @extensions: if this value is 1, the vidi driver needs an additional | ||
82 | * 128 bytes of EDID data. | ||
83 | * @edid: pointer to the EDID data supplied from user space. | ||
84 | */ | ||
85 | struct drm_exynos_vidi_connection { | ||
86 | unsigned int connection; | ||
87 | unsigned int extensions; | ||
88 | uint64_t *edid; | ||
89 | }; | ||
90 | |||
77 | struct drm_exynos_plane_set_zpos { | 91 | struct drm_exynos_plane_set_zpos { |
78 | __u32 plane_id; | 92 | __u32 plane_id; |
79 | __s32 zpos; | 93 | __s32 zpos; |
80 | }; | 94 | }; |
81 | 95 | ||
96 | /* memory type definitions. */ | ||
97 | enum e_drm_exynos_gem_mem_type { | ||
98 | /* Physically non-contiguous memory. */ | ||
99 | EXYNOS_BO_NONCONTIG = 1 << 0 | ||
100 | }; | ||
101 | |||
82 | #define DRM_EXYNOS_GEM_CREATE 0x00 | 102 | #define DRM_EXYNOS_GEM_CREATE 0x00 |
83 | #define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 | 103 | #define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 |
84 | #define DRM_EXYNOS_GEM_MMAP 0x02 | 104 | #define DRM_EXYNOS_GEM_MMAP 0x02 |
85 | /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ | 105 | /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ |
86 | #define DRM_EXYNOS_PLANE_SET_ZPOS 0x06 | 106 | #define DRM_EXYNOS_PLANE_SET_ZPOS 0x06 |
107 | #define DRM_EXYNOS_VIDI_CONNECTION 0x07 | ||
87 | 108 | ||
88 | #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ | 109 | #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ |
89 | DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) | 110 | DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) |
@@ -97,6 +118,9 @@ struct drm_exynos_plane_set_zpos { | |||
97 | #define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \ | 118 | #define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \ |
98 | DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos) | 119 | DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos) |
99 | 120 | ||
121 | #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \ | ||
122 | DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection) | ||
123 | |||
100 | #ifdef __KERNEL__ | 124 | #ifdef __KERNEL__ |
101 | 125 | ||
102 | /** | 126 | /** |
@@ -147,11 +171,13 @@ struct exynos_drm_common_hdmi_pd { | |||
147 | * @timing: default video mode for initializing | 171 | * @timing: default video mode for initializing |
148 | * @default_win: default window layer number to be used for UI. | 172 | * @default_win: default window layer number to be used for UI. |
149 | * @bpp: default bit per pixel. | 173 | * @bpp: default bit per pixel. |
174 | * @is_v13: set if the HDMI version is 1.3. | ||
150 | */ | 175 | */ |
151 | struct exynos_drm_hdmi_pdata { | 176 | struct exynos_drm_hdmi_pdata { |
152 | struct fb_videomode timing; | 177 | struct fb_videomode timing; |
153 | unsigned int default_win; | 178 | unsigned int default_win; |
154 | unsigned int bpp; | 179 | unsigned int bpp; |
180 | unsigned int is_v13:1; | ||
155 | }; | 181 | }; |
156 | 182 | ||
157 | #endif /* __KERNEL__ */ | 183 | #endif /* __KERNEL__ */ |
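Userspace sketch of the new VIDI connection ioctl, assuming libdrm's drmIoctl() and an open exynos DRM fd. The header path and the contents of the EDID buffer are assumptions; per the kernel-doc above, setting .extensions to 1 would require an extra 128-byte EDID block.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "exynos_drm.h"	/* assumed local copy of this header */

static int vidi_connect(int fd, uint64_t *edid_block)
{
	struct drm_exynos_vidi_connection conn;

	memset(&conn, 0, sizeof(conn));
	conn.connection = 1;	/* connect the virtual display */
	conn.extensions = 0;	/* base 128-byte EDID only */
	conn.edid = edid_block;

	return drmIoctl(fd, DRM_IOCTL_EXYNOS_VIDI_CONNECTION, &conn);
}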
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h index 113686785717..884613ee00ad 100644 --- a/include/drm/gma_drm.h +++ b/include/drm/gma_drm.h | |||
@@ -83,9 +83,9 @@ struct drm_psb_gem_mmap { | |||
83 | #define DRM_GMA_GAMMA 0x04 /* Set gamma table */ | 83 | #define DRM_GMA_GAMMA 0x04 /* Set gamma table */ |
84 | #define DRM_GMA_ADB 0x05 /* Get backlight */ | 84 | #define DRM_GMA_ADB 0x05 /* Get backlight */ |
85 | #define DRM_GMA_DPST_BL 0x06 /* Set backlight */ | 85 | #define DRM_GMA_DPST_BL 0x06 /* Set backlight */ |
86 | #define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x1 /* CRTC to physical pipe# */ | ||
87 | #define DRM_GMA_MODE_OPERATION 0x07 /* Mode validation/DC set */ | 86 | #define DRM_GMA_MODE_OPERATION 0x07 /* Mode validation/DC set */ |
88 | #define PSB_MODE_OPERATION_MODE_VALID 0x01 | 87 | #define PSB_MODE_OPERATION_MODE_VALID 0x01 |
88 | #define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x08 /* CRTC to physical pipe# */ | ||
89 | 89 | ||
90 | 90 | ||
91 | #endif | 91 | #endif |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 924f6a454fed..da929bb5b788 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -296,6 +296,7 @@ typedef struct drm_i915_irq_wait { | |||
296 | #define I915_PARAM_HAS_EXEC_CONSTANTS 14 | 296 | #define I915_PARAM_HAS_EXEC_CONSTANTS 14 |
297 | #define I915_PARAM_HAS_RELAXED_DELTA 15 | 297 | #define I915_PARAM_HAS_RELAXED_DELTA 15 |
298 | #define I915_PARAM_HAS_GEN7_SOL_RESET 16 | 298 | #define I915_PARAM_HAS_GEN7_SOL_RESET 16 |
299 | #define I915_PARAM_HAS_LLC 17 | ||
299 | 300 | ||
300 | typedef struct drm_i915_getparam { | 301 | typedef struct drm_i915_getparam { |
301 | int param; | 302 | int param; |
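The new I915_PARAM_HAS_LLC value is queried through the existing GETPARAM ioctl. A userspace sketch, assuming libdrm's drmIoctl() and an open i915 DRM fd; the header path and the treatment of older kernels are assumptions.

#include <xf86drm.h>
#include "i915_drm.h"	/* assumed local copy of this header */

static int i915_has_llc(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_LLC,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* older kernels reject unknown params */
	return value;
}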
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index b174620cc9b3..0a0001b9dc78 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h | |||
@@ -15,6 +15,10 @@ const struct intel_gtt { | |||
15 | unsigned int needs_dmar : 1; | 15 | unsigned int needs_dmar : 1; |
16 | /* Whether we idle the gpu before mapping/unmapping */ | 16 | /* Whether we idle the gpu before mapping/unmapping */ |
17 | unsigned int do_idle_maps : 1; | 17 | unsigned int do_idle_maps : 1; |
18 | /* Share the scratch page dma with ppgtts. */ | ||
19 | dma_addr_t scratch_page_dma; | ||
20 | /* for ppgtt PDE access */ | ||
21 | u32 __iomem *gtt; | ||
18 | } *intel_gtt_get(void); | 22 | } *intel_gtt_get(void); |
19 | 23 | ||
20 | void intel_gtt_chipset_flush(void); | 24 | void intel_gtt_chipset_flush(void); |
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index b55da40953fd..cb2f0c362a13 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
@@ -804,13 +804,23 @@ struct drm_radeon_gem_create { | |||
804 | uint32_t flags; | 804 | uint32_t flags; |
805 | }; | 805 | }; |
806 | 806 | ||
807 | #define RADEON_TILING_MACRO 0x1 | 807 | #define RADEON_TILING_MACRO 0x1 |
808 | #define RADEON_TILING_MICRO 0x2 | 808 | #define RADEON_TILING_MICRO 0x2 |
809 | #define RADEON_TILING_SWAP_16BIT 0x4 | 809 | #define RADEON_TILING_SWAP_16BIT 0x4 |
810 | #define RADEON_TILING_SWAP_32BIT 0x8 | 810 | #define RADEON_TILING_SWAP_32BIT 0x8 |
811 | #define RADEON_TILING_SURFACE 0x10 /* this object requires a surface | 811 | /* this object requires a surface when mapped - i.e. front buffer */ |
812 | * when mapped - i.e. front buffer */ | 812 | #define RADEON_TILING_SURFACE 0x10 |
813 | #define RADEON_TILING_MICRO_SQUARE 0x20 | 813 | #define RADEON_TILING_MICRO_SQUARE 0x20 |
814 | #define RADEON_TILING_EG_BANKW_SHIFT 8 | ||
815 | #define RADEON_TILING_EG_BANKW_MASK 0xf | ||
816 | #define RADEON_TILING_EG_BANKH_SHIFT 12 | ||
817 | #define RADEON_TILING_EG_BANKH_MASK 0xf | ||
818 | #define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16 | ||
819 | #define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf | ||
820 | #define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24 | ||
821 | #define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf | ||
822 | #define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28 | ||
823 | #define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf | ||
814 | 824 | ||
815 | struct drm_radeon_gem_set_tiling { | 825 | struct drm_radeon_gem_set_tiling { |
816 | uint32_t handle; | 826 | uint32_t handle; |
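Sketch of packing the new Evergreen tiling parameters into the flags word that is passed via drm_radeon_gem_set_tiling. Only the shift/mask encoding comes from the defines above; the parameter names and the helper itself are illustrative.

static uint32_t radeon_eg_tiling_flags(uint32_t base_flags,
				       uint32_t bankw, uint32_t bankh,
				       uint32_t mtaspect, uint32_t tile_split)
{
	uint32_t flags = base_flags;

	flags |= (bankw & RADEON_TILING_EG_BANKW_MASK)
			<< RADEON_TILING_EG_BANKW_SHIFT;
	flags |= (bankh & RADEON_TILING_EG_BANKH_MASK)
			<< RADEON_TILING_EG_BANKH_SHIFT;
	flags |= (mtaspect & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK)
			<< RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
	flags |= (tile_split & RADEON_TILING_EG_TILE_SPLIT_MASK)
			<< RADEON_TILING_EG_TILE_SPLIT_SHIFT;
	return flags;
}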
diff --git a/include/linux/fb.h b/include/linux/fb.h index c18122f40543..a395b8c76992 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -1003,6 +1003,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, | |||
1003 | /* drivers/video/fbmem.c */ | 1003 | /* drivers/video/fbmem.c */ |
1004 | extern int register_framebuffer(struct fb_info *fb_info); | 1004 | extern int register_framebuffer(struct fb_info *fb_info); |
1005 | extern int unregister_framebuffer(struct fb_info *fb_info); | 1005 | extern int unregister_framebuffer(struct fb_info *fb_info); |
1006 | extern int unlink_framebuffer(struct fb_info *fb_info); | ||
1006 | extern void remove_conflicting_framebuffers(struct apertures_struct *a, | 1007 | extern void remove_conflicting_framebuffers(struct apertures_struct *a, |
1007 | const char *name, bool primary); | 1008 | const char *name, bool primary); |
1008 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); | 1009 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
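Sketch of a hot-unpluggable fbdev driver's disconnect path using the new unlink_framebuffer() export. Assumption not shown in the diff: it removes the /dev/fbN node early while the driver keeps its fb_info alive, deferring unregister_framebuffer() until the last user closes the device.

static void example_usb_fb_disconnect(struct usb_interface *interface)
{
	struct fb_info *info = usb_get_intfdata(interface);

	unlink_framebuffer(info);	/* device node goes away now */
	/* defer unregister_framebuffer(info) to the last ->fb_release() */
}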
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index 4f98148c11c3..584ffa0f3282 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h | |||
@@ -49,5 +49,6 @@ struct i2c_algo_bit_data { | |||
49 | 49 | ||
50 | int i2c_bit_add_bus(struct i2c_adapter *); | 50 | int i2c_bit_add_bus(struct i2c_adapter *); |
51 | int i2c_bit_add_numbered_bus(struct i2c_adapter *); | 51 | int i2c_bit_add_numbered_bus(struct i2c_adapter *); |
52 | extern const struct i2c_algorithm i2c_bit_algo; | ||
52 | 53 | ||
53 | #endif /* _LINUX_I2C_ALGO_BIT_H */ | 54 | #endif /* _LINUX_I2C_ALGO_BIT_H */ |
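One possible consumer of the newly exported i2c_bit_algo: running a bit-banged transfer directly on an adapter, for example to probe for a DDC EEPROM at 0x50 before deciding how to register the bus. This is a guess at the intended use, not something stated in the diff.

static bool example_probe_ddc(struct i2c_adapter *adap)
{
	u8 offset = 0, byte;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &byte },
	};

	/* master_xfer returns the number of messages processed on success */
	return i2c_bit_algo.master_xfer(adap, msgs, ARRAY_SIZE(msgs)) == 2;
}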
diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h new file mode 100644 index 000000000000..cd6a51c71e7e --- /dev/null +++ b/include/linux/i2c/tc35876x.h | |||
@@ -0,0 +1,11 @@ | |||
1 | |||
2 | #ifndef _TC35876X_H | ||
3 | #define _TC35876X_H | ||
4 | |||
5 | struct tc35876x_platform_data { | ||
6 | int gpio_bridge_reset; | ||
7 | int gpio_panel_bl_en; | ||
8 | int gpio_panel_vadd; | ||
9 | }; | ||
10 | |||
11 | #endif /* _TC35876X_H */ | ||
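Sketch of how a board file might hand GPIO numbers to the tc35876x bridge driver through this new platform data. The GPIO numbers, the I2C address, the bus number, and the "tc35876x" device name are assumptions, not taken from the diff.

#include <linux/i2c.h>
#include <linux/i2c/tc35876x.h>

static struct tc35876x_platform_data example_tc35876x_pdata = {
	.gpio_bridge_reset = 94,	/* board-specific, made up */
	.gpio_panel_bl_en  = 45,
	.gpio_panel_vadd   = 110,
};

static struct i2c_board_info example_tc35876x_info __initdata = {
	I2C_BOARD_INFO("tc35876x", 0x2c),
	.platform_data = &example_tc35876x_pdata,
};

/* registered from board init, e.g.:
 *	i2c_register_board_info(2, &example_tc35876x_info, 1);
 */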