28 files changed, 3631 insertions(+), 3343 deletions(-)
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12cfed93..120490949997 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@ struct agp_bridge_data {
178 | #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) | 178 | #define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page) |
179 | 179 | ||
180 | 180 | ||
181 | /* Intel registers */ | ||
182 | #define INTEL_APSIZE 0xb4 | ||
183 | #define INTEL_ATTBASE 0xb8 | ||
184 | #define INTEL_AGPCTRL 0xb0 | ||
185 | #define INTEL_NBXCFG 0x50 | ||
186 | #define INTEL_ERRSTS 0x91 | ||
187 | |||
188 | /* Intel i830 registers */ | ||
189 | #define I830_GMCH_CTRL 0x52 | ||
190 | #define I830_GMCH_ENABLED 0x4 | ||
191 | #define I830_GMCH_MEM_MASK 0x1 | ||
192 | #define I830_GMCH_MEM_64M 0x1 | ||
193 | #define I830_GMCH_MEM_128M 0 | ||
194 | #define I830_GMCH_GMS_MASK 0x70 | ||
195 | #define I830_GMCH_GMS_DISABLED 0x00 | ||
196 | #define I830_GMCH_GMS_LOCAL 0x10 | ||
197 | #define I830_GMCH_GMS_STOLEN_512 0x20 | ||
198 | #define I830_GMCH_GMS_STOLEN_1024 0x30 | ||
199 | #define I830_GMCH_GMS_STOLEN_8192 0x40 | ||
200 | #define I830_RDRAM_CHANNEL_TYPE 0x03010 | ||
201 | #define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) | ||
202 | #define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) | ||
203 | |||
204 | /* This one is for I830MP w. an external graphic card */ | ||
205 | #define INTEL_I830_ERRSTS 0x92 | ||
206 | |||
207 | /* Intel 855GM/852GM registers */ | ||
208 | #define I855_GMCH_GMS_MASK 0xF0 | ||
209 | #define I855_GMCH_GMS_STOLEN_0M 0x0 | ||
210 | #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
211 | #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
212 | #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
213 | #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
214 | #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
215 | #define I85X_CAPID 0x44 | ||
216 | #define I85X_VARIANT_MASK 0x7 | ||
217 | #define I85X_VARIANT_SHIFT 5 | ||
218 | #define I855_GME 0x0 | ||
219 | #define I855_GM 0x4 | ||
220 | #define I852_GME 0x2 | ||
221 | #define I852_GM 0x5 | ||
222 | |||
223 | /* Intel i845 registers */ | ||
224 | #define INTEL_I845_AGPM 0x51 | ||
225 | #define INTEL_I845_ERRSTS 0xc8 | ||
226 | |||
227 | /* Intel i860 registers */ | ||
228 | #define INTEL_I860_MCHCFG 0x50 | ||
229 | #define INTEL_I860_ERRSTS 0xc8 | ||
230 | |||
231 | /* Intel i810 registers */ | ||
232 | #define I810_GMADDR 0x10 | ||
233 | #define I810_MMADDR 0x14 | ||
234 | #define I810_PTE_BASE 0x10000 | ||
235 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | ||
236 | #define I810_PTE_LOCAL 0x00000002 | ||
237 | #define I810_PTE_VALID 0x00000001 | ||
238 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
239 | #define I810_SMRAM_MISCC 0x70 | ||
240 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | ||
241 | #define I810_GFX_MEM_WIN_32M 0x00010000 | ||
242 | #define I810_GMS 0x000000c0 | ||
243 | #define I810_GMS_DISABLE 0x00000000 | ||
244 | #define I810_PGETBL_CTL 0x2020 | ||
245 | #define I810_PGETBL_ENABLED 0x00000001 | ||
246 | #define I965_PGETBL_SIZE_MASK 0x0000000e | ||
247 | #define I965_PGETBL_SIZE_512KB (0 << 1) | ||
248 | #define I965_PGETBL_SIZE_256KB (1 << 1) | ||
249 | #define I965_PGETBL_SIZE_128KB (2 << 1) | ||
250 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
251 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
252 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
253 | #define G33_PGETBL_SIZE_MASK (3 << 8) | ||
254 | #define G33_PGETBL_SIZE_1M (1 << 8) | ||
255 | #define G33_PGETBL_SIZE_2M (2 << 8) | ||
256 | |||
257 | #define I810_DRAM_CTL 0x3000 | ||
258 | #define I810_DRAM_ROW_0 0x00000001 | ||
259 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | ||
260 | |||
261 | struct agp_device_ids { | 181 | struct agp_device_ids { |
262 | unsigned short device_id; /* first, to make table easier to read */ | 182 | unsigned short device_id; /* first, to make table easier to read */ |
263 | enum chipset_type chipset; | 183 | enum chipset_type chipset; |
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39ea9618..aa109cbe0e6e 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
28 | #include <linux/page-flags.h> | 28 | #include <linux/page-flags.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include "agp.h" | 30 | #include "agp.h" |
31 | #include "intel-agp.h" | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * The real differences to the generic AGP code is | 34 | * The real differences to the generic AGP code is |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248efc5d8..07a9aad28c11 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
11 | #include <linux/agp_backend.h> | 11 | #include <linux/agp_backend.h> |
12 | #include <asm/smp.h> | 12 | #include <asm/smp.h> |
13 | #include "agp.h" | 13 | #include "agp.h" |
14 | #include "intel-agp.h" | ||
15 | |||
16 | #include "intel-gtt.c" | ||
14 | 17 | ||
15 | int intel_agp_enabled; | 18 | int intel_agp_enabled; |
16 | EXPORT_SYMBOL(intel_agp_enabled); | 19 | EXPORT_SYMBOL(intel_agp_enabled); |
17 | 20 | ||
18 | /* | ||
19 | * If we have Intel graphics, we're not going to have anything other than | ||
20 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
21 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
22 | * Only newer chipsets need to bother with this, of course. | ||
23 | */ | ||
24 | #ifdef CONFIG_DMAR | ||
25 | #define USE_PCI_DMA_API 1 | ||
26 | #endif | ||
27 | |||
28 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | ||
29 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | ||
30 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | ||
31 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | ||
32 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 | ||
33 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 | ||
34 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | ||
35 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | ||
36 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | ||
37 | #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 | ||
38 | #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 | ||
39 | #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 | ||
40 | #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 | ||
41 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | ||
42 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | ||
43 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | ||
44 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 | ||
45 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 | ||
46 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 | ||
47 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 | ||
48 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | ||
49 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | ||
50 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | ||
51 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | ||
52 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | ||
53 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | ||
54 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
55 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
56 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | ||
57 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | ||
58 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | ||
59 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 | ||
60 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | ||
61 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | ||
62 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | ||
63 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | ||
64 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | ||
65 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | ||
66 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | ||
67 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | ||
68 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | ||
69 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | ||
70 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | ||
71 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | ||
72 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | ||
73 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | ||
74 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | ||
75 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | ||
76 | |||
77 | /* cover 915 and 945 variants */ | ||
78 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
79 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
80 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
81 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
82 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
83 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
84 | |||
85 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | ||
86 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ | ||
87 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | ||
88 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | ||
89 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | ||
90 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | ||
91 | |||
92 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | ||
93 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | ||
94 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | ||
95 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
96 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
97 | |||
98 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
99 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
100 | |||
101 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
102 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
103 | |||
104 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | ||
105 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | ||
106 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | ||
107 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | ||
108 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | ||
109 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
110 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ | ||
111 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | ||
112 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | ||
113 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | ||
114 | IS_SNB) | ||
115 | |||
116 | extern int agp_memory_reserved; | ||
117 | |||
118 | |||
119 | /* Intel 815 register */ | ||
120 | #define INTEL_815_APCONT 0x51 | ||
121 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | ||
122 | |||
123 | /* Intel i820 registers */ | ||
124 | #define INTEL_I820_RDCR 0x51 | ||
125 | #define INTEL_I820_ERRSTS 0xc8 | ||
126 | |||
127 | /* Intel i840 registers */ | ||
128 | #define INTEL_I840_MCHCFG 0x50 | ||
129 | #define INTEL_I840_ERRSTS 0xc8 | ||
130 | |||
131 | /* Intel i850 registers */ | ||
132 | #define INTEL_I850_MCHCFG 0x50 | ||
133 | #define INTEL_I850_ERRSTS 0xc8 | ||
134 | |||
135 | /* intel 915G registers */ | ||
136 | #define I915_GMADDR 0x18 | ||
137 | #define I915_MMADDR 0x10 | ||
138 | #define I915_PTEADDR 0x1C | ||
139 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
140 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
141 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
142 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
143 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
144 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
145 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
146 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
147 | |||
148 | #define I915_IFPADDR 0x60 | ||
149 | |||
150 | /* Intel 965G registers */ | ||
151 | #define I965_MSAC 0x62 | ||
152 | #define I965_IFPADDR 0x70 | ||
153 | |||
154 | /* Intel 7505 registers */ | ||
155 | #define INTEL_I7505_APSIZE 0x74 | ||
156 | #define INTEL_I7505_NCAPID 0x60 | ||
157 | #define INTEL_I7505_NISTAT 0x6c | ||
158 | #define INTEL_I7505_ATTBASE 0x78 | ||
159 | #define INTEL_I7505_ERRSTS 0x42 | ||
160 | #define INTEL_I7505_AGPCTRL 0x70 | ||
161 | #define INTEL_I7505_MCHCFG 0x50 | ||
162 | |||
163 | #define SNB_GMCH_CTRL 0x50 | ||
164 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
165 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
166 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
167 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
168 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
169 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
170 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
171 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
172 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
173 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
174 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
175 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
176 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
177 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
178 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
179 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
180 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
181 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
182 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
183 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
184 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
185 | |||
186 | static const struct aper_size_info_fixed intel_i810_sizes[] = | ||
187 | { | ||
188 | {64, 16384, 4}, | ||
189 | /* The 32M mode still requires a 64k gatt */ | ||
190 | {32, 8192, 4} | ||
191 | }; | ||
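The {64, 16384, 4} and {32, 8192, 4} rows tie the aperture size to the GATT: one 32-bit PTE per 4 KB aperture page, which is why a 64 MB aperture needs 16384 entries in a 64 KB (order-4) table, and why the 32 MB mode keeps the same order-4 allocation per the comment above. A standalone arithmetic sketch, illustrative only and not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned aperture_mb = 64;
	unsigned entries = aperture_mb * 1024 / 4;   /* one PTE per 4 KB page */
	unsigned gatt_kb = entries * 4 / 1024;       /* each PTE is 4 bytes   */
	printf("%u MB aperture -> %u entries, %u KB gatt\n",
	       aperture_mb, entries, gatt_kb);
	return 0;                                    /* 16384 entries, 64 KB  */
}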
192 | |||
193 | #define AGP_DCACHE_MEMORY 1 | ||
194 | #define AGP_PHYS_MEMORY 2 | ||
195 | #define INTEL_AGP_CACHED_MEMORY 3 | ||
196 | |||
197 | static struct gatt_mask intel_i810_masks[] = | ||
198 | { | ||
199 | {.mask = I810_PTE_VALID, .type = 0}, | ||
200 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, | ||
201 | {.mask = I810_PTE_VALID, .type = 0}, | ||
202 | {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, | ||
203 | .type = INTEL_AGP_CACHED_MEMORY} | ||
204 | }; | ||
205 | |||
206 | static struct _intel_private { | ||
207 | struct pci_dev *pcidev; /* device one */ | ||
208 | u8 __iomem *registers; | ||
209 | u32 __iomem *gtt; /* I915G */ | ||
210 | int num_dcache_entries; | ||
211 | /* gtt_entries is the number of gtt entries that are already mapped | ||
212 | * to stolen memory. Stolen memory is larger than the memory mapped | ||
213 | * through gtt_entries, as it includes some reserved space for the BIOS | ||
214 | * popup and for the GTT. | ||
215 | */ | ||
216 | int gtt_entries; /* i830+ */ | ||
217 | int gtt_total_size; | ||
218 | union { | ||
219 | void __iomem *i9xx_flush_page; | ||
220 | void *i8xx_flush_page; | ||
221 | }; | ||
222 | struct page *i8xx_page; | ||
223 | struct resource ifp_resource; | ||
224 | int resource_valid; | ||
225 | } intel_private; | ||
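gtt_entries counts the PTEs that already point at stolen memory, so the insert/remove paths further down refuse any pg_start below it. A worked example of the arithmetic intel_i830_init_gtt_entries() performs, with assumed values (8 MB stolen, 128 KB GTT plus the 4 KB BIOS popup reserved at the top); illustrative only:

#include <stdio.h>
#define KB(x) ((x) * 1024u)                    /* mirrors the driver's KB()/MB() helpers */
#define MB(x) (KB(KB(x)))

int main(void)
{
	unsigned size = 128 + 4;                  /* KB reserved at the top of stolen */
	unsigned gtt_entries = MB(8) - KB(size);  /* bytes of stolen mapped by GTT    */
	gtt_entries /= KB(4);                     /* one entry per 4 KB page          */
	printf("%u GTT entries cover stolen memory\n", gtt_entries);
	return 0;                                 /* 2015 for these assumed values    */
}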
226 | |||
227 | #ifdef USE_PCI_DMA_API | ||
228 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
229 | { | ||
230 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
231 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
232 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
233 | return -EINVAL; | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) | ||
238 | { | ||
239 | pci_unmap_page(intel_private.pcidev, dma, | ||
240 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
241 | } | ||
242 | |||
243 | static void intel_agp_free_sglist(struct agp_memory *mem) | ||
244 | { | ||
245 | struct sg_table st; | ||
246 | |||
247 | st.sgl = mem->sg_list; | ||
248 | st.orig_nents = st.nents = mem->page_count; | ||
249 | |||
250 | sg_free_table(&st); | ||
251 | |||
252 | mem->sg_list = NULL; | ||
253 | mem->num_sg = 0; | ||
254 | } | ||
255 | |||
256 | static int intel_agp_map_memory(struct agp_memory *mem) | ||
257 | { | ||
258 | struct sg_table st; | ||
259 | struct scatterlist *sg; | ||
260 | int i; | ||
261 | |||
262 | DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); | ||
263 | |||
264 | if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) | ||
265 | return -ENOMEM; | ||
266 | |||
267 | mem->sg_list = sg = st.sgl; | ||
268 | |||
269 | for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) | ||
270 | sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); | ||
271 | |||
272 | mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, | ||
273 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
274 | if (unlikely(!mem->num_sg)) { | ||
275 | intel_agp_free_sglist(mem); | ||
276 | return -ENOMEM; | ||
277 | } | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static void intel_agp_unmap_memory(struct agp_memory *mem) | ||
282 | { | ||
283 | DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); | ||
284 | |||
285 | pci_unmap_sg(intel_private.pcidev, mem->sg_list, | ||
286 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
287 | intel_agp_free_sglist(mem); | ||
288 | } | ||
289 | |||
290 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
291 | off_t pg_start, int mask_type) | ||
292 | { | ||
293 | struct scatterlist *sg; | ||
294 | int i, j; | ||
295 | |||
296 | j = pg_start; | ||
297 | |||
298 | WARN_ON(!mem->num_sg); | ||
299 | |||
300 | if (mem->num_sg == mem->page_count) { | ||
301 | for_each_sg(mem->sg_list, sg, mem->page_count, i) { | ||
302 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
303 | sg_dma_address(sg), mask_type), | ||
304 | intel_private.gtt+j); | ||
305 | j++; | ||
306 | } | ||
307 | } else { | ||
308 | /* sg may merge pages, but we have to separate | ||
309 | * per-page addr for GTT */ | ||
310 | unsigned int len, m; | ||
311 | |||
312 | for_each_sg(mem->sg_list, sg, mem->num_sg, i) { | ||
313 | len = sg_dma_len(sg) / PAGE_SIZE; | ||
314 | for (m = 0; m < len; m++) { | ||
315 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
316 | sg_dma_address(sg) + m * PAGE_SIZE, | ||
317 | mask_type), | ||
318 | intel_private.gtt+j); | ||
319 | j++; | ||
320 | } | ||
321 | } | ||
322 | } | ||
323 | readl(intel_private.gtt+j-1); | ||
324 | } | ||
325 | |||
326 | #else | ||
327 | |||
328 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
329 | off_t pg_start, int mask_type) | ||
330 | { | ||
331 | int i, j; | ||
332 | u32 cache_bits = 0; | ||
333 | |||
334 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
335 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
336 | { | ||
337 | cache_bits = I830_PTE_SYSTEM_CACHED; | ||
338 | } | ||
339 | |||
340 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
341 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
342 | page_to_phys(mem->pages[i]), mask_type), | ||
343 | intel_private.gtt+j); | ||
344 | } | ||
345 | |||
346 | readl(intel_private.gtt+j-1); | ||
347 | } | ||
348 | |||
349 | #endif | ||
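Both variants of intel_agp_insert_sg_entries() end with a readl() of the last entry written: GTT updates go through write-posted MMIO, so a read on the same device is what forces the posted writes out before the caller relies on them, the same idiom marked "PCI Posting." elsewhere in this patch. A kernel-context sketch of that pattern, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: write one PTE, then flush chipset write posting. */
static inline void example_gtt_write(u32 __iomem *gtt, unsigned int idx, u32 pte)
{
	writel(pte, gtt + idx);	/* may sit in a posting buffer              */
	readl(gtt + idx);	/* read back to push the write to the GTT   */
}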
350 | |||
351 | static int intel_i810_fetch_size(void) | ||
352 | { | ||
353 | u32 smram_miscc; | ||
354 | struct aper_size_info_fixed *values; | ||
355 | |||
356 | pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); | ||
357 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
358 | |||
359 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | ||
360 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); | ||
361 | return 0; | ||
362 | } | ||
363 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | ||
364 | agp_bridge->previous_size = | ||
365 | agp_bridge->current_size = (void *) (values + 1); | ||
366 | agp_bridge->aperture_size_idx = 1; | ||
367 | return values[1].size; | ||
368 | } else { | ||
369 | agp_bridge->previous_size = | ||
370 | agp_bridge->current_size = (void *) (values); | ||
371 | agp_bridge->aperture_size_idx = 0; | ||
372 | return values[0].size; | ||
373 | } | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | static int intel_i810_configure(void) | ||
379 | { | ||
380 | struct aper_size_info_fixed *current_size; | ||
381 | u32 temp; | ||
382 | int i; | ||
383 | |||
384 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
385 | |||
386 | if (!intel_private.registers) { | ||
387 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); | ||
388 | temp &= 0xfff80000; | ||
389 | |||
390 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
391 | if (!intel_private.registers) { | ||
392 | dev_err(&intel_private.pcidev->dev, | ||
393 | "can't remap memory\n"); | ||
394 | return -ENOMEM; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | if ((readl(intel_private.registers+I810_DRAM_CTL) | ||
399 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { | ||
400 | /* This will need to be dynamically assigned */ | ||
401 | dev_info(&intel_private.pcidev->dev, | ||
402 | "detected 4MB dedicated video ram\n"); | ||
403 | intel_private.num_dcache_entries = 1024; | ||
404 | } | ||
405 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | ||
406 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
407 | writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
408 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
409 | |||
410 | if (agp_bridge->driver->needs_scratch_page) { | ||
411 | for (i = 0; i < current_size->num_entries; i++) { | ||
412 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
413 | } | ||
414 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ | ||
415 | } | ||
416 | global_cache_flush(); | ||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static void intel_i810_cleanup(void) | ||
421 | { | ||
422 | writel(0, intel_private.registers+I810_PGETBL_CTL); | ||
423 | readl(intel_private.registers); /* PCI Posting. */ | ||
424 | iounmap(intel_private.registers); | ||
425 | } | ||
426 | |||
427 | static void intel_i810_tlbflush(struct agp_memory *mem) | ||
428 | { | ||
429 | return; | ||
430 | } | ||
431 | |||
432 | static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | ||
433 | { | ||
434 | return; | ||
435 | } | ||
436 | |||
437 | /* Exists to support ARGB cursors */ | ||
438 | static struct page *i8xx_alloc_pages(void) | ||
439 | { | ||
440 | struct page *page; | ||
441 | |||
442 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); | ||
443 | if (page == NULL) | ||
444 | return NULL; | ||
445 | |||
446 | if (set_pages_uc(page, 4) < 0) { | ||
447 | set_pages_wb(page, 4); | ||
448 | __free_pages(page, 2); | ||
449 | return NULL; | ||
450 | } | ||
451 | get_page(page); | ||
452 | atomic_inc(&agp_bridge->current_memory_agp); | ||
453 | return page; | ||
454 | } | ||
455 | |||
456 | static void i8xx_destroy_pages(struct page *page) | ||
457 | { | ||
458 | if (page == NULL) | ||
459 | return; | ||
460 | |||
461 | set_pages_wb(page, 4); | ||
462 | put_page(page); | ||
463 | __free_pages(page, 2); | ||
464 | atomic_dec(&agp_bridge->current_memory_agp); | ||
465 | } | ||
466 | |||
467 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
468 | int type) | ||
469 | { | ||
470 | if (type < AGP_USER_TYPES) | ||
471 | return type; | ||
472 | else if (type == AGP_USER_CACHED_MEMORY) | ||
473 | return INTEL_AGP_CACHED_MEMORY; | ||
474 | else | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
479 | int type) | ||
480 | { | ||
481 | int i, j, num_entries; | ||
482 | void *temp; | ||
483 | int ret = -EINVAL; | ||
484 | int mask_type; | ||
485 | |||
486 | if (mem->page_count == 0) | ||
487 | goto out; | ||
488 | |||
489 | temp = agp_bridge->current_size; | ||
490 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
491 | |||
492 | if ((pg_start + mem->page_count) > num_entries) | ||
493 | goto out_err; | ||
494 | |||
495 | |||
496 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { | ||
497 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { | ||
498 | ret = -EBUSY; | ||
499 | goto out_err; | ||
500 | } | ||
501 | } | ||
502 | |||
503 | if (type != mem->type) | ||
504 | goto out_err; | ||
505 | |||
506 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
507 | |||
508 | switch (mask_type) { | ||
509 | case AGP_DCACHE_MEMORY: | ||
510 | if (!mem->is_flushed) | ||
511 | global_cache_flush(); | ||
512 | for (i = pg_start; i < (pg_start + mem->page_count); i++) { | ||
513 | writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, | ||
514 | intel_private.registers+I810_PTE_BASE+(i*4)); | ||
515 | } | ||
516 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
517 | break; | ||
518 | case AGP_PHYS_MEMORY: | ||
519 | case AGP_NORMAL_MEMORY: | ||
520 | if (!mem->is_flushed) | ||
521 | global_cache_flush(); | ||
522 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
523 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
524 | page_to_phys(mem->pages[i]), mask_type), | ||
525 | intel_private.registers+I810_PTE_BASE+(j*4)); | ||
526 | } | ||
527 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
528 | break; | ||
529 | default: | ||
530 | goto out_err; | ||
531 | } | ||
532 | |||
533 | agp_bridge->driver->tlb_flush(mem); | ||
534 | out: | ||
535 | ret = 0; | ||
536 | out_err: | ||
537 | mem->is_flushed = true; | ||
538 | return ret; | ||
539 | } | ||
540 | |||
541 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
542 | int type) | ||
543 | { | ||
544 | int i; | ||
545 | |||
546 | if (mem->page_count == 0) | ||
547 | return 0; | ||
548 | |||
549 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
550 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
551 | } | ||
552 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
553 | |||
554 | agp_bridge->driver->tlb_flush(mem); | ||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * The i810/i830 requires a physical address to program its mouse | ||
560 | * pointer into hardware. | ||
561 | * However the Xserver still writes to it through the agp aperture. | ||
562 | */ | ||
563 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) | ||
564 | { | ||
565 | struct agp_memory *new; | ||
566 | struct page *page; | ||
567 | |||
568 | switch (pg_count) { | ||
569 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); | ||
570 | break; | ||
571 | case 4: | ||
572 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
573 | page = i8xx_alloc_pages(); | ||
574 | break; | ||
575 | default: | ||
576 | return NULL; | ||
577 | } | ||
578 | |||
579 | if (page == NULL) | ||
580 | return NULL; | ||
581 | |||
582 | new = agp_create_memory(pg_count); | ||
583 | if (new == NULL) | ||
584 | return NULL; | ||
585 | |||
586 | new->pages[0] = page; | ||
587 | if (pg_count == 4) { | ||
588 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
589 | new->pages[1] = new->pages[0] + 1; | ||
590 | new->pages[2] = new->pages[1] + 1; | ||
591 | new->pages[3] = new->pages[2] + 1; | ||
592 | } | ||
593 | new->page_count = pg_count; | ||
594 | new->num_scratch_pages = pg_count; | ||
595 | new->type = AGP_PHYS_MEMORY; | ||
596 | new->physical = page_to_phys(new->pages[0]); | ||
597 | return new; | ||
598 | } | ||
599 | |||
600 | static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) | ||
601 | { | ||
602 | struct agp_memory *new; | ||
603 | |||
604 | if (type == AGP_DCACHE_MEMORY) { | ||
605 | if (pg_count != intel_private.num_dcache_entries) | ||
606 | return NULL; | ||
607 | |||
608 | new = agp_create_memory(1); | ||
609 | if (new == NULL) | ||
610 | return NULL; | ||
611 | |||
612 | new->type = AGP_DCACHE_MEMORY; | ||
613 | new->page_count = pg_count; | ||
614 | new->num_scratch_pages = 0; | ||
615 | agp_free_page_array(new); | ||
616 | return new; | ||
617 | } | ||
618 | if (type == AGP_PHYS_MEMORY) | ||
619 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
620 | return NULL; | ||
621 | } | ||
622 | |||
623 | static void intel_i810_free_by_type(struct agp_memory *curr) | ||
624 | { | ||
625 | agp_free_key(curr->key); | ||
626 | if (curr->type == AGP_PHYS_MEMORY) { | ||
627 | if (curr->page_count == 4) | ||
628 | i8xx_destroy_pages(curr->pages[0]); | ||
629 | else { | ||
630 | agp_bridge->driver->agp_destroy_page(curr->pages[0], | ||
631 | AGP_PAGE_DESTROY_UNMAP); | ||
632 | agp_bridge->driver->agp_destroy_page(curr->pages[0], | ||
633 | AGP_PAGE_DESTROY_FREE); | ||
634 | } | ||
635 | agp_free_page_array(curr); | ||
636 | } | ||
637 | kfree(curr); | ||
638 | } | ||
639 | |||
640 | static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, | ||
641 | dma_addr_t addr, int type) | ||
642 | { | ||
643 | /* Type checking must be done elsewhere */ | ||
644 | return addr | bridge->driver->masks[type].mask; | ||
645 | } | ||
646 | |||
647 | static struct aper_size_info_fixed intel_i830_sizes[] = | ||
648 | { | ||
649 | {128, 32768, 5}, | ||
650 | /* The 64M mode still requires a 128k gatt */ | ||
651 | {64, 16384, 5}, | ||
652 | {256, 65536, 6}, | ||
653 | {512, 131072, 7}, | ||
654 | }; | ||
655 | |||
656 | static void intel_i830_init_gtt_entries(void) | ||
657 | { | ||
658 | u16 gmch_ctrl; | ||
659 | int gtt_entries = 0; | ||
660 | u8 rdct; | ||
661 | int local = 0; | ||
662 | static const int ddt[4] = { 0, 16, 32, 64 }; | ||
663 | int size; /* reserved space (in kb) at the top of stolen memory */ | ||
664 | |||
665 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
666 | |||
667 | if (IS_I965) { | ||
668 | u32 pgetbl_ctl; | ||
669 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); | ||
670 | |||
671 | /* The 965 has a field telling us the size of the GTT, | ||
672 | * which may be larger than what is necessary to map the | ||
673 | * aperture. | ||
674 | */ | ||
675 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { | ||
676 | case I965_PGETBL_SIZE_128KB: | ||
677 | size = 128; | ||
678 | break; | ||
679 | case I965_PGETBL_SIZE_256KB: | ||
680 | size = 256; | ||
681 | break; | ||
682 | case I965_PGETBL_SIZE_512KB: | ||
683 | size = 512; | ||
684 | break; | ||
685 | case I965_PGETBL_SIZE_1MB: | ||
686 | size = 1024; | ||
687 | break; | ||
688 | case I965_PGETBL_SIZE_2MB: | ||
689 | size = 2048; | ||
690 | break; | ||
691 | case I965_PGETBL_SIZE_1_5MB: | ||
692 | size = 1024 + 512; | ||
693 | break; | ||
694 | default: | ||
695 | dev_info(&intel_private.pcidev->dev, | ||
696 | "unknown page table size, assuming 512KB\n"); | ||
697 | size = 512; | ||
698 | } | ||
699 | size += 4; /* add in BIOS popup space */ | ||
700 | } else if (IS_G33 && !IS_PINEVIEW) { | ||
701 | /* G33's GTT size defined in gmch_ctrl */ | ||
702 | switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { | ||
703 | case G33_PGETBL_SIZE_1M: | ||
704 | size = 1024; | ||
705 | break; | ||
706 | case G33_PGETBL_SIZE_2M: | ||
707 | size = 2048; | ||
708 | break; | ||
709 | default: | ||
710 | dev_info(&agp_bridge->dev->dev, | ||
711 | "unknown page table size 0x%x, assuming 512KB\n", | ||
712 | (gmch_ctrl & G33_PGETBL_SIZE_MASK)); | ||
713 | size = 512; | ||
714 | } | ||
715 | size += 4; | ||
716 | } else if (IS_G4X || IS_PINEVIEW) { | ||
717 | /* On 4 series hardware, GTT stolen is separate from graphics | ||
718 | * stolen, ignore it in stolen gtt entries counting. However, | ||
719 | * 4KB of the stolen memory doesn't get mapped to the GTT. | ||
720 | */ | ||
721 | size = 4; | ||
722 | } else { | ||
723 | /* On previous hardware, the GTT size was just what was | ||
724 | * required to map the aperture. | ||
725 | */ | ||
726 | size = agp_bridge->driver->fetch_size() + 4; | ||
727 | } | ||
728 | |||
729 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || | ||
730 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
731 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { | ||
732 | case I830_GMCH_GMS_STOLEN_512: | ||
733 | gtt_entries = KB(512) - KB(size); | ||
734 | break; | ||
735 | case I830_GMCH_GMS_STOLEN_1024: | ||
736 | gtt_entries = MB(1) - KB(size); | ||
737 | break; | ||
738 | case I830_GMCH_GMS_STOLEN_8192: | ||
739 | gtt_entries = MB(8) - KB(size); | ||
740 | break; | ||
741 | case I830_GMCH_GMS_LOCAL: | ||
742 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); | ||
743 | gtt_entries = (I830_RDRAM_ND(rdct) + 1) * | ||
744 | MB(ddt[I830_RDRAM_DDT(rdct)]); | ||
745 | local = 1; | ||
746 | break; | ||
747 | default: | ||
748 | gtt_entries = 0; | ||
749 | break; | ||
750 | } | ||
751 | } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
752 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { | ||
753 | /* | ||
754 | * SandyBridge has new memory control reg at 0x50.w | ||
755 | */ | ||
756 | u16 snb_gmch_ctl; | ||
757 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
758 | switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { | ||
759 | case SNB_GMCH_GMS_STOLEN_32M: | ||
760 | gtt_entries = MB(32) - KB(size); | ||
761 | break; | ||
762 | case SNB_GMCH_GMS_STOLEN_64M: | ||
763 | gtt_entries = MB(64) - KB(size); | ||
764 | break; | ||
765 | case SNB_GMCH_GMS_STOLEN_96M: | ||
766 | gtt_entries = MB(96) - KB(size); | ||
767 | break; | ||
768 | case SNB_GMCH_GMS_STOLEN_128M: | ||
769 | gtt_entries = MB(128) - KB(size); | ||
770 | break; | ||
771 | case SNB_GMCH_GMS_STOLEN_160M: | ||
772 | gtt_entries = MB(160) - KB(size); | ||
773 | break; | ||
774 | case SNB_GMCH_GMS_STOLEN_192M: | ||
775 | gtt_entries = MB(192) - KB(size); | ||
776 | break; | ||
777 | case SNB_GMCH_GMS_STOLEN_224M: | ||
778 | gtt_entries = MB(224) - KB(size); | ||
779 | break; | ||
780 | case SNB_GMCH_GMS_STOLEN_256M: | ||
781 | gtt_entries = MB(256) - KB(size); | ||
782 | break; | ||
783 | case SNB_GMCH_GMS_STOLEN_288M: | ||
784 | gtt_entries = MB(288) - KB(size); | ||
785 | break; | ||
786 | case SNB_GMCH_GMS_STOLEN_320M: | ||
787 | gtt_entries = MB(320) - KB(size); | ||
788 | break; | ||
789 | case SNB_GMCH_GMS_STOLEN_352M: | ||
790 | gtt_entries = MB(352) - KB(size); | ||
791 | break; | ||
792 | case SNB_GMCH_GMS_STOLEN_384M: | ||
793 | gtt_entries = MB(384) - KB(size); | ||
794 | break; | ||
795 | case SNB_GMCH_GMS_STOLEN_416M: | ||
796 | gtt_entries = MB(416) - KB(size); | ||
797 | break; | ||
798 | case SNB_GMCH_GMS_STOLEN_448M: | ||
799 | gtt_entries = MB(448) - KB(size); | ||
800 | break; | ||
801 | case SNB_GMCH_GMS_STOLEN_480M: | ||
802 | gtt_entries = MB(480) - KB(size); | ||
803 | break; | ||
804 | case SNB_GMCH_GMS_STOLEN_512M: | ||
805 | gtt_entries = MB(512) - KB(size); | ||
806 | break; | ||
807 | } | ||
808 | } else { | ||
809 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { | ||
810 | case I855_GMCH_GMS_STOLEN_1M: | ||
811 | gtt_entries = MB(1) - KB(size); | ||
812 | break; | ||
813 | case I855_GMCH_GMS_STOLEN_4M: | ||
814 | gtt_entries = MB(4) - KB(size); | ||
815 | break; | ||
816 | case I855_GMCH_GMS_STOLEN_8M: | ||
817 | gtt_entries = MB(8) - KB(size); | ||
818 | break; | ||
819 | case I855_GMCH_GMS_STOLEN_16M: | ||
820 | gtt_entries = MB(16) - KB(size); | ||
821 | break; | ||
822 | case I855_GMCH_GMS_STOLEN_32M: | ||
823 | gtt_entries = MB(32) - KB(size); | ||
824 | break; | ||
825 | case I915_GMCH_GMS_STOLEN_48M: | ||
826 | /* Check it's really I915G */ | ||
827 | if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) | ||
828 | gtt_entries = MB(48) - KB(size); | ||
829 | else | ||
830 | gtt_entries = 0; | ||
831 | break; | ||
832 | case I915_GMCH_GMS_STOLEN_64M: | ||
833 | /* Check it's really I915G */ | ||
834 | if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) | ||
835 | gtt_entries = MB(64) - KB(size); | ||
836 | else | ||
837 | gtt_entries = 0; | ||
838 | break; | ||
839 | case G33_GMCH_GMS_STOLEN_128M: | ||
840 | if (IS_G33 || IS_I965 || IS_G4X) | ||
841 | gtt_entries = MB(128) - KB(size); | ||
842 | else | ||
843 | gtt_entries = 0; | ||
844 | break; | ||
845 | case G33_GMCH_GMS_STOLEN_256M: | ||
846 | if (IS_G33 || IS_I965 || IS_G4X) | ||
847 | gtt_entries = MB(256) - KB(size); | ||
848 | else | ||
849 | gtt_entries = 0; | ||
850 | break; | ||
851 | case INTEL_GMCH_GMS_STOLEN_96M: | ||
852 | if (IS_I965 || IS_G4X) | ||
853 | gtt_entries = MB(96) - KB(size); | ||
854 | else | ||
855 | gtt_entries = 0; | ||
856 | break; | ||
857 | case INTEL_GMCH_GMS_STOLEN_160M: | ||
858 | if (IS_I965 || IS_G4X) | ||
859 | gtt_entries = MB(160) - KB(size); | ||
860 | else | ||
861 | gtt_entries = 0; | ||
862 | break; | ||
863 | case INTEL_GMCH_GMS_STOLEN_224M: | ||
864 | if (IS_I965 || IS_G4X) | ||
865 | gtt_entries = MB(224) - KB(size); | ||
866 | else | ||
867 | gtt_entries = 0; | ||
868 | break; | ||
869 | case INTEL_GMCH_GMS_STOLEN_352M: | ||
870 | if (IS_I965 || IS_G4X) | ||
871 | gtt_entries = MB(352) - KB(size); | ||
872 | else | ||
873 | gtt_entries = 0; | ||
874 | break; | ||
875 | default: | ||
876 | gtt_entries = 0; | ||
877 | break; | ||
878 | } | ||
879 | } | ||
880 | if (gtt_entries > 0) { | ||
881 | dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", | ||
882 | gtt_entries / KB(1), local ? "local" : "stolen"); | ||
883 | gtt_entries /= KB(4); | ||
884 | } else { | ||
885 | dev_info(&agp_bridge->dev->dev, | ||
886 | "no pre-allocated video memory detected\n"); | ||
887 | gtt_entries = 0; | ||
888 | } | ||
889 | |||
890 | intel_private.gtt_entries = gtt_entries; | ||
891 | } | ||
892 | |||
893 | static void intel_i830_fini_flush(void) | ||
894 | { | ||
895 | kunmap(intel_private.i8xx_page); | ||
896 | intel_private.i8xx_flush_page = NULL; | ||
897 | unmap_page_from_agp(intel_private.i8xx_page); | ||
898 | |||
899 | __free_page(intel_private.i8xx_page); | ||
900 | intel_private.i8xx_page = NULL; | ||
901 | } | ||
902 | |||
903 | static void intel_i830_setup_flush(void) | ||
904 | { | ||
905 | /* return if we've already set the flush mechanism up */ | ||
906 | if (intel_private.i8xx_page) | ||
907 | return; | ||
908 | |||
909 | intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); | ||
910 | if (!intel_private.i8xx_page) | ||
911 | return; | ||
912 | |||
913 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | ||
914 | if (!intel_private.i8xx_flush_page) | ||
915 | intel_i830_fini_flush(); | ||
916 | } | ||
917 | |||
918 | /* The chipset_flush interface needs to get data that has already been | ||
919 | * flushed out of the CPU all the way out to main memory, because the GPU | ||
920 | * doesn't snoop those buffers. | ||
921 | * | ||
922 | * The 8xx series doesn't have the same lovely interface for flushing the | ||
923 | * chipset write buffers that the later chips do. According to the 865 | ||
924 | * specs, it's 64 octwords, or 1KB. So, to get those previous things in | ||
925 | * that buffer out, we just fill 1KB and clflush it out, on the assumption | ||
926 | * that it'll push whatever was in there out. It appears to work. | ||
927 | */ | ||
928 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | ||
929 | { | ||
930 | unsigned int *pg = intel_private.i8xx_flush_page; | ||
931 | |||
932 | memset(pg, 0, 1024); | ||
933 | |||
934 | if (cpu_has_clflush) | ||
935 | clflush_cache_range(pg, 1024); | ||
936 | else if (wbinvd_on_all_cpus() != 0) | ||
937 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
938 | } | ||
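The comment's "64 octwords, or 1KB" is 64 x 16 bytes, so filling and clflushing a 1 KB region is enough to displace whatever the chipset write buffer was holding. A trivial standalone check of that arithmetic, illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned octword_bytes = 16;                  /* one octword = 128 bits */
	unsigned buffer_bytes  = 64 * octword_bytes;  /* per the 865 specs      */
	printf("chipset write buffer: %u bytes\n", buffer_bytes);  /* 1024 */
	return 0;
}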
939 | |||
940 | /* The intel i830 automatically initializes the agp aperture during POST. | ||
941 | * Use the memory already set aside for in the GTT. | ||
942 | */ | ||
943 | static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) | ||
944 | { | ||
945 | int page_order; | ||
946 | struct aper_size_info_fixed *size; | ||
947 | int num_entries; | ||
948 | u32 temp; | ||
949 | |||
950 | size = agp_bridge->current_size; | ||
951 | page_order = size->page_order; | ||
952 | num_entries = size->num_entries; | ||
953 | agp_bridge->gatt_table_real = NULL; | ||
954 | |||
955 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); | ||
956 | temp &= 0xfff80000; | ||
957 | |||
958 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
959 | if (!intel_private.registers) | ||
960 | return -ENOMEM; | ||
961 | |||
962 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
963 | global_cache_flush(); /* FIXME: ?? */ | ||
964 | |||
965 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
966 | intel_i830_init_gtt_entries(); | ||
967 | |||
968 | agp_bridge->gatt_table = NULL; | ||
969 | |||
970 | agp_bridge->gatt_bus_addr = temp; | ||
971 | |||
972 | return 0; | ||
973 | } | ||
974 | |||
975 | /* Return the gatt table to a sane state. Use the top of stolen | ||
976 | * memory for the GTT. | ||
977 | */ | ||
978 | static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) | ||
979 | { | ||
980 | return 0; | ||
981 | } | ||
982 | |||
983 | static int intel_i830_fetch_size(void) | ||
984 | { | ||
985 | u16 gmch_ctrl; | ||
986 | struct aper_size_info_fixed *values; | ||
987 | |||
988 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
989 | |||
990 | if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && | ||
991 | agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
992 | /* 855GM/852GM/865G has 128MB aperture size */ | ||
993 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | ||
994 | agp_bridge->aperture_size_idx = 0; | ||
995 | return values[0].size; | ||
996 | } | ||
997 | |||
998 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
999 | |||
1000 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | ||
1001 | agp_bridge->previous_size = agp_bridge->current_size = (void *) values; | ||
1002 | agp_bridge->aperture_size_idx = 0; | ||
1003 | return values[0].size; | ||
1004 | } else { | ||
1005 | agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1); | ||
1006 | agp_bridge->aperture_size_idx = 1; | ||
1007 | return values[1].size; | ||
1008 | } | ||
1009 | |||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
1013 | static int intel_i830_configure(void) | ||
1014 | { | ||
1015 | struct aper_size_info_fixed *current_size; | ||
1016 | u32 temp; | ||
1017 | u16 gmch_ctrl; | ||
1018 | int i; | ||
1019 | |||
1020 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
1021 | |||
1022 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | ||
1023 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
1024 | |||
1025 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
1026 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
1027 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
1028 | |||
1029 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
1030 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
1031 | |||
1032 | if (agp_bridge->driver->needs_scratch_page) { | ||
1033 | for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { | ||
1034 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
1035 | } | ||
1036 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */ | ||
1037 | } | ||
1038 | |||
1039 | global_cache_flush(); | ||
1040 | |||
1041 | intel_i830_setup_flush(); | ||
1042 | return 0; | ||
1043 | } | ||
1044 | |||
1045 | static void intel_i830_cleanup(void) | ||
1046 | { | ||
1047 | iounmap(intel_private.registers); | ||
1048 | } | ||
1049 | |||
1050 | static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
1051 | int type) | ||
1052 | { | ||
1053 | int i, j, num_entries; | ||
1054 | void *temp; | ||
1055 | int ret = -EINVAL; | ||
1056 | int mask_type; | ||
1057 | |||
1058 | if (mem->page_count == 0) | ||
1059 | goto out; | ||
1060 | |||
1061 | temp = agp_bridge->current_size; | ||
1062 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
1063 | |||
1064 | if (pg_start < intel_private.gtt_entries) { | ||
1065 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
1066 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
1067 | pg_start, intel_private.gtt_entries); | ||
1068 | |||
1069 | dev_info(&intel_private.pcidev->dev, | ||
1070 | "trying to insert into local/stolen memory\n"); | ||
1071 | goto out_err; | ||
1072 | } | ||
1073 | |||
1074 | if ((pg_start + mem->page_count) > num_entries) | ||
1075 | goto out_err; | ||
1076 | |||
1077 | /* The i830 can't check the GTT for entries since its read only, | ||
1078 | * depend on the caller to make the correct offset decisions. | ||
1079 | */ | ||
1080 | |||
1081 | if (type != mem->type) | ||
1082 | goto out_err; | ||
1083 | |||
1084 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
1085 | |||
1086 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
1087 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
1088 | goto out_err; | ||
1089 | |||
1090 | if (!mem->is_flushed) | ||
1091 | global_cache_flush(); | ||
1092 | |||
1093 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
1094 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
1095 | page_to_phys(mem->pages[i]), mask_type), | ||
1096 | intel_private.registers+I810_PTE_BASE+(j*4)); | ||
1097 | } | ||
1098 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
1099 | agp_bridge->driver->tlb_flush(mem); | ||
1100 | |||
1101 | out: | ||
1102 | ret = 0; | ||
1103 | out_err: | ||
1104 | mem->is_flushed = true; | ||
1105 | return ret; | ||
1106 | } | ||
1107 | |||
1108 | static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1109 | int type) | ||
1110 | { | ||
1111 | int i; | ||
1112 | |||
1113 | if (mem->page_count == 0) | ||
1114 | return 0; | ||
1115 | |||
1116 | if (pg_start < intel_private.gtt_entries) { | ||
1117 | dev_info(&intel_private.pcidev->dev, | ||
1118 | "trying to disable local/stolen memory\n"); | ||
1119 | return -EINVAL; | ||
1120 | } | ||
1121 | |||
1122 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
1123 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
1124 | } | ||
1125 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
1126 | |||
1127 | agp_bridge->driver->tlb_flush(mem); | ||
1128 | return 0; | ||
1129 | } | ||
1130 | |||
1131 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) | ||
1132 | { | ||
1133 | if (type == AGP_PHYS_MEMORY) | ||
1134 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
1135 | /* always return NULL for other allocation types for now */ | ||
1136 | return NULL; | ||
1137 | } | ||
1138 | |||
1139 | static int intel_alloc_chipset_flush_resource(void) | ||
1140 | { | ||
1141 | int ret; | ||
1142 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
1143 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
1144 | pcibios_align_resource, agp_bridge->dev); | ||
1145 | |||
1146 | return ret; | ||
1147 | } | ||
1148 | |||
1149 | static void intel_i915_setup_chipset_flush(void) | ||
1150 | { | ||
1151 | int ret; | ||
1152 | u32 temp; | ||
1153 | |||
1154 | pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); | ||
1155 | if (!(temp & 0x1)) { | ||
1156 | intel_alloc_chipset_flush_resource(); | ||
1157 | intel_private.resource_valid = 1; | ||
1158 | pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1159 | } else { | ||
1160 | temp &= ~1; | ||
1161 | |||
1162 | intel_private.resource_valid = 1; | ||
1163 | intel_private.ifp_resource.start = temp; | ||
1164 | intel_private.ifp_resource.end = temp + PAGE_SIZE; | ||
1165 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1166 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1167 | if (ret) | ||
1168 | intel_private.resource_valid = 0; | ||
1169 | } | ||
1170 | } | ||
1171 | |||
1172 | static void intel_i965_g33_setup_chipset_flush(void) | ||
1173 | { | ||
1174 | u32 temp_hi, temp_lo; | ||
1175 | int ret; | ||
1176 | |||
1177 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); | ||
1178 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); | ||
1179 | |||
1180 | if (!(temp_lo & 0x1)) { | ||
1181 | |||
1182 | intel_alloc_chipset_flush_resource(); | ||
1183 | |||
1184 | intel_private.resource_valid = 1; | ||
1185 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, | ||
1186 | upper_32_bits(intel_private.ifp_resource.start)); | ||
1187 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1188 | } else { | ||
1189 | u64 l64; | ||
1190 | |||
1191 | temp_lo &= ~0x1; | ||
1192 | l64 = ((u64)temp_hi << 32) | temp_lo; | ||
1193 | |||
1194 | intel_private.resource_valid = 1; | ||
1195 | intel_private.ifp_resource.start = l64; | ||
1196 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; | ||
1197 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1198 | /* some BIOSes reserve this area in a pnp some don't */ | ||
1199 | if (ret) | ||
1200 | intel_private.resource_valid = 0; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1204 | static void intel_i9xx_setup_flush(void) | ||
1205 | { | ||
1206 | /* return if already configured */ | ||
1207 | if (intel_private.ifp_resource.start) | ||
1208 | return; | ||
1209 | |||
1210 | if (IS_SNB) | ||
1211 | return; | ||
1212 | |||
1213 | /* setup a resource for this object */ | ||
1214 | intel_private.ifp_resource.name = "Intel Flush Page"; | ||
1215 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | ||
1216 | |||
1217 | /* Setup chipset flush for 915 */ | ||
1218 | if (IS_I965 || IS_G33 || IS_G4X) { | ||
1219 | intel_i965_g33_setup_chipset_flush(); | ||
1220 | } else { | ||
1221 | intel_i915_setup_chipset_flush(); | ||
1222 | } | ||
1223 | |||
1224 | if (intel_private.ifp_resource.start) { | ||
1225 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | ||
1226 | if (!intel_private.i9xx_flush_page) | ||
1227 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); | ||
1228 | } | ||
1229 | } | ||
1230 | |||
1231 | static int intel_i915_configure(void) | ||
1232 | { | ||
1233 | struct aper_size_info_fixed *current_size; | ||
1234 | u32 temp; | ||
1235 | u16 gmch_ctrl; | ||
1236 | int i; | ||
1237 | |||
1238 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
1239 | |||
1240 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); | ||
1241 | |||
1242 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
1243 | |||
1244 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
1245 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
1246 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
1247 | |||
1248 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
1249 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
1250 | |||
1251 | if (agp_bridge->driver->needs_scratch_page) { | ||
1252 | for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { | ||
1253 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1254 | } | ||
1255 | readl(intel_private.gtt+i-1); /* PCI Posting. */ | ||
1256 | } | ||
1257 | |||
1258 | global_cache_flush(); | ||
1259 | |||
1260 | intel_i9xx_setup_flush(); | ||
1261 | |||
1262 | return 0; | ||
1263 | } | ||
1264 | |||
1265 | static void intel_i915_cleanup(void) | ||
1266 | { | ||
1267 | if (intel_private.i9xx_flush_page) | ||
1268 | iounmap(intel_private.i9xx_flush_page); | ||
1269 | if (intel_private.resource_valid) | ||
1270 | release_resource(&intel_private.ifp_resource); | ||
1271 | intel_private.ifp_resource.start = 0; | ||
1272 | intel_private.resource_valid = 0; | ||
1273 | iounmap(intel_private.gtt); | ||
1274 | iounmap(intel_private.registers); | ||
1275 | } | ||
1276 | |||
1277 | static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) | ||
1278 | { | ||
1279 | if (intel_private.i9xx_flush_page) | ||
1280 | writel(1, intel_private.i9xx_flush_page); | ||
1281 | } | ||
1282 | |||
1283 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
1284 | int type) | ||
1285 | { | ||
1286 | int num_entries; | ||
1287 | void *temp; | ||
1288 | int ret = -EINVAL; | ||
1289 | int mask_type; | ||
1290 | |||
1291 | if (mem->page_count == 0) | ||
1292 | goto out; | ||
1293 | |||
1294 | temp = agp_bridge->current_size; | ||
1295 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
1296 | |||
1297 | if (pg_start < intel_private.gtt_entries) { | ||
1298 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
1299 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
1300 | pg_start, intel_private.gtt_entries); | ||
1301 | |||
1302 | dev_info(&intel_private.pcidev->dev, | ||
1303 | "trying to insert into local/stolen memory\n"); | ||
1304 | goto out_err; | ||
1305 | } | ||
1306 | |||
1307 | if ((pg_start + mem->page_count) > num_entries) | ||
1308 | goto out_err; | ||
1309 | |||
1310 | /* The i915 can't check the GTT for entries since it's read only; | ||
1311 | * depend on the caller to make the correct offset decisions. | ||
1312 | */ | ||
1313 | |||
1314 | if (type != mem->type) | ||
1315 | goto out_err; | ||
1316 | |||
1317 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
1318 | |||
1319 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
1320 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
1321 | goto out_err; | ||
1322 | |||
1323 | if (!mem->is_flushed) | ||
1324 | global_cache_flush(); | ||
1325 | |||
1326 | intel_agp_insert_sg_entries(mem, pg_start, mask_type); | ||
1327 | agp_bridge->driver->tlb_flush(mem); | ||
1328 | |||
1329 | out: | ||
1330 | ret = 0; | ||
1331 | out_err: | ||
1332 | mem->is_flushed = true; | ||
1333 | return ret; | ||
1334 | } | ||
1335 | |||
1336 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1337 | int type) | ||
1338 | { | ||
1339 | int i; | ||
1340 | |||
1341 | if (mem->page_count == 0) | ||
1342 | return 0; | ||
1343 | |||
1344 | if (pg_start < intel_private.gtt_entries) { | ||
1345 | dev_info(&intel_private.pcidev->dev, | ||
1346 | "trying to disable local/stolen memory\n"); | ||
1347 | return -EINVAL; | ||
1348 | } | ||
1349 | |||
1350 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | ||
1351 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1352 | |||
1353 | readl(intel_private.gtt+i-1); | ||
1354 | |||
1355 | agp_bridge->driver->tlb_flush(mem); | ||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | /* Return the aperture size by just checking the resource length. The effect | ||
1360 | * described in the spec of the MSAC registers is just changing of the | ||
1361 | * resource size. | ||
1362 | */ | ||
1363 | static int intel_i9xx_fetch_size(void) | ||
1364 | { | ||
1365 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); | ||
1366 | int aper_size; /* size in megabytes */ | ||
1367 | int i; | ||
1368 | |||
1369 | aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); | ||
1370 | |||
1371 | for (i = 0; i < num_sizes; i++) { | ||
1372 | if (aper_size == intel_i830_sizes[i].size) { | ||
1373 | agp_bridge->current_size = intel_i830_sizes + i; | ||
1374 | agp_bridge->previous_size = agp_bridge->current_size; | ||
1375 | return aper_size; | ||
1376 | } | ||
1377 | } | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
1382 | /* The intel i915 automatically initializes the agp aperture during POST. | ||
1383 | * Use the memory already set aside for in the GTT. | ||
1384 | */ | ||
1385 | static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
1386 | { | ||
1387 | int page_order; | ||
1388 | struct aper_size_info_fixed *size; | ||
1389 | int num_entries; | ||
1390 | u32 temp, temp2; | ||
1391 | int gtt_map_size = 256 * 1024; | ||
1392 | |||
1393 | size = agp_bridge->current_size; | ||
1394 | page_order = size->page_order; | ||
1395 | num_entries = size->num_entries; | ||
1396 | agp_bridge->gatt_table_real = NULL; | ||
1397 | |||
1398 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1399 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); | ||
1400 | |||
1401 | if (IS_G33) | ||
1402 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | ||
1403 | intel_private.gtt = ioremap(temp2, gtt_map_size); | ||
1404 | if (!intel_private.gtt) | ||
1405 | return -ENOMEM; | ||
1406 | |||
1407 | intel_private.gtt_total_size = gtt_map_size / 4; | ||
1408 | |||
1409 | temp &= 0xfff80000; | ||
1410 | |||
1411 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1412 | if (!intel_private.registers) { | ||
1413 | iounmap(intel_private.gtt); | ||
1414 | return -ENOMEM; | ||
1415 | } | ||
1416 | |||
1417 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1418 | global_cache_flush(); /* FIXME: ? */ | ||
1419 | |||
1420 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1421 | intel_i830_init_gtt_entries(); | ||
1422 | |||
1423 | agp_bridge->gatt_table = NULL; | ||
1424 | |||
1425 | agp_bridge->gatt_bus_addr = temp; | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
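For reference, each GTT entry is a 4-byte PTE mapping one 4KiB page, which is why the function above divides the mapped size by 4 to obtain gtt_total_size. A minimal user-space sketch of that arithmetic (illustrative only, not part of the driver):

#include <stdio.h>

/* Each GTT entry is a 4-byte PTE that maps one 4KiB page, so a GTT of
 * gtt_bytes holds gtt_bytes/4 entries and covers (gtt_bytes/4)*4KiB of
 * aperture space. */
int main(void)
{
	long gtt_bytes = 256 * 1024;	/* the i915 default mapped above */
	long entries = gtt_bytes / 4;	/* what gtt_total_size ends up as */

	printf("%ld entries, %ld MB of aperture\n",
	       entries, entries * 4096 / (1024 * 1024));
	return 0;
}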
1429 | |||
1430 | /* | ||
1431 | * The i965 supports 36-bit physical addresses, but to keep | ||
1432 | * the format of the GTT the same, the bits that don't fit | ||
1433 | * in a 32-bit word are shifted down to bits 4..7. | ||
1434 | * | ||
1435 | * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" | ||
1436 | * is always zero on 32-bit architectures, so no need to make | ||
1437 | * this conditional. | ||
1438 | */ | ||
1439 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | ||
1440 | dma_addr_t addr, int type) | ||
1441 | { | ||
1442 | /* Shift high bits down */ | ||
1443 | addr |= (addr >> 28) & 0xf0; | ||
1444 | |||
1445 | /* Type checking must be done elsewhere */ | ||
1446 | return addr | bridge->driver->masks[type].mask; | ||
1447 | } | ||
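The comment above describes how the i965 folds a 36-bit physical address into a 32-bit GTT entry. The following stand-alone sketch shows the same bit manipulation on a concrete address; the address and valid-bit mask are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Fold bits 32..35 of a 36-bit physical address down into PTE bits 4..7,
 * as intel_i965_mask_memory() does; the eventual writel() truncates the
 * result to 32 bits. */
static uint32_t i965_pack_pte(uint64_t addr, uint32_t mask)
{
	addr |= (addr >> 28) & 0xf0;	/* bits 32..35 -> bits 4..7 */
	return (uint32_t)(addr | mask);
}

int main(void)
{
	/* hypothetical page above 4GiB, with only a valid bit as the mask */
	printf("0x%08x\n", i965_pack_pte(0x812345000ULL, 0x1)); /* 0x12345081 */
	return 0;
}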
1448 | |||
1449 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | ||
1450 | { | ||
1451 | u16 snb_gmch_ctl; | ||
1452 | |||
1453 | switch (agp_bridge->dev->device) { | ||
1454 | case PCI_DEVICE_ID_INTEL_GM45_HB: | ||
1455 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: | ||
1456 | case PCI_DEVICE_ID_INTEL_Q45_HB: | ||
1457 | case PCI_DEVICE_ID_INTEL_G45_HB: | ||
1458 | case PCI_DEVICE_ID_INTEL_G41_HB: | ||
1459 | case PCI_DEVICE_ID_INTEL_B43_HB: | ||
1460 | case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: | ||
1461 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: | ||
1462 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: | ||
1463 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: | ||
1464 | *gtt_offset = *gtt_size = MB(2); | ||
1465 | break; | ||
1466 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | ||
1467 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | ||
1468 | *gtt_offset = MB(2); | ||
1469 | |||
1470 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
1471 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { | ||
1472 | default: | ||
1473 | case SNB_GTT_SIZE_0M: | ||
1474 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); | ||
1475 | *gtt_size = MB(0); | ||
1476 | break; | ||
1477 | case SNB_GTT_SIZE_1M: | ||
1478 | *gtt_size = MB(1); | ||
1479 | break; | ||
1480 | case SNB_GTT_SIZE_2M: | ||
1481 | *gtt_size = MB(2); | ||
1482 | break; | ||
1483 | } | ||
1484 | break; | ||
1485 | default: | ||
1486 | *gtt_offset = *gtt_size = KB(512); | ||
1487 | } | ||
1488 | } | ||
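The Sandybridge branch above reads the GTT size out of the GMCH control word. As a rough illustration of that decode (the register value is chosen arbitrarily, and MB() plus the SNB_* constants are re-declared locally so the sketch compiles on its own):

#include <stdint.h>
#include <stdio.h>

#define SNB_GTT_SIZE_MASK	(3 << 8)
#define SNB_GTT_SIZE_1M		(1 << 8)
#define SNB_GTT_SIZE_2M		(2 << 8)
#define MB(x)			((x) * 1024L * 1024L)

/* Decode the GTT size field of a raw Sandybridge GMCH control word. */
static long snb_gtt_bytes(uint16_t snb_gmch_ctl)
{
	switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
	case SNB_GTT_SIZE_1M:	return MB(1);
	case SNB_GTT_SIZE_2M:	return MB(2);
	default:		return 0;	/* 0M or a reserved encoding */
	}
}

int main(void)
{
	printf("%ld bytes of GTT\n", snb_gtt_bytes(0x0240));	/* made-up value -> 2MB */
	return 0;
}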
1489 | |||
1490 | /* The intel i965 automatically initializes the agp aperture during POST. | ||
1491 | * Use the memory already set aside for it in the GTT. | ||
1492 | */ | ||
1493 | static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
1494 | { | ||
1495 | int page_order; | ||
1496 | struct aper_size_info_fixed *size; | ||
1497 | int num_entries; | ||
1498 | u32 temp; | ||
1499 | int gtt_offset, gtt_size; | ||
1500 | |||
1501 | size = agp_bridge->current_size; | ||
1502 | page_order = size->page_order; | ||
1503 | num_entries = size->num_entries; | ||
1504 | agp_bridge->gatt_table_real = NULL; | ||
1505 | |||
1506 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1507 | |||
1508 | temp &= 0xfff00000; | ||
1509 | |||
1510 | intel_i965_get_gtt_range(&gtt_offset, &gtt_size); | ||
1511 | |||
1512 | intel_private.gtt = ioremap((temp + gtt_offset), gtt_size); | ||
1513 | |||
1514 | if (!intel_private.gtt) | ||
1515 | return -ENOMEM; | ||
1516 | |||
1517 | intel_private.gtt_total_size = gtt_size / 4; | ||
1518 | |||
1519 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1520 | if (!intel_private.registers) { | ||
1521 | iounmap(intel_private.gtt); | ||
1522 | return -ENOMEM; | ||
1523 | } | ||
1524 | |||
1525 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1526 | global_cache_flush(); /* FIXME: ? */ | ||
1527 | |||
1528 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1529 | intel_i830_init_gtt_entries(); | ||
1530 | |||
1531 | agp_bridge->gatt_table = NULL; | ||
1532 | |||
1533 | agp_bridge->gatt_bus_addr = temp; | ||
1534 | |||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | |||
1539 | static int intel_fetch_size(void) | 21 | static int intel_fetch_size(void) |
1540 | { | 22 | { |
1541 | int i; | 23 | int i; |
@@ -2003,33 +485,6 @@ static const struct agp_bridge_driver intel_generic_driver = { | |||
2003 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 485 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2004 | }; | 486 | }; |
2005 | 487 | ||
2006 | static const struct agp_bridge_driver intel_810_driver = { | ||
2007 | .owner = THIS_MODULE, | ||
2008 | .aperture_sizes = intel_i810_sizes, | ||
2009 | .size_type = FIXED_APER_SIZE, | ||
2010 | .num_aperture_sizes = 2, | ||
2011 | .needs_scratch_page = true, | ||
2012 | .configure = intel_i810_configure, | ||
2013 | .fetch_size = intel_i810_fetch_size, | ||
2014 | .cleanup = intel_i810_cleanup, | ||
2015 | .tlb_flush = intel_i810_tlbflush, | ||
2016 | .mask_memory = intel_i810_mask_memory, | ||
2017 | .masks = intel_i810_masks, | ||
2018 | .agp_enable = intel_i810_agp_enable, | ||
2019 | .cache_flush = global_cache_flush, | ||
2020 | .create_gatt_table = agp_generic_create_gatt_table, | ||
2021 | .free_gatt_table = agp_generic_free_gatt_table, | ||
2022 | .insert_memory = intel_i810_insert_entries, | ||
2023 | .remove_memory = intel_i810_remove_entries, | ||
2024 | .alloc_by_type = intel_i810_alloc_by_type, | ||
2025 | .free_by_type = intel_i810_free_by_type, | ||
2026 | .agp_alloc_page = agp_generic_alloc_page, | ||
2027 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2028 | .agp_destroy_page = agp_generic_destroy_page, | ||
2029 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2030 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
2031 | }; | ||
2032 | |||
2033 | static const struct agp_bridge_driver intel_815_driver = { | 488 | static const struct agp_bridge_driver intel_815_driver = { |
2034 | .owner = THIS_MODULE, | 489 | .owner = THIS_MODULE, |
2035 | .aperture_sizes = intel_815_sizes, | 490 | .aperture_sizes = intel_815_sizes, |
@@ -2056,34 +511,6 @@ static const struct agp_bridge_driver intel_815_driver = { | |||
2056 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 511 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2057 | }; | 512 | }; |
2058 | 513 | ||
2059 | static const struct agp_bridge_driver intel_830_driver = { | ||
2060 | .owner = THIS_MODULE, | ||
2061 | .aperture_sizes = intel_i830_sizes, | ||
2062 | .size_type = FIXED_APER_SIZE, | ||
2063 | .num_aperture_sizes = 4, | ||
2064 | .needs_scratch_page = true, | ||
2065 | .configure = intel_i830_configure, | ||
2066 | .fetch_size = intel_i830_fetch_size, | ||
2067 | .cleanup = intel_i830_cleanup, | ||
2068 | .tlb_flush = intel_i810_tlbflush, | ||
2069 | .mask_memory = intel_i810_mask_memory, | ||
2070 | .masks = intel_i810_masks, | ||
2071 | .agp_enable = intel_i810_agp_enable, | ||
2072 | .cache_flush = global_cache_flush, | ||
2073 | .create_gatt_table = intel_i830_create_gatt_table, | ||
2074 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2075 | .insert_memory = intel_i830_insert_entries, | ||
2076 | .remove_memory = intel_i830_remove_entries, | ||
2077 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2078 | .free_by_type = intel_i810_free_by_type, | ||
2079 | .agp_alloc_page = agp_generic_alloc_page, | ||
2080 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2081 | .agp_destroy_page = agp_generic_destroy_page, | ||
2082 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2083 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2084 | .chipset_flush = intel_i830_chipset_flush, | ||
2085 | }; | ||
2086 | |||
2087 | static const struct agp_bridge_driver intel_820_driver = { | 514 | static const struct agp_bridge_driver intel_820_driver = { |
2088 | .owner = THIS_MODULE, | 515 | .owner = THIS_MODULE, |
2089 | .aperture_sizes = intel_8xx_sizes, | 516 | .aperture_sizes = intel_8xx_sizes, |
@@ -2240,74 +667,6 @@ static const struct agp_bridge_driver intel_860_driver = { | |||
2240 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 667 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2241 | }; | 668 | }; |
2242 | 669 | ||
2243 | static const struct agp_bridge_driver intel_915_driver = { | ||
2244 | .owner = THIS_MODULE, | ||
2245 | .aperture_sizes = intel_i830_sizes, | ||
2246 | .size_type = FIXED_APER_SIZE, | ||
2247 | .num_aperture_sizes = 4, | ||
2248 | .needs_scratch_page = true, | ||
2249 | .configure = intel_i915_configure, | ||
2250 | .fetch_size = intel_i9xx_fetch_size, | ||
2251 | .cleanup = intel_i915_cleanup, | ||
2252 | .tlb_flush = intel_i810_tlbflush, | ||
2253 | .mask_memory = intel_i810_mask_memory, | ||
2254 | .masks = intel_i810_masks, | ||
2255 | .agp_enable = intel_i810_agp_enable, | ||
2256 | .cache_flush = global_cache_flush, | ||
2257 | .create_gatt_table = intel_i915_create_gatt_table, | ||
2258 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2259 | .insert_memory = intel_i915_insert_entries, | ||
2260 | .remove_memory = intel_i915_remove_entries, | ||
2261 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2262 | .free_by_type = intel_i810_free_by_type, | ||
2263 | .agp_alloc_page = agp_generic_alloc_page, | ||
2264 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2265 | .agp_destroy_page = agp_generic_destroy_page, | ||
2266 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2267 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2268 | .chipset_flush = intel_i915_chipset_flush, | ||
2269 | #ifdef USE_PCI_DMA_API | ||
2270 | .agp_map_page = intel_agp_map_page, | ||
2271 | .agp_unmap_page = intel_agp_unmap_page, | ||
2272 | .agp_map_memory = intel_agp_map_memory, | ||
2273 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2274 | #endif | ||
2275 | }; | ||
2276 | |||
2277 | static const struct agp_bridge_driver intel_i965_driver = { | ||
2278 | .owner = THIS_MODULE, | ||
2279 | .aperture_sizes = intel_i830_sizes, | ||
2280 | .size_type = FIXED_APER_SIZE, | ||
2281 | .num_aperture_sizes = 4, | ||
2282 | .needs_scratch_page = true, | ||
2283 | .configure = intel_i915_configure, | ||
2284 | .fetch_size = intel_i9xx_fetch_size, | ||
2285 | .cleanup = intel_i915_cleanup, | ||
2286 | .tlb_flush = intel_i810_tlbflush, | ||
2287 | .mask_memory = intel_i965_mask_memory, | ||
2288 | .masks = intel_i810_masks, | ||
2289 | .agp_enable = intel_i810_agp_enable, | ||
2290 | .cache_flush = global_cache_flush, | ||
2291 | .create_gatt_table = intel_i965_create_gatt_table, | ||
2292 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2293 | .insert_memory = intel_i915_insert_entries, | ||
2294 | .remove_memory = intel_i915_remove_entries, | ||
2295 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2296 | .free_by_type = intel_i810_free_by_type, | ||
2297 | .agp_alloc_page = agp_generic_alloc_page, | ||
2298 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2299 | .agp_destroy_page = agp_generic_destroy_page, | ||
2300 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2301 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2302 | .chipset_flush = intel_i915_chipset_flush, | ||
2303 | #ifdef USE_PCI_DMA_API | ||
2304 | .agp_map_page = intel_agp_map_page, | ||
2305 | .agp_unmap_page = intel_agp_unmap_page, | ||
2306 | .agp_map_memory = intel_agp_map_memory, | ||
2307 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2308 | #endif | ||
2309 | }; | ||
2310 | |||
2311 | static const struct agp_bridge_driver intel_7505_driver = { | 670 | static const struct agp_bridge_driver intel_7505_driver = { |
2312 | .owner = THIS_MODULE, | 671 | .owner = THIS_MODULE, |
2313 | .aperture_sizes = intel_8xx_sizes, | 672 | .aperture_sizes = intel_8xx_sizes, |
@@ -2334,40 +693,6 @@ static const struct agp_bridge_driver intel_7505_driver = { | |||
2334 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | 693 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, |
2335 | }; | 694 | }; |
2336 | 695 | ||
2337 | static const struct agp_bridge_driver intel_g33_driver = { | ||
2338 | .owner = THIS_MODULE, | ||
2339 | .aperture_sizes = intel_i830_sizes, | ||
2340 | .size_type = FIXED_APER_SIZE, | ||
2341 | .num_aperture_sizes = 4, | ||
2342 | .needs_scratch_page = true, | ||
2343 | .configure = intel_i915_configure, | ||
2344 | .fetch_size = intel_i9xx_fetch_size, | ||
2345 | .cleanup = intel_i915_cleanup, | ||
2346 | .tlb_flush = intel_i810_tlbflush, | ||
2347 | .mask_memory = intel_i965_mask_memory, | ||
2348 | .masks = intel_i810_masks, | ||
2349 | .agp_enable = intel_i810_agp_enable, | ||
2350 | .cache_flush = global_cache_flush, | ||
2351 | .create_gatt_table = intel_i915_create_gatt_table, | ||
2352 | .free_gatt_table = intel_i830_free_gatt_table, | ||
2353 | .insert_memory = intel_i915_insert_entries, | ||
2354 | .remove_memory = intel_i915_remove_entries, | ||
2355 | .alloc_by_type = intel_i830_alloc_by_type, | ||
2356 | .free_by_type = intel_i810_free_by_type, | ||
2357 | .agp_alloc_page = agp_generic_alloc_page, | ||
2358 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
2359 | .agp_destroy_page = agp_generic_destroy_page, | ||
2360 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
2361 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
2362 | .chipset_flush = intel_i915_chipset_flush, | ||
2363 | #ifdef USE_PCI_DMA_API | ||
2364 | .agp_map_page = intel_agp_map_page, | ||
2365 | .agp_unmap_page = intel_agp_unmap_page, | ||
2366 | .agp_map_memory = intel_agp_map_memory, | ||
2367 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2368 | #endif | ||
2369 | }; | ||
2370 | |||
2371 | static int find_gmch(u16 device) | 696 | static int find_gmch(u16 device) |
2372 | { | 697 | { |
2373 | struct pci_dev *gmch_device; | 698 | struct pci_dev *gmch_device; |
@@ -2392,103 +717,137 @@ static int find_gmch(u16 device) | |||
2392 | static const struct intel_driver_description { | 717 | static const struct intel_driver_description { |
2393 | unsigned int chip_id; | 718 | unsigned int chip_id; |
2394 | unsigned int gmch_chip_id; | 719 | unsigned int gmch_chip_id; |
2395 | unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */ | ||
2396 | char *name; | 720 | char *name; |
2397 | const struct agp_bridge_driver *driver; | 721 | const struct agp_bridge_driver *driver; |
2398 | const struct agp_bridge_driver *gmch_driver; | 722 | const struct agp_bridge_driver *gmch_driver; |
2399 | } intel_agp_chipsets[] = { | 723 | } intel_agp_chipsets[] = { |
2400 | { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, | 724 | { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL }, |
2401 | { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, | 725 | { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL }, |
2402 | { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, | 726 | { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL }, |
2403 | { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", | 727 | { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810", |
2404 | NULL, &intel_810_driver }, | 728 | NULL, &intel_810_driver }, |
2405 | { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", | 729 | { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810", |
2406 | NULL, &intel_810_driver }, | 730 | NULL, &intel_810_driver }, |
2407 | { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", | 731 | { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810", |
2408 | NULL, &intel_810_driver }, | 732 | NULL, &intel_810_driver }, |
2409 | { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", | 733 | { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815", |
2410 | &intel_815_driver, &intel_810_driver }, | 734 | &intel_815_driver, &intel_810_driver }, |
2411 | { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, | 735 | { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL }, |
2412 | { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, | 736 | { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL }, |
2413 | { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", | 737 | { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M", |
2414 | &intel_830mp_driver, &intel_830_driver }, | 738 | &intel_830mp_driver, &intel_830_driver }, |
2415 | { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, | 739 | { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL }, |
2416 | { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL }, | 740 | { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL }, |
2417 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", | 741 | { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M", |
2418 | &intel_845_driver, &intel_830_driver }, | 742 | &intel_845_driver, &intel_830_driver }, |
2419 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, | 743 | { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL }, |
2420 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", | 744 | { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854", |
2421 | &intel_845_driver, &intel_830_driver }, | 745 | &intel_845_driver, &intel_830_driver }, |
2422 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, | 746 | { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL }, |
2423 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", | 747 | { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", |
2424 | &intel_845_driver, &intel_830_driver }, | 748 | &intel_845_driver, &intel_830_driver }, |
2425 | { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, | 749 | { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL }, |
2426 | { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", | 750 | { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865", |
2427 | &intel_845_driver, &intel_830_driver }, | 751 | &intel_845_driver, &intel_830_driver }, |
2428 | { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, | 752 | { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL }, |
2429 | { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", | 753 | { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", |
2430 | NULL, &intel_915_driver }, | 754 | NULL, &intel_915_driver }, |
2431 | { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", | 755 | { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G", |
2432 | NULL, &intel_915_driver }, | 756 | NULL, &intel_915_driver }, |
2433 | { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", | 757 | { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", |
2434 | NULL, &intel_915_driver }, | 758 | NULL, &intel_915_driver }, |
2435 | { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", | 759 | { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G", |
2436 | NULL, &intel_915_driver }, | 760 | NULL, &intel_915_driver }, |
2437 | { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", | 761 | { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", |
2438 | NULL, &intel_915_driver }, | 762 | NULL, &intel_915_driver }, |
2439 | { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", | 763 | { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", |
2440 | NULL, &intel_915_driver }, | 764 | NULL, &intel_915_driver }, |
2441 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", | 765 | { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", |
2442 | NULL, &intel_i965_driver }, | 766 | NULL, &intel_i965_driver }, |
2443 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", | 767 | { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35", |
2444 | NULL, &intel_i965_driver }, | 768 | NULL, &intel_i965_driver }, |
2445 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", | 769 | { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", |
2446 | NULL, &intel_i965_driver }, | 770 | NULL, &intel_i965_driver }, |
2447 | { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", | 771 | { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G", |
2448 | NULL, &intel_i965_driver }, | 772 | NULL, &intel_i965_driver }, |
2449 | { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", | 773 | { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", |
2450 | NULL, &intel_i965_driver }, | 774 | NULL, &intel_i965_driver }, |
2451 | { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", | 775 | { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", |
2452 | NULL, &intel_i965_driver }, | 776 | NULL, &intel_i965_driver }, |
2453 | { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, | 777 | { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL }, |
2454 | { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, | 778 | { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL }, |
2455 | { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", | 779 | { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33", |
2456 | NULL, &intel_g33_driver }, | 780 | NULL, &intel_g33_driver }, |
2457 | { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", | 781 | { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", |
2458 | NULL, &intel_g33_driver }, | 782 | NULL, &intel_g33_driver }, |
2459 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", | 783 | { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", |
2460 | NULL, &intel_g33_driver }, | 784 | NULL, &intel_g33_driver }, |
2461 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", | 785 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", |
2462 | NULL, &intel_g33_driver }, | 786 | NULL, &intel_g33_driver }, |
2463 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", | 787 | { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", |
2464 | NULL, &intel_g33_driver }, | 788 | NULL, &intel_g33_driver }, |
2465 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, | 789 | { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, |
2466 | "GM45", NULL, &intel_i965_driver }, | 790 | "GM45", NULL, &intel_i965_driver }, |
2467 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, | 791 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, |
2468 | "Eaglelake", NULL, &intel_i965_driver }, | 792 | "Eaglelake", NULL, &intel_i965_driver }, |
2469 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, | 793 | { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, |
2470 | "Q45/Q43", NULL, &intel_i965_driver }, | 794 | "Q45/Q43", NULL, &intel_i965_driver }, |
2471 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, | 795 | { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, |
2472 | "G45/G43", NULL, &intel_i965_driver }, | 796 | "G45/G43", NULL, &intel_i965_driver }, |
2473 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, | 797 | { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, |
2474 | "B43", NULL, &intel_i965_driver }, | 798 | "B43", NULL, &intel_i965_driver }, |
2475 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, | 799 | { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, |
2476 | "G41", NULL, &intel_i965_driver }, | 800 | "G41", NULL, &intel_i965_driver }, |
2477 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, | 801 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
2478 | "HD Graphics", NULL, &intel_i965_driver }, | 802 | "HD Graphics", NULL, &intel_i965_driver }, |
2479 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 803 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2480 | "HD Graphics", NULL, &intel_i965_driver }, | 804 | "HD Graphics", NULL, &intel_i965_driver }, |
2481 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 805 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2482 | "HD Graphics", NULL, &intel_i965_driver }, | 806 | "HD Graphics", NULL, &intel_i965_driver }, |
2483 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, | 807 | { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
2484 | "HD Graphics", NULL, &intel_i965_driver }, | 808 | "HD Graphics", NULL, &intel_i965_driver }, |
2485 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, | 809 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, |
2486 | "Sandybridge", NULL, &intel_i965_driver }, | 810 | "Sandybridge", NULL, &intel_i965_driver }, |
2487 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, | 811 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, |
2488 | "Sandybridge", NULL, &intel_i965_driver }, | 812 | "Sandybridge", NULL, &intel_i965_driver }, |
2489 | { 0, 0, 0, NULL, NULL, NULL } | 813 | { 0, 0, NULL, NULL, NULL } |
2490 | }; | 814 | }; |
2491 | 815 | ||
816 | static int __devinit intel_gmch_probe(struct pci_dev *pdev, | ||
817 | struct agp_bridge_data *bridge) | ||
818 | { | ||
819 | int i; | ||
820 | bridge->driver = NULL; | ||
821 | |||
822 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | ||
823 | if ((intel_agp_chipsets[i].gmch_chip_id != 0) && | ||
824 | find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { | ||
825 | bridge->driver = | ||
826 | intel_agp_chipsets[i].gmch_driver; | ||
827 | break; | ||
828 | } | ||
829 | } | ||
830 | |||
831 | if (!bridge->driver) | ||
832 | return 0; | ||
833 | |||
834 | bridge->dev_private_data = &intel_private; | ||
835 | bridge->dev = pdev; | ||
836 | |||
837 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | ||
838 | |||
839 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | ||
840 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
841 | dev_err(&intel_private.pcidev->dev, | ||
842 | "set gfx device dma mask 36bit failed!\n"); | ||
843 | else | ||
844 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
845 | DMA_BIT_MASK(36)); | ||
846 | } | ||
847 | |||
848 | return 1; | ||
849 | } | ||
850 | |||
2492 | static int __devinit agp_intel_probe(struct pci_dev *pdev, | 851 | static int __devinit agp_intel_probe(struct pci_dev *pdev, |
2493 | const struct pci_device_id *ent) | 852 | const struct pci_device_id *ent) |
2494 | { | 853 | { |
@@ -2503,22 +862,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2503 | if (!bridge) | 862 | if (!bridge) |
2504 | return -ENOMEM; | 863 | return -ENOMEM; |
2505 | 864 | ||
865 | bridge->capndx = cap_ptr; | ||
866 | |||
867 | if (intel_gmch_probe(pdev, bridge)) | ||
868 | goto found_gmch; | ||
869 | |||
2506 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | 870 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { |
2507 | /* In case multiple models of gfx chip share | 871 | /* In case multiple models of gfx chip share |
2508 | the same host bridge type, this makes sure | 872 | the same host bridge type, this makes sure |
2509 | we detect the right IGD. */ | 873 | we detect the right IGD. */ |
2510 | if (pdev->device == intel_agp_chipsets[i].chip_id) { | 874 | if (pdev->device == intel_agp_chipsets[i].chip_id) { |
2511 | if ((intel_agp_chipsets[i].gmch_chip_id != 0) && | 875 | bridge->driver = intel_agp_chipsets[i].driver; |
2512 | find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { | 876 | break; |
2513 | bridge->driver = | ||
2514 | intel_agp_chipsets[i].gmch_driver; | ||
2515 | break; | ||
2516 | } else if (intel_agp_chipsets[i].multi_gmch_chip) { | ||
2517 | continue; | ||
2518 | } else { | ||
2519 | bridge->driver = intel_agp_chipsets[i].driver; | ||
2520 | break; | ||
2521 | } | ||
2522 | } | 877 | } |
2523 | } | 878 | } |
2524 | 879 | ||
@@ -2530,18 +885,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2530 | return -ENODEV; | 885 | return -ENODEV; |
2531 | } | 886 | } |
2532 | 887 | ||
2533 | if (bridge->driver == NULL) { | ||
2534 | /* bridge has no AGP and no IGD detected */ | ||
2535 | if (cap_ptr) | ||
2536 | dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", | ||
2537 | intel_agp_chipsets[i].gmch_chip_id); | ||
2538 | agp_put_bridge(bridge); | ||
2539 | return -ENODEV; | ||
2540 | } | ||
2541 | |||
2542 | bridge->dev = pdev; | 888 | bridge->dev = pdev; |
2543 | bridge->capndx = cap_ptr; | 889 | bridge->dev_private_data = NULL; |
2544 | bridge->dev_private_data = &intel_private; | ||
2545 | 890 | ||
2546 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | 891 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); |
2547 | 892 | ||
@@ -2577,15 +922,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, | |||
2577 | &bridge->mode); | 922 | &bridge->mode); |
2578 | } | 923 | } |
2579 | 924 | ||
2580 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | 925 | found_gmch: |
2581 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
2582 | dev_err(&intel_private.pcidev->dev, | ||
2583 | "set gfx device dma mask 36bit failed!\n"); | ||
2584 | else | ||
2585 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
2586 | DMA_BIT_MASK(36)); | ||
2587 | } | ||
2588 | |||
2589 | pci_set_drvdata(pdev, bridge); | 926 | pci_set_drvdata(pdev, bridge); |
2590 | err = agp_add_bridge(bridge); | 927 | err = agp_add_bridge(bridge); |
2591 | if (!err) | 928 | if (!err) |
@@ -2611,22 +948,7 @@ static int agp_intel_resume(struct pci_dev *pdev) | |||
2611 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | 948 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); |
2612 | int ret_val; | 949 | int ret_val; |
2613 | 950 | ||
2614 | if (bridge->driver == &intel_generic_driver) | 951 | bridge->driver->configure(); |
2615 | intel_configure(); | ||
2616 | else if (bridge->driver == &intel_850_driver) | ||
2617 | intel_850_configure(); | ||
2618 | else if (bridge->driver == &intel_845_driver) | ||
2619 | intel_845_configure(); | ||
2620 | else if (bridge->driver == &intel_830mp_driver) | ||
2621 | intel_830mp_configure(); | ||
2622 | else if (bridge->driver == &intel_915_driver) | ||
2623 | intel_i915_configure(); | ||
2624 | else if (bridge->driver == &intel_830_driver) | ||
2625 | intel_i830_configure(); | ||
2626 | else if (bridge->driver == &intel_810_driver) | ||
2627 | intel_i810_configure(); | ||
2628 | else if (bridge->driver == &intel_i965_driver) | ||
2629 | intel_i915_configure(); | ||
2630 | 952 | ||
2631 | ret_val = agp_rebind_memory(); | 953 | ret_val = agp_rebind_memory(); |
2632 | if (ret_val != 0) | 954 | if (ret_val != 0) |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h new file mode 100644 index 000000000000..2547465d4658 --- /dev/null +++ b/drivers/char/agp/intel-agp.h | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * Common Intel AGPGART and GTT definitions. | ||
3 | */ | ||
4 | |||
5 | /* Intel registers */ | ||
6 | #define INTEL_APSIZE 0xb4 | ||
7 | #define INTEL_ATTBASE 0xb8 | ||
8 | #define INTEL_AGPCTRL 0xb0 | ||
9 | #define INTEL_NBXCFG 0x50 | ||
10 | #define INTEL_ERRSTS 0x91 | ||
11 | |||
12 | /* Intel i830 registers */ | ||
13 | #define I830_GMCH_CTRL 0x52 | ||
14 | #define I830_GMCH_ENABLED 0x4 | ||
15 | #define I830_GMCH_MEM_MASK 0x1 | ||
16 | #define I830_GMCH_MEM_64M 0x1 | ||
17 | #define I830_GMCH_MEM_128M 0 | ||
18 | #define I830_GMCH_GMS_MASK 0x70 | ||
19 | #define I830_GMCH_GMS_DISABLED 0x00 | ||
20 | #define I830_GMCH_GMS_LOCAL 0x10 | ||
21 | #define I830_GMCH_GMS_STOLEN_512 0x20 | ||
22 | #define I830_GMCH_GMS_STOLEN_1024 0x30 | ||
23 | #define I830_GMCH_GMS_STOLEN_8192 0x40 | ||
24 | #define I830_RDRAM_CHANNEL_TYPE 0x03010 | ||
25 | #define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) | ||
26 | #define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) | ||
27 | |||
28 | /* This one is for I830MP w. an external graphic card */ | ||
29 | #define INTEL_I830_ERRSTS 0x92 | ||
30 | |||
31 | /* Intel 855GM/852GM registers */ | ||
32 | #define I855_GMCH_GMS_MASK 0xF0 | ||
33 | #define I855_GMCH_GMS_STOLEN_0M 0x0 | ||
34 | #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) | ||
35 | #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) | ||
36 | #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) | ||
37 | #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) | ||
38 | #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) | ||
39 | #define I85X_CAPID 0x44 | ||
40 | #define I85X_VARIANT_MASK 0x7 | ||
41 | #define I85X_VARIANT_SHIFT 5 | ||
42 | #define I855_GME 0x0 | ||
43 | #define I855_GM 0x4 | ||
44 | #define I852_GME 0x2 | ||
45 | #define I852_GM 0x5 | ||
46 | |||
47 | /* Intel i845 registers */ | ||
48 | #define INTEL_I845_AGPM 0x51 | ||
49 | #define INTEL_I845_ERRSTS 0xc8 | ||
50 | |||
51 | /* Intel i860 registers */ | ||
52 | #define INTEL_I860_MCHCFG 0x50 | ||
53 | #define INTEL_I860_ERRSTS 0xc8 | ||
54 | |||
55 | /* Intel i810 registers */ | ||
56 | #define I810_GMADDR 0x10 | ||
57 | #define I810_MMADDR 0x14 | ||
58 | #define I810_PTE_BASE 0x10000 | ||
59 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | ||
60 | #define I810_PTE_LOCAL 0x00000002 | ||
61 | #define I810_PTE_VALID 0x00000001 | ||
62 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
63 | #define I810_SMRAM_MISCC 0x70 | ||
64 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | ||
65 | #define I810_GFX_MEM_WIN_32M 0x00010000 | ||
66 | #define I810_GMS 0x000000c0 | ||
67 | #define I810_GMS_DISABLE 0x00000000 | ||
68 | #define I810_PGETBL_CTL 0x2020 | ||
69 | #define I810_PGETBL_ENABLED 0x00000001 | ||
70 | #define I965_PGETBL_SIZE_MASK 0x0000000e | ||
71 | #define I965_PGETBL_SIZE_512KB (0 << 1) | ||
72 | #define I965_PGETBL_SIZE_256KB (1 << 1) | ||
73 | #define I965_PGETBL_SIZE_128KB (2 << 1) | ||
74 | #define I965_PGETBL_SIZE_1MB (3 << 1) | ||
75 | #define I965_PGETBL_SIZE_2MB (4 << 1) | ||
76 | #define I965_PGETBL_SIZE_1_5MB (5 << 1) | ||
77 | #define G33_PGETBL_SIZE_MASK (3 << 8) | ||
78 | #define G33_PGETBL_SIZE_1M (1 << 8) | ||
79 | #define G33_PGETBL_SIZE_2M (2 << 8) | ||
80 | |||
81 | #define I810_DRAM_CTL 0x3000 | ||
82 | #define I810_DRAM_ROW_0 0x00000001 | ||
83 | #define I810_DRAM_ROW_0_SDRAM 0x00000001 | ||
84 | |||
85 | /* Intel 815 register */ | ||
86 | #define INTEL_815_APCONT 0x51 | ||
87 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | ||
88 | |||
89 | /* Intel i820 registers */ | ||
90 | #define INTEL_I820_RDCR 0x51 | ||
91 | #define INTEL_I820_ERRSTS 0xc8 | ||
92 | |||
93 | /* Intel i840 registers */ | ||
94 | #define INTEL_I840_MCHCFG 0x50 | ||
95 | #define INTEL_I840_ERRSTS 0xc8 | ||
96 | |||
97 | /* Intel i850 registers */ | ||
98 | #define INTEL_I850_MCHCFG 0x50 | ||
99 | #define INTEL_I850_ERRSTS 0xc8 | ||
100 | |||
101 | /* intel 915G registers */ | ||
102 | #define I915_GMADDR 0x18 | ||
103 | #define I915_MMADDR 0x10 | ||
104 | #define I915_PTEADDR 0x1C | ||
105 | #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) | ||
106 | #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) | ||
107 | #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) | ||
108 | #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) | ||
109 | #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) | ||
110 | #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) | ||
111 | #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) | ||
112 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | ||
113 | |||
114 | #define I915_IFPADDR 0x60 | ||
115 | |||
116 | /* Intel 965G registers */ | ||
117 | #define I965_MSAC 0x62 | ||
118 | #define I965_IFPADDR 0x70 | ||
119 | |||
120 | /* Intel 7505 registers */ | ||
121 | #define INTEL_I7505_APSIZE 0x74 | ||
122 | #define INTEL_I7505_NCAPID 0x60 | ||
123 | #define INTEL_I7505_NISTAT 0x6c | ||
124 | #define INTEL_I7505_ATTBASE 0x78 | ||
125 | #define INTEL_I7505_ERRSTS 0x42 | ||
126 | #define INTEL_I7505_AGPCTRL 0x70 | ||
127 | #define INTEL_I7505_MCHCFG 0x50 | ||
128 | |||
129 | #define SNB_GMCH_CTRL 0x50 | ||
130 | #define SNB_GMCH_GMS_STOLEN_MASK 0xF8 | ||
131 | #define SNB_GMCH_GMS_STOLEN_32M (1 << 3) | ||
132 | #define SNB_GMCH_GMS_STOLEN_64M (2 << 3) | ||
133 | #define SNB_GMCH_GMS_STOLEN_96M (3 << 3) | ||
134 | #define SNB_GMCH_GMS_STOLEN_128M (4 << 3) | ||
135 | #define SNB_GMCH_GMS_STOLEN_160M (5 << 3) | ||
136 | #define SNB_GMCH_GMS_STOLEN_192M (6 << 3) | ||
137 | #define SNB_GMCH_GMS_STOLEN_224M (7 << 3) | ||
138 | #define SNB_GMCH_GMS_STOLEN_256M (8 << 3) | ||
139 | #define SNB_GMCH_GMS_STOLEN_288M (9 << 3) | ||
140 | #define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) | ||
141 | #define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) | ||
142 | #define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) | ||
143 | #define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) | ||
144 | #define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) | ||
145 | #define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) | ||
146 | #define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) | ||
147 | #define SNB_GTT_SIZE_0M (0 << 8) | ||
148 | #define SNB_GTT_SIZE_1M (1 << 8) | ||
149 | #define SNB_GTT_SIZE_2M (2 << 8) | ||
150 | #define SNB_GTT_SIZE_MASK (3 << 8) | ||
151 | |||
152 | /* pci devices ids */ | ||
153 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | ||
154 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | ||
155 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | ||
156 | #define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972 | ||
157 | #define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980 | ||
158 | #define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982 | ||
159 | #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 | ||
160 | #define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992 | ||
161 | #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 | ||
162 | #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 | ||
163 | #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 | ||
164 | #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 | ||
165 | #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 | ||
166 | #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 | ||
167 | #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC | ||
168 | #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE | ||
169 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010 | ||
170 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011 | ||
171 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000 | ||
172 | #define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001 | ||
173 | #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 | ||
174 | #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 | ||
175 | #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 | ||
176 | #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 | ||
177 | #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 | ||
178 | #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 | ||
179 | #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 | ||
180 | #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 | ||
181 | #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 | ||
182 | #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 | ||
183 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 | ||
184 | #define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02 | ||
185 | #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 | ||
186 | #define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12 | ||
187 | #define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20 | ||
188 | #define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 | ||
189 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | ||
190 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | ||
191 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | ||
192 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | ||
193 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | ||
194 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | ||
195 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a | ||
196 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 | ||
197 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 | ||
198 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | ||
199 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | ||
200 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | ||
201 | |||
202 | /* cover 915 and 945 variants */ | ||
203 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | ||
204 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \ | ||
205 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \ | ||
206 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \ | ||
207 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \ | ||
208 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB) | ||
209 | |||
210 | #define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \ | ||
211 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \ | ||
212 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ | ||
213 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ | ||
214 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ | ||
215 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) | ||
216 | |||
217 | #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ | ||
218 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ | ||
219 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \ | ||
220 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
221 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
222 | |||
223 | #define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \ | ||
224 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) | ||
225 | |||
226 | #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ | ||
227 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
228 | |||
229 | #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ | ||
230 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ | ||
231 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ | ||
232 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ | ||
233 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ | ||
234 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ | ||
235 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ | ||
236 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ | ||
237 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ | ||
238 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ | ||
239 | IS_SNB) | ||
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c new file mode 100644 index 000000000000..e8ea6825822c --- /dev/null +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -0,0 +1,1516 @@ | |||
1 | /* | ||
2 | * Intel GTT (Graphics Translation Table) routines | ||
3 | * | ||
4 | * Caveat: This driver implements the linux agp interface, but this is far from | ||
5 | * an agp driver! GTT support ended up here for purely historical reasons: The | ||
6 | * old userspace intel graphics drivers needed an interface to map memory into | ||
7 | * the GTT. And the drm provides a default interface for graphic devices sitting | ||
8 | * on an agp port. So it made sense to fake the GTT support as an agp port to | ||
9 | * avoid having to create a new api. | ||
10 | * | ||
11 | * With gem this does not make much sense anymore; it just needlessly complicates | ||
12 | * the code. But as long as the old graphics stack is still supported, it's stuck | ||
13 | * here. | ||
14 | * | ||
15 | * /fairy-tale-mode off | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * If we have Intel graphics, we're not going to have anything other than | ||
20 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
21 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
22 | * Only newer chipsets need to bother with this, of course. | ||
23 | */ | ||
24 | #ifdef CONFIG_DMAR | ||
25 | #define USE_PCI_DMA_API 1 | ||
26 | #endif | ||
27 | |||
28 | static const struct aper_size_info_fixed intel_i810_sizes[] = | ||
29 | { | ||
30 | {64, 16384, 4}, | ||
31 | /* The 32M mode still requires a 64k gatt */ | ||
32 | {32, 8192, 4} | ||
33 | }; | ||
34 | |||
35 | #define AGP_DCACHE_MEMORY 1 | ||
36 | #define AGP_PHYS_MEMORY 2 | ||
37 | #define INTEL_AGP_CACHED_MEMORY 3 | ||
38 | |||
39 | static struct gatt_mask intel_i810_masks[] = | ||
40 | { | ||
41 | {.mask = I810_PTE_VALID, .type = 0}, | ||
42 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, | ||
43 | {.mask = I810_PTE_VALID, .type = 0}, | ||
44 | {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, | ||
45 | .type = INTEL_AGP_CACHED_MEMORY} | ||
46 | }; | ||
47 | |||
48 | static struct _intel_private { | ||
49 | struct pci_dev *pcidev; /* device one */ | ||
50 | u8 __iomem *registers; | ||
51 | u32 __iomem *gtt; /* I915G */ | ||
52 | int num_dcache_entries; | ||
53 | /* gtt_entries is the number of gtt entries that are already mapped | ||
54 | * to stolen memory. Stolen memory is larger than the memory mapped | ||
55 | * through gtt_entries, as it includes some reserved space for the BIOS | ||
56 | * popup and for the GTT. | ||
57 | */ | ||
58 | int gtt_entries; /* i830+ */ | ||
59 | int gtt_total_size; | ||
60 | union { | ||
61 | void __iomem *i9xx_flush_page; | ||
62 | void *i8xx_flush_page; | ||
63 | }; | ||
64 | struct page *i8xx_page; | ||
65 | struct resource ifp_resource; | ||
66 | int resource_valid; | ||
67 | } intel_private; | ||
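Because the first gtt_entries PTEs back stolen memory, only the tail of the table is usable for new mappings; the i915 insert/remove paths shown earlier enforce exactly these limits. A minimal stand-alone sketch of those range checks, with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the bounds checks done by the i915 insert path: the first
 * gtt_entries PTEs belong to stolen memory and must be left alone, and
 * the mapping must not run past the end of the aperture. */
static bool gtt_range_ok(int pg_start, int page_count,
			 int gtt_entries, int num_entries)
{
	if (pg_start < gtt_entries)			/* would hit stolen memory */
		return false;
	if (pg_start + page_count > num_entries)	/* past the aperture */
		return false;
	return true;
}

int main(void)
{
	/* e.g. a 16384-entry aperture with 2048 entries of stolen memory */
	printf("%d\n", gtt_range_ok(2048, 256, 2048, 16384));	/* 1: fits */
	printf("%d\n", gtt_range_ok(1024, 256, 2048, 16384));	/* 0: stolen */
	return 0;
}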
68 | |||
69 | #ifdef USE_PCI_DMA_API | ||
70 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
71 | { | ||
72 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
73 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
74 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
75 | return -EINVAL; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) | ||
80 | { | ||
81 | pci_unmap_page(intel_private.pcidev, dma, | ||
82 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
83 | } | ||
84 | |||
85 | static void intel_agp_free_sglist(struct agp_memory *mem) | ||
86 | { | ||
87 | struct sg_table st; | ||
88 | |||
89 | st.sgl = mem->sg_list; | ||
90 | st.orig_nents = st.nents = mem->page_count; | ||
91 | |||
92 | sg_free_table(&st); | ||
93 | |||
94 | mem->sg_list = NULL; | ||
95 | mem->num_sg = 0; | ||
96 | } | ||
97 | |||
98 | static int intel_agp_map_memory(struct agp_memory *mem) | ||
99 | { | ||
100 | struct sg_table st; | ||
101 | struct scatterlist *sg; | ||
102 | int i; | ||
103 | |||
104 | DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); | ||
105 | |||
106 | if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) | ||
107 | return -ENOMEM; | ||
108 | |||
109 | mem->sg_list = sg = st.sgl; | ||
110 | |||
111 | for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) | ||
112 | sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); | ||
113 | |||
114 | mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, | ||
115 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
116 | if (unlikely(!mem->num_sg)) { | ||
117 | intel_agp_free_sglist(mem); | ||
118 | return -ENOMEM; | ||
119 | } | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static void intel_agp_unmap_memory(struct agp_memory *mem) | ||
124 | { | ||
125 | DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); | ||
126 | |||
127 | pci_unmap_sg(intel_private.pcidev, mem->sg_list, | ||
128 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
129 | intel_agp_free_sglist(mem); | ||
130 | } | ||
131 | |||
132 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
133 | off_t pg_start, int mask_type) | ||
134 | { | ||
135 | struct scatterlist *sg; | ||
136 | int i, j; | ||
137 | |||
138 | j = pg_start; | ||
139 | |||
140 | WARN_ON(!mem->num_sg); | ||
141 | |||
142 | if (mem->num_sg == mem->page_count) { | ||
143 | for_each_sg(mem->sg_list, sg, mem->page_count, i) { | ||
144 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
145 | sg_dma_address(sg), mask_type), | ||
146 | intel_private.gtt+j); | ||
147 | j++; | ||
148 | } | ||
149 | } else { | ||
150 | /* the scatterlist may merge pages, but we still have to | ||
151 | * write a separate per-page address into the GTT */ | ||
152 | unsigned int len, m; | ||
153 | |||
154 | for_each_sg(mem->sg_list, sg, mem->num_sg, i) { | ||
155 | len = sg_dma_len(sg) / PAGE_SIZE; | ||
156 | for (m = 0; m < len; m++) { | ||
157 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
158 | sg_dma_address(sg) + m * PAGE_SIZE, | ||
159 | mask_type), | ||
160 | intel_private.gtt+j); | ||
161 | j++; | ||
162 | } | ||
163 | } | ||
164 | } | ||
165 | readl(intel_private.gtt+j-1); | ||
166 | } | ||
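When the scatterlist has fewer segments than pages (the else branch above), each merged segment is expanded back into one PTE per 4KiB page. A tiny user-space illustration of that expansion, with a made-up segment address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	uint64_t seg_dma = 0x10000000;		/* hypothetical merged segment */
	unsigned int seg_len = 4 * PAGE_SIZE;	/* its sg_dma_len() */
	unsigned int m;

	/* one GTT entry per page, even though the segment is contiguous */
	for (m = 0; m < seg_len / PAGE_SIZE; m++)
		printf("PTE <- 0x%llx\n",
		       (unsigned long long)(seg_dma + (uint64_t)m * PAGE_SIZE));
	return 0;
}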
167 | |||
168 | #else | ||
169 | |||
170 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
171 | off_t pg_start, int mask_type) | ||
172 | { | ||
173 | int i, j; | ||
174 | u32 cache_bits = 0; | ||
175 | |||
176 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
177 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) | ||
178 | { | ||
179 | cache_bits = I830_PTE_SYSTEM_CACHED; | ||
180 | } | ||
181 | |||
182 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
183 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
184 | page_to_phys(mem->pages[i]), mask_type), | ||
185 | intel_private.gtt+j); | ||
186 | } | ||
187 | |||
188 | readl(intel_private.gtt+j-1); | ||
189 | } | ||
190 | |||
191 | #endif | ||
192 | |||
193 | static int intel_i810_fetch_size(void) | ||
194 | { | ||
195 | u32 smram_miscc; | ||
196 | struct aper_size_info_fixed *values; | ||
197 | |||
198 | pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc); | ||
199 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
200 | |||
201 | if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { | ||
202 | dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); | ||
203 | return 0; | ||
204 | } | ||
205 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { | ||
206 | agp_bridge->current_size = (void *) (values + 1); | ||
207 | agp_bridge->aperture_size_idx = 1; | ||
208 | return values[1].size; | ||
209 | } else { | ||
210 | agp_bridge->current_size = (void *) (values); | ||
211 | agp_bridge->aperture_size_idx = 0; | ||
212 | return values[0].size; | ||
213 | } | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int intel_i810_configure(void) | ||
219 | { | ||
220 | struct aper_size_info_fixed *current_size; | ||
221 | u32 temp; | ||
222 | int i; | ||
223 | |||
224 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
225 | |||
226 | if (!intel_private.registers) { | ||
227 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); | ||
228 | temp &= 0xfff80000; | ||
229 | |||
230 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
231 | if (!intel_private.registers) { | ||
232 | dev_err(&intel_private.pcidev->dev, | ||
233 | "can't remap memory\n"); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | if ((readl(intel_private.registers+I810_DRAM_CTL) | ||
239 | & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { | ||
240 | /* This will need to be dynamically assigned */ | ||
241 | dev_info(&intel_private.pcidev->dev, | ||
242 | "detected 4MB dedicated video ram\n"); | ||
243 | intel_private.num_dcache_entries = 1024; | ||
244 | } | ||
245 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | ||
246 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
247 | writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
248 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
249 | |||
250 | if (agp_bridge->driver->needs_scratch_page) { | ||
251 | for (i = 0; i < current_size->num_entries; i++) { | ||
252 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
253 | } | ||
254 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ | ||
255 | } | ||
256 | global_cache_flush(); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static void intel_i810_cleanup(void) | ||
261 | { | ||
262 | writel(0, intel_private.registers+I810_PGETBL_CTL); | ||
263 | readl(intel_private.registers); /* PCI Posting. */ | ||
264 | iounmap(intel_private.registers); | ||
265 | } | ||
266 | |||
267 | static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode) | ||
268 | { | ||
269 | return; | ||
270 | } | ||
271 | |||
272 | /* Exists to support ARGB cursors */ | ||
273 | static struct page *i8xx_alloc_pages(void) | ||
274 | { | ||
275 | struct page *page; | ||
276 | |||
277 | page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); | ||
278 | if (page == NULL) | ||
279 | return NULL; | ||
280 | |||
281 | if (set_pages_uc(page, 4) < 0) { | ||
282 | set_pages_wb(page, 4); | ||
283 | __free_pages(page, 2); | ||
284 | return NULL; | ||
285 | } | ||
286 | get_page(page); | ||
287 | atomic_inc(&agp_bridge->current_memory_agp); | ||
288 | return page; | ||
289 | } | ||
290 | |||
291 | static void i8xx_destroy_pages(struct page *page) | ||
292 | { | ||
293 | if (page == NULL) | ||
294 | return; | ||
295 | |||
296 | set_pages_wb(page, 4); | ||
297 | put_page(page); | ||
298 | __free_pages(page, 2); | ||
299 | atomic_dec(&agp_bridge->current_memory_agp); | ||
300 | } | ||
301 | |||
302 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
303 | int type) | ||
304 | { | ||
305 | if (type < AGP_USER_TYPES) | ||
306 | return type; | ||
307 | else if (type == AGP_USER_CACHED_MEMORY) | ||
308 | return INTEL_AGP_CACHED_MEMORY; | ||
309 | else | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
314 | int type) | ||
315 | { | ||
316 | int i, j, num_entries; | ||
317 | void *temp; | ||
318 | int ret = -EINVAL; | ||
319 | int mask_type; | ||
320 | |||
321 | if (mem->page_count == 0) | ||
322 | goto out; | ||
323 | |||
324 | temp = agp_bridge->current_size; | ||
325 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
326 | |||
327 | if ((pg_start + mem->page_count) > num_entries) | ||
328 | goto out_err; | ||
329 | |||
330 | |||
331 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { | ||
332 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { | ||
333 | ret = -EBUSY; | ||
334 | goto out_err; | ||
335 | } | ||
336 | } | ||
337 | |||
338 | if (type != mem->type) | ||
339 | goto out_err; | ||
340 | |||
341 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
342 | |||
343 | switch (mask_type) { | ||
344 | case AGP_DCACHE_MEMORY: | ||
345 | if (!mem->is_flushed) | ||
346 | global_cache_flush(); | ||
347 | for (i = pg_start; i < (pg_start + mem->page_count); i++) { | ||
348 | writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, | ||
349 | intel_private.registers+I810_PTE_BASE+(i*4)); | ||
350 | } | ||
351 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
352 | break; | ||
353 | case AGP_PHYS_MEMORY: | ||
354 | case AGP_NORMAL_MEMORY: | ||
355 | if (!mem->is_flushed) | ||
356 | global_cache_flush(); | ||
357 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
358 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
359 | page_to_phys(mem->pages[i]), mask_type), | ||
360 | intel_private.registers+I810_PTE_BASE+(j*4)); | ||
361 | } | ||
362 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
363 | break; | ||
364 | default: | ||
365 | goto out_err; | ||
366 | } | ||
367 | |||
368 | out: | ||
369 | ret = 0; | ||
370 | out_err: | ||
371 | mem->is_flushed = true; | ||
372 | return ret; | ||
373 | } | ||
374 | |||
375 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
376 | int type) | ||
377 | { | ||
378 | int i; | ||
379 | |||
380 | if (mem->page_count == 0) | ||
381 | return 0; | ||
382 | |||
383 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
384 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
385 | } | ||
386 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * The i810/i830 requires a physical address to program its mouse | ||
393 | * pointer into hardware. | ||
394 | * However, the X server still writes to it through the AGP aperture. | ||
395 | */ | ||
396 | static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) | ||
397 | { | ||
398 | struct agp_memory *new; | ||
399 | struct page *page; | ||
400 | |||
401 | switch (pg_count) { | ||
402 | case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); | ||
403 | break; | ||
404 | case 4: | ||
405 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
406 | page = i8xx_alloc_pages(); | ||
407 | break; | ||
408 | default: | ||
409 | return NULL; | ||
410 | } | ||
411 | |||
412 | if (page == NULL) | ||
413 | return NULL; | ||
414 | |||
415 | new = agp_create_memory(pg_count); | ||
416 | if (new == NULL) | ||
417 | return NULL; | ||
418 | |||
419 | new->pages[0] = page; | ||
420 | if (pg_count == 4) { | ||
421 | /* kludge to get 4 physical pages for ARGB cursor */ | ||
422 | new->pages[1] = new->pages[0] + 1; | ||
423 | new->pages[2] = new->pages[1] + 1; | ||
424 | new->pages[3] = new->pages[2] + 1; | ||
425 | } | ||
426 | new->page_count = pg_count; | ||
427 | new->num_scratch_pages = pg_count; | ||
428 | new->type = AGP_PHYS_MEMORY; | ||
429 | new->physical = page_to_phys(new->pages[0]); | ||
430 | return new; | ||
431 | } | ||
432 | |||
433 | static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) | ||
434 | { | ||
435 | struct agp_memory *new; | ||
436 | |||
437 | if (type == AGP_DCACHE_MEMORY) { | ||
438 | if (pg_count != intel_private.num_dcache_entries) | ||
439 | return NULL; | ||
440 | |||
441 | new = agp_create_memory(1); | ||
442 | if (new == NULL) | ||
443 | return NULL; | ||
444 | |||
445 | new->type = AGP_DCACHE_MEMORY; | ||
446 | new->page_count = pg_count; | ||
447 | new->num_scratch_pages = 0; | ||
448 | agp_free_page_array(new); | ||
449 | return new; | ||
450 | } | ||
451 | if (type == AGP_PHYS_MEMORY) | ||
452 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
453 | return NULL; | ||
454 | } | ||
455 | |||
456 | static void intel_i810_free_by_type(struct agp_memory *curr) | ||
457 | { | ||
458 | agp_free_key(curr->key); | ||
459 | if (curr->type == AGP_PHYS_MEMORY) { | ||
460 | if (curr->page_count == 4) | ||
461 | i8xx_destroy_pages(curr->pages[0]); | ||
462 | else { | ||
463 | agp_bridge->driver->agp_destroy_page(curr->pages[0], | ||
464 | AGP_PAGE_DESTROY_UNMAP); | ||
465 | agp_bridge->driver->agp_destroy_page(curr->pages[0], | ||
466 | AGP_PAGE_DESTROY_FREE); | ||
467 | } | ||
468 | agp_free_page_array(curr); | ||
469 | } | ||
470 | kfree(curr); | ||
471 | } | ||
472 | |||
473 | static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, | ||
474 | dma_addr_t addr, int type) | ||
475 | { | ||
476 | /* Type checking must be done elsewhere */ | ||
477 | return addr | bridge->driver->masks[type].mask; | ||
478 | } | ||
479 | |||
480 | static struct aper_size_info_fixed intel_i830_sizes[] = | ||
481 | { | ||
482 | {128, 32768, 5}, | ||
483 | /* The 64M mode still requires a 128k gatt */ | ||
484 | {64, 16384, 5}, | ||
485 | {256, 65536, 6}, | ||
486 | {512, 131072, 7}, | ||
487 | }; | ||
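As a side note for readers of this table (illustrative only, not part of the patch): each {size, num_entries, page_order} triple pairs an aperture size in MB with the number of 4-byte GTT entries needed to map it, one entry per 4KB page. A minimal sketch of that relationship, where the helper name is hypothetical:

#include <assert.h>

/* One GTT entry maps one 4KB page of the aperture. */
static unsigned int gtt_entries_for_aperture(unsigned int aperture_mb)
{
	return (aperture_mb * 1024u * 1024u) / 4096u;
}

int main(void)
{
	assert(gtt_entries_for_aperture(128) == 32768);	/* 32768 * 4 bytes = 128KB GATT */
	assert(gtt_entries_for_aperture(64)  == 16384);	/* the comment above notes a 128KB GATT is still used */
	assert(gtt_entries_for_aperture(256) == 65536);
	assert(gtt_entries_for_aperture(512) == 131072);
	return 0;
}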
488 | |||
489 | static void intel_i830_init_gtt_entries(void) | ||
490 | { | ||
491 | u16 gmch_ctrl; | ||
492 | int gtt_entries = 0; | ||
493 | u8 rdct; | ||
494 | int local = 0; | ||
495 | static const int ddt[4] = { 0, 16, 32, 64 }; | ||
496 | int size; /* reserved space (in kb) at the top of stolen memory */ | ||
497 | |||
498 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
499 | |||
500 | if (IS_I965) { | ||
501 | u32 pgetbl_ctl; | ||
502 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); | ||
503 | |||
504 | /* The 965 has a field telling us the size of the GTT, | ||
505 | * which may be larger than what is necessary to map the | ||
506 | * aperture. | ||
507 | */ | ||
508 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { | ||
509 | case I965_PGETBL_SIZE_128KB: | ||
510 | size = 128; | ||
511 | break; | ||
512 | case I965_PGETBL_SIZE_256KB: | ||
513 | size = 256; | ||
514 | break; | ||
515 | case I965_PGETBL_SIZE_512KB: | ||
516 | size = 512; | ||
517 | break; | ||
518 | case I965_PGETBL_SIZE_1MB: | ||
519 | size = 1024; | ||
520 | break; | ||
521 | case I965_PGETBL_SIZE_2MB: | ||
522 | size = 2048; | ||
523 | break; | ||
524 | case I965_PGETBL_SIZE_1_5MB: | ||
525 | size = 1024 + 512; | ||
526 | break; | ||
527 | default: | ||
528 | dev_info(&intel_private.pcidev->dev, | ||
529 | "unknown page table size, assuming 512KB\n"); | ||
530 | size = 512; | ||
531 | } | ||
532 | size += 4; /* add in BIOS popup space */ | ||
533 | } else if (IS_G33 && !IS_PINEVIEW) { | ||
534 | /* G33's GTT size defined in gmch_ctrl */ | ||
535 | switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { | ||
536 | case G33_PGETBL_SIZE_1M: | ||
537 | size = 1024; | ||
538 | break; | ||
539 | case G33_PGETBL_SIZE_2M: | ||
540 | size = 2048; | ||
541 | break; | ||
542 | default: | ||
543 | dev_info(&agp_bridge->dev->dev, | ||
544 | "unknown page table size 0x%x, assuming 512KB\n", | ||
545 | (gmch_ctrl & G33_PGETBL_SIZE_MASK)); | ||
546 | size = 512; | ||
547 | } | ||
548 | size += 4; | ||
549 | } else if (IS_G4X || IS_PINEVIEW) { | ||
550 | /* On 4 series hardware, GTT stolen is separate from graphics | ||
551 | * stolen, so ignore it when counting stolen GTT entries. However, | ||
552 | * 4KB of the stolen memory doesn't get mapped to the GTT. | ||
553 | */ | ||
554 | size = 4; | ||
555 | } else { | ||
556 | /* On previous hardware, the GTT size was just what was | ||
557 | * required to map the aperture. | ||
558 | */ | ||
559 | size = agp_bridge->driver->fetch_size() + 4; | ||
560 | } | ||
561 | |||
562 | if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB || | ||
563 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
564 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { | ||
565 | case I830_GMCH_GMS_STOLEN_512: | ||
566 | gtt_entries = KB(512) - KB(size); | ||
567 | break; | ||
568 | case I830_GMCH_GMS_STOLEN_1024: | ||
569 | gtt_entries = MB(1) - KB(size); | ||
570 | break; | ||
571 | case I830_GMCH_GMS_STOLEN_8192: | ||
572 | gtt_entries = MB(8) - KB(size); | ||
573 | break; | ||
574 | case I830_GMCH_GMS_LOCAL: | ||
575 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); | ||
576 | gtt_entries = (I830_RDRAM_ND(rdct) + 1) * | ||
577 | MB(ddt[I830_RDRAM_DDT(rdct)]); | ||
578 | local = 1; | ||
579 | break; | ||
580 | default: | ||
581 | gtt_entries = 0; | ||
582 | break; | ||
583 | } | ||
584 | } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || | ||
585 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { | ||
586 | /* | ||
587 | * SandyBridge has new memory control reg at 0x50.w | ||
588 | */ | ||
589 | u16 snb_gmch_ctl; | ||
590 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
591 | switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { | ||
592 | case SNB_GMCH_GMS_STOLEN_32M: | ||
593 | gtt_entries = MB(32) - KB(size); | ||
594 | break; | ||
595 | case SNB_GMCH_GMS_STOLEN_64M: | ||
596 | gtt_entries = MB(64) - KB(size); | ||
597 | break; | ||
598 | case SNB_GMCH_GMS_STOLEN_96M: | ||
599 | gtt_entries = MB(96) - KB(size); | ||
600 | break; | ||
601 | case SNB_GMCH_GMS_STOLEN_128M: | ||
602 | gtt_entries = MB(128) - KB(size); | ||
603 | break; | ||
604 | case SNB_GMCH_GMS_STOLEN_160M: | ||
605 | gtt_entries = MB(160) - KB(size); | ||
606 | break; | ||
607 | case SNB_GMCH_GMS_STOLEN_192M: | ||
608 | gtt_entries = MB(192) - KB(size); | ||
609 | break; | ||
610 | case SNB_GMCH_GMS_STOLEN_224M: | ||
611 | gtt_entries = MB(224) - KB(size); | ||
612 | break; | ||
613 | case SNB_GMCH_GMS_STOLEN_256M: | ||
614 | gtt_entries = MB(256) - KB(size); | ||
615 | break; | ||
616 | case SNB_GMCH_GMS_STOLEN_288M: | ||
617 | gtt_entries = MB(288) - KB(size); | ||
618 | break; | ||
619 | case SNB_GMCH_GMS_STOLEN_320M: | ||
620 | gtt_entries = MB(320) - KB(size); | ||
621 | break; | ||
622 | case SNB_GMCH_GMS_STOLEN_352M: | ||
623 | gtt_entries = MB(352) - KB(size); | ||
624 | break; | ||
625 | case SNB_GMCH_GMS_STOLEN_384M: | ||
626 | gtt_entries = MB(384) - KB(size); | ||
627 | break; | ||
628 | case SNB_GMCH_GMS_STOLEN_416M: | ||
629 | gtt_entries = MB(416) - KB(size); | ||
630 | break; | ||
631 | case SNB_GMCH_GMS_STOLEN_448M: | ||
632 | gtt_entries = MB(448) - KB(size); | ||
633 | break; | ||
634 | case SNB_GMCH_GMS_STOLEN_480M: | ||
635 | gtt_entries = MB(480) - KB(size); | ||
636 | break; | ||
637 | case SNB_GMCH_GMS_STOLEN_512M: | ||
638 | gtt_entries = MB(512) - KB(size); | ||
639 | break; | ||
640 | } | ||
641 | } else { | ||
642 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { | ||
643 | case I855_GMCH_GMS_STOLEN_1M: | ||
644 | gtt_entries = MB(1) - KB(size); | ||
645 | break; | ||
646 | case I855_GMCH_GMS_STOLEN_4M: | ||
647 | gtt_entries = MB(4) - KB(size); | ||
648 | break; | ||
649 | case I855_GMCH_GMS_STOLEN_8M: | ||
650 | gtt_entries = MB(8) - KB(size); | ||
651 | break; | ||
652 | case I855_GMCH_GMS_STOLEN_16M: | ||
653 | gtt_entries = MB(16) - KB(size); | ||
654 | break; | ||
655 | case I855_GMCH_GMS_STOLEN_32M: | ||
656 | gtt_entries = MB(32) - KB(size); | ||
657 | break; | ||
658 | case I915_GMCH_GMS_STOLEN_48M: | ||
659 | /* Check it's really I915G */ | ||
660 | if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) | ||
661 | gtt_entries = MB(48) - KB(size); | ||
662 | else | ||
663 | gtt_entries = 0; | ||
664 | break; | ||
665 | case I915_GMCH_GMS_STOLEN_64M: | ||
666 | /* Check it's really I915G */ | ||
667 | if (IS_I915 || IS_I965 || IS_G33 || IS_G4X) | ||
668 | gtt_entries = MB(64) - KB(size); | ||
669 | else | ||
670 | gtt_entries = 0; | ||
671 | break; | ||
672 | case G33_GMCH_GMS_STOLEN_128M: | ||
673 | if (IS_G33 || IS_I965 || IS_G4X) | ||
674 | gtt_entries = MB(128) - KB(size); | ||
675 | else | ||
676 | gtt_entries = 0; | ||
677 | break; | ||
678 | case G33_GMCH_GMS_STOLEN_256M: | ||
679 | if (IS_G33 || IS_I965 || IS_G4X) | ||
680 | gtt_entries = MB(256) - KB(size); | ||
681 | else | ||
682 | gtt_entries = 0; | ||
683 | break; | ||
684 | case INTEL_GMCH_GMS_STOLEN_96M: | ||
685 | if (IS_I965 || IS_G4X) | ||
686 | gtt_entries = MB(96) - KB(size); | ||
687 | else | ||
688 | gtt_entries = 0; | ||
689 | break; | ||
690 | case INTEL_GMCH_GMS_STOLEN_160M: | ||
691 | if (IS_I965 || IS_G4X) | ||
692 | gtt_entries = MB(160) - KB(size); | ||
693 | else | ||
694 | gtt_entries = 0; | ||
695 | break; | ||
696 | case INTEL_GMCH_GMS_STOLEN_224M: | ||
697 | if (IS_I965 || IS_G4X) | ||
698 | gtt_entries = MB(224) - KB(size); | ||
699 | else | ||
700 | gtt_entries = 0; | ||
701 | break; | ||
702 | case INTEL_GMCH_GMS_STOLEN_352M: | ||
703 | if (IS_I965 || IS_G4X) | ||
704 | gtt_entries = MB(352) - KB(size); | ||
705 | else | ||
706 | gtt_entries = 0; | ||
707 | break; | ||
708 | default: | ||
709 | gtt_entries = 0; | ||
710 | break; | ||
711 | } | ||
712 | } | ||
713 | if (gtt_entries > 0) { | ||
714 | dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", | ||
715 | gtt_entries / KB(1), local ? "local" : "stolen"); | ||
716 | gtt_entries /= KB(4); | ||
717 | } else { | ||
718 | dev_info(&agp_bridge->dev->dev, | ||
719 | "no pre-allocated video memory detected\n"); | ||
720 | gtt_entries = 0; | ||
721 | } | ||
722 | |||
723 | intel_private.gtt_entries = gtt_entries; | ||
724 | } | ||
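To make the arithmetic in intel_i830_init_gtt_entries() concrete, here is a sketch with assumed values (a 128MB aperture and 32MB of stolen memory on a pre-965 part; the KB/MB macros are re-declared locally for the example): fetch_size() returns 128, so 128KB of GTT plus 4KB of popup space is reserved at the top of stolen memory, and the remainder converts to pre-populated GTT entries.

#define KB(x)	((x) * 1024)
#define MB(x)	(KB(KB(x)))

int main(void)
{
	int size = 128 + 4;			/* fetch_size() + 4, in KB */
	int gtt_entries = MB(32) - KB(size);	/* 33554432 - 135168 = 33419264 bytes */

	gtt_entries /= KB(4);			/* 8159 entries, one per 4KB page */
	return gtt_entries == 8159 ? 0 : 1;
}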
725 | |||
726 | static void intel_i830_fini_flush(void) | ||
727 | { | ||
728 | kunmap(intel_private.i8xx_page); | ||
729 | intel_private.i8xx_flush_page = NULL; | ||
730 | unmap_page_from_agp(intel_private.i8xx_page); | ||
731 | |||
732 | __free_page(intel_private.i8xx_page); | ||
733 | intel_private.i8xx_page = NULL; | ||
734 | } | ||
735 | |||
736 | static void intel_i830_setup_flush(void) | ||
737 | { | ||
738 | /* return if we've already set the flush mechanism up */ | ||
739 | if (intel_private.i8xx_page) | ||
740 | return; | ||
741 | |||
742 | intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); | ||
743 | if (!intel_private.i8xx_page) | ||
744 | return; | ||
745 | |||
746 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | ||
747 | if (!intel_private.i8xx_flush_page) | ||
748 | intel_i830_fini_flush(); | ||
749 | } | ||
750 | |||
751 | /* The chipset_flush interface needs to get data that has already been | ||
752 | * flushed out of the CPU all the way out to main memory, because the GPU | ||
753 | * doesn't snoop those buffers. | ||
754 | * | ||
755 | * The 8xx series doesn't have the same lovely interface for flushing the | ||
756 | * chipset write buffers that the later chips do. According to the 865 | ||
757 | * specs, that write buffer is 64 octwords (64 x 16 bytes = 1KB). So, to | ||
758 | * flush out whatever is sitting in it, we just fill 1KB and clflush it | ||
759 | * out, on the assumption that this pushes the old contents out. It appears to work. | ||
760 | */ | ||
761 | static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) | ||
762 | { | ||
763 | unsigned int *pg = intel_private.i8xx_flush_page; | ||
764 | |||
765 | memset(pg, 0, 1024); | ||
766 | |||
767 | if (cpu_has_clflush) | ||
768 | clflush_cache_range(pg, 1024); | ||
769 | else if (wbinvd_on_all_cpus() != 0) | ||
770 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
771 | } | ||
772 | |||
773 | /* The intel i830 automatically initializes the agp aperture during POST. | ||
774 | * Use the memory already set aside for it in the GTT. | ||
775 | */ | ||
776 | static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) | ||
777 | { | ||
778 | int page_order; | ||
779 | struct aper_size_info_fixed *size; | ||
780 | int num_entries; | ||
781 | u32 temp; | ||
782 | |||
783 | size = agp_bridge->current_size; | ||
784 | page_order = size->page_order; | ||
785 | num_entries = size->num_entries; | ||
786 | agp_bridge->gatt_table_real = NULL; | ||
787 | |||
788 | pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); | ||
789 | temp &= 0xfff80000; | ||
790 | |||
791 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
792 | if (!intel_private.registers) | ||
793 | return -ENOMEM; | ||
794 | |||
795 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
796 | global_cache_flush(); /* FIXME: ?? */ | ||
797 | |||
798 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
799 | intel_i830_init_gtt_entries(); | ||
800 | |||
801 | agp_bridge->gatt_table = NULL; | ||
802 | |||
803 | agp_bridge->gatt_bus_addr = temp; | ||
804 | |||
805 | return 0; | ||
806 | } | ||
807 | |||
808 | /* Return the gatt table to a sane state. Use the top of stolen | ||
809 | * memory for the GTT. | ||
810 | */ | ||
811 | static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge) | ||
812 | { | ||
813 | return 0; | ||
814 | } | ||
815 | |||
816 | static int intel_i830_fetch_size(void) | ||
817 | { | ||
818 | u16 gmch_ctrl; | ||
819 | struct aper_size_info_fixed *values; | ||
820 | |||
821 | values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); | ||
822 | |||
823 | if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB && | ||
824 | agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) { | ||
825 | /* 855GM/852GM/865G have a 128MB aperture */ | ||
826 | agp_bridge->current_size = (void *) values; | ||
827 | agp_bridge->aperture_size_idx = 0; | ||
828 | return values[0].size; | ||
829 | } | ||
830 | |||
831 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
832 | |||
833 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { | ||
834 | agp_bridge->current_size = (void *) values; | ||
835 | agp_bridge->aperture_size_idx = 0; | ||
836 | return values[0].size; | ||
837 | } else { | ||
838 | agp_bridge->current_size = (void *) (values + 1); | ||
839 | agp_bridge->aperture_size_idx = 1; | ||
840 | return values[1].size; | ||
841 | } | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | static int intel_i830_configure(void) | ||
847 | { | ||
848 | struct aper_size_info_fixed *current_size; | ||
849 | u32 temp; | ||
850 | u16 gmch_ctrl; | ||
851 | int i; | ||
852 | |||
853 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
854 | |||
855 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); | ||
856 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
857 | |||
858 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
859 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
860 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
861 | |||
862 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
863 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
864 | |||
865 | if (agp_bridge->driver->needs_scratch_page) { | ||
866 | for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { | ||
867 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
868 | } | ||
869 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */ | ||
870 | } | ||
871 | |||
872 | global_cache_flush(); | ||
873 | |||
874 | intel_i830_setup_flush(); | ||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | static void intel_i830_cleanup(void) | ||
879 | { | ||
880 | iounmap(intel_private.registers); | ||
881 | } | ||
882 | |||
883 | static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
884 | int type) | ||
885 | { | ||
886 | int i, j, num_entries; | ||
887 | void *temp; | ||
888 | int ret = -EINVAL; | ||
889 | int mask_type; | ||
890 | |||
891 | if (mem->page_count == 0) | ||
892 | goto out; | ||
893 | |||
894 | temp = agp_bridge->current_size; | ||
895 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
896 | |||
897 | if (pg_start < intel_private.gtt_entries) { | ||
898 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
899 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
900 | pg_start, intel_private.gtt_entries); | ||
901 | |||
902 | dev_info(&intel_private.pcidev->dev, | ||
903 | "trying to insert into local/stolen memory\n"); | ||
904 | goto out_err; | ||
905 | } | ||
906 | |||
907 | if ((pg_start + mem->page_count) > num_entries) | ||
908 | goto out_err; | ||
909 | |||
910 | /* The i830 can't check the GTT for entries since it's read only; | ||
911 | * depend on the caller to make the correct offset decisions. | ||
912 | */ | ||
913 | |||
914 | if (type != mem->type) | ||
915 | goto out_err; | ||
916 | |||
917 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
918 | |||
919 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
920 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
921 | goto out_err; | ||
922 | |||
923 | if (!mem->is_flushed) | ||
924 | global_cache_flush(); | ||
925 | |||
926 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
927 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
928 | page_to_phys(mem->pages[i]), mask_type), | ||
929 | intel_private.registers+I810_PTE_BASE+(j*4)); | ||
930 | } | ||
931 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
932 | |||
933 | out: | ||
934 | ret = 0; | ||
935 | out_err: | ||
936 | mem->is_flushed = true; | ||
937 | return ret; | ||
938 | } | ||
939 | |||
940 | static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
941 | int type) | ||
942 | { | ||
943 | int i; | ||
944 | |||
945 | if (mem->page_count == 0) | ||
946 | return 0; | ||
947 | |||
948 | if (pg_start < intel_private.gtt_entries) { | ||
949 | dev_info(&intel_private.pcidev->dev, | ||
950 | "trying to disable local/stolen memory\n"); | ||
951 | return -EINVAL; | ||
952 | } | ||
953 | |||
954 | for (i = pg_start; i < (mem->page_count + pg_start); i++) { | ||
955 | writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); | ||
956 | } | ||
957 | readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) | ||
963 | { | ||
964 | if (type == AGP_PHYS_MEMORY) | ||
965 | return alloc_agpphysmem_i8xx(pg_count, type); | ||
966 | /* always return NULL for other allocation types for now */ | ||
967 | return NULL; | ||
968 | } | ||
969 | |||
970 | static int intel_alloc_chipset_flush_resource(void) | ||
971 | { | ||
972 | int ret; | ||
973 | ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, | ||
974 | PAGE_SIZE, PCIBIOS_MIN_MEM, 0, | ||
975 | pcibios_align_resource, agp_bridge->dev); | ||
976 | |||
977 | return ret; | ||
978 | } | ||
979 | |||
980 | static void intel_i915_setup_chipset_flush(void) | ||
981 | { | ||
982 | int ret; | ||
983 | u32 temp; | ||
984 | |||
985 | pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); | ||
986 | if (!(temp & 0x1)) { | ||
987 | intel_alloc_chipset_flush_resource(); | ||
988 | intel_private.resource_valid = 1; | ||
989 | pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
990 | } else { | ||
991 | temp &= ~1; | ||
992 | |||
993 | intel_private.resource_valid = 1; | ||
994 | intel_private.ifp_resource.start = temp; | ||
995 | intel_private.ifp_resource.end = temp + PAGE_SIZE; | ||
996 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
997 | /* some BIOSes reserve this area in a PnP resource, some don't */ | ||
998 | if (ret) | ||
999 | intel_private.resource_valid = 0; | ||
1000 | } | ||
1001 | } | ||
1002 | |||
1003 | static void intel_i965_g33_setup_chipset_flush(void) | ||
1004 | { | ||
1005 | u32 temp_hi, temp_lo; | ||
1006 | int ret; | ||
1007 | |||
1008 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); | ||
1009 | pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); | ||
1010 | |||
1011 | if (!(temp_lo & 0x1)) { | ||
1012 | |||
1013 | intel_alloc_chipset_flush_resource(); | ||
1014 | |||
1015 | intel_private.resource_valid = 1; | ||
1016 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, | ||
1017 | upper_32_bits(intel_private.ifp_resource.start)); | ||
1018 | pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); | ||
1019 | } else { | ||
1020 | u64 l64; | ||
1021 | |||
1022 | temp_lo &= ~0x1; | ||
1023 | l64 = ((u64)temp_hi << 32) | temp_lo; | ||
1024 | |||
1025 | intel_private.resource_valid = 1; | ||
1026 | intel_private.ifp_resource.start = l64; | ||
1027 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; | ||
1028 | ret = request_resource(&iomem_resource, &intel_private.ifp_resource); | ||
1029 | /* some BIOSes reserve this area in a PnP resource, some don't */ | ||
1030 | if (ret) | ||
1031 | intel_private.resource_valid = 0; | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | static void intel_i9xx_setup_flush(void) | ||
1036 | { | ||
1037 | /* return if already configured */ | ||
1038 | if (intel_private.ifp_resource.start) | ||
1039 | return; | ||
1040 | |||
1041 | if (IS_SNB) | ||
1042 | return; | ||
1043 | |||
1044 | /* setup a resource for this object */ | ||
1045 | intel_private.ifp_resource.name = "Intel Flush Page"; | ||
1046 | intel_private.ifp_resource.flags = IORESOURCE_MEM; | ||
1047 | |||
1048 | /* Setup chipset flush for 915 */ | ||
1049 | if (IS_I965 || IS_G33 || IS_G4X) { | ||
1050 | intel_i965_g33_setup_chipset_flush(); | ||
1051 | } else { | ||
1052 | intel_i915_setup_chipset_flush(); | ||
1053 | } | ||
1054 | |||
1055 | if (intel_private.ifp_resource.start) { | ||
1056 | intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); | ||
1057 | if (!intel_private.i9xx_flush_page) | ||
1058 | dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n"); | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1062 | static int intel_i915_configure(void) | ||
1063 | { | ||
1064 | struct aper_size_info_fixed *current_size; | ||
1065 | u32 temp; | ||
1066 | u16 gmch_ctrl; | ||
1067 | int i; | ||
1068 | |||
1069 | current_size = A_SIZE_FIX(agp_bridge->current_size); | ||
1070 | |||
1071 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp); | ||
1072 | |||
1073 | agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); | ||
1074 | |||
1075 | pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); | ||
1076 | gmch_ctrl |= I830_GMCH_ENABLED; | ||
1077 | pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl); | ||
1078 | |||
1079 | writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); | ||
1080 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
1081 | |||
1082 | if (agp_bridge->driver->needs_scratch_page) { | ||
1083 | for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { | ||
1084 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1085 | } | ||
1086 | readl(intel_private.gtt+i-1); /* PCI Posting. */ | ||
1087 | } | ||
1088 | |||
1089 | global_cache_flush(); | ||
1090 | |||
1091 | intel_i9xx_setup_flush(); | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
1096 | static void intel_i915_cleanup(void) | ||
1097 | { | ||
1098 | if (intel_private.i9xx_flush_page) | ||
1099 | iounmap(intel_private.i9xx_flush_page); | ||
1100 | if (intel_private.resource_valid) | ||
1101 | release_resource(&intel_private.ifp_resource); | ||
1102 | intel_private.ifp_resource.start = 0; | ||
1103 | intel_private.resource_valid = 0; | ||
1104 | iounmap(intel_private.gtt); | ||
1105 | iounmap(intel_private.registers); | ||
1106 | } | ||
1107 | |||
1108 | static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) | ||
1109 | { | ||
1110 | if (intel_private.i9xx_flush_page) | ||
1111 | writel(1, intel_private.i9xx_flush_page); | ||
1112 | } | ||
1113 | |||
1114 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | ||
1115 | int type) | ||
1116 | { | ||
1117 | int num_entries; | ||
1118 | void *temp; | ||
1119 | int ret = -EINVAL; | ||
1120 | int mask_type; | ||
1121 | |||
1122 | if (mem->page_count == 0) | ||
1123 | goto out; | ||
1124 | |||
1125 | temp = agp_bridge->current_size; | ||
1126 | num_entries = A_SIZE_FIX(temp)->num_entries; | ||
1127 | |||
1128 | if (pg_start < intel_private.gtt_entries) { | ||
1129 | dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, | ||
1130 | "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", | ||
1131 | pg_start, intel_private.gtt_entries); | ||
1132 | |||
1133 | dev_info(&intel_private.pcidev->dev, | ||
1134 | "trying to insert into local/stolen memory\n"); | ||
1135 | goto out_err; | ||
1136 | } | ||
1137 | |||
1138 | if ((pg_start + mem->page_count) > num_entries) | ||
1139 | goto out_err; | ||
1140 | |||
1141 | /* The i915 can't check the GTT for entries since it's read only; | ||
1142 | * depend on the caller to make the correct offset decisions. | ||
1143 | */ | ||
1144 | |||
1145 | if (type != mem->type) | ||
1146 | goto out_err; | ||
1147 | |||
1148 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
1149 | |||
1150 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && | ||
1151 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
1152 | goto out_err; | ||
1153 | |||
1154 | if (!mem->is_flushed) | ||
1155 | global_cache_flush(); | ||
1156 | |||
1157 | intel_agp_insert_sg_entries(mem, pg_start, mask_type); | ||
1158 | |||
1159 | out: | ||
1160 | ret = 0; | ||
1161 | out_err: | ||
1162 | mem->is_flushed = true; | ||
1163 | return ret; | ||
1164 | } | ||
1165 | |||
1166 | static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, | ||
1167 | int type) | ||
1168 | { | ||
1169 | int i; | ||
1170 | |||
1171 | if (mem->page_count == 0) | ||
1172 | return 0; | ||
1173 | |||
1174 | if (pg_start < intel_private.gtt_entries) { | ||
1175 | dev_info(&intel_private.pcidev->dev, | ||
1176 | "trying to disable local/stolen memory\n"); | ||
1177 | return -EINVAL; | ||
1178 | } | ||
1179 | |||
1180 | for (i = pg_start; i < (mem->page_count + pg_start); i++) | ||
1181 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
1182 | |||
1183 | readl(intel_private.gtt+i-1); | ||
1184 | |||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1188 | /* Return the aperture size by just checking the resource length. The effect | ||
1189 | * of the MSAC registers described in the spec is simply a change of the | ||
1190 | * resource size. | ||
1191 | */ | ||
1192 | static int intel_i9xx_fetch_size(void) | ||
1193 | { | ||
1194 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); | ||
1195 | int aper_size; /* size in megabytes */ | ||
1196 | int i; | ||
1197 | |||
1198 | aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); | ||
1199 | |||
1200 | for (i = 0; i < num_sizes; i++) { | ||
1201 | if (aper_size == intel_i830_sizes[i].size) { | ||
1202 | agp_bridge->current_size = intel_i830_sizes + i; | ||
1203 | return aper_size; | ||
1204 | } | ||
1205 | } | ||
1206 | |||
1207 | return 0; | ||
1208 | } | ||
1209 | |||
1210 | /* The intel i915 automatically initializes the agp aperture during POST. | ||
1211 | * Use the memory already set aside for it in the GTT. | ||
1212 | */ | ||
1213 | static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
1214 | { | ||
1215 | int page_order; | ||
1216 | struct aper_size_info_fixed *size; | ||
1217 | int num_entries; | ||
1218 | u32 temp, temp2; | ||
1219 | int gtt_map_size = 256 * 1024; | ||
1220 | |||
1221 | size = agp_bridge->current_size; | ||
1222 | page_order = size->page_order; | ||
1223 | num_entries = size->num_entries; | ||
1224 | agp_bridge->gatt_table_real = NULL; | ||
1225 | |||
1226 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1227 | pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); | ||
1228 | |||
1229 | if (IS_G33) | ||
1230 | gtt_map_size = 1024 * 1024; /* 1M on G33 */ | ||
1231 | intel_private.gtt = ioremap(temp2, gtt_map_size); | ||
1232 | if (!intel_private.gtt) | ||
1233 | return -ENOMEM; | ||
1234 | |||
1235 | intel_private.gtt_total_size = gtt_map_size / 4; | ||
1236 | |||
1237 | temp &= 0xfff80000; | ||
1238 | |||
1239 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1240 | if (!intel_private.registers) { | ||
1241 | iounmap(intel_private.gtt); | ||
1242 | return -ENOMEM; | ||
1243 | } | ||
1244 | |||
1245 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1246 | global_cache_flush(); /* FIXME: ? */ | ||
1247 | |||
1248 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1249 | intel_i830_init_gtt_entries(); | ||
1250 | |||
1251 | agp_bridge->gatt_table = NULL; | ||
1252 | |||
1253 | agp_bridge->gatt_bus_addr = temp; | ||
1254 | |||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | /* | ||
1259 | * The i965 supports 36-bit physical addresses, but to keep | ||
1260 | * the format of the GTT the same, the bits that don't fit | ||
1261 | * in a 32-bit word are shifted down to bits 4..7. | ||
1262 | * | ||
1263 | * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" | ||
1264 | * is always zero on 32-bit architectures, so no need to make | ||
1265 | * this conditional. | ||
1266 | */ | ||
1267 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | ||
1268 | dma_addr_t addr, int type) | ||
1269 | { | ||
1270 | /* Shift high bits down */ | ||
1271 | addr |= (addr >> 28) & 0xf0; | ||
1272 | |||
1273 | /* Type checking must be done elsewhere */ | ||
1274 | return addr | bridge->driver->masks[type].mask; | ||
1275 | } | ||
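A quick illustration of the bit packing described in the comment above, using a hypothetical address that is not part of the patch: for a page at physical address 0x2_3456_7000, bits 35:32 hold 0x2; shifting right by 28 moves them to bits 7:4, so the 32-bit value handed to writel() becomes 0x34567020 before the per-type mask is OR'ed in.

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t addr = 0x234567000ULL;		/* example 36-bit physical address */

	addr |= (addr >> 28) & 0xf0;		/* fold bits 35:32 down into bits 7:4 */
	assert((uint32_t)addr == 0x34567020u);	/* low 32 bits as written to the GTT */
	return 0;
}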
1276 | |||
1277 | static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) | ||
1278 | { | ||
1279 | u16 snb_gmch_ctl; | ||
1280 | |||
1281 | switch (agp_bridge->dev->device) { | ||
1282 | case PCI_DEVICE_ID_INTEL_GM45_HB: | ||
1283 | case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: | ||
1284 | case PCI_DEVICE_ID_INTEL_Q45_HB: | ||
1285 | case PCI_DEVICE_ID_INTEL_G45_HB: | ||
1286 | case PCI_DEVICE_ID_INTEL_G41_HB: | ||
1287 | case PCI_DEVICE_ID_INTEL_B43_HB: | ||
1288 | case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: | ||
1289 | case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: | ||
1290 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: | ||
1291 | case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: | ||
1292 | *gtt_offset = *gtt_size = MB(2); | ||
1293 | break; | ||
1294 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: | ||
1295 | case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: | ||
1296 | *gtt_offset = MB(2); | ||
1297 | |||
1298 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
1299 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { | ||
1300 | default: | ||
1301 | case SNB_GTT_SIZE_0M: | ||
1302 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); | ||
1303 | *gtt_size = MB(0); | ||
1304 | break; | ||
1305 | case SNB_GTT_SIZE_1M: | ||
1306 | *gtt_size = MB(1); | ||
1307 | break; | ||
1308 | case SNB_GTT_SIZE_2M: | ||
1309 | *gtt_size = MB(2); | ||
1310 | break; | ||
1311 | } | ||
1312 | break; | ||
1313 | default: | ||
1314 | *gtt_offset = *gtt_size = KB(512); | ||
1315 | } | ||
1316 | } | ||
1317 | |||
1318 | /* The intel i965 automatically initializes the agp aperture during POST. | ||
1320 | * Use the memory already set aside for it in the GTT. | ||
1320 | */ | ||
1321 | static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
1322 | { | ||
1323 | int page_order; | ||
1324 | struct aper_size_info_fixed *size; | ||
1325 | int num_entries; | ||
1326 | u32 temp; | ||
1327 | int gtt_offset, gtt_size; | ||
1328 | |||
1329 | size = agp_bridge->current_size; | ||
1330 | page_order = size->page_order; | ||
1331 | num_entries = size->num_entries; | ||
1332 | agp_bridge->gatt_table_real = NULL; | ||
1333 | |||
1334 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); | ||
1335 | |||
1336 | temp &= 0xfff00000; | ||
1337 | |||
1338 | intel_i965_get_gtt_range(&gtt_offset, &gtt_size); | ||
1339 | |||
1340 | intel_private.gtt = ioremap((temp + gtt_offset), gtt_size); | ||
1341 | |||
1342 | if (!intel_private.gtt) | ||
1343 | return -ENOMEM; | ||
1344 | |||
1345 | intel_private.gtt_total_size = gtt_size / 4; | ||
1346 | |||
1347 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
1348 | if (!intel_private.registers) { | ||
1349 | iounmap(intel_private.gtt); | ||
1350 | return -ENOMEM; | ||
1351 | } | ||
1352 | |||
1353 | temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; | ||
1354 | global_cache_flush(); /* FIXME: ? */ | ||
1355 | |||
1356 | /* we have to call this as early as possible after the MMIO base address is known */ | ||
1357 | intel_i830_init_gtt_entries(); | ||
1358 | |||
1359 | agp_bridge->gatt_table = NULL; | ||
1360 | |||
1361 | agp_bridge->gatt_bus_addr = temp; | ||
1362 | |||
1363 | return 0; | ||
1364 | } | ||
1365 | |||
1366 | static const struct agp_bridge_driver intel_810_driver = { | ||
1367 | .owner = THIS_MODULE, | ||
1368 | .aperture_sizes = intel_i810_sizes, | ||
1369 | .size_type = FIXED_APER_SIZE, | ||
1370 | .num_aperture_sizes = 2, | ||
1371 | .needs_scratch_page = true, | ||
1372 | .configure = intel_i810_configure, | ||
1373 | .fetch_size = intel_i810_fetch_size, | ||
1374 | .cleanup = intel_i810_cleanup, | ||
1375 | .mask_memory = intel_i810_mask_memory, | ||
1376 | .masks = intel_i810_masks, | ||
1377 | .agp_enable = intel_i810_agp_enable, | ||
1378 | .cache_flush = global_cache_flush, | ||
1379 | .create_gatt_table = agp_generic_create_gatt_table, | ||
1380 | .free_gatt_table = agp_generic_free_gatt_table, | ||
1381 | .insert_memory = intel_i810_insert_entries, | ||
1382 | .remove_memory = intel_i810_remove_entries, | ||
1383 | .alloc_by_type = intel_i810_alloc_by_type, | ||
1384 | .free_by_type = intel_i810_free_by_type, | ||
1385 | .agp_alloc_page = agp_generic_alloc_page, | ||
1386 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1387 | .agp_destroy_page = agp_generic_destroy_page, | ||
1388 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1389 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1390 | }; | ||
1391 | |||
1392 | static const struct agp_bridge_driver intel_830_driver = { | ||
1393 | .owner = THIS_MODULE, | ||
1394 | .aperture_sizes = intel_i830_sizes, | ||
1395 | .size_type = FIXED_APER_SIZE, | ||
1396 | .num_aperture_sizes = 4, | ||
1397 | .needs_scratch_page = true, | ||
1398 | .configure = intel_i830_configure, | ||
1399 | .fetch_size = intel_i830_fetch_size, | ||
1400 | .cleanup = intel_i830_cleanup, | ||
1401 | .mask_memory = intel_i810_mask_memory, | ||
1402 | .masks = intel_i810_masks, | ||
1403 | .agp_enable = intel_i810_agp_enable, | ||
1404 | .cache_flush = global_cache_flush, | ||
1405 | .create_gatt_table = intel_i830_create_gatt_table, | ||
1406 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1407 | .insert_memory = intel_i830_insert_entries, | ||
1408 | .remove_memory = intel_i830_remove_entries, | ||
1409 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1410 | .free_by_type = intel_i810_free_by_type, | ||
1411 | .agp_alloc_page = agp_generic_alloc_page, | ||
1412 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1413 | .agp_destroy_page = agp_generic_destroy_page, | ||
1414 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1415 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1416 | .chipset_flush = intel_i830_chipset_flush, | ||
1417 | }; | ||
1418 | |||
1419 | static const struct agp_bridge_driver intel_915_driver = { | ||
1420 | .owner = THIS_MODULE, | ||
1421 | .aperture_sizes = intel_i830_sizes, | ||
1422 | .size_type = FIXED_APER_SIZE, | ||
1423 | .num_aperture_sizes = 4, | ||
1424 | .needs_scratch_page = true, | ||
1425 | .configure = intel_i915_configure, | ||
1426 | .fetch_size = intel_i9xx_fetch_size, | ||
1427 | .cleanup = intel_i915_cleanup, | ||
1428 | .mask_memory = intel_i810_mask_memory, | ||
1429 | .masks = intel_i810_masks, | ||
1430 | .agp_enable = intel_i810_agp_enable, | ||
1431 | .cache_flush = global_cache_flush, | ||
1432 | .create_gatt_table = intel_i915_create_gatt_table, | ||
1433 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1434 | .insert_memory = intel_i915_insert_entries, | ||
1435 | .remove_memory = intel_i915_remove_entries, | ||
1436 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1437 | .free_by_type = intel_i810_free_by_type, | ||
1438 | .agp_alloc_page = agp_generic_alloc_page, | ||
1439 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1440 | .agp_destroy_page = agp_generic_destroy_page, | ||
1441 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1442 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1443 | .chipset_flush = intel_i915_chipset_flush, | ||
1444 | #ifdef USE_PCI_DMA_API | ||
1445 | .agp_map_page = intel_agp_map_page, | ||
1446 | .agp_unmap_page = intel_agp_unmap_page, | ||
1447 | .agp_map_memory = intel_agp_map_memory, | ||
1448 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
1449 | #endif | ||
1450 | }; | ||
1451 | |||
1452 | static const struct agp_bridge_driver intel_i965_driver = { | ||
1453 | .owner = THIS_MODULE, | ||
1454 | .aperture_sizes = intel_i830_sizes, | ||
1455 | .size_type = FIXED_APER_SIZE, | ||
1456 | .num_aperture_sizes = 4, | ||
1457 | .needs_scratch_page = true, | ||
1458 | .configure = intel_i915_configure, | ||
1459 | .fetch_size = intel_i9xx_fetch_size, | ||
1460 | .cleanup = intel_i915_cleanup, | ||
1461 | .mask_memory = intel_i965_mask_memory, | ||
1462 | .masks = intel_i810_masks, | ||
1463 | .agp_enable = intel_i810_agp_enable, | ||
1464 | .cache_flush = global_cache_flush, | ||
1465 | .create_gatt_table = intel_i965_create_gatt_table, | ||
1466 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1467 | .insert_memory = intel_i915_insert_entries, | ||
1468 | .remove_memory = intel_i915_remove_entries, | ||
1469 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1470 | .free_by_type = intel_i810_free_by_type, | ||
1471 | .agp_alloc_page = agp_generic_alloc_page, | ||
1472 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1473 | .agp_destroy_page = agp_generic_destroy_page, | ||
1474 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1475 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1476 | .chipset_flush = intel_i915_chipset_flush, | ||
1477 | #ifdef USE_PCI_DMA_API | ||
1478 | .agp_map_page = intel_agp_map_page, | ||
1479 | .agp_unmap_page = intel_agp_unmap_page, | ||
1480 | .agp_map_memory = intel_agp_map_memory, | ||
1481 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
1482 | #endif | ||
1483 | }; | ||
1484 | |||
1485 | static const struct agp_bridge_driver intel_g33_driver = { | ||
1486 | .owner = THIS_MODULE, | ||
1487 | .aperture_sizes = intel_i830_sizes, | ||
1488 | .size_type = FIXED_APER_SIZE, | ||
1489 | .num_aperture_sizes = 4, | ||
1490 | .needs_scratch_page = true, | ||
1491 | .configure = intel_i915_configure, | ||
1492 | .fetch_size = intel_i9xx_fetch_size, | ||
1493 | .cleanup = intel_i915_cleanup, | ||
1494 | .mask_memory = intel_i965_mask_memory, | ||
1495 | .masks = intel_i810_masks, | ||
1496 | .agp_enable = intel_i810_agp_enable, | ||
1497 | .cache_flush = global_cache_flush, | ||
1498 | .create_gatt_table = intel_i915_create_gatt_table, | ||
1499 | .free_gatt_table = intel_i830_free_gatt_table, | ||
1500 | .insert_memory = intel_i915_insert_entries, | ||
1501 | .remove_memory = intel_i915_remove_entries, | ||
1502 | .alloc_by_type = intel_i830_alloc_by_type, | ||
1503 | .free_by_type = intel_i810_free_by_type, | ||
1504 | .agp_alloc_page = agp_generic_alloc_page, | ||
1505 | .agp_alloc_pages = agp_generic_alloc_pages, | ||
1506 | .agp_destroy_page = agp_generic_destroy_page, | ||
1507 | .agp_destroy_pages = agp_generic_destroy_pages, | ||
1508 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1509 | .chipset_flush = intel_i915_chipset_flush, | ||
1510 | #ifdef USE_PCI_DMA_API | ||
1511 | .agp_map_page = intel_agp_map_page, | ||
1512 | .agp_unmap_page = intel_agp_unmap_page, | ||
1513 | .agp_map_memory = intel_agp_map_memory, | ||
1514 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
1515 | #endif | ||
1516 | }; | ||
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 288fc50627e2..0d6ff640e1c6 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h | |||
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops { | |||
70 | void (*dpms)(struct intel_dvo_device *dvo, int mode); | 70 | void (*dpms)(struct intel_dvo_device *dvo, int mode); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Saves the output's state for restoration on VT switch. | ||
74 | */ | ||
75 | void (*save)(struct intel_dvo_device *dvo); | ||
76 | |||
77 | /* | ||
78 | * Restore's the output's state at VT switch. | ||
79 | */ | ||
80 | void (*restore)(struct intel_dvo_device *dvo); | ||
81 | |||
82 | /* | ||
83 | * Callback for testing a video mode for a given output. | 73 | * Callback for testing a video mode for a given output. |
84 | * | 74 | * |
85 | * This function should only check for cases where a mode can't | 75 | * This function should only check for cases where a mode can't |
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 1184c14ba87d..14d59804acd7 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c | |||
@@ -159,16 +159,7 @@ | |||
159 | #define CH7017_BANG_LIMIT_CONTROL 0x7f | 159 | #define CH7017_BANG_LIMIT_CONTROL 0x7f |
160 | 160 | ||
161 | struct ch7017_priv { | 161 | struct ch7017_priv { |
162 | uint8_t save_hapi; | 162 | uint8_t dummy; |
163 | uint8_t save_vali; | ||
164 | uint8_t save_valo; | ||
165 | uint8_t save_ailo; | ||
166 | uint8_t save_lvds_pll_vco; | ||
167 | uint8_t save_feedback_div; | ||
168 | uint8_t save_lvds_control_2; | ||
169 | uint8_t save_outputs_enable; | ||
170 | uint8_t save_lvds_power_down; | ||
171 | uint8_t save_power_management; | ||
172 | }; | 163 | }; |
173 | 164 | ||
174 | static void ch7017_dump_regs(struct intel_dvo_device *dvo); | 165 | static void ch7017_dump_regs(struct intel_dvo_device *dvo); |
@@ -401,39 +392,6 @@ do { \ | |||
401 | DUMP(CH7017_LVDS_POWER_DOWN); | 392 | DUMP(CH7017_LVDS_POWER_DOWN); |
402 | } | 393 | } |
403 | 394 | ||
404 | static void ch7017_save(struct intel_dvo_device *dvo) | ||
405 | { | ||
406 | struct ch7017_priv *priv = dvo->dev_priv; | ||
407 | |||
408 | ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi); | ||
409 | ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo); | ||
410 | ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo); | ||
411 | ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco); | ||
412 | ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div); | ||
413 | ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2); | ||
414 | ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable); | ||
415 | ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down); | ||
416 | ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management); | ||
417 | } | ||
418 | |||
419 | static void ch7017_restore(struct intel_dvo_device *dvo) | ||
420 | { | ||
421 | struct ch7017_priv *priv = dvo->dev_priv; | ||
422 | |||
423 | /* Power down before changing mode */ | ||
424 | ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); | ||
425 | |||
426 | ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi); | ||
427 | ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo); | ||
428 | ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo); | ||
429 | ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco); | ||
430 | ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div); | ||
431 | ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2); | ||
432 | ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable); | ||
433 | ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down); | ||
434 | ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management); | ||
435 | } | ||
436 | |||
437 | static void ch7017_destroy(struct intel_dvo_device *dvo) | 395 | static void ch7017_destroy(struct intel_dvo_device *dvo) |
438 | { | 396 | { |
439 | struct ch7017_priv *priv = dvo->dev_priv; | 397 | struct ch7017_priv *priv = dvo->dev_priv; |
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = { | |||
451 | .mode_set = ch7017_mode_set, | 409 | .mode_set = ch7017_mode_set, |
452 | .dpms = ch7017_dpms, | 410 | .dpms = ch7017_dpms, |
453 | .dump_regs = ch7017_dump_regs, | 411 | .dump_regs = ch7017_dump_regs, |
454 | .save = ch7017_save, | ||
455 | .restore = ch7017_restore, | ||
456 | .destroy = ch7017_destroy, | 412 | .destroy = ch7017_destroy, |
457 | }; | 413 | }; |
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index d56ff5cc22b2..6f1944b24441 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c | |||
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct { | |||
92 | { CH7301_VID, "CH7301" }, | 92 | { CH7301_VID, "CH7301" }, |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct ch7xxx_reg_state { | ||
96 | uint8_t regs[CH7xxx_NUM_REGS]; | ||
97 | }; | ||
98 | |||
99 | struct ch7xxx_priv { | 95 | struct ch7xxx_priv { |
100 | bool quiet; | 96 | bool quiet; |
101 | |||
102 | struct ch7xxx_reg_state save_reg; | ||
103 | struct ch7xxx_reg_state mode_reg; | ||
104 | uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT; | ||
105 | uint8_t save_TLPF, save_TCT, save_PM, save_IDF; | ||
106 | }; | 97 | }; |
107 | 98 | ||
108 | static void ch7xxx_save(struct intel_dvo_device *dvo); | ||
109 | |||
110 | static char *ch7xxx_get_id(uint8_t vid) | 99 | static char *ch7xxx_get_id(uint8_t vid) |
111 | { | 100 | { |
112 | int i; | 101 | int i; |
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) | |||
312 | 301 | ||
313 | static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) | 302 | static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) |
314 | { | 303 | { |
315 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
316 | int i; | 304 | int i; |
317 | 305 | ||
318 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { | 306 | for (i = 0; i < CH7xxx_NUM_REGS; i++) { |
307 | uint8_t val; | ||
319 | if ((i % 8) == 0 ) | 308 | if ((i % 8) == 0 ) |
320 | DRM_LOG_KMS("\n %02X: ", i); | 309 | DRM_LOG_KMS("\n %02X: ", i); |
321 | DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]); | 310 | ch7xxx_readb(dvo, i, &val); |
311 | DRM_LOG_KMS("%02X ", val); | ||
322 | } | 312 | } |
323 | } | 313 | } |
324 | 314 | ||
325 | static void ch7xxx_save(struct intel_dvo_device *dvo) | ||
326 | { | ||
327 | struct ch7xxx_priv *ch7xxx= dvo->dev_priv; | ||
328 | |||
329 | ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL); | ||
330 | ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP); | ||
331 | ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD); | ||
332 | ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT); | ||
333 | ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF); | ||
334 | ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM); | ||
335 | ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF); | ||
336 | } | ||
337 | |||
338 | static void ch7xxx_restore(struct intel_dvo_device *dvo) | ||
339 | { | ||
340 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | ||
341 | |||
342 | ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL); | ||
343 | ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP); | ||
344 | ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD); | ||
345 | ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT); | ||
346 | ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF); | ||
347 | ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF); | ||
348 | ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM); | ||
349 | } | ||
350 | |||
351 | static void ch7xxx_destroy(struct intel_dvo_device *dvo) | 315 | static void ch7xxx_destroy(struct intel_dvo_device *dvo) |
352 | { | 316 | { |
353 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; | 317 | struct ch7xxx_priv *ch7xxx = dvo->dev_priv; |
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = { | |||
365 | .mode_set = ch7xxx_mode_set, | 329 | .mode_set = ch7xxx_mode_set, |
366 | .dpms = ch7xxx_dpms, | 330 | .dpms = ch7xxx_dpms, |
367 | .dump_regs = ch7xxx_dump_regs, | 331 | .dump_regs = ch7xxx_dump_regs, |
368 | .save = ch7xxx_save, | ||
369 | .restore = ch7xxx_restore, | ||
370 | .destroy = ch7xxx_destroy, | 332 | .destroy = ch7xxx_destroy, |
371 | }; | 333 | }; |
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index 24169e528f0f..a2ec3f487202 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c | |||
@@ -153,9 +153,6 @@ struct ivch_priv { | |||
153 | bool quiet; | 153 | bool quiet; |
154 | 154 | ||
155 | uint16_t width, height; | 155 | uint16_t width, height; |
156 | |||
157 | uint16_t save_VR01; | ||
158 | uint16_t save_VR40; | ||
159 | }; | 156 | }; |
160 | 157 | ||
161 | 158 | ||
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo) | |||
405 | DRM_LOG_KMS("VR8F: 0x%04x\n", val); | 402 | DRM_LOG_KMS("VR8F: 0x%04x\n", val); |
406 | } | 403 | } |
407 | 404 | ||
408 | static void ivch_save(struct intel_dvo_device *dvo) | ||
409 | { | ||
410 | struct ivch_priv *priv = dvo->dev_priv; | ||
411 | |||
412 | ivch_read(dvo, VR01, &priv->save_VR01); | ||
413 | ivch_read(dvo, VR40, &priv->save_VR40); | ||
414 | } | ||
415 | |||
416 | static void ivch_restore(struct intel_dvo_device *dvo) | ||
417 | { | ||
418 | struct ivch_priv *priv = dvo->dev_priv; | ||
419 | |||
420 | ivch_write(dvo, VR01, priv->save_VR01); | ||
421 | ivch_write(dvo, VR40, priv->save_VR40); | ||
422 | } | ||
423 | |||
424 | static void ivch_destroy(struct intel_dvo_device *dvo) | 405 | static void ivch_destroy(struct intel_dvo_device *dvo) |
425 | { | 406 | { |
426 | struct ivch_priv *priv = dvo->dev_priv; | 407 | struct ivch_priv *priv = dvo->dev_priv; |
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo) | |||
434 | struct intel_dvo_dev_ops ivch_ops= { | 415 | struct intel_dvo_dev_ops ivch_ops= { |
435 | .init = ivch_init, | 416 | .init = ivch_init, |
436 | .dpms = ivch_dpms, | 417 | .dpms = ivch_dpms, |
437 | .save = ivch_save, | ||
438 | .restore = ivch_restore, | ||
439 | .mode_valid = ivch_mode_valid, | 418 | .mode_valid = ivch_mode_valid, |
440 | .mode_set = ivch_mode_set, | 419 | .mode_set = ivch_mode_set, |
441 | .detect = ivch_detect, | 420 | .detect = ivch_detect, |
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 0001c13f0a80..9b8e6765cf26 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c | |||
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
58 | 58 | ||
59 | #define SIL164_REGC 0x0c | 59 | #define SIL164_REGC 0x0c |
60 | 60 | ||
61 | struct sil164_save_rec { | ||
62 | uint8_t reg8; | ||
63 | uint8_t reg9; | ||
64 | uint8_t regc; | ||
65 | }; | ||
66 | |||
67 | struct sil164_priv { | 61 | struct sil164_priv { |
68 | //I2CDevRec d; | 62 | //I2CDevRec d; |
69 | bool quiet; | 63 | bool quiet; |
70 | struct sil164_save_rec save_regs; | ||
71 | struct sil164_save_rec mode_regs; | ||
72 | }; | 64 | }; |
73 | 65 | ||
74 | #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) | 66 | #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) |
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo) | |||
252 | DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); | 244 | DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); |
253 | } | 245 | } |
254 | 246 | ||
255 | static void sil164_save(struct intel_dvo_device *dvo) | ||
256 | { | ||
257 | struct sil164_priv *sil= dvo->dev_priv; | ||
258 | |||
259 | if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8)) | ||
260 | return; | ||
261 | |||
262 | if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9)) | ||
263 | return; | ||
264 | |||
265 | if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc)) | ||
266 | return; | ||
267 | |||
268 | return; | ||
269 | } | ||
270 | |||
271 | static void sil164_restore(struct intel_dvo_device *dvo) | ||
272 | { | ||
273 | struct sil164_priv *sil = dvo->dev_priv; | ||
274 | |||
275 | /* Restore it powered down initially */ | ||
276 | sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1); | ||
277 | |||
278 | sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9); | ||
279 | sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc); | ||
280 | sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8); | ||
281 | } | ||
282 | |||
283 | static void sil164_destroy(struct intel_dvo_device *dvo) | 247 | static void sil164_destroy(struct intel_dvo_device *dvo) |
284 | { | 248 | { |
285 | struct sil164_priv *sil = dvo->dev_priv; | 249 | struct sil164_priv *sil = dvo->dev_priv; |
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = { | |||
297 | .mode_set = sil164_mode_set, | 261 | .mode_set = sil164_mode_set, |
298 | .dpms = sil164_dpms, | 262 | .dpms = sil164_dpms, |
299 | .dump_regs = sil164_dump_regs, | 263 | .dump_regs = sil164_dump_regs, |
300 | .save = sil164_save, | ||
301 | .restore = sil164_restore, | ||
302 | .destroy = sil164_destroy, | 264 | .destroy = sil164_destroy, |
303 | }; | 265 | }; |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index c7c391bc116a..66c697bc9b22 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
@@ -86,16 +86,8 @@ | |||
86 | #define TFP410_V_RES_LO 0x3C | 86 | #define TFP410_V_RES_LO 0x3C |
87 | #define TFP410_V_RES_HI 0x3D | 87 | #define TFP410_V_RES_HI 0x3D |
88 | 88 | ||
89 | struct tfp410_save_rec { | ||
90 | uint8_t ctl1; | ||
91 | uint8_t ctl2; | ||
92 | }; | ||
93 | |||
94 | struct tfp410_priv { | 89 | struct tfp410_priv { |
95 | bool quiet; | 90 | bool quiet; |
96 | |||
97 | struct tfp410_save_rec saved_reg; | ||
98 | struct tfp410_save_rec mode_reg; | ||
99 | }; | 91 | }; |
100 | 92 | ||
101 | static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) | 93 | static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) |
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo) | |||
293 | DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); | 285 | DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); |
294 | } | 286 | } |
295 | 287 | ||
296 | static void tfp410_save(struct intel_dvo_device *dvo) | ||
297 | { | ||
298 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
299 | |||
300 | if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1)) | ||
301 | return; | ||
302 | |||
303 | if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2)) | ||
304 | return; | ||
305 | } | ||
306 | |||
307 | static void tfp410_restore(struct intel_dvo_device *dvo) | ||
308 | { | ||
309 | struct tfp410_priv *tfp = dvo->dev_priv; | ||
310 | |||
311 | /* Restore it powered down initially */ | ||
312 | tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1); | ||
313 | |||
314 | tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2); | ||
315 | tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1); | ||
316 | } | ||
317 | |||
318 | static void tfp410_destroy(struct intel_dvo_device *dvo) | 288 | static void tfp410_destroy(struct intel_dvo_device *dvo) |
319 | { | 289 | { |
320 | struct tfp410_priv *tfp = dvo->dev_priv; | 290 | struct tfp410_priv *tfp = dvo->dev_priv; |
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = { | |||
332 | .mode_set = tfp410_mode_set, | 302 | .mode_set = tfp410_mode_set, |
333 | .dpms = tfp410_dpms, | 303 | .dpms = tfp410_dpms, |
334 | .dump_regs = tfp410_dump_regs, | 304 | .dump_regs = tfp410_dump_regs, |
335 | .save = tfp410_save, | ||
336 | .restore = tfp410_restore, | ||
337 | .destroy = tfp410_destroy, | 305 | .destroy = tfp410_destroy, |
338 | }; | 306 | }; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2dc93939507d..f0538da9a31c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1579,7 +1579,7 @@ static void i915_get_mem_freq(struct drm_device *dev) | |||
1579 | */ | 1579 | */ |
1580 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 1580 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1581 | { | 1581 | { |
1582 | struct drm_i915_private *dev_priv = dev->dev_private; | 1582 | struct drm_i915_private *dev_priv; |
1583 | resource_size_t base, size; | 1583 | resource_size_t base, size; |
1584 | int ret = 0, mmio_bar; | 1584 | int ret = 0, mmio_bar; |
1585 | uint32_t agp_size, prealloc_size, prealloc_start; | 1585 | uint32_t agp_size, prealloc_size, prealloc_start; |
@@ -1711,6 +1711,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1711 | /* Start out suspended */ | 1711 | /* Start out suspended */ |
1712 | dev_priv->mm.suspended = 1; | 1712 | dev_priv->mm.suspended = 1; |
1713 | 1713 | ||
1714 | intel_detect_pch(dev); | ||
1715 | |||
1714 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1716 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1715 | ret = i915_load_modeset_init(dev, prealloc_start, | 1717 | ret = i915_load_modeset_init(dev, prealloc_start, |
1716 | prealloc_size, agp_size); | 1718 | prealloc_size, agp_size); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0af3dcc85ce9..01e91ea5bdea 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -187,6 +187,35 @@ const static struct pci_device_id pciidlist[] = { | |||
187 | MODULE_DEVICE_TABLE(pci, pciidlist); | 187 | MODULE_DEVICE_TABLE(pci, pciidlist); |
188 | #endif | 188 | #endif |
189 | 189 | ||
190 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 | ||
191 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 | ||
192 | |||
193 | void intel_detect_pch (struct drm_device *dev) | ||
194 | { | ||
195 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
196 | struct pci_dev *pch; | ||
197 | |||
198 | /* | ||
199 | * The reason to probe ISA bridge instead of Dev31:Fun0 is to | ||
200 | * make graphics device passthrough work easy for VMM, that only | ||
201 | * need to expose ISA bridge to let driver know the real hardware | ||
202 | * underneath. This is a requirement from virtualization team. | ||
203 | */ | ||
204 | pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); | ||
205 | if (pch) { | ||
206 | if (pch->vendor == PCI_VENDOR_ID_INTEL) { | ||
207 | int id; | ||
208 | id = pch->device & INTEL_PCH_DEVICE_ID_MASK; | ||
209 | |||
210 | if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | ||
211 | dev_priv->pch_type = PCH_CPT; | ||
212 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); | ||
213 | } | ||
214 | } | ||
215 | pci_dev_put(pch); | ||
216 | } | ||
217 | } | ||
218 | |||
190 | static int i915_drm_freeze(struct drm_device *dev) | 219 | static int i915_drm_freeze(struct drm_device *dev) |
191 | { | 220 | { |
192 | struct drm_i915_private *dev_priv = dev->dev_private; | 221 | struct drm_i915_private *dev_priv = dev->dev_private; |
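
The intel_detect_pch() hunk above boils down to masking the ISA bridge's PCI device ID and comparing the high byte against the CougarPoint range; anything else keeps the zero-initialized default (PCH_IBX). Below is a minimal user-space sketch of that mask-and-compare step, reusing the patch's constant values and a made-up device ID purely for illustration; the real driver goes through pci_get_class()/pci_dev_put() rather than taking the ID as a plain argument.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver's constants: the PCH family is identified by the
 * high byte of the ISA bridge's PCI device ID. */
#define PCH_DEVICE_ID_MASK      0xff00u
#define PCH_CPT_DEVICE_ID_TYPE  0x1c00u

enum pch_type { PCH_IBX, PCH_CPT };

/* Classify a PCH from the ISA bridge device ID; anything that is not in
 * the CougarPoint range is left as the default (PCH_IBX, the zero value). */
static enum pch_type classify_pch(uint16_t isa_bridge_device_id)
{
        if ((isa_bridge_device_id & PCH_DEVICE_ID_MASK) == PCH_CPT_DEVICE_ID_TYPE)
                return PCH_CPT;
        return PCH_IBX;
}

int main(void)
{
        /* 0x1c10 is a made-up CougarPoint-range device ID for illustration. */
        printf("0x1c10 -> %s\n",
               classify_pch(0x1c10) == PCH_CPT ? "CougarPoint" : "Ibexpeak");
        return 0;
}
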
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6960849522f8..790fef32afef 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -221,6 +221,11 @@ enum no_fbc_reason { | |||
221 | FBC_NOT_TILED, /* buffer not tiled */ | 221 | FBC_NOT_TILED, /* buffer not tiled */ |
222 | }; | 222 | }; |
223 | 223 | ||
224 | enum intel_pch { | ||
225 | PCH_IBX, /* Ibexpeak PCH */ | ||
226 | PCH_CPT, /* Cougarpoint PCH */ | ||
227 | }; | ||
228 | |||
224 | typedef struct drm_i915_private { | 229 | typedef struct drm_i915_private { |
225 | struct drm_device *dev; | 230 | struct drm_device *dev; |
226 | 231 | ||
@@ -331,6 +336,9 @@ typedef struct drm_i915_private { | |||
331 | /* Display functions */ | 336 | /* Display functions */ |
332 | struct drm_i915_display_funcs display; | 337 | struct drm_i915_display_funcs display; |
333 | 338 | ||
339 | /* PCH chipset type */ | ||
340 | enum intel_pch pch_type; | ||
341 | |||
334 | /* Register state */ | 342 | /* Register state */ |
335 | bool modeset_on_lid; | 343 | bool modeset_on_lid; |
336 | u8 saveLBB; | 344 | u8 saveLBB; |
@@ -992,6 +1000,9 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | |||
992 | extern void i8xx_disable_fbc(struct drm_device *dev); | 1000 | extern void i8xx_disable_fbc(struct drm_device *dev); |
993 | extern void g4x_disable_fbc(struct drm_device *dev); | 1001 | extern void g4x_disable_fbc(struct drm_device *dev); |
994 | 1002 | ||
1003 | extern void intel_detect_pch (struct drm_device *dev); | ||
1004 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | ||
1005 | |||
995 | /** | 1006 | /** |
996 | * Lock test for when it's just for synchronization of ring access. | 1007 | * Lock test for when it's just for synchronization of ring access. |
997 | * | 1008 | * |
@@ -1123,7 +1134,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1123 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1134 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
1124 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | 1135 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) |
1125 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ | 1136 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ |
1126 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) | 1137 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ |
1138 | !IS_GEN6(dev)) | ||
1127 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) | 1139 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
1128 | /* dsparb controlled by hw only */ | 1140 | /* dsparb controlled by hw only */ |
1129 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1141 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
@@ -1136,6 +1148,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1136 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ | 1148 | #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ |
1137 | IS_GEN6(dev)) | 1149 | IS_GEN6(dev)) |
1138 | 1150 | ||
1151 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | ||
1152 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | ||
1153 | |||
1139 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1154 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
1140 | 1155 | ||
1141 | #endif | 1156 | #endif |
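
i915_drv.h then caches the detected type in dev_priv->pch_type and derives HAS_PCH_CPT() from it, so later code can branch on the PCH family without touching PCI again. A small standalone sketch of that cache-and-macro pattern follows; the struct names here are illustrative stand-ins, not the driver's real types.

#include <stdio.h>

enum pch_type { PCH_IBX, PCH_CPT };

struct device_private {
        enum pch_type pch_type;    /* filled in once at load time */
};

struct device {
        void *dev_private;
};

/* Same shape as the driver's macros: pull the cached type out of the
 * private data and compare it against the wanted family. */
#define DEV_PCH_TYPE(dev)  (((struct device_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev)   (DEV_PCH_TYPE(dev) == PCH_CPT)

int main(void)
{
        struct device_private priv = { .pch_type = PCH_CPT };
        struct device dev = { .dev_private = &priv };

        printf("HAS_PCH_CPT: %d\n", HAS_PCH_CPT(&dev));
        return 0;
}
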
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6421481d6222..4541e339e38a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev) | |||
169 | 169 | ||
170 | if (HAS_PCH_SPLIT(dev)) | 170 | if (HAS_PCH_SPLIT(dev)) |
171 | ironlake_enable_display_irq(dev_priv, DE_GSE); | 171 | ironlake_enable_display_irq(dev_priv, DE_GSE); |
172 | else | 172 | else { |
173 | i915_enable_pipestat(dev_priv, 1, | 173 | i915_enable_pipestat(dev_priv, 1, |
174 | I915_LEGACY_BLC_EVENT_ENABLE); | 174 | I915_LEGACY_BLC_EVENT_ENABLE); |
175 | if (IS_I965G(dev)) | ||
176 | i915_enable_pipestat(dev_priv, 0, | ||
177 | I915_LEGACY_BLC_EVENT_ENABLE); | ||
178 | } | ||
175 | } | 179 | } |
176 | 180 | ||
177 | /** | 181 | /** |
@@ -256,11 +260,11 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
256 | hotplug_work); | 260 | hotplug_work); |
257 | struct drm_device *dev = dev_priv->dev; | 261 | struct drm_device *dev = dev_priv->dev; |
258 | struct drm_mode_config *mode_config = &dev->mode_config; | 262 | struct drm_mode_config *mode_config = &dev->mode_config; |
259 | struct drm_connector *connector; | 263 | struct drm_encoder *encoder; |
260 | 264 | ||
261 | if (mode_config->num_connector) { | 265 | if (mode_config->num_encoder) { |
262 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 266 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
263 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 267 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
264 | 268 | ||
265 | if (intel_encoder->hot_plug) | 269 | if (intel_encoder->hot_plug) |
266 | (*intel_encoder->hot_plug) (intel_encoder); | 270 | (*intel_encoder->hot_plug) (intel_encoder); |
@@ -946,7 +950,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
946 | intel_finish_page_flip(dev, 1); | 950 | intel_finish_page_flip(dev, 1); |
947 | } | 951 | } |
948 | 952 | ||
949 | if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 953 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || |
954 | (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || | ||
950 | (iir & I915_ASLE_INTERRUPT)) | 955 | (iir & I915_ASLE_INTERRUPT)) |
951 | opregion_asle_intr(dev); | 956 | opregion_asle_intr(dev); |
952 | 957 | ||
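
The hotplug worker now walks mode_config->encoder_list and converts each drm_encoder back to its wrapping intel_encoder via enc_to_intel_encoder(), a container_of()-style cast, before calling the hot_plug hook. The sketch below shows only that embed-and-recover idea with a hand-rolled container_of(); the list walk and locking of the real handler are omitted.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the outer struct
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder {
        int id;
};

struct intel_encoder {
        struct encoder base;              /* embedded base object */
        void (*hot_plug)(struct intel_encoder *);
};

static void report_hotplug(struct intel_encoder *enc)
{
        printf("hotplug on encoder %d\n", enc->base.id);
}

int main(void)
{
        struct intel_encoder enc = { .base = { .id = 1 }, .hot_plug = report_hotplug };
        struct encoder *base = &enc.base;

        /* What enc_to_intel_encoder() does in spirit: base pointer -> wrapper. */
        struct intel_encoder *wrapper = container_of(base, struct intel_encoder, base);
        if (wrapper->hot_plug)
                wrapper->hot_plug(wrapper);
        return 0;
}
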
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cbbf59f56dfa..527d30aecda2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1754,6 +1754,14 @@ | |||
1754 | #define DP_LINK_TRAIN_MASK (3 << 28) | 1754 | #define DP_LINK_TRAIN_MASK (3 << 28) |
1755 | #define DP_LINK_TRAIN_SHIFT 28 | 1755 | #define DP_LINK_TRAIN_SHIFT 28 |
1756 | 1756 | ||
1757 | /* CPT Link training mode */ | ||
1758 | #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) | ||
1759 | #define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) | ||
1760 | #define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) | ||
1761 | #define DP_LINK_TRAIN_OFF_CPT (3 << 8) | ||
1762 | #define DP_LINK_TRAIN_MASK_CPT (7 << 8) | ||
1763 | #define DP_LINK_TRAIN_SHIFT_CPT 8 | ||
1764 | |||
1757 | /* Signal voltages. These are mostly controlled by the other end */ | 1765 | /* Signal voltages. These are mostly controlled by the other end */ |
1758 | #define DP_VOLTAGE_0_4 (0 << 25) | 1766 | #define DP_VOLTAGE_0_4 (0 << 25) |
1759 | #define DP_VOLTAGE_0_6 (1 << 25) | 1767 | #define DP_VOLTAGE_0_6 (1 << 25) |
@@ -1978,15 +1986,24 @@ | |||
1978 | 1986 | ||
1979 | #define DSPFW1 0x70034 | 1987 | #define DSPFW1 0x70034 |
1980 | #define DSPFW_SR_SHIFT 23 | 1988 | #define DSPFW_SR_SHIFT 23 |
1989 | #define DSPFW_SR_MASK (0x1ff<<23) | ||
1981 | #define DSPFW_CURSORB_SHIFT 16 | 1990 | #define DSPFW_CURSORB_SHIFT 16 |
1991 | #define DSPFW_CURSORB_MASK (0x3f<<16) | ||
1982 | #define DSPFW_PLANEB_SHIFT 8 | 1992 | #define DSPFW_PLANEB_SHIFT 8 |
1993 | #define DSPFW_PLANEB_MASK (0x7f<<8) | ||
1994 | #define DSPFW_PLANEA_MASK (0x7f) | ||
1983 | #define DSPFW2 0x70038 | 1995 | #define DSPFW2 0x70038 |
1984 | #define DSPFW_CURSORA_MASK 0x00003f00 | 1996 | #define DSPFW_CURSORA_MASK 0x00003f00 |
1985 | #define DSPFW_CURSORA_SHIFT 8 | 1997 | #define DSPFW_CURSORA_SHIFT 8 |
1998 | #define DSPFW_PLANEC_MASK (0x7f) | ||
1986 | #define DSPFW3 0x7003c | 1999 | #define DSPFW3 0x7003c |
1987 | #define DSPFW_HPLL_SR_EN (1<<31) | 2000 | #define DSPFW_HPLL_SR_EN (1<<31) |
1988 | #define DSPFW_CURSOR_SR_SHIFT 24 | 2001 | #define DSPFW_CURSOR_SR_SHIFT 24 |
1989 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) | 2002 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) |
2003 | #define DSPFW_CURSOR_SR_MASK (0x3f<<24) | ||
2004 | #define DSPFW_HPLL_CURSOR_SHIFT 16 | ||
2005 | #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) | ||
2006 | #define DSPFW_HPLL_SR_MASK (0x1ff) | ||
1990 | 2007 | ||
1991 | /* FIFO watermark sizes etc */ | 2008 | /* FIFO watermark sizes etc */ |
1992 | #define G4X_FIFO_LINE_SIZE 64 | 2009 | #define G4X_FIFO_LINE_SIZE 64 |
@@ -2013,6 +2030,43 @@ | |||
2013 | #define PINEVIEW_CURSOR_DFT_WM 0 | 2030 | #define PINEVIEW_CURSOR_DFT_WM 0 |
2014 | #define PINEVIEW_CURSOR_GUARD_WM 5 | 2031 | #define PINEVIEW_CURSOR_GUARD_WM 5 |
2015 | 2032 | ||
2033 | |||
2034 | /* define the Watermark register on Ironlake */ | ||
2035 | #define WM0_PIPEA_ILK 0x45100 | ||
2036 | #define WM0_PIPE_PLANE_MASK (0x7f<<16) | ||
2037 | #define WM0_PIPE_PLANE_SHIFT 16 | ||
2038 | #define WM0_PIPE_SPRITE_MASK (0x3f<<8) | ||
2039 | #define WM0_PIPE_SPRITE_SHIFT 8 | ||
2040 | #define WM0_PIPE_CURSOR_MASK (0x1f) | ||
2041 | |||
2042 | #define WM0_PIPEB_ILK 0x45104 | ||
2043 | #define WM1_LP_ILK 0x45108 | ||
2044 | #define WM1_LP_SR_EN (1<<31) | ||
2045 | #define WM1_LP_LATENCY_SHIFT 24 | ||
2046 | #define WM1_LP_LATENCY_MASK (0x7f<<24) | ||
2047 | #define WM1_LP_SR_MASK (0x1ff<<8) | ||
2048 | #define WM1_LP_SR_SHIFT 8 | ||
2049 | #define WM1_LP_CURSOR_MASK (0x3f) | ||
2050 | |||
2051 | /* Memory latency timer register */ | ||
2052 | #define MLTR_ILK 0x11222 | ||
2053 | /* the unit of memory self-refresh latency time is 0.5us */ | ||
2054 | #define ILK_SRLT_MASK 0x3f | ||
2055 | |||
2056 | /* define the fifo size on Ironlake */ | ||
2057 | #define ILK_DISPLAY_FIFO 128 | ||
2058 | #define ILK_DISPLAY_MAXWM 64 | ||
2059 | #define ILK_DISPLAY_DFTWM 8 | ||
2060 | |||
2061 | #define ILK_DISPLAY_SR_FIFO 512 | ||
2062 | #define ILK_DISPLAY_MAX_SRWM 0x1ff | ||
2063 | #define ILK_DISPLAY_DFT_SRWM 0x3f | ||
2064 | #define ILK_CURSOR_SR_FIFO 64 | ||
2065 | #define ILK_CURSOR_MAX_SRWM 0x3f | ||
2066 | #define ILK_CURSOR_DFT_SRWM 8 | ||
2067 | |||
2068 | #define ILK_FIFO_LINE_SIZE 64 | ||
2069 | |||
2016 | /* | 2070 | /* |
2017 | * The two pipe frame counter registers are not synchronized, so | 2071 | * The two pipe frame counter registers are not synchronized, so |
2018 | * reading a stable value is somewhat tricky. The following code | 2072 | * reading a stable value is somewhat tricky. The following code |
@@ -2293,8 +2347,15 @@ | |||
2293 | #define GTIIR 0x44018 | 2347 | #define GTIIR 0x44018 |
2294 | #define GTIER 0x4401c | 2348 | #define GTIER 0x4401c |
2295 | 2349 | ||
2350 | #define ILK_DISPLAY_CHICKEN2 0x42004 | ||
2351 | #define ILK_DPARB_GATE (1<<22) | ||
2352 | #define ILK_VSDPFD_FULL (1<<21) | ||
2353 | #define ILK_DSPCLK_GATE 0x42020 | ||
2354 | #define ILK_DPARB_CLK_GATE (1<<5) | ||
2355 | |||
2296 | #define DISP_ARB_CTL 0x45000 | 2356 | #define DISP_ARB_CTL 0x45000 |
2297 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 2357 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
2358 | #define DISP_FBC_WM_DIS (1<<15) | ||
2298 | 2359 | ||
2299 | /* PCH */ | 2360 | /* PCH */ |
2300 | 2361 | ||
@@ -2305,6 +2366,11 @@ | |||
2305 | #define SDE_PORTB_HOTPLUG (1 << 8) | 2366 | #define SDE_PORTB_HOTPLUG (1 << 8) |
2306 | #define SDE_SDVOB_HOTPLUG (1 << 6) | 2367 | #define SDE_SDVOB_HOTPLUG (1 << 6) |
2307 | #define SDE_HOTPLUG_MASK (0xf << 8) | 2368 | #define SDE_HOTPLUG_MASK (0xf << 8) |
2369 | /* CPT */ | ||
2370 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | ||
2371 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | ||
2372 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | ||
2373 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | ||
2308 | 2374 | ||
2309 | #define SDEISR 0xc4000 | 2375 | #define SDEISR 0xc4000 |
2310 | #define SDEIMR 0xc4004 | 2376 | #define SDEIMR 0xc4004 |
@@ -2396,6 +2462,17 @@ | |||
2396 | #define PCH_SSC4_PARMS 0xc6210 | 2462 | #define PCH_SSC4_PARMS 0xc6210 |
2397 | #define PCH_SSC4_AUX_PARMS 0xc6214 | 2463 | #define PCH_SSC4_AUX_PARMS 0xc6214 |
2398 | 2464 | ||
2465 | #define PCH_DPLL_SEL 0xc7000 | ||
2466 | #define TRANSA_DPLL_ENABLE (1<<3) | ||
2467 | #define TRANSA_DPLLB_SEL (1<<0) | ||
2468 | #define TRANSA_DPLLA_SEL 0 | ||
2469 | #define TRANSB_DPLL_ENABLE (1<<7) | ||
2470 | #define TRANSB_DPLLB_SEL (1<<4) | ||
2471 | #define TRANSB_DPLLA_SEL (0) | ||
2472 | #define TRANSC_DPLL_ENABLE (1<<11) | ||
2473 | #define TRANSC_DPLLB_SEL (1<<8) | ||
2474 | #define TRANSC_DPLLA_SEL (0) | ||
2475 | |||
2399 | /* transcoder */ | 2476 | /* transcoder */ |
2400 | 2477 | ||
2401 | #define TRANS_HTOTAL_A 0xe0000 | 2478 | #define TRANS_HTOTAL_A 0xe0000 |
@@ -2482,6 +2559,19 @@ | |||
2482 | #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) | 2559 | #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) |
2483 | #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) | 2560 | #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) |
2484 | #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) | 2561 | #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) |
2562 | /* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level. | ||
2563 | SNB has different settings. */ | ||
2564 | /* SNB A-stepping */ | ||
2565 | #define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) | ||
2566 | #define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) | ||
2567 | #define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) | ||
2568 | #define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) | ||
2569 | /* SNB B-stepping */ | ||
2570 | #define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) | ||
2571 | #define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) | ||
2572 | #define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) | ||
2573 | #define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) | ||
2574 | #define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) | ||
2485 | #define FDI_DP_PORT_WIDTH_X1 (0<<19) | 2575 | #define FDI_DP_PORT_WIDTH_X1 (0<<19) |
2486 | #define FDI_DP_PORT_WIDTH_X2 (1<<19) | 2576 | #define FDI_DP_PORT_WIDTH_X2 (1<<19) |
2487 | #define FDI_DP_PORT_WIDTH_X3 (2<<19) | 2577 | #define FDI_DP_PORT_WIDTH_X3 (2<<19) |
@@ -2514,6 +2604,13 @@ | |||
2514 | #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) | 2604 | #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) |
2515 | #define FDI_SEL_RAWCLK (0<<4) | 2605 | #define FDI_SEL_RAWCLK (0<<4) |
2516 | #define FDI_SEL_PCDCLK (1<<4) | 2606 | #define FDI_SEL_PCDCLK (1<<4) |
2607 | /* CPT */ | ||
2608 | #define FDI_AUTO_TRAINING (1<<10) | ||
2609 | #define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) | ||
2610 | #define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) | ||
2611 | #define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) | ||
2612 | #define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) | ||
2613 | #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) | ||
2517 | 2614 | ||
2518 | #define FDI_RXA_MISC 0xf0010 | 2615 | #define FDI_RXA_MISC 0xf0010 |
2519 | #define FDI_RXB_MISC 0xf1010 | 2616 | #define FDI_RXB_MISC 0xf1010 |
@@ -2585,6 +2682,9 @@ | |||
2585 | #define HSYNC_ACTIVE_HIGH (1 << 3) | 2682 | #define HSYNC_ACTIVE_HIGH (1 << 3) |
2586 | #define PORT_DETECTED (1 << 2) | 2683 | #define PORT_DETECTED (1 << 2) |
2587 | 2684 | ||
2685 | /* PCH SDVOB multiplex with HDMIB */ | ||
2686 | #define PCH_SDVOB HDMIB | ||
2687 | |||
2588 | #define HDMIC 0xe1150 | 2688 | #define HDMIC 0xe1150 |
2589 | #define HDMID 0xe1160 | 2689 | #define HDMID 0xe1160 |
2590 | 2690 | ||
@@ -2642,4 +2742,42 @@ | |||
2642 | #define PCH_DPD_AUX_CH_DATA4 0xe4320 | 2742 | #define PCH_DPD_AUX_CH_DATA4 0xe4320 |
2643 | #define PCH_DPD_AUX_CH_DATA5 0xe4324 | 2743 | #define PCH_DPD_AUX_CH_DATA5 0xe4324 |
2644 | 2744 | ||
2745 | /* CPT */ | ||
2746 | #define PORT_TRANS_A_SEL_CPT 0 | ||
2747 | #define PORT_TRANS_B_SEL_CPT (1<<29) | ||
2748 | #define PORT_TRANS_C_SEL_CPT (2<<29) | ||
2749 | #define PORT_TRANS_SEL_MASK (3<<29) | ||
2750 | |||
2751 | #define TRANS_DP_CTL_A 0xe0300 | ||
2752 | #define TRANS_DP_CTL_B 0xe1300 | ||
2753 | #define TRANS_DP_CTL_C 0xe2300 | ||
2754 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) | ||
2755 | #define TRANS_DP_PORT_SEL_B (0<<29) | ||
2756 | #define TRANS_DP_PORT_SEL_C (1<<29) | ||
2757 | #define TRANS_DP_PORT_SEL_D (2<<29) | ||
2758 | #define TRANS_DP_PORT_SEL_MASK (3<<29) | ||
2759 | #define TRANS_DP_AUDIO_ONLY (1<<26) | ||
2760 | #define TRANS_DP_ENH_FRAMING (1<<18) | ||
2761 | #define TRANS_DP_8BPC (0<<9) | ||
2762 | #define TRANS_DP_10BPC (1<<9) | ||
2763 | #define TRANS_DP_6BPC (2<<9) | ||
2764 | #define TRANS_DP_12BPC (3<<9) | ||
2765 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) | ||
2766 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 | ||
2767 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) | ||
2768 | #define TRANS_DP_HSYNC_ACTIVE_LOW 0 | ||
2769 | |||
2770 | /* SNB eDP training params */ | ||
2771 | /* SNB A-stepping */ | ||
2772 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) | ||
2773 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) | ||
2774 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) | ||
2775 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) | ||
2776 | /* SNB B-stepping */ | ||
2777 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) | ||
2778 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) | ||
2779 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) | ||
2780 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) | ||
2781 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) | ||
2782 | |||
2645 | #endif /* _I915_REG_H_ */ | 2783 | #endif /* _I915_REG_H_ */ |
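
Almost all of the new register definitions above follow the kernel's usual mask-plus-shift convention (DSPFW_SR_MASK with DSPFW_SR_SHIFT, WM0_PIPE_PLANE_MASK with WM0_PIPE_PLANE_SHIFT, and so on), so a field is updated by clearing its mask and OR-ing in a shifted value. A generic sketch of that read-modify-write idiom, with the register modeled as a plain variable instead of an I915_READ()/I915_WRITE() pair:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout, mirroring e.g. WM0_PIPE_PLANE_MASK (0x7f<<16). */
#define PLANE_WM_SHIFT  16
#define PLANE_WM_MASK   (0x7fu << PLANE_WM_SHIFT)

static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift, uint32_t val)
{
        reg &= ~mask;                        /* clear the old field */
        reg |= (val << shift) & mask;        /* insert the new value, clamped to the mask */
        return reg;
}

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t reg = 0xdeadbeef;

        reg = set_field(reg, PLANE_WM_MASK, PLANE_WM_SHIFT, 0x40);
        printf("plane watermark = 0x%x\n",
               (unsigned int)get_field(reg, PLANE_WM_MASK, PLANE_WM_SHIFT));
        return 0;
}
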
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index ac0d1a73ac22..60a5800fba6e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev) | |||
600 | } | 600 | } |
601 | /* FIXME: save TV & SDVO state */ | 601 | /* FIXME: save TV & SDVO state */ |
602 | 602 | ||
603 | /* FBC state */ | 603 | /* Only save FBC state on the platform that supports FBC */ |
604 | if (IS_GM45(dev)) { | 604 | if (I915_HAS_FBC(dev)) { |
605 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); | 605 | if (IS_GM45(dev)) { |
606 | } else { | 606 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); |
607 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); | 607 | } else { |
608 | dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); | 608 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); |
609 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); | 609 | dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); |
610 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | 610 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); |
611 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | ||
612 | } | ||
611 | } | 613 | } |
612 | 614 | ||
613 | /* VGA state */ | 615 | /* VGA state */ |
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev) | |||
702 | } | 704 | } |
703 | /* FIXME: restore TV & SDVO state */ | 705 | /* FIXME: restore TV & SDVO state */ |
704 | 706 | ||
705 | /* FBC info */ | 707 | /* only restore FBC info on the platform that supports FBC*/ |
706 | if (IS_GM45(dev)) { | 708 | if (I915_HAS_FBC(dev)) { |
707 | g4x_disable_fbc(dev); | 709 | if (IS_GM45(dev)) { |
708 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 710 | g4x_disable_fbc(dev); |
709 | } else { | 711 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
710 | i8xx_disable_fbc(dev); | 712 | } else { |
711 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); | 713 | i8xx_disable_fbc(dev); |
712 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); | 714 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); |
713 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); | 715 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); |
714 | I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); | 716 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); |
717 | I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); | ||
718 | } | ||
715 | } | 719 | } |
716 | |||
717 | /* VGA state */ | 720 | /* VGA state */ |
718 | if (IS_IRONLAKE(dev)) | 721 | if (IS_IRONLAKE(dev)) |
719 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | 722 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); |
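
The suspend/resume change is purely a gating one: the FBC registers are only touched when I915_HAS_FBC() reports the feature, with the existing GM45-versus-8xx branch kept inside the guard. A trivial sketch of that capability-gated save, using invented flag names and dummy values in place of the real register reads:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chip {
        bool has_fbc;     /* capability flag, analogous to I915_HAS_FBC() */
        bool is_gm45;     /* selects which register set applies */
};

static void save_fbc_state(const struct chip *c, uint32_t *saved)
{
        if (!c->has_fbc)
                return;           /* nothing to save on platforms without FBC */
        if (c->is_gm45)
                *saved = 0x1111;  /* stand-in for reading DPFC_CB_BASE */
        else
                *saved = 0x2222;  /* stand-in for reading the FBC_* registers */
}

int main(void)
{
        struct chip c = { .has_fbc = true, .is_gm45 = false };
        uint32_t saved = 0;

        save_fbc_state(&c, &saved);
        printf("saved = 0x%x\n", (unsigned int)saved);
        return 0;
}
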
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 01840d9bc38f..303815321c79 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence, | |||
115 | __entry->obj, __entry->fence, __entry->tiling_mode) | 115 | __entry->obj, __entry->fence, __entry->tiling_mode) |
116 | ); | 116 | ); |
117 | 117 | ||
118 | TRACE_EVENT(i915_gem_object_unbind, | 118 | DECLARE_EVENT_CLASS(i915_gem_object, |
119 | 119 | ||
120 | TP_PROTO(struct drm_gem_object *obj), | 120 | TP_PROTO(struct drm_gem_object *obj), |
121 | 121 | ||
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind, | |||
132 | TP_printk("obj=%p", __entry->obj) | 132 | TP_printk("obj=%p", __entry->obj) |
133 | ); | 133 | ); |
134 | 134 | ||
135 | TRACE_EVENT(i915_gem_object_destroy, | 135 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, |
136 | 136 | ||
137 | TP_PROTO(struct drm_gem_object *obj), | 137 | TP_PROTO(struct drm_gem_object *obj), |
138 | 138 | ||
139 | TP_ARGS(obj), | 139 | TP_ARGS(obj) |
140 | ); | ||
140 | 141 | ||
141 | TP_STRUCT__entry( | 142 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, |
142 | __field(struct drm_gem_object *, obj) | ||
143 | ), | ||
144 | 143 | ||
145 | TP_fast_assign( | 144 | TP_PROTO(struct drm_gem_object *obj), |
146 | __entry->obj = obj; | ||
147 | ), | ||
148 | 145 | ||
149 | TP_printk("obj=%p", __entry->obj) | 146 | TP_ARGS(obj) |
150 | ); | 147 | ); |
151 | 148 | ||
152 | /* batch tracing */ | 149 | /* batch tracing */ |
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush, | |||
197 | __entry->flush_domains, __entry->invalidate_domains) | 194 | __entry->flush_domains, __entry->invalidate_domains) |
198 | ); | 195 | ); |
199 | 196 | ||
200 | 197 | DECLARE_EVENT_CLASS(i915_gem_request, | |
201 | TRACE_EVENT(i915_gem_request_complete, | ||
202 | 198 | ||
203 | TP_PROTO(struct drm_device *dev, u32 seqno), | 199 | TP_PROTO(struct drm_device *dev, u32 seqno), |
204 | 200 | ||
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete, | |||
217 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 213 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) |
218 | ); | 214 | ); |
219 | 215 | ||
220 | TRACE_EVENT(i915_gem_request_retire, | 216 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, |
221 | 217 | ||
222 | TP_PROTO(struct drm_device *dev, u32 seqno), | 218 | TP_PROTO(struct drm_device *dev, u32 seqno), |
223 | 219 | ||
224 | TP_ARGS(dev, seqno), | 220 | TP_ARGS(dev, seqno) |
225 | |||
226 | TP_STRUCT__entry( | ||
227 | __field(u32, dev) | ||
228 | __field(u32, seqno) | ||
229 | ), | ||
230 | |||
231 | TP_fast_assign( | ||
232 | __entry->dev = dev->primary->index; | ||
233 | __entry->seqno = seqno; | ||
234 | ), | ||
235 | |||
236 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | ||
237 | ); | 221 | ); |
238 | 222 | ||
239 | TRACE_EVENT(i915_gem_request_wait_begin, | 223 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, |
240 | 224 | ||
241 | TP_PROTO(struct drm_device *dev, u32 seqno), | 225 | TP_PROTO(struct drm_device *dev, u32 seqno), |
242 | 226 | ||
243 | TP_ARGS(dev, seqno), | 227 | TP_ARGS(dev, seqno) |
244 | |||
245 | TP_STRUCT__entry( | ||
246 | __field(u32, dev) | ||
247 | __field(u32, seqno) | ||
248 | ), | ||
249 | |||
250 | TP_fast_assign( | ||
251 | __entry->dev = dev->primary->index; | ||
252 | __entry->seqno = seqno; | ||
253 | ), | ||
254 | |||
255 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | ||
256 | ); | 228 | ); |
257 | 229 | ||
258 | TRACE_EVENT(i915_gem_request_wait_end, | 230 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, |
259 | 231 | ||
260 | TP_PROTO(struct drm_device *dev, u32 seqno), | 232 | TP_PROTO(struct drm_device *dev, u32 seqno), |
261 | 233 | ||
262 | TP_ARGS(dev, seqno), | 234 | TP_ARGS(dev, seqno) |
235 | ); | ||
263 | 236 | ||
264 | TP_STRUCT__entry( | 237 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, |
265 | __field(u32, dev) | ||
266 | __field(u32, seqno) | ||
267 | ), | ||
268 | 238 | ||
269 | TP_fast_assign( | 239 | TP_PROTO(struct drm_device *dev, u32 seqno), |
270 | __entry->dev = dev->primary->index; | ||
271 | __entry->seqno = seqno; | ||
272 | ), | ||
273 | 240 | ||
274 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 241 | TP_ARGS(dev, seqno) |
275 | ); | 242 | ); |
276 | 243 | ||
277 | TRACE_EVENT(i915_ring_wait_begin, | 244 | DECLARE_EVENT_CLASS(i915_ring, |
278 | 245 | ||
279 | TP_PROTO(struct drm_device *dev), | 246 | TP_PROTO(struct drm_device *dev), |
280 | 247 | ||
@@ -291,21 +258,18 @@ TRACE_EVENT(i915_ring_wait_begin, | |||
291 | TP_printk("dev=%u", __entry->dev) | 258 | TP_printk("dev=%u", __entry->dev) |
292 | ); | 259 | ); |
293 | 260 | ||
294 | TRACE_EVENT(i915_ring_wait_end, | 261 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, |
295 | 262 | ||
296 | TP_PROTO(struct drm_device *dev), | 263 | TP_PROTO(struct drm_device *dev), |
297 | 264 | ||
298 | TP_ARGS(dev), | 265 | TP_ARGS(dev) |
266 | ); | ||
299 | 267 | ||
300 | TP_STRUCT__entry( | 268 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, |
301 | __field(u32, dev) | ||
302 | ), | ||
303 | 269 | ||
304 | TP_fast_assign( | 270 | TP_PROTO(struct drm_device *dev), |
305 | __entry->dev = dev->primary->index; | ||
306 | ), | ||
307 | 271 | ||
308 | TP_printk("dev=%u", __entry->dev) | 272 | TP_ARGS(dev) |
309 | ); | 273 | ); |
310 | 274 | ||
311 | #endif /* _I915_TRACE_H_ */ | 275 | #endif /* _I915_TRACE_H_ */ |
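
The tracepoint rework replaces several copies of the same TP_STRUCT__entry/TP_fast_assign/TP_printk boilerplate with one DECLARE_EVENT_CLASS() and per-event DEFINE_EVENT() lines that reuse it. Outside the kernel's tracing macros the same factoring can be sketched with an ordinary helper plus a thin name-only macro; this is an analogy for the deduplication, not the trace infrastructure itself:

#include <stdio.h>

/* The shared "class": one body that every event in the class reuses,
 * analogous to DECLARE_EVENT_CLASS(i915_gem_request, ...). */
static void trace_dev_seqno(const char *event, unsigned int dev, unsigned int seqno)
{
        printf("%s: dev=%u, seqno=%u\n", event, dev, seqno);
}

/* Each DEFINE_EVENT() analogue only names a new event; no duplicated
 * entry/assign/print boilerplate. */
#define DEFINE_DEV_SEQNO_EVENT(name)                                    \
        static void trace_##name(unsigned int dev, unsigned int seqno)  \
        {                                                               \
                trace_dev_seqno(#name, dev, seqno);                     \
        }

DEFINE_DEV_SEQNO_EVENT(request_complete)
DEFINE_DEV_SEQNO_EVENT(request_retire)
DEFINE_DEV_SEQNO_EVENT(request_wait_begin)

int main(void)
{
        trace_request_complete(0, 42);
        trace_request_retire(0, 42);
        trace_request_wait_begin(0, 43);
        return 0;
}
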
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 759c2ef72eff..26756cd34e3c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
136 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | 136 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
137 | 137 | ||
138 | if (intel_crtc->pipe == 0) { | 138 | if (intel_crtc->pipe == 0) { |
139 | adpa |= ADPA_PIPE_A_SELECT; | 139 | if (HAS_PCH_CPT(dev)) |
140 | adpa |= PORT_TRANS_A_SEL_CPT; | ||
141 | else | ||
142 | adpa |= ADPA_PIPE_A_SELECT; | ||
140 | if (!HAS_PCH_SPLIT(dev)) | 143 | if (!HAS_PCH_SPLIT(dev)) |
141 | I915_WRITE(BCLRPAT_A, 0); | 144 | I915_WRITE(BCLRPAT_A, 0); |
142 | } else { | 145 | } else { |
143 | adpa |= ADPA_PIPE_B_SELECT; | 146 | if (HAS_PCH_CPT(dev)) |
147 | adpa |= PORT_TRANS_B_SEL_CPT; | ||
148 | else | ||
149 | adpa |= ADPA_PIPE_B_SELECT; | ||
144 | if (!HAS_PCH_SPLIT(dev)) | 150 | if (!HAS_PCH_SPLIT(dev)) |
145 | I915_WRITE(BCLRPAT_B, 0); | 151 | I915_WRITE(BCLRPAT_B, 0); |
146 | } | 152 | } |
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
152 | { | 158 | { |
153 | struct drm_device *dev = connector->dev; | 159 | struct drm_device *dev = connector->dev; |
154 | struct drm_i915_private *dev_priv = dev->dev_private; | 160 | struct drm_i915_private *dev_priv = dev->dev_private; |
155 | u32 adpa; | 161 | u32 adpa, temp; |
156 | bool ret; | 162 | bool ret; |
157 | 163 | ||
158 | adpa = I915_READ(PCH_ADPA); | 164 | temp = adpa = I915_READ(PCH_ADPA); |
159 | 165 | ||
160 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 166 | if (HAS_PCH_CPT(dev)) { |
161 | /* disable HPD first */ | 167 | /* Disable DAC before force detect */ |
162 | I915_WRITE(PCH_ADPA, adpa); | 168 | I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); |
163 | (void)I915_READ(PCH_ADPA); | 169 | (void)I915_READ(PCH_ADPA); |
170 | } else { | ||
171 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
172 | /* disable HPD first */ | ||
173 | I915_WRITE(PCH_ADPA, adpa); | ||
174 | (void)I915_READ(PCH_ADPA); | ||
175 | } | ||
164 | 176 | ||
165 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 177 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
166 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 178 | ADPA_CRT_HOTPLUG_WARMUP_10MS | |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
176 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) | 188 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) |
177 | ; | 189 | ; |
178 | 190 | ||
191 | if (HAS_PCH_CPT(dev)) { | ||
192 | I915_WRITE(PCH_ADPA, temp); | ||
193 | (void)I915_READ(PCH_ADPA); | ||
194 | } | ||
195 | |||
179 | /* Check the status to see if both blue and green are on now */ | 196 | /* Check the status to see if both blue and green are on now */ |
180 | adpa = I915_READ(PCH_ADPA); | 197 | adpa = I915_READ(PCH_ADPA); |
181 | adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; | 198 | adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; |
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
245 | return false; | 262 | return false; |
246 | } | 263 | } |
247 | 264 | ||
248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) | 265 | static bool intel_crt_detect_ddc(struct drm_encoder *encoder) |
249 | { | 266 | { |
250 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 267 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
251 | 268 | ||
252 | /* CRT should always be at 0, but check anyway */ | 269 | /* CRT should always be at 0, but check anyway */ |
253 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) | 270 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) |
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | 404 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) |
388 | { | 405 | { |
389 | struct drm_device *dev = connector->dev; | 406 | struct drm_device *dev = connector->dev; |
390 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 407 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
391 | struct drm_encoder *encoder = &intel_encoder->enc; | 408 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
392 | struct drm_crtc *crtc; | 409 | struct drm_crtc *crtc; |
393 | int dpms_mode; | 410 | int dpms_mode; |
394 | enum drm_connector_status status; | 411 | enum drm_connector_status status; |
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
400 | return connector_status_disconnected; | 417 | return connector_status_disconnected; |
401 | } | 418 | } |
402 | 419 | ||
403 | if (intel_crt_detect_ddc(connector)) | 420 | if (intel_crt_detect_ddc(encoder)) |
404 | return connector_status_connected; | 421 | return connector_status_connected; |
405 | 422 | ||
406 | /* for pre-945g platforms use load detect */ | 423 | /* for pre-945g platforms use load detect */ |
407 | if (encoder->crtc && encoder->crtc->enabled) { | 424 | if (encoder->crtc && encoder->crtc->enabled) { |
408 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); | 425 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); |
409 | } else { | 426 | } else { |
410 | crtc = intel_get_load_detect_pipe(intel_encoder, | 427 | crtc = intel_get_load_detect_pipe(intel_encoder, connector, |
411 | NULL, &dpms_mode); | 428 | NULL, &dpms_mode); |
412 | if (crtc) { | 429 | if (crtc) { |
413 | status = intel_crt_load_detect(crtc, intel_encoder); | 430 | status = intel_crt_load_detect(crtc, intel_encoder); |
414 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); | 431 | intel_release_load_detect_pipe(intel_encoder, |
432 | connector, dpms_mode); | ||
415 | } else | 433 | } else |
416 | status = connector_status_unknown; | 434 | status = connector_status_unknown; |
417 | } | 435 | } |
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
421 | 439 | ||
422 | static void intel_crt_destroy(struct drm_connector *connector) | 440 | static void intel_crt_destroy(struct drm_connector *connector) |
423 | { | 441 | { |
424 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
425 | |||
426 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
427 | drm_sysfs_connector_remove(connector); | 442 | drm_sysfs_connector_remove(connector); |
428 | drm_connector_cleanup(connector); | 443 | drm_connector_cleanup(connector); |
429 | kfree(connector); | 444 | kfree(connector); |
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector) | |||
432 | static int intel_crt_get_modes(struct drm_connector *connector) | 447 | static int intel_crt_get_modes(struct drm_connector *connector) |
433 | { | 448 | { |
434 | int ret; | 449 | int ret; |
435 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 450 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
436 | struct i2c_adapter *ddcbus; | 451 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
452 | struct i2c_adapter *ddc_bus; | ||
437 | struct drm_device *dev = connector->dev; | 453 | struct drm_device *dev = connector->dev; |
438 | 454 | ||
439 | 455 | ||
440 | ret = intel_ddc_get_modes(intel_encoder); | 456 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
441 | if (ret || !IS_G4X(dev)) | 457 | if (ret || !IS_G4X(dev)) |
442 | goto end; | 458 | goto end; |
443 | 459 | ||
444 | ddcbus = intel_encoder->ddc_bus; | ||
445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ | 460 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ |
446 | intel_encoder->ddc_bus = | 461 | ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); |
447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); | ||
448 | 462 | ||
449 | if (!intel_encoder->ddc_bus) { | 463 | if (!ddc_bus) { |
450 | intel_encoder->ddc_bus = ddcbus; | ||
451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, | 464 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, |
452 | "DDC bus registration failed for CRTDDC_D.\n"); | 465 | "DDC bus registration failed for CRTDDC_D.\n"); |
453 | goto end; | 466 | goto end; |
454 | } | 467 | } |
455 | /* Try to get modes by GPIOD port */ | 468 | /* Try to get modes by GPIOD port */ |
456 | ret = intel_ddc_get_modes(intel_encoder); | 469 | ret = intel_ddc_get_modes(connector, ddc_bus); |
457 | intel_i2c_destroy(ddcbus); | 470 | intel_i2c_destroy(ddc_bus); |
458 | 471 | ||
459 | end: | 472 | end: |
460 | return ret; | 473 | return ret; |
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { | |||
491 | static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { | 504 | static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { |
492 | .mode_valid = intel_crt_mode_valid, | 505 | .mode_valid = intel_crt_mode_valid, |
493 | .get_modes = intel_crt_get_modes, | 506 | .get_modes = intel_crt_get_modes, |
494 | .best_encoder = intel_best_encoder, | 507 | .best_encoder = intel_attached_encoder, |
495 | }; | 508 | }; |
496 | 509 | ||
497 | static void intel_crt_enc_destroy(struct drm_encoder *encoder) | 510 | static void intel_crt_enc_destroy(struct drm_encoder *encoder) |
498 | { | 511 | { |
512 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
513 | |||
514 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
499 | drm_encoder_cleanup(encoder); | 515 | drm_encoder_cleanup(encoder); |
516 | kfree(intel_encoder); | ||
500 | } | 517 | } |
501 | 518 | ||
502 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { | 519 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { |
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev) | |||
507 | { | 524 | { |
508 | struct drm_connector *connector; | 525 | struct drm_connector *connector; |
509 | struct intel_encoder *intel_encoder; | 526 | struct intel_encoder *intel_encoder; |
527 | struct intel_connector *intel_connector; | ||
510 | struct drm_i915_private *dev_priv = dev->dev_private; | 528 | struct drm_i915_private *dev_priv = dev->dev_private; |
511 | u32 i2c_reg; | 529 | u32 i2c_reg; |
512 | 530 | ||
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev) | |||
514 | if (!intel_encoder) | 532 | if (!intel_encoder) |
515 | return; | 533 | return; |
516 | 534 | ||
517 | connector = &intel_encoder->base; | 535 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
518 | drm_connector_init(dev, &intel_encoder->base, | 536 | if (!intel_connector) { |
537 | kfree(intel_encoder); | ||
538 | return; | ||
539 | } | ||
540 | |||
541 | connector = &intel_connector->base; | ||
542 | drm_connector_init(dev, &intel_connector->base, | ||
519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 543 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
520 | 544 | ||
521 | drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, | 545 | drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, |
522 | DRM_MODE_ENCODER_DAC); | 546 | DRM_MODE_ENCODER_DAC); |
523 | 547 | ||
524 | drm_mode_connector_attach_encoder(&intel_encoder->base, | 548 | drm_mode_connector_attach_encoder(&intel_connector->base, |
525 | &intel_encoder->enc); | 549 | &intel_encoder->enc); |
526 | 550 | ||
527 | /* Set up the DDC bus. */ | 551 | /* Set up the DDC bus. */ |
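
For the CRT path, CougarPoint routes the DAC through a transcoder-select field (PORT_TRANS_x_SEL_CPT) rather than the older pipe-select bit, which is why intel_crt_mode_set() and the hotplug force-detect now branch on HAS_PCH_CPT(). A compact sketch of just the routing-bit choice; the bit encodings here are placeholders, not the hardware values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings; the real values live in i915_reg.h. */
#define ADPA_PIPE_A_SELECT      (0u << 30)
#define ADPA_PIPE_B_SELECT      (1u << 30)
#define PORT_TRANS_A_SEL_CPT    (0u << 29)
#define PORT_TRANS_B_SEL_CPT    (1u << 29)

/* Mirrors the branch added to intel_crt_mode_set(): CougarPoint uses the
 * transcoder-select field, older PCHs the pipe-select bit. */
static uint32_t crt_routing_bits(bool has_pch_cpt, int pipe)
{
        if (has_pch_cpt)
                return pipe == 0 ? PORT_TRANS_A_SEL_CPT : PORT_TRANS_B_SEL_CPT;
        return pipe == 0 ? ADPA_PIPE_A_SELECT : ADPA_PIPE_B_SELECT;
}

int main(void)
{
        printf("CPT, pipe 1 -> 0x%08x\n", (unsigned int)crt_routing_bits(true, 1));
        printf("IBX, pipe 1 -> 0x%08x\n", (unsigned int)crt_routing_bits(false, 1));
        return 0;
}
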
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e7356fb6c918..4bb60af5bf2d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
742 | { | 742 | { |
743 | struct drm_device *dev = crtc->dev; | 743 | struct drm_device *dev = crtc->dev; |
744 | struct drm_mode_config *mode_config = &dev->mode_config; | 744 | struct drm_mode_config *mode_config = &dev->mode_config; |
745 | struct drm_connector *l_entry; | 745 | struct drm_encoder *l_entry; |
746 | 746 | ||
747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | 747 | list_for_each_entry(l_entry, &mode_config->encoder_list, head) { |
748 | if (l_entry->encoder && | 748 | if (l_entry && l_entry->crtc == crtc) { |
749 | l_entry->encoder->crtc == crtc) { | 749 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); |
750 | struct intel_encoder *intel_encoder = to_intel_encoder(l_entry); | ||
751 | if (intel_encoder->type == type) | 750 | if (intel_encoder->type == type) |
752 | return true; | 751 | return true; |
753 | } | 752 | } |
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
755 | return false; | 754 | return false; |
756 | } | 755 | } |
757 | 756 | ||
758 | static struct drm_connector * | ||
759 | intel_pipe_get_connector (struct drm_crtc *crtc) | ||
760 | { | ||
761 | struct drm_device *dev = crtc->dev; | ||
762 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
763 | struct drm_connector *l_entry, *ret = NULL; | ||
764 | |||
765 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | ||
766 | if (l_entry->encoder && | ||
767 | l_entry->encoder->crtc == crtc) { | ||
768 | ret = l_entry; | ||
769 | break; | ||
770 | } | ||
771 | } | ||
772 | return ret; | ||
773 | } | ||
774 | |||
775 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 757 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
776 | /** | 758 | /** |
777 | * Returns whether the given set of divisors are valid for a given refclk with | 759 | * Returns whether the given set of divisors are valid for a given refclk with |
@@ -1510,6 +1492,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) | |||
1510 | udelay(500); | 1492 | udelay(500); |
1511 | } | 1493 | } |
1512 | 1494 | ||
1495 | /* The FDI link training functions for ILK/Ibexpeak. */ | ||
1496 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | ||
1497 | { | ||
1498 | struct drm_device *dev = crtc->dev; | ||
1499 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1500 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1501 | int pipe = intel_crtc->pipe; | ||
1502 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
1503 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
1504 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | ||
1505 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | ||
1506 | u32 temp, tries = 0; | ||
1507 | |||
1508 | /* enable CPU FDI TX and PCH FDI RX */ | ||
1509 | temp = I915_READ(fdi_tx_reg); | ||
1510 | temp |= FDI_TX_ENABLE; | ||
1511 | temp &= ~(7 << 19); | ||
1512 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | ||
1513 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1514 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
1515 | I915_WRITE(fdi_tx_reg, temp); | ||
1516 | I915_READ(fdi_tx_reg); | ||
1517 | |||
1518 | temp = I915_READ(fdi_rx_reg); | ||
1519 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1520 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
1521 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | ||
1522 | I915_READ(fdi_rx_reg); | ||
1523 | udelay(150); | ||
1524 | |||
1525 | 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1526 | for train result */ | ||
1527 | temp = I915_READ(fdi_rx_imr_reg); | ||
1528 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1529 | temp &= ~FDI_RX_BIT_LOCK; | ||
1530 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1531 | I915_READ(fdi_rx_imr_reg); | ||
1532 | udelay(150); | ||
1533 | |||
1534 | for (;;) { | ||
1535 | temp = I915_READ(fdi_rx_iir_reg); | ||
1536 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | ||
1537 | |||
1538 | if ((temp & FDI_RX_BIT_LOCK)) { | ||
1539 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | ||
1540 | I915_WRITE(fdi_rx_iir_reg, | ||
1541 | temp | FDI_RX_BIT_LOCK); | ||
1542 | break; | ||
1543 | } | ||
1544 | |||
1545 | tries++; | ||
1546 | |||
1547 | if (tries > 5) { | ||
1548 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | ||
1549 | break; | ||
1550 | } | ||
1551 | } | ||
1552 | |||
1553 | /* Train 2 */ | ||
1554 | temp = I915_READ(fdi_tx_reg); | ||
1555 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1556 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1557 | I915_WRITE(fdi_tx_reg, temp); | ||
1558 | |||
1559 | temp = I915_READ(fdi_rx_reg); | ||
1560 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1561 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1562 | I915_WRITE(fdi_rx_reg, temp); | ||
1563 | udelay(150); | ||
1564 | |||
1565 | tries = 0; | ||
1566 | |||
1567 | for (;;) { | ||
1568 | temp = I915_READ(fdi_rx_iir_reg); | ||
1569 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | ||
1570 | |||
1571 | if (temp & FDI_RX_SYMBOL_LOCK) { | ||
1572 | I915_WRITE(fdi_rx_iir_reg, | ||
1573 | temp | FDI_RX_SYMBOL_LOCK); | ||
1574 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | ||
1575 | break; | ||
1576 | } | ||
1577 | |||
1578 | tries++; | ||
1579 | |||
1580 | if (tries > 5) { | ||
1581 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | ||
1582 | break; | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | DRM_DEBUG_KMS("FDI train done\n"); | ||
1587 | } | ||
1588 | |||
1589 | static int snb_b_fdi_train_param [] = { | ||
1590 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, | ||
1591 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, | ||
1592 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, | ||
1593 | FDI_LINK_TRAIN_800MV_0DB_SNB_B, | ||
1594 | }; | ||
1595 | |||
1596 | /* The FDI link training functions for SNB/Cougarpoint. */ | ||
1597 | static void gen6_fdi_link_train(struct drm_crtc *crtc) | ||
1598 | { | ||
1599 | struct drm_device *dev = crtc->dev; | ||
1600 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1601 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1602 | int pipe = intel_crtc->pipe; | ||
1603 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
1604 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | ||
1605 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | ||
1606 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | ||
1607 | u32 temp, i; | ||
1608 | |||
1609 | /* enable CPU FDI TX and PCH FDI RX */ | ||
1610 | temp = I915_READ(fdi_tx_reg); | ||
1611 | temp |= FDI_TX_ENABLE; | ||
1612 | temp &= ~(7 << 19); | ||
1613 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | ||
1614 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1615 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
1616 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | ||
1617 | /* SNB-B */ | ||
1618 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | ||
1619 | I915_WRITE(fdi_tx_reg, temp); | ||
1620 | I915_READ(fdi_tx_reg); | ||
1621 | |||
1622 | temp = I915_READ(fdi_rx_reg); | ||
1623 | if (HAS_PCH_CPT(dev)) { | ||
1624 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1625 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
1626 | } else { | ||
1627 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1628 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
1629 | } | ||
1630 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | ||
1631 | I915_READ(fdi_rx_reg); | ||
1632 | udelay(150); | ||
1633 | |||
1634 | 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1635 | for train result */ | ||
1636 | temp = I915_READ(fdi_rx_imr_reg); | ||
1637 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1638 | temp &= ~FDI_RX_BIT_LOCK; | ||
1639 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1640 | I915_READ(fdi_rx_imr_reg); | ||
1641 | udelay(150); | ||
1642 | |||
1643 | for (i = 0; i < 4; i++ ) { | ||
1644 | temp = I915_READ(fdi_tx_reg); | ||
1645 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | ||
1646 | temp |= snb_b_fdi_train_param[i]; | ||
1647 | I915_WRITE(fdi_tx_reg, temp); | ||
1648 | udelay(500); | ||
1649 | |||
1650 | temp = I915_READ(fdi_rx_iir_reg); | ||
1651 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | ||
1652 | |||
1653 | if (temp & FDI_RX_BIT_LOCK) { | ||
1654 | I915_WRITE(fdi_rx_iir_reg, | ||
1655 | temp | FDI_RX_BIT_LOCK); | ||
1656 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | ||
1657 | break; | ||
1658 | } | ||
1659 | } | ||
1660 | if (i == 4) | ||
1661 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | ||
1662 | |||
1663 | /* Train 2 */ | ||
1664 | temp = I915_READ(fdi_tx_reg); | ||
1665 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1666 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1667 | if (IS_GEN6(dev)) { | ||
1668 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | ||
1669 | /* SNB-B */ | ||
1670 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | ||
1671 | } | ||
1672 | I915_WRITE(fdi_tx_reg, temp); | ||
1673 | |||
1674 | temp = I915_READ(fdi_rx_reg); | ||
1675 | if (HAS_PCH_CPT(dev)) { | ||
1676 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1677 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; | ||
1678 | } else { | ||
1679 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1680 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1681 | } | ||
1682 | I915_WRITE(fdi_rx_reg, temp); | ||
1683 | udelay(150); | ||
1684 | |||
1685 | for (i = 0; i < 4; i++ ) { | ||
1686 | temp = I915_READ(fdi_tx_reg); | ||
1687 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | ||
1688 | temp |= snb_b_fdi_train_param[i]; | ||
1689 | I915_WRITE(fdi_tx_reg, temp); | ||
1690 | udelay(500); | ||
1691 | |||
1692 | temp = I915_READ(fdi_rx_iir_reg); | ||
1693 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | ||
1694 | |||
1695 | if (temp & FDI_RX_SYMBOL_LOCK) { | ||
1696 | I915_WRITE(fdi_rx_iir_reg, | ||
1697 | temp | FDI_RX_SYMBOL_LOCK); | ||
1698 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | ||
1699 | break; | ||
1700 | } | ||
1701 | } | ||
1702 | if (i == 4) | ||
1703 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | ||
1704 | |||
1705 | DRM_DEBUG_KMS("FDI train done.\n"); | ||
1706 | } | ||
1707 | |||
1513 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | 1708 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
1514 | { | 1709 | { |
1515 | struct drm_device *dev = crtc->dev; | 1710 | struct drm_device *dev = crtc->dev; |
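
Both new training routines share one skeleton: program training pattern 1 on the CPU FDI transmitter and the PCH receiver, poll the receiver's IIR for bit lock with a small bounded retry count, then switch to pattern 2 and poll for symbol lock; the SNB/CPT variant additionally sweeps the four snb_b_fdi_train_param voltage/pre-emphasis values between polls. The sketch below isolates that bounded-poll step with a simulated IIR read standing in for I915_READ(fdi_rx_iir_reg); the bit positions are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FDI_RX_BIT_LOCK      (1u << 8)   /* illustrative bit positions */
#define FDI_RX_SYMBOL_LOCK   (1u << 9)

/* Simulated receiver IIR: reports bit lock on the 3rd read and symbol lock
 * on the 5th, standing in for the hardware the driver polls. */
static uint32_t read_fdi_rx_iir(void)
{
        static int reads;

        reads++;
        if (reads >= 5)
                return FDI_RX_BIT_LOCK | FDI_RX_SYMBOL_LOCK;
        if (reads >= 3)
                return FDI_RX_BIT_LOCK;
        return 0;
}

/* Bounded poll for a status bit, mirroring the "tries > 5" / "i == 4"
 * bail-outs in the training loops. Returns true if the bit was seen. */
static bool wait_for_lock(uint32_t lock_bit, int max_tries)
{
        for (int tries = 0; tries < max_tries; tries++) {
                uint32_t iir = read_fdi_rx_iir();

                if (iir & lock_bit)
                        return true;
                /* the driver would udelay() and retry here */
        }
        return false;
}

int main(void)
{
        printf("train 1 (bit lock): %s\n",
               wait_for_lock(FDI_RX_BIT_LOCK, 6) ? "done" : "fail");
        printf("train 2 (symbol lock): %s\n",
               wait_for_lock(FDI_RX_SYMBOL_LOCK, 6) ? "done" : "fail");
        return 0;
}
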
@@ -1523,8 +1718,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1523 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; | 1718 | int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; |
1524 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | 1719 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; |
1525 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | 1720 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; |
1526 | int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; | ||
1527 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | ||
1528 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1721 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
1529 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | 1722 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; |
1530 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | 1723 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; |
@@ -1541,8 +1734,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1541 | int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; | 1734 | int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; |
1542 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; | 1735 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; |
1543 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1736 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
1737 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | ||
1544 | u32 temp; | 1738 | u32 temp; |
1545 | int tries = 5, j, n; | 1739 | int n; |
1546 | u32 pipe_bpc; | 1740 | u32 pipe_bpc; |
1547 | 1741 | ||
1548 | temp = I915_READ(pipeconf_reg); | 1742 | temp = I915_READ(pipeconf_reg); |
@@ -1569,12 +1763,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1569 | /* enable eDP PLL */ | 1763 | /* enable eDP PLL */ |
1570 | ironlake_enable_pll_edp(crtc); | 1764 | ironlake_enable_pll_edp(crtc); |
1571 | } else { | 1765 | } else { |
1572 | /* enable PCH DPLL */ | ||
1573 | temp = I915_READ(pch_dpll_reg); | ||
1574 | if ((temp & DPLL_VCO_ENABLE) == 0) { | ||
1575 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); | ||
1576 | I915_READ(pch_dpll_reg); | ||
1577 | } | ||
1578 | 1766 | ||
1579 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1767 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
1580 | temp = I915_READ(fdi_rx_reg); | 1768 | temp = I915_READ(fdi_rx_reg); |
@@ -1584,9 +1772,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1584 | */ | 1772 | */ |
1585 | temp &= ~(0x7 << 16); | 1773 | temp &= ~(0x7 << 16); |
1586 | temp |= (pipe_bpc << 11); | 1774 | temp |= (pipe_bpc << 11); |
1587 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1775 | temp &= ~(7 << 19); |
1588 | FDI_SEL_PCDCLK | | 1776 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
1589 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | 1777 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); |
1778 | I915_READ(fdi_rx_reg); | ||
1779 | udelay(200); | ||
1780 | |||
1781 | /* Switch from Rawclk to PCDclk */ | ||
1782 | temp = I915_READ(fdi_rx_reg); | ||
1783 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | ||
1590 | I915_READ(fdi_rx_reg); | 1784 | I915_READ(fdi_rx_reg); |
1591 | udelay(200); | 1785 | udelay(200); |
1592 | 1786 | ||
@@ -1629,91 +1823,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1629 | } | 1823 | } |
1630 | 1824 | ||
1631 | if (!HAS_eDP) { | 1825 | if (!HAS_eDP) { |
1632 | /* enable CPU FDI TX and PCH FDI RX */ | 1826 | /* For PCH output, train the FDI link */ |
1633 | temp = I915_READ(fdi_tx_reg); | 1827 | if (IS_GEN6(dev)) |
1634 | temp |= FDI_TX_ENABLE; | 1828 | gen6_fdi_link_train(crtc); |
1635 | temp |= FDI_DP_PORT_WIDTH_X4; /* default */ | 1829 | else |
1636 | temp &= ~FDI_LINK_TRAIN_NONE; | 1830 | ironlake_fdi_link_train(crtc); |
1637 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
1638 | I915_WRITE(fdi_tx_reg, temp); | ||
1639 | I915_READ(fdi_tx_reg); | ||
1640 | |||
1641 | temp = I915_READ(fdi_rx_reg); | ||
1642 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1643 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
1644 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | ||
1645 | I915_READ(fdi_rx_reg); | ||
1646 | |||
1647 | udelay(150); | ||
1648 | |||
1649 | /* Train FDI. */ | ||
1650 | /* umask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1651 | for train result */ | ||
1652 | temp = I915_READ(fdi_rx_imr_reg); | ||
1653 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1654 | temp &= ~FDI_RX_BIT_LOCK; | ||
1655 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1656 | I915_READ(fdi_rx_imr_reg); | ||
1657 | udelay(150); | ||
1658 | 1831 | ||
1659 | temp = I915_READ(fdi_rx_iir_reg); | 1832 | /* enable PCH DPLL */ |
1660 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 1833 | temp = I915_READ(pch_dpll_reg); |
1661 | 1834 | if ((temp & DPLL_VCO_ENABLE) == 0) { | |
1662 | if ((temp & FDI_RX_BIT_LOCK) == 0) { | 1835 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); |
1663 | for (j = 0; j < tries; j++) { | 1836 | I915_READ(pch_dpll_reg); |
1664 | temp = I915_READ(fdi_rx_iir_reg); | ||
1665 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", | ||
1666 | temp); | ||
1667 | if (temp & FDI_RX_BIT_LOCK) | ||
1668 | break; | ||
1669 | udelay(200); | ||
1670 | } | ||
1671 | if (j != tries) | ||
1672 | I915_WRITE(fdi_rx_iir_reg, | ||
1673 | temp | FDI_RX_BIT_LOCK); | ||
1674 | else | ||
1675 | DRM_DEBUG_KMS("train 1 fail\n"); | ||
1676 | } else { | ||
1677 | I915_WRITE(fdi_rx_iir_reg, | ||
1678 | temp | FDI_RX_BIT_LOCK); | ||
1679 | DRM_DEBUG_KMS("train 1 ok 2!\n"); | ||
1680 | } | 1837 | } |
1681 | temp = I915_READ(fdi_tx_reg); | 1838 | udelay(200); |
1682 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1683 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1684 | I915_WRITE(fdi_tx_reg, temp); | ||
1685 | |||
1686 | temp = I915_READ(fdi_rx_reg); | ||
1687 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1688 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1689 | I915_WRITE(fdi_rx_reg, temp); | ||
1690 | |||
1691 | udelay(150); | ||
1692 | 1839 | ||
1693 | temp = I915_READ(fdi_rx_iir_reg); | 1840 | if (HAS_PCH_CPT(dev)) { |
1694 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 1841 | /* Be sure PCH DPLL SEL is set */ |
1695 | 1842 | temp = I915_READ(PCH_DPLL_SEL); | |
1696 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { | 1843 | if (trans_dpll_sel == 0 && |
1697 | for (j = 0; j < tries; j++) { | 1844 | (temp & TRANSA_DPLL_ENABLE) == 0) |
1698 | temp = I915_READ(fdi_rx_iir_reg); | 1845 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
1699 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", | 1846 | else if (trans_dpll_sel == 1 && |
1700 | temp); | 1847 | (temp & TRANSB_DPLL_ENABLE) == 0) |
1701 | if (temp & FDI_RX_SYMBOL_LOCK) | 1848 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
1702 | break; | 1849 | I915_WRITE(PCH_DPLL_SEL, temp); |
1703 | udelay(200); | 1850 | I915_READ(PCH_DPLL_SEL); |
1704 | } | ||
1705 | if (j != tries) { | ||
1706 | I915_WRITE(fdi_rx_iir_reg, | ||
1707 | temp | FDI_RX_SYMBOL_LOCK); | ||
1708 | DRM_DEBUG_KMS("train 2 ok 1!\n"); | ||
1709 | } else | ||
1710 | DRM_DEBUG_KMS("train 2 fail\n"); | ||
1711 | } else { | ||
1712 | I915_WRITE(fdi_rx_iir_reg, | ||
1713 | temp | FDI_RX_SYMBOL_LOCK); | ||
1714 | DRM_DEBUG_KMS("train 2 ok 2!\n"); | ||
1715 | } | 1851 | } |
1716 | DRM_DEBUG_KMS("train done\n"); | ||
1717 | 1852 | ||
1718 | /* set transcoder timing */ | 1853 | /* set transcoder timing */ |
1719 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); | 1854 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); |
@@ -1724,6 +1859,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1724 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); | 1859 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); |
1725 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); | 1860 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); |
1726 | 1861 | ||
1862 | /* enable normal train */ | ||
1863 | temp = I915_READ(fdi_tx_reg); | ||
1864 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1865 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | | ||
1866 | FDI_TX_ENHANCE_FRAME_ENABLE); | ||
1867 | I915_READ(fdi_tx_reg); | ||
1868 | |||
1869 | temp = I915_READ(fdi_rx_reg); | ||
1870 | if (HAS_PCH_CPT(dev)) { | ||
1871 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1872 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1873 | } else { | ||
1874 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1875 | temp |= FDI_LINK_TRAIN_NONE; | ||
1876 | } | ||
1877 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1878 | I915_READ(fdi_rx_reg); | ||
1879 | |||
1880 | /* wait one idle pattern time */ | ||
1881 | udelay(100); | ||
1882 | |||
1883 | /* For PCH DP, enable TRANS_DP_CTL */ | ||
1884 | if (HAS_PCH_CPT(dev) && | ||
1885 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
1886 | int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; | ||
1887 | int reg; | ||
1888 | |||
1889 | reg = I915_READ(trans_dp_ctl); | ||
1890 | reg &= ~TRANS_DP_PORT_SEL_MASK; | ||
1891 | reg = TRANS_DP_OUTPUT_ENABLE | | ||
1892 | TRANS_DP_ENH_FRAMING | | ||
1893 | TRANS_DP_VSYNC_ACTIVE_HIGH | | ||
1894 | TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
1895 | |||
1896 | switch (intel_trans_dp_port_sel(crtc)) { | ||
1897 | case PCH_DP_B: | ||
1898 | reg |= TRANS_DP_PORT_SEL_B; | ||
1899 | break; | ||
1900 | case PCH_DP_C: | ||
1901 | reg |= TRANS_DP_PORT_SEL_C; | ||
1902 | break; | ||
1903 | case PCH_DP_D: | ||
1904 | reg |= TRANS_DP_PORT_SEL_D; | ||
1905 | break; | ||
1906 | default: | ||
1907 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | ||
1908 | reg |= TRANS_DP_PORT_SEL_B; | ||
1909 | break; | ||
1910 | } | ||
1911 | |||
1912 | I915_WRITE(trans_dp_ctl, reg); | ||
1913 | POSTING_READ(trans_dp_ctl); | ||
1914 | } | ||
1915 | |||
1727 | /* enable PCH transcoder */ | 1916 | /* enable PCH transcoder */ |
1728 | temp = I915_READ(transconf_reg); | 1917 | temp = I915_READ(transconf_reg); |
1729 | /* | 1918 | /* |
@@ -1738,23 +1927,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1738 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) | 1927 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) |
1739 | ; | 1928 | ; |
1740 | 1929 | ||
1741 | /* enable normal */ | ||
1742 | |||
1743 | temp = I915_READ(fdi_tx_reg); | ||
1744 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1745 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | | ||
1746 | FDI_TX_ENHANCE_FRAME_ENABLE); | ||
1747 | I915_READ(fdi_tx_reg); | ||
1748 | |||
1749 | temp = I915_READ(fdi_rx_reg); | ||
1750 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1751 | I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | | ||
1752 | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1753 | I915_READ(fdi_rx_reg); | ||
1754 | |||
1755 | /* wait one idle pattern time */ | ||
1756 | udelay(100); | ||
1757 | |||
1758 | } | 1930 | } |
1759 | 1931 | ||
1760 | intel_crtc_load_lut(crtc); | 1932 | intel_crtc_load_lut(crtc); |
@@ -1805,6 +1977,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1805 | I915_READ(pf_ctl_reg); | 1977 | I915_READ(pf_ctl_reg); |
1806 | } | 1978 | } |
1807 | I915_WRITE(pf_win_size, 0); | 1979 | I915_WRITE(pf_win_size, 0); |
1980 | POSTING_READ(pf_win_size); | ||
1981 | |||
1808 | 1982 | ||
1809 | /* disable CPU FDI tx and PCH FDI rx */ | 1983 | /* disable CPU FDI tx and PCH FDI rx */ |
1810 | temp = I915_READ(fdi_tx_reg); | 1984 | temp = I915_READ(fdi_tx_reg); |
@@ -1825,11 +1999,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1825 | temp &= ~FDI_LINK_TRAIN_NONE; | 1999 | temp &= ~FDI_LINK_TRAIN_NONE; |
1826 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2000 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1827 | I915_WRITE(fdi_tx_reg, temp); | 2001 | I915_WRITE(fdi_tx_reg, temp); |
2002 | POSTING_READ(fdi_tx_reg); | ||
1828 | 2003 | ||
1829 | temp = I915_READ(fdi_rx_reg); | 2004 | temp = I915_READ(fdi_rx_reg); |
1830 | temp &= ~FDI_LINK_TRAIN_NONE; | 2005 | if (HAS_PCH_CPT(dev)) { |
1831 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 2006 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2007 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2008 | } else { | ||
2009 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2010 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2011 | } | ||
1832 | I915_WRITE(fdi_rx_reg, temp); | 2012 | I915_WRITE(fdi_rx_reg, temp); |
2013 | POSTING_READ(fdi_rx_reg); | ||
1833 | 2014 | ||
1834 | udelay(100); | 2015 | udelay(100); |
1835 | 2016 | ||
@@ -1859,6 +2040,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1859 | } | 2040 | } |
1860 | } | 2041 | } |
1861 | } | 2042 | } |
2043 | |||
1862 | temp = I915_READ(transconf_reg); | 2044 | temp = I915_READ(transconf_reg); |
1863 | /* BPC in transcoder is consistent with that in pipeconf */ | 2045 | /* BPC in transcoder is consistent with that in pipeconf */ |
1864 | temp &= ~PIPE_BPC_MASK; | 2046 | temp &= ~PIPE_BPC_MASK; |
@@ -1867,35 +2049,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1867 | I915_READ(transconf_reg); | 2049 | I915_READ(transconf_reg); |
1868 | udelay(100); | 2050 | udelay(100); |
1869 | 2051 | ||
2052 | if (HAS_PCH_CPT(dev)) { | ||
2053 | /* disable TRANS_DP_CTL */ | ||
2054 | int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; | ||
2055 | int reg; | ||
2056 | |||
2057 | reg = I915_READ(trans_dp_ctl); | ||
2058 | reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | ||
2059 | I915_WRITE(trans_dp_ctl, reg); | ||
2060 | POSTING_READ(trans_dp_ctl); | ||
2061 | |||
2062 | /* disable DPLL_SEL */ | ||
2063 | temp = I915_READ(PCH_DPLL_SEL); | ||
2064 | if (trans_dpll_sel == 0) | ||
2065 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); | ||
2066 | else | ||
2067 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
2068 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
2069 | I915_READ(PCH_DPLL_SEL); | ||
2070 | |||
2071 | } | ||
2072 | |||
1870 | /* disable PCH DPLL */ | 2073 | /* disable PCH DPLL */ |
1871 | temp = I915_READ(pch_dpll_reg); | 2074 | temp = I915_READ(pch_dpll_reg); |
1872 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2075 | I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); |
1873 | I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2076 | I915_READ(pch_dpll_reg); |
1874 | I915_READ(pch_dpll_reg); | ||
1875 | } | ||
1876 | 2077 | ||
1877 | if (HAS_eDP) { | 2078 | if (HAS_eDP) { |
1878 | ironlake_disable_pll_edp(crtc); | 2079 | ironlake_disable_pll_edp(crtc); |
1879 | } | 2080 | } |
1880 | 2081 | ||
2082 | /* Switch from PCDclk to Rawclk */ | ||
1881 | temp = I915_READ(fdi_rx_reg); | 2083 | temp = I915_READ(fdi_rx_reg); |
1882 | temp &= ~FDI_SEL_PCDCLK; | 2084 | temp &= ~FDI_SEL_PCDCLK; |
1883 | I915_WRITE(fdi_rx_reg, temp); | 2085 | I915_WRITE(fdi_rx_reg, temp); |
1884 | I915_READ(fdi_rx_reg); | 2086 | I915_READ(fdi_rx_reg); |
1885 | 2087 | ||
2088 | /* Disable CPU FDI TX PLL */ | ||
2089 | temp = I915_READ(fdi_tx_reg); | ||
2090 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); | ||
2091 | I915_READ(fdi_tx_reg); | ||
2092 | udelay(100); | ||
2093 | |||
1886 | temp = I915_READ(fdi_rx_reg); | 2094 | temp = I915_READ(fdi_rx_reg); |
1887 | temp &= ~FDI_RX_PLL_ENABLE; | 2095 | temp &= ~FDI_RX_PLL_ENABLE; |
1888 | I915_WRITE(fdi_rx_reg, temp); | 2096 | I915_WRITE(fdi_rx_reg, temp); |
1889 | I915_READ(fdi_rx_reg); | 2097 | I915_READ(fdi_rx_reg); |
1890 | 2098 | ||
1891 | /* Disable CPU FDI TX PLL */ | ||
1892 | temp = I915_READ(fdi_tx_reg); | ||
1893 | if ((temp & FDI_TX_PLL_ENABLE) != 0) { | ||
1894 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); | ||
1895 | I915_READ(fdi_tx_reg); | ||
1896 | udelay(100); | ||
1897 | } | ||
1898 | |||
1899 | /* Wait for the clocks to turn off. */ | 2099 | /* Wait for the clocks to turn off. */ |
1900 | udelay(100); | 2100 | udelay(100); |
1901 | break; | 2101 | break; |
@@ -2331,6 +2531,30 @@ static struct intel_watermark_params i830_wm_info = { | |||
2331 | I830_FIFO_LINE_SIZE | 2531 | I830_FIFO_LINE_SIZE |
2332 | }; | 2532 | }; |
2333 | 2533 | ||
2534 | static struct intel_watermark_params ironlake_display_wm_info = { | ||
2535 | ILK_DISPLAY_FIFO, | ||
2536 | ILK_DISPLAY_MAXWM, | ||
2537 | ILK_DISPLAY_DFTWM, | ||
2538 | 2, | ||
2539 | ILK_FIFO_LINE_SIZE | ||
2540 | }; | ||
2541 | |||
2542 | static struct intel_watermark_params ironlake_display_srwm_info = { | ||
2543 | ILK_DISPLAY_SR_FIFO, | ||
2544 | ILK_DISPLAY_MAX_SRWM, | ||
2545 | ILK_DISPLAY_DFT_SRWM, | ||
2546 | 2, | ||
2547 | ILK_FIFO_LINE_SIZE | ||
2548 | }; | ||
2549 | |||
2550 | static struct intel_watermark_params ironlake_cursor_srwm_info = { | ||
2551 | ILK_CURSOR_SR_FIFO, | ||
2552 | ILK_CURSOR_MAX_SRWM, | ||
2553 | ILK_CURSOR_DFT_SRWM, | ||
2554 | 2, | ||
2555 | ILK_FIFO_LINE_SIZE | ||
2556 | }; | ||
2557 | |||
2334 | /** | 2558 | /** |
2335 | * intel_calculate_wm - calculate watermark level | 2559 | * intel_calculate_wm - calculate watermark level |
2336 | * @clock_in_khz: pixel clock | 2560 | * @clock_in_khz: pixel clock |
@@ -2449,66 +2673,6 @@ static void pineview_disable_cxsr(struct drm_device *dev) | |||
2449 | DRM_INFO("Big FIFO is disabled\n"); | 2673 | DRM_INFO("Big FIFO is disabled\n"); |
2450 | } | 2674 | } |
2451 | 2675 | ||
2452 | static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, | ||
2453 | int pixel_size) | ||
2454 | { | ||
2455 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2456 | u32 reg; | ||
2457 | unsigned long wm; | ||
2458 | struct cxsr_latency *latency; | ||
2459 | |||
2460 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, | ||
2461 | dev_priv->mem_freq); | ||
2462 | if (!latency) { | ||
2463 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | ||
2464 | pineview_disable_cxsr(dev); | ||
2465 | return; | ||
2466 | } | ||
2467 | |||
2468 | /* Display SR */ | ||
2469 | wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size, | ||
2470 | latency->display_sr); | ||
2471 | reg = I915_READ(DSPFW1); | ||
2472 | reg &= 0x7fffff; | ||
2473 | reg |= wm << 23; | ||
2474 | I915_WRITE(DSPFW1, reg); | ||
2475 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | ||
2476 | |||
2477 | /* cursor SR */ | ||
2478 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size, | ||
2479 | latency->cursor_sr); | ||
2480 | reg = I915_READ(DSPFW3); | ||
2481 | reg &= ~(0x3f << 24); | ||
2482 | reg |= (wm & 0x3f) << 24; | ||
2483 | I915_WRITE(DSPFW3, reg); | ||
2484 | |||
2485 | /* Display HPLL off SR */ | ||
2486 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, | ||
2487 | latency->display_hpll_disable, I915_FIFO_LINE_SIZE); | ||
2488 | reg = I915_READ(DSPFW3); | ||
2489 | reg &= 0xfffffe00; | ||
2490 | reg |= wm & 0x1ff; | ||
2491 | I915_WRITE(DSPFW3, reg); | ||
2492 | |||
2493 | /* cursor HPLL off SR */ | ||
2494 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size, | ||
2495 | latency->cursor_hpll_disable); | ||
2496 | reg = I915_READ(DSPFW3); | ||
2497 | reg &= ~(0x3f << 16); | ||
2498 | reg |= (wm & 0x3f) << 16; | ||
2499 | I915_WRITE(DSPFW3, reg); | ||
2500 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | ||
2501 | |||
2502 | /* activate cxsr */ | ||
2503 | reg = I915_READ(DSPFW3); | ||
2504 | reg |= PINEVIEW_SELF_REFRESH_EN; | ||
2505 | I915_WRITE(DSPFW3, reg); | ||
2506 | |||
2507 | DRM_INFO("Big FIFO is enabled\n"); | ||
2508 | |||
2509 | return; | ||
2510 | } | ||
2511 | |||
2512 | /* | 2676 | /* |
2513 | * Latency for FIFO fetches is dependent on several factors: | 2677 | * Latency for FIFO fetches is dependent on several factors: |
2514 | * - memory configuration (speed, channels) | 2678 | * - memory configuration (speed, channels) |
@@ -2593,6 +2757,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2593 | return size; | 2757 | return size; |
2594 | } | 2758 | } |
2595 | 2759 | ||
2760 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, | ||
2761 | int planeb_clock, int sr_hdisplay, int pixel_size) | ||
2762 | { | ||
2763 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2764 | u32 reg; | ||
2765 | unsigned long wm; | ||
2766 | struct cxsr_latency *latency; | ||
2767 | int sr_clock; | ||
2768 | |||
2769 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, | ||
2770 | dev_priv->mem_freq); | ||
2771 | if (!latency) { | ||
2772 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | ||
2773 | pineview_disable_cxsr(dev); | ||
2774 | return; | ||
2775 | } | ||
2776 | |||
2777 | if (!planea_clock || !planeb_clock) { | ||
2778 | sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
2779 | |||
2780 | /* Display SR */ | ||
2781 | wm = intel_calculate_wm(sr_clock, &pineview_display_wm, | ||
2782 | pixel_size, latency->display_sr); | ||
2783 | reg = I915_READ(DSPFW1); | ||
2784 | reg &= ~DSPFW_SR_MASK; | ||
2785 | reg |= wm << DSPFW_SR_SHIFT; | ||
2786 | I915_WRITE(DSPFW1, reg); | ||
2787 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | ||
2788 | |||
2789 | /* cursor SR */ | ||
2790 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, | ||
2791 | pixel_size, latency->cursor_sr); | ||
2792 | reg = I915_READ(DSPFW3); | ||
2793 | reg &= ~DSPFW_CURSOR_SR_MASK; | ||
2794 | reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; | ||
2795 | I915_WRITE(DSPFW3, reg); | ||
2796 | |||
2797 | /* Display HPLL off SR */ | ||
2798 | wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, | ||
2799 | pixel_size, latency->display_hpll_disable); | ||
2800 | reg = I915_READ(DSPFW3); | ||
2801 | reg &= ~DSPFW_HPLL_SR_MASK; | ||
2802 | reg |= wm & DSPFW_HPLL_SR_MASK; | ||
2803 | I915_WRITE(DSPFW3, reg); | ||
2804 | |||
2805 | /* cursor HPLL off SR */ | ||
2806 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, | ||
2807 | pixel_size, latency->cursor_hpll_disable); | ||
2808 | reg = I915_READ(DSPFW3); | ||
2809 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | ||
2810 | reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; | ||
2811 | I915_WRITE(DSPFW3, reg); | ||
2812 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | ||
2813 | |||
2814 | /* activate cxsr */ | ||
2815 | reg = I915_READ(DSPFW3); | ||
2816 | reg |= PINEVIEW_SELF_REFRESH_EN; | ||
2817 | I915_WRITE(DSPFW3, reg); | ||
2818 | DRM_DEBUG_KMS("Self-refresh is enabled\n"); | ||
2819 | } else { | ||
2820 | pineview_disable_cxsr(dev); | ||
2821 | DRM_DEBUG_KMS("Self-refresh is disabled\n"); | ||
2822 | } | ||
2823 | } | ||
2824 | |||
2596 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, | 2825 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, |
2597 | int planeb_clock, int sr_hdisplay, int pixel_size) | 2826 | int planeb_clock, int sr_hdisplay, int pixel_size) |
2598 | { | 2827 | { |
@@ -2813,6 +3042,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
2813 | I915_WRITE(FW_BLC, fwater_lo); | 3042 | I915_WRITE(FW_BLC, fwater_lo); |
2814 | } | 3043 | } |
2815 | 3044 | ||
3045 | #define ILK_LP0_PLANE_LATENCY 700 | ||
3046 | |||
3047 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | ||
3048 | int planeb_clock, int sr_hdisplay, int pixel_size) | ||
3049 | { | ||
3050 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3051 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | ||
3052 | int sr_wm, cursor_wm; | ||
3053 | unsigned long line_time_us; | ||
3054 | int sr_clock, entries_required; | ||
3055 | u32 reg_value; | ||
3056 | |||
3057 | /* Calculate and update the watermark for plane A */ | ||
3058 | if (planea_clock) { | ||
3059 | entries_required = ((planea_clock / 1000) * pixel_size * | ||
3060 | ILK_LP0_PLANE_LATENCY) / 1000; | ||
3061 | entries_required = DIV_ROUND_UP(entries_required, | ||
3062 | ironlake_display_wm_info.cacheline_size); | ||
3063 | planea_wm = entries_required + | ||
3064 | ironlake_display_wm_info.guard_size; | ||
3065 | |||
3066 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) | ||
3067 | planea_wm = ironlake_display_wm_info.max_wm; | ||
3068 | |||
3069 | cursora_wm = 16; | ||
3070 | reg_value = I915_READ(WM0_PIPEA_ILK); | ||
3071 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
3072 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | | ||
3073 | (cursora_wm & WM0_PIPE_CURSOR_MASK); | ||
3074 | I915_WRITE(WM0_PIPEA_ILK, reg_value); | ||
3075 | DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " | ||
3076 | "cursor: %d\n", planea_wm, cursora_wm); | ||
3077 | } | ||
3078 | /* Calculate and update the watermark for plane B */ | ||
3079 | if (planeb_clock) { | ||
3080 | entries_required = ((planeb_clock / 1000) * pixel_size * | ||
3081 | ILK_LP0_PLANE_LATENCY) / 1000; | ||
3082 | entries_required = DIV_ROUND_UP(entries_required, | ||
3083 | ironlake_display_wm_info.cacheline_size); | ||
3084 | planeb_wm = entries_required + | ||
3085 | ironlake_display_wm_info.guard_size; | ||
3086 | |||
3087 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) | ||
3088 | planeb_wm = ironlake_display_wm_info.max_wm; | ||
3089 | |||
3090 | cursorb_wm = 16; | ||
3091 | reg_value = I915_READ(WM0_PIPEB_ILK); | ||
3092 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | ||
3093 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | | ||
3094 | (cursorb_wm & WM0_PIPE_CURSOR_MASK); | ||
3095 | I915_WRITE(WM0_PIPEB_ILK, reg_value); | ||
3096 | DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " | ||
3097 | "cursor: %d\n", planeb_wm, cursorb_wm); | ||
3098 | } | ||
3099 | |||
3100 | /* | ||
3101 | * Calculate and update the self-refresh watermark only when one | ||
3102 | * display plane is used. | ||
3103 | */ | ||
3104 | if (!planea_clock || !planeb_clock) { | ||
3105 | int line_count; | ||
3106 | /* Read the self-refresh latency. The unit is 0.5us */ | ||
3107 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | ||
3108 | |||
3109 | sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
3110 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | ||
3111 | |||
3112 | /* Use ns/us then divide to preserve precision */ | ||
3113 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | ||
3114 | / 1000; | ||
3115 | |||
3116 | /* calculate the self-refresh watermark for display plane */ | ||
3117 | entries_required = line_count * sr_hdisplay * pixel_size; | ||
3118 | entries_required = DIV_ROUND_UP(entries_required, | ||
3119 | ironlake_display_srwm_info.cacheline_size); | ||
3120 | sr_wm = entries_required + | ||
3121 | ironlake_display_srwm_info.guard_size; | ||
3122 | |||
3123 | /* calculate the self-refresh watermark for display cursor */ | ||
3124 | entries_required = line_count * pixel_size * 64; | ||
3125 | entries_required = DIV_ROUND_UP(entries_required, | ||
3126 | ironlake_cursor_srwm_info.cacheline_size); | ||
3127 | cursor_wm = entries_required + | ||
3128 | ironlake_cursor_srwm_info.guard_size; | ||
3129 | |||
3130 | /* configure watermark and enable self-refresh */ | ||
3131 | reg_value = I915_READ(WM1_LP_ILK); | ||
3132 | reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | | ||
3133 | WM1_LP_CURSOR_MASK); | ||
3134 | reg_value |= WM1_LP_SR_EN | | ||
3135 | (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | ||
3136 | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; | ||
3137 | |||
3138 | I915_WRITE(WM1_LP_ILK, reg_value); | ||
3139 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3140 | "cursor %d\n", sr_wm, cursor_wm); | ||
3141 | |||
3142 | } else { | ||
3143 | /* Turn off self refresh if both pipes are enabled */ | ||
3144 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); | ||
3145 | } | ||
3146 | } | ||
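ironlake_update_wm() above computes each pipe's LP0 watermark from the number of bytes the display engine fetches while the ILK_LP0_PLANE_LATENCY window elapses, rounds that up to FIFO lines, and adds the guard value from the ironlake_display_wm_info initializer. A self-contained sketch of that arithmetic follows; the 64-byte line size stands in for ILK_FIFO_LINE_SIZE and, like the sample clock and pixel size, is an assumption made only for illustration.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))
    #define ILK_LP0_PLANE_LATENCY 700    /* same constant the patch defines */

    int main(void)
    {
            int clock = 148500;    /* sample pixel clock in kHz */
            int pixel_size = 4;    /* sample bytes per pixel (32 bpp) */
            int line_size = 64;    /* assumed value of ILK_FIFO_LINE_SIZE */
            int guard = 2;         /* guard term from ironlake_display_wm_info */

            /* bytes fetched during the LP0 latency window, as in the patch */
            int entries = ((clock / 1000) * pixel_size *
                           ILK_LP0_PLANE_LATENCY) / 1000;

            /* convert to FIFO lines, rounding up, and add the guard */
            int wm = DIV_ROUND_UP(entries, line_size) + guard;

            printf("entries = %d, plane watermark = %d\n", entries, wm);
            return 0;
    }

Here entries works out to 414 and the watermark to 9; the patch then clamps the result to ironlake_display_wm_info.max_wm before writing it into WM0_PIPEA_ILK or WM0_PIPEB_ILK.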
2816 | /** | 3147 | /** |
2817 | * intel_update_watermarks - update FIFO watermark values based on current modes | 3148 | * intel_update_watermarks - update FIFO watermark values based on current modes |
2818 | * | 3149 | * |
@@ -2882,12 +3213,6 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
2882 | if (enabled <= 0) | 3213 | if (enabled <= 0) |
2883 | return; | 3214 | return; |
2884 | 3215 | ||
2885 | /* Single plane configs can enable self refresh */ | ||
2886 | if (enabled == 1 && IS_PINEVIEW(dev)) | ||
2887 | pineview_enable_cxsr(dev, sr_clock, pixel_size); | ||
2888 | else if (IS_PINEVIEW(dev)) | ||
2889 | pineview_disable_cxsr(dev); | ||
2890 | |||
2891 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 3216 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, |
2892 | sr_hdisplay, pixel_size); | 3217 | sr_hdisplay, pixel_size); |
2893 | } | 3218 | } |
@@ -2924,7 +3249,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2924 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 3249 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
2925 | bool is_edp = false; | 3250 | bool is_edp = false; |
2926 | struct drm_mode_config *mode_config = &dev->mode_config; | 3251 | struct drm_mode_config *mode_config = &dev->mode_config; |
2927 | struct drm_connector *connector; | 3252 | struct drm_encoder *encoder; |
3253 | struct intel_encoder *intel_encoder = NULL; | ||
2928 | const intel_limit_t *limit; | 3254 | const intel_limit_t *limit; |
2929 | int ret; | 3255 | int ret; |
2930 | struct fdi_m_n m_n = {0}; | 3256 | struct fdi_m_n m_n = {0}; |
@@ -2935,6 +3261,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2935 | int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; | 3261 | int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; |
2936 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | 3262 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; |
2937 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; | 3263 | int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; |
3264 | int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; | ||
3265 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | ||
2938 | int lvds_reg = LVDS; | 3266 | int lvds_reg = LVDS; |
2939 | u32 temp; | 3267 | u32 temp; |
2940 | int sdvo_pixel_multiply; | 3268 | int sdvo_pixel_multiply; |
@@ -2942,12 +3270,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2942 | 3270 | ||
2943 | drm_vblank_pre_modeset(dev, pipe); | 3271 | drm_vblank_pre_modeset(dev, pipe); |
2944 | 3272 | ||
2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 3273 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
2946 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
2947 | 3274 | ||
2948 | if (!connector->encoder || connector->encoder->crtc != crtc) | 3275 | if (!encoder || encoder->crtc != crtc) |
2949 | continue; | 3276 | continue; |
2950 | 3277 | ||
3278 | intel_encoder = enc_to_intel_encoder(encoder); | ||
3279 | |||
2951 | switch (intel_encoder->type) { | 3280 | switch (intel_encoder->type) { |
2952 | case INTEL_OUTPUT_LVDS: | 3281 | case INTEL_OUTPUT_LVDS: |
2953 | is_lvds = true; | 3282 | is_lvds = true; |
@@ -3043,14 +3372,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3043 | 3372 | ||
3044 | /* FDI link */ | 3373 | /* FDI link */ |
3045 | if (HAS_PCH_SPLIT(dev)) { | 3374 | if (HAS_PCH_SPLIT(dev)) { |
3046 | int lane, link_bw, bpp; | 3375 | int lane = 0, link_bw, bpp; |
3047 | /* eDP doesn't require FDI link, so just set DP M/N | 3376 | /* eDP doesn't require FDI link, so just set DP M/N |
3048 | according to current link config */ | 3377 | according to current link config */ |
3049 | if (is_edp) { | 3378 | if (is_edp) { |
3050 | struct drm_connector *edp; | ||
3051 | target_clock = mode->clock; | 3379 | target_clock = mode->clock; |
3052 | edp = intel_pipe_get_connector(crtc); | 3380 | intel_edp_link_config(intel_encoder, |
3053 | intel_edp_link_config(to_intel_encoder(edp), | ||
3054 | &lane, &link_bw); | 3381 | &lane, &link_bw); |
3055 | } else { | 3382 | } else { |
3056 | /* DP over FDI requires target mode clock | 3383 | /* DP over FDI requires target mode clock |
@@ -3059,7 +3386,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3059 | target_clock = mode->clock; | 3386 | target_clock = mode->clock; |
3060 | else | 3387 | else |
3061 | target_clock = adjusted_mode->clock; | 3388 | target_clock = adjusted_mode->clock; |
3062 | lane = 4; | ||
3063 | link_bw = 270000; | 3389 | link_bw = 270000; |
3064 | } | 3390 | } |
3065 | 3391 | ||
@@ -3111,6 +3437,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3111 | bpp = 24; | 3437 | bpp = 24; |
3112 | } | 3438 | } |
3113 | 3439 | ||
3440 | if (!lane) { | ||
3441 | /* | ||
3442 | * Account for spread spectrum to avoid | ||
3443 | * oversubscribing the link. Max center spread | ||
3444 | * is 2.5%; use 5% for safety's sake. | ||
3445 | */ | ||
3446 | u32 bps = target_clock * bpp * 21 / 20; | ||
3447 | lane = bps / (link_bw * 8) + 1; | ||
3448 | } | ||
3449 | |||
3450 | intel_crtc->fdi_lanes = lane; | ||
3451 | |||
3114 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | 3452 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); |
3115 | } | 3453 | } |
3116 | 3454 | ||
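The hunk above drops the hard-coded FDI_DP_PORT_WIDTH_X4 / lane = 4 default: when no output has already fixed the lane count, it is now derived from the payload bandwidth the mode needs, padded by 5% (the 21/20 factor) to cover spread-spectrum clocking. A minimal standalone sketch of that arithmetic follows; the helper name and the sample numbers are illustrative only, and the units simply follow whatever convention intel_crtc_mode_set() uses for target_clock and link_bw.

    #include <stdio.h>

    /* Mirrors the lane calculation added above: pad the pixel bandwidth by
     * 5% for spread spectrum, then divide by the per-lane payload term. */
    static unsigned int fdi_lanes_needed(unsigned int target_clock,
                                         unsigned int bpp,
                                         unsigned int link_bw)
    {
            unsigned int bps = target_clock * bpp * 21 / 20;

            return bps / (link_bw * 8) + 1;
    }

    int main(void)
    {
            /* sample: a 154000 kHz mode at 24 bpp with link_bw = 270000 */
            printf("fdi_lanes = %u\n", fdi_lanes_needed(154000, 24, 270000));
            return 0;
    }

With those sample numbers the padded bandwidth is 3,880,800 against a per-lane term of 2,160,000, so two lanes are requested where the old code always asked for four.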
@@ -3265,11 +3603,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3265 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; | 3603 | pipeconf &= ~PIPEACONF_DOUBLE_WIDE; |
3266 | } | 3604 | } |
3267 | 3605 | ||
3268 | dspcntr |= DISPLAY_PLANE_ENABLE; | ||
3269 | pipeconf |= PIPEACONF_ENABLE; | ||
3270 | dpll |= DPLL_VCO_ENABLE; | ||
3271 | |||
3272 | |||
3273 | /* Disable the panel fitter if it was on our pipe */ | 3606 | /* Disable the panel fitter if it was on our pipe */ |
3274 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) | 3607 | if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) |
3275 | I915_WRITE(PFIT_CONTROL, 0); | 3608 | I915_WRITE(PFIT_CONTROL, 0); |
@@ -3292,6 +3625,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3292 | udelay(150); | 3625 | udelay(150); |
3293 | } | 3626 | } |
3294 | 3627 | ||
3628 | /* enable transcoder DPLL */ | ||
3629 | if (HAS_PCH_CPT(dev)) { | ||
3630 | temp = I915_READ(PCH_DPLL_SEL); | ||
3631 | if (trans_dpll_sel == 0) | ||
3632 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | ||
3633 | else | ||
3634 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | ||
3635 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
3636 | I915_READ(PCH_DPLL_SEL); | ||
3637 | udelay(150); | ||
3638 | } | ||
3639 | |||
3295 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 3640 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
3296 | * This is an exception to the general rule that mode_set doesn't turn | 3641 | * This is an exception to the general rule that mode_set doesn't turn |
3297 | * things on. | 3642 | * things on. |
@@ -3303,7 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3303 | lvds_reg = PCH_LVDS; | 3648 | lvds_reg = PCH_LVDS; |
3304 | 3649 | ||
3305 | lvds = I915_READ(lvds_reg); | 3650 | lvds = I915_READ(lvds_reg); |
3306 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; | 3651 | lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
3652 | if (pipe == 1) { | ||
3653 | if (HAS_PCH_CPT(dev)) | ||
3654 | lvds |= PORT_TRANS_B_SEL_CPT; | ||
3655 | else | ||
3656 | lvds |= LVDS_PIPEB_SELECT; | ||
3657 | } else { | ||
3658 | if (HAS_PCH_CPT(dev)) | ||
3659 | lvds &= ~PORT_TRANS_SEL_MASK; | ||
3660 | else | ||
3661 | lvds &= ~LVDS_PIPEB_SELECT; | ||
3662 | } | ||
3307 | /* set the corresponding LVDS_BORDER bit */ | 3663 | /* set the corresponding LVDS_BORDER bit */ |
3308 | lvds |= dev_priv->lvds_border_bits; | 3664 | lvds |= dev_priv->lvds_border_bits; |
3309 | /* Set the B0-B3 data pairs corresponding to whether we're going to | 3665 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
@@ -3337,6 +3693,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3337 | } | 3693 | } |
3338 | if (is_dp) | 3694 | if (is_dp) |
3339 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 3695 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
3696 | else if (HAS_PCH_SPLIT(dev)) { | ||
3697 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | ||
3698 | if (pipe == 0) { | ||
3699 | I915_WRITE(TRANSA_DATA_M1, 0); | ||
3700 | I915_WRITE(TRANSA_DATA_N1, 0); | ||
3701 | I915_WRITE(TRANSA_DP_LINK_M1, 0); | ||
3702 | I915_WRITE(TRANSA_DP_LINK_N1, 0); | ||
3703 | } else { | ||
3704 | I915_WRITE(TRANSB_DATA_M1, 0); | ||
3705 | I915_WRITE(TRANSB_DATA_N1, 0); | ||
3706 | I915_WRITE(TRANSB_DP_LINK_M1, 0); | ||
3707 | I915_WRITE(TRANSB_DP_LINK_N1, 0); | ||
3708 | } | ||
3709 | } | ||
3340 | 3710 | ||
3341 | if (!is_edp) { | 3711 | if (!is_edp) { |
3342 | I915_WRITE(fp_reg, fp); | 3712 | I915_WRITE(fp_reg, fp); |
@@ -3411,6 +3781,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3411 | /* enable FDI RX PLL too */ | 3781 | /* enable FDI RX PLL too */ |
3412 | temp = I915_READ(fdi_rx_reg); | 3782 | temp = I915_READ(fdi_rx_reg); |
3413 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | 3783 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); |
3784 | I915_READ(fdi_rx_reg); | ||
3785 | udelay(200); | ||
3786 | |||
3787 | /* enable FDI TX PLL too */ | ||
3788 | temp = I915_READ(fdi_tx_reg); | ||
3789 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | ||
3790 | I915_READ(fdi_tx_reg); | ||
3791 | |||
3792 | /* enable FDI RX PCDCLK */ | ||
3793 | temp = I915_READ(fdi_rx_reg); | ||
3794 | I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); | ||
3795 | I915_READ(fdi_rx_reg); | ||
3414 | udelay(200); | 3796 | udelay(200); |
3415 | } | 3797 | } |
3416 | } | 3798 | } |
@@ -3671,6 +4053,7 @@ static struct drm_display_mode load_detect_mode = { | |||
3671 | }; | 4053 | }; |
3672 | 4054 | ||
3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 4055 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
4056 | struct drm_connector *connector, | ||
3674 | struct drm_display_mode *mode, | 4057 | struct drm_display_mode *mode, |
3675 | int *dpms_mode) | 4058 | int *dpms_mode) |
3676 | { | 4059 | { |
@@ -3729,7 +4112,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
3729 | } | 4112 | } |
3730 | 4113 | ||
3731 | encoder->crtc = crtc; | 4114 | encoder->crtc = crtc; |
3732 | intel_encoder->base.encoder = encoder; | 4115 | connector->encoder = encoder; |
3733 | intel_encoder->load_detect_temp = true; | 4116 | intel_encoder->load_detect_temp = true; |
3734 | 4117 | ||
3735 | intel_crtc = to_intel_crtc(crtc); | 4118 | intel_crtc = to_intel_crtc(crtc); |
@@ -3755,7 +4138,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
3755 | return crtc; | 4138 | return crtc; |
3756 | } | 4139 | } |
3757 | 4140 | ||
3758 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) | 4141 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
4142 | struct drm_connector *connector, int dpms_mode) | ||
3759 | { | 4143 | { |
3760 | struct drm_encoder *encoder = &intel_encoder->enc; | 4144 | struct drm_encoder *encoder = &intel_encoder->enc; |
3761 | struct drm_device *dev = encoder->dev; | 4145 | struct drm_device *dev = encoder->dev; |
@@ -3765,7 +4149,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm | |||
3765 | 4149 | ||
3766 | if (intel_encoder->load_detect_temp) { | 4150 | if (intel_encoder->load_detect_temp) { |
3767 | encoder->crtc = NULL; | 4151 | encoder->crtc = NULL; |
3768 | intel_encoder->base.encoder = NULL; | 4152 | connector->encoder = NULL; |
3769 | intel_encoder->load_detect_temp = false; | 4153 | intel_encoder->load_detect_temp = false; |
3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); | 4154 | crtc->enabled = drm_helper_crtc_in_use(crtc); |
3771 | drm_helper_disable_unused_functions(dev); | 4155 | drm_helper_disable_unused_functions(dev); |
@@ -4392,14 +4776,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) | |||
4392 | return crtc; | 4776 | return crtc; |
4393 | } | 4777 | } |
4394 | 4778 | ||
4395 | static int intel_connector_clones(struct drm_device *dev, int type_mask) | 4779 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) |
4396 | { | 4780 | { |
4397 | int index_mask = 0; | 4781 | int index_mask = 0; |
4398 | struct drm_connector *connector; | 4782 | struct drm_encoder *encoder; |
4399 | int entry = 0; | 4783 | int entry = 0; |
4400 | 4784 | ||
4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4785 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
4402 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 4786 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
4403 | if (type_mask & intel_encoder->clone_mask) | 4787 | if (type_mask & intel_encoder->clone_mask) |
4404 | index_mask |= (1 << entry); | 4788 | index_mask |= (1 << entry); |
4405 | entry++; | 4789 | entry++; |
@@ -4411,7 +4795,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) | |||
4411 | static void intel_setup_outputs(struct drm_device *dev) | 4795 | static void intel_setup_outputs(struct drm_device *dev) |
4412 | { | 4796 | { |
4413 | struct drm_i915_private *dev_priv = dev->dev_private; | 4797 | struct drm_i915_private *dev_priv = dev->dev_private; |
4414 | struct drm_connector *connector; | 4798 | struct drm_encoder *encoder; |
4415 | 4799 | ||
4416 | intel_crt_init(dev); | 4800 | intel_crt_init(dev); |
4417 | 4801 | ||
@@ -4426,9 +4810,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4426 | intel_dp_init(dev, DP_A); | 4810 | intel_dp_init(dev, DP_A); |
4427 | 4811 | ||
4428 | if (I915_READ(HDMIB) & PORT_DETECTED) { | 4812 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
4429 | /* check SDVOB */ | 4813 | /* PCH SDVOB multiplex with HDMIB */ |
4430 | /* found = intel_sdvo_init(dev, HDMIB); */ | 4814 | found = intel_sdvo_init(dev, PCH_SDVOB); |
4431 | found = 0; | ||
4432 | if (!found) | 4815 | if (!found) |
4433 | intel_hdmi_init(dev, HDMIB); | 4816 | intel_hdmi_init(dev, HDMIB); |
4434 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) | 4817 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
@@ -4494,12 +4877,11 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4494 | if (SUPPORTS_TV(dev)) | 4877 | if (SUPPORTS_TV(dev)) |
4495 | intel_tv_init(dev); | 4878 | intel_tv_init(dev); |
4496 | 4879 | ||
4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4880 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
4498 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 4881 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
4499 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
4500 | 4882 | ||
4501 | encoder->possible_crtcs = intel_encoder->crtc_mask; | 4883 | encoder->possible_crtcs = intel_encoder->crtc_mask; |
4502 | encoder->possible_clones = intel_connector_clones(dev, | 4884 | encoder->possible_clones = intel_encoder_clones(dev, |
4503 | intel_encoder->clone_mask); | 4885 | intel_encoder->clone_mask); |
4504 | } | 4886 | } |
4505 | } | 4887 | } |
@@ -4732,6 +5114,25 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4732 | } | 5114 | } |
4733 | 5115 | ||
4734 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 5116 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
5117 | |||
5118 | /* | ||
5119 | * According to the spec the following bits should be set in | ||
5120 | * order to enable memory self-refresh | ||
5121 | * The bit 22/21 of 0x42004 | ||
5122 | * The bit 5 of 0x42020 | ||
5123 | * The bit 15 of 0x45000 | ||
5124 | */ | ||
5125 | if (IS_IRONLAKE(dev)) { | ||
5126 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
5127 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
5128 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | ||
5129 | I915_WRITE(ILK_DSPCLK_GATE, | ||
5130 | (I915_READ(ILK_DSPCLK_GATE) | | ||
5131 | ILK_DPARB_CLK_GATE)); | ||
5132 | I915_WRITE(DISP_ARB_CTL, | ||
5133 | (I915_READ(DISP_ARB_CTL) | | ||
5134 | DISP_FBC_WM_DIS)); | ||
5135 | } | ||
4735 | return; | 5136 | return; |
4736 | } else if (IS_G4X(dev)) { | 5137 | } else if (IS_G4X(dev)) { |
4737 | uint32_t dspclk_gate; | 5138 | uint32_t dspclk_gate; |
@@ -4847,9 +5248,31 @@ static void intel_init_display(struct drm_device *dev) | |||
4847 | i830_get_display_clock_speed; | 5248 | i830_get_display_clock_speed; |
4848 | 5249 | ||
4849 | /* For FIFO watermark updates */ | 5250 | /* For FIFO watermark updates */ |
4850 | if (HAS_PCH_SPLIT(dev)) | 5251 | if (HAS_PCH_SPLIT(dev)) { |
4851 | dev_priv->display.update_wm = NULL; | 5252 | if (IS_IRONLAKE(dev)) { |
4852 | else if (IS_G4X(dev)) | 5253 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
5254 | dev_priv->display.update_wm = ironlake_update_wm; | ||
5255 | else { | ||
5256 | DRM_DEBUG_KMS("Failed to get proper latency. " | ||
5257 | "Disable CxSR\n"); | ||
5258 | dev_priv->display.update_wm = NULL; | ||
5259 | } | ||
5260 | } else | ||
5261 | dev_priv->display.update_wm = NULL; | ||
5262 | } else if (IS_PINEVIEW(dev)) { | ||
5263 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | ||
5264 | dev_priv->fsb_freq, | ||
5265 | dev_priv->mem_freq)) { | ||
5266 | DRM_INFO("failed to find known CxSR latency " | ||
5267 | "(found fsb freq %d, mem freq %d), " | ||
5268 | "disabling CxSR\n", | ||
5269 | dev_priv->fsb_freq, dev_priv->mem_freq); | ||
5270 | /* Disable CxSR and never update its watermark again */ | ||
5271 | pineview_disable_cxsr(dev); | ||
5272 | dev_priv->display.update_wm = NULL; | ||
5273 | } else | ||
5274 | dev_priv->display.update_wm = pineview_update_wm; | ||
5275 | } else if (IS_G4X(dev)) | ||
4853 | dev_priv->display.update_wm = g4x_update_wm; | 5276 | dev_priv->display.update_wm = g4x_update_wm; |
4854 | else if (IS_I965G(dev)) | 5277 | else if (IS_I965G(dev)) |
4855 | dev_priv->display.update_wm = i965_update_wm; | 5278 | dev_priv->display.update_wm = i965_update_wm; |
@@ -4922,13 +5345,6 @@ void intel_modeset_init(struct drm_device *dev) | |||
4922 | (unsigned long)dev); | 5345 | (unsigned long)dev); |
4923 | 5346 | ||
4924 | intel_setup_overlay(dev); | 5347 | intel_setup_overlay(dev); |
4925 | |||
4926 | if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev), | ||
4927 | dev_priv->fsb_freq, | ||
4928 | dev_priv->mem_freq)) | ||
4929 | DRM_INFO("failed to find known CxSR latency " | ||
4930 | "(found fsb freq %d, mem freq %d), disabling CxSR\n", | ||
4931 | dev_priv->fsb_freq, dev_priv->mem_freq); | ||
4932 | } | 5348 | } |
4933 | 5349 | ||
4934 | void intel_modeset_cleanup(struct drm_device *dev) | 5350 | void intel_modeset_cleanup(struct drm_device *dev) |
@@ -4973,14 +5389,29 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4973 | } | 5389 | } |
4974 | 5390 | ||
4975 | 5391 | ||
4976 | /* current intel driver doesn't take advantage of encoders | 5392 | /* |
4977 | always give back the encoder for the connector | 5393 | * Return which encoder is currently attached to the connector. |
4978 | */ | 5394 | */ |
4979 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) | 5395 | struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) |
4980 | { | 5396 | { |
4981 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 5397 | struct drm_mode_object *obj; |
5398 | struct drm_encoder *encoder; | ||
5399 | int i; | ||
5400 | |||
5401 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
5402 | if (connector->encoder_ids[i] == 0) | ||
5403 | break; | ||
4982 | 5404 | ||
4983 | return &intel_encoder->enc; | 5405 | obj = drm_mode_object_find(connector->dev, |
5406 | connector->encoder_ids[i], | ||
5407 | DRM_MODE_OBJECT_ENCODER); | ||
5408 | if (!obj) | ||
5409 | continue; | ||
5410 | |||
5411 | encoder = obj_to_encoder(obj); | ||
5412 | return encoder; | ||
5413 | } | ||
5414 | return NULL; | ||
4984 | } | 5415 | } |
4985 | 5416 | ||
4986 | /* | 5417 | /* |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 77e40cfcf216..f6299bb788e5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -48,8 +48,6 @@ struct intel_dp_priv { | |||
48 | uint32_t output_reg; | 48 | uint32_t output_reg; |
49 | uint32_t DP; | 49 | uint32_t DP; |
50 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 50 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
51 | uint32_t save_DP; | ||
52 | uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
53 | bool has_audio; | 51 | bool has_audio; |
54 | int dpms_mode; | 52 | int dpms_mode; |
55 | uint8_t link_bw; | 53 | uint8_t link_bw; |
@@ -141,7 +139,8 @@ static int | |||
141 | intel_dp_mode_valid(struct drm_connector *connector, | 139 | intel_dp_mode_valid(struct drm_connector *connector, |
142 | struct drm_display_mode *mode) | 140 | struct drm_display_mode *mode) |
143 | { | 141 | { |
144 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 142 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
143 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 144 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
146 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 145 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
147 | 146 | ||
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, | |||
215 | { | 214 | { |
216 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 215 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
217 | uint32_t output_reg = dp_priv->output_reg; | 216 | uint32_t output_reg = dp_priv->output_reg; |
218 | struct drm_device *dev = intel_encoder->base.dev; | 217 | struct drm_device *dev = intel_encoder->enc.dev; |
219 | struct drm_i915_private *dev_priv = dev->dev_private; | 218 | struct drm_i915_private *dev_priv = dev->dev_private; |
220 | uint32_t ch_ctl = output_reg + 0x10; | 219 | uint32_t ch_ctl = output_reg + 0x10; |
221 | uint32_t ch_data = ch_ctl + 4; | 220 | uint32_t ch_data = ch_ctl + 4; |
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, | |||
224 | uint32_t ctl; | 223 | uint32_t ctl; |
225 | uint32_t status; | 224 | uint32_t status; |
226 | uint32_t aux_clock_divider; | 225 | uint32_t aux_clock_divider; |
227 | int try; | 226 | int try, precharge; |
228 | 227 | ||
229 | /* The clock divider is based off the hrawclk, | 228 | /* The clock divider is based off the hrawclk, |
230 | * and would like to run at 2MHz. So, take the | 229 | * and would like to run at 2MHz. So, take the |
231 | * hrawclk value and divide by 2 and use that | 230 | * hrawclk value and divide by 2 and use that |
232 | */ | 231 | */ |
233 | if (IS_eDP(intel_encoder)) | 232 | if (IS_eDP(intel_encoder)) { |
234 | aux_clock_divider = 225; /* eDP input clock at 450MHz */ | 233 | if (IS_GEN6(dev)) |
235 | else if (HAS_PCH_SPLIT(dev)) | 234 | aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */ |
235 | else | ||
236 | aux_clock_divider = 225; /* eDP input clock at 450MHz */ | ||
237 | } else if (HAS_PCH_SPLIT(dev)) | ||
236 | aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */ | 238 | aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */ |
237 | else | 239 | else |
238 | aux_clock_divider = intel_hrawclk(dev) / 2; | 240 | aux_clock_divider = intel_hrawclk(dev) / 2; |
239 | 241 | ||
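(Worked example for the divider selection above: each hard-coded divider is just the stated input clock, in MHz, halved — 450 / 2 = 225 for eDP, 400 / 2 = 200 on SNB, 125 / 2 = 62 for the PCH case — and the intel_hrawclk(dev) / 2 fallback follows the same rule, assuming intel_hrawclk() reports the raw clock in MHz; either way the AUX channel lands near the 2 MHz target mentioned in the comment.)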
242 | if (IS_GEN6(dev)) | ||
243 | precharge = 3; | ||
244 | else | ||
245 | precharge = 5; | ||
246 | |||
240 | /* Must try at least 3 times according to DP spec */ | 247 | /* Must try at least 3 times according to DP spec */ |
241 | for (try = 0; try < 5; try++) { | 248 | for (try = 0; try < 5; try++) { |
242 | /* Load the send data into the aux channel data registers */ | 249 | /* Load the send data into the aux channel data registers */ |
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, | |||
249 | ctl = (DP_AUX_CH_CTL_SEND_BUSY | | 256 | ctl = (DP_AUX_CH_CTL_SEND_BUSY | |
250 | DP_AUX_CH_CTL_TIME_OUT_400us | | 257 | DP_AUX_CH_CTL_TIME_OUT_400us | |
251 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | 258 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
252 | (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | 259 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
253 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | 260 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | |
254 | DP_AUX_CH_CTL_DONE | | 261 | DP_AUX_CH_CTL_DONE | |
255 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 262 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
465 | } | 472 | } |
466 | 473 | ||
467 | static int | 474 | static int |
468 | intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) | 475 | intel_dp_i2c_init(struct intel_encoder *intel_encoder, |
476 | struct intel_connector *intel_connector, const char *name) | ||
469 | { | 477 | { |
470 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 478 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
471 | 479 | ||
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) | |||
480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); | 488 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); |
481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; | 489 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; |
482 | dp_priv->adapter.algo_data = &dp_priv->algo; | 490 | dp_priv->adapter.algo_data = &dp_priv->algo; |
483 | dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; | 491 | dp_priv->adapter.dev.parent = &intel_connector->base.kdev; |
484 | 492 | ||
485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | 493 | return i2c_dp_aux_add_bus(&dp_priv->adapter); |
486 | } | 494 | } |
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
555 | { | 563 | { |
556 | struct drm_device *dev = crtc->dev; | 564 | struct drm_device *dev = crtc->dev; |
557 | struct drm_mode_config *mode_config = &dev->mode_config; | 565 | struct drm_mode_config *mode_config = &dev->mode_config; |
558 | struct drm_connector *connector; | 566 | struct drm_encoder *encoder; |
559 | struct drm_i915_private *dev_priv = dev->dev_private; | 567 | struct drm_i915_private *dev_priv = dev->dev_private; |
560 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 568 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
561 | int lane_count = 4; | 569 | int lane_count = 4; |
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
564 | /* | 572 | /* |
565 | * Find the lane count in the intel_encoder private | 573 | * Find the lane count in the intel_encoder private |
566 | */ | 574 | */ |
567 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 575 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
568 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 576 | struct intel_encoder *intel_encoder; |
569 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 577 | struct intel_dp_priv *dp_priv; |
570 | 578 | ||
571 | if (!connector->encoder || connector->encoder->crtc != crtc) | 579 | if (!encoder || encoder->crtc != crtc) |
572 | continue; | 580 | continue; |
573 | 581 | ||
582 | intel_encoder = enc_to_intel_encoder(encoder); | ||
583 | dp_priv = intel_encoder->dev_priv; | ||
584 | |||
574 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { | 585 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
575 | lane_count = dp_priv->lane_count; | 586 | lane_count = dp_priv->lane_count; |
576 | break; | 587 | break; |
@@ -626,16 +637,24 @@ static void | |||
626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 637 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
627 | struct drm_display_mode *adjusted_mode) | 638 | struct drm_display_mode *adjusted_mode) |
628 | { | 639 | { |
640 | struct drm_device *dev = encoder->dev; | ||
629 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 641 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
630 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 642 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
631 | struct drm_crtc *crtc = intel_encoder->enc.crtc; | 643 | struct drm_crtc *crtc = intel_encoder->enc.crtc; |
632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 644 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
633 | 645 | ||
634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | | 646 | dp_priv->DP = (DP_VOLTAGE_0_4 | |
635 | DP_VOLTAGE_0_4 | | 647 | DP_PRE_EMPHASIS_0); |
636 | DP_PRE_EMPHASIS_0 | | 648 | |
637 | DP_SYNC_VS_HIGH | | 649 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
638 | DP_SYNC_HS_HIGH); | 650 | dp_priv->DP |= DP_SYNC_HS_HIGH; |
651 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | ||
652 | dp_priv->DP |= DP_SYNC_VS_HIGH; | ||
653 | |||
654 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | ||
655 | dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; | ||
656 | else | ||
657 | dp_priv->DP |= DP_LINK_TRAIN_OFF; | ||
639 | 658 | ||
640 | switch (dp_priv->lane_count) { | 659 | switch (dp_priv->lane_count) { |
641 | case 1: | 660 | case 1: |
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
664 | dp_priv->DP |= DP_ENHANCED_FRAMING; | 683 | dp_priv->DP |= DP_ENHANCED_FRAMING; |
665 | } | 684 | } |
666 | 685 | ||
667 | if (intel_crtc->pipe == 1) | 686 | /* CPT DP's pipe select is decided in TRANS_DP_CTL */ |
687 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | ||
668 | dp_priv->DP |= DP_PIPEB_SELECT; | 688 | dp_priv->DP |= DP_PIPEB_SELECT; |
669 | 689 | ||
670 | if (IS_eDP(intel_encoder)) { | 690 | if (IS_eDP(intel_encoder)) { |
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
704 | { | 724 | { |
705 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 725 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
706 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 726 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
707 | struct drm_device *dev = intel_encoder->base.dev; | 727 | struct drm_device *dev = encoder->dev; |
708 | struct drm_i915_private *dev_priv = dev->dev_private; | 728 | struct drm_i915_private *dev_priv = dev->dev_private; |
709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | 729 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); |
710 | 730 | ||
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | |||
749 | return link_status[r - DP_LANE0_1_STATUS]; | 769 | return link_status[r - DP_LANE0_1_STATUS]; |
750 | } | 770 | } |
751 | 771 | ||
752 | static void | ||
753 | intel_dp_save(struct drm_connector *connector) | ||
754 | { | ||
755 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
756 | struct drm_device *dev = intel_encoder->base.dev; | ||
757 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
758 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
759 | |||
760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); | ||
761 | intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET, | ||
762 | dp_priv->save_link_configuration, | ||
763 | sizeof (dp_priv->save_link_configuration)); | ||
764 | } | ||
765 | |||
766 | static uint8_t | 772 | static uint8_t |
767 | intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], | 773 | intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], |
768 | int lane) | 774 | int lane) |
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) | |||
892 | return signal_levels; | 898 | return signal_levels; |
893 | } | 899 | } |
894 | 900 | ||
901 | /* Gen6's DP voltage swing and pre-emphasis control */ | ||
902 | static uint32_t | ||
903 | intel_gen6_edp_signal_levels(uint8_t train_set) | ||
904 | { | ||
905 | switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { | ||
906 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | ||
907 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | ||
908 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | ||
909 | return EDP_LINK_TRAIN_400MV_6DB_SNB_B; | ||
910 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
911 | return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; | ||
912 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | ||
913 | return EDP_LINK_TRAIN_800MV_0DB_SNB_B; | ||
914 | default: | ||
915 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); | ||
916 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | ||
917 | } | ||
918 | } | ||
919 | |||
895 | static uint8_t | 920 | static uint8_t |
896 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | 921 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], |
897 | int lane) | 922 | int lane) |
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder, | |||
948 | uint8_t train_set[4], | 973 | uint8_t train_set[4], |
949 | bool first) | 974 | bool first) |
950 | { | 975 | { |
951 | struct drm_device *dev = intel_encoder->base.dev; | 976 | struct drm_device *dev = intel_encoder->enc.dev; |
952 | struct drm_i915_private *dev_priv = dev->dev_private; | 977 | struct drm_i915_private *dev_priv = dev->dev_private; |
953 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 978 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
954 | int ret; | 979 | int ret; |
@@ -974,7 +999,7 @@ static void | |||
974 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | 999 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | 1000 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) |
976 | { | 1001 | { |
977 | struct drm_device *dev = intel_encoder->base.dev; | 1002 | struct drm_device *dev = intel_encoder->enc.dev; |
978 | struct drm_i915_private *dev_priv = dev->dev_private; | 1003 | struct drm_i915_private *dev_priv = dev->dev_private; |
979 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1004 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
980 | uint8_t train_set[4]; | 1005 | uint8_t train_set[4]; |
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
985 | bool channel_eq = false; | 1010 | bool channel_eq = false; |
986 | bool first = true; | 1011 | bool first = true; |
987 | int tries; | 1012 | int tries; |
1013 | u32 reg; | ||
988 | 1014 | ||
989 | /* Write the link configuration data */ | 1015 | /* Write the link configuration data */ |
990 | intel_dp_aux_native_write(intel_encoder, 0x100, | 1016 | intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, |
991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | 1017 | link_configuration, DP_LINK_CONFIGURATION_SIZE); |
992 | 1018 | ||
993 | DP |= DP_PORT_EN; | 1019 | DP |= DP_PORT_EN; |
994 | DP &= ~DP_LINK_TRAIN_MASK; | 1020 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) |
1021 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | ||
1022 | else | ||
1023 | DP &= ~DP_LINK_TRAIN_MASK; | ||
995 | memset(train_set, 0, 4); | 1024 | memset(train_set, 0, 4); |
996 | voltage = 0xff; | 1025 | voltage = 0xff; |
997 | tries = 0; | 1026 | tries = 0; |
998 | clock_recovery = false; | 1027 | clock_recovery = false; |
999 | for (;;) { | 1028 | for (;;) { |
1000 | /* Use train_set[0] to set the voltage and pre emphasis values */ | 1029 | /* Use train_set[0] to set the voltage and pre emphasis values */ |
1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1030 | uint32_t signal_levels; |
1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1031 | if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { |
1032 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); | ||
1033 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | ||
1034 | } else { | ||
1035 | signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | ||
1036 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | ||
1037 | } | ||
1003 | 1038 | ||
1004 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, | 1039 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) |
1040 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | ||
1041 | else | ||
1042 | reg = DP | DP_LINK_TRAIN_PAT_1; | ||
1043 | |||
1044 | if (!intel_dp_set_link_train(intel_encoder, reg, | ||
1005 | DP_TRAINING_PATTERN_1, train_set, first)) | 1045 | DP_TRAINING_PATTERN_1, train_set, first)) |
1006 | break; | 1046 | break; |
1007 | first = false; | 1047 | first = false; |
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1041 | channel_eq = false; | 1081 | channel_eq = false; |
1042 | for (;;) { | 1082 | for (;;) { |
1043 | /* Use train_set[0] to set the voltage and pre emphasis values */ | 1083 | /* Use train_set[0] to set the voltage and pre emphasis values */ |
1044 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1084 | uint32_t signal_levels; |
1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1085 | |
1086 | if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { | ||
1087 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); | ||
1088 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | ||
1089 | } else { | ||
1090 | signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | ||
1091 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | ||
1092 | } | ||
1093 | |||
1094 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | ||
1095 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | ||
1096 | else | ||
1097 | reg = DP | DP_LINK_TRAIN_PAT_2; | ||
1046 | 1098 | ||
1047 | /* channel eq pattern */ | 1099 | /* channel eq pattern */ |
1048 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, | 1100 | if (!intel_dp_set_link_train(intel_encoder, reg, |
1049 | DP_TRAINING_PATTERN_2, train_set, | 1101 | DP_TRAINING_PATTERN_2, train_set, |
1050 | false)) | 1102 | false)) |
1051 | break; | 1103 | break; |
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1068 | ++tries; | 1120 | ++tries; |
1069 | } | 1121 | } |
1070 | 1122 | ||
1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); | 1123 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) |
1124 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | ||
1125 | else | ||
1126 | reg = DP | DP_LINK_TRAIN_OFF; | ||
1127 | |||
1128 | I915_WRITE(dp_priv->output_reg, reg); | ||
1072 | POSTING_READ(dp_priv->output_reg); | 1129 | POSTING_READ(dp_priv->output_reg); |
1073 | intel_dp_aux_native_write_1(intel_encoder, | 1130 | intel_dp_aux_native_write_1(intel_encoder, |
1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | 1131 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); |
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1077 | static void | 1134 | static void |
1078 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) | 1135 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) |
1079 | { | 1136 | { |
1080 | struct drm_device *dev = intel_encoder->base.dev; | 1137 | struct drm_device *dev = intel_encoder->enc.dev; |
1081 | struct drm_i915_private *dev_priv = dev->dev_private; | 1138 | struct drm_i915_private *dev_priv = dev->dev_private; |
1082 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1139 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1083 | 1140 | ||
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) | |||
1090 | udelay(100); | 1147 | udelay(100); |
1091 | } | 1148 | } |
1092 | 1149 | ||
1093 | DP &= ~DP_LINK_TRAIN_MASK; | 1150 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { |
1094 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); | 1151 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1095 | POSTING_READ(dp_priv->output_reg); | 1152 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1153 | POSTING_READ(dp_priv->output_reg); | ||
1154 | } else { | ||
1155 | DP &= ~DP_LINK_TRAIN_MASK; | ||
1156 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); | ||
1157 | POSTING_READ(dp_priv->output_reg); | ||
1158 | } | ||
1096 | 1159 | ||
1097 | udelay(17000); | 1160 | udelay(17000); |
1098 | 1161 | ||
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) | |||
1102 | POSTING_READ(dp_priv->output_reg); | 1165 | POSTING_READ(dp_priv->output_reg); |
1103 | } | 1166 | } |
1104 | 1167 | ||
1105 | static void | ||
1106 | intel_dp_restore(struct drm_connector *connector) | ||
1107 | { | ||
1108 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
1109 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1110 | |||
1111 | if (dp_priv->save_DP & DP_PORT_EN) | ||
1112 | intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); | ||
1113 | else | ||
1114 | intel_dp_link_down(intel_encoder, dp_priv->save_DP); | ||
1115 | } | ||
1116 | |||
1117 | /* | 1168 | /* |
1118 | * According to DP spec | 1169 | * According to DP spec |
1119 | * 5.1.2: | 1170 | * 5.1.2: |
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder) | |||
1144 | static enum drm_connector_status | 1195 | static enum drm_connector_status |
1145 | ironlake_dp_detect(struct drm_connector *connector) | 1196 | ironlake_dp_detect(struct drm_connector *connector) |
1146 | { | 1197 | { |
1147 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1198 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1199 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1148 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1200 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1149 | enum drm_connector_status status; | 1201 | enum drm_connector_status status; |
1150 | 1202 | ||
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1168 | static enum drm_connector_status | 1220 | static enum drm_connector_status |
1169 | intel_dp_detect(struct drm_connector *connector) | 1221 | intel_dp_detect(struct drm_connector *connector) |
1170 | { | 1222 | { |
1171 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1223 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1172 | struct drm_device *dev = intel_encoder->base.dev; | 1224 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1225 | struct drm_device *dev = intel_encoder->enc.dev; | ||
1173 | struct drm_i915_private *dev_priv = dev->dev_private; | 1226 | struct drm_i915_private *dev_priv = dev->dev_private; |
1174 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1227 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1175 | uint32_t temp, bit; | 1228 | uint32_t temp, bit; |
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector) | |||
1180 | if (HAS_PCH_SPLIT(dev)) | 1233 | if (HAS_PCH_SPLIT(dev)) |
1181 | return ironlake_dp_detect(connector); | 1234 | return ironlake_dp_detect(connector); |
1182 | 1235 | ||
1183 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
1184 | |||
1185 | I915_WRITE(PORT_HOTPLUG_EN, | ||
1186 | temp | | ||
1187 | DPB_HOTPLUG_INT_EN | | ||
1188 | DPC_HOTPLUG_INT_EN | | ||
1189 | DPD_HOTPLUG_INT_EN); | ||
1190 | |||
1191 | POSTING_READ(PORT_HOTPLUG_EN); | ||
1192 | |||
1193 | switch (dp_priv->output_reg) { | 1236 | switch (dp_priv->output_reg) { |
1194 | case DP_B: | 1237 | case DP_B: |
1195 | bit = DPB_HOTPLUG_INT_STATUS; | 1238 | bit = DPB_HOTPLUG_INT_STATUS; |
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector) | |||
1222 | 1265 | ||
1223 | static int intel_dp_get_modes(struct drm_connector *connector) | 1266 | static int intel_dp_get_modes(struct drm_connector *connector) |
1224 | { | 1267 | { |
1225 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1268 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1226 | struct drm_device *dev = intel_encoder->base.dev; | 1269 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1270 | struct drm_device *dev = intel_encoder->enc.dev; | ||
1227 | struct drm_i915_private *dev_priv = dev->dev_private; | 1271 | struct drm_i915_private *dev_priv = dev->dev_private; |
1228 | int ret; | 1272 | int ret; |
1229 | 1273 | ||
1230 | /* We should parse the EDID data and find out if it has an audio sink | 1274 | /* We should parse the EDID data and find out if it has an audio sink |
1231 | */ | 1275 | */ |
1232 | 1276 | ||
1233 | ret = intel_ddc_get_modes(intel_encoder); | 1277 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
1234 | if (ret) | 1278 | if (ret) |
1235 | return ret; | 1279 | return ret; |
1236 | 1280 | ||
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1249 | static void | 1293 | static void |
1250 | intel_dp_destroy (struct drm_connector *connector) | 1294 | intel_dp_destroy (struct drm_connector *connector) |
1251 | { | 1295 | { |
1252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
1253 | |||
1254 | if (intel_encoder->i2c_bus) | ||
1255 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
1256 | drm_sysfs_connector_remove(connector); | 1296 | drm_sysfs_connector_remove(connector); |
1257 | drm_connector_cleanup(connector); | 1297 | drm_connector_cleanup(connector); |
1258 | kfree(intel_encoder); | 1298 | kfree(connector); |
1259 | } | 1299 | } |
1260 | 1300 | ||
1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 1301 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | |||
1268 | 1308 | ||
1269 | static const struct drm_connector_funcs intel_dp_connector_funcs = { | 1309 | static const struct drm_connector_funcs intel_dp_connector_funcs = { |
1270 | .dpms = drm_helper_connector_dpms, | 1310 | .dpms = drm_helper_connector_dpms, |
1271 | .save = intel_dp_save, | ||
1272 | .restore = intel_dp_restore, | ||
1273 | .detect = intel_dp_detect, | 1311 | .detect = intel_dp_detect, |
1274 | .fill_modes = drm_helper_probe_single_connector_modes, | 1312 | .fill_modes = drm_helper_probe_single_connector_modes, |
1275 | .destroy = intel_dp_destroy, | 1313 | .destroy = intel_dp_destroy, |
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { | |||
1278 | static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { | 1316 | static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { |
1279 | .get_modes = intel_dp_get_modes, | 1317 | .get_modes = intel_dp_get_modes, |
1280 | .mode_valid = intel_dp_mode_valid, | 1318 | .mode_valid = intel_dp_mode_valid, |
1281 | .best_encoder = intel_best_encoder, | 1319 | .best_encoder = intel_attached_encoder, |
1282 | }; | 1320 | }; |
1283 | 1321 | ||
1284 | static void intel_dp_enc_destroy(struct drm_encoder *encoder) | 1322 | static void intel_dp_enc_destroy(struct drm_encoder *encoder) |
1285 | { | 1323 | { |
1324 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1325 | |||
1326 | if (intel_encoder->i2c_bus) | ||
1327 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
1286 | drm_encoder_cleanup(encoder); | 1328 | drm_encoder_cleanup(encoder); |
1329 | kfree(intel_encoder); | ||
1287 | } | 1330 | } |
1288 | 1331 | ||
1289 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { | 1332 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { |
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder) | |||
1299 | intel_dp_check_link_status(intel_encoder); | 1342 | intel_dp_check_link_status(intel_encoder); |
1300 | } | 1343 | } |
1301 | 1344 | ||
1345 | /* Return which DP Port should be selected for Transcoder DP control */ | ||
1346 | int | ||
1347 | intel_trans_dp_port_sel (struct drm_crtc *crtc) | ||
1348 | { | ||
1349 | struct drm_device *dev = crtc->dev; | ||
1350 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
1351 | struct drm_encoder *encoder; | ||
1352 | struct intel_encoder *intel_encoder = NULL; | ||
1353 | |||
1354 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
1355 | if (!encoder || encoder->crtc != crtc) | ||
1356 | continue; | ||
1357 | |||
1358 | intel_encoder = enc_to_intel_encoder(encoder); | ||
1359 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { | ||
1360 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1361 | return dp_priv->output_reg; | ||
1362 | } | ||
1363 | } | ||
1364 | return -1; | ||
1365 | } | ||
1366 | |||
1302 | void | 1367 | void |
1303 | intel_dp_init(struct drm_device *dev, int output_reg) | 1368 | intel_dp_init(struct drm_device *dev, int output_reg) |
1304 | { | 1369 | { |
1305 | struct drm_i915_private *dev_priv = dev->dev_private; | 1370 | struct drm_i915_private *dev_priv = dev->dev_private; |
1306 | struct drm_connector *connector; | 1371 | struct drm_connector *connector; |
1307 | struct intel_encoder *intel_encoder; | 1372 | struct intel_encoder *intel_encoder; |
1373 | struct intel_connector *intel_connector; | ||
1308 | struct intel_dp_priv *dp_priv; | 1374 | struct intel_dp_priv *dp_priv; |
1309 | const char *name = NULL; | 1375 | const char *name = NULL; |
1310 | 1376 | ||
@@ -1313,9 +1379,15 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1313 | if (!intel_encoder) | 1379 | if (!intel_encoder) |
1314 | return; | 1380 | return; |
1315 | 1381 | ||
1382 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
1383 | if (!intel_connector) { | ||
1384 | kfree(intel_encoder); | ||
1385 | return; | ||
1386 | } | ||
1387 | |||
1316 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); | 1388 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); |
1317 | 1389 | ||
1318 | connector = &intel_encoder->base; | 1390 | connector = &intel_connector->base; |
1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | 1391 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, |
1320 | DRM_MODE_CONNECTOR_DisplayPort); | 1392 | DRM_MODE_CONNECTOR_DisplayPort); |
1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1393 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
@@ -1349,7 +1421,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1349 | DRM_MODE_ENCODER_TMDS); | 1421 | DRM_MODE_ENCODER_TMDS); |
1350 | drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); | 1422 | drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); |
1351 | 1423 | ||
1352 | drm_mode_connector_attach_encoder(&intel_encoder->base, | 1424 | drm_mode_connector_attach_encoder(&intel_connector->base, |
1353 | &intel_encoder->enc); | 1425 | &intel_encoder->enc); |
1354 | drm_sysfs_connector_add(connector); | 1426 | drm_sysfs_connector_add(connector); |
1355 | 1427 | ||
@@ -1378,7 +1450,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1378 | break; | 1450 | break; |
1379 | } | 1451 | } |
1380 | 1452 | ||
1381 | intel_dp_i2c_init(intel_encoder, name); | 1453 | intel_dp_i2c_init(intel_encoder, intel_connector, name); |
1382 | 1454 | ||
1383 | intel_encoder->ddc_bus = &dp_priv->adapter; | 1455 | intel_encoder->ddc_bus = &dp_priv->adapter; |
1384 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1456 | intel_encoder->hot_plug = intel_dp_hot_plug; |
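The link-training hunks above repeat the same port test, HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder), four times: for training pattern 1, pattern 2, the idle pattern in intel_dp_link_down() and the final OFF write. A minimal, compilable userspace sketch of that selection follows; dp_train_bits() and the cpt_port flag are illustrative names only, and the bit values are placeholders rather than the real i915_reg.h definitions.

#include <stdbool.h>
#include <stdint.h>

#define DP_LINK_TRAIN_PAT_1        (0u << 28)  /* placeholder values, not   */
#define DP_LINK_TRAIN_PAT_2        (1u << 28)  /* the i915_reg.h masks      */
#define DP_LINK_TRAIN_PAT_IDLE     (2u << 28)
#define DP_LINK_TRAIN_OFF          (3u << 28)
#define DP_LINK_TRAIN_PAT_1_CPT    (0u << 8)
#define DP_LINK_TRAIN_PAT_2_CPT    (1u << 8)
#define DP_LINK_TRAIN_PAT_IDLE_CPT (2u << 8)
#define DP_LINK_TRAIN_OFF_CPT      (3u << 8)

enum dp_train_stage { TRAIN_PAT_1, TRAIN_PAT_2, TRAIN_IDLE, TRAIN_OFF };

/* The patch keys the choice on HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder);
 * that whole condition is folded into the cpt_port flag here. */
uint32_t dp_train_bits(enum dp_train_stage stage, bool cpt_port)
{
        switch (stage) {
        case TRAIN_PAT_1:
                return cpt_port ? DP_LINK_TRAIN_PAT_1_CPT : DP_LINK_TRAIN_PAT_1;
        case TRAIN_PAT_2:
                return cpt_port ? DP_LINK_TRAIN_PAT_2_CPT : DP_LINK_TRAIN_PAT_2;
        case TRAIN_IDLE:
                return cpt_port ? DP_LINK_TRAIN_PAT_IDLE_CPT : DP_LINK_TRAIN_PAT_IDLE;
        default:
                return cpt_port ? DP_LINK_TRAIN_OFF_CPT : DP_LINK_TRAIN_OFF;
        }
}

The point of the sketch is only that the CPT transcoder path uses a separate set of training-pattern bits from the GPU-port path, which is why the patch clears DP_LINK_TRAIN_MASK_CPT rather than DP_LINK_TRAIN_MASK before setting them on CPT ports.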
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e30253755f12..1ee4717f431f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -96,8 +96,6 @@ struct intel_framebuffer { | |||
96 | 96 | ||
97 | 97 | ||
98 | struct intel_encoder { | 98 | struct intel_encoder { |
99 | struct drm_connector base; | ||
100 | |||
101 | struct drm_encoder enc; | 99 | struct drm_encoder enc; |
102 | int type; | 100 | int type; |
103 | struct i2c_adapter *i2c_bus; | 101 | struct i2c_adapter *i2c_bus; |
@@ -110,6 +108,11 @@ struct intel_encoder { | |||
110 | int clone_mask; | 108 | int clone_mask; |
111 | }; | 109 | }; |
112 | 110 | ||
111 | struct intel_connector { | ||
112 | struct drm_connector base; | ||
113 | void *dev_priv; | ||
114 | }; | ||
115 | |||
113 | struct intel_crtc; | 116 | struct intel_crtc; |
114 | struct intel_overlay { | 117 | struct intel_overlay { |
115 | struct drm_device *dev; | 118 | struct drm_device *dev; |
@@ -149,17 +152,18 @@ struct intel_crtc { | |||
149 | bool lowfreq_avail; | 152 | bool lowfreq_avail; |
150 | struct intel_overlay *overlay; | 153 | struct intel_overlay *overlay; |
151 | struct intel_unpin_work *unpin_work; | 154 | struct intel_unpin_work *unpin_work; |
155 | int fdi_lanes; | ||
152 | }; | 156 | }; |
153 | 157 | ||
154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 158 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
155 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) | 159 | #define to_intel_connector(x) container_of(x, struct intel_connector, base) |
156 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) | 160 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) |
157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 161 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
158 | 162 | ||
159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 163 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
160 | const char *name); | 164 | const char *name); |
161 | void intel_i2c_destroy(struct i2c_adapter *adapter); | 165 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
162 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder); | 166 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
163 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); | 167 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); |
164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 168 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
165 | void intel_i2c_reset_gmbus(struct drm_device *dev); | 169 | void intel_i2c_reset_gmbus(struct drm_device *dev); |
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc); | |||
183 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 187 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
184 | extern void intel_encoder_commit (struct drm_encoder *encoder); | 188 | extern void intel_encoder_commit (struct drm_encoder *encoder); |
185 | 189 | ||
186 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); | 190 | extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); |
187 | 191 | ||
188 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | 192 | extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
189 | struct drm_crtc *crtc); | 193 | struct drm_crtc *crtc); |
@@ -192,9 +196,11 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
192 | extern void intel_wait_for_vblank(struct drm_device *dev); | 196 | extern void intel_wait_for_vblank(struct drm_device *dev); |
193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 197 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 198 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
199 | struct drm_connector *connector, | ||
195 | struct drm_display_mode *mode, | 200 | struct drm_display_mode *mode, |
196 | int *dpms_mode); | 201 | int *dpms_mode); |
197 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | 202 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
203 | struct drm_connector *connector, | ||
198 | int dpms_mode); | 204 | int dpms_mode); |
199 | 205 | ||
200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | 206 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); |
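The intel_drv.h hunk above splits the connector out of struct intel_encoder into a new struct intel_connector and adds the to_intel_connector() upcast next to enc_to_intel_encoder(). Both rely on container_of() recovering the wrapping structure from a pointer to its embedded member. A self-contained sketch of that pattern, using simplified stand-in types rather than the DRM structures and a reduced container_of() rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_connector { int id; };            /* stand-in, not the DRM type */

struct intel_connector {
        struct drm_connector base;
        void *dev_priv;
};

#define to_intel_connector(x) container_of(x, struct intel_connector, base)

int main(void)
{
        struct intel_connector ic = { .base = { .id = 7 }, .dev_priv = NULL };
        struct drm_connector *c = &ic.base;  /* what the DRM core hands back */

        /* Recover the wrapping object from the embedded base member. */
        struct intel_connector *again = to_intel_connector(c);
        printf("%d %d\n", again->base.id, again == &ic);
        return 0;
}

This same upcast is why the connector callbacks above switch from to_intel_encoder(connector) to intel_attached_encoder(connector) followed by enc_to_intel_encoder(): the connector no longer embeds the encoder, so it has to reach it through the attached drm_encoder instead.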
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index ebf213c96b9c..227feca7cf8d 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | static void intel_dvo_save(struct drm_connector *connector) | ||
100 | { | ||
101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
102 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
103 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
104 | |||
105 | /* Each output should probably just save the registers it touches, | ||
106 | * but for now, use more overkill. | ||
107 | */ | ||
108 | dev_priv->saveDVOA = I915_READ(DVOA); | ||
109 | dev_priv->saveDVOB = I915_READ(DVOB); | ||
110 | dev_priv->saveDVOC = I915_READ(DVOC); | ||
111 | |||
112 | dvo->dev_ops->save(dvo); | ||
113 | } | ||
114 | |||
115 | static void intel_dvo_restore(struct drm_connector *connector) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
118 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
119 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
120 | |||
121 | dvo->dev_ops->restore(dvo); | ||
122 | |||
123 | I915_WRITE(DVOA, dev_priv->saveDVOA); | ||
124 | I915_WRITE(DVOB, dev_priv->saveDVOB); | ||
125 | I915_WRITE(DVOC, dev_priv->saveDVOC); | ||
126 | } | ||
127 | |||
128 | static int intel_dvo_mode_valid(struct drm_connector *connector, | 99 | static int intel_dvo_mode_valid(struct drm_connector *connector, |
129 | struct drm_display_mode *mode) | 100 | struct drm_display_mode *mode) |
130 | { | 101 | { |
131 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 102 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
103 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
132 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 104 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
133 | 105 | ||
134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 106 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
241 | */ | 213 | */ |
242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 214 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) |
243 | { | 215 | { |
244 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 216 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
217 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
245 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 218 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
246 | 219 | ||
247 | return dvo->dev_ops->detect(dvo); | 220 | return dvo->dev_ops->detect(dvo); |
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto | |||
249 | 222 | ||
250 | static int intel_dvo_get_modes(struct drm_connector *connector) | 223 | static int intel_dvo_get_modes(struct drm_connector *connector) |
251 | { | 224 | { |
252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 225 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
226 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
253 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 227 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
254 | 228 | ||
255 | /* We should probably have an i2c driver get_modes function for those | 229 | /* We should probably have an i2c driver get_modes function for those |
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector) | |||
257 | * (TV-out, for example), but for now with just TMDS and LVDS, | 231 | * (TV-out, for example), but for now with just TMDS and LVDS, |
258 | * that's not the case. | 232 | * that's not the case. |
259 | */ | 233 | */ |
260 | intel_ddc_get_modes(intel_encoder); | 234 | intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
261 | if (!list_empty(&connector->probed_modes)) | 235 | if (!list_empty(&connector->probed_modes)) |
262 | return 1; | 236 | return 1; |
263 | 237 | ||
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector) | |||
275 | 249 | ||
276 | static void intel_dvo_destroy (struct drm_connector *connector) | 250 | static void intel_dvo_destroy (struct drm_connector *connector) |
277 | { | 251 | { |
278 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
279 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
280 | |||
281 | if (dvo) { | ||
282 | if (dvo->dev_ops->destroy) | ||
283 | dvo->dev_ops->destroy(dvo); | ||
284 | if (dvo->panel_fixed_mode) | ||
285 | kfree(dvo->panel_fixed_mode); | ||
286 | /* no need, in i830_dvoices[] now */ | ||
287 | //kfree(dvo); | ||
288 | } | ||
289 | if (intel_encoder->i2c_bus) | ||
290 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
291 | if (intel_encoder->ddc_bus) | ||
292 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
293 | drm_sysfs_connector_remove(connector); | 252 | drm_sysfs_connector_remove(connector); |
294 | drm_connector_cleanup(connector); | 253 | drm_connector_cleanup(connector); |
295 | kfree(intel_encoder); | 254 | kfree(connector); |
296 | } | ||
297 | |||
298 | #ifdef RANDR_GET_CRTC_INTERFACE | ||
299 | static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) | ||
300 | { | ||
301 | struct drm_device *dev = connector->dev; | ||
302 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
303 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
304 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); | ||
306 | |||
307 | return intel_pipe_to_crtc(pScrn, pipe); | ||
308 | } | 255 | } |
309 | #endif | ||
310 | 256 | ||
311 | static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { | 257 | static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { |
312 | .dpms = intel_dvo_dpms, | 258 | .dpms = intel_dvo_dpms, |
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { | |||
318 | 264 | ||
319 | static const struct drm_connector_funcs intel_dvo_connector_funcs = { | 265 | static const struct drm_connector_funcs intel_dvo_connector_funcs = { |
320 | .dpms = drm_helper_connector_dpms, | 266 | .dpms = drm_helper_connector_dpms, |
321 | .save = intel_dvo_save, | ||
322 | .restore = intel_dvo_restore, | ||
323 | .detect = intel_dvo_detect, | 267 | .detect = intel_dvo_detect, |
324 | .destroy = intel_dvo_destroy, | 268 | .destroy = intel_dvo_destroy, |
325 | .fill_modes = drm_helper_probe_single_connector_modes, | 269 | .fill_modes = drm_helper_probe_single_connector_modes, |
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { | |||
328 | static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { | 272 | static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { |
329 | .mode_valid = intel_dvo_mode_valid, | 273 | .mode_valid = intel_dvo_mode_valid, |
330 | .get_modes = intel_dvo_get_modes, | 274 | .get_modes = intel_dvo_get_modes, |
331 | .best_encoder = intel_best_encoder, | 275 | .best_encoder = intel_attached_encoder, |
332 | }; | 276 | }; |
333 | 277 | ||
334 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) | 278 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) |
335 | { | 279 | { |
280 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
281 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
282 | |||
283 | if (dvo) { | ||
284 | if (dvo->dev_ops->destroy) | ||
285 | dvo->dev_ops->destroy(dvo); | ||
286 | if (dvo->panel_fixed_mode) | ||
287 | kfree(dvo->panel_fixed_mode); | ||
288 | } | ||
289 | if (intel_encoder->i2c_bus) | ||
290 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
291 | if (intel_encoder->ddc_bus) | ||
292 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
336 | drm_encoder_cleanup(encoder); | 293 | drm_encoder_cleanup(encoder); |
294 | kfree(intel_encoder); | ||
337 | } | 295 | } |
338 | 296 | ||
339 | static const struct drm_encoder_funcs intel_dvo_enc_funcs = { | 297 | static const struct drm_encoder_funcs intel_dvo_enc_funcs = { |
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
352 | { | 310 | { |
353 | struct drm_device *dev = connector->dev; | 311 | struct drm_device *dev = connector->dev; |
354 | struct drm_i915_private *dev_priv = dev->dev_private; | 312 | struct drm_i915_private *dev_priv = dev->dev_private; |
355 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 313 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
314 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
356 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 315 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
357 | uint32_t dvo_reg = dvo->dvo_reg; | 316 | uint32_t dvo_reg = dvo->dvo_reg; |
358 | uint32_t dvo_val = I915_READ(dvo_reg); | 317 | uint32_t dvo_val = I915_READ(dvo_reg); |
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
384 | void intel_dvo_init(struct drm_device *dev) | 343 | void intel_dvo_init(struct drm_device *dev) |
385 | { | 344 | { |
386 | struct intel_encoder *intel_encoder; | 345 | struct intel_encoder *intel_encoder; |
346 | struct intel_connector *intel_connector; | ||
387 | struct intel_dvo_device *dvo; | 347 | struct intel_dvo_device *dvo; |
388 | struct i2c_adapter *i2cbus = NULL; | 348 | struct i2c_adapter *i2cbus = NULL; |
389 | int ret = 0; | 349 | int ret = 0; |
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev) | |||
393 | if (!intel_encoder) | 353 | if (!intel_encoder) |
394 | return; | 354 | return; |
395 | 355 | ||
356 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
357 | if (!intel_connector) { | ||
358 | kfree(intel_encoder); | ||
359 | return; | ||
360 | } | ||
361 | |||
396 | /* Set up the DDC bus */ | 362 | /* Set up the DDC bus */ |
397 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | 363 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); |
398 | if (!intel_encoder->ddc_bus) | 364 | if (!intel_encoder->ddc_bus) |
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
400 | 366 | ||
401 | /* Now, try to find a controller */ | 367 | /* Now, try to find a controller */ |
402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | 368 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { |
403 | struct drm_connector *connector = &intel_encoder->base; | 369 | struct drm_connector *connector = &intel_connector->base; |
404 | int gpio; | 370 | int gpio; |
405 | 371 | ||
406 | dvo = &intel_dvo_devices[i]; | 372 | dvo = &intel_dvo_devices[i]; |
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
471 | drm_encoder_helper_add(&intel_encoder->enc, | 437 | drm_encoder_helper_add(&intel_encoder->enc, |
472 | &intel_dvo_helper_funcs); | 438 | &intel_dvo_helper_funcs); |
473 | 439 | ||
474 | drm_mode_connector_attach_encoder(&intel_encoder->base, | 440 | drm_mode_connector_attach_encoder(&intel_connector->base, |
475 | &intel_encoder->enc); | 441 | &intel_encoder->enc); |
476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { | 442 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { |
477 | /* For our LVDS chipsets, we should hopefully be able | 443 | /* For our LVDS chipsets, we should hopefully be able |
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev) | |||
496 | intel_i2c_destroy(i2cbus); | 462 | intel_i2c_destroy(i2cbus); |
497 | free_intel: | 463 | free_intel: |
498 | kfree(intel_encoder); | 464 | kfree(intel_encoder); |
465 | kfree(intel_connector); | ||
499 | } | 466 | } |
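With the connector and encoder now allocated separately, intel_dvo_init(), like the DP, HDMI and LVDS paths, has to unwind the first kzalloc() when the second fails, and resource teardown (i2c/ddc buses, the dvo device) moves from the connector's ->destroy into the encoder's. A small sketch of that allocate-and-unwind shape; output_init() and the stand-in types are illustrative, and plain calloc()/free() stand in for kzalloc()/kfree():

#include <stdlib.h>

struct intel_encoder   { int type; };        /* stand-ins, not the driver types */
struct intel_connector { void *dev_priv; };

int output_init(struct intel_encoder **enc_out,
                struct intel_connector **conn_out)
{
        struct intel_encoder *enc = calloc(1, sizeof(*enc));
        if (!enc)
                return -1;

        struct intel_connector *conn = calloc(1, sizeof(*conn));
        if (!conn) {
                free(enc);                   /* unwind the first allocation */
                return -1;
        }

        *enc_out = enc;
        *conn_out = conn;
        return 0;
}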
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 48cade0cf7b1..8a1c4eddc030 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -39,7 +39,6 @@ | |||
39 | 39 | ||
40 | struct intel_hdmi_priv { | 40 | struct intel_hdmi_priv { |
41 | u32 sdvox_reg; | 41 | u32 sdvox_reg; |
42 | u32 save_SDVOX; | ||
43 | bool has_hdmi_sink; | 42 | bool has_hdmi_sink; |
44 | }; | 43 | }; |
45 | 44 | ||
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
63 | if (hdmi_priv->has_hdmi_sink) | 62 | if (hdmi_priv->has_hdmi_sink) |
64 | sdvox |= SDVO_AUDIO_ENABLE; | 63 | sdvox |= SDVO_AUDIO_ENABLE; |
65 | 64 | ||
66 | if (intel_crtc->pipe == 1) | 65 | if (intel_crtc->pipe == 1) { |
67 | sdvox |= SDVO_PIPE_B_SELECT; | 66 | if (HAS_PCH_CPT(dev)) |
67 | sdvox |= PORT_TRANS_B_SEL_CPT; | ||
68 | else | ||
69 | sdvox |= SDVO_PIPE_B_SELECT; | ||
70 | } | ||
68 | 71 | ||
69 | I915_WRITE(hdmi_priv->sdvox_reg, sdvox); | 72 | I915_WRITE(hdmi_priv->sdvox_reg, sdvox); |
70 | POSTING_READ(hdmi_priv->sdvox_reg); | 73 | POSTING_READ(hdmi_priv->sdvox_reg); |
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
106 | } | 109 | } |
107 | } | 110 | } |
108 | 111 | ||
109 | static void intel_hdmi_save(struct drm_connector *connector) | ||
110 | { | ||
111 | struct drm_device *dev = connector->dev; | ||
112 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
113 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
114 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | ||
115 | |||
116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); | ||
117 | } | ||
118 | |||
119 | static void intel_hdmi_restore(struct drm_connector *connector) | ||
120 | { | ||
121 | struct drm_device *dev = connector->dev; | ||
122 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
123 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
124 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | ||
125 | |||
126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); | ||
127 | POSTING_READ(hdmi_priv->sdvox_reg); | ||
128 | } | ||
129 | |||
130 | static int intel_hdmi_mode_valid(struct drm_connector *connector, | 112 | static int intel_hdmi_mode_valid(struct drm_connector *connector, |
131 | struct drm_display_mode *mode) | 113 | struct drm_display_mode *mode) |
132 | { | 114 | { |
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
151 | static enum drm_connector_status | 133 | static enum drm_connector_status |
152 | intel_hdmi_detect(struct drm_connector *connector) | 134 | intel_hdmi_detect(struct drm_connector *connector) |
153 | { | 135 | { |
154 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 136 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
137 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
155 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | 138 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
156 | struct edid *edid = NULL; | 139 | struct edid *edid = NULL; |
157 | enum drm_connector_status status = connector_status_disconnected; | 140 | enum drm_connector_status status = connector_status_disconnected; |
158 | 141 | ||
159 | hdmi_priv->has_hdmi_sink = false; | 142 | hdmi_priv->has_hdmi_sink = false; |
160 | edid = drm_get_edid(&intel_encoder->base, | 143 | edid = drm_get_edid(connector, |
161 | intel_encoder->ddc_bus); | 144 | intel_encoder->ddc_bus); |
162 | 145 | ||
163 | if (edid) { | 146 | if (edid) { |
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
165 | status = connector_status_connected; | 148 | status = connector_status_connected; |
166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 149 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
167 | } | 150 | } |
168 | intel_encoder->base.display_info.raw_edid = NULL; | 151 | connector->display_info.raw_edid = NULL; |
169 | kfree(edid); | 152 | kfree(edid); |
170 | } | 153 | } |
171 | 154 | ||
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
174 | 157 | ||
175 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 158 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
176 | { | 159 | { |
177 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 160 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
161 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
178 | 162 | ||
179 | /* We should parse the EDID data and find out if it's an HDMI sink so | 163 | /* We should parse the EDID data and find out if it's an HDMI sink so |
180 | * we can send audio to it. | 164 | * we can send audio to it. |
181 | */ | 165 | */ |
182 | 166 | ||
183 | return intel_ddc_get_modes(intel_encoder); | 167 | return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
184 | } | 168 | } |
185 | 169 | ||
186 | static void intel_hdmi_destroy(struct drm_connector *connector) | 170 | static void intel_hdmi_destroy(struct drm_connector *connector) |
187 | { | 171 | { |
188 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
189 | |||
190 | if (intel_encoder->i2c_bus) | ||
191 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
192 | drm_sysfs_connector_remove(connector); | 172 | drm_sysfs_connector_remove(connector); |
193 | drm_connector_cleanup(connector); | 173 | drm_connector_cleanup(connector); |
194 | kfree(intel_encoder); | 174 | kfree(connector); |
195 | } | 175 | } |
196 | 176 | ||
197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | 177 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | |||
204 | 184 | ||
205 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | 185 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { |
206 | .dpms = drm_helper_connector_dpms, | 186 | .dpms = drm_helper_connector_dpms, |
207 | .save = intel_hdmi_save, | ||
208 | .restore = intel_hdmi_restore, | ||
209 | .detect = intel_hdmi_detect, | 187 | .detect = intel_hdmi_detect, |
210 | .fill_modes = drm_helper_probe_single_connector_modes, | 188 | .fill_modes = drm_helper_probe_single_connector_modes, |
211 | .destroy = intel_hdmi_destroy, | 189 | .destroy = intel_hdmi_destroy, |
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | |||
214 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { | 192 | static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { |
215 | .get_modes = intel_hdmi_get_modes, | 193 | .get_modes = intel_hdmi_get_modes, |
216 | .mode_valid = intel_hdmi_mode_valid, | 194 | .mode_valid = intel_hdmi_mode_valid, |
217 | .best_encoder = intel_best_encoder, | 195 | .best_encoder = intel_attached_encoder, |
218 | }; | 196 | }; |
219 | 197 | ||
220 | static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) | 198 | static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) |
221 | { | 199 | { |
200 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
201 | |||
202 | if (intel_encoder->i2c_bus) | ||
203 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
222 | drm_encoder_cleanup(encoder); | 204 | drm_encoder_cleanup(encoder); |
205 | kfree(intel_encoder); | ||
223 | } | 206 | } |
224 | 207 | ||
225 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | 208 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { |
@@ -231,15 +214,23 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
231 | struct drm_i915_private *dev_priv = dev->dev_private; | 214 | struct drm_i915_private *dev_priv = dev->dev_private; |
232 | struct drm_connector *connector; | 215 | struct drm_connector *connector; |
233 | struct intel_encoder *intel_encoder; | 216 | struct intel_encoder *intel_encoder; |
217 | struct intel_connector *intel_connector; | ||
234 | struct intel_hdmi_priv *hdmi_priv; | 218 | struct intel_hdmi_priv *hdmi_priv; |
235 | 219 | ||
236 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + | 220 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 221 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
238 | if (!intel_encoder) | 222 | if (!intel_encoder) |
239 | return; | 223 | return; |
224 | |||
225 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
226 | if (!intel_connector) { | ||
227 | kfree(intel_encoder); | ||
228 | return; | ||
229 | } | ||
230 | |||
240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); | 231 | hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); |
241 | 232 | ||
242 | connector = &intel_encoder->base; | 233 | connector = &intel_connector->base; |
243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 234 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
244 | DRM_MODE_CONNECTOR_HDMIA); | 235 | DRM_MODE_CONNECTOR_HDMIA); |
245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 236 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
@@ -285,7 +276,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
285 | DRM_MODE_ENCODER_TMDS); | 276 | DRM_MODE_ENCODER_TMDS); |
286 | drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); | 277 | drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); |
287 | 278 | ||
288 | drm_mode_connector_attach_encoder(&intel_encoder->base, | 279 | drm_mode_connector_attach_encoder(&intel_connector->base, |
289 | &intel_encoder->enc); | 280 | &intel_encoder->enc); |
290 | drm_sysfs_connector_add(connector); | 281 | drm_sysfs_connector_add(connector); |
291 | 282 | ||
@@ -303,6 +294,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
303 | err_connector: | 294 | err_connector: |
304 | drm_connector_cleanup(connector); | 295 | drm_connector_cleanup(connector); |
305 | kfree(intel_encoder); | 296 | kfree(intel_encoder); |
297 | kfree(intel_connector); | ||
306 | 298 | ||
307 | return; | 299 | return; |
308 | } | 300 | } |
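The intel_hdmi.c hunk above changes the pipe-1 case so that on CPT the SDVOX register gets a transcoder select (PORT_TRANS_B_SEL_CPT) rather than the pipe select bit used on earlier hardware. A sketch of just that decision; hdmi_pipe_bits() is an illustrative name and the bit values are placeholders, not the i915_reg.h ones:

#include <stdbool.h>
#include <stdint.h>

#define SDVO_PIPE_B_SELECT    (1u << 30)     /* placeholder */
#define PORT_TRANS_B_SEL_CPT  (1u << 29)     /* placeholder */

uint32_t hdmi_pipe_bits(int pipe, bool has_pch_cpt)
{
        if (pipe != 1)
                return 0;                    /* pipe A needs no select bit */
        /* On CPT the port is routed by transcoder select, not pipe select. */
        return has_pch_cpt ? PORT_TRANS_B_SEL_CPT : SDVO_PIPE_B_SELECT;
}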
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b66806a37d37..6a1accd83aec 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
139 | /* XXX: We never power down the LVDS pairs. */ | 139 | /* XXX: We never power down the LVDS pairs. */ |
140 | } | 140 | } |
141 | 141 | ||
142 | static void intel_lvds_save(struct drm_connector *connector) | ||
143 | { | ||
144 | struct drm_device *dev = connector->dev; | ||
145 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
146 | u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; | ||
147 | u32 pwm_ctl_reg; | ||
148 | |||
149 | if (HAS_PCH_SPLIT(dev)) { | ||
150 | pp_on_reg = PCH_PP_ON_DELAYS; | ||
151 | pp_off_reg = PCH_PP_OFF_DELAYS; | ||
152 | pp_ctl_reg = PCH_PP_CONTROL; | ||
153 | pp_div_reg = PCH_PP_DIVISOR; | ||
154 | pwm_ctl_reg = BLC_PWM_CPU_CTL; | ||
155 | } else { | ||
156 | pp_on_reg = PP_ON_DELAYS; | ||
157 | pp_off_reg = PP_OFF_DELAYS; | ||
158 | pp_ctl_reg = PP_CONTROL; | ||
159 | pp_div_reg = PP_DIVISOR; | ||
160 | pwm_ctl_reg = BLC_PWM_CTL; | ||
161 | } | ||
162 | |||
163 | dev_priv->savePP_ON = I915_READ(pp_on_reg); | ||
164 | dev_priv->savePP_OFF = I915_READ(pp_off_reg); | ||
165 | dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg); | ||
166 | dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg); | ||
167 | dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg); | ||
168 | dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & | ||
169 | BACKLIGHT_DUTY_CYCLE_MASK); | ||
170 | |||
171 | /* | ||
172 | * If the light is off at server startup, just make it full brightness | ||
173 | */ | ||
174 | if (dev_priv->backlight_duty_cycle == 0) | ||
175 | dev_priv->backlight_duty_cycle = | ||
176 | intel_lvds_get_max_backlight(dev); | ||
177 | } | ||
178 | |||
179 | static void intel_lvds_restore(struct drm_connector *connector) | ||
180 | { | ||
181 | struct drm_device *dev = connector->dev; | ||
182 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
183 | u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; | ||
184 | u32 pwm_ctl_reg; | ||
185 | |||
186 | if (HAS_PCH_SPLIT(dev)) { | ||
187 | pp_on_reg = PCH_PP_ON_DELAYS; | ||
188 | pp_off_reg = PCH_PP_OFF_DELAYS; | ||
189 | pp_ctl_reg = PCH_PP_CONTROL; | ||
190 | pp_div_reg = PCH_PP_DIVISOR; | ||
191 | pwm_ctl_reg = BLC_PWM_CPU_CTL; | ||
192 | } else { | ||
193 | pp_on_reg = PP_ON_DELAYS; | ||
194 | pp_off_reg = PP_OFF_DELAYS; | ||
195 | pp_ctl_reg = PP_CONTROL; | ||
196 | pp_div_reg = PP_DIVISOR; | ||
197 | pwm_ctl_reg = BLC_PWM_CTL; | ||
198 | } | ||
199 | |||
200 | I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL); | ||
201 | I915_WRITE(pp_on_reg, dev_priv->savePP_ON); | ||
202 | I915_WRITE(pp_off_reg, dev_priv->savePP_OFF); | ||
203 | I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR); | ||
204 | I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL); | ||
205 | if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) | ||
206 | intel_lvds_set_power(dev, true); | ||
207 | else | ||
208 | intel_lvds_set_power(dev, false); | ||
209 | } | ||
210 | |||
211 | static int intel_lvds_mode_valid(struct drm_connector *connector, | 142 | static int intel_lvds_mode_valid(struct drm_connector *connector, |
212 | struct drm_display_mode *mode) | 143 | struct drm_display_mode *mode) |
213 | { | 144 | { |
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
635 | static int intel_lvds_get_modes(struct drm_connector *connector) | 566 | static int intel_lvds_get_modes(struct drm_connector *connector) |
636 | { | 567 | { |
637 | struct drm_device *dev = connector->dev; | 568 | struct drm_device *dev = connector->dev; |
638 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 569 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
570 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
639 | struct drm_i915_private *dev_priv = dev->dev_private; | 571 | struct drm_i915_private *dev_priv = dev->dev_private; |
640 | int ret = 0; | 572 | int ret = 0; |
641 | 573 | ||
642 | if (dev_priv->lvds_edid_good) { | 574 | if (dev_priv->lvds_edid_good) { |
643 | ret = intel_ddc_get_modes(intel_encoder); | 575 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
644 | 576 | ||
645 | if (ret) | 577 | if (ret) |
646 | return ret; | 578 | return ret; |
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
717 | static void intel_lvds_destroy(struct drm_connector *connector) | 649 | static void intel_lvds_destroy(struct drm_connector *connector) |
718 | { | 650 | { |
719 | struct drm_device *dev = connector->dev; | 651 | struct drm_device *dev = connector->dev; |
720 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
721 | struct drm_i915_private *dev_priv = dev->dev_private; | 652 | struct drm_i915_private *dev_priv = dev->dev_private; |
722 | 653 | ||
723 | if (intel_encoder->ddc_bus) | ||
724 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
725 | if (dev_priv->lid_notifier.notifier_call) | 654 | if (dev_priv->lid_notifier.notifier_call) |
726 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 655 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
727 | drm_sysfs_connector_remove(connector); | 656 | drm_sysfs_connector_remove(connector); |
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
734 | uint64_t value) | 663 | uint64_t value) |
735 | { | 664 | { |
736 | struct drm_device *dev = connector->dev; | 665 | struct drm_device *dev = connector->dev; |
737 | struct intel_encoder *intel_encoder = | ||
738 | to_intel_encoder(connector); | ||
739 | 666 | ||
740 | if (property == dev->mode_config.scaling_mode_property && | 667 | if (property == dev->mode_config.scaling_mode_property && |
741 | connector->encoder) { | 668 | connector->encoder) { |
742 | struct drm_crtc *crtc = connector->encoder->crtc; | 669 | struct drm_crtc *crtc = connector->encoder->crtc; |
670 | struct drm_encoder *encoder = connector->encoder; | ||
671 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
743 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; | 672 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
673 | |||
744 | if (value == DRM_MODE_SCALE_NONE) { | 674 | if (value == DRM_MODE_SCALE_NONE) { |
745 | DRM_DEBUG_KMS("no scaling not supported\n"); | 675 | DRM_DEBUG_KMS("no scaling not supported\n"); |
746 | return 0; | 676 | return 0; |
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { | |||
774 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { | 704 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { |
775 | .get_modes = intel_lvds_get_modes, | 705 | .get_modes = intel_lvds_get_modes, |
776 | .mode_valid = intel_lvds_mode_valid, | 706 | .mode_valid = intel_lvds_mode_valid, |
777 | .best_encoder = intel_best_encoder, | 707 | .best_encoder = intel_attached_encoder, |
778 | }; | 708 | }; |
779 | 709 | ||
780 | static const struct drm_connector_funcs intel_lvds_connector_funcs = { | 710 | static const struct drm_connector_funcs intel_lvds_connector_funcs = { |
781 | .dpms = drm_helper_connector_dpms, | 711 | .dpms = drm_helper_connector_dpms, |
782 | .save = intel_lvds_save, | ||
783 | .restore = intel_lvds_restore, | ||
784 | .detect = intel_lvds_detect, | 712 | .detect = intel_lvds_detect, |
785 | .fill_modes = drm_helper_probe_single_connector_modes, | 713 | .fill_modes = drm_helper_probe_single_connector_modes, |
786 | .set_property = intel_lvds_set_property, | 714 | .set_property = intel_lvds_set_property, |
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { | |||
790 | 718 | ||
791 | static void intel_lvds_enc_destroy(struct drm_encoder *encoder) | 719 | static void intel_lvds_enc_destroy(struct drm_encoder *encoder) |
792 | { | 720 | { |
721 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
722 | |||
723 | if (intel_encoder->ddc_bus) | ||
724 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
793 | drm_encoder_cleanup(encoder); | 725 | drm_encoder_cleanup(encoder); |
726 | kfree(intel_encoder); | ||
794 | } | 727 | } |
795 | 728 | ||
796 | static const struct drm_encoder_funcs intel_lvds_enc_funcs = { | 729 | static const struct drm_encoder_funcs intel_lvds_enc_funcs = { |
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
979 | { | 912 | { |
980 | struct drm_i915_private *dev_priv = dev->dev_private; | 913 | struct drm_i915_private *dev_priv = dev->dev_private; |
981 | struct intel_encoder *intel_encoder; | 914 | struct intel_encoder *intel_encoder; |
915 | struct intel_connector *intel_connector; | ||
982 | struct drm_connector *connector; | 916 | struct drm_connector *connector; |
983 | struct drm_encoder *encoder; | 917 | struct drm_encoder *encoder; |
984 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 918 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev) | |||
1012 | return; | 946 | return; |
1013 | } | 947 | } |
1014 | 948 | ||
1015 | connector = &intel_encoder->base; | 949 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
950 | if (!intel_connector) { | ||
951 | kfree(intel_encoder); | ||
952 | return; | ||
953 | } | ||
954 | |||
955 | connector = &intel_connector->base; | ||
1016 | encoder = &intel_encoder->enc; | 956 | encoder = &intel_encoder->enc; |
1017 | drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, | 957 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, |
1018 | DRM_MODE_CONNECTOR_LVDS); | 958 | DRM_MODE_CONNECTOR_LVDS); |
1019 | 959 | ||
1020 | drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, | 960 | drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, |
1021 | DRM_MODE_ENCODER_LVDS); | 961 | DRM_MODE_ENCODER_LVDS); |
1022 | 962 | ||
1023 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); | 963 | drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); |
1024 | intel_encoder->type = INTEL_OUTPUT_LVDS; | 964 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
1025 | 965 | ||
1026 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 966 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
1027 | intel_encoder->crtc_mask = (1 << 1); | 967 | intel_encoder->crtc_mask = (1 << 1); |
968 | if (IS_I965G(dev)) | ||
969 | intel_encoder->crtc_mask |= (1 << 0); | ||
1028 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 970 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
1029 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 971 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
1030 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 972 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
1039 | * the initial panel fitting mode will be FULL_SCREEN. | 981 | * the initial panel fitting mode will be FULL_SCREEN. |
1040 | */ | 982 | */ |
1041 | 983 | ||
1042 | drm_connector_attach_property(&intel_encoder->base, | 984 | drm_connector_attach_property(&intel_connector->base, |
1043 | dev->mode_config.scaling_mode_property, | 985 | dev->mode_config.scaling_mode_property, |
1044 | DRM_MODE_SCALE_FULLSCREEN); | 986 | DRM_MODE_SCALE_FULLSCREEN); |
1045 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 987 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; |
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
1067 | */ | 1009 | */ |
1068 | dev_priv->lvds_edid_good = true; | 1010 | dev_priv->lvds_edid_good = true; |
1069 | 1011 | ||
1070 | if (!intel_ddc_get_modes(intel_encoder)) | 1012 | if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) |
1071 | dev_priv->lvds_edid_good = false; | 1013 | dev_priv->lvds_edid_good = false; |
1072 | 1014 | ||
1073 | list_for_each_entry(scan, &connector->probed_modes, head) { | 1015 | list_for_each_entry(scan, &connector->probed_modes, head) { |
@@ -1151,4 +1093,5 @@ failed: | |||
1151 | drm_connector_cleanup(connector); | 1093 | drm_connector_cleanup(connector); |
1152 | drm_encoder_cleanup(encoder); | 1094 | drm_encoder_cleanup(encoder); |
1153 | kfree(intel_encoder); | 1095 | kfree(intel_encoder); |
1096 | kfree(intel_connector); | ||
1154 | } | 1097 | } |
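Note on the intel_lvds.c changes above: with the connector split out of struct intel_encoder, intel_lvds_init() now allocates two objects, and the failure paths must release both (the early return frees only the encoder, the failed: label frees both). A compile-and-run sketch of that allocate-or-unwind shape, using calloc()/free() as stand-ins for kzalloc()/kfree(); every name below is illustrative.

/* User-space sketch of the two-object setup with unwind on failure; the
 * real code uses kzalloc()/kfree() plus the DRM connector/encoder helpers. */
#include <stdlib.h>

struct fake_encoder   { int type; };
struct fake_connector { struct fake_encoder *attached; };

static int lvds_like_init(struct fake_encoder **enc_out,
			  struct fake_connector **con_out)
{
	struct fake_encoder *enc = calloc(1, sizeof(*enc));
	struct fake_connector *con;

	if (!enc)
		return -1;

	con = calloc(1, sizeof(*con));
	if (!con) {
		free(enc);		/* mirrors kfree(intel_encoder) on failure */
		return -1;
	}

	con->attached = enc;		/* mirrors drm_mode_connector_attach_encoder() */
	*enc_out = enc;
	*con_out = con;
	return 0;
}

int main(void)
{
	struct fake_encoder *e;
	struct fake_connector *c;

	if (lvds_like_init(&e, &c) == 0) {
		free(c);		/* on teardown each object is freed separately */
		free(e);
	}
	return 0;
}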
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 8e5c83b2d120..4b1fd3d9c73c 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) | |||
54 | } | 54 | } |
55 | }; | 55 | }; |
56 | 56 | ||
57 | intel_i2c_quirk_set(intel_encoder->base.dev, true); | 57 | intel_i2c_quirk_set(intel_encoder->enc.dev, true); |
58 | ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); | 58 | ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); |
59 | intel_i2c_quirk_set(intel_encoder->base.dev, false); | 59 | intel_i2c_quirk_set(intel_encoder->enc.dev, false); |
60 | if (ret == 2) | 60 | if (ret == 2) |
61 | return true; | 61 | return true; |
62 | 62 | ||
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) | |||
66 | /** | 66 | /** |
67 | * intel_ddc_get_modes - get modelist from monitor | 67 | * intel_ddc_get_modes - get modelist from monitor |
68 | * @connector: DRM connector device to use | 68 | * @connector: DRM connector device to use |
69 | * @adapter: i2c adapter | ||
69 | * | 70 | * |
70 | * Fetch the EDID information from @connector using the DDC bus. | 71 | * Fetch the EDID information from @connector using the DDC bus. |
71 | */ | 72 | */ |
72 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder) | 73 | int intel_ddc_get_modes(struct drm_connector *connector, |
74 | struct i2c_adapter *adapter) | ||
73 | { | 75 | { |
74 | struct edid *edid; | 76 | struct edid *edid; |
75 | int ret = 0; | 77 | int ret = 0; |
76 | 78 | ||
77 | intel_i2c_quirk_set(intel_encoder->base.dev, true); | 79 | intel_i2c_quirk_set(connector->dev, true); |
78 | edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); | 80 | edid = drm_get_edid(connector, adapter); |
79 | intel_i2c_quirk_set(intel_encoder->base.dev, false); | 81 | intel_i2c_quirk_set(connector->dev, false); |
80 | if (edid) { | 82 | if (edid) { |
81 | drm_mode_connector_update_edid_property(&intel_encoder->base, | 83 | drm_mode_connector_update_edid_property(connector, edid); |
82 | edid); | 84 | ret = drm_add_edid_modes(connector, edid); |
83 | ret = drm_add_edid_modes(&intel_encoder->base, edid); | 85 | connector->display_info.raw_edid = NULL; |
84 | intel_encoder->base.display_info.raw_edid = NULL; | ||
85 | kfree(edid); | 86 | kfree(edid); |
86 | } | 87 | } |
87 | 88 | ||
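Note on the intel_modes.c change above: intel_ddc_get_modes() now takes the i2c adapter explicitly instead of always using intel_encoder->ddc_bus, which is what lets the SDVO code further down re-probe the same connector over its analog DDC bus. A hypothetical caller shape under that assumption; only intel_ddc_get_modes() and the ddc_bus/analog_ddc_bus fields come from this patch, the wrapper itself does not exist in it.

/* Hypothetical caller, not part of the patch: with the adapter passed in,
 * the same connector can be re-probed over a second DDC bus without
 * temporarily swapping pointers inside the encoder. */
static int sdvo_probe_modes(struct drm_connector *connector,
			    struct intel_encoder *intel_encoder,
			    struct intel_sdvo_priv *sdvo_priv)
{
	int num_modes;

	num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
	if (num_modes == 0 && sdvo_priv->analog_ddc_bus)
		num_modes = intel_ddc_get_modes(connector,
						sdvo_priv->analog_ddc_bus);
	return num_modes;
}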
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 87d953664cb0..42ceb15da689 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -36,7 +36,18 @@ | |||
36 | #include "i915_drm.h" | 36 | #include "i915_drm.h" |
37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
38 | #include "intel_sdvo_regs.h" | 38 | #include "intel_sdvo_regs.h" |
39 | #include <linux/dmi.h> | 39 | |
40 | #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) | ||
41 | #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) | ||
42 | #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) | ||
43 | #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) | ||
44 | |||
45 | #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ | ||
46 | SDVO_TV_MASK) | ||
47 | |||
48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) | ||
49 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) | ||
50 | |||
40 | 51 | ||
41 | static char *tv_format_names[] = { | 52 | static char *tv_format_names[] = { |
42 | "NTSC_M" , "NTSC_J" , "NTSC_443", | 53 | "NTSC_M" , "NTSC_J" , "NTSC_443", |
@@ -86,12 +97,6 @@ struct intel_sdvo_priv { | |||
86 | /* This is for current tv format name */ | 97 | /* This is for current tv format name */ |
87 | char *tv_format_name; | 98 | char *tv_format_name; |
88 | 99 | ||
89 | /* This contains all current supported TV format */ | ||
90 | char *tv_format_supported[TV_FORMAT_NUM]; | ||
91 | int format_supported_num; | ||
92 | struct drm_property *tv_format_property; | ||
93 | struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; | ||
94 | |||
95 | /** | 100 | /** |
96 | * This is set if we treat the device as HDMI, instead of DVI. | 101 | * This is set if we treat the device as HDMI, instead of DVI. |
97 | */ | 102 | */ |
@@ -112,12 +117,6 @@ struct intel_sdvo_priv { | |||
112 | */ | 117 | */ |
113 | struct drm_display_mode *sdvo_lvds_fixed_mode; | 118 | struct drm_display_mode *sdvo_lvds_fixed_mode; |
114 | 119 | ||
115 | /** | ||
116 | * Returned SDTV resolutions allowed for the current format, if the | ||
117 | * device reported it. | ||
118 | */ | ||
119 | struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; | ||
120 | |||
121 | /* | 120 | /* |
122 | * supported encoding mode, used to determine whether HDMI is | 121 | * supported encoding mode, used to determine whether HDMI is |
123 | * supported | 122 | * supported |
@@ -130,11 +129,24 @@ struct intel_sdvo_priv { | |||
130 | /* Mac mini hack -- use the same DDC as the analog connector */ | 129 | /* Mac mini hack -- use the same DDC as the analog connector */ |
131 | struct i2c_adapter *analog_ddc_bus; | 130 | struct i2c_adapter *analog_ddc_bus; |
132 | 131 | ||
133 | int save_sdvo_mult; | 132 | }; |
134 | u16 save_active_outputs; | 133 | |
135 | struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; | 134 | struct intel_sdvo_connector { |
136 | struct intel_sdvo_dtd save_output_dtd[16]; | 135 | /* Mark the type of connector */ |
137 | u32 save_SDVOX; | 136 | uint16_t output_flag; |
137 | |||
138 | /* This contains all current supported TV format */ | ||
139 | char *tv_format_supported[TV_FORMAT_NUM]; | ||
140 | int format_supported_num; | ||
141 | struct drm_property *tv_format_property; | ||
142 | struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; | ||
143 | |||
144 | /** | ||
145 | * Returned SDTV resolutions allowed for the current format, if the | ||
146 | * device reported it. | ||
147 | */ | ||
148 | struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; | ||
149 | |||
138 | /* add the property for the SDVO-TV */ | 150 | /* add the property for the SDVO-TV */ |
139 | struct drm_property *left_property; | 151 | struct drm_property *left_property; |
140 | struct drm_property *right_property; | 152 | struct drm_property *right_property; |
@@ -162,7 +174,12 @@ struct intel_sdvo_priv { | |||
162 | }; | 174 | }; |
163 | 175 | ||
164 | static bool | 176 | static bool |
165 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); | 177 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, |
178 | uint16_t flags); | ||
179 | static void | ||
180 | intel_sdvo_tv_create_property(struct drm_connector *connector, int type); | ||
181 | static void | ||
182 | intel_sdvo_create_enhance_property(struct drm_connector *connector); | ||
166 | 183 | ||
167 | /** | 184 | /** |
168 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 185 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); | |||
171 | */ | 188 | */ |
172 | static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) | 189 | static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) |
173 | { | 190 | { |
174 | struct drm_device *dev = intel_encoder->base.dev; | 191 | struct drm_device *dev = intel_encoder->enc.dev; |
175 | struct drm_i915_private *dev_priv = dev->dev_private; | 192 | struct drm_i915_private *dev_priv = dev->dev_private; |
176 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 193 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
177 | u32 bval = val, cval = val; | 194 | u32 bval = val, cval = val; |
178 | int i; | 195 | int i; |
179 | 196 | ||
197 | if (sdvo_priv->sdvo_reg == PCH_SDVOB) { | ||
198 | I915_WRITE(sdvo_priv->sdvo_reg, val); | ||
199 | I915_READ(sdvo_priv->sdvo_reg); | ||
200 | return; | ||
201 | } | ||
202 | |||
180 | if (sdvo_priv->sdvo_reg == SDVOB) { | 203 | if (sdvo_priv->sdvo_reg == SDVOB) { |
181 | cval = I915_READ(SDVOC); | 204 | cval = I915_READ(SDVOC); |
182 | } else { | 205 | } else { |
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name { | |||
353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | 376 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), |
354 | }; | 377 | }; |
355 | 378 | ||
356 | #define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") | 379 | #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) |
380 | #define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC") | ||
357 | #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) | 381 | #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) |
358 | 382 | ||
359 | static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, | 383 | static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, |
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b | |||
563 | return true; | 587 | return true; |
564 | } | 588 | } |
565 | 589 | ||
566 | static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, | ||
567 | u16 *outputs) | ||
568 | { | ||
569 | u8 status; | ||
570 | |||
571 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); | ||
572 | status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); | ||
573 | |||
574 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
575 | } | ||
576 | |||
577 | static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, | 590 | static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, |
578 | u16 outputs) | 591 | u16 outputs) |
579 | { | 592 | { |
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, | |||
646 | return (status == SDVO_CMD_STATUS_SUCCESS); | 659 | return (status == SDVO_CMD_STATUS_SUCCESS); |
647 | } | 660 | } |
648 | 661 | ||
649 | static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, | ||
650 | struct intel_sdvo_dtd *dtd) | ||
651 | { | ||
652 | u8 status; | ||
653 | |||
654 | intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); | ||
655 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, | ||
656 | sizeof(dtd->part1)); | ||
657 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
658 | return false; | ||
659 | |||
660 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); | ||
661 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, | ||
662 | sizeof(dtd->part2)); | ||
663 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
664 | return false; | ||
665 | |||
666 | return true; | ||
667 | } | ||
668 | |||
669 | static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, | ||
670 | struct intel_sdvo_dtd *dtd) | ||
671 | { | ||
672 | return intel_sdvo_get_timing(intel_encoder, | ||
673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); | ||
674 | } | ||
675 | |||
676 | static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, | ||
677 | struct intel_sdvo_dtd *dtd) | ||
678 | { | ||
679 | return intel_sdvo_get_timing(intel_encoder, | ||
680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); | ||
681 | } | ||
682 | |||
683 | static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, | 662 | static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, |
684 | struct intel_sdvo_dtd *dtd) | 663 | struct intel_sdvo_dtd *dtd) |
685 | { | 664 | { |
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en | |||
767 | return false; | 746 | return false; |
768 | } | 747 | } |
769 | 748 | ||
770 | static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) | ||
771 | { | ||
772 | u8 response, status; | ||
773 | |||
774 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); | ||
775 | status = intel_sdvo_read_response(intel_encoder, &response, 1); | ||
776 | |||
777 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
778 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); | ||
779 | return SDVO_CLOCK_RATE_MULT_1X; | ||
780 | } else { | ||
781 | DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response); | ||
782 | } | ||
783 | |||
784 | return response; | ||
785 | } | ||
786 | |||
787 | static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) | 749 | static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) |
788 | { | 750 | { |
789 | u8 status; | 751 | u8 status; |
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) | |||
1071 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? | 1033 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? |
1072 | sizeof(format) : sizeof(format_map)); | 1034 | sizeof(format) : sizeof(format_map)); |
1073 | 1035 | ||
1074 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, | 1036 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, |
1075 | sizeof(format)); | 1037 | sizeof(format)); |
1076 | 1038 | ||
1077 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | 1039 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1101 | /* Set output timings */ | 1063 | /* Set output timings */ |
1102 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | 1064 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
1103 | intel_sdvo_set_target_output(intel_encoder, | 1065 | intel_sdvo_set_target_output(intel_encoder, |
1104 | dev_priv->controlled_output); | 1066 | dev_priv->attached_output); |
1105 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); | 1067 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
1106 | 1068 | ||
1107 | /* Set the input timing to the screen. Assume always input 0. */ | 1069 | /* Set the input timing to the screen. Assume always input 0. */ |
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1139 | dev_priv->sdvo_lvds_fixed_mode); | 1101 | dev_priv->sdvo_lvds_fixed_mode); |
1140 | 1102 | ||
1141 | intel_sdvo_set_target_output(intel_encoder, | 1103 | intel_sdvo_set_target_output(intel_encoder, |
1142 | dev_priv->controlled_output); | 1104 | dev_priv->attached_output); |
1143 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); | 1105 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
1144 | 1106 | ||
1145 | /* Set the input timing to the screen. Assume always input 0. */ | 1107 | /* Set the input timing to the screen. Assume always input 0. */ |
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1204 | * channel on the motherboard. In a two-input device, the first input | 1166 | * channel on the motherboard. In a two-input device, the first input |
1205 | * will be SDVOB and the second SDVOC. | 1167 | * will be SDVOB and the second SDVOC. |
1206 | */ | 1168 | */ |
1207 | in_out.in0 = sdvo_priv->controlled_output; | 1169 | in_out.in0 = sdvo_priv->attached_output; |
1208 | in_out.in1 = 0; | 1170 | in_out.in1 = 0; |
1209 | 1171 | ||
1210 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, | 1172 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, |
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1230 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { | 1192 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { |
1231 | /* Set the output timing to the screen */ | 1193 | /* Set the output timing to the screen */ |
1232 | intel_sdvo_set_target_output(intel_encoder, | 1194 | intel_sdvo_set_target_output(intel_encoder, |
1233 | sdvo_priv->controlled_output); | 1195 | sdvo_priv->attached_output); |
1234 | intel_sdvo_set_output_timing(intel_encoder, &input_dtd); | 1196 | intel_sdvo_set_output_timing(intel_encoder, &input_dtd); |
1235 | } | 1197 | } |
1236 | 1198 | ||
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1352 | 1314 | ||
1353 | if (0) | 1315 | if (0) |
1354 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); | 1316 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
1355 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); | 1317 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); |
1356 | } | 1318 | } |
1357 | return; | 1319 | return; |
1358 | } | 1320 | } |
1359 | 1321 | ||
1360 | static void intel_sdvo_save(struct drm_connector *connector) | ||
1361 | { | ||
1362 | struct drm_device *dev = connector->dev; | ||
1363 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1364 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
1365 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1366 | int o; | ||
1367 | |||
1368 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); | ||
1369 | intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); | ||
1370 | |||
1371 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | ||
1372 | intel_sdvo_set_target_input(intel_encoder, true, false); | ||
1373 | intel_sdvo_get_input_timing(intel_encoder, | ||
1374 | &sdvo_priv->save_input_dtd_1); | ||
1375 | } | ||
1376 | |||
1377 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | ||
1378 | intel_sdvo_set_target_input(intel_encoder, false, true); | ||
1379 | intel_sdvo_get_input_timing(intel_encoder, | ||
1380 | &sdvo_priv->save_input_dtd_2); | ||
1381 | } | ||
1382 | |||
1383 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | ||
1384 | { | ||
1385 | u16 this_output = (1 << o); | ||
1386 | if (sdvo_priv->caps.output_flags & this_output) | ||
1387 | { | ||
1388 | intel_sdvo_set_target_output(intel_encoder, this_output); | ||
1389 | intel_sdvo_get_output_timing(intel_encoder, | ||
1390 | &sdvo_priv->save_output_dtd[o]); | ||
1391 | } | ||
1392 | } | ||
1393 | if (sdvo_priv->is_tv) { | ||
1394 | /* XXX: Save TV format/enhancements. */ | ||
1395 | } | ||
1396 | |||
1397 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); | ||
1398 | } | ||
1399 | |||
1400 | static void intel_sdvo_restore(struct drm_connector *connector) | ||
1401 | { | ||
1402 | struct drm_device *dev = connector->dev; | ||
1403 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
1404 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1405 | int o; | ||
1406 | int i; | ||
1407 | bool input1, input2; | ||
1408 | u8 status; | ||
1409 | |||
1410 | intel_sdvo_set_active_outputs(intel_encoder, 0); | ||
1411 | |||
1412 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | ||
1413 | { | ||
1414 | u16 this_output = (1 << o); | ||
1415 | if (sdvo_priv->caps.output_flags & this_output) { | ||
1416 | intel_sdvo_set_target_output(intel_encoder, this_output); | ||
1417 | intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); | ||
1418 | } | ||
1419 | } | ||
1420 | |||
1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | ||
1422 | intel_sdvo_set_target_input(intel_encoder, true, false); | ||
1423 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); | ||
1424 | } | ||
1425 | |||
1426 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | ||
1427 | intel_sdvo_set_target_input(intel_encoder, false, true); | ||
1428 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); | ||
1429 | } | ||
1430 | |||
1431 | intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); | ||
1432 | |||
1433 | if (sdvo_priv->is_tv) { | ||
1434 | /* XXX: Restore TV format/enhancements. */ | ||
1435 | } | ||
1436 | |||
1437 | intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); | ||
1438 | |||
1439 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) | ||
1440 | { | ||
1441 | for (i = 0; i < 2; i++) | ||
1442 | intel_wait_for_vblank(dev); | ||
1443 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); | ||
1444 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) | ||
1445 | DRM_DEBUG_KMS("First %s output reported failure to " | ||
1446 | "sync\n", SDVO_NAME(sdvo_priv)); | ||
1447 | } | ||
1448 | |||
1449 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); | ||
1450 | } | ||
1451 | |||
1452 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | 1322 | static int intel_sdvo_mode_valid(struct drm_connector *connector, |
1453 | struct drm_display_mode *mode) | 1323 | struct drm_display_mode *mode) |
1454 | { | 1324 | { |
1455 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1325 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1326 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1456 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1327 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1457 | 1328 | ||
1458 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1329 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str | |||
1490 | return true; | 1361 | return true; |
1491 | } | 1362 | } |
1492 | 1363 | ||
1364 | /* Unused: kept under #if 0 for reference. */ | ||
1365 | #if 0 | ||
1493 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | 1366 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) |
1494 | { | 1367 | { |
1495 | struct drm_connector *connector = NULL; | 1368 | struct drm_connector *connector = NULL; |
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1560 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1433 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1561 | intel_sdvo_read_response(intel_encoder, &response, 2); | 1434 | intel_sdvo_read_response(intel_encoder, &response, 2); |
1562 | } | 1435 | } |
1436 | #endif | ||
1563 | 1437 | ||
1564 | static bool | 1438 | static bool |
1565 | intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) | 1439 | intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) |
@@ -1598,12 +1472,17 @@ static struct drm_connector * | |||
1598 | intel_find_analog_connector(struct drm_device *dev) | 1472 | intel_find_analog_connector(struct drm_device *dev) |
1599 | { | 1473 | { |
1600 | struct drm_connector *connector; | 1474 | struct drm_connector *connector; |
1475 | struct drm_encoder *encoder; | ||
1601 | struct intel_encoder *intel_encoder; | 1476 | struct intel_encoder *intel_encoder; |
1602 | 1477 | ||
1603 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1478 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1604 | intel_encoder = to_intel_encoder(connector); | 1479 | intel_encoder = enc_to_intel_encoder(encoder); |
1605 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) | 1480 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { |
1606 | return connector; | 1481 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1482 | if (connector && encoder == intel_attached_encoder(connector)) | ||
1483 | return connector; | ||
1484 | } | ||
1485 | } | ||
1607 | } | 1486 | } |
1608 | return NULL; | 1487 | return NULL; |
1609 | } | 1488 | } |
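Note on the hunk above: intel_find_analog_connector() now walks the encoder list first and then returns the connector whose attached encoder matches, rather than downcasting connectors directly. A small user-space model of that two-level lookup, with arrays standing in for the DRM mode_config lists; all names here are illustrative.

/* User-space model of the two-level lookup: pick an analog encoder, then
 * return the connector attached to it. Arrays stand in for the
 * encoder_list/connector_list. */
#include <stddef.h>

enum { OUTPUT_ANALOG = 1, OUTPUT_SDVO = 2 };

struct enc { int type; };
struct con { struct enc *attached; };

static struct con *find_analog_connector(struct enc *encs, size_t n_enc,
					 struct con *cons, size_t n_con)
{
	for (size_t i = 0; i < n_enc; i++) {
		if (encs[i].type != OUTPUT_ANALOG)
			continue;
		for (size_t j = 0; j < n_con; j++)
			if (cons[j].attached == &encs[i])
				return &cons[j];
	}
	return NULL;
}

int main(void)
{
	struct enc e[2] = { { OUTPUT_SDVO }, { OUTPUT_ANALOG } };
	struct con c[1] = { { &e[1] } };

	return find_analog_connector(e, 2, c, 1) == &c[0] ? 0 : 1;
}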
@@ -1627,12 +1506,13 @@ intel_analog_is_connected(struct drm_device *dev) | |||
1627 | enum drm_connector_status | 1506 | enum drm_connector_status |
1628 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | 1507 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) |
1629 | { | 1508 | { |
1630 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1509 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1510 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1631 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1511 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1632 | enum drm_connector_status status = connector_status_connected; | 1512 | enum drm_connector_status status = connector_status_connected; |
1633 | struct edid *edid = NULL; | 1513 | struct edid *edid = NULL; |
1634 | 1514 | ||
1635 | edid = drm_get_edid(&intel_encoder->base, | 1515 | edid = drm_get_edid(connector, |
1636 | intel_encoder->ddc_bus); | 1516 | intel_encoder->ddc_bus); |
1637 | 1517 | ||
1638 | /* This is only applied to SDVO cards with multiple outputs */ | 1518 | /* This is only applied to SDVO cards with multiple outputs */ |
@@ -1646,7 +1526,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1646 | */ | 1526 | */ |
1647 | while(temp_ddc > 1) { | 1527 | while(temp_ddc > 1) { |
1648 | sdvo_priv->ddc_bus = temp_ddc; | 1528 | sdvo_priv->ddc_bus = temp_ddc; |
1649 | edid = drm_get_edid(&intel_encoder->base, | 1529 | edid = drm_get_edid(connector, |
1650 | intel_encoder->ddc_bus); | 1530 | intel_encoder->ddc_bus); |
1651 | if (edid) { | 1531 | if (edid) { |
1652 | /* | 1532 | /* |
@@ -1666,8 +1546,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1666 | */ | 1546 | */ |
1667 | if (edid == NULL && | 1547 | if (edid == NULL && |
1668 | sdvo_priv->analog_ddc_bus && | 1548 | sdvo_priv->analog_ddc_bus && |
1669 | !intel_analog_is_connected(intel_encoder->base.dev)) | 1549 | !intel_analog_is_connected(connector->dev)) |
1670 | edid = drm_get_edid(&intel_encoder->base, | 1550 | edid = drm_get_edid(connector, |
1671 | sdvo_priv->analog_ddc_bus); | 1551 | sdvo_priv->analog_ddc_bus); |
1672 | if (edid != NULL) { | 1552 | if (edid != NULL) { |
1673 | /* Don't report the output as connected if it's a DVI-I | 1553 | /* Don't report the output as connected if it's a DVI-I |
@@ -1682,7 +1562,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1682 | } | 1562 | } |
1683 | 1563 | ||
1684 | kfree(edid); | 1564 | kfree(edid); |
1685 | intel_encoder->base.display_info.raw_edid = NULL; | 1565 | connector->display_info.raw_edid = NULL; |
1686 | 1566 | ||
1687 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | 1567 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) |
1688 | status = connector_status_disconnected; | 1568 | status = connector_status_disconnected; |
@@ -1694,8 +1574,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1694 | { | 1574 | { |
1695 | uint16_t response; | 1575 | uint16_t response; |
1696 | u8 status; | 1576 | u8 status; |
1697 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1577 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1578 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1579 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1698 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1580 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1581 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1582 | enum drm_connector_status ret; | ||
1699 | 1583 | ||
1700 | intel_sdvo_write_cmd(intel_encoder, | 1584 | intel_sdvo_write_cmd(intel_encoder, |
1701 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1585 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
@@ -1713,24 +1597,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1713 | if (response == 0) | 1597 | if (response == 0) |
1714 | return connector_status_disconnected; | 1598 | return connector_status_disconnected; |
1715 | 1599 | ||
1716 | if (intel_sdvo_multifunc_encoder(intel_encoder) && | 1600 | sdvo_priv->attached_output = response; |
1717 | sdvo_priv->attached_output != response) { | 1601 | |
1718 | if (sdvo_priv->controlled_output != response && | 1602 | if ((sdvo_connector->output_flag & response) == 0) |
1719 | intel_sdvo_output_setup(intel_encoder, response) != true) | 1603 | ret = connector_status_disconnected; |
1720 | return connector_status_unknown; | 1604 | else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) |
1721 | sdvo_priv->attached_output = response; | 1605 | ret = intel_sdvo_hdmi_sink_detect(connector, response); |
1606 | else | ||
1607 | ret = connector_status_connected; | ||
1608 | |||
1609 | /* May update encoder flags, e.g. the TV clock requirement for SDVO TV, etc. */ | ||
1610 | if (ret == connector_status_connected) { | ||
1611 | sdvo_priv->is_tv = false; | ||
1612 | sdvo_priv->is_lvds = false; | ||
1613 | intel_encoder->needs_tv_clock = false; | ||
1614 | |||
1615 | if (response & SDVO_TV_MASK) { | ||
1616 | sdvo_priv->is_tv = true; | ||
1617 | intel_encoder->needs_tv_clock = true; | ||
1618 | } | ||
1619 | if (response & SDVO_LVDS_MASK) | ||
1620 | sdvo_priv->is_lvds = true; | ||
1722 | } | 1621 | } |
1723 | return intel_sdvo_hdmi_sink_detect(connector, response); | 1622 | |
1623 | return ret; | ||
1724 | } | 1624 | } |
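Note on the detect rewrite above: the reported displays are recorded in attached_output, the connector is treated as disconnected unless the response overlaps its own output_flag, TMDS responses are deferred to the HDMI-sink check, and the TV/LVDS bookkeeping is refreshed only for a connected result. A standalone sketch of just that decision, with placeholder bit masks and the HDMI check reduced to a stub.

/* User-space model of the detect decision; masks are placeholders and the
 * HDMI-sink probe stands in for intel_sdvo_hdmi_sink_detect(). */
#include <stdint.h>
#include <stdbool.h>

#define TMDS_MASK 0x0101u	/* placeholder for SDVO_OUTPUT_TMDS0/1 */
#define TV_MASK   0x000cu	/* placeholder for CVBS0 | SVID0 */
#define LVDS_MASK 0x4040u	/* placeholder for LVDS0/1 */

enum status { DISCONNECTED, CONNECTED };

static enum status hdmi_sink_detect(uint16_t response)
{
	(void)response;		/* stubbed out */
	return CONNECTED;
}

static enum status sdvo_detect(uint16_t output_flag, uint16_t response,
			       bool *is_tv, bool *is_lvds)
{
	enum status ret;

	if ((output_flag & response) == 0)
		ret = DISCONNECTED;
	else if (response & TMDS_MASK)
		ret = hdmi_sink_detect(response);
	else
		ret = CONNECTED;

	if (ret == CONNECTED) {	/* refresh per-connection type flags */
		*is_tv = response & TV_MASK;
		*is_lvds = response & LVDS_MASK;
	}
	return ret;
}

int main(void)
{
	bool tv, lvds;

	return sdvo_detect(TV_MASK, 0x0004u, &tv, &lvds) == CONNECTED ? 0 : 1;
}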
1725 | 1625 | ||
1726 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1626 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
1727 | { | 1627 | { |
1728 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1628 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1629 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1729 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1630 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1730 | int num_modes; | 1631 | int num_modes; |
1731 | 1632 | ||
1732 | /* set the bus switch and get the modes */ | 1633 | /* set the bus switch and get the modes */ |
1733 | num_modes = intel_ddc_get_modes(intel_encoder); | 1634 | num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
1734 | 1635 | ||
1735 | /* | 1636 | /* |
1736 | * Mac mini hack. On this device, the DVI-I connector shares one DDC | 1637 | * Mac mini hack. On this device, the DVI-I connector shares one DDC |
@@ -1740,17 +1641,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1740 | */ | 1641 | */ |
1741 | if (num_modes == 0 && | 1642 | if (num_modes == 0 && |
1742 | sdvo_priv->analog_ddc_bus && | 1643 | sdvo_priv->analog_ddc_bus && |
1743 | !intel_analog_is_connected(intel_encoder->base.dev)) { | 1644 | !intel_analog_is_connected(connector->dev)) { |
1744 | struct i2c_adapter *digital_ddc_bus; | ||
1745 | |||
1746 | /* Switch to the analog ddc bus and try that | 1645 | /* Switch to the analog ddc bus and try that |
1747 | */ | 1646 | */ |
1748 | digital_ddc_bus = intel_encoder->ddc_bus; | 1647 | (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); |
1749 | intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; | ||
1750 | |||
1751 | (void) intel_ddc_get_modes(intel_encoder); | ||
1752 | |||
1753 | intel_encoder->ddc_bus = digital_ddc_bus; | ||
1754 | } | 1648 | } |
1755 | } | 1649 | } |
1756 | 1650 | ||
@@ -1821,8 +1715,9 @@ struct drm_display_mode sdvo_tv_modes[] = { | |||
1821 | 1715 | ||
1822 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | 1716 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) |
1823 | { | 1717 | { |
1824 | struct intel_encoder *output = to_intel_encoder(connector); | 1718 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1825 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1719 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1720 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1826 | struct intel_sdvo_sdtv_resolution_request tv_res; | 1721 | struct intel_sdvo_sdtv_resolution_request tv_res; |
1827 | uint32_t reply = 0, format_map = 0; | 1722 | uint32_t reply = 0, format_map = 0; |
1828 | int i; | 1723 | int i; |
@@ -1842,11 +1737,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1842 | sizeof(format_map) ? sizeof(format_map) : | 1737 | sizeof(format_map) ? sizeof(format_map) : |
1843 | sizeof(struct intel_sdvo_sdtv_resolution_request)); | 1738 | sizeof(struct intel_sdvo_sdtv_resolution_request)); |
1844 | 1739 | ||
1845 | intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); | 1740 | intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); |
1846 | 1741 | ||
1847 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, | 1742 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, |
1848 | &tv_res, sizeof(tv_res)); | 1743 | &tv_res, sizeof(tv_res)); |
1849 | status = intel_sdvo_read_response(output, &reply, 3); | 1744 | status = intel_sdvo_read_response(intel_encoder, &reply, 3); |
1850 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1745 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1851 | return; | 1746 | return; |
1852 | 1747 | ||
@@ -1863,7 +1758,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1863 | 1758 | ||
1864 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1759 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1865 | { | 1760 | { |
1866 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1761 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1762 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1867 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1763 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1868 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1764 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1869 | struct drm_display_mode *newmode; | 1765 | struct drm_display_mode *newmode; |
@@ -1873,7 +1769,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1873 | * Assume that the preferred modes are | 1769 | * Assume that the preferred modes are |
1874 | * arranged in priority order. | 1770 | * arranged in priority order. |
1875 | */ | 1771 | */ |
1876 | intel_ddc_get_modes(intel_encoder); | 1772 | intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
1877 | if (list_empty(&connector->probed_modes) == false) | 1773 | if (list_empty(&connector->probed_modes) == false) |
1878 | goto end; | 1774 | goto end; |
1879 | 1775 | ||
@@ -1902,12 +1798,12 @@ end: | |||
1902 | 1798 | ||
1903 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1799 | static int intel_sdvo_get_modes(struct drm_connector *connector) |
1904 | { | 1800 | { |
1905 | struct intel_encoder *output = to_intel_encoder(connector); | 1801 | struct intel_connector *intel_connector = to_intel_connector(connector); |
1906 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1802 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; |
1907 | 1803 | ||
1908 | if (sdvo_priv->is_tv) | 1804 | if (IS_TV(sdvo_connector)) |
1909 | intel_sdvo_get_tv_modes(connector); | 1805 | intel_sdvo_get_tv_modes(connector); |
1910 | else if (sdvo_priv->is_lvds == true) | 1806 | else if (IS_LVDS(sdvo_connector)) |
1911 | intel_sdvo_get_lvds_modes(connector); | 1807 | intel_sdvo_get_lvds_modes(connector); |
1912 | else | 1808 | else |
1913 | intel_sdvo_get_ddc_modes(connector); | 1809 | intel_sdvo_get_ddc_modes(connector); |
@@ -1920,11 +1816,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
1920 | static | 1816 | static |
1921 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | 1817 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) |
1922 | { | 1818 | { |
1923 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1819 | struct intel_connector *intel_connector = to_intel_connector(connector); |
1924 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1820 | struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; |
1925 | struct drm_device *dev = connector->dev; | 1821 | struct drm_device *dev = connector->dev; |
1926 | 1822 | ||
1927 | if (sdvo_priv->is_tv) { | 1823 | if (IS_TV(sdvo_priv)) { |
1928 | if (sdvo_priv->left_property) | 1824 | if (sdvo_priv->left_property) |
1929 | drm_property_destroy(dev, sdvo_priv->left_property); | 1825 | drm_property_destroy(dev, sdvo_priv->left_property); |
1930 | if (sdvo_priv->right_property) | 1826 | if (sdvo_priv->right_property) |
@@ -1937,8 +1833,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | |||
1937 | drm_property_destroy(dev, sdvo_priv->hpos_property); | 1833 | drm_property_destroy(dev, sdvo_priv->hpos_property); |
1938 | if (sdvo_priv->vpos_property) | 1834 | if (sdvo_priv->vpos_property) |
1939 | drm_property_destroy(dev, sdvo_priv->vpos_property); | 1835 | drm_property_destroy(dev, sdvo_priv->vpos_property); |
1940 | } | ||
1941 | if (sdvo_priv->is_tv) { | ||
1942 | if (sdvo_priv->saturation_property) | 1836 | if (sdvo_priv->saturation_property) |
1943 | drm_property_destroy(dev, | 1837 | drm_property_destroy(dev, |
1944 | sdvo_priv->saturation_property); | 1838 | sdvo_priv->saturation_property); |
@@ -1948,7 +1842,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | |||
1948 | if (sdvo_priv->hue_property) | 1842 | if (sdvo_priv->hue_property) |
1949 | drm_property_destroy(dev, sdvo_priv->hue_property); | 1843 | drm_property_destroy(dev, sdvo_priv->hue_property); |
1950 | } | 1844 | } |
1951 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 1845 | if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { |
1952 | if (sdvo_priv->brightness_property) | 1846 | if (sdvo_priv->brightness_property) |
1953 | drm_property_destroy(dev, | 1847 | drm_property_destroy(dev, |
1954 | sdvo_priv->brightness_property); | 1848 | sdvo_priv->brightness_property); |
@@ -1958,31 +1852,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | |||
1958 | 1852 | ||
1959 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1853 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1960 | { | 1854 | { |
1961 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1855 | struct intel_connector *intel_connector = to_intel_connector(connector); |
1962 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1856 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; |
1963 | 1857 | ||
1964 | if (intel_encoder->i2c_bus) | 1858 | if (sdvo_connector->tv_format_property) |
1965 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
1966 | if (intel_encoder->ddc_bus) | ||
1967 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
1968 | if (sdvo_priv->analog_ddc_bus) | ||
1969 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | ||
1970 | |||
1971 | if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) | ||
1972 | drm_mode_destroy(connector->dev, | ||
1973 | sdvo_priv->sdvo_lvds_fixed_mode); | ||
1974 | |||
1975 | if (sdvo_priv->tv_format_property) | ||
1976 | drm_property_destroy(connector->dev, | 1859 | drm_property_destroy(connector->dev, |
1977 | sdvo_priv->tv_format_property); | 1860 | sdvo_connector->tv_format_property); |
1978 | |||
1979 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) | ||
1980 | intel_sdvo_destroy_enhance_property(connector); | ||
1981 | 1861 | ||
1862 | intel_sdvo_destroy_enhance_property(connector); | ||
1982 | drm_sysfs_connector_remove(connector); | 1863 | drm_sysfs_connector_remove(connector); |
1983 | drm_connector_cleanup(connector); | 1864 | drm_connector_cleanup(connector); |
1984 | 1865 | kfree(connector); | |
1985 | kfree(intel_encoder); | ||
1986 | } | 1866 | } |
1987 | 1867 | ||
1988 | static int | 1868 | static int |
@@ -1990,9 +1870,11 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1990 | struct drm_property *property, | 1870 | struct drm_property *property, |
1991 | uint64_t val) | 1871 | uint64_t val) |
1992 | { | 1872 | { |
1993 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1873 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1874 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1994 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1875 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1995 | struct drm_encoder *encoder = &intel_encoder->enc; | 1876 | struct intel_connector *intel_connector = to_intel_connector(connector); |
1877 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1996 | struct drm_crtc *crtc = encoder->crtc; | 1878 | struct drm_crtc *crtc = encoder->crtc; |
1997 | int ret = 0; | 1879 | int ret = 0; |
1998 | bool changed = false; | 1880 | bool changed = false; |
@@ -2003,101 +1885,101 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
2003 | if (ret < 0) | 1885 | if (ret < 0) |
2004 | goto out; | 1886 | goto out; |
2005 | 1887 | ||
2006 | if (property == sdvo_priv->tv_format_property) { | 1888 | if (property == sdvo_connector->tv_format_property) { |
2007 | if (val >= TV_FORMAT_NUM) { | 1889 | if (val >= TV_FORMAT_NUM) { |
2008 | ret = -EINVAL; | 1890 | ret = -EINVAL; |
2009 | goto out; | 1891 | goto out; |
2010 | } | 1892 | } |
2011 | if (sdvo_priv->tv_format_name == | 1893 | if (sdvo_priv->tv_format_name == |
2012 | sdvo_priv->tv_format_supported[val]) | 1894 | sdvo_connector->tv_format_supported[val]) |
2013 | goto out; | 1895 | goto out; |
2014 | 1896 | ||
2015 | sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val]; | 1897 | sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; |
2016 | changed = true; | 1898 | changed = true; |
2017 | } | 1899 | } |
2018 | 1900 | ||
2019 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 1901 | if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { |
2020 | cmd = 0; | 1902 | cmd = 0; |
2021 | temp_value = val; | 1903 | temp_value = val; |
2022 | if (sdvo_priv->left_property == property) { | 1904 | if (sdvo_connector->left_property == property) { |
2023 | drm_connector_property_set_value(connector, | 1905 | drm_connector_property_set_value(connector, |
2024 | sdvo_priv->right_property, val); | 1906 | sdvo_connector->right_property, val); |
2025 | if (sdvo_priv->left_margin == temp_value) | 1907 | if (sdvo_connector->left_margin == temp_value) |
2026 | goto out; | 1908 | goto out; |
2027 | 1909 | ||
2028 | sdvo_priv->left_margin = temp_value; | 1910 | sdvo_connector->left_margin = temp_value; |
2029 | sdvo_priv->right_margin = temp_value; | 1911 | sdvo_connector->right_margin = temp_value; |
2030 | temp_value = sdvo_priv->max_hscan - | 1912 | temp_value = sdvo_connector->max_hscan - |
2031 | sdvo_priv->left_margin; | 1913 | sdvo_connector->left_margin; |
2032 | cmd = SDVO_CMD_SET_OVERSCAN_H; | 1914 | cmd = SDVO_CMD_SET_OVERSCAN_H; |
2033 | } else if (sdvo_priv->right_property == property) { | 1915 | } else if (sdvo_connector->right_property == property) { |
2034 | drm_connector_property_set_value(connector, | 1916 | drm_connector_property_set_value(connector, |
2035 | sdvo_priv->left_property, val); | 1917 | sdvo_connector->left_property, val); |
2036 | if (sdvo_priv->right_margin == temp_value) | 1918 | if (sdvo_connector->right_margin == temp_value) |
2037 | goto out; | 1919 | goto out; |
2038 | 1920 | ||
2039 | sdvo_priv->left_margin = temp_value; | 1921 | sdvo_connector->left_margin = temp_value; |
2040 | sdvo_priv->right_margin = temp_value; | 1922 | sdvo_connector->right_margin = temp_value; |
2041 | temp_value = sdvo_priv->max_hscan - | 1923 | temp_value = sdvo_connector->max_hscan - |
2042 | sdvo_priv->left_margin; | 1924 | sdvo_connector->left_margin; |
2043 | cmd = SDVO_CMD_SET_OVERSCAN_H; | 1925 | cmd = SDVO_CMD_SET_OVERSCAN_H; |
2044 | } else if (sdvo_priv->top_property == property) { | 1926 | } else if (sdvo_connector->top_property == property) { |
2045 | drm_connector_property_set_value(connector, | 1927 | drm_connector_property_set_value(connector, |
2046 | sdvo_priv->bottom_property, val); | 1928 | sdvo_connector->bottom_property, val); |
2047 | if (sdvo_priv->top_margin == temp_value) | 1929 | if (sdvo_connector->top_margin == temp_value) |
2048 | goto out; | 1930 | goto out; |
2049 | 1931 | ||
2050 | sdvo_priv->top_margin = temp_value; | 1932 | sdvo_connector->top_margin = temp_value; |
2051 | sdvo_priv->bottom_margin = temp_value; | 1933 | sdvo_connector->bottom_margin = temp_value; |
2052 | temp_value = sdvo_priv->max_vscan - | 1934 | temp_value = sdvo_connector->max_vscan - |
2053 | sdvo_priv->top_margin; | 1935 | sdvo_connector->top_margin; |
2054 | cmd = SDVO_CMD_SET_OVERSCAN_V; | 1936 | cmd = SDVO_CMD_SET_OVERSCAN_V; |
2055 | } else if (sdvo_priv->bottom_property == property) { | 1937 | } else if (sdvo_connector->bottom_property == property) { |
2056 | drm_connector_property_set_value(connector, | 1938 | drm_connector_property_set_value(connector, |
2057 | sdvo_priv->top_property, val); | 1939 | sdvo_connector->top_property, val); |
2058 | if (sdvo_priv->bottom_margin == temp_value) | 1940 | if (sdvo_connector->bottom_margin == temp_value) |
2059 | goto out; | 1941 | goto out; |
2060 | sdvo_priv->top_margin = temp_value; | 1942 | sdvo_connector->top_margin = temp_value; |
2061 | sdvo_priv->bottom_margin = temp_value; | 1943 | sdvo_connector->bottom_margin = temp_value; |
2062 | temp_value = sdvo_priv->max_vscan - | 1944 | temp_value = sdvo_connector->max_vscan - |
2063 | sdvo_priv->top_margin; | 1945 | sdvo_connector->top_margin; |
2064 | cmd = SDVO_CMD_SET_OVERSCAN_V; | 1946 | cmd = SDVO_CMD_SET_OVERSCAN_V; |
2065 | } else if (sdvo_priv->hpos_property == property) { | 1947 | } else if (sdvo_connector->hpos_property == property) { |
2066 | if (sdvo_priv->cur_hpos == temp_value) | 1948 | if (sdvo_connector->cur_hpos == temp_value) |
2067 | goto out; | 1949 | goto out; |
2068 | 1950 | ||
2069 | cmd = SDVO_CMD_SET_POSITION_H; | 1951 | cmd = SDVO_CMD_SET_POSITION_H; |
2070 | sdvo_priv->cur_hpos = temp_value; | 1952 | sdvo_connector->cur_hpos = temp_value; |
2071 | } else if (sdvo_priv->vpos_property == property) { | 1953 | } else if (sdvo_connector->vpos_property == property) { |
2072 | if (sdvo_priv->cur_vpos == temp_value) | 1954 | if (sdvo_connector->cur_vpos == temp_value) |
2073 | goto out; | 1955 | goto out; |
2074 | 1956 | ||
2075 | cmd = SDVO_CMD_SET_POSITION_V; | 1957 | cmd = SDVO_CMD_SET_POSITION_V; |
2076 | sdvo_priv->cur_vpos = temp_value; | 1958 | sdvo_connector->cur_vpos = temp_value; |
2077 | } else if (sdvo_priv->saturation_property == property) { | 1959 | } else if (sdvo_connector->saturation_property == property) { |
2078 | if (sdvo_priv->cur_saturation == temp_value) | 1960 | if (sdvo_connector->cur_saturation == temp_value) |
2079 | goto out; | 1961 | goto out; |
2080 | 1962 | ||
2081 | cmd = SDVO_CMD_SET_SATURATION; | 1963 | cmd = SDVO_CMD_SET_SATURATION; |
2082 | sdvo_priv->cur_saturation = temp_value; | 1964 | sdvo_connector->cur_saturation = temp_value; |
2083 | } else if (sdvo_priv->contrast_property == property) { | 1965 | } else if (sdvo_connector->contrast_property == property) { |
2084 | if (sdvo_priv->cur_contrast == temp_value) | 1966 | if (sdvo_connector->cur_contrast == temp_value) |
2085 | goto out; | 1967 | goto out; |
2086 | 1968 | ||
2087 | cmd = SDVO_CMD_SET_CONTRAST; | 1969 | cmd = SDVO_CMD_SET_CONTRAST; |
2088 | sdvo_priv->cur_contrast = temp_value; | 1970 | sdvo_connector->cur_contrast = temp_value; |
2089 | } else if (sdvo_priv->hue_property == property) { | 1971 | } else if (sdvo_connector->hue_property == property) { |
2090 | if (sdvo_priv->cur_hue == temp_value) | 1972 | if (sdvo_connector->cur_hue == temp_value) |
2091 | goto out; | 1973 | goto out; |
2092 | 1974 | ||
2093 | cmd = SDVO_CMD_SET_HUE; | 1975 | cmd = SDVO_CMD_SET_HUE; |
2094 | sdvo_priv->cur_hue = temp_value; | 1976 | sdvo_connector->cur_hue = temp_value; |
2095 | } else if (sdvo_priv->brightness_property == property) { | 1977 | } else if (sdvo_connector->brightness_property == property) { |
2096 | if (sdvo_priv->cur_brightness == temp_value) | 1978 | if (sdvo_connector->cur_brightness == temp_value) |
2097 | goto out; | 1979 | goto out; |
2098 | 1980 | ||
2099 | cmd = SDVO_CMD_SET_BRIGHTNESS; | 1981 | cmd = SDVO_CMD_SET_BRIGHTNESS; |
2100 | sdvo_priv->cur_brightness = temp_value; | 1982 | sdvo_connector->cur_brightness = temp_value; |
2101 | } | 1983 | } |
2102 | if (cmd) { | 1984 | if (cmd) { |
2103 | intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); | 1985 | intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); |
@@ -2127,8 +2009,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { | |||
2127 | 2009 | ||
2128 | static const struct drm_connector_funcs intel_sdvo_connector_funcs = { | 2010 | static const struct drm_connector_funcs intel_sdvo_connector_funcs = { |
2129 | .dpms = drm_helper_connector_dpms, | 2011 | .dpms = drm_helper_connector_dpms, |
2130 | .save = intel_sdvo_save, | ||
2131 | .restore = intel_sdvo_restore, | ||
2132 | .detect = intel_sdvo_detect, | 2012 | .detect = intel_sdvo_detect, |
2133 | .fill_modes = drm_helper_probe_single_connector_modes, | 2013 | .fill_modes = drm_helper_probe_single_connector_modes, |
2134 | .set_property = intel_sdvo_set_property, | 2014 | .set_property = intel_sdvo_set_property, |
@@ -2138,12 +2018,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { | |||
2138 | static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { | 2018 | static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { |
2139 | .get_modes = intel_sdvo_get_modes, | 2019 | .get_modes = intel_sdvo_get_modes, |
2140 | .mode_valid = intel_sdvo_mode_valid, | 2020 | .mode_valid = intel_sdvo_mode_valid, |
2141 | .best_encoder = intel_best_encoder, | 2021 | .best_encoder = intel_attached_encoder, |
2142 | }; | 2022 | }; |
2143 | 2023 | ||
2144 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) | 2024 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) |
2145 | { | 2025 | { |
2026 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
2027 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2028 | |||
2029 | if (intel_encoder->i2c_bus) | ||
2030 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
2031 | if (intel_encoder->ddc_bus) | ||
2032 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
2033 | if (sdvo_priv->analog_ddc_bus) | ||
2034 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | ||
2035 | |||
2036 | if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) | ||
2037 | drm_mode_destroy(encoder->dev, | ||
2038 | sdvo_priv->sdvo_lvds_fixed_mode); | ||
2039 | |||
2146 | drm_encoder_cleanup(encoder); | 2040 | drm_encoder_cleanup(encoder); |
2041 | kfree(intel_encoder); | ||
2147 | } | 2042 | } |
2148 | 2043 | ||
2149 | static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | 2044 | static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { |
@@ -2196,12 +2091,15 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) | |||
2196 | } | 2091 | } |
2197 | 2092 | ||
2198 | static bool | 2093 | static bool |
2199 | intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) | 2094 | intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) |
2200 | { | 2095 | { |
2201 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 2096 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
2202 | uint8_t status; | 2097 | uint8_t status; |
2203 | 2098 | ||
2204 | intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); | 2099 | if (device == 0) |
2100 | intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); | ||
2101 | else | ||
2102 | intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); | ||
2205 | 2103 | ||
2206 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); | 2104 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); |
2207 | status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); | 2105 | status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); |
@@ -2214,15 +2112,13 @@ static struct intel_encoder * | |||
2214 | intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) | 2112 | intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) |
2215 | { | 2113 | { |
2216 | struct drm_device *dev = chan->drm_dev; | 2114 | struct drm_device *dev = chan->drm_dev; |
2217 | struct drm_connector *connector; | 2115 | struct drm_encoder *encoder; |
2218 | struct intel_encoder *intel_encoder = NULL; | 2116 | struct intel_encoder *intel_encoder = NULL; |
2219 | 2117 | ||
2220 | list_for_each_entry(connector, | 2118 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
2221 | &dev->mode_config.connector_list, head) { | 2119 | intel_encoder = enc_to_intel_encoder(encoder); |
2222 | if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { | 2120 | if (intel_encoder->ddc_bus == &chan->adapter) |
2223 | intel_encoder = to_intel_encoder(connector); | ||
2224 | break; | 2121 | break; |
2225 | } | ||
2226 | } | 2122 | } |
2227 | return intel_encoder; | 2123 | return intel_encoder; |
2228 | } | 2124 | } |
@@ -2259,7 +2155,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) | |||
2259 | struct drm_i915_private *dev_priv = dev->dev_private; | 2155 | struct drm_i915_private *dev_priv = dev->dev_private; |
2260 | struct sdvo_device_mapping *my_mapping, *other_mapping; | 2156 | struct sdvo_device_mapping *my_mapping, *other_mapping; |
2261 | 2157 | ||
2262 | if (sdvo_reg == SDVOB) { | 2158 | if (IS_SDVOB(sdvo_reg)) { |
2263 | my_mapping = &dev_priv->sdvo_mappings[0]; | 2159 | my_mapping = &dev_priv->sdvo_mappings[0]; |
2264 | other_mapping = &dev_priv->sdvo_mappings[1]; | 2160 | other_mapping = &dev_priv->sdvo_mappings[1]; |
2265 | } else { | 2161 | } else { |
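The hunk above (and several below) switches the port check from a direct sdvo_reg == SDVOB comparison to an IS_SDVOB() helper so the same paths also cover the PCH-side SDVO/HDMI register on split-PCH parts. A minimal sketch of such a macro, assuming PCH_SDVOB names the PCH port B register (the real definition lives in the driver sources):

    /* Sketch only: treat either the GMCH or the PCH SDVO B register as port B. */
    #define IS_SDVOB(reg)	((reg) == SDVOB || (reg) == PCH_SDVOB)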
@@ -2284,120 +2180,235 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) | |||
2284 | /* No SDVO device info is found for another DVO port, | 2180 | /* No SDVO device info is found for another DVO port, |
2285 | * so use mapping assumption we had before BIOS parsing. | 2181 | * so use mapping assumption we had before BIOS parsing. |
2286 | */ | 2182 | */ |
2287 | if (sdvo_reg == SDVOB) | 2183 | if (IS_SDVOB(sdvo_reg)) |
2288 | return 0x70; | 2184 | return 0x70; |
2289 | else | 2185 | else |
2290 | return 0x72; | 2186 | return 0x72; |
2291 | } | 2187 | } |
2292 | 2188 | ||
2293 | static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id) | 2189 | static bool |
2190 | intel_sdvo_connector_alloc (struct intel_connector **ret) | ||
2294 | { | 2191 | { |
2295 | DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident); | 2192 | struct intel_connector *intel_connector; |
2296 | return 1; | 2193 | struct intel_sdvo_connector *sdvo_connector; |
2194 | |||
2195 | *ret = kzalloc(sizeof(*intel_connector) + | ||
2196 | sizeof(*sdvo_connector), GFP_KERNEL); | ||
2197 | if (!*ret) | ||
2198 | return false; | ||
2199 | |||
2200 | intel_connector = *ret; | ||
2201 | sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); | ||
2202 | intel_connector->dev_priv = sdvo_connector; | ||
2203 | |||
2204 | return true; | ||
2297 | } | 2205 | } |
2298 | 2206 | ||
2299 | static struct dmi_system_id intel_sdvo_bad_tv[] = { | 2207 | static void |
2300 | { | 2208 | intel_sdvo_connector_create (struct drm_encoder *encoder, |
2301 | .callback = intel_sdvo_bad_tv_callback, | 2209 | struct drm_connector *connector) |
2302 | .ident = "IntelG45/ICH10R/DME1737", | 2210 | { |
2303 | .matches = { | 2211 | drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, |
2304 | DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"), | 2212 | connector->connector_type); |
2305 | DMI_MATCH(DMI_PRODUCT_NAME, "4800784"), | ||
2306 | }, | ||
2307 | }, | ||
2308 | 2213 | ||
2309 | { } /* terminating entry */ | 2214 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); |
2310 | }; | 2215 | |
2216 | connector->interlace_allowed = 0; | ||
2217 | connector->doublescan_allowed = 0; | ||
2218 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
2219 | |||
2220 | drm_mode_connector_attach_encoder(connector, encoder); | ||
2221 | drm_sysfs_connector_add(connector); | ||
2222 | } | ||
2311 | 2223 | ||
2312 | static bool | 2224 | static bool |
2313 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) | 2225 | intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) |
2314 | { | 2226 | { |
2315 | struct drm_connector *connector = &intel_encoder->base; | ||
2316 | struct drm_encoder *encoder = &intel_encoder->enc; | 2227 | struct drm_encoder *encoder = &intel_encoder->enc; |
2317 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 2228 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2318 | bool ret = true, registered = false; | 2229 | struct drm_connector *connector; |
2230 | struct intel_connector *intel_connector; | ||
2231 | struct intel_sdvo_connector *sdvo_connector; | ||
2232 | |||
2233 | if (!intel_sdvo_connector_alloc(&intel_connector)) | ||
2234 | return false; | ||
2235 | |||
2236 | sdvo_connector = intel_connector->dev_priv; | ||
2237 | |||
2238 | if (device == 0) { | ||
2239 | sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; | ||
2240 | sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; | ||
2241 | } else if (device == 1) { | ||
2242 | sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; | ||
2243 | sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; | ||
2244 | } | ||
2245 | |||
2246 | connector = &intel_connector->base; | ||
2247 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | ||
2248 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | ||
2249 | |||
2250 | if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) | ||
2251 | && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) | ||
2252 | && sdvo_priv->is_hdmi) { | ||
2253 | /* enable hdmi encoding mode if supported */ | ||
2254 | intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); | ||
2255 | intel_sdvo_set_colorimetry(intel_encoder, | ||
2256 | SDVO_COLORIMETRY_RGB256); | ||
2257 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
2258 | } | ||
2259 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2260 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2261 | |||
2262 | intel_sdvo_connector_create(encoder, connector); | ||
2263 | |||
2264 | return true; | ||
2265 | } | ||
2266 | |||
2267 | static bool | ||
2268 | intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) | ||
2269 | { | ||
2270 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
2271 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2272 | struct drm_connector *connector; | ||
2273 | struct intel_connector *intel_connector; | ||
2274 | struct intel_sdvo_connector *sdvo_connector; | ||
2275 | |||
2276 | if (!intel_sdvo_connector_alloc(&intel_connector)) | ||
2277 | return false; | ||
2278 | |||
2279 | connector = &intel_connector->base; | ||
2280 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2281 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2282 | sdvo_connector = intel_connector->dev_priv; | ||
2283 | |||
2284 | sdvo_priv->controlled_output |= type; | ||
2285 | sdvo_connector->output_flag = type; | ||
2286 | |||
2287 | sdvo_priv->is_tv = true; | ||
2288 | intel_encoder->needs_tv_clock = true; | ||
2289 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2290 | |||
2291 | intel_sdvo_connector_create(encoder, connector); | ||
2292 | |||
2293 | intel_sdvo_tv_create_property(connector, type); | ||
2294 | |||
2295 | intel_sdvo_create_enhance_property(connector); | ||
2296 | |||
2297 | return true; | ||
2298 | } | ||
2299 | |||
2300 | static bool | ||
2301 | intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) | ||
2302 | { | ||
2303 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
2304 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2305 | struct drm_connector *connector; | ||
2306 | struct intel_connector *intel_connector; | ||
2307 | struct intel_sdvo_connector *sdvo_connector; | ||
2308 | |||
2309 | if (!intel_sdvo_connector_alloc(&intel_connector)) | ||
2310 | return false; | ||
2311 | |||
2312 | connector = &intel_connector->base; | ||
2313 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
2314 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
2315 | sdvo_connector = intel_connector->dev_priv; | ||
2316 | |||
2317 | if (device == 0) { | ||
2318 | sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; | ||
2319 | sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; | ||
2320 | } else if (device == 1) { | ||
2321 | sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; | ||
2322 | sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; | ||
2323 | } | ||
2324 | |||
2325 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2326 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2327 | |||
2328 | intel_sdvo_connector_create(encoder, connector); | ||
2329 | return true; | ||
2330 | } | ||
2331 | |||
2332 | static bool | ||
2333 | intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) | ||
2334 | { | ||
2335 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
2336 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2337 | struct drm_connector *connector; | ||
2338 | struct intel_connector *intel_connector; | ||
2339 | struct intel_sdvo_connector *sdvo_connector; | ||
2340 | |||
2341 | if (!intel_sdvo_connector_alloc(&intel_connector)) | ||
2342 | return false; | ||
2343 | |||
2344 | connector = &intel_connector->base; | ||
2345 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
2346 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
2347 | sdvo_connector = intel_connector->dev_priv; | ||
2348 | |||
2349 | sdvo_priv->is_lvds = true; | ||
2350 | |||
2351 | if (device == 0) { | ||
2352 | sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; | ||
2353 | sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; | ||
2354 | } else if (device == 1) { | ||
2355 | sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; | ||
2356 | sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; | ||
2357 | } | ||
2358 | |||
2359 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | ||
2360 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
2361 | |||
2362 | intel_sdvo_connector_create(encoder, connector); | ||
2363 | intel_sdvo_create_enhance_property(connector); | ||
2364 | return true; | ||
2365 | } | ||
2366 | |||
2367 | static bool | ||
2368 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) | ||
2369 | { | ||
2370 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2319 | 2371 | ||
2320 | sdvo_priv->is_tv = false; | 2372 | sdvo_priv->is_tv = false; |
2321 | intel_encoder->needs_tv_clock = false; | 2373 | intel_encoder->needs_tv_clock = false; |
2322 | sdvo_priv->is_lvds = false; | 2374 | sdvo_priv->is_lvds = false; |
2323 | 2375 | ||
2324 | if (device_is_registered(&connector->kdev)) { | 2376 | /* An SDVO XXX1 function may not exist unless the device also has the XXX0 function. */ |
2325 | drm_sysfs_connector_remove(connector); | ||
2326 | registered = true; | ||
2327 | } | ||
2328 | 2377 | ||
2329 | if (flags & | 2378 | if (flags & SDVO_OUTPUT_TMDS0) |
2330 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | 2379 | if (!intel_sdvo_dvi_init(intel_encoder, 0)) |
2331 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | 2380 | return false; |
2332 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | 2381 | |
2333 | else | 2382 | if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) |
2334 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | 2383 | if (!intel_sdvo_dvi_init(intel_encoder, 1)) |
2335 | 2384 | return false; | |
2336 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2385 | |
2337 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2386 | /* TV has no XXX1 function block */ |
2338 | 2387 | if (flags & SDVO_OUTPUT_SVID0) | |
2339 | if (intel_sdvo_get_supp_encode(intel_encoder, | 2388 | if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) |
2340 | &sdvo_priv->encode) && | 2389 | return false; |
2341 | intel_sdvo_get_digital_encoding_mode(intel_encoder) && | 2390 | |
2342 | sdvo_priv->is_hdmi) { | 2391 | if (flags & SDVO_OUTPUT_CVBS0) |
2343 | /* enable hdmi encoding mode if supported */ | 2392 | if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) |
2344 | intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); | 2393 | return false; |
2345 | intel_sdvo_set_colorimetry(intel_encoder, | ||
2346 | SDVO_COLORIMETRY_RGB256); | ||
2347 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
2348 | intel_encoder->clone_mask = | ||
2349 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2350 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2351 | } | ||
2352 | } else if ((flags & SDVO_OUTPUT_SVID0) && | ||
2353 | !dmi_check_system(intel_sdvo_bad_tv)) { | ||
2354 | |||
2355 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; | ||
2356 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2357 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2358 | sdvo_priv->is_tv = true; | ||
2359 | intel_encoder->needs_tv_clock = true; | ||
2360 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2361 | } else if (flags & SDVO_OUTPUT_RGB0) { | ||
2362 | |||
2363 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
2364 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
2365 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
2366 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2367 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2368 | } else if (flags & SDVO_OUTPUT_RGB1) { | ||
2369 | |||
2370 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | ||
2371 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
2372 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
2373 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | ||
2374 | (1 << INTEL_ANALOG_CLONE_BIT); | ||
2375 | } else if (flags & SDVO_OUTPUT_CVBS0) { | ||
2376 | |||
2377 | sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; | ||
2378 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2379 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2380 | sdvo_priv->is_tv = true; | ||
2381 | intel_encoder->needs_tv_clock = true; | ||
2382 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2383 | } else if (flags & SDVO_OUTPUT_LVDS0) { | ||
2384 | |||
2385 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | ||
2386 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
2387 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
2388 | sdvo_priv->is_lvds = true; | ||
2389 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | ||
2390 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
2391 | } else if (flags & SDVO_OUTPUT_LVDS1) { | ||
2392 | |||
2393 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | ||
2394 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
2395 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
2396 | sdvo_priv->is_lvds = true; | ||
2397 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | ||
2398 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | ||
2399 | } else { | ||
2400 | 2394 | ||
2395 | if (flags & SDVO_OUTPUT_RGB0) | ||
2396 | if (!intel_sdvo_analog_init(intel_encoder, 0)) | ||
2397 | return false; | ||
2398 | |||
2399 | if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) | ||
2400 | if (!intel_sdvo_analog_init(intel_encoder, 1)) | ||
2401 | return false; | ||
2402 | |||
2403 | if (flags & SDVO_OUTPUT_LVDS0) | ||
2404 | if (!intel_sdvo_lvds_init(intel_encoder, 0)) | ||
2405 | return false; | ||
2406 | |||
2407 | if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) | ||
2408 | if (!intel_sdvo_lvds_init(intel_encoder, 1)) | ||
2409 | return false; | ||
2410 | |||
2411 | if ((flags & SDVO_OUTPUT_MASK) == 0) { | ||
2401 | unsigned char bytes[2]; | 2412 | unsigned char bytes[2]; |
2402 | 2413 | ||
2403 | sdvo_priv->controlled_output = 0; | 2414 | sdvo_priv->controlled_output = 0; |
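intel_sdvo_connector_alloc() above allocates struct intel_connector and struct intel_sdvo_connector in a single kzalloc() and points intel_connector->dev_priv at the trailing SDVO part, so later code can recover the per-connector SDVO state from a bare drm_connector. A hedged sketch of that recovery step, assuming to_intel_connector() is the usual container_of() wrapper; the helper name below is illustrative only:

    /* Hypothetical helper: fetch the SDVO state stored behind a connector. */
    static struct intel_sdvo_connector *
    sdvo_connector_from_drm(struct drm_connector *connector)
    {
    	struct intel_connector *intel_connector = to_intel_connector(connector);

    	/* dev_priv was pointed at the trailing struct by intel_sdvo_connector_alloc() */
    	return intel_connector->dev_priv;
    }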
@@ -2405,28 +2416,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) | |||
2405 | DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", | 2416 | DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", |
2406 | SDVO_NAME(sdvo_priv), | 2417 | SDVO_NAME(sdvo_priv), |
2407 | bytes[0], bytes[1]); | 2418 | bytes[0], bytes[1]); |
2408 | ret = false; | 2419 | return false; |
2409 | } | 2420 | } |
2410 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 2421 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
2411 | 2422 | ||
2412 | if (ret && registered) | 2423 | return true; |
2413 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | ||
2414 | |||
2415 | |||
2416 | return ret; | ||
2417 | |||
2418 | } | 2424 | } |
2419 | 2425 | ||
2420 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) | 2426 | static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) |
2421 | { | 2427 | { |
2422 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 2428 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
2429 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
2423 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 2430 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2431 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
2432 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
2424 | struct intel_sdvo_tv_format format; | 2433 | struct intel_sdvo_tv_format format; |
2425 | uint32_t format_map, i; | 2434 | uint32_t format_map, i; |
2426 | uint8_t status; | 2435 | uint8_t status; |
2427 | 2436 | ||
2428 | intel_sdvo_set_target_output(intel_encoder, | 2437 | intel_sdvo_set_target_output(intel_encoder, type); |
2429 | sdvo_priv->controlled_output); | ||
2430 | 2438 | ||
2431 | intel_sdvo_write_cmd(intel_encoder, | 2439 | intel_sdvo_write_cmd(intel_encoder, |
2432 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); | 2440 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); |
@@ -2441,35 +2449,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) | |||
2441 | if (format_map == 0) | 2449 | if (format_map == 0) |
2442 | return; | 2450 | return; |
2443 | 2451 | ||
2444 | sdvo_priv->format_supported_num = 0; | 2452 | sdvo_connector->format_supported_num = 0; |
2445 | for (i = 0 ; i < TV_FORMAT_NUM; i++) | 2453 | for (i = 0 ; i < TV_FORMAT_NUM; i++) |
2446 | if (format_map & (1 << i)) { | 2454 | if (format_map & (1 << i)) { |
2447 | sdvo_priv->tv_format_supported | 2455 | sdvo_connector->tv_format_supported |
2448 | [sdvo_priv->format_supported_num++] = | 2456 | [sdvo_connector->format_supported_num++] = |
2449 | tv_format_names[i]; | 2457 | tv_format_names[i]; |
2450 | } | 2458 | } |
2451 | 2459 | ||
2452 | 2460 | ||
2453 | sdvo_priv->tv_format_property = | 2461 | sdvo_connector->tv_format_property = |
2454 | drm_property_create( | 2462 | drm_property_create( |
2455 | connector->dev, DRM_MODE_PROP_ENUM, | 2463 | connector->dev, DRM_MODE_PROP_ENUM, |
2456 | "mode", sdvo_priv->format_supported_num); | 2464 | "mode", sdvo_connector->format_supported_num); |
2457 | 2465 | ||
2458 | for (i = 0; i < sdvo_priv->format_supported_num; i++) | 2466 | for (i = 0; i < sdvo_connector->format_supported_num; i++) |
2459 | drm_property_add_enum( | 2467 | drm_property_add_enum( |
2460 | sdvo_priv->tv_format_property, i, | 2468 | sdvo_connector->tv_format_property, i, |
2461 | i, sdvo_priv->tv_format_supported[i]); | 2469 | i, sdvo_connector->tv_format_supported[i]); |
2462 | 2470 | ||
2463 | sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0]; | 2471 | sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; |
2464 | drm_connector_attach_property( | 2472 | drm_connector_attach_property( |
2465 | connector, sdvo_priv->tv_format_property, 0); | 2473 | connector, sdvo_connector->tv_format_property, 0); |
2466 | 2474 | ||
2467 | } | 2475 | } |
2468 | 2476 | ||
2469 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | 2477 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) |
2470 | { | 2478 | { |
2471 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 2479 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
2472 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 2480 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
2481 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
2482 | struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; | ||
2473 | struct intel_sdvo_enhancements_reply sdvo_data; | 2483 | struct intel_sdvo_enhancements_reply sdvo_data; |
2474 | struct drm_device *dev = connector->dev; | 2484 | struct drm_device *dev = connector->dev; |
2475 | uint8_t status; | 2485 | uint8_t status; |
@@ -2488,7 +2498,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2488 | DRM_DEBUG_KMS("No enhancement is supported\n"); | 2498 | DRM_DEBUG_KMS("No enhancement is supported\n"); |
2489 | return; | 2499 | return; |
2490 | } | 2500 | } |
2491 | if (sdvo_priv->is_tv) { | 2501 | if (IS_TV(sdvo_priv)) { |
2492 | /* when horizontal overscan is supported, add the left/right | 2502 | /* when horizontal overscan is supported, add the left/right |
2493 | * property | 2503 | * property |
2494 | */ | 2504 | */ |
@@ -2636,8 +2646,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2636 | "default %d, current %d\n", | 2646 | "default %d, current %d\n", |
2637 | data_value[0], data_value[1], response); | 2647 | data_value[0], data_value[1], response); |
2638 | } | 2648 | } |
2639 | } | ||
2640 | if (sdvo_priv->is_tv) { | ||
2641 | if (sdvo_data.saturation) { | 2649 | if (sdvo_data.saturation) { |
2642 | intel_sdvo_write_cmd(intel_encoder, | 2650 | intel_sdvo_write_cmd(intel_encoder, |
2643 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | 2651 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); |
@@ -2733,7 +2741,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2733 | data_value[0], data_value[1], response); | 2741 | data_value[0], data_value[1], response); |
2734 | } | 2742 | } |
2735 | } | 2743 | } |
2736 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 2744 | if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { |
2737 | if (sdvo_data.brightness) { | 2745 | if (sdvo_data.brightness) { |
2738 | intel_sdvo_write_cmd(intel_encoder, | 2746 | intel_sdvo_write_cmd(intel_encoder, |
2739 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | 2747 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); |
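The enhancement-property hunks above now test the connector with IS_TV()/IS_LVDS() instead of the old encoder-wide is_tv/is_lvds flags, keying off the per-connector output_flag set by the *_init() helpers. A sketch of what such macros could look like, assuming they simply group the CVBS0/SVID0 and LVDS0/LVDS1 output bits referenced elsewhere in this patch:

    /* Sketch only; the real macros are expected near the top of intel_sdvo.c. */
    #define IS_TV(c)	((c)->output_flag & (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0))
    #define IS_LVDS(c)	((c)->output_flag & (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))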
@@ -2773,12 +2781,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2773 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | 2781 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) |
2774 | { | 2782 | { |
2775 | struct drm_i915_private *dev_priv = dev->dev_private; | 2783 | struct drm_i915_private *dev_priv = dev->dev_private; |
2776 | struct drm_connector *connector; | ||
2777 | struct intel_encoder *intel_encoder; | 2784 | struct intel_encoder *intel_encoder; |
2778 | struct intel_sdvo_priv *sdvo_priv; | 2785 | struct intel_sdvo_priv *sdvo_priv; |
2779 | |||
2780 | u8 ch[0x40]; | 2786 | u8 ch[0x40]; |
2781 | int i; | 2787 | int i; |
2788 | u32 i2c_reg, ddc_reg, analog_ddc_reg; | ||
2782 | 2789 | ||
2783 | intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2790 | intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
2784 | if (!intel_encoder) { | 2791 | if (!intel_encoder) { |
@@ -2791,11 +2798,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2791 | intel_encoder->dev_priv = sdvo_priv; | 2798 | intel_encoder->dev_priv = sdvo_priv; |
2792 | intel_encoder->type = INTEL_OUTPUT_SDVO; | 2799 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
2793 | 2800 | ||
2801 | if (HAS_PCH_SPLIT(dev)) { | ||
2802 | i2c_reg = PCH_GPIOE; | ||
2803 | ddc_reg = PCH_GPIOE; | ||
2804 | analog_ddc_reg = PCH_GPIOA; | ||
2805 | } else { | ||
2806 | i2c_reg = GPIOE; | ||
2807 | ddc_reg = GPIOE; | ||
2808 | analog_ddc_reg = GPIOA; | ||
2809 | } | ||
2810 | |||
2794 | /* setup the DDC bus. */ | 2811 | /* setup the DDC bus. */ |
2795 | if (sdvo_reg == SDVOB) | 2812 | if (IS_SDVOB(sdvo_reg)) |
2796 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | 2813 | intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); |
2797 | else | 2814 | else |
2798 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | 2815 | intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); |
2799 | 2816 | ||
2800 | if (!intel_encoder->i2c_bus) | 2817 | if (!intel_encoder->i2c_bus) |
2801 | goto err_inteloutput; | 2818 | goto err_inteloutput; |
@@ -2809,20 +2826,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2809 | for (i = 0; i < 0x40; i++) { | 2826 | for (i = 0; i < 0x40; i++) { |
2810 | if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { | 2827 | if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { |
2811 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", | 2828 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", |
2812 | sdvo_reg == SDVOB ? 'B' : 'C'); | 2829 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); |
2813 | goto err_i2c; | 2830 | goto err_i2c; |
2814 | } | 2831 | } |
2815 | } | 2832 | } |
2816 | 2833 | ||
2817 | /* setup the DDC bus. */ | 2834 | /* setup the DDC bus. */ |
2818 | if (sdvo_reg == SDVOB) { | 2835 | if (IS_SDVOB(sdvo_reg)) { |
2819 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2836 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); |
2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2837 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, |
2821 | "SDVOB/VGA DDC BUS"); | 2838 | "SDVOB/VGA DDC BUS"); |
2822 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | 2839 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; |
2823 | } else { | 2840 | } else { |
2824 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2841 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); |
2825 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2842 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, |
2826 | "SDVOC/VGA DDC BUS"); | 2843 | "SDVOC/VGA DDC BUS"); |
2827 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | 2844 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; |
2828 | } | 2845 | } |
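The two hunks above route both the SDVO control bus and the DDC buses through registers chosen once at the top of intel_sdvo_init(): PCH_GPIOE/PCH_GPIOA on split-PCH hardware, GPIOE/GPIOA otherwise. Condensed into a sketch, the selection is simply:

    /* Sketch: pick GMCH or PCH GPIO pins for the SDVO control and DDC buses. */
    u32 i2c_reg        = HAS_PCH_SPLIT(dev) ? PCH_GPIOE : GPIOE;
    u32 ddc_reg        = i2c_reg;
    u32 analog_ddc_reg = HAS_PCH_SPLIT(dev) ? PCH_GPIOA : GPIOA;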
@@ -2833,40 +2850,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2833 | /* Wrap with our custom algo which switches to DDC mode */ | 2850 | /* Wrap with our custom algo which switches to DDC mode */ |
2834 | intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | 2851 | intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
2835 | 2852 | ||
2853 | /* encoder type will be decided later */ | ||
2854 | drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); | ||
2855 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); | ||
2856 | |||
2836 | /* In the default case, SDVO LVDS is false */ | 2857 | /* In the default case, SDVO LVDS is false */ |
2837 | intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); | 2858 | intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); |
2838 | 2859 | ||
2839 | if (intel_sdvo_output_setup(intel_encoder, | 2860 | if (intel_sdvo_output_setup(intel_encoder, |
2840 | sdvo_priv->caps.output_flags) != true) { | 2861 | sdvo_priv->caps.output_flags) != true) { |
2841 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2862 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
2842 | sdvo_reg == SDVOB ? 'B' : 'C'); | 2863 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); |
2843 | goto err_i2c; | 2864 | goto err_i2c; |
2844 | } | 2865 | } |
2845 | 2866 | ||
2846 | |||
2847 | connector = &intel_encoder->base; | ||
2848 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | ||
2849 | connector->connector_type); | ||
2850 | |||
2851 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); | ||
2852 | connector->interlace_allowed = 0; | ||
2853 | connector->doublescan_allowed = 0; | ||
2854 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
2855 | |||
2856 | drm_encoder_init(dev, &intel_encoder->enc, | ||
2857 | &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); | ||
2858 | |||
2859 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); | ||
2860 | |||
2861 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); | ||
2862 | if (sdvo_priv->is_tv) | ||
2863 | intel_sdvo_tv_create_property(connector); | ||
2864 | |||
2865 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) | ||
2866 | intel_sdvo_create_enhance_property(connector); | ||
2867 | |||
2868 | drm_sysfs_connector_add(connector); | ||
2869 | |||
2870 | intel_sdvo_select_ddc_bus(sdvo_priv); | 2867 | intel_sdvo_select_ddc_bus(sdvo_priv); |
2871 | 2868 | ||
2872 | /* Set the input timing to the screen. Assume always input 0. */ | 2869 | /* Set the input timing to the screen. Assume always input 0. */ |
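Both intel_sdvo.c above and intel_tv.c below now reach their encoder through intel_attached_encoder() and convert it with enc_to_intel_encoder(), instead of casting the connector itself, reflecting the split of struct intel_connector out of struct intel_encoder. A hedged sketch of the conversion helpers this relies on (assumed to live in intel_drv.h; intel_attached_encoder() is taken to return the single drm_encoder bound to the connector):

    /* Sketches of the assumed conversion helpers. */
    #define to_intel_connector(x)	container_of(x, struct intel_connector, base)
    #define enc_to_intel_encoder(x)	container_of(x, struct intel_encoder, enc)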
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d7d39b2327df..081cb9014525 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) | |||
916 | } | 916 | } |
917 | } | 917 | } |
918 | 918 | ||
919 | static void | ||
920 | intel_tv_save(struct drm_connector *connector) | ||
921 | { | ||
922 | struct drm_device *dev = connector->dev; | ||
923 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
924 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
925 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | ||
926 | int i; | ||
927 | |||
928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); | ||
929 | tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2); | ||
930 | tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3); | ||
931 | tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1); | ||
932 | tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2); | ||
933 | tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3); | ||
934 | tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4); | ||
935 | tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5); | ||
936 | tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6); | ||
937 | tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7); | ||
938 | tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1); | ||
939 | tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2); | ||
940 | tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3); | ||
941 | |||
942 | tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y); | ||
943 | tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2); | ||
944 | tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U); | ||
945 | tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2); | ||
946 | tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V); | ||
947 | tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2); | ||
948 | tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS); | ||
949 | tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL); | ||
950 | tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS); | ||
951 | tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE); | ||
952 | tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1); | ||
953 | tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2); | ||
954 | tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3); | ||
955 | |||
956 | for (i = 0; i < 60; i++) | ||
957 | tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2)); | ||
958 | for (i = 0; i < 60; i++) | ||
959 | tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2)); | ||
960 | for (i = 0; i < 43; i++) | ||
961 | tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2)); | ||
962 | for (i = 0; i < 43; i++) | ||
963 | tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2)); | ||
964 | |||
965 | tv_priv->save_TV_DAC = I915_READ(TV_DAC); | ||
966 | tv_priv->save_TV_CTL = I915_READ(TV_CTL); | ||
967 | } | ||
968 | |||
969 | static void | ||
970 | intel_tv_restore(struct drm_connector *connector) | ||
971 | { | ||
972 | struct drm_device *dev = connector->dev; | ||
973 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
974 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
975 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | ||
976 | struct drm_crtc *crtc = connector->encoder->crtc; | ||
977 | struct intel_crtc *intel_crtc; | ||
978 | int i; | ||
979 | |||
980 | /* FIXME: No CRTC? */ | ||
981 | if (!crtc) | ||
982 | return; | ||
983 | |||
984 | intel_crtc = to_intel_crtc(crtc); | ||
985 | I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1); | ||
986 | I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2); | ||
987 | I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3); | ||
988 | I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1); | ||
989 | I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2); | ||
990 | I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3); | ||
991 | I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4); | ||
992 | I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5); | ||
993 | I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6); | ||
994 | I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7); | ||
995 | I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1); | ||
996 | I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2); | ||
997 | I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3); | ||
998 | |||
999 | I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y); | ||
1000 | I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2); | ||
1001 | I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U); | ||
1002 | I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2); | ||
1003 | I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V); | ||
1004 | I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2); | ||
1005 | I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS); | ||
1006 | I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL); | ||
1007 | |||
1008 | { | ||
1009 | int pipeconf_reg = (intel_crtc->pipe == 0) ? | ||
1010 | PIPEACONF : PIPEBCONF; | ||
1011 | int dspcntr_reg = (intel_crtc->plane == 0) ? | ||
1012 | DSPACNTR : DSPBCNTR; | ||
1013 | int pipeconf = I915_READ(pipeconf_reg); | ||
1014 | int dspcntr = I915_READ(dspcntr_reg); | ||
1015 | int dspbase_reg = (intel_crtc->plane == 0) ? | ||
1016 | DSPAADDR : DSPBADDR; | ||
1017 | /* Pipe must be off here */ | ||
1018 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); | ||
1019 | /* Flush the plane changes */ | ||
1020 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1021 | |||
1022 | if (!IS_I9XX(dev)) { | ||
1023 | /* Wait for vblank for the disable to take effect */ | ||
1024 | intel_wait_for_vblank(dev); | ||
1025 | } | ||
1026 | |||
1027 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); | ||
1028 | /* Wait for vblank for the disable to take effect. */ | ||
1029 | intel_wait_for_vblank(dev); | ||
1030 | |||
1031 | /* Filter ctl must be set before TV_WIN_SIZE */ | ||
1032 | I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1); | ||
1033 | I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2); | ||
1034 | I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3); | ||
1035 | I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS); | ||
1036 | I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE); | ||
1037 | I915_WRITE(pipeconf_reg, pipeconf); | ||
1038 | I915_WRITE(dspcntr_reg, dspcntr); | ||
1039 | /* Flush the plane changes */ | ||
1040 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1041 | } | ||
1042 | |||
1043 | for (i = 0; i < 60; i++) | ||
1044 | I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]); | ||
1045 | for (i = 0; i < 60; i++) | ||
1046 | I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]); | ||
1047 | for (i = 0; i < 43; i++) | ||
1048 | I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]); | ||
1049 | for (i = 0; i < 43; i++) | ||
1050 | I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]); | ||
1051 | |||
1052 | I915_WRITE(TV_DAC, tv_priv->save_TV_DAC); | ||
1053 | I915_WRITE(TV_CTL, tv_priv->save_TV_CTL); | ||
1054 | } | ||
1055 | |||
1056 | static const struct tv_mode * | 919 | static const struct tv_mode * |
1057 | intel_tv_mode_lookup (char *tv_format) | 920 | intel_tv_mode_lookup (char *tv_format) |
1058 | { | 921 | { |
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder) | |||
1078 | static enum drm_mode_status | 941 | static enum drm_mode_status |
1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | 942 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) |
1080 | { | 943 | { |
1081 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 944 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
945 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 946 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1083 | 947 | ||
1084 | /* Ensure TV refresh is close to desired refresh */ | 948 | /* Ensure TV refresh is close to desired refresh */ |
@@ -1399,6 +1263,15 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
1399 | DAC_A_0_7_V | | 1263 | DAC_A_0_7_V | |
1400 | DAC_B_0_7_V | | 1264 | DAC_B_0_7_V | |
1401 | DAC_C_0_7_V); | 1265 | DAC_C_0_7_V); |
1266 | |||
1267 | /* | ||
1268 | * The TV sense state should be cleared to zero on the Cantiga platform; otherwise | ||
1269 | * the TV is misdetected. This is a hardware requirement. | ||
1270 | */ | ||
1271 | if (IS_GM45(dev)) | ||
1272 | tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | | ||
1273 | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); | ||
1274 | |||
1402 | I915_WRITE(TV_CTL, tv_ctl); | 1275 | I915_WRITE(TV_CTL, tv_ctl); |
1403 | I915_WRITE(TV_DAC, tv_dac); | 1276 | I915_WRITE(TV_DAC, tv_dac); |
1404 | intel_wait_for_vblank(dev); | 1277 | intel_wait_for_vblank(dev); |
@@ -1441,7 +1314,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
1441 | */ | 1314 | */ |
1442 | static void intel_tv_find_better_format(struct drm_connector *connector) | 1315 | static void intel_tv_find_better_format(struct drm_connector *connector) |
1443 | { | 1316 | { |
1444 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1317 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1318 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1445 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 1319 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 1320 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1447 | int i; | 1321 | int i; |
@@ -1475,9 +1349,9 @@ intel_tv_detect(struct drm_connector *connector) | |||
1475 | { | 1349 | { |
1476 | struct drm_crtc *crtc; | 1350 | struct drm_crtc *crtc; |
1477 | struct drm_display_mode mode; | 1351 | struct drm_display_mode mode; |
1478 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1352 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1353 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1479 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 1354 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1480 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
1481 | int dpms_mode; | 1355 | int dpms_mode; |
1482 | int type = tv_priv->type; | 1356 | int type = tv_priv->type; |
1483 | 1357 | ||
@@ -1487,10 +1361,12 @@ intel_tv_detect(struct drm_connector *connector) | |||
1487 | if (encoder->crtc && encoder->crtc->enabled) { | 1361 | if (encoder->crtc && encoder->crtc->enabled) { |
1488 | type = intel_tv_detect_type(encoder->crtc, intel_encoder); | 1362 | type = intel_tv_detect_type(encoder->crtc, intel_encoder); |
1489 | } else { | 1363 | } else { |
1490 | crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); | 1364 | crtc = intel_get_load_detect_pipe(intel_encoder, connector, |
1365 | &mode, &dpms_mode); | ||
1491 | if (crtc) { | 1366 | if (crtc) { |
1492 | type = intel_tv_detect_type(crtc, intel_encoder); | 1367 | type = intel_tv_detect_type(crtc, intel_encoder); |
1493 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); | 1368 | intel_release_load_detect_pipe(intel_encoder, connector, |
1369 | dpms_mode); | ||
1494 | } else | 1370 | } else |
1495 | type = -1; | 1371 | type = -1; |
1496 | } | 1372 | } |
@@ -1525,7 +1401,8 @@ static void | |||
1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | 1401 | intel_tv_chose_preferred_modes(struct drm_connector *connector, |
1526 | struct drm_display_mode *mode_ptr) | 1402 | struct drm_display_mode *mode_ptr) |
1527 | { | 1403 | { |
1528 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1404 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1405 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 1406 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1530 | 1407 | ||
1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | 1408 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) |
@@ -1550,7 +1427,8 @@ static int | |||
1550 | intel_tv_get_modes(struct drm_connector *connector) | 1427 | intel_tv_get_modes(struct drm_connector *connector) |
1551 | { | 1428 | { |
1552 | struct drm_display_mode *mode_ptr; | 1429 | struct drm_display_mode *mode_ptr; |
1553 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1430 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1431 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 1432 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1555 | int j, count = 0; | 1433 | int j, count = 0; |
1556 | u64 tmp; | 1434 | u64 tmp; |
@@ -1604,11 +1482,9 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1604 | static void | 1482 | static void |
1605 | intel_tv_destroy (struct drm_connector *connector) | 1483 | intel_tv_destroy (struct drm_connector *connector) |
1606 | { | 1484 | { |
1607 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | ||
1608 | |||
1609 | drm_sysfs_connector_remove(connector); | 1485 | drm_sysfs_connector_remove(connector); |
1610 | drm_connector_cleanup(connector); | 1486 | drm_connector_cleanup(connector); |
1611 | kfree(intel_encoder); | 1487 | kfree(connector); |
1612 | } | 1488 | } |
1613 | 1489 | ||
1614 | 1490 | ||
@@ -1617,9 +1493,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1617 | uint64_t val) | 1493 | uint64_t val) |
1618 | { | 1494 | { |
1619 | struct drm_device *dev = connector->dev; | 1495 | struct drm_device *dev = connector->dev; |
1620 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1496 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1497 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1621 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 1498 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1622 | struct drm_encoder *encoder = &intel_encoder->enc; | ||
1623 | struct drm_crtc *crtc = encoder->crtc; | 1499 | struct drm_crtc *crtc = encoder->crtc; |
1624 | int ret = 0; | 1500 | int ret = 0; |
1625 | bool changed = false; | 1501 | bool changed = false; |
@@ -1676,8 +1552,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { | |||
1676 | 1552 | ||
1677 | static const struct drm_connector_funcs intel_tv_connector_funcs = { | 1553 | static const struct drm_connector_funcs intel_tv_connector_funcs = { |
1678 | .dpms = drm_helper_connector_dpms, | 1554 | .dpms = drm_helper_connector_dpms, |
1679 | .save = intel_tv_save, | ||
1680 | .restore = intel_tv_restore, | ||
1681 | .detect = intel_tv_detect, | 1555 | .detect = intel_tv_detect, |
1682 | .destroy = intel_tv_destroy, | 1556 | .destroy = intel_tv_destroy, |
1683 | .set_property = intel_tv_set_property, | 1557 | .set_property = intel_tv_set_property, |
@@ -1687,12 +1561,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { | |||
1687 | static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { | 1561 | static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { |
1688 | .mode_valid = intel_tv_mode_valid, | 1562 | .mode_valid = intel_tv_mode_valid, |
1689 | .get_modes = intel_tv_get_modes, | 1563 | .get_modes = intel_tv_get_modes, |
1690 | .best_encoder = intel_best_encoder, | 1564 | .best_encoder = intel_attached_encoder, |
1691 | }; | 1565 | }; |
1692 | 1566 | ||
1693 | static void intel_tv_enc_destroy(struct drm_encoder *encoder) | 1567 | static void intel_tv_enc_destroy(struct drm_encoder *encoder) |
1694 | { | 1568 | { |
1569 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1570 | |||
1695 | drm_encoder_cleanup(encoder); | 1571 | drm_encoder_cleanup(encoder); |
1572 | kfree(intel_encoder); | ||
1696 | } | 1573 | } |
1697 | 1574 | ||
1698 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { | 1575 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { |
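With the connector now allocated separately, intel_tv.c frees the two objects from their own callbacks: intel_tv_destroy() releases the connector while intel_tv_enc_destroy() releases the encoder together with its trailing tv_priv. A compressed ownership sketch, assuming intel_encoder is allocated with tv_priv appended as the (intel_encoder + 1) cast below suggests:

    /*
     * Ownership sketch after this patch:
     *   intel_encoder (+ trailing intel_tv_priv)  -> kfree'd in intel_tv_enc_destroy()
     *   intel_connector                           -> kfree'd in intel_tv_destroy()
     */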
@@ -1741,6 +1618,7 @@ intel_tv_init(struct drm_device *dev) | |||
1741 | struct drm_i915_private *dev_priv = dev->dev_private; | 1618 | struct drm_i915_private *dev_priv = dev->dev_private; |
1742 | struct drm_connector *connector; | 1619 | struct drm_connector *connector; |
1743 | struct intel_encoder *intel_encoder; | 1620 | struct intel_encoder *intel_encoder; |
1621 | struct intel_connector *intel_connector; | ||
1744 | struct intel_tv_priv *tv_priv; | 1622 | struct intel_tv_priv *tv_priv; |
1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | 1623 | u32 tv_dac_on, tv_dac_off, save_tv_dac; |
1746 | char **tv_format_names; | 1624 | char **tv_format_names; |
@@ -1786,7 +1664,13 @@ intel_tv_init(struct drm_device *dev) | |||
1786 | return; | 1664 | return; |
1787 | } | 1665 | } |
1788 | 1666 | ||
1789 | connector = &intel_encoder->base; | 1667 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1668 | if (!intel_connector) { | ||
1669 | kfree(intel_encoder); | ||
1670 | return; | ||
1671 | } | ||
1672 | |||
1673 | connector = &intel_connector->base; | ||
1790 | 1674 | ||
1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1675 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1792 | DRM_MODE_CONNECTOR_SVIDEO); | 1676 | DRM_MODE_CONNECTOR_SVIDEO); |
@@ -1794,7 +1678,7 @@ intel_tv_init(struct drm_device *dev) | |||
1794 | drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, | 1678 | drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, |
1795 | DRM_MODE_ENCODER_TVDAC); | 1679 | DRM_MODE_ENCODER_TVDAC); |
1796 | 1680 | ||
1797 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); | 1681 | drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); |
1798 | tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); | 1682 | tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); |
1799 | intel_encoder->type = INTEL_OUTPUT_TVOUT; | 1683 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1800 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1684 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |