Diffstat (limited to 'drivers')
34 files changed, 569 insertions, 228 deletions
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) | |||
2868 | }, | 2868 | }, |
2869 | .driver_data = "F.23", /* cutoff BIOS version */ | 2869 | .driver_data = "F.23", /* cutoff BIOS version */ |
2870 | }, | 2870 | }, |
2871 | /* | ||
2872 | * Acer eMachines G725 has the same problem. BIOS | ||
2873 | * V1.03 is known to be broken. V3.04 is known to | ||
2874 | * work. Inbetween, there are V1.06, V2.06 and V3.03 | ||
2875 | * that we don't have much idea about. For now, | ||
2876 | * blacklist anything older than V3.04. | ||
2877 | */ | ||
2878 | { | ||
2879 | .ident = "G725", | ||
2880 | .matches = { | ||
2881 | DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), | ||
2882 | DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), | ||
2883 | }, | ||
2884 | .driver_data = "V3.04", /* cutoff BIOS version */ | ||
2885 | }, | ||
2871 | { } /* terminate list */ | 2886 | { } /* terminate list */ |
2872 | }; | 2887 | }; |
2873 | const struct dmi_system_id *dmi = dmi_first_match(sysids); | 2888 | const struct dmi_system_id *dmi = dmi_first_match(sysids); |
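The new eMachines G725 entry only records the machine identity and the first known-good BIOS ("V3.04" in driver_data); the comparison against the BIOS actually running is done by the surrounding ahci_broken_suspend() code, which this hunk does not show. A minimal sketch of how such a cutoff entry is typically consumed, assuming the DMI BIOS version string is compared lexicographically against driver_data (which only works within one vendor's numbering scheme):

    /* Sketch only -- not part of this patch. */
    static bool bios_older_than_cutoff(const struct dmi_system_id *dmi)
    {
        const char *ver = dmi_get_system_info(DMI_BIOS_VERSION);

        /* Suspend is treated as broken for every version below the cutoff. */
        return dmi && ver && strcmp(ver, dmi->driver_data) < 0;
    }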
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2875 | * write indication (used for PIO/DMA setup), result TF is | 2875 | * write indication (used for PIO/DMA setup), result TF is |
2876 | * copied back and we don't whine too much about its failure. | 2876 | * copied back and we don't whine too much about its failure. |
2877 | */ | 2877 | */ |
2878 | tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 2878 | tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
2879 | if (scmd->sc_data_direction == DMA_TO_DEVICE) | 2879 | if (scmd->sc_data_direction == DMA_TO_DEVICE) |
2880 | tf->flags |= ATA_TFLAG_WRITE; | 2880 | tf->flags |= ATA_TFLAG_WRITE; |
2881 | 2881 | ||
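The only change in this hunk is "=" becoming "|=": taskfile flag bits set earlier while decoding the passthrough CDB (for example the 48-bit addressing flag) were being wiped by the plain assignment. A stand-alone illustration of the difference, not libata code:

    #include <stdio.h>

    #define FLAG_EARLIER  (1u << 0)   /* set before this point, must survive */
    #define FLAG_ADDR_DEV (1u << 1)

    int main(void)
    {
        unsigned int flags = FLAG_EARLIER;

        flags |= FLAG_ADDR_DEV;   /* OR keeps FLAG_EARLIER as well */
        /* flags = FLAG_ADDR_DEV;    plain assignment would drop it */
        printf("flags = %#x\n", flags);
        return 0;
    }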
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
893 | do_write); | 893 | do_write); |
894 | } | 894 | } |
895 | 895 | ||
896 | if (!do_write) | ||
897 | flush_dcache_page(page); | ||
898 | |||
896 | qc->curbytes += qc->sect_size; | 899 | qc->curbytes += qc->sect_size; |
897 | qc->cursg_ofs += qc->sect_size; | 900 | qc->cursg_ofs += qc->sect_size; |
898 | 901 | ||
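ata_pio_sector() has just used the CPU to copy a sector into a page that may be mapped elsewhere (for example into user space through the page cache), so a read needs flush_dcache_page() to make the new data visible through those other mappings on architectures with aliasing data caches; writes to the device do not, which is why the call is guarded by !do_write. A hedged sketch of the general pattern, not the libata code itself:

    /* Sketch: CPU fills a page that user space may also have mapped. */
    static void fill_page_for_user(struct page *page, const void *src, size_t len)
    {
        void *dst = kmap(page);

        memcpy(dst, src, len);
        kunmap(page);

        /* Push the data out of the kernel's cache alias; a no-op on
         * architectures without D-cache aliasing. */
        flush_dcache_page(page);
    }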
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on) | |||
1951 | pid = task_pid(current); | 1951 | pid = task_pid(current); |
1952 | type = PIDTYPE_PID; | 1952 | type = PIDTYPE_PID; |
1953 | } | 1953 | } |
1954 | retval = __f_setown(filp, pid, type, 0); | 1954 | get_pid(pid); |
1955 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | 1955 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
1956 | retval = __f_setown(filp, pid, type, 0); | ||
1957 | put_pid(pid); | ||
1956 | if (retval) | 1958 | if (retval) |
1957 | goto out; | 1959 | goto out; |
1958 | } else { | 1960 | } else { |
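The fix pins the struct pid with get_pid() while tty->ctrl_lock is still held, drops the spinlock, and only then calls __f_setown(), which takes locks of its own; the temporary reference keeps the pid valid across the window where no lock protects it. Condensed, the pattern used above is:

    spin_lock_irqsave(&tty->ctrl_lock, flags);
    pid = task_pid(current);
    type = PIDTYPE_PID;
    get_pid(pid);                             /* pin pid before unlocking */
    spin_unlock_irqrestore(&tty->ctrl_lock, flags);

    retval = __f_setown(filp, pid, type, 0);  /* may take other locks */
    put_pid(pid);                             /* drop the temporary reference */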
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc899c460..b75ce8b84c46 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,23 +48,20 @@ enum sh_dmae_desc_status { | |||
48 | */ | 48 | */ |
49 | #define RS_DEFAULT (RS_DUAL) | 49 | #define RS_DEFAULT (RS_DUAL) |
50 | 50 | ||
51 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | ||
52 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; | ||
53 | |||
51 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 54 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
52 | 55 | ||
53 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | 56 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) |
54 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 57 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
55 | { | 58 | { |
56 | ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | 59 | ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); |
57 | } | 60 | } |
58 | 61 | ||
59 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 62 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
60 | { | 63 | { |
61 | return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | 64 | return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); |
62 | } | ||
63 | |||
64 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
65 | { | ||
66 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | ||
67 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
68 | } | 65 | } |
69 | 66 | ||
70 | /* | 67 | /* |
@@ -95,27 +92,30 @@ static int sh_dmae_rst(int id) | |||
95 | return 0; | 92 | return 0; |
96 | } | 93 | } |
97 | 94 | ||
98 | static int dmae_is_busy(struct sh_dmae_chan *sh_chan) | 95 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) |
99 | { | 96 | { |
100 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 97 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
101 | if (chcr & CHCR_DE) { | 98 | |
102 | if (!(chcr & CHCR_TE)) | 99 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) |
103 | return -EBUSY; /* working */ | 100 | return true; /* working */ |
104 | } | 101 | |
105 | return 0; /* waiting */ | 102 | return false; /* waiting */ |
106 | } | 103 | } |
107 | 104 | ||
108 | static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) | 105 | static unsigned int ts_shift[] = TS_SHIFT; |
106 | static inline unsigned int calc_xmit_shift(u32 chcr) | ||
109 | { | 107 | { |
110 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 108 | int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | |
111 | return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; | 109 | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); |
110 | |||
111 | return ts_shift[cnt]; | ||
112 | } | 112 | } |
113 | 113 | ||
114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | 114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
115 | { | 115 | { |
116 | sh_dmae_writel(sh_chan, hw->sar, SAR); | 116 | sh_dmae_writel(sh_chan, hw->sar, SAR); |
117 | sh_dmae_writel(sh_chan, hw->dar, DAR); | 117 | sh_dmae_writel(sh_chan, hw->dar, DAR); |
118 | sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR); | 118 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); |
119 | } | 119 | } |
120 | 120 | ||
121 | static void dmae_start(struct sh_dmae_chan *sh_chan) | 121 | static void dmae_start(struct sh_dmae_chan *sh_chan) |
@@ -123,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan) | |||
123 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 123 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
124 | 124 | ||
125 | chcr |= CHCR_DE | CHCR_IE; | 125 | chcr |= CHCR_DE | CHCR_IE; |
126 | sh_dmae_writel(sh_chan, chcr, CHCR); | 126 | sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); |
127 | } | 127 | } |
128 | 128 | ||
129 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | 129 | static void dmae_halt(struct sh_dmae_chan *sh_chan) |
@@ -134,55 +134,50 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) | |||
134 | sh_dmae_writel(sh_chan, chcr, CHCR); | 134 | sh_dmae_writel(sh_chan, chcr, CHCR); |
135 | } | 135 | } |
136 | 136 | ||
137 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
138 | { | ||
139 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | ||
140 | sh_chan->xmit_shift = calc_xmit_shift(chcr); | ||
141 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
142 | } | ||
143 | |||
137 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | 144 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) |
138 | { | 145 | { |
139 | int ret = dmae_is_busy(sh_chan); | ||
140 | /* When DMA was working, can not set data to CHCR */ | 146 | /* When DMA was working, can not set data to CHCR */ |
141 | if (ret) | 147 | if (dmae_is_busy(sh_chan)) |
142 | return ret; | 148 | return -EBUSY; |
143 | 149 | ||
150 | sh_chan->xmit_shift = calc_xmit_shift(val); | ||
144 | sh_dmae_writel(sh_chan, val, CHCR); | 151 | sh_dmae_writel(sh_chan, val, CHCR); |
152 | |||
145 | return 0; | 153 | return 0; |
146 | } | 154 | } |
147 | 155 | ||
148 | #define DMARS1_ADDR 0x04 | 156 | #define DMARS_SHIFT 8 |
149 | #define DMARS2_ADDR 0x08 | 157 | #define DMARS_CHAN_MSK 0x01 |
150 | #define DMARS_SHIFT 8 | ||
151 | #define DMARS_CHAN_MSK 0x01 | ||
152 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 158 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
153 | { | 159 | { |
154 | u32 addr; | 160 | u32 addr; |
155 | int shift = 0; | 161 | int shift = 0; |
156 | int ret = dmae_is_busy(sh_chan); | 162 | |
157 | if (ret) | 163 | if (dmae_is_busy(sh_chan)) |
158 | return ret; | 164 | return -EBUSY; |
159 | 165 | ||
160 | if (sh_chan->id & DMARS_CHAN_MSK) | 166 | if (sh_chan->id & DMARS_CHAN_MSK) |
161 | shift = DMARS_SHIFT; | 167 | shift = DMARS_SHIFT; |
162 | 168 | ||
163 | switch (sh_chan->id) { | 169 | if (sh_chan->id < 6) |
164 | /* DMARS0 */ | 170 | /* DMA0RS0 - DMA0RS2 */ |
165 | case 0: | 171 | addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; |
166 | case 1: | 172 | #ifdef SH_DMARS_BASE1 |
167 | addr = SH_DMARS_BASE; | 173 | else if (sh_chan->id < 12) |
168 | break; | 174 | /* DMA1RS0 - DMA1RS2 */ |
169 | /* DMARS1 */ | 175 | addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; |
170 | case 2: | 176 | #endif |
171 | case 3: | 177 | else |
172 | addr = (SH_DMARS_BASE + DMARS1_ADDR); | ||
173 | break; | ||
174 | /* DMARS2 */ | ||
175 | case 4: | ||
176 | case 5: | ||
177 | addr = (SH_DMARS_BASE + DMARS2_ADDR); | ||
178 | break; | ||
179 | default: | ||
180 | return -EINVAL; | 178 | return -EINVAL; |
181 | } | ||
182 | 179 | ||
183 | ctrl_outw((val << shift) | | 180 | ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); |
184 | (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), | ||
185 | addr); | ||
186 | 181 | ||
187 | return 0; | 182 | return 0; |
188 | } | 183 | } |
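The rewritten DMARS update also changes which half of the 16-bit register is preserved: the old ternary (shift ? 0xFF00 : 0x00FF) kept the byte that was about to be overwritten and cleared the neighbouring channel's byte, while the new mask (0xFF00 >> shift) keeps the neighbouring channel's byte so val lands cleanly in this channel's byte. A stand-alone comparison with made-up register contents:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t reg = 0xABCD;   /* hypothetical current DMARS contents */
        uint16_t val = 0x42;     /* new 8-bit MID/RID value */

        for (int shift = 0; shift <= 8; shift += 8) {
            uint16_t old_w = (val << shift) | (reg & (shift ? 0xFF00 : 0x00FF));
            uint16_t new_w = (val << shift) | (reg & (0xFF00 >> shift));

            /* new_w keeps the other channel's byte intact; old_w ORs val
             * into the very byte it was supposed to replace. */
            printf("shift %d: old 0x%04x  new 0x%04x\n", shift, old_w, new_w);
        }
        return 0;
    }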
@@ -250,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | |||
250 | return NULL; | 245 | return NULL; |
251 | } | 246 | } |
252 | 247 | ||
248 | static struct sh_dmae_slave_config *sh_dmae_find_slave( | ||
249 | struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) | ||
250 | { | ||
251 | struct dma_device *dma_dev = sh_chan->common.device; | ||
252 | struct sh_dmae_device *shdev = container_of(dma_dev, | ||
253 | struct sh_dmae_device, common); | ||
254 | struct sh_dmae_pdata *pdata = &shdev->pdata; | ||
255 | int i; | ||
256 | |||
257 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) | ||
258 | return NULL; | ||
259 | |||
260 | for (i = 0; i < pdata->config_num; i++) | ||
261 | if (pdata->config[i].slave_id == slave_id) | ||
262 | return pdata->config + i; | ||
263 | |||
264 | return NULL; | ||
265 | } | ||
266 | |||
253 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | 267 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) |
254 | { | 268 | { |
255 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 269 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
256 | struct sh_desc *desc; | 270 | struct sh_desc *desc; |
271 | struct sh_dmae_slave *param = chan->private; | ||
272 | |||
273 | /* | ||
274 | * This relies on the guarantee from dmaengine that alloc_chan_resources | ||
275 | * never runs concurrently with itself or free_chan_resources. | ||
276 | */ | ||
277 | if (param) { | ||
278 | struct sh_dmae_slave_config *cfg; | ||
279 | |||
280 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); | ||
281 | if (!cfg) | ||
282 | return -EINVAL; | ||
283 | |||
284 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) | ||
285 | return -EBUSY; | ||
286 | |||
287 | param->config = cfg; | ||
288 | |||
289 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
290 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
291 | } else { | ||
292 | if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) | ||
293 | dmae_set_chcr(sh_chan, RS_DEFAULT); | ||
294 | } | ||
257 | 295 | ||
258 | spin_lock_bh(&sh_chan->desc_lock); | 296 | spin_lock_bh(&sh_chan->desc_lock); |
259 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | 297 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { |
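Slave channels are claimed exclusively: the slave_id indexes the sh_dmae_slave_used bitmap added at the top of the file, test_and_set_bit() reserves it here, and free_chan_resources() clears it again in the next hunk. What this hunk does not show is where chan->private comes from. A hypothetical client-side sketch (the helper names are assumptions, not part of this patch): the peripheral driver passes a struct sh_dmae_slave with its slave_id through a dmaengine filter function, which alloc_chan_resources then resolves with sh_dmae_find_slave():

    static bool shdma_filter(struct dma_chan *chan, void *arg)
    {
        chan->private = arg;   /* struct sh_dmae_slave with .slave_id set */
        return true;
    }

    static struct dma_chan *request_slave_channel(struct sh_dmae_slave *slave)
    {
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);   /* capability advertised by this patch */

        return dma_request_channel(mask, shdma_filter, slave);
    }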
@@ -286,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
286 | struct sh_desc *desc, *_desc; | 324 | struct sh_desc *desc, *_desc; |
287 | LIST_HEAD(list); | 325 | LIST_HEAD(list); |
288 | 326 | ||
327 | dmae_halt(sh_chan); | ||
328 | |||
289 | /* Prepared and not submitted descriptors can still be on the queue */ | 329 | /* Prepared and not submitted descriptors can still be on the queue */ |
290 | if (!list_empty(&sh_chan->ld_queue)) | 330 | if (!list_empty(&sh_chan->ld_queue)) |
291 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 331 | sh_dmae_chan_ld_cleanup(sh_chan, true); |
292 | 332 | ||
333 | if (chan->private) { | ||
334 | /* The caller is holding dma_list_mutex */ | ||
335 | struct sh_dmae_slave *param = chan->private; | ||
336 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
337 | } | ||
338 | |||
293 | spin_lock_bh(&sh_chan->desc_lock); | 339 | spin_lock_bh(&sh_chan->desc_lock); |
294 | 340 | ||
295 | list_splice_init(&sh_chan->ld_free, &list); | 341 | list_splice_init(&sh_chan->ld_free, &list); |
@@ -301,23 +347,97 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
301 | kfree(desc); | 347 | kfree(desc); |
302 | } | 348 | } |
303 | 349 | ||
304 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | 350 | /** |
305 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | 351 | * sh_dmae_add_desc - get, set up and return one transfer descriptor |
306 | size_t len, unsigned long flags) | 352 | * @sh_chan: DMA channel |
353 | * @flags: DMA transfer flags | ||
354 | * @dest: destination DMA address, incremented when direction equals | ||
355 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL | ||
356 | * @src: source DMA address, incremented when direction equals | ||
357 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL | ||
358 | * @len: DMA transfer length | ||
359 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
360 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
361 | * equals DMA_BIDIRECTIONAL for MEMCPY | ||
362 | * Returns 0 or an error | ||
363 | * Locks: called with desc_lock held | ||
364 | */ | ||
365 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | ||
366 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | ||
367 | struct sh_desc **first, enum dma_data_direction direction) | ||
307 | { | 368 | { |
308 | struct sh_dmae_chan *sh_chan; | 369 | struct sh_desc *new; |
309 | struct sh_desc *first = NULL, *prev = NULL, *new; | ||
310 | size_t copy_size; | 370 | size_t copy_size; |
311 | LIST_HEAD(tx_list); | ||
312 | int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1); | ||
313 | 371 | ||
314 | if (!chan) | 372 | if (!*len) |
315 | return NULL; | 373 | return NULL; |
316 | 374 | ||
317 | if (!len) | 375 | /* Allocate the link descriptor from the free list */ |
376 | new = sh_dmae_get_desc(sh_chan); | ||
377 | if (!new) { | ||
378 | dev_err(sh_chan->dev, "No free link descriptor available\n"); | ||
318 | return NULL; | 379 | return NULL; |
380 | } | ||
319 | 381 | ||
320 | sh_chan = to_sh_chan(chan); | 382 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); |
383 | |||
384 | new->hw.sar = *src; | ||
385 | new->hw.dar = *dest; | ||
386 | new->hw.tcr = copy_size; | ||
387 | |||
388 | if (!*first) { | ||
389 | /* First desc */ | ||
390 | new->async_tx.cookie = -EBUSY; | ||
391 | *first = new; | ||
392 | } else { | ||
393 | /* Other desc - invisible to the user */ | ||
394 | new->async_tx.cookie = -EINVAL; | ||
395 | } | ||
396 | |||
397 | dev_dbg(sh_chan->dev, | ||
398 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", | ||
399 | copy_size, *len, *src, *dest, &new->async_tx, | ||
400 | new->async_tx.cookie, sh_chan->xmit_shift); | ||
401 | |||
402 | new->mark = DESC_PREPARED; | ||
403 | new->async_tx.flags = flags; | ||
404 | new->direction = direction; | ||
405 | |||
406 | *len -= copy_size; | ||
407 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) | ||
408 | *src += copy_size; | ||
409 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) | ||
410 | *dest += copy_size; | ||
411 | |||
412 | return new; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list | ||
417 | * | ||
418 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
419 | * converted to scatter-gather to guarantee consistent locking and a correct | ||
420 | * list manipulation. For slave DMA direction carries the usual meaning, and, | ||
421 | * logically, the SG list is RAM and the addr variable contains slave address, | ||
422 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL | ||
423 | * and the SG list contains only one element and points at the source buffer. | ||
424 | */ | ||
425 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | ||
426 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
427 | enum dma_data_direction direction, unsigned long flags) | ||
428 | { | ||
429 | struct scatterlist *sg; | ||
430 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | ||
431 | LIST_HEAD(tx_list); | ||
432 | int chunks = 0; | ||
433 | int i; | ||
434 | |||
435 | if (!sg_len) | ||
436 | return NULL; | ||
437 | |||
438 | for_each_sg(sgl, sg, sg_len, i) | ||
439 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / | ||
440 | (SH_DMA_TCR_MAX + 1); | ||
321 | 441 | ||
322 | /* Have to lock the whole loop to protect against concurrent release */ | 442 | /* Have to lock the whole loop to protect against concurrent release */ |
323 | spin_lock_bh(&sh_chan->desc_lock); | 443 | spin_lock_bh(&sh_chan->desc_lock); |
@@ -333,49 +453,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
333 | * only during this function, then they are immediately spliced | 453 | * only during this function, then they are immediately spliced |
334 | * back onto the free list in form of a chain | 454 | * back onto the free list in form of a chain |
335 | */ | 455 | */ |
336 | do { | 456 | for_each_sg(sgl, sg, sg_len, i) { |
337 | /* Allocate the link descriptor from the free list */ | 457 | dma_addr_t sg_addr = sg_dma_address(sg); |
338 | new = sh_dmae_get_desc(sh_chan); | 458 | size_t len = sg_dma_len(sg); |
339 | if (!new) { | 459 | |
340 | dev_err(sh_chan->dev, | 460 | if (!len) |
341 | "No free memory for link descriptor\n"); | 461 | goto err_get_desc; |
342 | list_for_each_entry(new, &tx_list, node) | 462 | |
343 | new->mark = DESC_IDLE; | 463 | do { |
344 | list_splice(&tx_list, &sh_chan->ld_free); | 464 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", |
345 | spin_unlock_bh(&sh_chan->desc_lock); | 465 | i, sg, len, (unsigned long long)sg_addr); |
346 | return NULL; | 466 | |
347 | } | 467 | if (direction == DMA_FROM_DEVICE) |
348 | 468 | new = sh_dmae_add_desc(sh_chan, flags, | |
349 | copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1); | 469 | &sg_addr, addr, &len, &first, |
350 | 470 | direction); | |
351 | new->hw.sar = dma_src; | 471 | else |
352 | new->hw.dar = dma_dest; | 472 | new = sh_dmae_add_desc(sh_chan, flags, |
353 | new->hw.tcr = copy_size; | 473 | addr, &sg_addr, &len, &first, |
354 | if (!first) { | 474 | direction); |
355 | /* First desc */ | 475 | if (!new) |
356 | new->async_tx.cookie = -EBUSY; | 476 | goto err_get_desc; |
357 | first = new; | 477 | |
358 | } else { | 478 | new->chunks = chunks--; |
359 | /* Other desc - invisible to the user */ | 479 | list_add_tail(&new->node, &tx_list); |
360 | new->async_tx.cookie = -EINVAL; | 480 | } while (len); |
361 | } | 481 | } |
362 | |||
363 | dev_dbg(sh_chan->dev, | ||
364 | "chaining %u of %u with %p, dst %x, cookie %d\n", | ||
365 | copy_size, len, &new->async_tx, dma_dest, | ||
366 | new->async_tx.cookie); | ||
367 | |||
368 | new->mark = DESC_PREPARED; | ||
369 | new->async_tx.flags = flags; | ||
370 | new->chunks = chunks--; | ||
371 | |||
372 | prev = new; | ||
373 | len -= copy_size; | ||
374 | dma_src += copy_size; | ||
375 | dma_dest += copy_size; | ||
376 | /* Insert the link descriptor to the LD ring */ | ||
377 | list_add_tail(&new->node, &tx_list); | ||
378 | } while (len); | ||
379 | 482 | ||
380 | if (new != first) | 483 | if (new != first) |
381 | new->async_tx.cookie = -ENOSPC; | 484 | new->async_tx.cookie = -ENOSPC; |
@@ -386,6 +489,77 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
386 | spin_unlock_bh(&sh_chan->desc_lock); | 489 | spin_unlock_bh(&sh_chan->desc_lock); |
387 | 490 | ||
388 | return &first->async_tx; | 491 | return &first->async_tx; |
492 | |||
493 | err_get_desc: | ||
494 | list_for_each_entry(new, &tx_list, node) | ||
495 | new->mark = DESC_IDLE; | ||
496 | list_splice(&tx_list, &sh_chan->ld_free); | ||
497 | |||
498 | spin_unlock_bh(&sh_chan->desc_lock); | ||
499 | |||
500 | return NULL; | ||
501 | } | ||
502 | |||
503 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | ||
504 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
505 | size_t len, unsigned long flags) | ||
506 | { | ||
507 | struct sh_dmae_chan *sh_chan; | ||
508 | struct scatterlist sg; | ||
509 | |||
510 | if (!chan || !len) | ||
511 | return NULL; | ||
512 | |||
513 | chan->private = NULL; | ||
514 | |||
515 | sh_chan = to_sh_chan(chan); | ||
516 | |||
517 | sg_init_table(&sg, 1); | ||
518 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
519 | offset_in_page(dma_src)); | ||
520 | sg_dma_address(&sg) = dma_src; | ||
521 | sg_dma_len(&sg) = len; | ||
522 | |||
523 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, | ||
524 | flags); | ||
525 | } | ||
526 | |||
527 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | ||
528 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
529 | enum dma_data_direction direction, unsigned long flags) | ||
530 | { | ||
531 | struct sh_dmae_slave *param; | ||
532 | struct sh_dmae_chan *sh_chan; | ||
533 | |||
534 | if (!chan) | ||
535 | return NULL; | ||
536 | |||
537 | sh_chan = to_sh_chan(chan); | ||
538 | param = chan->private; | ||
539 | |||
540 | /* Someone calling slave DMA on a public channel? */ | ||
541 | if (!param || !sg_len) { | ||
542 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", | ||
543 | __func__, param, sg_len, param ? param->slave_id : -1); | ||
544 | return NULL; | ||
545 | } | ||
546 | |||
547 | /* | ||
548 | * if (param != NULL), this is a successfully requested slave channel, | ||
549 | * therefore param->config != NULL too. | ||
550 | */ | ||
551 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, ¶m->config->addr, | ||
552 | direction, flags); | ||
553 | } | ||
554 | |||
555 | static void sh_dmae_terminate_all(struct dma_chan *chan) | ||
556 | { | ||
557 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
558 | |||
559 | if (!chan) | ||
560 | return; | ||
561 | |||
562 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
389 | } | 563 | } |
390 | 564 | ||
391 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | 565 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) |
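Both the old memcpy-only code and the new scatter-gather path cap each hardware descriptor at SH_DMA_TCR_MAX + 1 bytes, so a transfer of len bytes needs (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1) descriptors, a ceiling division by the per-descriptor maximum; the new code simply sums this over every scatterlist element before taking desc_lock. A small stand-alone check of that arithmetic (the maximum below is illustrative, the real value lives in shdma.h):

    #include <assert.h>
    #include <stddef.h>

    #define TCR_MAX 0x00FFFFFFu   /* illustrative stand-in for SH_DMA_TCR_MAX */

    static size_t nr_chunks(size_t len)
    {
        return (len + TCR_MAX) / (TCR_MAX + 1);   /* ceil(len / (TCR_MAX + 1)) */
    }

    int main(void)
    {
        assert(nr_chunks(0) == 0);
        assert(nr_chunks(1) == 1);
        assert(nr_chunks(TCR_MAX + 1) == 1);   /* exactly one full descriptor */
        assert(nr_chunks(TCR_MAX + 2) == 2);   /* one byte spills into a second */
        return 0;
    }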
@@ -419,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all | |||
419 | cookie = tx->cookie; | 593 | cookie = tx->cookie; |
420 | 594 | ||
421 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | 595 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { |
422 | BUG_ON(sh_chan->completed_cookie != desc->cookie - 1); | 596 | if (sh_chan->completed_cookie != desc->cookie - 1) |
597 | dev_dbg(sh_chan->dev, | ||
598 | "Completing cookie %d, expected %d\n", | ||
599 | desc->cookie, | ||
600 | sh_chan->completed_cookie + 1); | ||
423 | sh_chan->completed_cookie = desc->cookie; | 601 | sh_chan->completed_cookie = desc->cookie; |
424 | } | 602 | } |
425 | 603 | ||
@@ -492,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | |||
492 | return; | 670 | return; |
493 | } | 671 | } |
494 | 672 | ||
495 | /* Find the first un-transfer desciptor */ | 673 | /* Find the first not transferred desciptor */ |
496 | list_for_each_entry(sd, &sh_chan->ld_queue, node) | 674 | list_for_each_entry(sd, &sh_chan->ld_queue, node) |
497 | if (sd->mark == DESC_SUBMITTED) { | 675 | if (sd->mark == DESC_SUBMITTED) { |
498 | /* Get the ld start address from ld_queue */ | 676 | /* Get the ld start address from ld_queue */ |
@@ -559,7 +737,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data) | |||
559 | 737 | ||
560 | /* IRQ Multi */ | 738 | /* IRQ Multi */ |
561 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 739 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { |
562 | int cnt = 0; | 740 | int __maybe_unused cnt = 0; |
563 | switch (irq) { | 741 | switch (irq) { |
564 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 742 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) |
565 | case DMTE6_IRQ: | 743 | case DMTE6_IRQ: |
@@ -596,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data) | |||
596 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 774 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; |
597 | struct sh_desc *desc; | 775 | struct sh_desc *desc; |
598 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 776 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); |
777 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | ||
599 | 778 | ||
600 | spin_lock(&sh_chan->desc_lock); | 779 | spin_lock(&sh_chan->desc_lock); |
601 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 780 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
602 | if ((desc->hw.sar + desc->hw.tcr) == sar_buf && | 781 | if (desc->mark == DESC_SUBMITTED && |
603 | desc->mark == DESC_SUBMITTED) { | 782 | ((desc->direction == DMA_FROM_DEVICE && |
783 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | ||
784 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | ||
604 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | 785 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", |
605 | desc->async_tx.cookie, &desc->async_tx, | 786 | desc->async_tx.cookie, &desc->async_tx, |
606 | desc->hw.dar); | 787 | desc->hw.dar); |
@@ -673,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
673 | } | 854 | } |
674 | 855 | ||
675 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 856 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
676 | "sh-dmae%d", new_sh_chan->id); | 857 | "sh-dmae%d", new_sh_chan->id); |
677 | 858 | ||
678 | /* set up channel irq */ | 859 | /* set up channel irq */ |
679 | err = request_irq(irq, &sh_dmae_interrupt, irqflags, | 860 | err = request_irq(irq, &sh_dmae_interrupt, irqflags, |
@@ -684,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
684 | goto err_no_irq; | 865 | goto err_no_irq; |
685 | } | 866 | } |
686 | 867 | ||
687 | /* CHCR register control function */ | ||
688 | new_sh_chan->set_chcr = dmae_set_chcr; | ||
689 | /* DMARS register control function */ | ||
690 | new_sh_chan->set_dmars = dmae_set_dmars; | ||
691 | |||
692 | shdev->chan[id] = new_sh_chan; | 868 | shdev->chan[id] = new_sh_chan; |
693 | return 0; | 869 | return 0; |
694 | 870 | ||
@@ -759,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
759 | INIT_LIST_HEAD(&shdev->common.channels); | 935 | INIT_LIST_HEAD(&shdev->common.channels); |
760 | 936 | ||
761 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 937 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
938 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
939 | |||
762 | shdev->common.device_alloc_chan_resources | 940 | shdev->common.device_alloc_chan_resources |
763 | = sh_dmae_alloc_chan_resources; | 941 | = sh_dmae_alloc_chan_resources; |
764 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | 942 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; |
765 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | 943 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; |
766 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; | 944 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; |
767 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | 945 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; |
946 | |||
947 | /* Compulsory for DMA_SLAVE fields */ | ||
948 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | ||
949 | shdev->common.device_terminate_all = sh_dmae_terminate_all; | ||
950 | |||
768 | shdev->common.dev = &pdev->dev; | 951 | shdev->common.dev = &pdev->dev; |
769 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 952 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
770 | shdev->common.copy_align = 5; | 953 | shdev->common.copy_align = 5; |
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 108f1cffb6f5..7e227f3c87c4 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -29,6 +29,7 @@ struct sh_desc { | |||
29 | struct sh_dmae_regs hw; | 29 | struct sh_dmae_regs hw; |
30 | struct list_head node; | 30 | struct list_head node; |
31 | struct dma_async_tx_descriptor async_tx; | 31 | struct dma_async_tx_descriptor async_tx; |
32 | enum dma_data_direction direction; | ||
32 | dma_cookie_t cookie; | 33 | dma_cookie_t cookie; |
33 | int chunks; | 34 | int chunks; |
34 | int mark; | 35 | int mark; |
@@ -45,13 +46,9 @@ struct sh_dmae_chan { | |||
45 | struct device *dev; /* Channel device */ | 46 | struct device *dev; /* Channel device */ |
46 | struct tasklet_struct tasklet; /* Tasklet */ | 47 | struct tasklet_struct tasklet; /* Tasklet */ |
47 | int descs_allocated; /* desc count */ | 48 | int descs_allocated; /* desc count */ |
49 | int xmit_shift; /* log_2(bytes_per_xfer) */ | ||
48 | int id; /* Raw id of this channel */ | 50 | int id; /* Raw id of this channel */ |
49 | char dev_id[16]; /* unique name per DMAC of channel */ | 51 | char dev_id[16]; /* unique name per DMAC of channel */ |
50 | |||
51 | /* Set chcr */ | ||
52 | int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs); | ||
53 | /* Set DMA resource */ | ||
54 | int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res); | ||
55 | }; | 52 | }; |
56 | 53 | ||
57 | struct sh_dmae_device { | 54 | struct sh_dmae_device { |
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga | |||
113 | 113 | ||
114 | if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { | 114 | if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { |
115 | DRM_ERROR("fail to set dma mask to 0x%Lx\n", | 115 | DRM_ERROR("fail to set dma mask to 0x%Lx\n", |
116 | gart_info->table_mask); | 116 | (unsigned long long)gart_info->table_mask); |
117 | ret = 1; | 117 | ret = 1; |
118 | goto done; | 118 | goto done; |
119 | } | 119 | } |
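gart_info->table_mask is a dma_addr_t, which can be 32 or 64 bits wide depending on the configuration, while "%Lx" always pulls an unsigned long long off the vararg list, so the explicit cast is what keeps the format and the argument in agreement. A minimal user-space illustration of the same fix:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t demo_dma_addr_t;   /* stand-in for a 32-bit dma_addr_t */

    int main(void)
    {
        demo_dma_addr_t mask = 0xFFFFFFFFu;

        /* printf("0x%llx\n", mask);  -- undefined: 32-bit argument, 64-bit format */
        printf("0x%llx\n", (unsigned long long)mask);   /* correct */
        return 0;
    }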
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..ecac882e1d54 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = { | |||
120 | 120 | ||
121 | const static struct intel_device_info intel_pineview_info = { | 121 | const static struct intel_device_info intel_pineview_info = { |
122 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | 122 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, |
123 | .has_pipe_cxsr = 1, | 123 | .need_gfx_hws = 1, |
124 | .has_hotplug = 1, | 124 | .has_hotplug = 1, |
125 | }; | 125 | }; |
126 | 126 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc6..b4c8c0230689 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, | |||
3564 | uint32_t reloc_count = 0, i; | 3564 | uint32_t reloc_count = 0, i; |
3565 | int ret = 0; | 3565 | int ret = 0; |
3566 | 3566 | ||
3567 | if (relocs == NULL) | ||
3568 | return 0; | ||
3569 | |||
3567 | for (i = 0; i < buffer_count; i++) { | 3570 | for (i = 0; i < buffer_count; i++) { |
3568 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3571 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3569 | int unwritten; | 3572 | int unwritten; |
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3653 | struct drm_gem_object *batch_obj; | 3656 | struct drm_gem_object *batch_obj; |
3654 | struct drm_i915_gem_object *obj_priv; | 3657 | struct drm_i915_gem_object *obj_priv; |
3655 | struct drm_clip_rect *cliprects = NULL; | 3658 | struct drm_clip_rect *cliprects = NULL; |
3656 | struct drm_i915_gem_relocation_entry *relocs; | 3659 | struct drm_i915_gem_relocation_entry *relocs = NULL; |
3657 | int ret = 0, ret2, i, pinned = 0; | 3660 | int ret = 0, ret2, i, pinned = 0; |
3658 | uint64_t exec_offset; | 3661 | uint64_t exec_offset; |
3659 | uint32_t seqno, flush_domains, reloc_index; | 3662 | uint32_t seqno, flush_domains, reloc_index; |
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3722 | if (object_list[i] == NULL) { | 3725 | if (object_list[i] == NULL) { |
3723 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3726 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3724 | exec_list[i].handle, i); | 3727 | exec_list[i].handle, i); |
3728 | /* prevent error path from reading uninitialized data */ | ||
3729 | args->buffer_count = i + 1; | ||
3725 | ret = -EBADF; | 3730 | ret = -EBADF; |
3726 | goto err; | 3731 | goto err; |
3727 | } | 3732 | } |
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3730 | if (obj_priv->in_execbuffer) { | 3735 | if (obj_priv->in_execbuffer) { |
3731 | DRM_ERROR("Object %p appears more than once in object list\n", | 3736 | DRM_ERROR("Object %p appears more than once in object list\n", |
3732 | object_list[i]); | 3737 | object_list[i]); |
3738 | /* prevent error path from reading uninitialized data */ | ||
3739 | args->buffer_count = i + 1; | ||
3733 | ret = -EBADF; | 3740 | ret = -EBADF; |
3734 | goto err; | 3741 | goto err; |
3735 | } | 3742 | } |
@@ -3926,6 +3933,7 @@ err: | |||
3926 | 3933 | ||
3927 | mutex_unlock(&dev->struct_mutex); | 3934 | mutex_unlock(&dev->struct_mutex); |
3928 | 3935 | ||
3936 | pre_mutex_err: | ||
3929 | /* Copy the updated relocations out regardless of current error | 3937 | /* Copy the updated relocations out regardless of current error |
3930 | * state. Failure to update the relocs would mean that the next | 3938 | * state. Failure to update the relocs would mean that the next |
3931 | * time userland calls execbuf, it would do so with presumed offset | 3939 | * time userland calls execbuf, it would do so with presumed offset |
@@ -3940,7 +3948,6 @@ err: | |||
3940 | ret = ret2; | 3948 | ret = ret2; |
3941 | } | 3949 | } |
3942 | 3950 | ||
3943 | pre_mutex_err: | ||
3944 | drm_free_large(object_list); | 3951 | drm_free_large(object_list); |
3945 | kfree(cliprects); | 3952 | kfree(cliprects); |
3946 | 3953 | ||
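Three related hardening changes in the execbuffer error paths: relocs now starts as NULL and the copy-back helper returns early on NULL, args->buffer_count is clamped to i + 1 when validation fails mid-loop so cleanup only walks initialised entries, and the pre_mutex_err label is moved above the relocation copy-back so even failures before the mutex is taken go through the same (now NULL-safe) cleanup. A generic stand-alone sketch of the "clamp the count before jumping to cleanup" idiom, not i915 code:

    #include <stdlib.h>

    static int process(int requested)
    {
        void **objs = malloc(requested * sizeof(*objs));
        int count = requested, ret = 0, i;

        if (!objs)
            return -1;

        for (i = 0; i < count; i++) {
            objs[i] = malloc(16);
            if (!objs[i]) {
                count = i;   /* slots >= i were never initialised */
                ret = -1;
                goto err;
            }
        }
    err:
        for (i = 0; i < count; i++)
            free(objs[i]);
        free(objs);
        return ret;
    }

    int main(void) { return process(8) ? 1 : 0; }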
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..50ddf4a95c5e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
309 | if (de_iir & DE_GSE) | 309 | if (de_iir & DE_GSE) |
310 | ironlake_opregion_gse_intr(dev); | 310 | ironlake_opregion_gse_intr(dev); |
311 | 311 | ||
312 | if (de_iir & DE_PLANEA_FLIP_DONE) | ||
313 | intel_prepare_page_flip(dev, 0); | ||
314 | |||
315 | if (de_iir & DE_PLANEB_FLIP_DONE) | ||
316 | intel_prepare_page_flip(dev, 1); | ||
317 | |||
318 | if (de_iir & DE_PIPEA_VBLANK) { | ||
319 | drm_handle_vblank(dev, 0); | ||
320 | intel_finish_page_flip(dev, 0); | ||
321 | } | ||
322 | |||
323 | if (de_iir & DE_PIPEB_VBLANK) { | ||
324 | drm_handle_vblank(dev, 1); | ||
325 | intel_finish_page_flip(dev, 1); | ||
326 | } | ||
327 | |||
312 | /* check event from PCH */ | 328 | /* check event from PCH */ |
313 | if ((de_iir & DE_PCH_EVENT) && | 329 | if ((de_iir & DE_PCH_EVENT) && |
314 | (pch_iir & SDE_HOTPLUG_MASK)) { | 330 | (pch_iir & SDE_HOTPLUG_MASK)) { |
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
844 | if (!(pipeconf & PIPEACONF_ENABLE)) | 860 | if (!(pipeconf & PIPEACONF_ENABLE)) |
845 | return -EINVAL; | 861 | return -EINVAL; |
846 | 862 | ||
847 | if (IS_IRONLAKE(dev)) | ||
848 | return 0; | ||
849 | |||
850 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 863 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
851 | if (IS_I965G(dev)) | 864 | if (IS_IRONLAKE(dev)) |
865 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
866 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
867 | else if (IS_I965G(dev)) | ||
852 | i915_enable_pipestat(dev_priv, pipe, | 868 | i915_enable_pipestat(dev_priv, pipe, |
853 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 869 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
854 | else | 870 | else |
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
866 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 882 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
867 | unsigned long irqflags; | 883 | unsigned long irqflags; |
868 | 884 | ||
869 | if (IS_IRONLAKE(dev)) | ||
870 | return; | ||
871 | |||
872 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 885 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
873 | i915_disable_pipestat(dev_priv, pipe, | 886 | if (IS_IRONLAKE(dev)) |
874 | PIPE_VBLANK_INTERRUPT_ENABLE | | 887 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
875 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 888 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
889 | else | ||
890 | i915_disable_pipestat(dev_priv, pipe, | ||
891 | PIPE_VBLANK_INTERRUPT_ENABLE | | ||
892 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||
876 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 893 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
877 | } | 894 | } |
878 | 895 | ||
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1015 | { | 1032 | { |
1016 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1033 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1017 | /* enable kind of interrupts always enabled */ | 1034 | /* enable kind of interrupts always enabled */ |
1018 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; | 1035 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1036 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | ||
1019 | u32 render_mask = GT_USER_INTERRUPT; | 1037 | u32 render_mask = GT_USER_INTERRUPT; |
1020 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1038 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1021 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1039 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1022 | 1040 | ||
1023 | dev_priv->irq_mask_reg = ~display_mask; | 1041 | dev_priv->irq_mask_reg = ~display_mask; |
1024 | dev_priv->de_irq_enable_reg = display_mask; | 1042 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; |
1025 | 1043 | ||
1026 | /* should always can generate irq */ | 1044 | /* should always can generate irq */ |
1027 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1045 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
157 | adpa = I915_READ(PCH_ADPA); | 157 | adpa = I915_READ(PCH_ADPA); |
158 | 158 | ||
159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
160 | /* disable HPD first */ | ||
161 | I915_WRITE(PCH_ADPA, adpa); | ||
162 | (void)I915_READ(PCH_ADPA); | ||
160 | 163 | ||
161 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 164 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
162 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 165 | ADPA_CRT_HOTPLUG_WARMUP_10MS | |
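The hot-plug detect bits are cleared and written back before the new trigger settings are programmed, and the (void)I915_READ(PCH_ADPA) is a posting read: it forces the disable write out to the hardware instead of leaving it latent in a write buffer. Restated with comments:

    adpa &= ~ADPA_CRT_HOTPLUG_MASK;
    I915_WRITE(PCH_ADPA, adpa);     /* switch hot-plug detection off first */
    (void)I915_READ(PCH_ADPA);      /* posting read: flush the write to the
                                       hardware before re-arming detection */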
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..12775df1bbfd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1638 | case DRM_MODE_DPMS_OFF: | 1638 | case DRM_MODE_DPMS_OFF: |
1639 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 1639 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
1640 | 1640 | ||
1641 | drm_vblank_off(dev, pipe); | ||
1641 | /* Disable display plane */ | 1642 | /* Disable display plane */ |
1642 | temp = I915_READ(dspcntr_reg); | 1643 | temp = I915_READ(dspcntr_reg); |
1643 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 1644 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { |
@@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2519 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 2520 | sr_entries = roundup(sr_entries / cacheline_size, 1); |
2520 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 2521 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); |
2521 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2522 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2523 | } else { | ||
2524 | /* Turn off self refresh if both pipes are enabled */ | ||
2525 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2526 | & ~FW_BLC_SELF_EN); | ||
2522 | } | 2527 | } |
2523 | 2528 | ||
2524 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 2529 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", |
@@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2562 | srwm = 1; | 2567 | srwm = 1; |
2563 | srwm &= 0x3f; | 2568 | srwm &= 0x3f; |
2564 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2569 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2570 | } else { | ||
2571 | /* Turn off self refresh if both pipes are enabled */ | ||
2572 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2573 | & ~FW_BLC_SELF_EN); | ||
2565 | } | 2574 | } |
2566 | 2575 | ||
2567 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 2576 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
@@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2630 | if (srwm < 0) | 2639 | if (srwm < 0) |
2631 | srwm = 1; | 2640 | srwm = 1; |
2632 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | 2641 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); |
2642 | } else { | ||
2643 | /* Turn off self refresh if both pipes are enabled */ | ||
2644 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2645 | & ~FW_BLC_SELF_EN); | ||
2633 | } | 2646 | } |
2634 | 2647 | ||
2635 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 2648 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
@@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
3984 | spin_lock_irqsave(&dev->event_lock, flags); | 3997 | spin_lock_irqsave(&dev->event_lock, flags); |
3985 | work = intel_crtc->unpin_work; | 3998 | work = intel_crtc->unpin_work; |
3986 | if (work == NULL || !work->pending) { | 3999 | if (work == NULL || !work->pending) { |
4000 | if (work && !work->pending) { | ||
4001 | obj_priv = work->obj->driver_private; | ||
4002 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | ||
4003 | obj_priv, | ||
4004 | atomic_read(&obj_priv->pending_flip)); | ||
4005 | } | ||
3987 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4006 | spin_unlock_irqrestore(&dev->event_lock, flags); |
3988 | return; | 4007 | return; |
3989 | } | 4008 | } |
@@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4005 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4024 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4006 | 4025 | ||
4007 | obj_priv = work->obj->driver_private; | 4026 | obj_priv = work->obj->driver_private; |
4008 | if (atomic_dec_and_test(&obj_priv->pending_flip)) | 4027 | |
4028 | /* Initial scanout buffer will have a 0 pending flip count */ | ||
4029 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | ||
4030 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4009 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | 4031 | DRM_WAKEUP(&dev_priv->pending_flip_queue); |
4010 | schedule_work(&work->work); | 4032 | schedule_work(&work->work); |
4011 | } | 4033 | } |
@@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
4018 | unsigned long flags; | 4040 | unsigned long flags; |
4019 | 4041 | ||
4020 | spin_lock_irqsave(&dev->event_lock, flags); | 4042 | spin_lock_irqsave(&dev->event_lock, flags); |
4021 | if (intel_crtc->unpin_work) | 4043 | if (intel_crtc->unpin_work) { |
4022 | intel_crtc->unpin_work->pending = 1; | 4044 | intel_crtc->unpin_work->pending = 1; |
4045 | } else { | ||
4046 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | ||
4047 | } | ||
4023 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4048 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4024 | } | 4049 | } |
4025 | 4050 | ||
@@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4053 | /* We borrow the event spin lock for protecting unpin_work */ | 4078 | /* We borrow the event spin lock for protecting unpin_work */ |
4054 | spin_lock_irqsave(&dev->event_lock, flags); | 4079 | spin_lock_irqsave(&dev->event_lock, flags); |
4055 | if (intel_crtc->unpin_work) { | 4080 | if (intel_crtc->unpin_work) { |
4081 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
4056 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4082 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4057 | kfree(work); | 4083 | kfree(work); |
4058 | mutex_unlock(&dev->struct_mutex); | 4084 | mutex_unlock(&dev->struct_mutex); |
@@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4066 | 4092 | ||
4067 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4093 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4068 | if (ret != 0) { | 4094 | if (ret != 0) { |
4095 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
4096 | obj->driver_private); | ||
4069 | kfree(work); | 4097 | kfree(work); |
4098 | intel_crtc->unpin_work = NULL; | ||
4070 | mutex_unlock(&dev->struct_mutex); | 4099 | mutex_unlock(&dev->struct_mutex); |
4071 | return ret; | 4100 | return ret; |
4072 | } | 4101 | } |
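The flip-finish path used to decrement pending_flip unconditionally; the initial scanout buffer never had a flip queued, so its counter is already 0, the decrement underflows, and the waiter on pending_flip_queue is never woken. The new condition wakes waiters when the count is already 0 or when this decrement is the one that reaches 0; restated with comments:

    /* Initial scanout buffer: nothing was ever queued, count is still 0. */
    if (atomic_read(&obj_priv->pending_flip) == 0 ||
        atomic_dec_and_test(&obj_priv->pending_flip))   /* last outstanding flip */
        DRM_WAKEUP(&dev_priv->pending_flip_queue);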
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..b1d0acbae4e4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
611 | { | 611 | { |
612 | .ident = "Samsung SX20S", | 612 | .ident = "Samsung SX20S", |
613 | .matches = { | 613 | .matches = { |
614 | DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), | 614 | DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), |
615 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), | 615 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), |
616 | }, | 616 | }, |
617 | }, | 617 | }, |
@@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
623 | }, | 623 | }, |
624 | }, | 624 | }, |
625 | { | 625 | { |
626 | .ident = "Aspire 1810T", | ||
627 | .matches = { | ||
628 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
629 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), | ||
630 | }, | ||
631 | }, | ||
632 | { | ||
626 | .ident = "PC-81005", | 633 | .ident = "PC-81005", |
627 | .matches = { | 634 | .matches = { |
628 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | 635 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), |
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
643 | { | 650 | { |
644 | enum drm_connector_status status = connector_status_connected; | 651 | enum drm_connector_status status = connector_status_connected; |
645 | 652 | ||
646 | if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) | 653 | if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) |
647 | status = connector_status_disconnected; | 654 | status = connector_status_disconnected; |
648 | 655 | ||
649 | return status; | 656 | return status; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2345 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2345 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2346 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2346 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2347 | (1 << INTEL_ANALOG_CLONE_BIT); | 2347 | (1 << INTEL_ANALOG_CLONE_BIT); |
2348 | } else if (flags & SDVO_OUTPUT_CVBS0) { | ||
2349 | |||
2350 | sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; | ||
2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2353 | sdvo_priv->is_tv = true; | ||
2354 | intel_output->needs_tv_clock = true; | ||
2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2348 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2356 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2349 | 2357 | ||
2350 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
354 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | 354 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
355 | } | 355 | } |
356 | 356 | ||
357 | /* Who ever call radeon_fence_emit should call ring_lock and ask | ||
358 | * for enough space (today caller are ib schedule and buffer move) */ | ||
357 | void r100_fence_ring_emit(struct radeon_device *rdev, | 359 | void r100_fence_ring_emit(struct radeon_device *rdev, |
358 | struct radeon_fence *fence) | 360 | struct radeon_fence *fence) |
359 | { | 361 | { |
360 | /* Who ever call radeon_fence_emit should call ring_lock and ask | 362 | /* We have to make sure that caches are flushed before |
361 | * for enough space (today caller are ib schedule and buffer move) */ | 363 | * CPU might read something from VRAM. */ |
364 | radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
365 | radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); | ||
366 | radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); | ||
367 | radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); | ||
362 | /* Wait until IDLE & CLEAN */ | 368 | /* Wait until IDLE & CLEAN */ |
363 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); | 369 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); |
364 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); | 370 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); |
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev) | |||
3369 | 3375 | ||
3370 | void r100_fini(struct radeon_device *rdev) | 3376 | void r100_fini(struct radeon_device *rdev) |
3371 | { | 3377 | { |
3372 | r100_suspend(rdev); | ||
3373 | r100_cp_fini(rdev); | 3378 | r100_cp_fini(rdev); |
3374 | r100_wb_fini(rdev); | 3379 | r100_wb_fini(rdev); |
3375 | r100_ib_fini(rdev); | 3380 | r100_ib_fini(rdev); |
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev) | |||
3481 | if (r) { | 3486 | if (r) { |
3482 | /* Somethings want wront with the accel init stop accel */ | 3487 | /* Somethings want wront with the accel init stop accel */ |
3483 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 3488 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
3484 | r100_suspend(rdev); | ||
3485 | r100_cp_fini(rdev); | 3489 | r100_cp_fini(rdev); |
3486 | r100_wb_fini(rdev); | 3490 | r100_wb_fini(rdev); |
3487 | r100_ib_fini(rdev); | 3491 | r100_ib_fini(rdev); |
3492 | radeon_irq_kms_fini(rdev); | ||
3488 | if (rdev->flags & RADEON_IS_PCI) | 3493 | if (rdev->flags & RADEON_IS_PCI) |
3489 | r100_pci_gart_fini(rdev); | 3494 | r100_pci_gart_fini(rdev); |
3490 | radeon_irq_kms_fini(rdev); | ||
3491 | rdev->accel_working = false; | 3495 | rdev->accel_working = false; |
3492 | } | 3496 | } |
3493 | return 0; | 3497 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev) | |||
506 | 506 | ||
507 | /* DDR for all card after R300 & IGP */ | 507 | /* DDR for all card after R300 & IGP */ |
508 | rdev->mc.vram_is_ddr = true; | 508 | rdev->mc.vram_is_ddr = true; |
509 | |||
509 | tmp = RREG32(RADEON_MEM_CNTL); | 510 | tmp = RREG32(RADEON_MEM_CNTL); |
510 | if (tmp & R300_MEM_NUM_CHANNELS_MASK) { | 511 | tmp &= R300_MEM_NUM_CHANNELS_MASK; |
511 | rdev->mc.vram_width = 128; | 512 | switch (tmp) { |
512 | } else { | 513 | case 0: rdev->mc.vram_width = 64; break; |
513 | rdev->mc.vram_width = 64; | 514 | case 1: rdev->mc.vram_width = 128; break; |
515 | case 2: rdev->mc.vram_width = 256; break; | ||
516 | default: rdev->mc.vram_width = 128; break; | ||
514 | } | 517 | } |
515 | 518 | ||
516 | r100_vram_init_sizes(rdev); | 519 | r100_vram_init_sizes(rdev); |
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev) | |||
1327 | 1330 | ||
1328 | void r300_fini(struct radeon_device *rdev) | 1331 | void r300_fini(struct radeon_device *rdev) |
1329 | { | 1332 | { |
1330 | r300_suspend(rdev); | ||
1331 | r100_cp_fini(rdev); | 1333 | r100_cp_fini(rdev); |
1332 | r100_wb_fini(rdev); | 1334 | r100_wb_fini(rdev); |
1333 | r100_ib_fini(rdev); | 1335 | r100_ib_fini(rdev); |
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev) | |||
1418 | if (r) { | 1420 | if (r) { |
1419 | /* Somethings want wront with the accel init stop accel */ | 1421 | /* Somethings want wront with the accel init stop accel */ |
1420 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 1422 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
1421 | r300_suspend(rdev); | ||
1422 | r100_cp_fini(rdev); | 1423 | r100_cp_fini(rdev); |
1423 | r100_wb_fini(rdev); | 1424 | r100_wb_fini(rdev); |
1424 | r100_ib_fini(rdev); | 1425 | r100_ib_fini(rdev); |
1426 | radeon_irq_kms_fini(rdev); | ||
1425 | if (rdev->flags & RADEON_IS_PCIE) | 1427 | if (rdev->flags & RADEON_IS_PCIE) |
1426 | rv370_pcie_gart_fini(rdev); | 1428 | rv370_pcie_gart_fini(rdev); |
1427 | if (rdev->flags & RADEON_IS_PCI) | 1429 | if (rdev->flags & RADEON_IS_PCI) |
1428 | r100_pci_gart_fini(rdev); | 1430 | r100_pci_gart_fini(rdev); |
1429 | radeon_irq_kms_fini(rdev); | 1431 | radeon_agp_fini(rdev); |
1430 | rdev->accel_working = false; | 1432 | rdev->accel_working = false; |
1431 | } | 1433 | } |
1432 | return 0; | 1434 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca8..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev) | |||
389 | if (r) { | 389 | if (r) { |
390 | /* Something went wrong with the accel init, stop accel */ | 390 | /* Something went wrong with the accel init, stop accel */ |
391 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 391 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
392 | r420_suspend(rdev); | ||
393 | r100_cp_fini(rdev); | 392 | r100_cp_fini(rdev); |
394 | r100_wb_fini(rdev); | 393 | r100_wb_fini(rdev); |
395 | r100_ib_fini(rdev); | 394 | r100_ib_fini(rdev); |
395 | radeon_irq_kms_fini(rdev); | ||
396 | if (rdev->flags & RADEON_IS_PCIE) | 396 | if (rdev->flags & RADEON_IS_PCIE) |
397 | rv370_pcie_gart_fini(rdev); | 397 | rv370_pcie_gart_fini(rdev); |
398 | if (rdev->flags & RADEON_IS_PCI) | 398 | if (rdev->flags & RADEON_IS_PCI) |
399 | r100_pci_gart_fini(rdev); | 399 | r100_pci_gart_fini(rdev); |
400 | radeon_agp_fini(rdev); | 400 | radeon_agp_fini(rdev); |
401 | radeon_irq_kms_fini(rdev); | ||
402 | rdev->accel_working = false; | 401 | rdev->accel_working = false; |
403 | } | 402 | } |
404 | return 0; | 403 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 9a189072f2b9..ddf5731eba0d 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev) | |||
294 | if (r) { | 294 | if (r) { |
295 | /* Something went wrong with the accel init, stop accel */ | 295 | /* Something went wrong with the accel init, stop accel */ |
296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
297 | rv515_suspend(rdev); | ||
298 | r100_cp_fini(rdev); | 297 | r100_cp_fini(rdev); |
299 | r100_wb_fini(rdev); | 298 | r100_wb_fini(rdev); |
300 | r100_ib_fini(rdev); | 299 | r100_ib_fini(rdev); |
300 | radeon_irq_kms_fini(rdev); | ||
301 | rv370_pcie_gart_fini(rdev); | 301 | rv370_pcie_gart_fini(rdev); |
302 | radeon_agp_fini(rdev); | 302 | radeon_agp_fini(rdev); |
303 | radeon_irq_kms_fini(rdev); | ||
304 | rdev->accel_working = false; | 303 | rdev->accel_working = false; |
305 | } | 304 | } |
306 | return 0; | 305 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1b6d0001b20e..a1198d99cdf9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
1654 | rdev->cp.align_mask = 16 - 1; | 1654 | rdev->cp.align_mask = 16 - 1; |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | void r600_cp_fini(struct radeon_device *rdev) | ||
1658 | { | ||
1659 | r600_cp_stop(rdev); | ||
1660 | radeon_ring_fini(rdev); | ||
1661 | } | ||
1662 | |||
1657 | 1663 | ||
1658 | /* | 1664 | /* |
1659 | * GPU scratch registers helpers function. | 1665 | * GPU scratch registers helpers function. |
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev) | |||
1861 | return r; | 1867 | return r; |
1862 | } | 1868 | } |
1863 | r600_gpu_init(rdev); | 1869 | r600_gpu_init(rdev); |
1870 | r = r600_blit_init(rdev); | ||
1871 | if (r) { | ||
1872 | r600_blit_fini(rdev); | ||
1873 | rdev->asic->copy = NULL; | ||
1874 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
1875 | } | ||
1864 | /* pin copy shader into vram */ | 1876 | /* pin copy shader into vram */ |
1865 | if (rdev->r600_blit.shader_obj) { | 1877 | if (rdev->r600_blit.shader_obj) { |
1866 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 1878 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
@@ -2045,19 +2057,15 @@ int r600_init(struct radeon_device *rdev) | |||
2045 | r = r600_pcie_gart_init(rdev); | 2057 | r = r600_pcie_gart_init(rdev); |
2046 | if (r) | 2058 | if (r) |
2047 | return r; | 2059 | return r; |
2048 | r = r600_blit_init(rdev); | ||
2049 | if (r) { | ||
2050 | r600_blit_fini(rdev); | ||
2051 | rdev->asic->copy = NULL; | ||
2052 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
2053 | } | ||
2054 | 2060 | ||
2055 | rdev->accel_working = true; | 2061 | rdev->accel_working = true; |
2056 | r = r600_startup(rdev); | 2062 | r = r600_startup(rdev); |
2057 | if (r) { | 2063 | if (r) { |
2058 | r600_suspend(rdev); | 2064 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2065 | r600_cp_fini(rdev); | ||
2059 | r600_wb_fini(rdev); | 2066 | r600_wb_fini(rdev); |
2060 | radeon_ring_fini(rdev); | 2067 | r600_irq_fini(rdev); |
2068 | radeon_irq_kms_fini(rdev); | ||
2061 | r600_pcie_gart_fini(rdev); | 2069 | r600_pcie_gart_fini(rdev); |
2062 | rdev->accel_working = false; | 2070 | rdev->accel_working = false; |
2063 | } | 2071 | } |
@@ -2083,20 +2091,17 @@ int r600_init(struct radeon_device *rdev) | |||
2083 | 2091 | ||
2084 | void r600_fini(struct radeon_device *rdev) | 2092 | void r600_fini(struct radeon_device *rdev) |
2085 | { | 2093 | { |
2086 | /* Suspend operations */ | ||
2087 | r600_suspend(rdev); | ||
2088 | |||
2089 | r600_audio_fini(rdev); | 2094 | r600_audio_fini(rdev); |
2090 | r600_blit_fini(rdev); | 2095 | r600_blit_fini(rdev); |
2096 | r600_cp_fini(rdev); | ||
2097 | r600_wb_fini(rdev); | ||
2091 | r600_irq_fini(rdev); | 2098 | r600_irq_fini(rdev); |
2092 | radeon_irq_kms_fini(rdev); | 2099 | radeon_irq_kms_fini(rdev); |
2093 | radeon_ring_fini(rdev); | ||
2094 | r600_wb_fini(rdev); | ||
2095 | r600_pcie_gart_fini(rdev); | 2100 | r600_pcie_gart_fini(rdev); |
2101 | radeon_agp_fini(rdev); | ||
2096 | radeon_gem_fini(rdev); | 2102 | radeon_gem_fini(rdev); |
2097 | radeon_fence_driver_fini(rdev); | 2103 | radeon_fence_driver_fini(rdev); |
2098 | radeon_clocks_fini(rdev); | 2104 | radeon_clocks_fini(rdev); |
2099 | radeon_agp_fini(rdev); | ||
2100 | radeon_bo_fini(rdev); | 2105 | radeon_bo_fini(rdev); |
2101 | radeon_atombios_fini(rdev); | 2106 | radeon_atombios_fini(rdev); |
2102 | kfree(rdev->bios); | 2107 | kfree(rdev->bios); |
@@ -2900,3 +2905,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) | |||
2900 | return 0; | 2905 | return 0; |
2901 | #endif | 2906 | #endif |
2902 | } | 2907 | } |
2908 | |||
2909 | /** | ||
2910 | * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl | ||
2911 | * rdev: radeon device structure | ||
2912 | * bo: buffer object struct which userspace is waiting for idle | ||
2913 | * | ||
2914 | * Some R6XX/R7XX hardware doesn't seem to honor an HDP flush issued | ||
2915 | * through the ring buffer, which leads to rendering corruption; see | ||
2916 | * http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this, | ||
2917 | * perform the HDP flush directly by writing the register through MMIO. | ||
2918 | */ | ||
2919 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | ||
2920 | { | ||
2921 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
2922 | } | ||
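The new r600_ioctl_wait_idle() above is reached through an optional per-ASIC callback; the hook declaration, the ASIC-table entries and the guarded call site appear in the radeon.h, radeon_asic.h and radeon_gem.c hunks further down. A minimal sketch of the whole path with the three pieces pulled together (names match the patch, surrounding code elided):

/* 1) optional hook in the per-ASIC ops table (radeon.h) */
struct radeon_asic {
	/* ... */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};

/* 2) R6xx/R7xx implementation: synchronous HDP flush via MMIO (r600.c) */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

/* 3) call site in the wait-idle ioctl (radeon_gem.c): only called if the
 *    ASIC provides the hook */
	r = radeon_bo_wait(robj, NULL, false);
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);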
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 99e2c3891a7d..b1c1d3433454 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
37 | { | 37 | { |
38 | return rdev->family >= CHIP_R600 | 38 | return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) |
39 | || rdev->family == CHIP_RS600 | 39 | || rdev->family == CHIP_RS600 |
40 | || rdev->family == CHIP_RS690 | 40 | || rdev->family == CHIP_RS690 |
41 | || rdev->family == CHIP_RS740; | 41 | || rdev->family == CHIP_RS740; |
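The audio-support check now relies on the ordering of the radeon_family enum: chips from R600 up to, but not including, RV710 keep audio, plus the RS600/RS690/RS740 IGPs. A commented restatement of the new test, with that assumption made explicit (the comment is editorial, not in the patch):

	/* Assumes radeon_family is ordered: families before CHIP_RV710 in the
	 * enum keep the audio engine enabled; later parts are excluded here. */
	return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
		|| rdev->family == CHIP_RS600
		|| rdev->family == CHIP_RS690
		|| rdev->family == CHIP_RS740;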
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2d5f2bfa7201..f57480ba1355 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -661,6 +661,13 @@ struct radeon_asic { | |||
661 | void (*hpd_fini)(struct radeon_device *rdev); | 661 | void (*hpd_fini)(struct radeon_device *rdev); |
662 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 662 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
663 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 663 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
664 | /* ioctl hw specific callback. Some hw might want to perform a special | ||
665 | * operation on a specific ioctl. For instance, on wait idle some hw | ||
666 | * might want to perform an HDP flush through MMIO, as it seems that | ||
667 | * some R6XX/R7XX hw doesn't take the HDP flush into account if issued | ||
668 | * through the ring. | ||
669 | */ | ||
670 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); | ||
664 | }; | 671 | }; |
665 | 672 | ||
666 | /* | 673 | /* |
@@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev); | |||
1143 | extern void r600_cp_stop(struct radeon_device *rdev); | 1150 | extern void r600_cp_stop(struct radeon_device *rdev); |
1144 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | 1151 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); |
1145 | extern int r600_cp_resume(struct radeon_device *rdev); | 1152 | extern int r600_cp_resume(struct radeon_device *rdev); |
1153 | extern void r600_cp_fini(struct radeon_device *rdev); | ||
1146 | extern int r600_count_pipe_bits(uint32_t val); | 1154 | extern int r600_count_pipe_bits(uint32_t val); |
1147 | extern int r600_gart_clear_page(struct radeon_device *rdev, int i); | 1155 | extern int r600_gart_clear_page(struct radeon_device *rdev, int i); |
1148 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); | 1156 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2fbd2e4e9df..05ee1aeac3fd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = { | |||
117 | .hpd_fini = &r100_hpd_fini, | 117 | .hpd_fini = &r100_hpd_fini, |
118 | .hpd_sense = &r100_hpd_sense, | 118 | .hpd_sense = &r100_hpd_sense, |
119 | .hpd_set_polarity = &r100_hpd_set_polarity, | 119 | .hpd_set_polarity = &r100_hpd_set_polarity, |
120 | .ioctl_wait_idle = NULL, | ||
120 | }; | 121 | }; |
121 | 122 | ||
122 | 123 | ||
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = { | |||
176 | .hpd_fini = &r100_hpd_fini, | 177 | .hpd_fini = &r100_hpd_fini, |
177 | .hpd_sense = &r100_hpd_sense, | 178 | .hpd_sense = &r100_hpd_sense, |
178 | .hpd_set_polarity = &r100_hpd_set_polarity, | 179 | .hpd_set_polarity = &r100_hpd_set_polarity, |
180 | .ioctl_wait_idle = NULL, | ||
179 | }; | 181 | }; |
180 | 182 | ||
181 | /* | 183 | /* |
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = { | |||
219 | .hpd_fini = &r100_hpd_fini, | 221 | .hpd_fini = &r100_hpd_fini, |
220 | .hpd_sense = &r100_hpd_sense, | 222 | .hpd_sense = &r100_hpd_sense, |
221 | .hpd_set_polarity = &r100_hpd_set_polarity, | 223 | .hpd_set_polarity = &r100_hpd_set_polarity, |
224 | .ioctl_wait_idle = NULL, | ||
222 | }; | 225 | }; |
223 | 226 | ||
224 | 227 | ||
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = { | |||
267 | .hpd_fini = &r100_hpd_fini, | 270 | .hpd_fini = &r100_hpd_fini, |
268 | .hpd_sense = &r100_hpd_sense, | 271 | .hpd_sense = &r100_hpd_sense, |
269 | .hpd_set_polarity = &r100_hpd_set_polarity, | 272 | .hpd_set_polarity = &r100_hpd_set_polarity, |
273 | .ioctl_wait_idle = NULL, | ||
270 | }; | 274 | }; |
271 | 275 | ||
272 | 276 | ||
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = { | |||
323 | .hpd_fini = &rs600_hpd_fini, | 327 | .hpd_fini = &rs600_hpd_fini, |
324 | .hpd_sense = &rs600_hpd_sense, | 328 | .hpd_sense = &rs600_hpd_sense, |
325 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 329 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
330 | .ioctl_wait_idle = NULL, | ||
326 | }; | 331 | }; |
327 | 332 | ||
328 | 333 | ||
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = { | |||
370 | .hpd_fini = &rs600_hpd_fini, | 375 | .hpd_fini = &rs600_hpd_fini, |
371 | .hpd_sense = &rs600_hpd_sense, | 376 | .hpd_sense = &rs600_hpd_sense, |
372 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 377 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
378 | .ioctl_wait_idle = NULL, | ||
373 | }; | 379 | }; |
374 | 380 | ||
375 | 381 | ||
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = { | |||
421 | .hpd_fini = &rs600_hpd_fini, | 427 | .hpd_fini = &rs600_hpd_fini, |
422 | .hpd_sense = &rs600_hpd_sense, | 428 | .hpd_sense = &rs600_hpd_sense, |
423 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 429 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
430 | .ioctl_wait_idle = NULL, | ||
424 | }; | 431 | }; |
425 | 432 | ||
426 | 433 | ||
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = { | |||
463 | .hpd_fini = &rs600_hpd_fini, | 470 | .hpd_fini = &rs600_hpd_fini, |
464 | .hpd_sense = &rs600_hpd_sense, | 471 | .hpd_sense = &rs600_hpd_sense, |
465 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 472 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
473 | .ioctl_wait_idle = NULL, | ||
466 | }; | 474 | }; |
467 | 475 | ||
468 | /* | 476 | /* |
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev); | |||
504 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 512 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
505 | void r600_hpd_set_polarity(struct radeon_device *rdev, | 513 | void r600_hpd_set_polarity(struct radeon_device *rdev, |
506 | enum radeon_hpd_id hpd); | 514 | enum radeon_hpd_id hpd); |
515 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); | ||
507 | 516 | ||
508 | static struct radeon_asic r600_asic = { | 517 | static struct radeon_asic r600_asic = { |
509 | .init = &r600_init, | 518 | .init = &r600_init, |
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = { | |||
538 | .hpd_fini = &r600_hpd_fini, | 547 | .hpd_fini = &r600_hpd_fini, |
539 | .hpd_sense = &r600_hpd_sense, | 548 | .hpd_sense = &r600_hpd_sense, |
540 | .hpd_set_polarity = &r600_hpd_set_polarity, | 549 | .hpd_set_polarity = &r600_hpd_set_polarity, |
550 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
541 | }; | 551 | }; |
542 | 552 | ||
543 | /* | 553 | /* |
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = { | |||
582 | .hpd_fini = &r600_hpd_fini, | 592 | .hpd_fini = &r600_hpd_fini, |
583 | .hpd_sense = &r600_hpd_sense, | 593 | .hpd_sense = &r600_hpd_sense, |
584 | .hpd_set_polarity = &r600_hpd_set_polarity, | 594 | .hpd_set_polarity = &r600_hpd_set_polarity, |
595 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
585 | }; | 596 | }; |
586 | 597 | ||
587 | #endif | 598 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 579c8920e081..e7b19440102e 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder | |||
971 | lvds->native_mode.vdisplay); | 971 | lvds->native_mode.vdisplay); |
972 | 972 | ||
973 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); | 973 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); |
974 | if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) | 974 | lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); |
975 | lvds->panel_vcc_delay = 2000; | ||
976 | 975 | ||
977 | lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); | 976 | lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); |
978 | lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; | 977 | lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; |
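The replaced check also tested the delay for being negative; since min_t() is instantiated with u16 here, the value is treated as unsigned and only the upper clamp can ever apply. A one-line restatement of the new code with that reasoning in a comment (the u16 type is inferred from the min_t() call, not stated elsewhere in this hunk):

	/* panel_vcc_delay is handled as a 16-bit unsigned value, so a "< 0"
	 * test can never be true; only the upper bound of 2000 is kept. */
	lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);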
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 55266416fa47..2d8e5a70f284 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1343 | radeon_connector->dac_load_detect = false; | 1343 | radeon_connector->dac_load_detect = false; |
1344 | drm_connector_attach_property(&radeon_connector->base, | 1344 | drm_connector_attach_property(&radeon_connector->base, |
1345 | rdev->mode_info.load_detect_property, | 1345 | rdev->mode_info.load_detect_property, |
1346 | 1); | 1346 | radeon_connector->dac_load_detect); |
1347 | drm_connector_attach_property(&radeon_connector->base, | 1347 | drm_connector_attach_property(&radeon_connector->base, |
1348 | rdev->mode_info.tv_std_property, | 1348 | rdev->mode_info.tv_std_property, |
1349 | radeon_combios_get_tv_info(rdev)); | 1349 | radeon_combios_get_tv_info(rdev)); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0e1325e18534..db8e9a355a01 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
308 | } | 308 | } |
309 | robj = gobj->driver_private; | 309 | robj = gobj->driver_private; |
310 | r = radeon_bo_wait(robj, NULL, false); | 310 | r = radeon_bo_wait(robj, NULL, false); |
311 | /* callback hw specific functions if any */ | ||
312 | if (robj->rdev->asic->ioctl_wait_idle) | ||
313 | robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); | ||
311 | mutex_lock(&dev->struct_mutex); | 314 | mutex_lock(&dev->struct_mutex); |
312 | drm_gem_object_unreference(gobj); | 315 | drm_gem_object_unreference(gobj); |
313 | mutex_unlock(&dev->struct_mutex); | 316 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9f5418983e2a..287fcebfb4e6 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
223 | return 0; | 223 | return 0; |
224 | } | 224 | } |
225 | 225 | ||
226 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) | ||
227 | { | ||
228 | unsigned i; | ||
229 | uint32_t tmp; | ||
230 | |||
231 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
232 | /* read MC_STATUS */ | ||
233 | tmp = RREG32(0x0150); | ||
234 | if (tmp & (1 << 2)) { | ||
235 | return 0; | ||
236 | } | ||
237 | DRM_UDELAY(1); | ||
238 | } | ||
239 | return -1; | ||
240 | } | ||
241 | |||
226 | void rs400_gpu_init(struct radeon_device *rdev) | 242 | void rs400_gpu_init(struct radeon_device *rdev) |
227 | { | 243 | { |
228 | /* FIXME: HDP same place on rs400 ? */ | 244 | /* FIXME: HDP same place on rs400 ? */ |
229 | r100_hdp_reset(rdev); | 245 | r100_hdp_reset(rdev); |
230 | /* FIXME: is this correct ? */ | 246 | /* FIXME: is this correct ? */ |
231 | r420_pipes_init(rdev); | 247 | r420_pipes_init(rdev); |
232 | if (r300_mc_wait_for_idle(rdev)) { | 248 | if (rs400_mc_wait_for_idle(rdev)) { |
233 | printk(KERN_WARNING "Failed to wait MC idle while " | 249 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " |
234 | "programming pipes. Bad things might happen.\n"); | 250 | "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); |
235 | } | 251 | } |
236 | } | 252 | } |
237 | 253 | ||
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev) | |||
370 | r100_mc_stop(rdev, &save); | 386 | r100_mc_stop(rdev, &save); |
371 | 387 | ||
372 | /* Wait for mc idle */ | 388 | /* Wait for mc idle */ |
373 | if (r300_mc_wait_for_idle(rdev)) | 389 | if (rs400_mc_wait_for_idle(rdev)) |
374 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | 390 | dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); |
375 | WREG32(R_000148_MC_FB_LOCATION, | 391 | WREG32(R_000148_MC_FB_LOCATION, |
376 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | 392 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | |
377 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 393 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev) | |||
448 | 464 | ||
449 | void rs400_fini(struct radeon_device *rdev) | 465 | void rs400_fini(struct radeon_device *rdev) |
450 | { | 466 | { |
451 | rs400_suspend(rdev); | ||
452 | r100_cp_fini(rdev); | 467 | r100_cp_fini(rdev); |
453 | r100_wb_fini(rdev); | 468 | r100_wb_fini(rdev); |
454 | r100_ib_fini(rdev); | 469 | r100_ib_fini(rdev); |
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev) | |||
527 | if (r) { | 542 | if (r) { |
528 | /* Something went wrong with the accel init, stop accel */ | 543 | /* Something went wrong with the accel init, stop accel */ |
529 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 544 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
530 | rs400_suspend(rdev); | ||
531 | r100_cp_fini(rdev); | 545 | r100_cp_fini(rdev); |
532 | r100_wb_fini(rdev); | 546 | r100_wb_fini(rdev); |
533 | r100_ib_fini(rdev); | 547 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d5255751e7b3..c3818562a13e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev) | |||
610 | 610 | ||
611 | void rs600_fini(struct radeon_device *rdev) | 611 | void rs600_fini(struct radeon_device *rdev) |
612 | { | 612 | { |
613 | rs600_suspend(rdev); | ||
614 | r100_cp_fini(rdev); | 613 | r100_cp_fini(rdev); |
615 | r100_wb_fini(rdev); | 614 | r100_wb_fini(rdev); |
616 | r100_ib_fini(rdev); | 615 | r100_ib_fini(rdev); |
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev) | |||
689 | if (r) { | 688 | if (r) { |
690 | /* Something went wrong with the accel init, stop accel */ | 689 | /* Something went wrong with the accel init, stop accel */ |
691 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 690 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
692 | rs600_suspend(rdev); | ||
693 | r100_cp_fini(rdev); | 691 | r100_cp_fini(rdev); |
694 | r100_wb_fini(rdev); | 692 | r100_wb_fini(rdev); |
695 | r100_ib_fini(rdev); | 693 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index cd31da913771..06e2771aee5a 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) | |||
676 | 676 | ||
677 | void rs690_fini(struct radeon_device *rdev) | 677 | void rs690_fini(struct radeon_device *rdev) |
678 | { | 678 | { |
679 | rs690_suspend(rdev); | ||
680 | r100_cp_fini(rdev); | 679 | r100_cp_fini(rdev); |
681 | r100_wb_fini(rdev); | 680 | r100_wb_fini(rdev); |
682 | r100_ib_fini(rdev); | 681 | r100_ib_fini(rdev); |
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev) | |||
756 | if (r) { | 755 | if (r) { |
757 | /* Something went wrong with the accel init, stop accel */ | 756 | /* Something went wrong with the accel init, stop accel */ |
758 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 757 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
759 | rs690_suspend(rdev); | ||
760 | r100_cp_fini(rdev); | 758 | r100_cp_fini(rdev); |
761 | r100_wb_fini(rdev); | 759 | r100_wb_fini(rdev); |
762 | r100_ib_fini(rdev); | 760 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 62756717b044..0e1e6b8632b8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
537 | 537 | ||
538 | void rv515_fini(struct radeon_device *rdev) | 538 | void rv515_fini(struct radeon_device *rdev) |
539 | { | 539 | { |
540 | rv515_suspend(rdev); | ||
541 | r100_cp_fini(rdev); | 540 | r100_cp_fini(rdev); |
542 | r100_wb_fini(rdev); | 541 | r100_wb_fini(rdev); |
543 | r100_ib_fini(rdev); | 542 | r100_ib_fini(rdev); |
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev) | |||
615 | if (r) { | 614 | if (r) { |
616 | /* Something went wrong with the accel init, stop accel */ | 615 | /* Something went wrong with the accel init, stop accel */ |
617 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 616 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
618 | rv515_suspend(rdev); | ||
619 | r100_cp_fini(rdev); | 617 | r100_cp_fini(rdev); |
620 | r100_wb_fini(rdev); | 618 | r100_wb_fini(rdev); |
621 | r100_ib_fini(rdev); | 619 | r100_ib_fini(rdev); |
620 | radeon_irq_kms_fini(rdev); | ||
622 | rv370_pcie_gart_fini(rdev); | 621 | rv370_pcie_gart_fini(rdev); |
623 | radeon_agp_fini(rdev); | 622 | radeon_agp_fini(rdev); |
624 | radeon_irq_kms_fini(rdev); | ||
625 | rdev->accel_working = false; | 623 | rdev->accel_working = false; |
626 | } | 624 | } |
627 | return 0; | 625 | return 0; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index afd9e8213c29..5943d561fd1e 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
887 | return r; | 887 | return r; |
888 | } | 888 | } |
889 | rv770_gpu_init(rdev); | 889 | rv770_gpu_init(rdev); |
890 | r = r600_blit_init(rdev); | ||
891 | if (r) { | ||
892 | r600_blit_fini(rdev); | ||
893 | rdev->asic->copy = NULL; | ||
894 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
895 | } | ||
890 | /* pin copy shader into vram */ | 896 | /* pin copy shader into vram */ |
891 | if (rdev->r600_blit.shader_obj) { | 897 | if (rdev->r600_blit.shader_obj) { |
892 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 898 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
@@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev) | |||
1055 | r = r600_pcie_gart_init(rdev); | 1061 | r = r600_pcie_gart_init(rdev); |
1056 | if (r) | 1062 | if (r) |
1057 | return r; | 1063 | return r; |
1058 | r = r600_blit_init(rdev); | ||
1059 | if (r) { | ||
1060 | r600_blit_fini(rdev); | ||
1061 | rdev->asic->copy = NULL; | ||
1062 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
1063 | } | ||
1064 | 1064 | ||
1065 | rdev->accel_working = true; | 1065 | rdev->accel_working = true; |
1066 | r = rv770_startup(rdev); | 1066 | r = rv770_startup(rdev); |
1067 | if (r) { | 1067 | if (r) { |
1068 | rv770_suspend(rdev); | 1068 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1069 | r600_cp_fini(rdev); | ||
1069 | r600_wb_fini(rdev); | 1070 | r600_wb_fini(rdev); |
1070 | radeon_ring_fini(rdev); | 1071 | r600_irq_fini(rdev); |
1072 | radeon_irq_kms_fini(rdev); | ||
1071 | rv770_pcie_gart_fini(rdev); | 1073 | rv770_pcie_gart_fini(rdev); |
1072 | rdev->accel_working = false; | 1074 | rdev->accel_working = false; |
1073 | } | 1075 | } |
@@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev) | |||
1089 | 1091 | ||
1090 | void rv770_fini(struct radeon_device *rdev) | 1092 | void rv770_fini(struct radeon_device *rdev) |
1091 | { | 1093 | { |
1092 | rv770_suspend(rdev); | ||
1093 | |||
1094 | r600_blit_fini(rdev); | 1094 | r600_blit_fini(rdev); |
1095 | r600_cp_fini(rdev); | ||
1096 | r600_wb_fini(rdev); | ||
1095 | r600_irq_fini(rdev); | 1097 | r600_irq_fini(rdev); |
1096 | radeon_irq_kms_fini(rdev); | 1098 | radeon_irq_kms_fini(rdev); |
1097 | radeon_ring_fini(rdev); | ||
1098 | r600_wb_fini(rdev); | ||
1099 | rv770_pcie_gart_fini(rdev); | 1099 | rv770_pcie_gart_fini(rdev); |
1100 | radeon_gem_fini(rdev); | 1100 | radeon_gem_fini(rdev); |
1101 | radeon_fence_driver_fini(rdev); | 1101 | radeon_fence_driver_fini(rdev); |
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index a31e77c776ae..b8156b4893bb 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c | |||
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; | |||
179 | * | 179 | * |
180 | * Some, but not all, of these voltages have low/high limits. | 180 | * Some, but not all, of these voltages have low/high limits. |
181 | */ | 181 | */ |
182 | #define ADT7462_VOLT_COUNT 12 | 182 | #define ADT7462_VOLT_COUNT 13 |
183 | 183 | ||
184 | #define ADT7462_VENDOR 0x41 | 184 | #define ADT7462_VENDOR 0x41 |
185 | #define ADT7462_DEVICE 0x62 | 185 | #define ADT7462_DEVICE 0x62 |
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index cadcbd90ff3b..72ff2c4e757d 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c | |||
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev) | |||
851 | static int __init lm78_isa_found(unsigned short address) | 851 | static int __init lm78_isa_found(unsigned short address) |
852 | { | 852 | { |
853 | int val, save, found = 0; | 853 | int val, save, found = 0; |
854 | 854 | int port; | |
855 | /* We have to request the region in two parts because some | 855 | |
856 | boards declare base+4 to base+7 as a PNP device */ | 856 | /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
857 | if (!request_region(address, 4, "lm78")) { | 857 | * to base+7 and some base+5 to base+6. So we better request each port |
858 | pr_debug("lm78: Failed to request low part of region\n"); | 858 | * individually for the probing phase. */ |
859 | return 0; | 859 | for (port = address; port < address + LM78_EXTENT; port++) { |
860 | } | 860 | if (!request_region(port, 1, "lm78")) { |
861 | if (!request_region(address + 4, 4, "lm78")) { | 861 | pr_debug("lm78: Failed to request port 0x%x\n", port); |
862 | pr_debug("lm78: Failed to request high part of region\n"); | 862 | goto release; |
863 | release_region(address, 4); | 863 | } |
864 | return 0; | ||
865 | } | 864 | } |
866 | 865 | ||
867 | #define REALLY_SLOW_IO | 866 | #define REALLY_SLOW_IO |
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address) | |||
925 | val & 0x80 ? "LM79" : "LM78", (int)address); | 924 | val & 0x80 ? "LM79" : "LM78", (int)address); |
926 | 925 | ||
927 | release: | 926 | release: |
928 | release_region(address + 4, 4); | 927 | for (port--; port >= address; port--) |
929 | release_region(address, 4); | 928 | release_region(port, 1); |
930 | return found; | 929 | return found; |
931 | } | 930 | } |
932 | 931 | ||
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 05f9225b6f94..32d4adee73db 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c | |||
@@ -1793,17 +1793,17 @@ static int __init | |||
1793 | w83781d_isa_found(unsigned short address) | 1793 | w83781d_isa_found(unsigned short address) |
1794 | { | 1794 | { |
1795 | int val, save, found = 0; | 1795 | int val, save, found = 0; |
1796 | 1796 | int port; | |
1797 | /* We have to request the region in two parts because some | 1797 | |
1798 | boards declare base+4 to base+7 as a PNP device */ | 1798 | /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
1799 | if (!request_region(address, 4, "w83781d")) { | 1799 | * to base+7 and some base+5 to base+6. So we better request each port |
1800 | pr_debug("w83781d: Failed to request low part of region\n"); | 1800 | * individually for the probing phase. */ |
1801 | return 0; | 1801 | for (port = address; port < address + W83781D_EXTENT; port++) { |
1802 | } | 1802 | if (!request_region(port, 1, "w83781d")) { |
1803 | if (!request_region(address + 4, 4, "w83781d")) { | 1803 | pr_debug("w83781d: Failed to request port 0x%x\n", |
1804 | pr_debug("w83781d: Failed to request high part of region\n"); | 1804 | port); |
1805 | release_region(address, 4); | 1805 | goto release; |
1806 | return 0; | 1806 | } |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | #define REALLY_SLOW_IO | 1809 | #define REALLY_SLOW_IO |
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) | |||
1877 | val == 0x30 ? "W83782D" : "W83781D", (int)address); | 1877 | val == 0x30 ? "W83782D" : "W83781D", (int)address); |
1878 | 1878 | ||
1879 | release: | 1879 | release: |
1880 | release_region(address + 4, 4); | 1880 | for (port--; port >= address; port--) |
1881 | release_region(address, 4); | 1881 | release_region(port, 1); |
1882 | return found; | 1882 | return found; |
1883 | } | 1883 | } |
1884 | 1884 | ||
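Both the lm78 and w83781d probes now use the same idiom: request each I/O port of the device's extent individually, and on failure release only the ports already claimed by walking backwards from the one that failed. A minimal standalone sketch of that idiom; the helper name and the extent parameter are illustrative:

/* Illustrative sketch: claim ports one at a time so that a PNP reservation
 * covering any sub-range only blocks the ports it actually owns. */
static int probe_region_per_port(unsigned short address, unsigned int extent,
				 const char *name)
{
	int port;

	for (port = address; port < address + extent; port++) {
		if (!request_region(port, 1, name))
			goto release;
	}
	return 1;		/* all ports claimed; caller releases later */

release:
	for (port--; port >= address; port--)
		release_region(port, 1);
	return 0;
}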
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c74694345b6e..d58b94030ef3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) | |||
338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); | 338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); |
339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); | 339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); |
340 | 340 | ||
341 | /* | ||
342 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS | ||
343 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. | ||
344 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k | ||
345 | * (which conflicts w/ BAR1's memory range). | ||
346 | */ | ||
347 | static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) | ||
348 | { | ||
349 | if (pci_resource_len(dev, 0) != 8) { | ||
350 | struct resource *res = &dev->resource[0]; | ||
351 | res->end = res->start + 8 - 1; | ||
352 | dev_info(&dev->dev, "CS5536 ISA bridge bug detected " | ||
353 | "(incorrect header); workaround applied.\n"); | ||
354 | } | ||
355 | } | ||
356 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); | ||
357 | |||
341 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | 358 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, |
342 | unsigned size, int nr, const char *name) | 359 | unsigned size, int nr, const char *name) |
343 | { | 360 | { |