author		Paul Mackerras <paulus@samba.org>	2008-12-15 22:38:58 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-15 22:38:58 -0500
commit		1e1c568d6c66d1e2e345fd15e2a1ceafc5d7e33a (patch)
tree		0cf88547108a750d6eb910564ef5bf0ffb5ceef3 /drivers
parent		91cac623262c1c0cd298c5c648a8bd2b647c264d (diff)
parent		23e0e8afafd9ac065d81506524adf3339584044b (diff)
Merge branch 'merge' into next
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/battery.c | 9
-rw-r--r--  drivers/ata/Kconfig | 44
-rw-r--r--  drivers/ata/ata_piix.c | 9
-rw-r--r--  drivers/ata/pata_hpt366.c | 4
-rw-r--r--  drivers/ata/pata_ninja32.c | 9
-rw-r--r--  drivers/ata/pata_sis.c | 1
-rw-r--r--  drivers/block/pktcdvd.c | 6
-rw-r--r--  drivers/cdrom/cdrom.c | 16
-rw-r--r--  drivers/char/cp437.uni | 12
-rw-r--r--  drivers/char/serial167.c | 2
-rw-r--r--  drivers/char/vt.c | 2
-rw-r--r--  drivers/firewire/fw-ohci.c | 11
-rw-r--r--  drivers/firewire/fw-transaction.c | 3
-rw-r--r--  drivers/firewire/fw-transaction.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 639
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-highlander.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 2
-rw-r--r--  drivers/ide/Kconfig | 14
-rw-r--r--  drivers/ide/ide-io.c | 6
-rw-r--r--  drivers/ide/pmac.c | 30
-rw-r--r--  drivers/ide/sgiioc4.c | 6
-rw-r--r--  drivers/ieee1394/nodemgr.c | 2
-rw-r--r--  drivers/isdn/hardware/avm/b1isa.c | 6
-rw-r--r--  drivers/isdn/hysdn/hysdn_net.c | 4
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/message/i2o/i2o_block.c | 2
-rw-r--r--  drivers/message/i2o/iop.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 4
-rw-r--r--  drivers/mtd/devices/m25p80.c | 28
-rw-r--r--  drivers/mtd/maps/physmap.c | 26
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 8
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 1
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 1
-rw-r--r--  drivers/mtd/onenand/omap2.c | 17
-rw-r--r--  drivers/net/bnx2.c | 35
-rw-r--r--  drivers/net/bnx2.h | 6
-rw-r--r--  drivers/net/enc28j60.c | 16
-rw-r--r--  drivers/net/netx-eth.c | 2
-rw-r--r--  drivers/net/wireless/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 24
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/pci/pcie/aspm.c | 29
-rw-r--r--  drivers/pci/slot.c | 1
-rw-r--r--  drivers/rtc/rtc-ds1672.c | 6
-rw-r--r--  drivers/rtc/rtc-max6900.c | 6
-rw-r--r--  drivers/rtc/rtc-twl4030.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 12
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/serial/ioc3_serial.c | 6
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 4
-rw-r--r--  drivers/serial/s3c2440.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/video/aty/radeon_accel.c | 294
-rw-r--r--  drivers/video/aty/radeon_backlight.c | 2
-rw-r--r--  drivers/video/aty/radeon_base.c | 40
-rw-r--r--  drivers/video/aty/radeon_pm.c | 6
-rw-r--r--  drivers/video/aty/radeonfb.h | 38
-rw-r--r--  drivers/video/console/fbcon.c | 6
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.c | 4
-rw-r--r--  drivers/video/omap/omapfb_main.c | 2
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 2
70 files changed, 841 insertions, 694 deletions
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a0a178dd189c..1423b0c0cd2e 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
                 break;
         case POWER_SUPPLY_PROP_CURRENT_NOW:
                 val->intval = battery->current_now * 1000;
-                /* if power units are mW, convert to mA by
-                   dividing by current voltage (mV/1000) */
-                if (!battery->power_unit) {
-                        if (battery->voltage_now) {
-                                val->intval /= battery->voltage_now;
-                                val->intval *= 1000;
-                        } else
-                                val->intval = -1;
-                }
                 break;
         case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
         case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 78fbec8ceda0..421b7c71e72d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,7 +153,7 @@ config SATA_PROMISE
           If unsure, say N.
 
 config SATA_SX4
-        tristate "Promise SATA SX4 support"
+        tristate "Promise SATA SX4 support (Experimental)"
         depends on PCI && EXPERIMENTAL
         help
           This option enables support for Promise Serial ATA SX4.
@@ -219,8 +219,8 @@ config PATA_ACPI
           otherwise unsupported hardware.
 
 config PATA_ALI
-        tristate "ALi PATA support (Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "ALi PATA support"
+        depends on PCI
         help
           This option enables support for the ALi ATA interfaces
           found on the many ALi chipsets.
@@ -263,7 +263,7 @@ config PATA_ATIIXP
           If unsure, say N.
 
 config PATA_CMD640_PCI
-        tristate "CMD640 PCI PATA support (Very Experimental)"
+        tristate "CMD640 PCI PATA support (Experimental)"
         depends on PCI && EXPERIMENTAL
         help
           This option enables support for the CMD640 PCI IDE
@@ -291,8 +291,8 @@ config PATA_CS5520
           If unsure, say N.
 
 config PATA_CS5530
-        tristate "CS5530 PATA support (Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "CS5530 PATA support"
+        depends on PCI
         help
           This option enables support for the Cyrix/NatSemi/AMD CS5530
           companion chip used with the MediaGX/Geode processor family.
@@ -309,8 +309,8 @@ config PATA_CS5535
           If unsure, say N.
 
 config PATA_CS5536
-        tristate "CS5536 PATA support (Experimental)"
-        depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+        tristate "CS5536 PATA support"
+        depends on PCI && X86 && !X86_64
         help
           This option enables support for the AMD CS5536
           companion chip used with the Geode LX processor family.
@@ -363,7 +363,7 @@ config PATA_HPT37X
           If unsure, say N.
 
 config PATA_HPT3X2N
-        tristate "HPT 372N/302N PATA support (Very Experimental)"
+        tristate "HPT 372N/302N PATA support (Experimental)"
         depends on PCI && EXPERIMENTAL
         help
           This option enables support for the N variant HPT PATA
@@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
           problems with DMA on this chipset.
 
 config PATA_ISAPNP
-        tristate "ISA Plug and Play PATA support (Experimental)"
-        depends on EXPERIMENTAL && ISAPNP
+        tristate "ISA Plug and Play PATA support"
+        depends on ISAPNP
         help
           This option enables support for ISA plug & play ATA
           controllers such as those found on old soundcards.
@@ -498,8 +498,8 @@ config PATA_NINJA32
           If unsure, say N.
 
 config PATA_NS87410
-        tristate "Nat Semi NS87410 PATA support (Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "Nat Semi NS87410 PATA support"
+        depends on PCI
         help
           This option enables support for the National Semiconductor
           NS87410 PCI-IDE controller.
@@ -507,8 +507,8 @@ config PATA_NS87410
           If unsure, say N.
 
 config PATA_NS87415
-        tristate "Nat Semi NS87415 PATA support (Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "Nat Semi NS87415 PATA support"
+        depends on PCI
         help
           This option enables support for the National Semiconductor
           NS87415 PCI-IDE controller.
@@ -544,8 +544,8 @@ config PATA_PCMCIA
           If unsure, say N.
 
 config PATA_PDC_OLD
-        tristate "Older Promise PATA controller support (Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "Older Promise PATA controller support"
+        depends on PCI
         help
           This option enables support for the Promise 20246, 20262, 20263,
           20265 and 20267 adapters.
@@ -559,7 +559,7 @@ config PATA_QDI
           Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
 
 config PATA_RADISYS
-        tristate "RADISYS 82600 PATA support (Very Experimental)"
+        tristate "RADISYS 82600 PATA support (Experimental)"
         depends on PCI && EXPERIMENTAL
         help
           This option enables support for the RADISYS 82600
@@ -586,8 +586,8 @@ config PATA_RZ1000
           If unsure, say N.
 
 config PATA_SC1200
-        tristate "SC1200 PATA support (Very Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "SC1200 PATA support"
+        depends on PCI
         help
           This option enables support for the NatSemi/AMD SC1200 SoC
           companion chip used with the Geode processor family.
@@ -620,8 +620,8 @@ config PATA_SIL680
           If unsure, say N.
 
 config PATA_SIS
-        tristate "SiS PATA support (Experimental)"
-        depends on PCI && EXPERIMENTAL
+        tristate "SiS PATA support"
+        depends on PCI
         help
           This option enables support for SiS PATA controllers
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d6d97d8f3fa4..c11936e13dd3 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
          * matching is necessary because dmi_system_id.matches is
          * limited to four entries.
          */
-        if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
+        if (dmi_get_system_info(DMI_SYS_VENDOR) &&
+            dmi_get_system_info(DMI_PRODUCT_NAME) &&
+            dmi_get_system_info(DMI_PRODUCT_VERSION) &&
+            dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
+            dmi_get_system_info(DMI_BOARD_VENDOR) &&
+            dmi_get_system_info(DMI_BOARD_NAME) &&
+            dmi_get_system_info(DMI_BOARD_VERSION) &&
+            !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
             !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
             !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
             !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index f2b83eabc7c7..a098ba8eaab6 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -382,10 +382,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
         /* PCI clocking determines the ATA timing values to use */
         /* info_hpt366 is safe against re-entry so we can scribble on it */
         switch((reg1 & 0x700) >> 8) {
-                case 5:
+                case 9:
                         hpriv = &hpt366_40;
                         break;
-                case 9:
+                case 5:
                         hpriv = &hpt366_25;
                         break;
                 default:
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 4e466eae8b46..4dd9a3b031e4 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -44,7 +44,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_ninja32"
-#define DRV_VERSION "0.1.1"
+#define DRV_VERSION "0.1.3"
 
 
 /**
@@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
                 return rc;
         pci_set_master(dev);
 
-        /* Set up the register mappings */
+        /* Set up the register mappings. We use the I/O mapping as only the
+           older chips also have MMIO on BAR 1 */
         base = host->iomap[0];
         if (!base)
                 return -ENOMEM;
@@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
 #endif
 
 static const struct pci_device_id ninja32[] = {
+        { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+        { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+        { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+        { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         { },
 };
 
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index d34236611752..e4be55e047f6 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
         { 0x5513, 0x1043, 0x1107 },        /* ASUS A6K */
         { 0x5513, 0x1734, 0x105F },        /* FSC Amilo A1630 */
         { 0x5513, 0x1071, 0x8640 },        /* EasyNote K5305 */
-        { 0x5513, 0x1039, 0x5513 },        /* Targa Visionary 1000 */
         /* end marker */
         { 0, }
 };
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f20bf359b84f..dc7a8c352da2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
 {
         if (class_pktcdvd) {
-                pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL,
+                pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
                                         "%s", pd->name);
                 if (IS_ERR(pd->dev))
                         pd->dev = NULL;
@@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
         return 0;
 
 out_mem:
-        blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+        blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
         /* This is safe: open() is still holding a reference. */
         module_put(THIS_MODULE);
         return ret;
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
         pkt_debugfs_dev_remove(pd);
         pkt_sysfs_dev_remove(pd);
 
-        blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
+        blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
 
         remove_proc_entry(pd->name, pkt_proc);
         DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d16b02423d61..7d2e91cccb13 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2081,10 +2081,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
         if (!q)
                 return -ENXIO;
 
-        rq = blk_get_request(q, READ, GFP_KERNEL);
-        if (!rq)
-                return -ENOMEM;
-
         cdi->last_sense = 0;
 
         while (nframes) {
@@ -2096,9 +2092,17 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
                 len = nr * CD_FRAMESIZE_RAW;
 
+                rq = blk_get_request(q, READ, GFP_KERNEL);
+                if (!rq) {
+                        ret = -ENOMEM;
+                        break;
+                }
+
                 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
-                if (ret)
+                if (ret) {
+                        blk_put_request(rq);
                         break;
+                }
 
                 rq->cmd[0] = GPCMD_READ_CD;
                 rq->cmd[1] = 1 << 2;
@@ -2124,6 +2128,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
                 if (blk_rq_unmap_user(bio))
                         ret = -EFAULT;
+                blk_put_request(rq);
 
                 if (ret)
                         break;
@@ -2133,7 +2138,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                 ubuf += len;
         }
 
-        blk_put_request(rq);
         return ret;
 }
 
diff --git a/drivers/char/cp437.uni b/drivers/char/cp437.uni
index 1f06889a96b9..bc6163484f62 100644
--- a/drivers/char/cp437.uni
+++ b/drivers/char/cp437.uni
@@ -27,7 +27,7 @@
 0x0c U+2640
 0x0d U+266a
 0x0e U+266b
-0x0f U+263c
+0x0f U+263c U+00a4
 0x10 U+25b6 U+25ba
 0x11 U+25c0 U+25c4
 0x12 U+2195
@@ -55,7 +55,7 @@
 0x24 U+0024
 0x25 U+0025
 0x26 U+0026
-0x27 U+0027
+0x27 U+0027 U+00b4
 0x28 U+0028
 0x29 U+0029
 0x2a U+002a
@@ -84,7 +84,7 @@
 0x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3
 0x42 U+0042
 0x43 U+0043 U+00a9
-0x44 U+0044
+0x44 U+0044 U+00d0
 0x45 U+0045 U+00c8 U+00ca U+00cb
 0x46 U+0046
 0x47 U+0047
@@ -140,7 +140,7 @@
 0x79 U+0079 U+00fd
 0x7a U+007a
 0x7b U+007b
-0x7c U+007c U+00a5
+0x7c U+007c U+00a6
 0x7d U+007d
 0x7e U+007e
 #
@@ -263,10 +263,10 @@
 0xe8 U+03a6 U+00d8
 0xe9 U+0398
 0xea U+03a9 U+2126
-0xeb U+03b4
+0xeb U+03b4 U+00f0
 0xec U+221e
 0xed U+03c6 U+00f8
-0xee U+03b5
+0xee U+03b5 U+2208
 0xef U+2229
 0xf0 U+2261
 0xf1 U+00b1
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3b23270eaa65..a8f15e6be594 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
                                                   TTY_OVERRUN);
                                         /*
                                            If the flip buffer itself is
-                                           overflowing, we still loose
+                                           overflowing, we still lose
                                            the next incoming character.
                                          */
                                         if (tty_buffer_request_room(tty, 1) !=
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index a5af6072e2b3..008176edbd64 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2274,7 +2274,7 @@ rescan_last_byte:
                                 continue; /* nothing to display */
                         }
                         /* Glyph not found */
-                        if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
+                        if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
                                 /* In legacy mode use the glyph we get by a 1:1 mapping.
                                    This would make absolutely no sense with Unicode in mind,
                                    but do this for ASCII characters since a font may lack
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 46610b090415..ab9c01e462ef 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -974,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
                 packet->ack = RCODE_SEND_ERROR;
                 return -1;
         }
+        packet->payload_bus = payload_bus;
 
         d[2].req_count = cpu_to_le16(packet->payload_length);
         d[2].data_address = cpu_to_le32(payload_bus);
@@ -1025,7 +1026,6 @@ static int handle_at_packet(struct context *context,
         struct driver_data *driver_data;
         struct fw_packet *packet;
         struct fw_ohci *ohci = context->ohci;
-        dma_addr_t payload_bus;
         int evt;
 
         if (last->transfer_status == 0)
@@ -1038,9 +1038,8 @@ static int handle_at_packet(struct context *context,
                 /* This packet was cancelled, just continue. */
                 return 1;
 
-        payload_bus = le32_to_cpu(last->data_address);
-        if (payload_bus != 0)
-                dma_unmap_single(ohci->card.device, payload_bus,
+        if (packet->payload_bus)
+                dma_unmap_single(ohci->card.device, packet->payload_bus,
                                  packet->payload_length, DMA_TO_DEVICE);
 
         evt = le16_to_cpu(last->transfer_status) & 0x1f;
@@ -1697,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
         if (packet->ack != 0)
                 goto out;
 
+        if (packet->payload_bus)
+                dma_unmap_single(ohci->card.device, packet->payload_bus,
+                                 packet->payload_length, DMA_TO_DEVICE);
+
         log_ar_at_event('T', packet->speed, packet->header, 0x20);
         driver_data->packet = NULL;
         packet->ack = RCODE_CANCELLED;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 022ac4fabb67..2884f876397b 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
         packet->speed = speed;
         packet->generation = generation;
         packet->ack = 0;
+        packet->payload_bus = 0;
 }
 
 /**
@@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
                 BUG();
                 return;
         }
+
+        response->payload_bus = 0;
 }
 EXPORT_SYMBOL(fw_fill_response);
 
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index aed7dbb17cda..839466f0a795 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -27,6 +27,7 @@
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
 #include <linux/timer.h>
+#include <linux/types.h>
 #include <linux/workqueue.h>
 
 #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
@@ -153,6 +154,7 @@ struct fw_packet {
         size_t header_length;
         void *payload;
         size_t payload_length;
+        dma_addr_t payload_bus;
         u32 timestamp;
 
         /*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba89b42f790a..553dd4bc3075 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -847,9 +847,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
          * and the registers being closely associated.
          *
          * According to chipset errata, on the 965GM, MSI interrupts may
-         * be lost or delayed
+         * be lost or delayed, but we use them anyways to avoid
+         * stuck interrupts on some machines.
          */
-        if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
+        if (!IS_I945G(dev) && !IS_I945GM(dev))
                 pci_enable_msi(dev->pdev);
 
         intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0ec..adc972cc6bfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
          * List of objects currently involved in rendering from the
          * ringbuffer.
          *
+         * Includes buffers having the contents of their GPU caches
+         * flushed, not necessarily primitives.  last_rendering_seqno
+         * represents when the rendering involved will be completed.
+         *
          * A reference is held on the buffer while on this list.
          */
         struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
          * still have a write_domain which needs to be flushed before
          * unbinding.
          *
+         * last_rendering_seqno is 0 while an object is in this list.
+         *
          * A reference is held on the buffer while on this list.
          */
         struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
          * LRU list of objects which are not in the ringbuffer and
          * are ready to unbind, but are still in the GTT.
          *
+         * last_rendering_seqno is 0 while an object is in this list.
+         *
          * A reference is not held on the buffer while on this list,
          * as merely being GTT-bound shouldn't prevent its being
          * freed, and we'll pull it off the list in the free path.
@@ -371,8 +379,8 @@ struct drm_i915_gem_object {
         uint32_t agp_type;
 
         /**
-         * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-         * GEM_DOMAIN_CPU is not in the object's read domain.
+         * If present, while GEM_DOMAIN_CPU is in the read domain this array
+         * flags which individual pages are valid.
          */
         uint8_t *page_cpu_valid;
 };
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
         /** Time at which this request was emitted, in jiffies. */
         unsigned long emitted_jiffies;
 
-        /** Cache domains that were flushed at the start of the request. */
-        uint32_t flush_domains;
-
         struct list_head list;
 };
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..ad672d854828 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-                           uint32_t read_domains,
-                           uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-                                 uint64_t offset,
-                                 uint64_t size,
-                                 uint32_t read_domains,
-                                 uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
-                    struct drm_file *file_priv,
-                    uint32_t read_domains,
-                    uint32_t write_domain);
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+                                  uint32_t read_domains,
+                                  uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+                                             int write);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+                                             int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+                                                     uint64_t offset,
+                                                     uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
         mutex_lock(&dev->struct_mutex);
 
-        ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-                                               I915_GEM_DOMAIN_CPU, 0);
+        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+                                                        args->size);
         if (ret != 0) {
                 drm_gem_object_unreference(obj);
                 mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                 mutex_unlock(&dev->struct_mutex);
                 return ret;
         }
-        ret = i915_gem_set_domain(obj, file_priv,
-                                  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
         if (ret)
                 goto fail;
 
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 
         mutex_lock(&dev->struct_mutex);
 
-        ret = i915_gem_set_domain(obj, file_priv,
-                                  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
         if (ret) {
                 mutex_unlock(&dev->struct_mutex);
                 return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
         struct drm_i915_gem_set_domain *args = data;
         struct drm_gem_object *obj;
+        uint32_t read_domains = args->read_domains;
+        uint32_t write_domain = args->write_domain;
         int ret;
 
         if (!(dev->driver->driver_features & DRIVER_GEM))
                 return -ENODEV;
 
+        /* Only handle setting domains to types used by the CPU. */
+        if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+                return -EINVAL;
+
+        if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+                return -EINVAL;
+
+        /* Having something in the write domain implies it's in the read
+         * domain, and only that read domain.  Enforce that in the request.
+         */
+        if (write_domain != 0 && read_domains != write_domain)
+                return -EINVAL;
+
         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
         if (obj == NULL)
                 return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         mutex_lock(&dev->struct_mutex);
 #if WATCH_BUF
         DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-                 obj, obj->size, args->read_domains, args->write_domain);
+                 obj, obj->size, read_domains, write_domain);
 #endif
-        ret = i915_gem_set_domain(obj, file_priv,
-                                  args->read_domains, args->write_domain);
+        if (read_domains & I915_GEM_DOMAIN_GTT) {
+                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+                /* Silently promote "you're not bound, there was nothing to do"
+                 * to success, since the client was just asking us to
+                 * make sure everything was done.
+                 */
+                if (ret == -EINVAL)
+                        ret = 0;
+        } else {
+                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+        }
+
         drm_gem_object_unreference(obj);
         mutex_unlock(&dev->struct_mutex);
         return ret;
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
         obj_priv = obj->driver_private;
 
         /* Pinned buffers may be scanout, so flush the cache */
-        if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-                i915_gem_clflush_object(obj);
-                drm_agp_chipset_flush(dev);
-        }
+        if (obj_priv->pin_count)
+                i915_gem_object_flush_cpu_write_domain(obj);
+
         drm_gem_object_unreference(obj);
         mutex_unlock(&dev->struct_mutex);
         return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
         struct drm_device *dev = obj->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
         /* Move from whatever list we were on to the tail of execution. */
         list_move_tail(&obj_priv->list,
                        &dev_priv->mm.active_list);
+        obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+        BUG_ON(!obj_priv->active);
+        list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+        obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
         else
                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+        obj_priv->last_rendering_seqno = 0;
         if (obj_priv->active) {
                 obj_priv->active = 0;
                 drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
         request->seqno = seqno;
         request->emitted_jiffies = jiffies;
-        request->flush_domains = flush_domains;
         was_empty = list_empty(&dev_priv->mm.request_list);
         list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+        /* Associate any objects on the flushing list matching the write
+         * domain we're flushing with our flush.
+         */
+        if (flush_domains != 0) {
+                struct drm_i915_gem_object *obj_priv, *next;
+
+                list_for_each_entry_safe(obj_priv, next,
+                                         &dev_priv->mm.flushing_list, list) {
+                        struct drm_gem_object *obj = obj_priv->obj;
+
+                        if ((obj->write_domain & flush_domains) ==
+                            obj->write_domain) {
+                                obj->write_domain = 0;
+                                i915_gem_object_move_to_active(obj, seqno);
+                        }
+                }
+
+        }
+
         if (was_empty && !dev_priv->mm.suspended)
                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
         return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
                          __func__, request->seqno, obj);
 #endif
 
-                if (obj->write_domain != 0) {
-                        list_move_tail(&obj_priv->list,
-                                       &dev_priv->mm.flushing_list);
-                } else {
+                if (obj->write_domain != 0)
+                        i915_gem_object_move_to_flushing(obj);
+                else
                         i915_gem_object_move_to_inactive(obj);
-                }
-        }
-
-        if (request->flush_domains != 0) {
-                struct drm_i915_gem_object *obj_priv, *next;
-
-                /* Clear the write domain and activity from any buffers
-                 * that are just waiting for a flush matching the one retired.
-                 */
-                list_for_each_entry_safe(obj_priv, next,
-                                         &dev_priv->mm.flushing_list, list) {
-                        struct drm_gem_object *obj = obj_priv->obj;
-
-                        if (obj->write_domain & request->flush_domains) {
-                                obj->write_domain = 0;
-                                i915_gem_object_move_to_inactive(obj);
-                        }
-                }
-
         }
 }
 
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         int ret;
 
-        /* If there are writes queued to the buffer, flush and
-         * create a new seqno to wait for.
+        /* This function only exists to support waiting for existing rendering,
+         * not for emitting required flushes.
          */
-        if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-                uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
-                DRM_INFO("%s: flushing object %p from write domain %08x\n",
-                         __func__, obj, write_domain);
-#endif
-                i915_gem_flush(dev, 0, write_domain);
-
-                i915_gem_object_move_to_active(obj);
-                obj_priv->last_rendering_seqno = i915_add_request(dev,
-                                                                  write_domain);
-                BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
-                DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
-        }
+        BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
         /* If there is rendering queued on the buffer being evicted, wait for
          * it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
                 return -EINVAL;
         }
 
-        /* Wait for any rendering to complete
-         */
-        ret = i915_gem_object_wait_rendering(obj);
-        if (ret) {
-                DRM_ERROR("wait_rendering failed: %d\n", ret);
-                return ret;
-        }
-
         /* Move the object to the CPU domain to ensure that
          * any possible CPU writes while it's not in the GTT
          * are flushed when we go to remap it. This will
         * also ensure that all pending GPU writes are finished
          * before we unbind.
          */
-        ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-                                         I915_GEM_DOMAIN_CPU);
+        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
         if (ret) {
-                DRM_ERROR("set_domain failed: %d\n", ret);
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("set_domain failed: %d\n", ret);
                 return ret;
         }
 
@@ -1083,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
 }
 
 static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+        int ret;
+
+        for (;;) {
+                ret = i915_gem_evict_something(dev);
+                if (ret != 0)
+                        break;
+        }
+        if (ret == -ENOMEM)
+                return 0;
+        return ret;
+}
+
+static int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1168,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
                 ret = i915_gem_evict_something(dev);
                 if (ret != 0) {
-                        DRM_ERROR("Failed to evict a buffer %d\n", ret);
+                        if (ret != -ERESTARTSYS)
+                                DRM_ERROR("Failed to evict a buffer %d\n", ret);
                         return ret;
                 }
                 goto search_free;
@@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
         drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
 }
 
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+        uint32_t seqno;
+
+        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+                return;
+
+        /* Queue the GPU write cache flushing we need. */
+        i915_gem_flush(dev, 0, obj->write_domain);
+        seqno = i915_add_request(dev, obj->write_domain);
+        obj->write_domain = 0;
+        i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+        if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+                return;
+
+        /* No actual flushing is required for the GTT write domain.  Writes
+         * to it immediately go to main memory as far as we know, so there's
+         * no chipset flush.  It also doesn't land in render cache.
+         */
+        obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+
+        if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+                return;
+
+        i915_gem_clflush_object(obj);
+        drm_agp_chipset_flush(dev);
+        obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        int ret;
+
+        /* Not valid to be called on unbound objects. */
+        if (obj_priv->gtt_space == NULL)
+                return -EINVAL;
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+        /* Wait on any GPU rendering and flushing to occur. */
+        ret = i915_gem_object_wait_rendering(obj);
+        if (ret != 0)
+                return ret;
+
+        /* If we're writing through the GTT domain, then CPU and GPU caches
+         * will need to be invalidated at next use.
+         */
+        if (write)
+                obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+        i915_gem_object_flush_cpu_write_domain(obj);
+
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
+         */
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+        obj->read_domains |= I915_GEM_DOMAIN_GTT;
+        if (write) {
+                obj->write_domain = I915_GEM_DOMAIN_GTT;
+                obj_priv->dirty = 1;
+        }
+
+        return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+        struct drm_device *dev = obj->dev;
+        int ret;
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+        /* Wait on any GPU rendering and flushing to occur. */
+        ret = i915_gem_object_wait_rendering(obj);
+        if (ret != 0)
+                return ret;
+
+        i915_gem_object_flush_gtt_write_domain(obj);
+
+        /* If we have a partially-valid cache of the object in the CPU,
+         * finish invalidating it and free the per-page flags.
+         */
+        i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+        /* Flush the CPU cache if it's still invalid. */
+        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+                i915_gem_clflush_object(obj);
+                drm_agp_chipset_flush(dev);
+
+                obj->read_domains |= I915_GEM_DOMAIN_CPU;
+        }
+
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
+         */
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+        /* If we're writing through the CPU, then the GPU read domains will
+         * need to be invalidated at next use.
+         */
+        if (write) {
+                obj->read_domains &= I915_GEM_DOMAIN_CPU;
+                obj->write_domain = I915_GEM_DOMAIN_CPU;
+        }
+
+        return 0;
+}
+
 /*
  * Set the next domain for the specified object. This
  * may not actually perform the necessary flushing/invaliding though,
@@ -1339,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
  *                MI_FLUSH
  *                drm_agp_chipset_flush
  */
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
-                           uint32_t read_domains,
-                           uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+                                  uint32_t read_domains,
+                                  uint32_t write_domain)
 {
         struct drm_device *dev = obj->dev;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
         uint32_t invalidate_domains = 0;
         uint32_t flush_domains = 0;
-        int ret;
+
+        BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+        BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
                          __func__, flush_domains, invalidate_domains);
 #endif
-                /*
-                 * If we're invaliding the CPU cache and flushing a GPU cache,
-                 * then pause for rendering so that the GPU caches will be
-                 * flushed before the cpu cache is invalidated
-                 */
-                if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-                    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
-                                       I915_GEM_DOMAIN_GTT))) {
-                        ret = i915_gem_object_wait_rendering(obj);
-                        if (ret)
-                                return ret;
-                }
                 i915_gem_clflush_object(obj);
         }
 
         if ((write_domain | flush_domains) != 0)
                 obj->write_domain = write_domain;
-
-        /* If we're invalidating the CPU domain, clear the per-page CPU
-         * domain list as well.
-         */
-        if (obj_priv->page_cpu_valid != NULL &&
-            (write_domain != 0 ||
-             read_domains & I915_GEM_DOMAIN_CPU)) {
-                drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-                         DRM_MEM_DRIVER);
-                obj_priv->page_cpu_valid = NULL;
-        }
         obj->read_domains = read_domains;
 
         dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
                  obj->read_domains, obj->write_domain,
                  dev->invalidate_domains, dev->flush_domains);
 #endif
-        return 0;
 }
 
 /**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
  *
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-                                 uint64_t offset,
-                                 uint64_t size,
-                                 uint32_t read_domains,
-                                 uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
+        struct drm_device *dev = obj->dev;
         struct drm_i915_gem_object *obj_priv = obj->driver_private;
-        int ret, i;
 
-        if (obj->read_domains & I915_GEM_DOMAIN_CPU)
-                return 0;
+        if (!obj_priv->page_cpu_valid)
+                return;
 
-        if (read_domains != I915_GEM_DOMAIN_CPU ||
-            write_domain != 0)
-                return i915_gem_object_set_domain(obj,
-                                                  read_domains, write_domain);
+        /* If we're partially in the CPU read domain, finish moving it in.
+         */
+        if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+                int i;
 
-        /* Wait on any GPU rendering to the object to be flushed. */
+                for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+                        if (obj_priv->page_cpu_valid[i])
+                                continue;
+                        drm_clflush_pages(obj_priv->page_list + i, 1);
+                }
+                drm_agp_chipset_flush(dev);
+        }
+
+        /* Free the page_cpu_valid mappings which are now stale, whether
+         * or not we've got I915_GEM_DOMAIN_CPU.
+         */
+        drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+                 DRM_MEM_DRIVER);
+        obj_priv->page_cpu_valid = NULL;
+}
+
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid.  The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+                                          uint64_t offset, uint64_t size)
+{
+        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+        int i, ret;
+
+        if (offset == 0 && size == obj->size)
+                return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+        i915_gem_object_flush_gpu_write_domain(obj);
+        /* Wait on any GPU rendering and flushing to occur. */
         ret = i915_gem_object_wait_rendering(obj);
-        if (ret)
+        if (ret != 0)
                 return ret;
+        i915_gem_object_flush_gtt_write_domain(obj);
 
+        /* If we're already fully in the CPU read domain, we're done. */
+        if (obj_priv->page_cpu_valid == NULL &&
+            (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+                return 0;
+
+        /* Otherwise, create/clear the per-page CPU read domain flag if we're
+         * newly adding I915_GEM_DOMAIN_CPU
+         */
         if (obj_priv->page_cpu_valid == NULL) {
                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
                                                       DRM_MEM_DRIVER);
-        }
+                if (obj_priv->page_cpu_valid == NULL)
+                        return -ENOMEM;
+        } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+                memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
 
         /* Flush the cache on any pages that are still invalid from the CPU's
          * perspective.
          */
-        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+             i++) {
                 if (obj_priv->page_cpu_valid[i])
                         continue;
 
@@ -1472,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                 obj_priv->page_cpu_valid[i] = 1;
         }
 
-        return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-        uint32_t flush_domains = dev->flush_domains;
-
-        /*
-         * Now that all the buffers are synced to the proper domains,
-         * flush and invalidate the collected domains
+        /* It should now be out of any other write domains, and we can update
+         * the domain values for our changes.
         */
-        if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                         dev->invalidate_domains,
-                         dev->flush_domains);
-#endif
-                i915_gem_flush(dev,
-                               dev->invalidate_domains,
-                               dev->flush_domains);
-                dev->invalidate_domains = 0;
-                dev->flush_domains = 0;
-        }
+        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-        return flush_domains;
+        obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+        return 0;
 }
 
 /**
@@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                         return -EINVAL;
                 }
 
+                if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+                    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+                        DRM_ERROR("reloc with read/write CPU domains: "
+                                  "obj %p target %d offset %d "
+                                  "read %08x write %08x",
+                                  obj, reloc.target_handle,
+                                  (int) reloc.offset,
+                                  reloc.read_domains,
+                                  reloc.write_domain);
+                        return -EINVAL;
+                }
+
                 if (reloc.write_domain && target_obj->pending_write_domain &&
                     reloc.write_domain != target_obj->pending_write_domain) {
                         DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                         continue;
                 }
 
-                /* Now that we're going to actually write some data in,
-                 * make sure that any rendering using this buffer's contents
-                 * is completed.
-                 */
-                i915_gem_object_wait_rendering(obj);
-
-                /* As we're writing through the gtt, flush
-                 * any CPU writes before we write the relocations
-                 */
-                if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-                        i915_gem_clflush_object(obj);
-                        drm_agp_chipset_flush(dev);
-                        obj->write_domain = 0;
+                ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+                if (ret != 0) {
+                        drm_gem_object_unreference(target_obj);
+                        i915_gem_object_unpin(obj);
+                        return -EINVAL;
                 }
 
                 /* Map the page containing the relocation we're going to
@@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
         int ret, i, pinned = 0;
         uint64_t exec_offset;
         uint32_t seqno, flush_domains;
+        int pin_tries;
 
 #if WATCH_EXEC
         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                 return -EBUSY;
         }
 
-        /* Zero the gloabl flush/invalidate flags. These
-         * will be modified as each object is bound to the
-         * gtt
-         */
-        dev->invalidate_domains = 0;
-        dev->flush_domains = 0;
-
-        /* Look up object handles and perform the relocations */
+        /* Look up object handles */
         for (i = 0; i < args->buffer_count; i++) {
1839 object_list[i] = drm_gem_object_lookup(dev, file_priv, 2003 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1840 exec_list[i].handle); 2004 exec_list[i].handle);
@@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1844 ret = -EBADF; 2008 ret = -EBADF;
1845 goto err; 2009 goto err;
1846 } 2010 }
2011 }
1847 2012
1848 object_list[i]->pending_read_domains = 0; 2013 /* Pin and relocate */
1849 object_list[i]->pending_write_domain = 0; 2014 for (pin_tries = 0; ; pin_tries++) {
1850 ret = i915_gem_object_pin_and_relocate(object_list[i], 2015 ret = 0;
1851 file_priv, 2016 for (i = 0; i < args->buffer_count; i++) {
1852 &exec_list[i]); 2017 object_list[i]->pending_read_domains = 0;
1853 if (ret) { 2018 object_list[i]->pending_write_domain = 0;
1854 DRM_ERROR("object bind and relocate failed %d\n", ret); 2019 ret = i915_gem_object_pin_and_relocate(object_list[i],
2020 file_priv,
2021 &exec_list[i]);
2022 if (ret)
2023 break;
2024 pinned = i + 1;
2025 }
2026 /* success */
2027 if (ret == 0)
2028 break;
2029
2030 /* error other than GTT full, or we've already tried again */
2031 if (ret != -ENOMEM || pin_tries >= 1) {
2032 DRM_ERROR("Failed to pin buffers %d\n", ret);
1855 goto err; 2033 goto err;
1856 } 2034 }
1857 pinned = i + 1; 2035
2036 /* unpin all of our buffers */
2037 for (i = 0; i < pinned; i++)
2038 i915_gem_object_unpin(object_list[i]);
2039
2040 /* evict everyone we can from the aperture */
2041 ret = i915_gem_evict_everything(dev);
2042 if (ret)
2043 goto err;
1858 } 2044 }
1859 2045
1860 /* Set the pending read domains for the batch buffer to COMMAND */ 2046 /* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1864 2050
1865 i915_verify_inactive(dev, __FILE__, __LINE__); 2051 i915_verify_inactive(dev, __FILE__, __LINE__);
1866 2052
2053 /* Zero the global flush/invalidate flags. These
2054 * will be modified as new domains are computed
2055 * for each object
2056 */
2057 dev->invalidate_domains = 0;
2058 dev->flush_domains = 0;
2059
1867 for (i = 0; i < args->buffer_count; i++) { 2060 for (i = 0; i < args->buffer_count; i++) {
1868 struct drm_gem_object *obj = object_list[i]; 2061 struct drm_gem_object *obj = object_list[i];
1869 2062
1870 /* make sure all previous memory operations have passed */ 2063 /* Compute new gpu domains and update invalidate/flush */
1871 ret = i915_gem_object_set_domain(obj, 2064 i915_gem_object_set_to_gpu_domain(obj,
1872 obj->pending_read_domains, 2065 obj->pending_read_domains,
1873 obj->pending_write_domain); 2066 obj->pending_write_domain);
1874 if (ret)
1875 goto err;
1876 } 2067 }
1877 2068
1878 i915_verify_inactive(dev, __FILE__, __LINE__); 2069 i915_verify_inactive(dev, __FILE__, __LINE__);
1879 2070
1880 /* Flush/invalidate caches and chipset buffer */ 2071 if (dev->invalidate_domains | dev->flush_domains) {
1881 flush_domains = i915_gem_dev_set_domain(dev); 2072#if WATCH_EXEC
2073 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2074 __func__,
2075 dev->invalidate_domains,
2076 dev->flush_domains);
2077#endif
2078 i915_gem_flush(dev,
2079 dev->invalidate_domains,
2080 dev->flush_domains);
2081 if (dev->flush_domains)
2082 (void)i915_add_request(dev, dev->flush_domains);
2083 }
1882 2084
1883 i915_verify_inactive(dev, __FILE__, __LINE__); 2085 i915_verify_inactive(dev, __FILE__, __LINE__);
1884 2086
@@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1898 ~0); 2100 ~0);
1899#endif 2101#endif
1900 2102
1901 (void)i915_add_request(dev, flush_domains);
1902
1903 /* Exec the batchbuffer */ 2103 /* Exec the batchbuffer */
1904 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 2104 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1905 if (ret) { 2105 if (ret) {
@@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1927 i915_file_priv->mm.last_gem_seqno = seqno; 2127 i915_file_priv->mm.last_gem_seqno = seqno;
1928 for (i = 0; i < args->buffer_count; i++) { 2128 for (i = 0; i < args->buffer_count; i++) {
1929 struct drm_gem_object *obj = object_list[i]; 2129 struct drm_gem_object *obj = object_list[i];
1930 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1931 2130
1932 i915_gem_object_move_to_active(obj); 2131 i915_gem_object_move_to_active(obj, seqno);
1933 obj_priv->last_rendering_seqno = seqno;
1934#if WATCH_LRU 2132#if WATCH_LRU
1935 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 2133 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1936#endif 2134#endif
@@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2061 /* XXX - flush the CPU caches for pinned objects 2259 /* XXX - flush the CPU caches for pinned objects
2062 * as the X server doesn't manage domains yet 2260 * as the X server doesn't manage domains yet
2063 */ 2261 */
2064 if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2262 i915_gem_object_flush_cpu_write_domain(obj);
2065 i915_gem_clflush_object(obj);
2066 drm_agp_chipset_flush(dev);
2067 obj->write_domain = 0;
2068 }
2069 args->offset = obj_priv->gtt_offset; 2263 args->offset = obj_priv->gtt_offset;
2070 drm_gem_object_unreference(obj); 2264 drm_gem_object_unreference(obj);
2071 mutex_unlock(&dev->struct_mutex); 2265 mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2167 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2361 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2168} 2362}
2169 2363
2170static int
2171i915_gem_set_domain(struct drm_gem_object *obj,
2172 struct drm_file *file_priv,
2173 uint32_t read_domains,
2174 uint32_t write_domain)
2175{
2176 struct drm_device *dev = obj->dev;
2177 int ret;
2178 uint32_t flush_domains;
2179
2180 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2181
2182 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2183 if (ret)
2184 return ret;
2185 flush_domains = i915_gem_dev_set_domain(obj->dev);
2186
2187 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2188 (void) i915_add_request(dev, flush_domains);
2189
2190 return 0;
2191}
2192
2193/** Unbinds all objects that are on the given buffer list. */ 2364/** Unbinds all objects that are on the given buffer list. */
2194static int 2365static int
2195i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2366i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
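The execbuffer rework above retries the whole pin pass once when the GTT fills up: on -ENOMEM it unpins everything it managed to pin, evicts the aperture, and tries again. A standalone sketch of that retry shape (all names here are hypothetical stand-ins, not the driver's API):

#include <errno.h>

/* Try to pin `count` buffers; on aperture exhaustion (-ENOMEM), back out
 * and retry exactly once after evicting. Returns 0 on success. */
static int pin_all(int count, int (*pin)(int), void (*unpin)(int),
		   int (*evict_all)(void))
{
	int pinned = 0, tries, ret, i;

	for (tries = 0; ; tries++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = pin(i);
			if (ret)
				break;
			pinned = i + 1;
		}
		if (ret == 0)
			return 0;			/* all pinned */
		if (ret != -ENOMEM || tries >= 1)
			return ret;			/* hard failure */
		for (i = 0; i < pinned; i++)		/* back out */
			unpin(i);
		pinned = 0;
		ret = evict_all();			/* make room */
		if (ret)
			return ret;
	}
}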
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	    dcc & DCC_CHANNEL_XOR_DISABLE) {
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_I965GM(dev) || IS_GM45(dev)) {
-		/* GM965 only does bit 11-based channel
-		 * randomization
+	} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+		   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+		/* GM965/GM45 does either bit 11 or bit 17
+		 * swizzling.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 		swizzle_y = I915_BIT_6_SWIZZLE_9_11;
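For context on what the swizzle constants above select: "bit 6 swizzling" means the memory controller XORs higher physical-address bits into bit 6, so software reading a tiled buffer through the CPU must apply the same XOR. A hedged sketch of the 9_10_11 variant (illustration only, not the driver's code):

#include <stdint.h>

/* Flip bit 6 by the XOR of bits 9, 10 and 11, mirroring what the
 * dual-channel interleave hardware does to addresses. */
static uint64_t swizzle_bit6_9_10_11(uint64_t addr)
{
	uint64_t bit6 = ((addr >> 9) ^ (addr >> 10) ^ (addr >> 11)) & 1;

	return addr ^ (bit6 << 6);
}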
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e6..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
 #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
 #define DCC_ADDRESSING_MODE_MASK			(3 << 0)
 #define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
 
 /** 965 MCH register controlling DRAM channel configuration */
 #define C0DRB3			0x10206
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 7a183789be97..3bbb871b25d5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
 	atomic_t swi_emitted;
 	int vblank_crtc;
 	uint32_t irq_enable_reg;
-	int irq_enabled;
 	uint32_t r500_disp_irq_reg;
 
 	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 97c0599fdb1e..99be11418ac2 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
 	else
 		dev_priv->irq_enable_reg &= ~mask;
 
-	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	if (!dev->irq_enabled)
+		RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
 }
 
 static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
 	else
 		dev_priv->r500_disp_irq_reg &= ~mask;
 
-	RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+	if (!dev->irq_enabled)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
 }
 
 int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -355,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
-	dev_priv->irq_enabled = 0;
-
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
 		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	/* Disable *all* interrupts */
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index f4d22ae9d294..e5a8dae4a289 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -92,7 +92,7 @@ static void highlander_i2c_setup(struct highlander_i2c_dev *dev)
 static void smbus_write_data(u8 *src, u16 *dst, int len)
 {
 	for (; len > 1; len -= 2) {
-		*dst++ = be16_to_cpup((u16 *)src);
+		*dst++ = be16_to_cpup((__be16 *)src);
 		src += 2;
 	}
 
@@ -103,7 +103,7 @@ static void smbus_write_data(u8 *src, u16 *dst, int len)
 static void smbus_read_data(u16 *src, u8 *dst, int len)
 {
 	for (; len > 1; len -= 2) {
-		*(u16 *)dst = cpu_to_be16p(src++);
+		*(__be16 *)dst = cpu_to_be16p(src++);
 		dst += 2;
 	}
 
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index dcf2045b5222..0bdb2d7f0570 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -486,7 +486,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
 
 	if (cmd->type == MSP_TWI_CMD_WRITE ||
 	    cmd->type == MSP_TWI_CMD_WRITE_READ) {
-		__be64 tmp = cpu_to_be64p((u64 *)cmd->write_data);
+		u64 tmp = be64_to_cpup((__be64 *)cmd->write_data);
 		tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
 		dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
 		pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
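The one-line i2c-pmcmsp fix above matters because the write buffer holds big-endian bytes: the old code converted *to* big-endian and then shifted, which discards the wrong bytes on little-endian hosts. A userspace demonstration of the corrected order (be64toh standing in for be64_to_cpup):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 3-byte payload as it sits in the controller buffer. */
	uint8_t buf[8] = { 0x12, 0x34, 0x56, 0, 0, 0, 0, 0 };
	int write_len = 3, max_len = 8;
	uint64_t raw, tmp;

	memcpy(&raw, buf, sizeof(raw));
	tmp = be64toh(raw);			/* wire order -> host order */
	tmp >>= (max_len - write_len) * 8;	/* drop the unused bytes */
	printf("0x%06llx\n", (unsigned long long)tmp);	/* prints 0x123456 */
	return 0;
}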
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 6d7401772a8f..e6857e01d1ba 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
 
 endif
 
+# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
 config BLK_DEV_IDE_PMAC
 	tristate "PowerMac on-board IDE support"
 	depends on PPC_PMAC && IDE=y
 	select IDE_TIMINGS
+	select BLK_DEV_IDEDMA_PCI
 	help
 	  This driver provides support for the on-board IDE controller on
 	  most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
 	  CD-ROM on hda. This option changes this to more natural hda for
 	  hard disk and hdc for CD-ROM.
 
-config BLK_DEV_IDEDMA_PMAC
-	bool "PowerMac IDE DMA support"
-	depends on BLK_DEV_IDE_PMAC
-	select BLK_DEV_IDEDMA_PCI
-	help
-	  This option allows the driver for the on-board IDE controller on
-	  Power Macintoshes and PowerBooks to use DMA (direct memory access)
-	  to transfer data to and from memory.  Saying Y is safe and improves
-	  performance.
-
 config BLK_DEV_IDE_AU1XXX
 	bool "IDE for AMD Alchemy Au1200"
 	depends on SOC_AU1200
@@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
 endif
 
 config BLK_DEV_IDEDMA
-	def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
+	def_bool BLK_DEV_IDEDMA_SFF || \
 		 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 
 endif # IDE
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7d275b2af3eb..cc35d6dbd410 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -208,8 +208,10 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 		 */
 		if (drive->hwif->dma_ops == NULL)
 			break;
-		if (drive->dev_flags & IDE_DFLAG_USING_DMA)
-			ide_set_dma(drive);
+		/*
+		 * TODO: respect IDE_DFLAG_USING_DMA
+		 */
+		ide_set_dma(drive);
 		break;
 	}
 
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 2e19d6298536..7c481bb56fab 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
 	struct macio_dev		*mdev;
 	u32				timings[4];
 	volatile u32 __iomem *		*kauai_fcr;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	/* Those fields are duplicating what is in hwif. We currently
 	 * can't use the hwif ones because of some assumptions that are
 	 * beeing done by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
 	 */
 	volatile struct dbdma_regs __iomem *	dma_regs;
 	struct dbdma_cmd*			dma_table_cpu;
-#endif
-
 } pmac_ide_hwif_t;
 
 enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
 #define KAUAI_FCR_UATA_RESET_N		0x00000002
 #define KAUAI_FCR_UATA_ENABLE		0x00000001
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /* Rounded Multiword DMA timings
  *
  * I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
 static void pmac_ide_selectproc(ide_drive_t *drive);
 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
 
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-
 #define PMAC_IDE_REG(x) \
 	((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
 
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	pmac_ide_do_update_timings(drive);
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /*
  * Calculate KeyLargo ATA/66 UDMA timings
  */
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
 		    drive->name, speed & 0xf, *timings);
 #endif
 }
-#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
 
 static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 {
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 	tl[0] = *timings;
 	tl[1] = *timings2;
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	if (speed >= XFER_UDMA_0) {
 		if (pmif->kind == controller_kl_ata4)
 			ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 			ret = -1;
 	} else
 		set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
 	if (ret)
 		return;
 
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
 	.chipset		= ide_pmac,
 	.tp_ops			= &pmac_tp_ops,
 	.port_ops		= &pmac_ide_port_ops,
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	.dma_ops		= &pmac_dma_ops,
-#endif
 	.host_flags		= IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
 				  IDE_HFLAG_POST_SET_MODE |
 				  IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 	pmif->regbase = regbase;
 	pmif->irq = irq;
 	pmif->kauai_fcr = NULL;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+
 	if (macio_resource_count(mdev) >= 2) {
 		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
 			printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 			pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
 	} else
 		pmif->dma_regs = NULL;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
 	dev_set_drvdata(&mdev->ofdev.dev, pmif);
 
 	memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	base = ioremap(rbase, rlen);
 	pmif->regbase = (unsigned long) base + 0x2000;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	pmif->dma_regs = base + 0x1000;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 	pmif->kauai_fcr = base;
 	pmif->irq = pdev->irq;
 
@@ -1434,8 +1419,6 @@ out:
 	return error;
 }
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /*
  * pmac_ide_build_dmatable builds the DBDMA command list
  * for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
 
 	return 0;
 }
-#else
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
-				       const struct ide_port_info *d)
-{
-	return -EOPNOTSUPP;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 
 module_init(pmac_ide_probe);
 
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index 7defa0ae2014..a687a7dfea6f 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -550,7 +550,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
 	.dma_timeout		= ide_dma_timeout,
 };
 
-static const struct ide_port_info sgiioc4_port_info __devinitdata = {
+static const struct ide_port_info sgiioc4_port_info __devinitconst = {
 	.name			= DRV_NAME,
 	.chipset		= ide_pci,
 	.init_dma		= ide_dma_sgiioc4,
@@ -633,7 +633,7 @@ out:
 	return ret;
 }
 
-int
+int __devinit
 ioc4_ide_attach_one(struct ioc4_driver_data *idd)
 {
 	/* PCI-RT does not bring out IDE connection.
@@ -645,7 +645,7 @@ ioc4_ide_attach_one(struct ioc4_driver_data *idd)
 	return pci_init_sgiioc4(idd->idd_pdev);
 }
 
-static struct ioc4_submodule ioc4_ide_submodule = {
+static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
 	.is_name	= "IOC4_ide",
 	.is_owner	= THIS_MODULE,
 	.is_probe	= ioc4_ide_attach_one,
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 9e39f73282ee..d333ae22459c 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1685,6 +1685,7 @@ static int nodemgr_host_thread(void *data)
 		g = get_hpsb_generation(host);
 		for (i = 0; i < 4 ; i++) {
 			msleep_interruptible(63);
+			try_to_freeze();
 			if (kthread_should_stop())
 				goto exit;
 
@@ -1725,6 +1726,7 @@ static int nodemgr_host_thread(void *data)
 		/* Sleep 3 seconds */
 		for (i = 3000/200; i; i--) {
 			msleep_interruptible(200);
+			try_to_freeze();
 			if (kthread_should_stop())
 				goto exit;
 
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 1e288eeb5e2a..6461a32bc838 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -233,10 +233,8 @@ static void __exit b1isa_exit(void)
 	int i;
 
 	for (i = 0; i < MAX_CARDS; i++) {
-		if (!io[i])
-			break;
-
-		b1isa_remove(&isa_dev[i]);
+		if (isa_dev[i].resource[0].start)
+			b1isa_remove(&isa_dev[i]);
 	}
 	unregister_capi_driver(&capi_driver_b1isa);
 }
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index cfa8fa5e44ab..3f2a0a20c19b 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -83,12 +83,12 @@ net_open(struct net_device *dev)
 
 	/* Fill in the MAC-level header (if not already set) */
 	if (!card->mac_addr[0]) {
-		for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++)
+		for (i = 0; i < ETH_ALEN; i++)
 			dev->dev_addr[i] = 0xfc;
 		if ((in_dev = dev->ip_ptr) != NULL) {
 			struct in_ifaddr *ifa = in_dev->ifa_list;
 			if (ifa != NULL)
-				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long));
+				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
 		}
 	} else
 		memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a63161aec487..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
-		rs->seg_boundary_mask = -1;
+		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	if (!rs->bounce_pfn)
 		rs->bounce_pfn = -1;
 }
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 84bdc2ee69e6..a443e136dc41 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
  * @req: the request to prepare
  *
  * Allocate the necessary i2o_block_request struct and connect it to
- * the request. This is needed that we not loose the SG list later on.
+ * the request. This is needed that we not lose the SG list later on.
  *
  * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
  */
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index be2b5926d26c..6e53a30bfd38 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
 /**
  * i2o_msg_get_wait - obtain an I2O message from the IOP
  * @c: I2O controller
- * @msg: pointer to a I2O message pointer
  * @wait: how long to wait until timeout
  *
  * This function waits up to wait seconds for a message slot to be
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8c389d606c30..3ee698ad8599 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -254,7 +254,11 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
 		return 1;
 
 	*paddr = pte_pfn(pte) << PAGE_SHIFT;
+#ifdef CONFIG_HUGETLB_PAGE
 	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
+	*pageshift = PAGE_SHIFT;
+#endif
 	return 0;
 
 err:
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 76a76751da36..6659b2275c0c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -37,9 +37,9 @@
 #define	OPCODE_NORM_READ	0x03	/* Read data bytes (low frequency) */
 #define	OPCODE_FAST_READ	0x0b	/* Read data bytes (high frequency) */
 #define	OPCODE_PP		0x02	/* Page program (up to 256 bytes) */
 #define	OPCODE_BE_4K		0x20	/* Erase 4KiB block */
 #define	OPCODE_BE_32K		0x52	/* Erase 32KiB block */
-#define	OPCODE_BE		0xc7	/* Erase whole flash block */
+#define	OPCODE_CHIP_ERASE	0xc7	/* Erase whole flash chip */
 #define	OPCODE_SE		0xd8	/* Sector erase (usually 64KiB) */
 #define	OPCODE_RDID		0x9f	/* Read JEDEC ID */
 
@@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
  *
  * Returns 0 if successful, non-zero otherwise.
  */
-static int erase_block(struct m25p *flash)
+static int erase_chip(struct m25p *flash)
 {
 	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
 	      flash->spi->dev.bus_id, __func__,
@@ -181,7 +181,7 @@ static int erase_block(struct m25p *flash)
 	write_enable(flash);
 
 	/* Set up command buffer. */
-	flash->command[0] = OPCODE_BE;
+	flash->command[0] = OPCODE_CHIP_ERASE;
 
 	spi_write(flash->spi, flash->command, 1);
 
@@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 	mutex_lock(&flash->lock);
 
-	/* REVISIT in some cases we could speed up erasing large regions
-	 * by using OPCODE_SE instead of OPCODE_BE_4K
-	 */
-
-	/* now erase those sectors */
-	if (len == flash->mtd.size && erase_block(flash)) {
+	/* whole-chip erase? */
+	if (len == flash->mtd.size && erase_chip(flash)) {
 		instr->state = MTD_ERASE_FAILED;
 		mutex_unlock(&flash->lock);
 		return -EIO;
+
+	/* REVISIT in some cases we could speed up erasing large regions
+	 * by using OPCODE_SE instead of OPCODE_BE_4K.  We may have set up
+	 * to use "small sector erase", but that's not always optimal.
+	 */
+
+	/* "sector"-at-a-time erase */
 	} else {
 		while (len) {
 			if (erase_sector(flash, addr)) {
@@ -574,10 +577,11 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
 	for (tmp = 0, info = m25p_data;
 	     tmp < ARRAY_SIZE(m25p_data);
 	     tmp++, info++) {
-		if (info->jedec_id == jedec)
-			if (ext_jedec != 0 && info->ext_id != ext_jedec)
+		if (info->jedec_id == jedec) {
+			if (info->ext_id != 0 && info->ext_id != ext_jedec)
 				continue;
 			return info;
+		}
 	}
 	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
 	return NULL;
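The erase path above now distinguishes a whole-chip request from everything else. The dispatch, reduced to its skeleton (hypothetical helpers, not the driver's functions):

/* Whole-device requests get the single chip-erase opcode; anything
 * smaller is erased one sector at a time. */
static int erase_range(unsigned long addr, unsigned long len,
		       unsigned long chip_size, unsigned long sector_size,
		       int (*chip_erase)(void),
		       int (*sector_erase)(unsigned long))
{
	if (len == chip_size)
		return chip_erase();

	while (len) {
		if (sector_erase(addr))
			return -1;
		addr += sector_size;
		len -= sector_size;
	}
	return 0;
}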
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 42d844f8f6bf..dfbf3f270cea 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -19,7 +19,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/concat.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define MAX_RESOURCES		4
 
@@ -27,7 +27,6 @@ struct physmap_flash_info {
 	struct mtd_info		*mtd[MAX_RESOURCES];
 	struct mtd_info		*cmtd;
 	struct map_info		map[MAX_RESOURCES];
-	struct resource		*res;
 #ifdef CONFIG_MTD_PARTITIONS
 	int			nr_parts;
 	struct mtd_partition	*parts;
@@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
 #endif
 			map_destroy(info->mtd[i]);
 		}
-
-		if (info->map[i].virt != NULL)
-			iounmap(info->map[i].virt);
-	}
-
-	if (info->res != NULL) {
-		release_resource(info->res);
-		kfree(info->res);
 	}
-
 	return 0;
 }
 
@@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
 	if (physmap_data == NULL)
 		return -ENODEV;
 
-	info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
+			    GFP_KERNEL);
 	if (info == NULL) {
 		err = -ENOMEM;
 		goto err_out;
@@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
 		       (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
 		       (unsigned long long)dev->resource[i].start);
 
-		info->res = request_mem_region(dev->resource[i].start,
-					       dev->resource[i].end - dev->resource[i].start + 1,
-					       dev->dev.bus_id);
-		if (info->res == NULL) {
+		if (!devm_request_mem_region(&dev->dev,
+					     dev->resource[i].start,
+					     dev->resource[i].end - dev->resource[i].start + 1,
+					     dev->dev.bus_id)) {
 			dev_err(&dev->dev, "Could not reserve memory region\n");
 			err = -ENOMEM;
 			goto err_out;
@@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
 		info->map[i].bankwidth = physmap_data->width;
 		info->map[i].set_vpp = physmap_data->set_vpp;
 
-		info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
+		info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
+						 info->map[i].size);
 		if (info->map[i].virt == NULL) {
 			dev_err(&dev->dev, "Failed to ioremap flash region\n");
 			err = EIO;
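The physmap conversion above is the standard devm pattern: every allocation and mapping is registered against the device, so remove() and the error paths stop freeing anything by hand. The probe shape, as a hedged kernel-style sketch (all names are placeholders):

struct example_info {
	void __iomem *base;
};

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct example_info *info;

	if (!res)
		return -ENODEV;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     res->end - res->start + 1, pdev->name))
		return -EBUSY;

	info->base = devm_ioremap(&pdev->dev, res->start,
				  res->end - res->start + 1);
	if (!info->base)
		return -EIO;

	platform_set_drvdata(pdev, info);
	return 0;	/* nothing to undo: devm frees it all on detach */
}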
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 024e3fffd4bb..a83192f80eba 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
 	ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
 
 #ifdef CONFIG_MTD_OF_PARTS
-	if (ret == 0)
-		ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
-					      flash_np, &fun->parts);
+	if (ret == 0) {
+		ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
+		if (ret < 0)
+			goto err;
+	}
 #endif
 	if (ret > 0)
 		ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 75c899039023..9bd6c9ac8443 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
 	}
 
 	lpcctl = pci_resource_start(pdev, 0);
+	pci_dev_put(pdev);
 
 	if (!request_region(lpcctl, 4, driver_name)) {
 		err = -EBUSY;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c0fa9c9edf08..15f0a26730ae 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
 
 static struct pxa3xx_nand_flash stm2GbX16 = {
 	.timing		= &stm2GbX16_timing,
+	.cmdset		= &largepage_cmdset,
 	.page_per_block	= 64,
 	.page_size	= 2048,
 	.flash_width	= 16,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index e39b21d3e168..a7e4d985f5ef 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -32,19 +32,18 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/mach/flash.h>
-#include <asm/arch/gpmc.h>
-#include <asm/arch/onenand.h>
-#include <asm/arch/gpio.h>
-#include <asm/arch/pm.h>
+#include <mach/gpmc.h>
+#include <mach/onenand.h>
+#include <mach/gpio.h>
+#include <mach/pm.h>
 
-#include <linux/dma-mapping.h>
-#include <asm/dma-mapping.h>
-#include <asm/arch/dma.h>
+#include <mach/dma.h>
 
-#include <asm/arch/board.h>
+#include <mach/board.h>
 
 #define DRIVER_NAME "omap2-onenand"
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d07e3f148951..a1a3d0e5d2b4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	return 0;
 }
 
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	u32 msi_ctrl;
+
+	if (bnx2_has_work(bnapi)) {
+		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+			return;
+
+		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+		}
+	}
+
+	bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
-		if (unlikely(work_done >= budget))
-			break;
-
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
 		 */
 		bnapi->last_status_idx = sblk->status_idx;
+
+		if (unlikely(work_done >= budget))
+			break;
+
 		rmb();
 		if (likely(!bnx2_has_work(bnapi))) {
 			netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
 		bp->bnx2_napi[i].last_status_idx = 0;
 
+	bp->idle_chk_status_idx = 0xffff;
+
 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
 
 	/* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
 	if (atomic_read(&bp->intr_sem) != 0)
 		goto bnx2_restart_timer;
 
+	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+	     BNX2_FLAG_USING_MSI)
+		bnx2_chk_missed_msi(bp);
+
 	bnx2_send_heart_beat(bp);
 
 	bp->stats_blk->stat_FwRxDrop =
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d95340160..0b032c3c7b61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
  * pci_config_l definition
  * offset: 0000
  */
+#define BNX2_PCICFG_MSI_CONTROL				0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE			 (1L<<16)
+
 #define BNX2_PCICFG_MISC_CONFIG				0x00000068
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP	 (1L<<2)
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP	 (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
 
 	u8			num_tx_rings;
 	u8			num_rx_rings;
+
+	u32			idle_chk_status_idx;
+
 };
 
 #define REG_RD(bp, offset)					\
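The bnx2 changes above work around lost edge-triggered MSIs: the timer remembers the last status-block index it saw, and if work is still pending a full tick later with the index unmoved, it assumes the interrupt was missed and replays it. The detection predicate, isolated as a sketch (hypothetical helpers stand in for the register access):

/* Called once per timer tick. `status_idx` is the last index the NAPI
 * handler consumed; if it has not advanced while work is pending, the
 * edge was lost and the handler must be kicked by hand. */
static unsigned int idle_chk_status_idx = 0xffff;

static void check_missed_irq(unsigned int status_idx, int has_work,
			     void (*kick_handler)(void))
{
	if (has_work && status_idx == idle_chk_status_idx)
		kick_handler();		/* toggle enable + replay */
	idle_chk_status_idx = status_idx;
}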
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbbe..c414554ac321 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
 	return erxrdpt;
 }
 
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+	if (ptr + RSV_SIZE > RXEND_INIT)
+		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+	else
+		return ptr + RSV_SIZE;
+}
+
 static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
 {
 	u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		skb->dev = ndev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		/* copy the packet from the receive buffer */
-		enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
-				  len, skb_put(skb, len));
+		enc28j60_mem_read(priv,
+				  rx_packet_start(priv->next_pk_ptr),
+				  len, skb_put(skb, len));
 		if (netif_msg_pktdata(priv))
 			dump_packet(__func__, skb->len, skb->data);
 		skb->protocol = eth_type_trans(skb, ndev);
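The enc28j60 fix above is pure ring arithmetic: packet data begins RSV_SIZE bytes past the ring pointer, and that sum can run past the end of the RX buffer, so it must wrap back to the start. A worked userspace example (the constants are the chip's usual RX ring defaults, used here only for illustration):

#include <stdint.h>
#include <stdio.h>

#define RXSTART_INIT	0x0000
#define RXEND_INIT	0x19ff
#define RSV_SIZE	6	/* next-packet pointer + status vector */

static uint16_t rx_packet_start(uint16_t ptr)
{
	if (ptr + RSV_SIZE > RXEND_INIT)
		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
	return ptr + RSV_SIZE;
}

int main(void)
{
	printf("0x%04x\n", rx_packet_start(0x19fc));	/* wraps to 0x0002 */
	printf("0x%04x\n", rx_packet_start(0x0100));	/* plain 0x0106 */
	return 0;
}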
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d21..b289a0a2b945 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
 	priv->xmac_base = priv->xc->xmac_base;
 	priv->sram_base = priv->xc->sram_base;
 
+	spin_lock_init(&priv->lock);
+
 	ret = pfifo_request(PFIFO_MASK(priv->id));
 	if (ret) {
 		printk("unable to request PFIFO\n");
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a7..7a9f901d4ff6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
 		return 0;
 	ipw_send_disassociate(data, 0);
+	netif_carrier_off(priv->net_dev);
 	return 1;
 }
 
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
 	u16 remaining_bytes;
 	int fc;
 
+	if (!(priv->status & STATUS_ASSOCIATED))
+		goto drop;
+
 	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90c..01a845851338 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
 	priv->num_stations = 0;
 	memset(priv->stations, 0, sizeof(priv->stations));
 
+	/* clean ucode key table bit map */
+	priv->ucode_key_table = 0;
+
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5c..26f7084d3011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 		if (!test_and_set_bit(i, &priv->ucode_key_table))
 			return i;
 
-	return -1;
+	return WEP_INVALID_OFFSET;
 }
 
 int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
 	/* else, we are overriding an existing key => no need to allocated room
 	 * in uCode. */
 
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new key");
+
 	priv->stations[sta_id].sta.key.key_flags = key_flags;
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 {
 	unsigned long flags;
 	__le16 key_flags = 0;
+	int ret;
 
 	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 	/* else, we are overriding an existing key => no need to allocated room
 	 * in uCode. */
 
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new key");
+
 	priv->stations[sta_id].sta.key.key_flags = key_flags;
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
+	ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
-	return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+	return ret;
 }
 
 static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
 	/* else, we are overriding an existing key => no need to allocated room
 	 * in uCode. */
 
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new key");
+
 	/* This copy is acutally not needed: we get the key with each TX */
 	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
 
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
 		return 0;
 	}
 
+	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+		IWL_WARNING("Removing wrong key %d 0x%x\n",
+			    keyconf->keyidx, key_flags);
+		spin_unlock_irqrestore(&priv->sta_lock, flags);
+		return 0;
+	}
+
 	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
 		&priv->ucode_key_table))
 		IWL_ERROR("index %d not used in uCode key table.\n",
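The WEP_INVALID_OFFSET changes above replace a bare -1 with a named sentinel: the key offset lives in a narrow unsigned field, so -1 would silently truncate into a plausible-looking index. A userspace sketch of the allocate/check pattern (names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE	16
#define INVALID_SLOT	255	/* fits in a u8, never a real index */

static uint32_t used_map;	/* bit i set => slot i in use */

static uint8_t alloc_slot(void)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		if (!(used_map & (1u << i))) {
			used_map |= 1u << i;
			return i;
		}
	return INVALID_SLOT;	/* callers must test for this */
}

int main(void)
{
	int i;

	for (i = 0; i < TABLE_SIZE + 1; i++)
		printf("%u\n", alloc_slot());	/* final line prints 255 */
	return 0;
}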
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff7..cac732f4047f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 		struct ieee80211_hdr *tx_hdr;
 
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
-		if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
+		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
 		{
 			__skb_unlink(skb, q);
 			tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8f63f4c6b85f..9aad608bcf3f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -16,6 +16,7 @@
 #include <linux/pm.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/jiffies.h>
 #include <linux/pci-aspm.h>
 #include "../pci.h"
 
@@ -161,11 +162,12 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
  */
 static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
 {
-	int pos, child_pos;
+	int pos, child_pos, i = 0;
 	u16 reg16 = 0;
 	struct pci_dev *child_dev;
 	int same_clock = 1;
-
+	unsigned long start_jiffies;
+	u16 child_regs[8], parent_reg;
 	/*
 	 * all functions of a slot should have the same Slot Clock
 	 * Configuration, so just check one function
@@ -191,16 +193,19 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
 		child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
 		pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
 			&reg16);
+		child_regs[i] = reg16;
 		if (same_clock)
 			reg16 |= PCI_EXP_LNKCTL_CCC;
 		else
 			reg16 &= ~PCI_EXP_LNKCTL_CCC;
 		pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
 			reg16);
+		i++;
 	}
 
 	/* Configure upstream component */
 	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+	parent_reg = reg16;
 	if (same_clock)
 		reg16 |= PCI_EXP_LNKCTL_CCC;
 	else
@@ -212,12 +217,30 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
 	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
 
 	/* Wait for link training end */
-	while (1) {
+	/* break out after waiting for 1 second */
+	start_jiffies = jiffies;
+	while ((jiffies - start_jiffies) < HZ) {
 		pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
 		if (!(reg16 & PCI_EXP_LNKSTA_LT))
 			break;
 		cpu_relax();
 	}
+	/* training failed -> recover */
+	if ((jiffies - start_jiffies) >= HZ) {
+		dev_printk(KERN_ERR, &pdev->dev, "ASPM: Could not configure"
+			   " common clock\n");
+		i = 0;
+		list_for_each_entry(child_dev, &pdev->subordinate->devices,
+				    bus_list) {
+			child_pos = pci_find_capability(child_dev,
+							PCI_CAP_ID_EXP);
+			pci_write_config_word(child_dev,
+					      child_pos + PCI_EXP_LNKCTL,
+					      child_regs[i]);
+			i++;
+		}
+		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+	}
 }
 
 /*
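The ASPM change above replaces an unbounded poll with a one-second deadline and, on timeout, restores the link-control values it saved before touching the hardware. A reduced plain-C model of that bounded-poll-with-rollback shape, using a fake tick counter in place of jiffies/HZ and a plain integer in place of per-device config-space state:

#include <stdio.h>

#define TIMEOUT_TICKS 1000	/* stands in for the HZ deadline */

static int training_done(int tick)
{
	return tick > 1200;	/* simulated hardware that never finishes in time */
}

int main(void)
{
	int saved_state = 42;	/* snapshot taken before reconfiguring */
	int state = 7;		/* new configuration being tried */
	int tick;

	for (tick = 0; tick < TIMEOUT_TICKS; tick++)
		if (training_done(tick))
			break;

	if (tick >= TIMEOUT_TICKS) {
		/* deadline passed: roll back to the saved configuration */
		state = saved_state;
		printf("timed out, rolled back to state %d\n", state);
	}
	return 0;
}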
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 4dd1c3e157ae..5a8ccb4f604d 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -253,6 +253,7 @@ placeholder:
 		 __func__, pci_domain_nr(parent), parent->number, slot_nr);
 
 out:
+	kfree(slot_name);
 	up_write(&pci_bus_sem);
 	return slot;
 err:
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 341d7a5b45a2..4e91419e8911 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -209,12 +209,18 @@ static int ds1672_probe(struct i2c_client *client,
 	return err;
 }
 
+static struct i2c_device_id ds1672_id[] = {
+	{ "ds1672", 0 },
+	{ }
+};
+
 static struct i2c_driver ds1672_driver = {
 	.driver = {
 		   .name = "rtc-ds1672",
 	},
 	.probe = &ds1672_probe,
 	.remove = &ds1672_remove,
+	.id_table = ds1672_id,
 };
 
 static int __init ds1672_init(void)
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 80782798763f..a4f6665ab3c5 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -247,12 +247,18 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	return 0;
 }
 
+static struct i2c_device_id max6900_id[] = {
+	{ "max6900", 0 },
+	{ }
+};
+
 static struct i2c_driver max6900_driver = {
 	.driver = {
 		   .name = "rtc-max6900",
 	},
 	.probe = max6900_probe,
 	.remove = max6900_remove,
+	.id_table = max6900_id,
 };
 
 static int __init max6900_init(void)
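Both RTC hunks above do the same thing: they add a terminated i2c_device_id table and hook it up through .id_table so the i2c core can match new-style clients to the driver. A minimal, hypothetical skeleton of that wiring, using "foo" placeholder names and the probe/remove signatures of this kernel era; like any driver, it only compiles inside a kernel tree:

#include <linux/module.h>
#include <linux/i2c.h>

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	dev_info(&client->dev, "bound via id '%s'\n", id->name);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

/* The empty entry terminates the table; the core matches clients
 * registered with the name "foo-rtc" against it. */
static const struct i2c_device_id foo_id[] = {
	{ "foo-rtc", 0 },
	{ }
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "rtc-foo",
	},
	.probe	  = foo_probe,
	.remove	  = foo_remove,
	.id_table = foo_id,
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");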
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
index abe87a4d2665..01d8da9afdc8 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl4030.c
@@ -337,7 +337,7 @@ static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
 }
 
 #else
-#define omap_rtc_ioctl NULL
+#define twl4030_rtc_ioctl NULL
 #endif
 
 static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9aec4ca64e56..f7da7530875e 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -107,6 +107,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 	struct request *req;
 	int ret;
 
+retry:
 	req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
 	if (!req)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -121,7 +122,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	req->sense_len = 0;
 
-retry:
 	ret = blk_execute_rq(req->q, NULL, req, 1);
 	if (ret == -EIO) {
 		if (req->sense_len > 0) {
@@ -136,8 +136,10 @@ retry:
 		h->path_state = HP_SW_PATH_ACTIVE;
 		ret = SCSI_DH_OK;
 	}
-	if (ret == SCSI_DH_IMM_RETRY)
+	if (ret == SCSI_DH_IMM_RETRY) {
+		blk_put_request(req);
 		goto retry;
+	}
 	if (ret == SCSI_DH_DEV_OFFLINED) {
 		h->path_state = HP_SW_PATH_PASSIVE;
 		ret = SCSI_DH_OK;
@@ -200,6 +202,7 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 	struct request *req;
 	int ret, retry;
 
+retry:
 	req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
 	if (!req)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
@@ -216,7 +219,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 	req->sense_len = 0;
 	retry = h->retries;
 
-retry:
 	ret = blk_execute_rq(req->q, NULL, req, 1);
 	if (ret == -EIO) {
 		if (req->sense_len > 0) {
@@ -231,8 +233,10 @@ retry:
 		ret = SCSI_DH_OK;
 
 	if (ret == SCSI_DH_RETRY) {
-		if (--retry)
+		if (--retry) {
+			blk_put_request(req);
 			goto retry;
+		}
 		ret = SCSI_DH_IO;
 	}
 
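In both functions above the retry label moves up so that every attempt allocates a fresh request, and the consumed request is released with blk_put_request() before jumping back; a request that has been executed cannot simply be re-issued. A plain-C sketch of that allocate-per-attempt retry shape, with all names invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct req { int consumed; };

static struct req *get_req(void)   { return calloc(1, sizeof(struct req)); }
static void put_req(struct req *r) { free(r); }

static int execute(struct req *r)
{
	r->consumed = 1;		/* once executed, the object is spent */
	return rand() % 4 ? -1 : 0;	/* fail most of the time */
}

int main(void)
{
	int attempts = 5, ret;
	struct req *r;
retry:
	r = get_req();			/* fresh request for every attempt */
	ret = execute(r);
	if (ret && --attempts) {
		put_req(r);		/* drop the consumed request first */
		goto retry;
	}
	printf("final status %d\n", ret);
	put_req(r);
	return 0;
}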
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c9e1242eaf25..5081b3981d3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -757,7 +757,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
 	 * access to the device is prohibited.
 	 */
 	error = scsi_nonblockable_ioctl(sdp, cmd, p,
-			(mode & FMODE_NDELAY_NOW) != 0);
+			(mode & FMODE_NDELAY) != 0);
 	if (!scsi_block_when_processing_errors(sdp) || !error)
 		return error;
 
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 62b6633e3a97..45b66b98a516 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -521,7 +521,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	 * if it doesn't recognise the ioctl
 	 */
 	ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
-					(mode & FMODE_NDELAY_NOW) != 0);
+					(mode & FMODE_NDELAY) != 0);
 	if (ret != -ENODEV)
 		return ret;
 	return scsi_ioctl(sdev, cmd, argp);
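Both the sd.c and sr.c hunks test FMODE_NDELAY, the mode bit carried by the file since open, instead of FMODE_NDELAY_NOW when telling scsi_nonblockable_ioctl() whether the caller opened the device non-blocking. A toy demonstration of why testing a different bit changes the answer; the flag values here are invented for the sketch and are not the kernel's:

#include <stdio.h>

#define FMODE_NDELAY	 (1u << 6)	/* hypothetical: set at open time */
#define FMODE_NDELAY_NOW (1u << 7)	/* hypothetical: a different bit entirely */

int main(void)
{
	unsigned int mode = FMODE_NDELAY;	/* device opened O_NONBLOCK */

	printf("testing the wrong bit: %d\n", (mode & FMODE_NDELAY_NOW) != 0);
	printf("testing the right bit: %d\n", (mode & FMODE_NDELAY) != 0);
	return 0;
}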
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 6dd98f9fb89c..ae3699d77dd0 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2149,7 +2149,7 @@ out4:
 	return ret;
 }
 
-static struct ioc3_submodule ioc3uart_submodule = {
+static struct ioc3_submodule ioc3uart_ops = {
 	.name = "IOC3uart",
 	.probe = ioc3uart_probe,
 	.remove = ioc3uart_remove,
@@ -2173,7 +2173,7 @@ static int __devinit ioc3uart_init(void)
 			__func__);
 		return ret;
 	}
-	ret = ioc3_register_submodule(&ioc3uart_submodule);
+	ret = ioc3_register_submodule(&ioc3uart_ops);
 	if (ret)
 		uart_unregister_driver(&ioc3_uart);
 	return ret;
@@ -2181,7 +2181,7 @@ static int __devinit ioc3uart_init(void)
 
 static void __devexit ioc3uart_exit(void)
 {
-	ioc3_unregister_submodule(&ioc3uart_submodule);
+	ioc3_unregister_submodule(&ioc3uart_ops);
 	uart_unregister_driver(&ioc3_uart);
 }
 
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 6117d3db0b66..28c00c3d58f5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -591,8 +591,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
 	/* Update the per-port timeout */
 	uart_update_timeout(port, new->c_cflag, baud);
 
-	/* Do our best to flush TX & RX, so we don't loose anything */
-	/* But we don't wait indefinitly ! */
+	/* Do our best to flush TX & RX, so we don't lose anything */
+	/* But we don't wait indefinitely ! */
 	j = 5000000;	/* Maximum wait */
 	/* FIXME Can't receive chars since set_termios might be called at early
 	 * boot for the console, all stuff is not yet ready to receive at that
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 317d239ab740..29cbb0afef8e 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -177,5 +177,5 @@ module_exit(s3c2440_serial_exit);
 
 MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPLi v2");
+MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 51d7bdea2869..aad1359a3eb1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1498,7 +1498,7 @@ static int ftdi_open(struct tty_struct *tty,
 			priv->interface, buf, 0, WDR_TIMEOUT);
 
 	/* Termios defaults are set by usb_serial_init. We don't change
-	   port->tty->termios - this would loose speed settings, etc.
+	   port->tty->termios - this would lose speed settings, etc.
 	   This is same behaviour as serial.c/rs_open() - Kuba */
 
 	/* ftdi_set_termios will send usb control messages */
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index a547e5d4c8bf..a469a3d6edcb 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -5,61 +5,61 @@
  * --dte
  */
 
-#define FLUSH_CACHE_WORKAROUND	1
-
-void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries)
+static void radeon_fixup_offset(struct radeonfb_info *rinfo)
 {
-	int i;
+	u32 local_base;
+
+	/* *** Ugly workaround *** */
+	/*
+	 * On some platforms, the video memory is mapped at 0 in radeon chip space
+	 * (like PPCs) by the firmware. X will always move it up so that it's seen
+	 * by the chip to be at the same address as the PCI BAR.
+	 * That means that when switching back from X, there is a mismatch between
+	 * the offsets programmed into the engine. This means that potentially,
+	 * accel operations done before radeonfb has a chance to re-init the engine
+	 * will have incorrect offsets, and potentially trash system memory !
+	 *
+	 * The correct fix is for fbcon to never call any accel op before the engine
+	 * has properly been re-initialized (by a call to set_var), but this is a
+	 * complex fix. This workaround in the meantime, called before every accel
+	 * operation, makes sure the offsets are in sync.
+	 */
 
-	for (i=0; i<2000000; i++) {
-		rinfo->fifo_free = INREG(RBBM_STATUS) & 0x7f;
-		if (rinfo->fifo_free >= entries)
-			return;
-		udelay(10);
-	}
-	printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
-	/* XXX Todo: attempt to reset the engine */
-}
+	radeon_fifo_wait (1);
+	local_base = INREG(MC_FB_LOCATION) << 16;
+	if (local_base == rinfo->fb_local_base)
+		return;
 
-static inline void radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
-{
-	if (entries <= rinfo->fifo_free)
-		rinfo->fifo_free -= entries;
-	else
-		radeon_fifo_update_and_wait(rinfo, entries);
-}
+	rinfo->fb_local_base = local_base;
 
-static inline void radeonfb_set_creg(struct radeonfb_info *rinfo, u32 reg,
-				     u32 *cache, u32 new_val)
-{
-	if (new_val == *cache)
-		return;
-	*cache = new_val;
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(reg, new_val);
+	radeon_fifo_wait (3);
+	OUTREG(DEFAULT_PITCH_OFFSET, (rinfo->pitch << 0x16) |
+				     (rinfo->fb_local_base >> 10));
+	OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
+	OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
 }
 
 static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
 				   const struct fb_fillrect *region)
 {
-	radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
-			  rinfo->dp_gui_mc_base | GMC_BRUSH_SOLID_COLOR | ROP3_P);
-	radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
-			  DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
-	radeonfb_set_creg(rinfo, DP_BRUSH_FRGD_CLR, &rinfo->dp_brush_fg_cache,
-			  region->color);
-
-	/* Ensure the dst cache is flushed and the engine idle before
-	 * issuing the operation.
-	 *
-	 * This works around engine lockups on some cards
-	 */
-#if FLUSH_CACHE_WORKAROUND
-	radeon_fifo_wait(rinfo, 2);
+	radeon_fifo_wait(4);
+
+	OUTREG(DP_GUI_MASTER_CNTL,
+		rinfo->dp_gui_master_cntl /* contains, like GMC_DST_32BPP */
+		| GMC_BRUSH_SOLID_COLOR
+		| ROP3_P);
+	if (radeon_get_dstbpp(rinfo->depth) != DST_8BPP)
+		OUTREG(DP_BRUSH_FRGD_CLR, rinfo->pseudo_palette[region->color]);
+	else
+		OUTREG(DP_BRUSH_FRGD_CLR, region->color);
+	OUTREG(DP_WRITE_MSK, 0xffffffff);
+	OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
+
+	radeon_fifo_wait(2);
 	OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
 	OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-	radeon_fifo_wait(rinfo, 2);
+
+	radeon_fifo_wait(2);
 	OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
 	OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
 }
@@ -70,14 +70,15 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
 	struct fb_fillrect modded;
 	int vxres, vyres;
 
-	WARN_ON(rinfo->gfx_mode);
-	if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
+	if (info->state != FBINFO_STATE_RUNNING)
 		return;
 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
 		cfb_fillrect(info, region);
 		return;
 	}
 
+	radeon_fixup_offset(rinfo);
+
 	vxres = info->var.xres_virtual;
 	vyres = info->var.yres_virtual;
 
@@ -90,10 +91,6 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
 	if(modded.dx + modded.width  > vxres) modded.width  = vxres - modded.dx;
 	if(modded.dy + modded.height > vyres) modded.height = vyres - modded.dy;
 
-	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-	    info->fix.visual == FB_VISUAL_DIRECTCOLOR )
-		modded.color = ((u32 *) (info->pseudo_palette))[region->color];
-
 	radeonfb_prim_fillrect(rinfo, &modded);
 }
 
@@ -112,22 +109,22 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
 	if ( xdir < 0 ) { sx += w-1; dx += w-1; }
 	if ( ydir < 0 ) { sy += h-1; dy += h-1; }
 
-	radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
-			  rinfo->dp_gui_mc_base |
-			  GMC_BRUSH_NONE |
-			  GMC_SRC_DATATYPE_COLOR |
-			  ROP3_S |
-			  DP_SRC_SOURCE_MEMORY);
-	radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
-			  (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) |
-			  (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
-
-#if FLUSH_CACHE_WORKAROUND
-	radeon_fifo_wait(rinfo, 2);
+	radeon_fifo_wait(3);
+	OUTREG(DP_GUI_MASTER_CNTL,
+		rinfo->dp_gui_master_cntl /* i.e. GMC_DST_32BPP */
+		| GMC_BRUSH_NONE
+		| GMC_SRC_DSTCOLOR
+		| ROP3_S
+		| DP_SRC_SOURCE_MEMORY );
+	OUTREG(DP_WRITE_MSK, 0xffffffff);
+	OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
+			| (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
+
+	radeon_fifo_wait(2);
 	OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
 	OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-	radeon_fifo_wait(rinfo, 3);
+
+	radeon_fifo_wait(3);
 	OUTREG(SRC_Y_X, (sy << 16) | sx);
 	OUTREG(DST_Y_X, (dy << 16) | dx);
 	OUTREG(DST_HEIGHT_WIDTH, (h << 16) | w);
@@ -146,14 +143,15 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 	modded.width  = area->width;
 	modded.height = area->height;
 
-	WARN_ON(rinfo->gfx_mode);
-	if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
+	if (info->state != FBINFO_STATE_RUNNING)
 		return;
 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
 		cfb_copyarea(info, area);
 		return;
 	}
 
+	radeon_fixup_offset(rinfo);
+
 	vxres = info->var.xres_virtual;
 	vyres = info->var.yres_virtual;
 
@@ -170,115 +168,13 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 	radeonfb_prim_copyarea(rinfo, &modded);
 }
 
-static void radeonfb_prim_imageblit(struct radeonfb_info *rinfo,
-				    const struct fb_image *image,
-				    u32 fg, u32 bg)
-{
-	unsigned int dwords;
-	u32 *bits;
-
-	radeonfb_set_creg(rinfo, DP_GUI_MASTER_CNTL, &rinfo->dp_gui_mc_cache,
-			  rinfo->dp_gui_mc_base |
-			  GMC_BRUSH_NONE | GMC_DST_CLIP_LEAVE |
-			  GMC_SRC_DATATYPE_MONO_FG_BG |
-			  ROP3_S |
-			  GMC_BYTE_ORDER_MSB_TO_LSB |
-			  DP_SRC_SOURCE_HOST_DATA);
-	radeonfb_set_creg(rinfo, DP_CNTL, &rinfo->dp_cntl_cache,
-			  DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
-	radeonfb_set_creg(rinfo, DP_SRC_FRGD_CLR, &rinfo->dp_src_fg_cache, fg);
-	radeonfb_set_creg(rinfo, DP_SRC_BKGD_CLR, &rinfo->dp_src_bg_cache, bg);
-
-	/* Ensure the dst cache is flushed and the engine idle before
-	 * issuing the operation.
-	 *
-	 * This works around engine lockups on some cards
-	 */
-#if FLUSH_CACHE_WORKAROUND
-	radeon_fifo_wait(rinfo, 2);
-	OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
-	OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
-#endif
-
-	/* X here pads width to a multiple of 32 and uses the clipper to
-	 * adjust the result. Is that really necessary ? Things seem to
-	 * work ok for me without that and the doco doesn't seem to imply
-	 * there is such a restriction.
-	 */
-	radeon_fifo_wait(rinfo, 4);
-	OUTREG(SC_TOP_LEFT, (image->dy << 16) | image->dx);
-	OUTREG(SC_BOTTOM_RIGHT, ((image->dy + image->height) << 16) |
-	       (image->dx + image->width));
-	OUTREG(DST_Y_X, (image->dy << 16) | image->dx);
-
-	OUTREG(DST_HEIGHT_WIDTH, (image->height << 16) | ((image->width + 31) & ~31));
-
-	dwords = (image->width + 31) >> 5;
-	dwords *= image->height;
-	bits = (u32*)(image->data);
-
-	while(dwords >= 8) {
-		radeon_fifo_wait(rinfo, 8);
-#if BITS_PER_LONG == 64
-		__raw_writeq(*((u64 *)(bits)), rinfo->mmio_base + HOST_DATA0);
-		__raw_writeq(*((u64 *)(bits+2)), rinfo->mmio_base + HOST_DATA2);
-		__raw_writeq(*((u64 *)(bits+4)), rinfo->mmio_base + HOST_DATA4);
-		__raw_writeq(*((u64 *)(bits+6)), rinfo->mmio_base + HOST_DATA6);
-		bits += 8;
-#else
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA1);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA2);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA3);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA4);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA5);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA6);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA7);
-#endif
-		dwords -= 8;
-	}
-	while(dwords--) {
-		radeon_fifo_wait(rinfo, 1);
-		__raw_writel(*(bits++), rinfo->mmio_base + HOST_DATA0);
-	}
-}
-
 void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct radeonfb_info *rinfo = info->par;
-	u32 fg, bg;
-
-	WARN_ON(rinfo->gfx_mode);
-	if (info->state != FBINFO_STATE_RUNNING || rinfo->gfx_mode)
-		return;
 
-	if (!image->width || !image->height)
+	if (info->state != FBINFO_STATE_RUNNING)
 		return;
-
-	/* We only do 1 bpp color expansion for now */
-	if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1)
-		goto fallback;
-
-	/* Fallback if running out of the screen. We may do clipping
-	 * in the future */
-	if ((image->dx + image->width) > info->var.xres_virtual ||
-	    (image->dy + image->height) > info->var.yres_virtual)
-		goto fallback;
-
-	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-		fg = ((u32*)(info->pseudo_palette))[image->fg_color];
-		bg = ((u32*)(info->pseudo_palette))[image->bg_color];
-	} else {
-		fg = image->fg_color;
-		bg = image->bg_color;
-	}
-
-	radeonfb_prim_imageblit(rinfo, image, fg, bg);
-	return;
-
- fallback:
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
 	cfb_imageblit(info, image);
 }
@@ -289,8 +185,7 @@ int radeonfb_sync(struct fb_info *info)
 
 	if (info->state != FBINFO_STATE_RUNNING)
 		return 0;
-
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
 	return 0;
 }
@@ -366,10 +261,9 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
 	/* disable 3D engine */
 	OUTREG(RB3D_CNTL, 0);
 
-	rinfo->fifo_free = 0;
 	radeonfb_engine_reset(rinfo);
 
-	radeon_fifo_wait(rinfo, 1);
+	radeon_fifo_wait (1);
 	if (IS_R300_VARIANT(rinfo)) {
 		OUTREG(RB2D_DSTCACHE_MODE, INREG(RB2D_DSTCACHE_MODE) |
 		       RB2D_DC_AUTOFLUSH_ENABLE |
@@ -383,7 +277,7 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
 		OUTREG(RB2D_DSTCACHE_MODE, 0);
 	}
 
-	radeon_fifo_wait(rinfo, 3);
+	radeon_fifo_wait (3);
 	/* We re-read MC_FB_LOCATION from card as it can have been
 	 * modified by XFree drivers (ouch !)
 	 */
@@ -394,57 +288,41 @@ void radeonfb_engine_init (struct radeonfb_info *rinfo)
 	OUTREG(DST_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
 	OUTREG(SRC_PITCH_OFFSET, (rinfo->pitch << 0x16) | (rinfo->fb_local_base >> 10));
 
-	radeon_fifo_wait(rinfo, 1);
-#ifdef __BIG_ENDIAN
+	radeon_fifo_wait (1);
+#if defined(__BIG_ENDIAN)
 	OUTREGP(DP_DATATYPE, HOST_BIG_ENDIAN_EN, ~HOST_BIG_ENDIAN_EN);
 #else
 	OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN);
 #endif
-	radeon_fifo_wait(rinfo, 2);
+	radeon_fifo_wait (2);
 	OUTREG(DEFAULT_SC_TOP_LEFT, 0);
 	OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX |
 					 DEFAULT_SC_BOTTOM_MAX));
 
-	/* set default DP_GUI_MASTER_CNTL */
 	temp = radeon_get_dstbpp(rinfo->depth);
-	rinfo->dp_gui_mc_base = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
+	rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
 
-	rinfo->dp_gui_mc_cache = rinfo->dp_gui_mc_base |
-		GMC_BRUSH_SOLID_COLOR |
-		GMC_SRC_DATATYPE_COLOR;
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(DP_GUI_MASTER_CNTL, rinfo->dp_gui_mc_cache);
+	radeon_fifo_wait (1);
+	OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl |
+				    GMC_BRUSH_SOLID_COLOR |
+				    GMC_SRC_DATATYPE_COLOR));
 
+	radeon_fifo_wait (7);
 
 	/* clear line drawing regs */
-	radeon_fifo_wait(rinfo, 2);
 	OUTREG(DST_LINE_START, 0);
 	OUTREG(DST_LINE_END, 0);
 
-	/* set brush and source color regs */
-	rinfo->dp_brush_fg_cache = 0xffffffff;
-	rinfo->dp_brush_bg_cache = 0x00000000;
-	rinfo->dp_src_fg_cache = 0xffffffff;
-	rinfo->dp_src_bg_cache = 0x00000000;
-	radeon_fifo_wait(rinfo, 4);
-	OUTREG(DP_BRUSH_FRGD_CLR, rinfo->dp_brush_fg_cache);
-	OUTREG(DP_BRUSH_BKGD_CLR, rinfo->dp_brush_bg_cache);
-	OUTREG(DP_SRC_FRGD_CLR, rinfo->dp_src_fg_cache);
-	OUTREG(DP_SRC_BKGD_CLR, rinfo->dp_src_bg_cache);
-
-	/* Default direction */
-	rinfo->dp_cntl_cache = DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM;
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(DP_CNTL, rinfo->dp_cntl_cache);
+	/* set brush color regs */
+	OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff);
+	OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000);
+
+	/* set source color regs */
+	OUTREG(DP_SRC_FRGD_CLR, 0xffffffff);
+	OUTREG(DP_SRC_BKGD_CLR, 0x00000000);
 
 	/* default write mask */
-	radeon_fifo_wait(rinfo, 1);
 	OUTREG(DP_WRITE_MSK, 0xffffffff);
 
-	/* Default to no swapping of host data */
-	radeon_fifo_wait(rinfo, 1);
-	OUTREG(RBBM_GUICNTL, RBBM_GUICNTL_HOST_DATA_SWAP_NONE);
-
-	/* Make sure it's settled */
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle ();
 }
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index f343ba83f0ae..1a056adb61c8 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -66,7 +66,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
 	level = bd->props.brightness;
 
 	del_timer_sync(&rinfo->lvds_timer);
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
 	lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
 	if (level > 0) {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index b3ffe8205d2b..d0f1a7fc2c9d 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -852,6 +852,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
 	if (rinfo->asleep)
 		return 0;
 
+	radeon_fifo_wait(2);
 	OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
 			     * var->bits_per_pixel / 8) & ~7);
 	return 0;
@@ -881,6 +882,7 @@ static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
 		if (rc)
 			return rc;
 
+		radeon_fifo_wait(2);
 		if (value & 0x01) {
 			tmp = INREG(LVDS_GEN_CNTL);
 
@@ -938,7 +940,7 @@ int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch)
 	if (rinfo->lock_blank)
 		return 0;
 
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
 	val = INREG(CRTC_EXT_CNTL);
 	val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
@@ -1046,7 +1048,7 @@ static int radeonfb_blank (int blank, struct fb_info *info)
 
 	if (rinfo->asleep)
 		return 0;
-	
+
 	return radeon_screen_blank(rinfo, blank, 0);
 }
 
@@ -1072,6 +1074,8 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
 		pindex = regno;
 
 	if (!rinfo->asleep) {
+		radeon_fifo_wait(9);
+
 		if (rinfo->bpp == 16) {
 			pindex = regno * 8;
 
@@ -1240,6 +1244,8 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
 {
 	int i;
 
+	radeon_fifo_wait(20);
+
 	/* Workaround from XFree */
 	if (rinfo->is_mobility) {
 		/* A temporal workaround for the occasional blanking on certain laptop
@@ -1335,7 +1341,7 @@ static void radeon_lvds_timer_func(unsigned long data)
 {
 	struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
 
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
 	OUTREG(LVDS_GEN_CNTL, rinfo->pending_lvds_gen_cntl);
 }
@@ -1353,11 +1359,10 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
 	if (nomodeset)
 		return;
 
-	radeon_engine_idle(rinfo);
-
 	if (!regs_only)
 		radeon_screen_blank(rinfo, FB_BLANK_NORMAL, 0);
 
+	radeon_fifo_wait(31);
 	for (i=0; i<10; i++)
 		OUTREG(common_regs[i].reg, common_regs[i].val);
 
@@ -1385,6 +1390,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
 	radeon_write_pll_regs(rinfo, mode);
 
 	if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
+		radeon_fifo_wait(10);
 		OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
 		OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
 		OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
@@ -1399,6 +1405,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
 	if (!regs_only)
 		radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 0);
 
+	radeon_fifo_wait(2);
 	OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
 
 	return;
@@ -1549,7 +1556,7 @@ static int radeonfb_set_par(struct fb_info *info)
 	/* We always want engine to be idle on a mode switch, even
 	 * if we won't actually change the mode
 	 */
-	radeon_engine_idle(rinfo);
+	radeon_engine_idle();
 
 	hSyncStart = mode->xres + mode->right_margin;
 	hSyncEnd = hSyncStart + mode->hsync_len;
@@ -1844,6 +1851,7 @@ static int radeonfb_set_par(struct fb_info *info)
 	return 0;
 }
 
+
 static struct fb_ops radeonfb_ops = {
 	.owner			= THIS_MODULE,
 	.fb_check_var		= radeonfb_check_var,
@@ -1867,7 +1875,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
 	info->par = rinfo;
 	info->pseudo_palette = rinfo->pseudo_palette;
 	info->flags = FBINFO_DEFAULT
-		    | FBINFO_HWACCEL_IMAGEBLIT
 		    | FBINFO_HWACCEL_COPYAREA
 		    | FBINFO_HWACCEL_FILLRECT
 		    | FBINFO_HWACCEL_XPAN
@@ -1875,7 +1882,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
 	info->fbops = &radeonfb_ops;
 	info->screen_base = rinfo->fb_base;
 	info->screen_size = rinfo->mapped_vram;
-
 	/* Fill fix common fields */
 	strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
 	info->fix.smem_start = rinfo->fb_base_phys;
@@ -1890,25 +1896,8 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
 	info->fix.mmio_len = RADEON_REGSIZE;
 	info->fix.accel = FB_ACCEL_ATI_RADEON;
 
-	/* Allocate colormap */
 	fb_alloc_cmap(&info->cmap, 256, 0);
 
-	/* Setup pixmap used for acceleration */
-#define PIXMAP_SIZE	(2048 * 4)
-
-	info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL);
-	if (!info->pixmap.addr) {
-		printk(KERN_ERR "radeonfb: Failed to allocate pixmap !\n");
-		noaccel = 1;
-		goto bail;
-	}
-	info->pixmap.size = PIXMAP_SIZE;
-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
-	info->pixmap.scan_align = 4;
-	info->pixmap.buf_align = 4;
-	info->pixmap.access_align = 32;
-
-bail:
 	if (noaccel)
 		info->flags |= FBINFO_HWACCEL_DISABLED;
 
@@ -2017,6 +2006,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
 		u32 tom = INREG(NB_TOM);
 		tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
 
+		radeon_fifo_wait(6);
 		OUTREG(MC_FB_LOCATION, tom);
 		OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
 		OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 3df5015f1d13..675abdafc2d8 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2653,9 +2653,9 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 
 	if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
 		/* Make sure engine is reset */
-		radeon_engine_idle(rinfo);
+		radeon_engine_idle();
 		radeonfb_engine_reset(rinfo);
-		radeon_engine_idle(rinfo);
+		radeon_engine_idle();
 	}
 
 	/* Blank display and LCD */
@@ -2767,7 +2767,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
 
 		rinfo->asleep = 0;
 	} else
-		radeon_engine_idle(rinfo);
+		radeon_engine_idle();
 
 	/* Restore display & engine */
 	radeon_write_mode (rinfo, &rinfo->state, 1);
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index ea0b5b47acaf..3ea1b00fdd22 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -336,15 +336,7 @@ struct radeonfb_info {
 	int			mon2_type;
 	u8			*mon2_EDID;
 
-	/* accel bits */
-	u32			dp_gui_mc_base;
-	u32			dp_gui_mc_cache;
-	u32			dp_cntl_cache;
-	u32			dp_brush_fg_cache;
-	u32			dp_brush_bg_cache;
-	u32			dp_src_fg_cache;
-	u32			dp_src_bg_cache;
-	u32			fifo_free;
+	u32			dp_gui_master_cntl;
 
 	struct pll_info		pll;
 
@@ -356,7 +348,6 @@ struct radeonfb_info {
 	int			lock_blank;
 	int			dynclk;
 	int			no_schedule;
-	int			gfx_mode;
 	enum radeon_pm_mode	pm_mode;
 	reinit_function_ptr	reinit_func;
 
@@ -401,14 +392,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
 #define OUTREG8(addr,val)	writeb(val, (rinfo->mmio_base)+addr)
 #define INREG16(addr)		readw((rinfo->mmio_base)+addr)
 #define OUTREG16(addr,val)	writew(val, (rinfo->mmio_base)+addr)
-
-#ifdef CONFIG_PPC
-#define INREG(addr)		({ eieio(); ld_le32(rinfo->mmio_base+(addr)); })
-#define OUTREG(addr,val)	do { eieio(); st_le32(rinfo->mmio_base+(addr),(val)); } while(0)
-#else
 #define INREG(addr)		readl((rinfo->mmio_base)+addr)
 #define OUTREG(addr,val)	writel(val, (rinfo->mmio_base)+addr)
-#endif
 
 static inline void _OUTREGP(struct radeonfb_info *rinfo, u32 addr,
 			    u32 val, u32 mask)
@@ -550,7 +535,17 @@ static inline u32 radeon_get_dstbpp(u16 depth)
  * 2D Engine helper routines
  */
 
-extern void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries);
+static inline void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
+{
+	int i;
+
+	for (i=0; i<2000000; i++) {
+		if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
+			return;
+		udelay(1);
+	}
+	printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
+}
 
 static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
 {
@@ -563,7 +558,7 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
 	/* Ensure FIFO is empty, ie, make sure the flush commands
 	 * have reached the cache
 	 */
-	radeon_fifo_update_and_wait(rinfo, 64);
+	_radeon_fifo_wait (rinfo, 64);
 
 	/* Wait for the flush to complete */
 	for (i=0; i < 2000000; i++) {
@@ -575,12 +570,12 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
 }
 
 
-static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
+static inline void _radeon_engine_idle(struct radeonfb_info *rinfo)
 {
 	int i;
 
 	/* ensure FIFO is empty before waiting for idle */
-	radeon_fifo_update_and_wait (rinfo, 64);
+	_radeon_fifo_wait (rinfo, 64);
 
 	for (i=0; i<2000000; i++) {
 		if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
@@ -593,6 +588,8 @@ static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
 }
 
 
+#define radeon_engine_idle()		_radeon_engine_idle(rinfo)
+#define radeon_fifo_wait(entries)	_radeon_fifo_wait(rinfo,entries)
 #define radeon_msleep(ms)		_radeon_msleep(rinfo,ms)
 
 
@@ -622,7 +619,6 @@ extern void radeonfb_imageblit(struct fb_info *p, const struct fb_image *image);
 extern int radeonfb_sync(struct fb_info *info);
 extern void radeonfb_engine_init (struct radeonfb_info *rinfo);
 extern void radeonfb_engine_reset(struct radeonfb_info *rinfo);
-extern void radeon_fixup_mem_offset(struct radeonfb_info *rinfo);
 
 /* Other functions */
 extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch);
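The restored _radeon_fifo_wait() helper above simply polls a status register until enough command-FIFO slots are free, giving up after a fixed number of spins. A reduced userspace model of that polling loop, with the register read replaced by a fake status function for the sketch:

#include <stdio.h>

static unsigned int fake_rbbm_status(int i)
{
	return i > 10 ? 0x40 : 0x02;	/* 64 slots become free after a few polls */
}

static void fifo_wait(int entries)
{
	int i;

	/* bounded spin: either enough slots free up, or we report a timeout */
	for (i = 0; i < 2000000; i++)
		if ((int)(fake_rbbm_status(i) & 0x7f) >= entries)
			return;
	printf("FIFO timeout!\n");
}

int main(void)
{
	fifo_wait(32);	/* returns once the fake status reports 64 free slots */
	return 0;
}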
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 67ff370d80af..0b2adefe9e3d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3531,12 +3531,18 @@ static void fbcon_exit(void)
 	softback_buf = 0UL;
 
 	for (i = 0; i < FB_MAX; i++) {
+		int pending;
+
 		mapped = 0;
 		info = registered_fb[i];
 
 		if (info == NULL)
 			continue;
 
+		pending = cancel_work_sync(&info->queue);
+		DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
+			"no"));
+
 		for (j = first_fb_vc; j <= last_fb_vc; j++) {
 			if (con2fb_map[j] == i)
 				mapped = 1;
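The fbcon hunk above cancels any queued deferred work for each framebuffer before the console lets go of it, so a late callback cannot run against state that is being torn down. A hypothetical kernel-style fragment of that ordering; it is a sketch of the pattern, not a complete module:

#include <linux/workqueue.h>

struct dev_state {
	struct work_struct queue;
	/* ... fields the work callback dereferences ... */
};

static void dev_teardown(struct dev_state *st)
{
	/* Nonzero if work was still queued; either way, after this call
	 * the callback is neither running nor pending. */
	int pending = cancel_work_sync(&st->queue);

	pr_debug("%s pending work\n", pending ? "canceled" : "no");
	/* only now is it safe to free or unregister st */
}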
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 38718d95fbb9..fb64234a3825 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -927,9 +927,9 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
 	}
 
 	dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
-		(u64)par->fb_base_phys, (ulong)par->mapped_vram);
+		(unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram);
 	dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
-		(u64)par->mmio_base_phys, (ulong)par->mmio_len);
+		(unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
 
 	if (mb862xx_pci_gdc_init(par))
 		goto io_unmap;
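The %llx conversion expects an unsigned long long, while a 64-bit typedef may be a plain long on 64-bit targets, so the explicit cast above keeps the format string portable across architectures. A userspace analogue:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t phys = 0xdeadbeefcafeULL;

	/* correct whether uint64_t is defined as long or long long */
	printf("fb phys 0x%llx\n", (unsigned long long)phys);
	return 0;
}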
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 5a5e407dc45f..1a49519dafa4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -392,7 +392,7 @@ static void set_fb_fix(struct fb_info *fbi)
 	int bpp;
 
 	rg = &plane->fbdev->mem_desc.region[plane->idx];
-	fbi->screen_base = (char __iomem *)rg->vaddr;
+	fbi->screen_base = rg->vaddr;
 	fix->smem_start = rg->paddr;
 	fix->smem_len = rg->size;
 
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 26173a270e94..5b395a4ddfdf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -392,7 +392,7 @@ static int iTCO_wdt_stop(void)
 
 	/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
 	val32 = inl(SMI_EN);
-	val32 &= 0x00002000;
+	val32 |= 0x00002000;
 	outl(val32, SMI_EN);
 
 	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
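The one-character fix above matters: AND-ing with the mask kept only bit 13 and zeroed every other enable bit in SMI_EN, while OR-ing sets bit 13 and preserves its neighbours. A toy demonstration with an arbitrary register value:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x00000867;	/* arbitrary other enable bits set */

	printf("and-mask: 0x%08x\n", reg & 0x00002000);	/* 0x00000000: all cleared */
	printf("or-set:   0x%08x\n", reg | 0x00002000);	/* 0x00002867: bit added */
	return 0;
}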