Diffstat (limited to 'drivers')
-rw-r--r--  drivers/firewire/core-card.c  41
-rw-r--r--  drivers/firewire/core-cdev.c  50
-rw-r--r--  drivers/firewire/ohci.c  4
-rw-r--r--  drivers/gpu/drm/drm_edid.c  3
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  2
-rw-r--r--  drivers/gpu/drm/drm_gem.c  13
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c  187
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c  76
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c  22
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c  13
-rw-r--r--  drivers/gpu/drm/radeon/atom.c  102
-rw-r--r--  drivers/gpu/drm/radeon/atom.h  1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  259
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  5
-rw-r--r--  drivers/gpu/drm/radeon/r200.c  7
-rw-r--r--  drivers/gpu/drm/radeon/r420.c  4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  82
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c  14
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  83
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c  77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h  28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  3
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r200  2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c  33
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  69
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  63
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c  8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c  9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  64
-rw-r--r--  drivers/hwmon/amc6821.c  1
-rw-r--r--  drivers/hwmon/asus_atk0110.c  19
-rw-r--r--  drivers/hwmon/fschmd.c  7
-rw-r--r--  drivers/hwmon/smsc47m1.c  2
-rw-r--r--  drivers/i2c/busses/i2c-imx.c  26
-rw-r--r--  drivers/message/fusion/mptbase.c  2
-rw-r--r--  drivers/mtd/maps/Kconfig  17
-rw-r--r--  drivers/mtd/maps/pismo.c  320
-rw-r--r--  drivers/mtd/mtdoops.c  2
-rw-r--r--  drivers/mtd/tests/mtd_readtest.c  6
-rw-r--r--  drivers/mtd/tests/mtd_speedtest.c  7
-rw-r--r--  drivers/mtd/tests/mtd_stresstest.c  6
-rw-r--r--  drivers/mtd/ubi/kapi.c  15
-rw-r--r--  drivers/mtd/ubi/upd.c  1
-rw-r--r--  drivers/mtd/ubi/vtbl.c  1
-rw-r--r--  drivers/net/benet/be_cmds.c  2
-rw-r--r--  drivers/net/benet/be_main.c  8
-rw-r--r--  drivers/net/bfin_mac.c  5
-rw-r--r--  drivers/net/e1000/e1000.h  2
-rw-r--r--  drivers/net/e1000/e1000_main.c  43
-rw-r--r--  drivers/net/e1000e/e1000.h  1
-rw-r--r--  drivers/net/e1000e/netdev.c  57
-rw-r--r--  drivers/net/igb/igb_main.c  4
-rw-r--r--  drivers/net/igbvf/netdev.c  14
-rw-r--r--  drivers/net/ixgb/ixgb_main.c  10
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  12
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c  1
-rw-r--r--  drivers/net/phy/phy.c  4
-rw-r--r--  drivers/net/phy/phy_device.c  1
-rw-r--r--  drivers/net/qlge/qlge_main.c  15
-rw-r--r--  drivers/net/s2io.c  2
-rw-r--r--  drivers/net/sfc/mcdi.c  7
-rw-r--r--  drivers/net/sfc/mcdi.h  1
-rw-r--r--  drivers/net/sfc/mcdi_pcol.h  4
-rw-r--r--  drivers/net/sfc/mtd.c  5
-rw-r--r--  drivers/net/sfc/qt202x_phy.c  6
-rw-r--r--  drivers/net/sky2.c  42
-rw-r--r--  drivers/net/tulip/tulip_core.c  1
-rw-r--r--  drivers/net/ucc_geth.c  5
-rw-r--r--  drivers/net/virtio_net.c  3
-rw-r--r--  drivers/net/wimax/i2400m/i2400m-usb.h  2
-rw-r--r--  drivers/net/wimax/i2400m/usb.c  12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c  5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.c  26
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h  26
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/commands.c  4
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/commands.h  1
-rw-r--r--  drivers/net/wireless/p54/p54pci.c  8
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c  1
-rw-r--r--  drivers/s390/block/dasd.c  8
-rw-r--r--  drivers/s390/block/dasd_eckd.c  2
-rw-r--r--  drivers/s390/block/dasd_ioctl.c  7
-rw-r--r--  drivers/s390/block/dasd_proc.c  7
-rw-r--r--  drivers/s390/char/sclp_vt220.c  4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c  2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c  2
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c  9
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c  2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h  5
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c  84
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h  2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  19
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c  1
-rw-r--r--  drivers/scsi/aacraid/aachba.c  52
-rw-r--r--  drivers/scsi/aacraid/aacraid.h  5
-rw-r--r--  drivers/scsi/aacraid/commctrl.c  28
-rw-r--r--  drivers/scsi/aacraid/comminit.c  6
-rw-r--r--  drivers/scsi/aacraid/commsup.c  72
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c  36
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c  53
-rw-r--r-- [-rwxr-xr-x]  drivers/scsi/lpfc/lpfc_hbadisc.c  0
-rw-r--r-- [-rwxr-xr-x]  drivers/scsi/lpfc/lpfc_hw4.h  0
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  12
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  12
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  2
-rw-r--r--  drivers/scsi/scsi_lib.c  2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  5
-rw-r--r--  drivers/serial/serial_cs.c  1
-rw-r--r--  drivers/watchdog/Kconfig  4
-rw-r--r--  drivers/watchdog/ixp2000_wdt.c  1
-rw-r--r--  drivers/watchdog/sbc_fitpc2_wdt.c  11
140 files changed, 1969 insertions, 904 deletions
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 7083bcc1b9c7..5045156c5313 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -57,6 +57,8 @@ static LIST_HEAD(descriptor_list);
 static int descriptor_count;
 
 static __be32 tmp_config_rom[256];
+/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
+static size_t config_rom_length = 1 + 4 + 1 + 1;
 
 #define BIB_CRC(v) ((v) << 0)
 #define BIB_CRC_LENGTH(v) ((v) << 16)
@@ -73,7 +75,7 @@ static __be32 tmp_config_rom[256];
 #define BIB_CMC ((1) << 30)
 #define BIB_IMC ((1) << 31)
 
-static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom)
+static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 {
  struct fw_descriptor *desc;
  int i, j, k, length;
@@ -130,23 +132,30 @@ static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom)
  for (i = 0; i < j; i += length + 1)
  length = fw_compute_block_crc(config_rom + i);
 
- return j;
+ WARN_ON(j != config_rom_length);
 }
 
 static void update_config_roms(void)
 {
  struct fw_card *card;
- size_t length;
 
  list_for_each_entry (card, &card_list, link) {
- length = generate_config_rom(card, tmp_config_rom);
+ generate_config_rom(card, tmp_config_rom);
- card->driver->set_config_rom(card, tmp_config_rom, length);
+ card->driver->set_config_rom(card, tmp_config_rom,
+ config_rom_length);
  }
 }
 
+static size_t required_space(struct fw_descriptor *desc)
+{
+ /* descriptor + entry into root dir + optional immediate entry */
+ return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
+}
+
 int fw_core_add_descriptor(struct fw_descriptor *desc)
 {
  size_t i;
+ int ret;
 
  /*
  * Check descriptor is valid; the length of all blocks in the
@@ -162,15 +171,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
 
  mutex_lock(&card_mutex);
 
- list_add_tail(&desc->link, &descriptor_list);
+ if (config_rom_length + required_space(desc) > 256) {
- descriptor_count++;
+ ret = -EBUSY;
- if (desc->immediate > 0)
+ } else {
+ list_add_tail(&desc->link, &descriptor_list);
+ config_rom_length += required_space(desc);
  descriptor_count++;
- update_config_roms();
+ if (desc->immediate > 0)
+ descriptor_count++;
+ update_config_roms();
+ ret = 0;
+ }
 
  mutex_unlock(&card_mutex);
 
- return 0;
+ return ret;
 }
 EXPORT_SYMBOL(fw_core_add_descriptor);
 
@@ -179,6 +194,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
  mutex_lock(&card_mutex);
 
  list_del(&desc->link);
+ config_rom_length -= required_space(desc);
  descriptor_count--;
  if (desc->immediate > 0)
  descriptor_count--;
@@ -428,7 +444,6 @@ EXPORT_SYMBOL(fw_card_initialize);
 int fw_card_add(struct fw_card *card,
  u32 max_receive, u32 link_speed, u64 guid)
 {
- size_t length;
  int ret;
 
  card->max_receive = max_receive;
@@ -437,8 +452,8 @@ int fw_card_add(struct fw_card *card,
 
  mutex_lock(&card_mutex);
 
- length = generate_config_rom(card, tmp_config_rom);
+ generate_config_rom(card, tmp_config_rom);
- ret = card->driver->enable(card, tmp_config_rom, length);
+ ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
  if (ret == 0)
  list_add_tail(&card->link, &card_list);
 
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index e6d63849e78e..4eeaed57e219 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -35,6 +35,7 @@
 #include <linux/preempt.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
+#include <linux/string.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -595,13 +596,20 @@ static int ioctl_send_request(struct client *client, void *buffer)
  client->device->max_speed);
 }
 
+static inline bool is_fcp_request(struct fw_request *request)
+{
+ return request == NULL;
+}
+
 static void release_request(struct client *client,
  struct client_resource *resource)
 {
  struct inbound_transaction_resource *r = container_of(resource,
  struct inbound_transaction_resource, resource);
 
- if (r->request)
+ if (is_fcp_request(r->request))
+ kfree(r->data);
+ else
  fw_send_response(client->device->card, r->request,
  RCODE_CONFLICT_ERROR);
  kfree(r);
@@ -616,6 +624,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
  struct address_handler_resource *handler = callback_data;
  struct inbound_transaction_resource *r;
  struct inbound_transaction_event *e;
+ void *fcp_frame = NULL;
  int ret;
 
  r = kmalloc(sizeof(*r), GFP_ATOMIC);
@@ -627,6 +636,18 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
  r->data = payload;
  r->length = length;
 
+ if (is_fcp_request(request)) {
+ /*
+ * FIXME: Let core-transaction.c manage a
+ * single reference-counted copy?
+ */
+ fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
+ if (fcp_frame == NULL)
+ goto failed;
+
+ r->data = fcp_frame;
+ }
+
 r->resource.release = release_request;
  ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
  if (ret < 0)
@@ -640,13 +661,15 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
  e->request.closure = handler->closure;
 
  queue_event(handler->client, &e->event,
- &e->request, sizeof(e->request), payload, length);
+ &e->request, sizeof(e->request), r->data, length);
  return;
 
  failed:
  kfree(r);
  kfree(e);
- if (request)
+ kfree(fcp_frame);
+
+ if (!is_fcp_request(request))
  fw_send_response(card, request, RCODE_CONFLICT_ERROR);
 }
 
@@ -717,18 +740,17 @@ static int ioctl_send_response(struct client *client, void *buffer)
 
  r = container_of(resource, struct inbound_transaction_resource,
  resource);
- if (r->request) {
+ if (is_fcp_request(r->request))
- if (request->length < r->length)
+ goto out;
- r->length = request->length;
+
- if (copy_from_user(r->data, u64_to_uptr(request->data),
+ if (request->length < r->length)
- r->length)) {
+ r->length = request->length;
- ret = -EFAULT;
+ if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
- kfree(r->request);
+ ret = -EFAULT;
- goto out;
+ kfree(r->request);
- }
+ goto out;
- fw_send_response(client->device->card, r->request,
- request->rcode);
  }
+ fw_send_response(client->device->card, r->request, request->rcode);
  out:
  kfree(r);
 
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a61571c63c59..2345d4103fe6 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2420,6 +2420,7 @@ static void ohci_pmac_off(struct pci_dev *dev)
 
 #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
 #define PCI_DEVICE_ID_AGERE_FW643 0x5901
+#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
 
 static int __devinit pci_probe(struct pci_dev *dev,
  const struct pci_device_id *ent)
@@ -2488,7 +2489,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
 #if !defined(CONFIG_X86_32)
  /* dual-buffer mode is broken with descriptor addresses above 2G */
  if (dev->vendor == PCI_VENDOR_ID_TI &&
- dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
+ (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
+ dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
  ohci->use_dualbuffer = false;
 #endif
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index defcaf108460..f665b05592f3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -633,8 +633,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
  return NULL;
  }
  if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
+ printk(KERN_WARNING "composite sync not supported\n");
- return NULL;
  }
 
  /* it is incorrect if hsync/vsync width is zero */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1c2b7d44ec05..0f9e90552dc4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
  break;
  /* Display: Off; HSync: On, VSync: On */
  case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
  break;
  /* Display: Off; HSync: Off, VSync: On */
  case FB_BLANK_HSYNC_SUSPEND:
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
  if (IS_ERR(obj->filp))
  goto free;
 
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
-
  kref_init(&obj->refcount);
  kref_init(&obj->handlecount);
  obj->size = size;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dceb..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
  list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
  obj = obj_priv->obj;
  if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
  if (ret) {
  DRM_ERROR("Failed to get pages: %d\n", ret);
  spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c1669488b5a..aaf934d96f21 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -872,7 +872,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
  struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c67924ca80c..dda787aafcc6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
  mutex_lock(&dev->struct_mutex);
 
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
  if (ret != 0)
  goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
  return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
- return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
  int ret;
 
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
  /* If we've insufficient memory to map in the pages, attempt
  * to make some space by throwing out some old buffers.
  */
  if (ret == -ENOMEM) {
  struct drm_device *dev = obj->dev;
- gfp_t gfp;
 
  ret = i915_gem_evict_something(dev, obj->size);
  if (ret)
  return ret;
 
- gfp = i915_gem_object_get_page_gfp_mask(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
- i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
- ret = i915_gem_object_get_pages(obj);
- i915_gem_object_set_page_gfp_mask (obj, gfp);
  }
 
  return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
  mutex_lock(&dev->struct_mutex);
 
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
  if (ret != 0)
  goto fail_unlock;
 
@@ -2230,7 +2214,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
 {
  struct drm_i915_gem_object *obj_priv = obj->driver_private;
  int page_count, i;
@@ -2256,7 +2241,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
  inode = obj->filp->f_path.dentry->d_inode;
  mapping = inode->i_mapping;
  for (i = 0; i < page_count; i++) {
- page = read_mapping_page(mapping, i, NULL);
+ page = read_cache_page_gfp(mapping, i,
+ mapping_gfp_mask (mapping) |
+ __GFP_COLD |
+ gfpmask);
  if (IS_ERR(page)) {
  ret = PTR_ERR(page);
  i915_gem_object_put_pages(obj);
@@ -2579,7 +2567,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  drm_i915_private_t *dev_priv = dev->dev_private;
  struct drm_i915_gem_object *obj_priv = obj->driver_private;
  struct drm_mm_node *free_space;
- bool retry_alloc = false;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
  int ret;
 
  if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  DRM_INFO("Binding object of size %zd at 0x%08x\n",
  obj->size, obj_priv->gtt_offset);
 #endif
- if (retry_alloc) {
+ ret = i915_gem_object_get_pages(obj, gfpmask);
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
- }
- ret = i915_gem_object_get_pages(obj);
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
- }
  if (ret) {
  drm_mm_put_block(obj_priv->gtt_space);
  obj_priv->gtt_space = NULL;
@@ -2641,9 +2621,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  ret = i915_gem_evict_something(dev, obj->size);
  if (ret) {
  /* now try to shrink everyone else */
- if (! retry_alloc) {
+ if (gfpmask) {
- retry_alloc = true;
+ gfpmask = 0;
  goto search_free;
  }
 
  return ret;
@@ -4946,7 +4926,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
  if (!obj_priv->phys_obj)
  return;
 
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
  if (ret)
  goto out;
 
@@ -5004,7 +4984,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
  obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
  obj_priv->phys_obj->cur_obj = obj;
 
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
  if (ret) {
  DRM_ERROR("failed to get page list\n");
  goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index ba143972769f..d7f8d8b4a4b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
  struct drm_device *dev = bios->dev;
 
  /* C51 has misaligned regs on purpose. Marvellous */
- if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
+ if (reg & 0x2 ||
- NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
+ (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
- reg);
+ NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
- return 0;
+
- }
+ /* warn on C51 regs that haven't been verified accessible in tracing */
- /*
- * Warn on C51 regs that have not been verified accessible in
- * mmiotracing
- */
  if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
  reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
  NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
  reg);
 
- /* Trust the init scripts on G80 */
+ if (reg >= (8*1024*1024)) {
- if (dev_priv->card_type >= NV_50)
+ NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
- return 1;
+ return 0;
-
- #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
- if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
- return 1;
- if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
- return 1;
- if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
- (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
- WITHIN(reg, 0xc000, 0x48))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
- if (reg == 0x00011014 || reg == 0x00020328)
- return 1;
- if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
- return 1;
  }
- if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
- return 1;
- if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
- return 1;
- if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
- return 1;
- if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
- WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
- return 1;
- #undef WITHIN
 
- NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
+ return 1;
-
- return 0;
 }
 
 static bool
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
  }
 #ifdef __powerpc__
  /* Powerbook specific quirks */
- if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
+ if ((dev->pci_device & 0xffff) == 0x0179 ||
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+ (dev->pci_device & 0xffff) == 0x0189 ||
- if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
+ (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_PANEL_ON) {
+ if (script == LVDS_RESET) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
- }
+ } else if (script == LVDS_PANEL_ON) {
- if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
+ } else if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
  }
  }
 #endif
@@ -5434,52 +5402,49 @@ static bool
 parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
 {
- if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
+ switch (conn & 0x0000000f) {
- conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
+ case 0:
- conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
+ entry->type = OUTPUT_ANALOG;
- conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
+ break;
- conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
+ case 1:
- conn != 0xf2205004 && conn != 0xf2209004) {
+ entry->type = OUTPUT_TV;
- NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
+ break;
-
+ case 2:
- /* cause output setting to fail for !TV, so message is seen */
+ case 3:
- if ((conn & 0xf) != 0x1)
- dcb->entries = 0;
-
- return false;
- }
- /* most of the below is a "best guess" atm */
- entry->type = conn & 0xf;
- if (entry->type == 2)
- /* another way of specifying straps based lvds... */
  entry->type = OUTPUT_LVDS;
- if (entry->type == 4) { /* digital */
+ break;
- if (conn & 0x10)
+ case 4:
- entry->type = OUTPUT_LVDS;
+ switch ((conn & 0x000000f0) >> 4) {
- else
+ case 0:
  entry->type = OUTPUT_TMDS;
+ break;
+ case 1:
+ entry->type = OUTPUT_LVDS;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
+ (conn & 0x000000f0) >> 4);
+ return false;
+ }
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+ return false;
  }
- /* what's in bits 5-13? could be some encoder maker thing, in tv case */
+
- entry->i2c_index = (conn >> 14) & 0xf;
+ entry->i2c_index = (conn & 0x0003c000) >> 14;
- /* raw heads field is in range 0-1, so move to 1-2 */
+ entry->heads = ((conn & 0x001c0000) >> 18) + 1;
- entry->heads = ((conn >> 18) & 0x7) + 1;
+ entry->or = entry->heads; /* same as heads, hopefully safe enough */
- entry->location = (conn >> 21) & 0xf;
+ entry->location = (conn & 0x01e00000) >> 21;
- /* unused: entry->bus = (conn >> 25) & 0x7; */
+ entry->bus = (conn & 0x0e000000) >> 25;
- /* set or to be same as heads -- hopefully safe enough */
- entry->or = entry->heads;
  entry->duallink_possible = false;
 
  switch (entry->type) {
  case OUTPUT_ANALOG:
  entry->crtconf.maxfreq = (conf & 0xffff) * 10;
  break;
- case OUTPUT_LVDS:
+ case OUTPUT_TV:
- /*
+ entry->tvconf.has_component_output = false;
- * This is probably buried in conn's unknown bits.
- * This will upset EDID-ful models, if they exist
- */
- entry->lvdsconf.use_straps_for_mode = true;
- entry->lvdsconf.use_power_scripts = true;
  break;
  case OUTPUT_TMDS:
  /*
@@ -5488,8 +5453,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
  */
  fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
  break;
- case OUTPUT_TV:
+ case OUTPUT_LVDS:
- entry->tvconf.has_component_output = false;
+ if ((conn & 0x00003f00) != 0x10)
+ entry->lvdsconf.use_straps_for_mode = true;
+ entry->lvdsconf.use_power_scripts = true;
+ break;
+ default:
  break;
  }
 
@@ -5564,11 +5533,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
  dcb->entries = newentries;
 }
 
-static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
 {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
  struct bios_parsed_dcb *bdcb = &bios->bdcb;
  struct parsed_dcb *dcb;
- uint16_t dcbptr, i2ctabptr = 0;
+ uint16_t dcbptr = 0, i2ctabptr = 0;
  uint8_t *dcbtable;
  uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
  bool configblock = true;
@@ -5579,16 +5550,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
  dcb->entries = 0;
 
  /* get the offset from 0x36 */
- dcbptr = ROM16(bios->data[0x36]);
+ if (dev_priv->card_type > NV_04) {
+ dcbptr = ROM16(bios->data[0x36]);
+ if (dcbptr == 0x0000)
+ NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+ }
 
+ /* this situation likely means a really old card, pre DCB */
  if (dcbptr == 0x0) {
- NV_WARN(dev, "No output data (DCB) found in BIOS, "
+ NV_INFO(dev, "Assuming a CRT output exists\n");
- "assuming a CRT output exists\n");
- /* this situation likely means a really old card, pre DCB */
  fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
 
- if (nv04_tv_identify(dev,
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- bios->legacy.i2c_indices.tv) >= 0)
  fabricate_tv_output(dcb, twoHeads);
 
  return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e342a418d434..db0ed4c13f98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -469,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 
  ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
  evict, no_wait, new_mem);
+ if (nvbo->channel && nvbo->channel != chan)
+ ret = nouveau_fence_wait(fence, NULL, false, false);
  nouveau_fence_unref((void *)&fence);
  return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5a10deb8bdbd..7e6d673f3a23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
  *
  */
 
+#include <acpi/button.h>
+
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_crtc_helper.h"
+
 #include "nouveau_reg.h"
 #include "nouveau_drv.h"
 #include "nouveau_encoder.h"
@@ -83,14 +86,16 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
 static void
 nouveau_connector_destroy(struct drm_connector *drm_connector)
 {
- struct nouveau_connector *connector = nouveau_connector(drm_connector);
+ struct nouveau_connector *nv_connector =
- struct drm_device *dev = connector->base.dev;
+ nouveau_connector(drm_connector);
+ struct drm_device *dev = nv_connector->base.dev;
 
  NV_DEBUG_KMS(dev, "\n");
 
- if (!connector)
+ if (!nv_connector)
  return;
 
+ kfree(nv_connector->edid);
  drm_sysfs_connector_remove(drm_connector);
  drm_connector_cleanup(drm_connector);
  kfree(drm_connector);
@@ -233,10 +238,21 @@ nouveau_connector_detect(struct drm_connector *connector)
  if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
  nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
  if (nv_encoder && nv_connector->native_mode) {
+#ifdef CONFIG_ACPI
+ if (!nouveau_ignorelid && !acpi_lid_open())
+ return connector_status_disconnected;
+#endif
  nouveau_connector_set_encoder(connector, nv_encoder);
  return connector_status_connected;
  }
 
+ /* Cleanup the previous EDID block. */
+ if (nv_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ kfree(nv_connector->edid);
+ nv_connector->edid = NULL;
+ }
+
  i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
  if (i2c) {
  nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +263,7 @@ nouveau_connector_detect(struct drm_connector *connector)
  if (!nv_connector->edid) {
  NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
  drm_get_connector_name(connector));
- return connector_status_disconnected;
+ goto detect_analog;
  }
 
  if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +297,7 @@ nouveau_connector_detect(struct drm_connector *connector)
  return connector_status_connected;
  }
 
+detect_analog:
  nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
  if (!nv_encoder)
  nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -687,8 +704,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
  */
  if (!nv_connector->edid && !nv_connector->native_mode &&
  !dev_priv->VBIOS.pub.fp_no_ddc) {
- nv_connector->edid =
+ struct edid *edid =
  (struct edid *)nouveau_bios_embedded_edid(dev);
+ if (edid) {
+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ *(nv_connector->edid) = *edid;
+ }
  }
 
  if (!nv_connector->edid)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 7afbe8b40d51..50d9e67745af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -126,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
  chan->dma.cur += nr_dwords;
 }
 
-static inline bool
+/* Fetch and adjust GPU GET pointer
-READ_GET(struct nouveau_channel *chan, uint32_t *get)
+ *
+ * Returns:
+ * value >= 0, the adjusted GET pointer
+ * -EINVAL if GET pointer currently outside main push buffer
+ * -EBUSY if timeout exceeded
+ */
+static inline int
+READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
 {
  uint32_t val;
 
  val = nvchan_rd32(chan, chan->user_get);
- if (val < chan->pushbuf_base ||
+
- val > chan->pushbuf_base + (chan->dma.max << 2)) {
+ /* reset counter as long as GET is still advancing, this is
- /* meaningless to dma_wait() except to know whether the
+ * to avoid misdetecting a GPU lockup if the GPU happens to
- * GPU has stalled or not
+ * just be processing an operation that takes a long time
  */
- *get = val;
+ if (val != *prev_get) {
- return false;
+ *prev_get = val;
+ *timeout = 0;
+ }
+
+ if ((++*timeout & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (*timeout > 100000)
+ return -EBUSY;
  }
 
- *get = (val - chan->pushbuf_base) >> 2;
+ if (val < chan->pushbuf_base ||
- return true;
+ val > chan->pushbuf_base + (chan->dma.max << 2))
+ return -EINVAL;
+
+ return (val - chan->pushbuf_base) >> 2;
 }
 
 int
 nouveau_dma_wait(struct nouveau_channel *chan, int size)
 {
- uint32_t get, prev_get = 0, cnt = 0;
+ uint32_t prev_get = 0, cnt = 0;
- bool get_valid;
+ int get;
 
  while (chan->dma.free < size) {
- /* reset counter as long as GET is still advancing, this is
+ get = READ_GET(chan, &prev_get, &cnt);
- * to avoid misdetecting a GPU lockup if the GPU happens to
+ if (unlikely(get == -EBUSY))
- * just be processing an operation that takes a long time
+ return -EBUSY;
- */
- get_valid = READ_GET(chan, &get);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- DRM_UDELAY(1);
- if (cnt > 100000)
- return -EBUSY;
- }
 
  /* loop until we have a usable GET pointer. the value
  * we read from the GPU may be outside the main ring if
@@ -177,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
  * from the SKIPS area, so the code below doesn't have to deal
  * with some fun corner cases.
  */
- if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+ if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
  continue;
 
  if (get <= chan->dma.cur) {
@@ -203,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
  * after processing the currently pending commands.
  */
  OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+
+ /* wait for GET to depart from the skips area.
+ * prevents writing GET==PUT and causing a race
+ * condition that causes us to think the GPU is
+ * idle when it's not.
+ */
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
+ if (unlikely(get == -EINVAL))
+ continue;
+ } while (get <= NOUVEAU_DMA_SKIPS);
  WRITE_PUT(NOUVEAU_DMA_SKIPS);
 
  /* we're now submitting commands at the start of
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9e2926c48579..dd4937224220 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
  if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
  NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
  nv_rd32(dev, NV50_AUXCH_CTRL(index)));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
  }
 
  udelay(400);
@@ -501,6 +502,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
  break;
  }
 
+ if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
  if (cmd & 1) {
  for (i = 0; i < 4; i++) {
  data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 06eb993e0883..343ab7f17ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -71,6 +71,10 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
 int nouveau_uscript_tmds = -1;
 module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
 
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
  "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
  "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 026419fe8791..6b9690418bc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -509,6 +509,8 @@ struct drm_nouveau_private {
  void __iomem *ramin;
  uint32_t ramin_size;
 
+ struct nouveau_bo *vga_ram;
+
  struct workqueue_struct *wq;
  struct work_struct irq_work;
 
@@ -675,6 +677,7 @@ extern char *nouveau_tv_norm;
 extern int nouveau_reg_debug;
 extern char *nouveau_vbios;
 extern int nouveau_ctxfw;
+extern int nouveau_ignorelid;
 
 /* nouveau_state.c */
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2009db2426c3..6ac804b0c9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,6 +321,7 @@ retry:
  else {
  NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
  b->valid_domains);
+ list_add_tail(&nvbo->entry, &op->both_list);
  validate_fini(op, NULL);
  return -EINVAL;
  }
@@ -466,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 static int
 nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
  struct drm_nouveau_gem_pushbuf_bo *bo,
- int nr_relocs, uint64_t ptr_relocs,
+ unsigned nr_relocs, uint64_t ptr_relocs,
- int nr_dwords, int first_dword,
+ unsigned nr_dwords, unsigned first_dword,
  uint32_t *pushbuf, bool is_iomem)
 {
  struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
  struct drm_device *dev = chan->dev;
- int ret = 0, i;
+ int ret = 0;
+ unsigned i;
 
  reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
  if (IS_ERR(reloc))
@@ -667,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
  }
  pbbo = nouveau_gem_object(gem);
 
+ if ((req->offset & 3) || req->nr_dwords < 2 ||
+ (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
+ (unsigned long)req->nr_dwords >
+ ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
+ NV_ERROR(dev, "pb call misaligned or out of bounds: "
+ "%d + %d * 4 > %ld\n",
+ req->offset, req->nr_dwords, pbbo->bo.mem.size);
+ ret = -EINVAL;
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
  ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
  chan->fence.sequence);
  if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 919a619ca7fa..3b9bad66162a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -483,6 +483,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
  if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
  if (nouveau_pgraph_intr_swmthd(dev, &trap))
  unhandled = 1;
+ } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ uint32_t v = nv_rd32(dev, 0x402000);
+ nv_wr32(dev, 0x402000, v);
+
+ /* dump the error anyway for now: it's useful for
+ Gallium development */
+ unhandled = 1;
  } else {
  unhandled = 1;
  }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fb9bdd6edf1f..8f3a12f614ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -383,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev)
 {
  struct drm_nouveau_private *dev_priv = dev->dev_private;
 
- if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
+ nouveau_bo_unpin(dev_priv->vga_ram);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
 
  ttm_bo_device_release(&dev_priv->ttm.bdev);
 
@@ -622,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev)
  return ret;
  }
 
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
+
  /* GART */
 #if !defined(__powerpc__) && !defined(__ia64__)
  if (drm_device_is_agp(dev) && dev->agp) {
@@ -653,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev)
  dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
  drm_get_resource_len(dev, 1),
  DRM_MTRR_WC);
+
  return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 09b9a46dfc0e..f2d0187ba152 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -525,6 +525,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
  engine->mc.takedown(dev);
 
  mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
  ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
  mutex_unlock(&dev->struct_mutex);
  nouveau_sgdma_takedown(dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a2..a3b9563a6f60 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
30 * of vram. For now, only reserve a small piece until we know 30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires. 31 * more about what each chipset requires.
32 */ 32 */
33 switch (dev_priv->chipset & 0xf0) { 33 switch (dev_priv->chipset) {
34 case 0x40: 34 case 0x40:
35 case 0x47: 35 case 0x47:
36 case 0x49: 36 case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 118d3285fd8c..40b7360841f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -432,6 +432,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433 struct drm_device *dev = crtc->dev; 433 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder; 434 struct drm_encoder *encoder;
435 uint32_t dac = 0, sor = 0;
435 436
436 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 437 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
437 438
@@ -439,9 +440,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 440 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 441 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441 442
442 if (drm_helper_encoder_in_use(encoder)) 443 if (!drm_helper_encoder_in_use(encoder))
443 continue; 444 continue;
444 445
446 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
447 nv_encoder->dcb->type == OUTPUT_TV)
448 dac |= (1 << nv_encoder->or);
449 else
450 sor |= (1 << nv_encoder->or);
451 }
452
453 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
454 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
455
456 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
457 nv_encoder->dcb->type == OUTPUT_TV) {
458 if (dac & (1 << nv_encoder->or))
459 continue;
460 } else {
461 if (sor & (1 << nv_encoder->or))
462 continue;
463 }
464
445 nv_encoder->disconnect(nv_encoder); 465 nv_encoder->disconnect(nv_encoder);
446 } 466 }
447 467
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 39caf167587d..32b244bcb482 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
272 return ret; 272 return ret;
273 ramfc = chan->ramfc->gpuobj; 273 ramfc = chan->ramfc->gpuobj;
274 274
275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, 275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
276 0, &chan->cache); 276 0, &chan->cache);
277 if (ret) 277 if (ret)
278 return ret; 278 return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ca79f32be44c..20319e59d368 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
84 nv_wr32(dev, 0x400804, 0xc0000000); 84 nv_wr32(dev, 0x400804, 0xc0000000);
85 nv_wr32(dev, 0x406800, 0xc0000000); 85 nv_wr32(dev, 0x406800, 0xc0000000);
86 nv_wr32(dev, 0x400c04, 0xc0000000); 86 nv_wr32(dev, 0x400c04, 0xc0000000);
87 nv_wr32(dev, 0x401804, 0xc0000000); 87 nv_wr32(dev, 0x401800, 0xc0000000);
88 nv_wr32(dev, 0x405018, 0xc0000000); 88 nv_wr32(dev, 0x405018, 0xc0000000);
89 nv_wr32(dev, 0x402000, 0xc0000000); 89 nv_wr32(dev, 0x402000, 0xc0000000);
90 90
@@ -282,6 +282,7 @@ nv50_graph_unload_context(struct drm_device *dev)
282 return 0; 282 return 0;
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 284
285 nouveau_wait_for_idle(dev);
285 nv_wr32(dev, 0x400500, fifo & ~1); 286 nv_wr32(dev, 0x400500, fifo & ~1);
286 nv_wr32(dev, 0x400784, inst); 287 nv_wr32(dev, 0x400784, inst);
287 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 288 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index e395c16d30f5..ecf1936b8224 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -90,11 +90,24 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
90{ 90{
91 struct drm_device *dev = encoder->dev; 91 struct drm_device *dev = encoder->dev;
92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93 struct drm_encoder *enc;
93 uint32_t val; 94 uint32_t val;
94 int or = nv_encoder->or; 95 int or = nv_encoder->or;
95 96
96 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); 97 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
97 98
99 nv_encoder->last_dpms = mode;
100 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102
103 if (nvenc == nv_encoder ||
104 nvenc->dcb->or != nv_encoder->dcb->or)
105 continue;
106
107 if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
108 return;
109 }
110
98 /* wait for it to be done */ 111 /* wait for it to be done */
99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), 112 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
100 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { 113 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 388140a7e651..e3b44562d265 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -246,6 +246,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
246 case ATOM_WS_ATTRIBUTES: 246 case ATOM_WS_ATTRIBUTES:
247 val = gctx->io_attr; 247 val = gctx->io_attr;
248 break; 248 break;
249 case ATOM_WS_REGPTR:
250 val = gctx->reg_block;
251 break;
249 default: 252 default:
250 val = ctx->ws[idx]; 253 val = ctx->ws[idx];
251 } 254 }
@@ -385,6 +388,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
385 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 388 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
386} 389}
387 390
391static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
392{
393 uint32_t val = 0xCDCDCDCD;
394
395 switch (align) {
396 case ATOM_SRC_DWORD:
397 val = U32(*ptr);
398 (*ptr) += 4;
399 break;
400 case ATOM_SRC_WORD0:
401 case ATOM_SRC_WORD8:
402 case ATOM_SRC_WORD16:
403 val = U16(*ptr);
404 (*ptr) += 2;
405 break;
406 case ATOM_SRC_BYTE0:
407 case ATOM_SRC_BYTE8:
408 case ATOM_SRC_BYTE16:
409 case ATOM_SRC_BYTE24:
410 val = U8(*ptr);
411 (*ptr)++;
412 break;
413 }
414 return val;
415}
416
388static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, 417static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
389 int *ptr, uint32_t *saved, int print) 418 int *ptr, uint32_t *saved, int print)
390{ 419{
@@ -482,6 +511,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
482 case ATOM_WS_ATTRIBUTES: 511 case ATOM_WS_ATTRIBUTES:
483 gctx->io_attr = val; 512 gctx->io_attr = val;
484 break; 513 break;
514 case ATOM_WS_REGPTR:
515 gctx->reg_block = val;
516 break;
485 default: 517 default:
486 ctx->ws[idx] = val; 518 ctx->ws[idx] = val;
487 } 519 }
@@ -677,7 +709,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
677 SDEBUG(" dst: "); 709 SDEBUG(" dst: ");
678 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 710 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
679 SDEBUG(" src1: "); 711 SDEBUG(" src1: ");
680 src1 = atom_get_src(ctx, attr, ptr); 712 src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
681 SDEBUG(" src2: "); 713 SDEBUG(" src2: ");
682 src2 = atom_get_src(ctx, attr, ptr); 714 src2 = atom_get_src(ctx, attr, ptr);
683 dst &= src1; 715 dst &= src1;
@@ -809,6 +841,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
809 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); 841 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
810} 842}
811 843
844static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
845{
846 uint8_t attr = U8((*ptr)++), shift;
847 uint32_t saved, dst;
848 int dptr = *ptr;
849 attr &= 0x38;
850 attr |= atom_def_dst[attr >> 3] << 6;
851 SDEBUG(" dst: ");
852 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
853 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
854 SDEBUG(" shift: %d\n", shift);
855 dst <<= shift;
856 SDEBUG(" dst: ");
857 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
858}
859
860static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
861{
862 uint8_t attr = U8((*ptr)++), shift;
863 uint32_t saved, dst;
864 int dptr = *ptr;
865 attr &= 0x38;
866 attr |= atom_def_dst[attr >> 3] << 6;
867 SDEBUG(" dst: ");
868 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
869 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
870 SDEBUG(" shift: %d\n", shift);
871 dst >>= shift;
872 SDEBUG(" dst: ");
873 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
874}
875
812static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 876static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
813{ 877{
814 uint8_t attr = U8((*ptr)++), shift; 878 uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +882,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
818 attr |= atom_def_dst[attr >> 3] << 6; 882 attr |= atom_def_dst[attr >> 3] << 6;
819 SDEBUG(" dst: "); 883 SDEBUG(" dst: ");
820 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 884 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 shift = U8((*ptr)++); 885 shift = atom_get_src(ctx, attr, ptr);
822 SDEBUG(" shift: %d\n", shift); 886 SDEBUG(" shift: %d\n", shift);
823 dst <<= shift; 887 dst <<= shift;
824 SDEBUG(" dst: "); 888 SDEBUG(" dst: ");
@@ -834,7 +898,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
834 attr |= atom_def_dst[attr >> 3] << 6; 898 attr |= atom_def_dst[attr >> 3] << 6;
835 SDEBUG(" dst: "); 899 SDEBUG(" dst: ");
836 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 900 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 shift = U8((*ptr)++); 901 shift = atom_get_src(ctx, attr, ptr);
838 SDEBUG(" shift: %d\n", shift); 902 SDEBUG(" shift: %d\n", shift);
839 dst >>= shift; 903 dst >>= shift;
840 SDEBUG(" dst: "); 904 SDEBUG(" dst: ");
@@ -937,18 +1001,18 @@ static struct {
937 atom_op_or, ATOM_ARG_FB}, { 1001 atom_op_or, ATOM_ARG_FB}, {
938 atom_op_or, ATOM_ARG_PLL}, { 1002 atom_op_or, ATOM_ARG_PLL}, {
939 atom_op_or, ATOM_ARG_MC}, { 1003 atom_op_or, ATOM_ARG_MC}, {
940 atom_op_shl, ATOM_ARG_REG}, { 1004 atom_op_shift_left, ATOM_ARG_REG}, {
941 atom_op_shl, ATOM_ARG_PS}, { 1005 atom_op_shift_left, ATOM_ARG_PS}, {
942 atom_op_shl, ATOM_ARG_WS}, { 1006 atom_op_shift_left, ATOM_ARG_WS}, {
943 atom_op_shl, ATOM_ARG_FB}, { 1007 atom_op_shift_left, ATOM_ARG_FB}, {
944 atom_op_shl, ATOM_ARG_PLL}, { 1008 atom_op_shift_left, ATOM_ARG_PLL}, {
945 atom_op_shl, ATOM_ARG_MC}, { 1009 atom_op_shift_left, ATOM_ARG_MC}, {
946 atom_op_shr, ATOM_ARG_REG}, { 1010 atom_op_shift_right, ATOM_ARG_REG}, {
947 atom_op_shr, ATOM_ARG_PS}, { 1011 atom_op_shift_right, ATOM_ARG_PS}, {
948 atom_op_shr, ATOM_ARG_WS}, { 1012 atom_op_shift_right, ATOM_ARG_WS}, {
949 atom_op_shr, ATOM_ARG_FB}, { 1013 atom_op_shift_right, ATOM_ARG_FB}, {
950 atom_op_shr, ATOM_ARG_PLL}, { 1014 atom_op_shift_right, ATOM_ARG_PLL}, {
951 atom_op_shr, ATOM_ARG_MC}, { 1015 atom_op_shift_right, ATOM_ARG_MC}, {
952 atom_op_mul, ATOM_ARG_REG}, { 1016 atom_op_mul, ATOM_ARG_REG}, {
953 atom_op_mul, ATOM_ARG_PS}, { 1017 atom_op_mul, ATOM_ARG_PS}, {
954 atom_op_mul, ATOM_ARG_WS}, { 1018 atom_op_mul, ATOM_ARG_WS}, {
@@ -1058,8 +1122,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1058 1122
1059 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); 1123 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1060 1124
1061 /* reset reg block */
1062 ctx->reg_block = 0;
1063 ectx.ctx = ctx; 1125 ectx.ctx = ctx;
1064 ectx.ps_shift = ps / 4; 1126 ectx.ps_shift = ps / 4;
1065 ectx.start = base; 1127 ectx.start = base;
@@ -1096,6 +1158,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1096void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1158void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097{ 1159{
1098 mutex_lock(&ctx->mutex); 1160 mutex_lock(&ctx->mutex);
1161 /* reset reg block */
1162 ctx->reg_block = 0;
1163 /* reset fb window */
1164 ctx->fb_base = 0;
1165 /* reset io mode */
1166 ctx->io_mode = ATOM_IO_MM;
1099 atom_execute_table_locked(ctx, index, params); 1167 atom_execute_table_locked(ctx, index, params);
1100 mutex_unlock(&ctx->mutex); 1168 mutex_unlock(&ctx->mutex);
1101} 1169}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 47fd943f6d14..bc73781423a1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
91#define ATOM_WS_AND_MASK 0x45 91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46 92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47 93#define ATOM_WS_ATTRIBUTES 0x47
94#define ATOM_WS_REGPTR 0x48
94 95
95#define ATOM_IIO_NOP 0 96#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1 97#define ATOM_IIO_START 1
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00c..af464e351fbd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308 args.ucCRTC = radeon_crtc->crtc_id; 308 args.ucCRTC = radeon_crtc->crtc_id;
309 309
310 printk("executing set crtc dtd timing\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 310 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312} 311}
313 312
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
347 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 346 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348 args.ucCRTC = radeon_crtc->crtc_id; 347 args.ucCRTC = radeon_crtc->crtc_id;
349 348
350 printk("executing set crtc timing\n");
351 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
352} 350}
353 351
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
409 } 407 }
410} 408}
411 409
412void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 410union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
412};
413
414static u32 atombios_adjust_pll(struct drm_crtc *crtc,
415 struct drm_display_mode *mode,
416 struct radeon_pll *pll)
413{ 417{
414 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
415 struct drm_device *dev = crtc->dev; 418 struct drm_device *dev = crtc->dev;
416 struct radeon_device *rdev = dev->dev_private; 419 struct radeon_device *rdev = dev->dev_private;
417 struct drm_encoder *encoder = NULL; 420 struct drm_encoder *encoder = NULL;
418 struct radeon_encoder *radeon_encoder = NULL; 421 struct radeon_encoder *radeon_encoder = NULL;
419 uint8_t frev, crev; 422 u32 adjusted_clock = mode->clock;
420 int index;
421 SET_PIXEL_CLOCK_PS_ALLOCATION args;
422 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
423 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
424 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
425 uint32_t pll_clock = mode->clock;
426 uint32_t adjusted_clock;
427 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
428 struct radeon_pll *pll;
429 int pll_flags = 0;
430 423
431 memset(&args, 0, sizeof(args)); 424 /* reset the pll flags */
425 pll->flags = 0;
432 426
433 if (ASIC_IS_AVIVO(rdev)) { 427 if (ASIC_IS_AVIVO(rdev)) {
434 if ((rdev->family == CHIP_RS600) || 428 if ((rdev->family == CHIP_RS600) ||
435 (rdev->family == CHIP_RS690) || 429 (rdev->family == CHIP_RS690) ||
436 (rdev->family == CHIP_RS740)) 430 (rdev->family == CHIP_RS740))
437 pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 431 pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
438 RADEON_PLL_PREFER_CLOSEST_LOWER); 432 RADEON_PLL_PREFER_CLOSEST_LOWER);
439 433
440 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 434 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
441 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 435 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
442 else 436 else
443 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 437 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
444 } else { 438 } else {
445 pll_flags |= RADEON_PLL_LEGACY; 439 pll->flags |= RADEON_PLL_LEGACY;
446 440
447 if (mode->clock > 200000) /* range limits??? */ 441 if (mode->clock > 200000) /* range limits??? */
448 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 442 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
449 else 443 else
450 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 444 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
451 445
452 } 446 }
453 447
454 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
455 if (encoder->crtc == crtc) { 449 if (encoder->crtc == crtc) {
456 if (!ASIC_IS_AVIVO(rdev)) {
457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (encoder->encoder_type ==
461 DRM_MODE_ENCODER_LVDS)
462 pll_flags |= RADEON_PLL_USE_REF_DIV;
463 }
464 radeon_encoder = to_radeon_encoder(encoder); 450 radeon_encoder = to_radeon_encoder(encoder);
451 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2;
455 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
458 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
459 pll->flags |= RADEON_PLL_USE_REF_DIV;
460 }
465 break; 461 break;
466 } 462 }
467 } 463 }
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
471 * special hw requirements. 467 * special hw requirements.
472 */ 468 */
473 if (ASIC_IS_DCE3(rdev)) { 469 if (ASIC_IS_DCE3(rdev)) {
474 ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; 470 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev;
473 int index;
475 474
476 if (!encoder) 475 if (!radeon_encoder->enc_priv)
477 return; 476 return adjusted_clock;
478 477 dig = radeon_encoder->enc_priv;
479 memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
480 adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
481 adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
482 adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
483 478
484 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
485 atom_execute_table(rdev->mode_info.atom_context, 480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
486 index, (uint32_t *)&adjust_pll_args); 481 &crev);
487 adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; 482
488 } else { 483 memset(&args, 0, sizeof(args));
489 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 484
490 if (ASIC_IS_AVIVO(rdev) && 485 switch (frev) {
491 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) 486 case 1:
492 adjusted_clock = mode->clock * 2; 487 switch (crev) {
493 else 488 case 1:
494 adjusted_clock = mode->clock; 489 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
493
494 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
505 return adjusted_clock;
506 }
495 } 507 }
508 return adjusted_clock;
509}
510
511union set_pixel_clock {
512 SET_PIXEL_CLOCK_PS_ALLOCATION base;
513 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3;
516};
517
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev;
522 struct radeon_device *rdev = dev->dev_private;
523 struct drm_encoder *encoder = NULL;
524 struct radeon_encoder *radeon_encoder = NULL;
525 u8 frev, crev;
526 int index;
527 union set_pixel_clock args;
528 u32 pll_clock = mode->clock;
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll;
531 u32 adjusted_clock;
532
533 memset(&args, 0, sizeof(args));
534
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder);
538 break;
539 }
540 }
541
542 if (!radeon_encoder)
543 return;
496 544
497 if (radeon_crtc->crtc_id == 0) 545 if (radeon_crtc->crtc_id == 0)
498 pll = &rdev->clock.p1pll; 546 pll = &rdev->clock.p1pll;
499 else 547 else
500 pll = &rdev->clock.p2pll; 548 pll = &rdev->clock.p2pll;
501 549
550 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552
502 if (ASIC_IS_AVIVO(rdev)) { 553 if (ASIC_IS_AVIVO(rdev)) {
503 if (radeon_new_pll) 554 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, 555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div, 556 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags); 557 &ref_div, &post_div);
507 else 558 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock, 559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div, 560 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags); 561 &ref_div, &post_div);
511 } else 562 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags); 564 &ref_div, &post_div);
514 565
515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
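The SetPixelClock rewrite in this file drops the per-version pointer casts into args.sPCLKInput and instead fills one member of a union of versioned parameter structs, chosen by the frev/crev pair that atom_parse_cmd_header reports. A minimal sketch of that union-plus-version-switch pattern, with made-up struct and field names standing in for the real ATOM headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the versioned ATOM parameter structs. */
struct params_v1 { uint16_t pixel_clock; uint8_t post_div; };
struct params_v2 { uint16_t pixel_clock; uint8_t post_div; uint8_t misc; };

union set_clock_args {
    struct params_v1 v1;
    struct params_v2 v2;
};

/* Fill the union member matching the table revision the BIOS reports. */
static int fill_args(union set_clock_args *args, uint8_t frev, uint8_t crev,
                     uint16_t clock, uint8_t post_div)
{
    memset(args, 0, sizeof(*args));

    if (frev != 1) {
        fprintf(stderr, "unknown table version %u %u\n", frev, crev);
        return -1;
    }
    switch (crev) {
    case 1:
        args->v1.pixel_clock = clock;
        args->v1.post_div = post_div;
        return 0;
    case 2:
        args->v2.pixel_clock = clock;
        args->v2.post_div = post_div;
        args->v2.misc = 0;
        return 0;
    default:
        fprintf(stderr, "unknown table version %u %u\n", frev, crev);
        return -1;
    }
}

int main(void)
{
    union set_clock_args args;

    if (fill_args(&args, 1, 2, 2700, 4) == 0)
        printf("v2 pixel clock: %u\n", (unsigned)args.v2.pixel_clock);
    return 0;
}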
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
520 case 1: 571 case 1:
521 switch (crev) { 572 switch (crev) {
522 case 1: 573 case 1:
523 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; 574 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
524 spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 575 args.v1.usRefDiv = cpu_to_le16(ref_div);
525 spc1_ptr->usRefDiv = cpu_to_le16(ref_div); 576 args.v1.usFbDiv = cpu_to_le16(fb_div);
526 spc1_ptr->usFbDiv = cpu_to_le16(fb_div); 577 args.v1.ucFracFbDiv = frac_fb_div;
527 spc1_ptr->ucFracFbDiv = frac_fb_div; 578 args.v1.ucPostDiv = post_div;
528 spc1_ptr->ucPostDiv = post_div; 579 args.v1.ucPpll =
529 spc1_ptr->ucPpll =
530 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
531 spc1_ptr->ucCRTC = radeon_crtc->crtc_id; 581 args.v1.ucCRTC = radeon_crtc->crtc_id;
532 spc1_ptr->ucRefDivSrc = 1; 582 args.v1.ucRefDivSrc = 1;
533 break; 583 break;
534 case 2: 584 case 2:
535 spc2_ptr = 585 args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
536 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; 586 args.v2.usRefDiv = cpu_to_le16(ref_div);
537 spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 587 args.v2.usFbDiv = cpu_to_le16(fb_div);
538 spc2_ptr->usRefDiv = cpu_to_le16(ref_div); 588 args.v2.ucFracFbDiv = frac_fb_div;
539 spc2_ptr->usFbDiv = cpu_to_le16(fb_div); 589 args.v2.ucPostDiv = post_div;
540 spc2_ptr->ucFracFbDiv = frac_fb_div; 590 args.v2.ucPpll =
541 spc2_ptr->ucPostDiv = post_div;
542 spc2_ptr->ucPpll =
543 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
544 spc2_ptr->ucCRTC = radeon_crtc->crtc_id; 592 args.v2.ucCRTC = radeon_crtc->crtc_id;
545 spc2_ptr->ucRefDivSrc = 1; 593 args.v2.ucRefDivSrc = 1;
546 break; 594 break;
547 case 3: 595 case 3:
548 if (!encoder) 596 args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
549 return; 597 args.v3.usRefDiv = cpu_to_le16(ref_div);
550 spc3_ptr = 598 args.v3.usFbDiv = cpu_to_le16(fb_div);
551 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; 599 args.v3.ucFracFbDiv = frac_fb_div;
552 spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 600 args.v3.ucPostDiv = post_div;
553 spc3_ptr->usRefDiv = cpu_to_le16(ref_div); 601 args.v3.ucPpll =
554 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
555 spc3_ptr->ucFracFbDiv = frac_fb_div;
556 spc3_ptr->ucPostDiv = post_div;
557 spc3_ptr->ucPpll =
558 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
559 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); 603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
560 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; 604 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
561 spc3_ptr->ucEncoderMode = 605 args.v3.ucEncoderMode =
562 atombios_get_encoder_mode(encoder); 606 atombios_get_encoder_mode(encoder);
563 break; 607 break;
564 default: 608 default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
571 return; 615 return;
572 } 616 }
573 617
574 printk("executing set pll\n");
575 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
576} 619}
577 620
578int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
579 struct drm_framebuffer *old_fb) 622 struct drm_framebuffer *old_fb)
580{ 623{
581 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 624 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
582 struct drm_device *dev = crtc->dev; 625 struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
706 return 0; 749 return 0;
707} 750}
708 751
752int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
753 struct drm_framebuffer *old_fb)
754{
755 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private;
757
758 if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb);
762}
763
764/* properly set additional regs when using atombios */
765static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
766{
767 struct drm_device *dev = crtc->dev;
768 struct radeon_device *rdev = dev->dev_private;
769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
770 u32 disp_merge_cntl;
771
772 switch (radeon_crtc->crtc_id) {
773 case 0:
774 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
775 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
776 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
777 break;
778 case 1:
779 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
780 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
781 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
782 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
783 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
784 break;
785 }
786}
787
709int atombios_crtc_mode_set(struct drm_crtc *crtc, 788int atombios_crtc_mode_set(struct drm_crtc *crtc,
710 struct drm_display_mode *mode, 789 struct drm_display_mode *mode,
711 struct drm_display_mode *adjusted_mode, 790 struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
727 else { 806 else {
728 if (radeon_crtc->crtc_id == 0) 807 if (radeon_crtc->crtc_id == 0)
729 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
730 radeon_crtc_set_base(crtc, x, y, old_fb); 809 atombios_crtc_set_base(crtc, x, y, old_fb);
731 radeon_legacy_atom_set_surface(crtc); 810 radeon_legacy_atom_fixup(crtc);
732 } 811 }
733 atombios_overscan_setup(crtc, mode, adjusted_mode); 812 atombios_overscan_setup(crtc, mode, adjusted_mode);
734 atombios_scaler_setup(crtc); 813 atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
746 825
747static void atombios_crtc_prepare(struct drm_crtc *crtc) 826static void atombios_crtc_prepare(struct drm_crtc *crtc)
748{ 827{
749 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
750 atombios_lock_crtc(crtc, 1); 828 atombios_lock_crtc(crtc, 1);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
751} 830}
752 831
753static void atombios_crtc_commit(struct drm_crtc *crtc) 832static void atombios_crtc_commit(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8760d66e058a..11c9a3fe6810 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1504,6 +1504,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1504 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1504 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1505 return -EINVAL; 1505 return -EINVAL;
1506 } 1506 }
1507 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1507 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1508 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1508 track->immd_dwords = pkt->count - 1; 1509 track->immd_dwords = pkt->count - 1;
1509 r = r100_cs_track_check(p->rdev, track); 1510 r = r100_cs_track_check(p->rdev, track);
@@ -3399,9 +3400,7 @@ int r100_mc_init(struct radeon_device *rdev)
3399 if (rdev->flags & RADEON_IS_AGP) { 3400 if (rdev->flags & RADEON_IS_AGP) {
3400 r = radeon_agp_init(rdev); 3401 r = radeon_agp_init(rdev);
3401 if (r) { 3402 if (r) {
3402 printk(KERN_WARNING "[drm] Disabling AGP\n"); 3403 radeon_agp_disable(rdev);
3403 rdev->flags &= ~RADEON_IS_AGP;
3404 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3405 } else { 3404 } else {
3406 rdev->mc.gtt_location = rdev->mc.agp_base; 3405 rdev->mc.gtt_location = rdev->mc.agp_base;
3407 } 3406 }
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..ff1e0cd608bf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
371 case 5: 371 case 5:
372 case 6: 372 case 6:
373 case 7: 373 case 7:
374 /* 1D/2D */
374 track->textures[i].tex_coord_type = 0; 375 track->textures[i].tex_coord_type = 0;
375 break; 376 break;
376 case 1: 377 case 1:
377 track->textures[i].tex_coord_type = 1; 378 /* CUBE */
379 track->textures[i].tex_coord_type = 2;
378 break; 380 break;
379 case 2: 381 case 2:
380 track->textures[i].tex_coord_type = 2; 382 /* 3D */
383 track->textures[i].tex_coord_type = 1;
381 break; 384 break;
382 } 385 }
383 break; 386 break;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 053404e71a9d..4526faaacca8 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -50,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
50 if (rdev->flags & RADEON_IS_AGP) { 50 if (rdev->flags & RADEON_IS_AGP) {
51 r = radeon_agp_init(rdev); 51 r = radeon_agp_init(rdev);
52 if (r) { 52 if (r) {
53 printk(KERN_WARNING "[drm] Disabling AGP\n"); 53 radeon_agp_disable(rdev);
54 rdev->flags &= ~RADEON_IS_AGP;
55 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
56 } else { 54 } else {
57 rdev->mc.gtt_location = rdev->mc.agp_base; 55 rdev->mc.gtt_location = rdev->mc.agp_base;
58 } 56 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f5ff3490929f..da9aa3c31bcf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -624,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
624 fixed20_12 a; 624 fixed20_12 a;
625 u32 tmp; 625 u32 tmp;
626 int chansize, numchan; 626 int chansize, numchan;
627 int r;
628 627
629 /* Get VRAM informations */ 628 /* Get VRAM informations */
630 rdev->mc.vram_is_ddr = true; 629 rdev->mc.vram_is_ddr = true;
@@ -667,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
667 rdev->mc.real_vram_size = rdev->mc.aper_size; 666 rdev->mc.real_vram_size = rdev->mc.aper_size;
668 667
669 if (rdev->flags & RADEON_IS_AGP) { 668 if (rdev->flags & RADEON_IS_AGP) {
670 r = radeon_agp_init(rdev);
671 if (r)
672 return r;
673 /* gtt_size is setup by radeon_agp_init */ 669 /* gtt_size is setup by radeon_agp_init */
674 rdev->mc.gtt_location = rdev->mc.agp_base; 670 rdev->mc.gtt_location = rdev->mc.agp_base;
675 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -1958,14 +1954,17 @@ int r600_suspend(struct radeon_device *rdev)
1958 /* FIXME: we should wait for ring to be empty */ 1954 /* FIXME: we should wait for ring to be empty */
1959 r600_cp_stop(rdev); 1955 r600_cp_stop(rdev);
1960 rdev->cp.ready = false; 1956 rdev->cp.ready = false;
1957 r600_irq_suspend(rdev);
1961 r600_wb_disable(rdev); 1958 r600_wb_disable(rdev);
1962 r600_pcie_gart_disable(rdev); 1959 r600_pcie_gart_disable(rdev);
1963 /* unpin shaders bo */ 1960 /* unpin shaders bo */
1964 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1961 if (rdev->r600_blit.shader_obj) {
1965 if (unlikely(r != 0)) 1962 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1966 return r; 1963 if (!r) {
1967 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1964 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1968 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1965 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1966 }
1967 }
1969 return 0; 1968 return 0;
1970} 1969}
1971 1970
@@ -2026,6 +2025,11 @@ int r600_init(struct radeon_device *rdev)
2026 r = radeon_fence_driver_init(rdev); 2025 r = radeon_fence_driver_init(rdev);
2027 if (r) 2026 if (r)
2028 return r; 2027 return r;
2028 if (rdev->flags & RADEON_IS_AGP) {
2029 r = radeon_agp_init(rdev);
2030 if (r)
2031 radeon_agp_disable(rdev);
2032 }
2029 r = r600_mc_init(rdev); 2033 r = r600_mc_init(rdev);
2030 if (r) 2034 if (r)
2031 return r; 2035 return r;
@@ -2060,13 +2064,14 @@ int r600_init(struct radeon_device *rdev)
2060 if (rdev->accel_working) { 2064 if (rdev->accel_working) {
2061 r = radeon_ib_pool_init(rdev); 2065 r = radeon_ib_pool_init(rdev);
2062 if (r) { 2066 if (r) {
2063 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 2067 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2064 rdev->accel_working = false;
2065 }
2066 r = r600_ib_test(rdev);
2067 if (r) {
2068 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2069 rdev->accel_working = false; 2068 rdev->accel_working = false;
2069 } else {
2070 r = r600_ib_test(rdev);
2071 if (r) {
2072 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2073 rdev->accel_working = false;
2074 }
2070 } 2075 }
2071 } 2076 }
2072 2077
@@ -2197,14 +2202,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2197 rb_bufsz = drm_order(ring_size / 4); 2202 rb_bufsz = drm_order(ring_size / 4);
2198 ring_size = (1 << rb_bufsz) * 4; 2203 ring_size = (1 << rb_bufsz) * 4;
2199 rdev->ih.ring_size = ring_size; 2204 rdev->ih.ring_size = ring_size;
2200 rdev->ih.align_mask = 4 - 1; 2205 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2206 rdev->ih.rptr = 0;
2201} 2207}
2202 2208
2203static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) 2209static int r600_ih_ring_alloc(struct radeon_device *rdev)
2204{ 2210{
2205 int r; 2211 int r;
2206 2212
2207 rdev->ih.ring_size = ring_size;
2208 /* Allocate ring buffer */ 2213 /* Allocate ring buffer */
2209 if (rdev->ih.ring_obj == NULL) { 2214 if (rdev->ih.ring_obj == NULL) {
2210 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2215 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2234,9 +2239,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2234 return r; 2239 return r;
2235 } 2240 }
2236 } 2241 }
2237 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2238 rdev->ih.rptr = 0;
2239
2240 return 0; 2242 return 0;
2241} 2243}
2242 2244
@@ -2386,7 +2388,7 @@ int r600_irq_init(struct radeon_device *rdev)
2386 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2388 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2387 2389
2388 /* allocate ring */ 2390 /* allocate ring */
2389 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); 2391 ret = r600_ih_ring_alloc(rdev);
2390 if (ret) 2392 if (ret)
2391 return ret; 2393 return ret;
2392 2394
@@ -2449,10 +2451,15 @@ int r600_irq_init(struct radeon_device *rdev)
2449 return ret; 2451 return ret;
2450} 2452}
2451 2453
2452void r600_irq_fini(struct radeon_device *rdev) 2454void r600_irq_suspend(struct radeon_device *rdev)
2453{ 2455{
2454 r600_disable_interrupts(rdev); 2456 r600_disable_interrupts(rdev);
2455 r600_rlc_stop(rdev); 2457 r600_rlc_stop(rdev);
2458}
2459
2460void r600_irq_fini(struct radeon_device *rdev)
2461{
2462 r600_irq_suspend(rdev);
2456 r600_ih_ring_fini(rdev); 2463 r600_ih_ring_fini(rdev);
2457} 2464}
2458 2465
@@ -2467,8 +2474,12 @@ int r600_irq_set(struct radeon_device *rdev)
2467 return -EINVAL; 2474 return -EINVAL;
2468 } 2475 }
2469 /* don't enable anything if the ih is disabled */ 2476 /* don't enable anything if the ih is disabled */
2470 if (!rdev->ih.enabled) 2477 if (!rdev->ih.enabled) {
2478 r600_disable_interrupts(rdev);
2479 /* force the active interrupt state to all disabled */
2480 r600_disable_interrupt_state(rdev);
2471 return 0; 2481 return 0;
2482 }
2472 2483
2473 if (ASIC_IS_DCE3(rdev)) { 2484 if (ASIC_IS_DCE3(rdev)) {
2474 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2485 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2638,16 +2649,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2638 wptr = RREG32(IH_RB_WPTR); 2649 wptr = RREG32(IH_RB_WPTR);
2639 2650
2640 if (wptr & RB_OVERFLOW) { 2651 if (wptr & RB_OVERFLOW) {
 2641 WARN_ON(1); 2652 /* When a ring buffer overflow happens, start parsing interrupts
 2642 /* XXX deal with overflow */ 2653 * from the last vector that was not overwritten (wptr + 16). Hopefully
 2643 DRM_ERROR("IH RB overflow\n"); 2654 * this should allow us to catch up.
2655 */
2656 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
 2657 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2658 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2644 tmp = RREG32(IH_RB_CNTL); 2659 tmp = RREG32(IH_RB_CNTL);
2645 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2660 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2646 WREG32(IH_RB_CNTL, tmp); 2661 WREG32(IH_RB_CNTL, tmp);
2647 } 2662 }
2648 wptr = wptr & WPTR_OFFSET_MASK; 2663 return (wptr & rdev->ih.ptr_mask);
2649
2650 return wptr;
2651} 2664}
2652 2665
2653/* r600 IV Ring 2666/* r600 IV Ring
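The overflow handling above depends on the IH ring size being a power of two, so (wptr + 16) & ptr_mask lands on the oldest vector that has not yet been overwritten. A small sketch of that wrap-and-catch-up arithmetic with plain integers and no hardware registers; RING_SIZE and ENTRY_SIZE are illustrative values only:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE   256u              /* must be a power of two */
#define ENTRY_SIZE  16u               /* one IV ring vector is 16 bytes */
#define PTR_MASK    (RING_SIZE - 1u)

/* Advance the read pointer one entry, wrapping with the mask. */
static uint32_t advance(uint32_t rptr)
{
    return (rptr + ENTRY_SIZE) & PTR_MASK;
}

/* On overflow, resume from the oldest entry that is still intact:
 * one entry past the current write pointer. */
static uint32_t recover(uint32_t wptr)
{
    return (wptr + ENTRY_SIZE) & PTR_MASK;
}

int main(void)
{
    uint32_t rptr, wptr = 240;

    rptr = recover(wptr);             /* pretend an overflow just happened */
    printf("resume at %u\n", rptr);   /* 0: wrapped past the end of the ring */

    for (int i = 0; i < 4; i++)
        rptr = advance(rptr);
    printf("rptr now %u\n", rptr);    /* 64 */
    return 0;
}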
@@ -2683,12 +2696,13 @@ int r600_irq_process(struct radeon_device *rdev)
2683 u32 wptr = r600_get_ih_wptr(rdev); 2696 u32 wptr = r600_get_ih_wptr(rdev);
2684 u32 rptr = rdev->ih.rptr; 2697 u32 rptr = rdev->ih.rptr;
2685 u32 src_id, src_data; 2698 u32 src_id, src_data;
2686 u32 last_entry = rdev->ih.ring_size - 16;
2687 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2699 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2688 unsigned long flags; 2700 unsigned long flags;
2689 bool queue_hotplug = false; 2701 bool queue_hotplug = false;
2690 2702
2691 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2703 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2704 if (!rdev->ih.enabled)
2705 return IRQ_NONE;
2692 2706
2693 spin_lock_irqsave(&rdev->ih.lock, flags); 2707 spin_lock_irqsave(&rdev->ih.lock, flags);
2694 2708
@@ -2817,10 +2831,8 @@ restart_ih:
2817 } 2831 }
2818 2832
2819 /* wptr/rptr are in bytes! */ 2833 /* wptr/rptr are in bytes! */
2820 if (rptr == last_entry) 2834 rptr += 16;
2821 rptr = 0; 2835 rptr &= rdev->ih.ptr_mask;
2822 else
2823 rptr += 16;
2824 } 2836 }
2825 /* make sure wptr hasn't changed while processing */ 2837 /* make sure wptr hasn't changed while processing */
2826 wptr = r600_get_ih_wptr(rdev); 2838 wptr = r600_get_ih_wptr(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8787ea89dc6e..2bedce477a97 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,14 +512,16 @@ void r600_blit_fini(struct radeon_device *rdev)
512{ 512{
513 int r; 513 int r;
514 514
515 if (rdev->r600_blit.shader_obj == NULL)
516 return;
517 /* If we can't reserve the bo, unref should be enough to destroy
518 * it when it becomes idle.
519 */
515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 520 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
516 if (unlikely(r != 0)) { 521 if (!r) {
517 dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); 522 radeon_bo_unpin(rdev->r600_blit.shader_obj);
518 goto out_unref; 523 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
519 } 524 }
520 radeon_bo_unpin(rdev->r600_blit.shader_obj);
521 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
522out_unref:
523 radeon_bo_unref(&rdev->r600_blit.shader_obj); 525 radeon_bo_unref(&rdev->r600_blit.shader_obj);
524} 526}
525 527
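The teardown above relies on reference counting: when the buffer cannot be reserved, dropping the last reference is enough and the object is destroyed once it goes idle. A toy sketch of that "clean up eagerly if possible, otherwise let the refcount finish the job" shape, with a made-up obj type instead of radeon_bo:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for a GPU buffer object. */
struct obj {
    int refcount;
    int pinned;
    int busy;                          /* the GPU may still be using it */
};

static int obj_reserve(struct obj *o)
{
    return o->busy ? -1 : 0;           /* reservation fails while busy */
}

static void obj_unref(struct obj **po)
{
    struct obj *o = *po;

    *po = NULL;
    if (--o->refcount == 0)
        free(o);                       /* destroyed when the last ref drops */
}

static void obj_fini(struct obj **po)
{
    struct obj *o = *po;

    if (!o)
        return;
    /* If we cannot reserve it now, unref alone is enough: the object is
     * released once it becomes idle and the last reference goes away. */
    if (obj_reserve(o) == 0)
        o->pinned = 0;
    obj_unref(po);
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));

    o->refcount = 1;
    o->pinned = 1;
    obj_fini(&o);
    printf("done, obj %s\n", o ? "still around" : "released");
    return 0;
}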
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 44060b92d9e6..e4c45ec16507 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
38 38
39struct r600_cs_track {
40 u32 cb_color0_base_last;
41};
42
39/** 43/**
40 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
41 * @parser: parser structure holding parsing context. 45 * @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
177} 181}
178 182
179/** 183/**
184 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
185 * @parser: parser structure holding parsing context.
186 *
 187 * Check whether the next packet is a relocation packet3 (PACKET3_NOP);
 188 * returns 1 if it is, 0 otherwise.
189 **/
190static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
191{
192 struct radeon_cs_packet p3reloc;
193 int r;
194
195 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
196 if (r) {
197 return 0;
198 }
199 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
200 return 0;
201 }
202 return 1;
203}
204
205/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet 206 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context. 207 * @parser: parser structure holding parsing context.
182 * 208 *
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
337 struct radeon_cs_packet *pkt) 363 struct radeon_cs_packet *pkt)
338{ 364{
339 struct radeon_cs_reloc *reloc; 365 struct radeon_cs_reloc *reloc;
366 struct r600_cs_track *track;
340 volatile u32 *ib; 367 volatile u32 *ib;
341 unsigned idx; 368 unsigned idx;
342 unsigned i; 369 unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
344 int r; 371 int r;
345 u32 idx_value; 372 u32 idx_value;
346 373
374 track = (struct r600_cs_track *)p->track;
347 ib = p->ib->ptr; 375 ib = p->ib->ptr;
348 idx = pkt->idx + 1; 376 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx); 377 idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
503 for (i = 0; i < pkt->count; i++) { 531 for (i = 0; i < pkt->count; i++) {
504 reg = start_reg + (4 * i); 532 reg = start_reg + (4 * i);
505 switch (reg) { 533 switch (reg) {
 534 /* These registers were added late; there is userspace
 535 * which does provide a relocation for them but sets a
 536 * 0 offset. In order to avoid breaking old userspace
 537 * we detect this and set the address to point to the last
 538 * CB_COLOR0_BASE. Note that if userspace doesn't set
 539 * CB_COLOR0_BASE before this register we will report an
 540 * error. Old userspace always set CB_COLOR0_BASE
 541 * before any of these.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
 565 printk_once(KERN_WARNING "radeon: You have old & broken userspace, "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
506 case DB_DEPTH_BASE: 576 case DB_DEPTH_BASE:
507 case DB_HTILE_DATA_BASE: 577 case DB_HTILE_DATA_BASE:
508 case CB_COLOR0_BASE: 578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
509 case CB_COLOR1_BASE: 588 case CB_COLOR1_BASE:
510 case CB_COLOR2_BASE: 589 case CB_COLOR2_BASE:
511 case CB_COLOR3_BASE: 590 case CB_COLOR3_BASE:
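The fallback above hinges on peeking at the next packet without consuming it: parse it into a scratch structure, check its type and opcode, and only take the relocation path when a PACKET3_NOP really follows. A toy sketch of that look-ahead, using a hypothetical flat packet stream rather than a command buffer:

#include <stdio.h>

enum { PKT_NOP, PKT_DRAW, PKT_SETREG };

struct stream { const int *pkts; int count; int idx; };

/* Peek at the next packet without moving the stream position. */
static int next_is_nop(const struct stream *s)
{
    if (s->idx >= s->count)
        return 0;
    return s->pkts[s->idx] == PKT_NOP;
}

int main(void)
{
    const int pkts[] = { PKT_SETREG, PKT_NOP, PKT_DRAW };
    struct stream s = { pkts, 3, 1 };

    /* After consuming PKT_SETREG the parser sits at index 1; a NOP there
     * means a relocation follows, otherwise fall back to a cached base. */
    printf("reloc follows: %s\n", next_is_nop(&s) ? "yes" : "no");
    return 0;
}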
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
678int r600_cs_parse(struct radeon_cs_parser *p) 757int r600_cs_parse(struct radeon_cs_parser *p)
679{ 758{
680 struct radeon_cs_packet pkt; 759 struct radeon_cs_packet pkt;
760 struct r600_cs_track *track;
681 int r; 761 int r;
682 762
763 track = kzalloc(sizeof(*track), GFP_KERNEL);
764 p->track = track;
683 do { 765 do {
684 r = r600_cs_packet_parse(p, &pkt, p->idx); 766 r = r600_cs_packet_parse(p, &pkt, p->idx);
685 if (r) { 767 if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
757 /* initialize parser */ 839 /* initialize parser */
758 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 840 memset(&parser, 0, sizeof(struct radeon_cs_parser));
759 parser.filp = filp; 841 parser.filp = filp;
842 parser.dev = &dev->pdev->dev;
760 parser.rdev = NULL; 843 parser.rdev = NULL;
761 parser.family = family; 844 parser.family = family;
762 parser.ib = &fake_ib; 845 parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab4..30480881aed1 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
889#define C_0280E0_BASE_256B 0x00000000
890#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
891#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
892#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
893#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
894#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
895#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
896#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
897#define R_0280C0_CB_COLOR0_TILE 0x0280C0
898#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
899#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
900#define C_0280C0_BASE_256B 0x00000000
901#define R_0280C4_CB_COLOR1_TILE 0x0280C4
902#define R_0280C8_CB_COLOR2_TILE 0x0280C8
903#define R_0280CC_CB_COLOR3_TILE 0x0280CC
904#define R_0280D0_CB_COLOR4_TILE 0x0280D0
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908
909
885#endif 910#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index eb5f99b9469d..f7df1a7e4413 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -410,7 +410,6 @@ struct r600_ih {
410 unsigned wptr_old; 410 unsigned wptr_old;
411 unsigned ring_size; 411 unsigned ring_size;
412 uint64_t gpu_addr; 412 uint64_t gpu_addr;
413 uint32_t align_mask;
414 uint32_t ptr_mask; 413 uint32_t ptr_mask;
415 spinlock_t lock; 414 spinlock_t lock;
416 bool enabled; 415 bool enabled;
@@ -465,6 +464,7 @@ struct radeon_cs_chunk {
465}; 464};
466 465
467struct radeon_cs_parser { 466struct radeon_cs_parser {
467 struct device *dev;
468 struct radeon_device *rdev; 468 struct radeon_device *rdev;
469 struct drm_file *filp; 469 struct drm_file *filp;
470 /* chunks */ 470 /* chunks */
@@ -847,7 +847,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
847 847
848static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 848static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
849{ 849{
850 if (reg < 0x10000) 850 if (reg < rdev->rmmio_size)
851 return readl(((void __iomem *)rdev->rmmio) + reg); 851 return readl(((void __iomem *)rdev->rmmio) + reg);
852 else { 852 else {
853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -857,7 +857,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
857 857
858static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 858static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
859{ 859{
860 if (reg < 0x10000) 860 if (reg < rdev->rmmio_size)
861 writel(v, ((void __iomem *)rdev->rmmio) + reg); 861 writel(v, ((void __iomem *)rdev->rmmio) + reg);
862 else { 862 else {
863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
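Both accessors above replace the hard-coded 0x10000 bound with the actual size of the mapped register BAR: offsets inside the mapping are read and written directly, anything beyond goes through the MM_INDEX/MM_DATA indirect window. A hedged sketch of that direct-versus-indirect dispatch, using a plain array as the "mapped" window and a second array behind an index register as the indirect path (the names are invented, not the radeon macros):

#include <stdint.h>
#include <stdio.h>

#define MMIO_SIZE 0x100u                  /* pretend only this much is mapped */

static uint32_t mmio[MMIO_SIZE / 4];      /* direct window */
static uint32_t index_reg, data_backing[0x1000];

/* Indirect path: latch the offset in an index register, then touch data. */
static uint32_t indirect_read(uint32_t reg)
{
    index_reg = reg;
    return data_backing[index_reg / 4];
}

static void indirect_write(uint32_t reg, uint32_t val)
{
    index_reg = reg;
    data_backing[index_reg / 4] = val;
}

static uint32_t reg_read(uint32_t reg)
{
    if (reg < MMIO_SIZE)
        return mmio[reg / 4];             /* falls inside the mapped window */
    return indirect_read(reg);            /* otherwise go through index/data */
}

static void reg_write(uint32_t reg, uint32_t val)
{
    if (reg < MMIO_SIZE)
        mmio[reg / 4] = val;
    else
        indirect_write(reg, val);
}

int main(void)
{
    reg_write(0x40, 0xdeadbeef);          /* direct access */
    reg_write(0x2000, 0x12345678);        /* indirect access */
    printf("0x40   -> 0x%08x\n", reg_read(0x40));
    printf("0x2000 -> 0x%08x\n", reg_read(0x2000));
    return 0;
}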
@@ -1017,6 +1017,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1017#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1017#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1018 1018
1019/* Common functions */ 1019/* Common functions */
1020/* AGP */
1021extern void radeon_agp_disable(struct radeon_device *rdev);
1020extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1022extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1021extern int radeon_modeset_init(struct radeon_device *rdev); 1023extern int radeon_modeset_init(struct radeon_device *rdev);
1022extern void radeon_modeset_fini(struct radeon_device *rdev); 1024extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1160,7 +1162,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
1160extern void r600_irq_fini(struct radeon_device *rdev); 1162extern void r600_irq_fini(struct radeon_device *rdev);
1161extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1163extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1162extern int r600_irq_set(struct radeon_device *rdev); 1164extern int r600_irq_set(struct radeon_device *rdev);
1163 1165extern void r600_irq_suspend(struct radeon_device *rdev);
1166/* r600 audio */
1164extern int r600_audio_init(struct radeon_device *rdev); 1167extern int r600_audio_init(struct radeon_device *rdev);
1165extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1168extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1166extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1169extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 220f454ea9fa..c9ad7f5cc1ac 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -133,6 +133,13 @@ int radeon_agp_init(struct radeon_device *rdev)
133 bool is_v3; 133 bool is_v3;
134 int ret; 134 int ret;
135 135
136 if (rdev->ddev->agp->agp_info.aper_size < 32) {
 137 dev_warn(rdev->dev, "AGP aperture too small (%dM), "
138 "need at least 32M, disabling AGP\n",
139 rdev->ddev->agp->agp_info.aper_size);
140 return -EINVAL;
141 }
142
136 /* Acquire AGP. */ 143 /* Acquire AGP. */
137 if (!rdev->ddev->agp->acquired) { 144 if (!rdev->ddev->agp->acquired) {
138 ret = drm_agp_acquire(rdev->ddev); 145 ret = drm_agp_acquire(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 812f24dbc2a8..73c4405bf42f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
56 else if (post_div == 3) 56 else if (post_div == 3)
57 sclk >>= 2; 57 sclk >>= 2;
58 else if (post_div == 4) 58 else if (post_div == 4)
59 sclk >>= 4; 59 sclk >>= 3;
60 60
61 return sclk; 61 return sclk;
62} 62}
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
86 else if (post_div == 3) 86 else if (post_div == 3)
87 mclk >>= 2; 87 mclk >>= 2;
88 else if (post_div == 4) 88 else if (post_div == 4)
89 mclk >>= 4; 89 mclk >>= 3;
90 90
91 return mclk; 91 return mclk;
92} 92}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d93..1496cb8658ef 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -231,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 231 memset(&parser, 0, sizeof(struct radeon_cs_parser));
232 parser.filp = filp; 232 parser.filp = filp;
233 parser.rdev = rdev; 233 parser.rdev = rdev;
234 parser.dev = rdev->dev;
234 r = radeon_cs_parser_init(&parser, data); 235 r = radeon_cs_parser_init(&parser, data);
235 if (r) { 236 if (r) {
236 DRM_ERROR("Failed to initialize parser !\n"); 237 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0c51f8e46613..768b1509fa03 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
545 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 545 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
546 } 546 }
547 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
547} 548}
548 549
549void radeon_check_arguments(struct radeon_device *rdev) 550void radeon_check_arguments(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0ec491ead2ff..6a92f994cc26 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -357,7 +357,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
357 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 357 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
358 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 358 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
359 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 359 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
360 if (dig->dp_i2c_bus) 360 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
361 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
361 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 362 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
362 } 363 }
363 if (!radeon_connector->ddc_bus) 364 if (!radeon_connector->ddc_bus)
@@ -410,11 +411,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
410 uint32_t *fb_div_p, 411 uint32_t *fb_div_p,
411 uint32_t *frac_fb_div_p, 412 uint32_t *frac_fb_div_p,
412 uint32_t *ref_div_p, 413 uint32_t *ref_div_p,
413 uint32_t *post_div_p, 414 uint32_t *post_div_p)
414 int flags)
415{ 415{
416 uint32_t min_ref_div = pll->min_ref_div; 416 uint32_t min_ref_div = pll->min_ref_div;
417 uint32_t max_ref_div = pll->max_ref_div; 417 uint32_t max_ref_div = pll->max_ref_div;
418 uint32_t min_post_div = pll->min_post_div;
419 uint32_t max_post_div = pll->max_post_div;
418 uint32_t min_fractional_feed_div = 0; 420 uint32_t min_fractional_feed_div = 0;
419 uint32_t max_fractional_feed_div = 0; 421 uint32_t max_fractional_feed_div = 0;
420 uint32_t best_vco = pll->best_vco; 422 uint32_t best_vco = pll->best_vco;
@@ -430,7 +432,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
430 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 432 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
431 freq = freq * 1000; 433 freq = freq * 1000;
432 434
433 if (flags & RADEON_PLL_USE_REF_DIV) 435 if (pll->flags & RADEON_PLL_USE_REF_DIV)
434 min_ref_div = max_ref_div = pll->reference_div; 436 min_ref_div = max_ref_div = pll->reference_div;
435 else { 437 else {
436 while (min_ref_div < max_ref_div-1) { 438 while (min_ref_div < max_ref_div-1) {
@@ -445,19 +447,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
445 } 447 }
446 } 448 }
447 449
448 if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { 450 if (pll->flags & RADEON_PLL_USE_POST_DIV)
451 min_post_div = max_post_div = pll->post_div;
452
453 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
449 min_fractional_feed_div = pll->min_frac_feedback_div; 454 min_fractional_feed_div = pll->min_frac_feedback_div;
450 max_fractional_feed_div = pll->max_frac_feedback_div; 455 max_fractional_feed_div = pll->max_frac_feedback_div;
451 } 456 }
452 457
453 for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { 458 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
454 uint32_t ref_div; 459 uint32_t ref_div;
455 460
456 if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 461 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
457 continue; 462 continue;
458 463
459 /* legacy radeons only have a few post_divs */ 464 /* legacy radeons only have a few post_divs */
460 if (flags & RADEON_PLL_LEGACY) { 465 if (pll->flags & RADEON_PLL_LEGACY) {
461 if ((post_div == 5) || 466 if ((post_div == 5) ||
462 (post_div == 7) || 467 (post_div == 7) ||
463 (post_div == 9) || 468 (post_div == 9) ||
@@ -504,7 +509,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
504 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 509 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
505 current_freq = radeon_div(tmp, ref_div * post_div); 510 current_freq = radeon_div(tmp, ref_div * post_div);
506 511
507 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 512 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
508 error = freq - current_freq; 513 error = freq - current_freq;
509 error = error < 0 ? 0xffffffff : error; 514 error = error < 0 ? 0xffffffff : error;
510 } else 515 } else
@@ -531,12 +536,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
531 best_freq = current_freq; 536 best_freq = current_freq;
532 best_error = error; 537 best_error = error;
533 best_vco_diff = vco_diff; 538 best_vco_diff = vco_diff;
534 } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 539 } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
535 ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 540 ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
536 ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 541 ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
537 ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 542 ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
538 ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 543 ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
539 ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 544 ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
540 best_post_div = post_div; 545 best_post_div = post_div;
541 best_ref_div = ref_div; 546 best_ref_div = ref_div;
542 best_feedback_div = feedback_div; 547 best_feedback_div = feedback_div;
@@ -572,8 +577,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
572 uint32_t *fb_div_p, 577 uint32_t *fb_div_p,
573 uint32_t *frac_fb_div_p, 578 uint32_t *frac_fb_div_p,
574 uint32_t *ref_div_p, 579 uint32_t *ref_div_p,
575 uint32_t *post_div_p, 580 uint32_t *post_div_p)
576 int flags)
577{ 581{
578 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 582 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
579 fixed20_12 pll_out_max, pll_out_min; 583 fixed20_12 pll_out_max, pll_out_min;
@@ -667,7 +671,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
667 radeonfb_remove(dev, fb); 671 radeonfb_remove(dev, fb);
668 672
669 if (radeon_fb->obj) { 673 if (radeon_fb->obj) {
670 radeon_gem_object_unpin(radeon_fb->obj);
671 mutex_lock(&dev->struct_mutex); 674 mutex_lock(&dev->struct_mutex);
672 drm_gem_object_unreference(radeon_fb->obj); 675 drm_gem_object_unreference(radeon_fb->obj);
673 mutex_unlock(&dev->struct_mutex); 676 mutex_unlock(&dev->struct_mutex);
@@ -715,7 +718,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
715 struct drm_gem_object *obj; 718 struct drm_gem_object *obj;
716 719
717 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 720 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
718 721 if (obj == NULL) {
 722 dev_err(&dev->pdev->dev, "No GEM object associated with handle 0x%08X, "
723 "can't create framebuffer\n", mode_cmd->handle);
724 return NULL;
725 }
719 return radeon_framebuffer_create(dev, mode_cmd, obj); 726 return radeon_framebuffer_create(dev, mode_cmd, obj);
720} 727}
721 728
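As an aside, the radeon_display.c hunks above drop the extra flags argument from radeon_compute_pll() and read the constraint bits from the pll object instead. A minimal stand-alone C sketch of that calling-convention change, assuming invented names (example_pll, PLL_USE_REF_DIV) rather than the real RADEON_PLL_* definitions:

#include <stdio.h>
#include <stdint.h>

#define PLL_USE_REF_DIV        (1 << 0)  /* stand-in for the RADEON_PLL_* bits */
#define PLL_PREFER_HIGH_FB_DIV (1 << 1)

struct example_pll {
	uint32_t reference_div;
	uint32_t flags;          /* constraints now live in the object */
};

static void example_compute(const struct example_pll *pll, uint32_t freq_khz)
{
	/* before the change the flags arrived as an extra int parameter;
	 * afterwards the function reads them from the pll itself */
	if (pll->flags & PLL_USE_REF_DIV)
		printf("%u kHz: using fixed ref_div %u\n", freq_khz, pll->reference_div);
	else
		printf("%u kHz: searching for a ref_div\n", freq_khz);
}

int main(void)
{
	struct example_pll pll = { .reference_div = 12, .flags = 0 };

	pll.flags |= PLL_USE_REF_DIV;   /* callers set flags once, up front */
	example_compute(&pll, 154000);
	return 0;
}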
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cc27485a07ad..b6d8081e1246 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
339 } 339 }
340} 340}
341 341
342/* properly set crtc bpp when using atombios */
343void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
344{
345 struct drm_device *dev = crtc->dev;
346 struct radeon_device *rdev = dev->dev_private;
347 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
348 int format;
349 uint32_t crtc_gen_cntl;
350 uint32_t disp_merge_cntl;
351 uint32_t crtc_pitch;
352
353 switch (crtc->fb->bits_per_pixel) {
354 case 8:
355 format = 2;
356 break;
357 case 15: /* 555 */
358 format = 3;
359 break;
360 case 16: /* 565 */
361 format = 4;
362 break;
363 case 24: /* RGB */
364 format = 5;
365 break;
366 case 32: /* xRGB */
367 format = 6;
368 break;
369 default:
370 return;
371 }
372
373 crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
374 ((crtc->fb->bits_per_pixel * 8) - 1)) /
375 (crtc->fb->bits_per_pixel * 8));
376 crtc_pitch |= crtc_pitch << 16;
377
378 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
379
380 switch (radeon_crtc->crtc_id) {
381 case 0:
382 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
383 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
384 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
385
386 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
387 crtc_gen_cntl |= (format << 8);
388 crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
389 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
390 break;
391 case 1:
392 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
393 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
394 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
395
396 crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
397 crtc_gen_cntl |= (format << 8);
398 WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
399 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
400 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
401 break;
402 }
403}
404
405int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 342int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
406 struct drm_framebuffer *old_fb) 343 struct drm_framebuffer *old_fb)
407{ 344{
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
755 uint32_t post_divider = 0; 692 uint32_t post_divider = 0;
756 uint32_t freq = 0; 693 uint32_t freq = 0;
757 uint8_t pll_gain; 694 uint8_t pll_gain;
758 int pll_flags = RADEON_PLL_LEGACY;
759 bool use_bios_divs = false; 695 bool use_bios_divs = false;
760 /* PLL registers */ 696 /* PLL registers */
761 uint32_t pll_ref_div = 0; 697 uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
789 else 725 else
790 pll = &rdev->clock.p1pll; 726 pll = &rdev->clock.p1pll;
791 727
728 pll->flags = RADEON_PLL_LEGACY;
729
792 if (mode->clock > 200000) /* range limits??? */ 730 if (mode->clock > 200000) /* range limits??? */
793 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
794 else 732 else
795 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 733 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
796 734
797 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 735 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
798 if (encoder->crtc == crtc) { 736 if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
804 } 742 }
805 743
806 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 744 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
807 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 745 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
808 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 746 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
809 if (!rdev->is_atom_bios) { 747 if (!rdev->is_atom_bios) {
810 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 748 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
819 } 757 }
820 } 758 }
821 } 759 }
822 pll_flags |= RADEON_PLL_USE_REF_DIV; 760 pll->flags |= RADEON_PLL_USE_REF_DIV;
823 } 761 }
824 } 762 }
825 } 763 }
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
829 if (!use_bios_divs) { 767 if (!use_bios_divs) {
830 radeon_compute_pll(pll, mode->clock, 768 radeon_compute_pll(pll, mode->clock,
831 &freq, &feedback_div, &frac_fb_div, 769 &freq, &feedback_div, &frac_fb_div,
832 &reference_div, &post_divider, 770 &reference_div, &post_divider);
833 pll_flags);
834 771
835 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 772 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
836 if (post_div->divider == post_divider) 773 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 91cb041cb40d..96b851f92f4c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -125,16 +125,24 @@ struct radeon_tmds_pll {
125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12)
128 129
129struct radeon_pll { 130struct radeon_pll {
130 uint16_t reference_freq; 131 /* reference frequency */
131 uint16_t reference_div; 132 uint32_t reference_freq;
133
134 /* fixed dividers */
135 uint32_t reference_div;
136 uint32_t post_div;
137
138 /* pll in/out limits */
132 uint32_t pll_in_min; 139 uint32_t pll_in_min;
133 uint32_t pll_in_max; 140 uint32_t pll_in_max;
134 uint32_t pll_out_min; 141 uint32_t pll_out_min;
135 uint32_t pll_out_max; 142 uint32_t pll_out_max;
136 uint16_t xclk; 143 uint32_t best_vco;
137 144
145 /* divider limits */
138 uint32_t min_ref_div; 146 uint32_t min_ref_div;
139 uint32_t max_ref_div; 147 uint32_t max_ref_div;
140 uint32_t min_post_div; 148 uint32_t min_post_div;
@@ -143,7 +151,12 @@ struct radeon_pll {
143 uint32_t max_feedback_div; 151 uint32_t max_feedback_div;
144 uint32_t min_frac_feedback_div; 152 uint32_t min_frac_feedback_div;
145 uint32_t max_frac_feedback_div; 153 uint32_t max_frac_feedback_div;
146 uint32_t best_vco; 154
155 /* flags for the current clock */
156 uint32_t flags;
157
158 /* pll id */
159 uint32_t id;
147}; 160};
148 161
149struct radeon_i2c_chan { 162struct radeon_i2c_chan {
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
417 uint32_t *fb_div_p, 430 uint32_t *fb_div_p,
418 uint32_t *frac_fb_div_p, 431 uint32_t *frac_fb_div_p,
419 uint32_t *ref_div_p, 432 uint32_t *ref_div_p,
420 uint32_t *post_div_p, 433 uint32_t *post_div_p);
421 int flags);
422 434
423extern void radeon_compute_pll_avivo(struct radeon_pll *pll, 435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
424 uint64_t freq, 436 uint64_t freq,
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
426 uint32_t *fb_div_p, 438 uint32_t *fb_div_p,
427 uint32_t *frac_fb_div_p, 439 uint32_t *frac_fb_div_p,
428 uint32_t *ref_div_p, 440 uint32_t *ref_div_p,
429 uint32_t *post_div_p, 441 uint32_t *post_div_p);
430 int flags);
431 442
432extern void radeon_setup_encoder_clones(struct drm_device *dev); 443extern void radeon_setup_encoder_clones(struct drm_device *dev);
433 444
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
453 464
454extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 465extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
455 struct drm_framebuffer *old_fb); 466 struct drm_framebuffer *old_fb);
456extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
457 467
458extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 468extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
459 struct drm_file *file_priv, 469 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4e636de877b2..d72a71bff218 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
220 220
221int radeon_bo_evict_vram(struct radeon_device *rdev) 221int radeon_bo_evict_vram(struct radeon_device *rdev)
222{ 222{
223 if (rdev->flags & RADEON_IS_IGP) { 223 /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
224 if (0 && (rdev->flags & RADEON_IS_IGP)) {
224 if (rdev->mc.igp_sideport_enabled == false) 225 if (rdev->mc.igp_sideport_enabled == false)
225 /* Useless to evict on IGP chips */ 226 /* Useless to evict on IGP chips */
226 return 0; 227 return 0;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a16..c29ac434ac9c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
910x22b8 SE_TCL_TEX_CYL_WRAP_CTL 910x22b8 SE_TCL_TEX_CYL_WRAP_CTL
920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
930x22c4 SE_TCL_POINT_SPRITE_CNTL 930x22c4 SE_TCL_POINT_SPRITE_CNTL
940x22d0 SE_PVS_CNTL
950x22d4 SE_PVS_CONST_CNTL
940x2648 RE_POINTSIZE 960x2648 RE_POINTSIZE
950x26c0 RE_TOP_LEFT 970x26c0 RE_TOP_LEFT
960x26c4 RE_MISC 980x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 59c71245fb91..55f6ffc4e58b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -779,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev)
779 fixed20_12 a; 779 fixed20_12 a;
780 u32 tmp; 780 u32 tmp;
781 int chansize, numchan; 781 int chansize, numchan;
782 int r;
783 782
 784 /* Get VRAM information */ 783 /* Get VRAM information */
785 rdev->mc.vram_is_ddr = true; 784 rdev->mc.vram_is_ddr = true;
@@ -822,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev)
822 rdev->mc.real_vram_size = rdev->mc.aper_size; 821 rdev->mc.real_vram_size = rdev->mc.aper_size;
823 822
824 if (rdev->flags & RADEON_IS_AGP) { 823 if (rdev->flags & RADEON_IS_AGP) {
825 r = radeon_agp_init(rdev);
826 if (r)
827 return r;
828 /* gtt_size is setup by radeon_agp_init */ 824 /* gtt_size is setup by radeon_agp_init */
829 rdev->mc.gtt_location = rdev->mc.agp_base; 825 rdev->mc.gtt_location = rdev->mc.agp_base;
830 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 826 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -972,13 +968,16 @@ int rv770_suspend(struct radeon_device *rdev)
972 /* FIXME: we should wait for ring to be empty */ 968 /* FIXME: we should wait for ring to be empty */
973 r700_cp_stop(rdev); 969 r700_cp_stop(rdev);
974 rdev->cp.ready = false; 970 rdev->cp.ready = false;
971 r600_irq_suspend(rdev);
975 r600_wb_disable(rdev); 972 r600_wb_disable(rdev);
976 rv770_pcie_gart_disable(rdev); 973 rv770_pcie_gart_disable(rdev);
977 /* unpin shaders bo */ 974 /* unpin shaders bo */
978 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 975 if (rdev->r600_blit.shader_obj) {
979 if (likely(r == 0)) { 976 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
980 radeon_bo_unpin(rdev->r600_blit.shader_obj); 977 if (likely(r == 0)) {
981 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 978 radeon_bo_unpin(rdev->r600_blit.shader_obj);
979 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
980 }
982 } 981 }
983 return 0; 982 return 0;
984} 983}
@@ -1037,6 +1036,11 @@ int rv770_init(struct radeon_device *rdev)
1037 r = radeon_fence_driver_init(rdev); 1036 r = radeon_fence_driver_init(rdev);
1038 if (r) 1037 if (r)
1039 return r; 1038 return r;
1039 if (rdev->flags & RADEON_IS_AGP) {
1040 r = radeon_agp_init(rdev);
1041 if (r)
1042 radeon_agp_disable(rdev);
1043 }
1040 r = rv770_mc_init(rdev); 1044 r = rv770_mc_init(rdev);
1041 if (r) 1045 if (r)
1042 return r; 1046 return r;
@@ -1071,13 +1075,14 @@ int rv770_init(struct radeon_device *rdev)
1071 if (rdev->accel_working) { 1075 if (rdev->accel_working) {
1072 r = radeon_ib_pool_init(rdev); 1076 r = radeon_ib_pool_init(rdev);
1073 if (r) { 1077 if (r) {
1074 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 1078 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1075 rdev->accel_working = false;
1076 }
1077 r = r600_ib_test(rdev);
1078 if (r) {
1079 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1080 rdev->accel_working = false; 1079 rdev->accel_working = false;
1080 } else {
1081 r = r600_ib_test(rdev);
1082 if (r) {
1083 dev_err(rdev->dev, "IB test failed (%d).\n", r);
1084 rdev->accel_working = false;
1085 }
1081 } 1086 }
1082 } 1087 }
1083 return 0; 1088 return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..1a3e909b7bba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
426 bdev->man[bo->mem.mem_type].gpu_offset; 426 bdev->man[bo->mem.mem_type].gpu_offset;
427 bo->cur_placement = bo->mem.placement; 427 bo->cur_placement = bo->mem.placement;
428 spin_unlock(&bo->lock); 428 spin_unlock(&bo->lock);
429 } 429 } else
430 bo->offset = 0;
430 431
431 return 0; 432 return 0;
432 433
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
523static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 524static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
524{ 525{
525 struct ttm_bo_global *glob = bdev->glob; 526 struct ttm_bo_global *glob = bdev->glob;
526 struct ttm_buffer_object *entry, *nentry; 527 struct ttm_buffer_object *entry = NULL;
527 struct list_head *list, *next; 528 int ret = 0;
528 int ret;
529 529
530 spin_lock(&glob->lru_lock); 530 spin_lock(&glob->lru_lock);
531 list_for_each_safe(list, next, &bdev->ddestroy) { 531 if (list_empty(&bdev->ddestroy))
532 entry = list_entry(list, struct ttm_buffer_object, ddestroy); 532 goto out_unlock;
533 nentry = NULL;
534 533
535 /* 534 entry = list_first_entry(&bdev->ddestroy,
536 * Protect the next list entry from destruction while we 535 struct ttm_buffer_object, ddestroy);
537 * unlock the lru_lock. 536 kref_get(&entry->list_kref);
538 */
539 537
540 if (next != &bdev->ddestroy) { 538 for (;;) {
541 nentry = list_entry(next, struct ttm_buffer_object, 539 struct ttm_buffer_object *nentry = NULL;
542 ddestroy); 540
541 if (entry->ddestroy.next != &bdev->ddestroy) {
542 nentry = list_first_entry(&entry->ddestroy,
543 struct ttm_buffer_object, ddestroy);
543 kref_get(&nentry->list_kref); 544 kref_get(&nentry->list_kref);
544 } 545 }
545 kref_get(&entry->list_kref);
546 546
547 spin_unlock(&glob->lru_lock); 547 spin_unlock(&glob->lru_lock);
548 ret = ttm_bo_cleanup_refs(entry, remove_all); 548 ret = ttm_bo_cleanup_refs(entry, remove_all);
549 kref_put(&entry->list_kref, ttm_bo_release_list); 549 kref_put(&entry->list_kref, ttm_bo_release_list);
550 entry = nentry;
551
552 if (ret || !entry)
553 goto out;
550 554
551 spin_lock(&glob->lru_lock); 555 spin_lock(&glob->lru_lock);
552 if (nentry) { 556 if (list_empty(&entry->ddestroy))
553 bool next_onlist = !list_empty(next);
554 spin_unlock(&glob->lru_lock);
555 kref_put(&nentry->list_kref, ttm_bo_release_list);
556 spin_lock(&glob->lru_lock);
557 /*
558 * Someone might have raced us and removed the
559 * next entry from the list. We don't bother restarting
560 * list traversal.
561 */
562
563 if (!next_onlist)
564 break;
565 }
566 if (ret)
567 break; 557 break;
568 } 558 }
569 ret = !list_empty(&bdev->ddestroy);
570 spin_unlock(&glob->lru_lock);
571 559
560out_unlock:
561 spin_unlock(&glob->lru_lock);
562out:
563 if (entry)
564 kref_put(&entry->list_kref, ttm_bo_release_list);
572 return ret; 565 return ret;
573} 566}
574 567
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
950 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 943 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
951 ~TTM_PL_MASK_MEMTYPE); 944 ~TTM_PL_MASK_MEMTYPE);
952 945
946
947 if (mem_type == TTM_PL_SYSTEM) {
948 mem->mem_type = mem_type;
949 mem->placement = cur_flags;
950 mem->mm_node = NULL;
951 return 0;
952 }
953
953 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 954 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
954 interruptible, no_wait); 955 interruptible, no_wait);
955 if (ret == 0 && mem->mm_node) { 956 if (ret == 0 && mem->mm_node) {
@@ -1844,6 +1845,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1844 * anyone tries to access a ttm page. 1845 * anyone tries to access a ttm page.
1845 */ 1846 */
1846 1847
1848 if (bo->bdev->driver->swap_notify)
1849 bo->bdev->driver->swap_notify(bo);
1850
1847 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); 1851 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1848out: 1852out:
1849 1853
@@ -1864,3 +1868,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1864 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1868 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1865 ; 1869 ;
1866} 1870}
1871EXPORT_SYMBOL(ttm_bo_swapout_all);
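As an aside, the rewritten ttm_bo_delayed_delete() above walks the ddestroy list by pinning the current entry, pinning its successor before dropping lru_lock, and doing the expensive cleanup unlocked. A minimal userspace sketch of that pattern, assuming a pthread mutex and a hand-rolled refcount stand in for lru_lock and kref (names are invented, not TTM's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int refs;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_get(struct node *n)
{
	n->refs++;
}

static void node_put(struct node *n)
{
	if (--n->refs == 0) {
		printf("node %d freed\n", n->id);
		free(n);
	}
}

/* walk a "destroy later" list: pin the current entry, pin its successor
 * before dropping the lock, do the cleanup unlocked, then drop the
 * references -- the successor can never be freed under us */
static void delayed_delete(struct node **head)
{
	struct node *entry, *nentry;

	pthread_mutex_lock(&list_lock);
	entry = *head;
	if (!entry) {
		pthread_mutex_unlock(&list_lock);
		return;
	}
	node_get(entry);

	while (entry) {
		nentry = entry->next;
		if (nentry)
			node_get(nentry);        /* keep the next entry alive */
		*head = entry->next;             /* unlink under the lock */
		pthread_mutex_unlock(&list_lock);

		printf("cleaning up node %d outside the lock\n", entry->id);
		node_put(entry);                 /* drop the list's reference */
		node_put(entry);                 /* drop our pin; may free it */

		entry = nentry;
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node *c = calloc(1, sizeof(*c));
	struct node *b = calloc(1, sizeof(*b));
	struct node *a = calloc(1, sizeof(*a));

	a->id = 1; a->refs = 1; a->next = b;
	b->id = 2; b->refs = 1; b->next = c;
	c->id = 3; c->refs = 1; c->next = NULL;

	delayed_delete(&a);
	return 0;
}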
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
288 wake_up_all(&lock->queue); 288 wake_up_all(&lock->queue);
289 spin_unlock(&lock->lock); 289 spin_unlock(&lock->lock);
290} 290}
291EXPORT_SYMBOL(ttm_suspend_unlock);
291 292
292static bool __ttm_suspend_lock(struct ttm_lock *lock) 293static bool __ttm_suspend_lock(struct ttm_lock *lock)
293{ 294{
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
309{ 310{
310 wait_event(lock->queue, __ttm_suspend_lock(lock)); 311 wait_event(lock->queue, __ttm_suspend_lock(lock));
311} 312}
313EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b882e9..825ebe3d89d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = {
48 .busy_placement = &vram_placement_flags 48 .busy_placement = &vram_placement_flags
49}; 49};
50 50
51struct ttm_placement vmw_vram_sys_placement = {
52 .fpfn = 0,
53 .lpfn = 0,
54 .num_placement = 1,
55 .placement = &vram_placement_flags,
56 .num_busy_placement = 1,
57 .busy_placement = &sys_placement_flags
58};
59
51struct ttm_placement vmw_vram_ne_placement = { 60struct ttm_placement vmw_vram_ne_placement = {
52 .fpfn = 0, 61 .fpfn = 0,
53 .lpfn = 0, 62 .lpfn = 0,
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
172 return 0; 181 return 0;
173} 182}
174 183
184static void vmw_move_notify(struct ttm_buffer_object *bo,
185 struct ttm_mem_reg *new_mem)
186{
187 if (new_mem->mem_type != TTM_PL_SYSTEM)
188 vmw_dmabuf_gmr_unbind(bo);
189}
190
191static void vmw_swap_notify(struct ttm_buffer_object *bo)
192{
193 vmw_dmabuf_gmr_unbind(bo);
194}
195
175/** 196/**
176 * FIXME: We're using the old vmware polling method to sync. 197 * FIXME: We're using the old vmware polling method to sync.
177 * Do this with fences instead. 198 * Do this with fences instead.
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = {
225 .sync_obj_wait = vmw_sync_obj_wait, 246 .sync_obj_wait = vmw_sync_obj_wait,
226 .sync_obj_flush = vmw_sync_obj_flush, 247 .sync_obj_flush = vmw_sync_obj_flush,
227 .sync_obj_unref = vmw_sync_obj_unref, 248 .sync_obj_unref = vmw_sync_obj_unref,
228 .sync_obj_ref = vmw_sync_obj_ref 249 .sync_obj_ref = vmw_sync_obj_ref,
250 .move_notify = vmw_move_notify,
251 .swap_notify = vmw_swap_notify
229}; 252};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1db1ef30be2b..dedd121d8fe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx";
147 147
148static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 148static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
149static void vmw_master_init(struct vmw_master *); 149static void vmw_master_init(struct vmw_master *);
150static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
151 void *ptr);
150 152
151static void vmw_print_capabilities(uint32_t capabilities) 153static void vmw_print_capabilities(uint32_t capabilities)
152{ 154{
@@ -217,6 +219,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
217 219
218 dev_priv->dev = dev; 220 dev_priv->dev = dev;
219 dev_priv->vmw_chipset = chipset; 221 dev_priv->vmw_chipset = chipset;
222 dev_priv->last_read_sequence = (uint32_t) -100;
220 mutex_init(&dev_priv->hw_mutex); 223 mutex_init(&dev_priv->hw_mutex);
221 mutex_init(&dev_priv->cmdbuf_mutex); 224 mutex_init(&dev_priv->cmdbuf_mutex);
222 rwlock_init(&dev_priv->resource_lock); 225 rwlock_init(&dev_priv->resource_lock);
@@ -351,6 +354,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
351 vmw_fb_init(dev_priv); 354 vmw_fb_init(dev_priv);
352 } 355 }
353 356
357 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
358 register_pm_notifier(&dev_priv->pm_nb);
359
354 return 0; 360 return 0;
355 361
356out_no_device: 362out_no_device:
@@ -385,6 +391,8 @@ static int vmw_driver_unload(struct drm_device *dev)
385 391
386 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); 392 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
387 393
394 unregister_pm_notifier(&dev_priv->pm_nb);
395
388 if (!dev_priv->stealth) { 396 if (!dev_priv->stealth) {
389 vmw_fb_close(dev_priv); 397 vmw_fb_close(dev_priv);
390 vmw_kms_close(dev_priv); 398 vmw_kms_close(dev_priv);
@@ -650,6 +658,57 @@ static void vmw_remove(struct pci_dev *pdev)
650 drm_put_dev(dev); 658 drm_put_dev(dev);
651} 659}
652 660
661static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
662 void *ptr)
663{
664 struct vmw_private *dev_priv =
665 container_of(nb, struct vmw_private, pm_nb);
666 struct vmw_master *vmaster = dev_priv->active_master;
667
668 switch (val) {
669 case PM_HIBERNATION_PREPARE:
670 case PM_SUSPEND_PREPARE:
671 ttm_suspend_lock(&vmaster->lock);
672
673 /**
674 * This empties VRAM and unbinds all GMR bindings.
 675 * Buffer contents are moved to swappable memory.
676 */
677 ttm_bo_swapout_all(&dev_priv->bdev);
678 break;
679 case PM_POST_HIBERNATION:
680 case PM_POST_SUSPEND:
681 ttm_suspend_unlock(&vmaster->lock);
682 break;
683 case PM_RESTORE_PREPARE:
684 break;
685 case PM_POST_RESTORE:
686 break;
687 default:
688 break;
689 }
690 return 0;
691}
692
693/**
694 * These might not be needed with the virtual SVGA device.
695 */
696
697int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
698{
699 pci_save_state(pdev);
700 pci_disable_device(pdev);
701 pci_set_power_state(pdev, PCI_D3hot);
702 return 0;
703}
704
705int vmw_pci_resume(struct pci_dev *pdev)
706{
707 pci_set_power_state(pdev, PCI_D0);
708 pci_restore_state(pdev);
709 return pci_enable_device(pdev);
710}
711
653static struct drm_driver driver = { 712static struct drm_driver driver = {
654 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 713 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
655 DRIVER_MODESET, 714 DRIVER_MODESET,
@@ -689,7 +748,9 @@ static struct drm_driver driver = {
689 .name = VMWGFX_DRIVER_NAME, 748 .name = VMWGFX_DRIVER_NAME,
690 .id_table = vmw_pci_id_list, 749 .id_table = vmw_pci_id_list,
691 .probe = vmw_probe, 750 .probe = vmw_probe,
692 .remove = vmw_remove 751 .remove = vmw_remove,
752 .suspend = vmw_pci_suspend,
753 .resume = vmw_pci_resume
693 }, 754 },
694 .name = VMWGFX_DRIVER_NAME, 755 .name = VMWGFX_DRIVER_NAME,
695 .desc = VMWGFX_DRIVER_DESC, 756 .desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85b6975..50529a7f06fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "vmwgfx_drm.h" 33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h" 34#include "drm_hashtab.h"
35#include "linux/suspend.h"
35#include "ttm/ttm_bo_driver.h" 36#include "ttm/ttm_bo_driver.h"
36#include "ttm/ttm_object.h" 37#include "ttm/ttm_object.h"
37#include "ttm/ttm_lock.h" 38#include "ttm/ttm_lock.h"
@@ -258,6 +259,7 @@ struct vmw_private {
258 259
259 struct vmw_master *active_master; 260 struct vmw_master *active_master;
260 struct vmw_master fbdev_master; 261 struct vmw_master fbdev_master;
262 struct notifier_block pm_nb;
261}; 263};
262 264
263static inline struct vmw_private *vmw_priv(struct drm_device *dev) 265static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -353,6 +355,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
353 struct vmw_dma_buffer *bo); 355 struct vmw_dma_buffer *bo);
354extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, 356extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
355 struct vmw_dma_buffer *bo); 357 struct vmw_dma_buffer *bo);
358extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
356extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 359extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
357 struct drm_file *file_priv); 360 struct drm_file *file_priv);
358extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, 361extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -401,6 +404,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
401 404
402extern struct ttm_placement vmw_vram_placement; 405extern struct ttm_placement vmw_vram_placement;
403extern struct ttm_placement vmw_vram_ne_placement; 406extern struct ttm_placement vmw_vram_ne_placement;
407extern struct ttm_placement vmw_vram_sys_placement;
404extern struct ttm_placement vmw_sys_placement; 408extern struct ttm_placement vmw_sys_placement;
405extern struct ttm_bo_driver vmw_bo_driver; 409extern struct ttm_bo_driver vmw_bo_driver;
406extern int vmw_dma_quiescent(struct drm_device *dev); 410extern int vmw_dma_quiescent(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e92da567403..d69caf92ffe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) 490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
491 return 0; 491 return 0;
492 492
493 /**
494 * Put BO in VRAM, only if there is space.
495 */
496
497 ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
498 if (unlikely(ret == -ERESTARTSYS))
499 return ret;
500
501 /**
502 * Otherwise, set it up as GMR.
503 */
504
505 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
506 return 0;
507
493 ret = vmw_gmr_bind(dev_priv, bo); 508 ret = vmw_gmr_bind(dev_priv, bo);
494 if (likely(ret == 0 || ret == -ERESTARTSYS)) 509 if (likely(ret == 0 || ret == -ERESTARTSYS))
495 return ret; 510 return ret;
496 511
512 /**
513 * If that failed, try VRAM again, this time evicting
514 * previous contents.
515 */
497 516
498 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 517 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
499 return ret; 518 return ret;
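As an aside, the comments added above describe a tiered placement fallback: VRAM only if there is free space, then a GMR binding, and finally VRAM with eviction. A generic stand-alone sketch of that fall-through order, with invented names rather than the vmwgfx/TTM API:

#include <stdio.h>

enum place { PLACE_VRAM_IF_FREE, PLACE_GMR, PLACE_VRAM_EVICT };

static int try_place(enum place p)
{
	/* pretend only the eviction path succeeds this time */
	return p == PLACE_VRAM_EVICT ? 0 : -1;
}

static int validate_buffer(void)
{
	static const enum place order[] = {
		PLACE_VRAM_IF_FREE, PLACE_GMR, PLACE_VRAM_EVICT
	};

	/* try the cheap option first; only evict as a last resort */
	for (unsigned i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
		if (try_place(order[i]) == 0) {
			printf("placed via option %u\n", i);
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	return validate_buffer() ? 1 : 0;
}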
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde76ada1..4f4f6432be8b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -649,14 +649,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
649 if (unlikely(ret != 0)) 649 if (unlikely(ret != 0))
650 goto err_unlock; 650 goto err_unlock;
651 651
652 if (vmw_bo->gmr_bound) {
653 vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
654 spin_lock(&bo->glob->lru_lock);
655 ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
656 spin_unlock(&bo->glob->lru_lock);
657 vmw_bo->gmr_bound = NULL;
658 }
659
660 ret = ttm_bo_validate(bo, &ne_placement, false, false); 652 ret = ttm_bo_validate(bo, &ne_placement, false, false);
661 ttm_bo_unreserve(bo); 653 ttm_bo_unreserve(bo);
662err_unlock: 654err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 01feb48af333..f7d5f70b52dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -98,8 +98,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
98 (unsigned int) min, 98 (unsigned int) min,
99 (unsigned int) fifo->capabilities); 99 (unsigned int) fifo->capabilities);
100 100
101 dev_priv->fence_seq = (uint32_t) -100; 101 dev_priv->fence_seq = dev_priv->last_read_sequence;
102 dev_priv->last_read_sequence = (uint32_t) -100;
103 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 102 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
104 103
105 return vmw_fifo_send_fence(dev_priv, &dummy); 104 return vmw_fifo_send_fence(dev_priv, &dummy);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b1af76e371c3..686692de209a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
553 } *cmd; 553 } *cmd;
554 int i, increment = 1; 554 int i, increment = 1;
555 555
556 if (!num_clips || 556 if (!num_clips) {
557 !(dev_priv->fifo.capabilities &
558 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
559 num_clips = 1; 557 num_clips = 1;
560 clips = &norect; 558 clips = &norect;
561 norect.x1 = norect.y1 = 0; 559 norect.x1 = norect.y1 = 0;
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
574 572
575 for (i = 0; i < num_clips; i++, clips += increment) { 573 for (i = 0; i < num_clips; i++, clips += increment) {
576 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); 574 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
577 cmd[i].body.x = cpu_to_le32(clips[i].x1); 575 cmd[i].body.x = cpu_to_le32(clips->x1);
578 cmd[i].body.y = cpu_to_le32(clips[i].y1); 576 cmd[i].body.y = cpu_to_le32(clips->y1);
579 cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); 577 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
580 cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); 578 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
581 } 579 }
582 580
583 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); 581 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a096d25..5b6eabeb7f51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
104 bool pin, bool interruptible) 104 bool pin, bool interruptible)
105{ 105{
106 struct ttm_buffer_object *bo = &buf->base; 106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_bo_global *glob = bo->glob;
108 struct ttm_placement *overlay_placement = &vmw_vram_placement; 107 struct ttm_placement *overlay_placement = &vmw_vram_placement;
109 int ret; 108 int ret;
110 109
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
116 if (unlikely(ret != 0)) 115 if (unlikely(ret != 0))
117 goto err; 116 goto err;
118 117
119 if (buf->gmr_bound) {
120 vmw_gmr_unbind(dev_priv, buf->gmr_id);
121 spin_lock(&glob->lru_lock);
122 ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
123 spin_unlock(&glob->lru_lock);
124 buf->gmr_bound = NULL;
125 }
126
127 if (pin) 118 if (pin)
128 overlay_placement = &vmw_vram_ne_placement; 119 overlay_placement = &vmw_vram_ne_placement;
129 120
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..e01db120efff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -599,6 +599,27 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
599 if (unlikely(ret != 0)) 599 if (unlikely(ret != 0))
600 goto out_err1; 600 goto out_err1;
601 601
602
603 if (srf->flags & (1 << 9) &&
604 srf->num_sizes == 1 &&
605 srf->sizes[0].width == 64 &&
606 srf->sizes[0].height == 64 &&
607 srf->format == SVGA3D_A8R8G8B8) {
608
609 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
610 /* clear the image */
611 if (srf->snooper.image) {
612 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
613 } else {
614 DRM_ERROR("Failed to allocate cursor_image\n");
615 ret = -ENOMEM;
616 goto out_err1;
617 }
618 } else {
619 srf->snooper.image = NULL;
620 }
621 srf->snooper.crtc = NULL;
622
602 user_srf->base.shareable = false; 623 user_srf->base.shareable = false;
603 user_srf->base.tfile = NULL; 624 user_srf->base.tfile = NULL;
604 625
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
622 return ret; 643 return ret;
623 } 644 }
624 645
625 if (srf->flags & (1 << 9) &&
626 srf->num_sizes == 1 &&
627 srf->sizes[0].width == 64 &&
628 srf->sizes[0].height == 64 &&
629 srf->format == SVGA3D_A8R8G8B8) {
630
631 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
632 /* clear the image */
633 if (srf->snooper.image)
634 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
635 else
636 DRM_ERROR("Failed to allocate cursor_image\n");
637
638 } else {
639 srf->snooper.image = NULL;
640 }
641 srf->snooper.crtc = NULL;
642
643 rep->sid = user_srf->base.hash.key; 646 rep->sid = user_srf->base.hash.key;
644 if (rep->sid == SVGA3D_INVALID_ID) 647 if (rep->sid == SVGA3D_INVALID_ID)
645 DRM_ERROR("Created bad Surface ID.\n"); 648 DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
754 return bo_user_size + page_array_size; 757 return bo_user_size + page_array_size;
755} 758}
756 759
757void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 760void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
758{ 761{
759 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 762 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
760 struct ttm_bo_global *glob = bo->glob; 763 struct ttm_bo_global *glob = bo->glob;
761 struct vmw_private *dev_priv = 764 struct vmw_private *dev_priv =
762 container_of(bo->bdev, struct vmw_private, bdev); 765 container_of(bo->bdev, struct vmw_private, bdev);
763 766
764 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
765 if (vmw_bo->gmr_bound) { 767 if (vmw_bo->gmr_bound) {
766 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); 768 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
767 spin_lock(&glob->lru_lock); 769 spin_lock(&glob->lru_lock);
768 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); 770 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
769 spin_unlock(&glob->lru_lock); 771 spin_unlock(&glob->lru_lock);
772 vmw_bo->gmr_bound = false;
770 } 773 }
774}
775
776void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
777{
778 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
779 struct ttm_bo_global *glob = bo->glob;
780
781 vmw_dmabuf_gmr_unbind(bo);
782 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
771 kfree(vmw_bo); 783 kfree(vmw_bo);
772} 784}
773 785
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
813static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 825static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
814{ 826{
815 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 827 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
816 struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
817 struct ttm_bo_global *glob = bo->glob; 828 struct ttm_bo_global *glob = bo->glob;
818 struct vmw_private *dev_priv =
819 container_of(bo->bdev, struct vmw_private, bdev);
820 829
830 vmw_dmabuf_gmr_unbind(bo);
821 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 831 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
822 if (vmw_bo->gmr_bound) {
823 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
824 spin_lock(&glob->lru_lock);
825 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
826 spin_unlock(&glob->lru_lock);
827 }
828 kfree(vmw_user_bo); 832 kfree(vmw_user_bo);
829} 833}
830 834
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
868 } 872 }
869 873
870 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, 874 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
871 &vmw_vram_placement, true, 875 &vmw_vram_sys_placement, true,
872 &vmw_user_dmabuf_destroy); 876 &vmw_user_dmabuf_destroy);
873 if (unlikely(ret != 0)) 877 if (unlikely(ret != 0))
874 return ret; 878 return ret;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 1c89d922d619..fa9708c2d723 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -686,7 +686,6 @@ static ssize_t set_fan1_div(
686 data->fan1_div = 4; 686 data->fan1_div = 4;
687 break; 687 break;
688 default: 688 default:
689 mutex_unlock(&data->update_lock);
690 count = -EINVAL; 689 count = -EINVAL;
691 goto EXIT; 690 goto EXIT;
692 } 691 }
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 6811346c1c62..028284f544e3 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1329,17 +1329,16 @@ static int atk_add(struct acpi_device *device)
1329 &buf, ACPI_TYPE_PACKAGE); 1329 &buf, ACPI_TYPE_PACKAGE);
1330 if (ret != AE_OK) { 1330 if (ret != AE_OK) {
1331 dev_dbg(&device->dev, "atk: method MBIF not found\n"); 1331 dev_dbg(&device->dev, "atk: method MBIF not found\n");
1332 err = -ENODEV; 1332 } else {
1333 goto out; 1333 obj = buf.pointer;
1334 } 1334 if (obj->package.count >= 2) {
1335 1335 union acpi_object *id = &obj->package.elements[1];
1336 obj = buf.pointer; 1336 if (id->type == ACPI_TYPE_STRING)
1337 if (obj->package.count >= 2 && 1337 dev_dbg(&device->dev, "board ID = %s\n",
1338 obj->package.elements[1].type == ACPI_TYPE_STRING) { 1338 id->string.pointer);
1339 dev_dbg(&device->dev, "board ID = %s\n", 1339 }
1340 obj->package.elements[1].string.pointer); 1340 ACPI_FREE(buf.pointer);
1341 } 1341 }
1342 ACPI_FREE(buf.pointer);
1343 1342
1344 err = atk_probe_if(data); 1343 err = atk_probe_if(data);
1345 if (err) { 1344 if (err) {
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index bd0fc67e804b..fa0728232e71 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -768,6 +768,7 @@ leave:
768static int watchdog_open(struct inode *inode, struct file *filp) 768static int watchdog_open(struct inode *inode, struct file *filp)
769{ 769{
770 struct fschmd_data *pos, *data = NULL; 770 struct fschmd_data *pos, *data = NULL;
771 int watchdog_is_open;
771 772
772 /* We get called from drivers/char/misc.c with misc_mtx hold, and we 773 /* We get called from drivers/char/misc.c with misc_mtx hold, and we
773 call misc_register() from fschmd_probe() with watchdog_data_mutex 774 call misc_register() from fschmd_probe() with watchdog_data_mutex
@@ -782,10 +783,12 @@ static int watchdog_open(struct inode *inode, struct file *filp)
782 } 783 }
783 } 784 }
784 /* Note we can never not have found data, so we don't check for this */ 785 /* Note we can never not have found data, so we don't check for this */
785 kref_get(&data->kref); 786 watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
787 if (!watchdog_is_open)
788 kref_get(&data->kref);
786 mutex_unlock(&watchdog_data_mutex); 789 mutex_unlock(&watchdog_data_mutex);
787 790
788 if (test_and_set_bit(0, &data->watchdog_is_open)) 791 if (watchdog_is_open)
789 return -EBUSY; 792 return -EBUSY;
790 793
791 /* Start the watchdog */ 794 /* Start the watchdog */
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 9ca97818bd4b..8fa462f2b570 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -488,7 +488,7 @@ static int __init smsc47m1_find(unsigned short *addr,
488} 488}
489 489
490/* Restore device to its initial state */ 490/* Restore device to its initial state */
491static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) 491static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
492{ 492{
493 if ((sio_data->activate & 0x01) == 0) { 493 if ((sio_data->activate & 0x01) == 0) {
494 superio_enter(); 494 superio_enter();
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e3654d683e15..75bf820e7ccb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -226,7 +226,6 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
226 temp = readb(i2c_imx->base + IMX_I2C_I2CR); 226 temp = readb(i2c_imx->base + IMX_I2C_I2CR);
227 temp &= ~(I2CR_MSTA | I2CR_MTX); 227 temp &= ~(I2CR_MSTA | I2CR_MTX);
228 writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 228 writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
229 i2c_imx->stopped = 1;
230 } 229 }
231 if (cpu_is_mx1()) { 230 if (cpu_is_mx1()) {
232 /* 231 /*
@@ -236,8 +235,10 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
236 udelay(i2c_imx->disable_delay); 235 udelay(i2c_imx->disable_delay);
237 } 236 }
238 237
239 if (!i2c_imx->stopped) 238 if (!i2c_imx->stopped) {
240 i2c_imx_bus_busy(i2c_imx, 0); 239 i2c_imx_bus_busy(i2c_imx, 0);
240 i2c_imx->stopped = 1;
241 }
241 242
242 /* Disable I2C controller */ 243 /* Disable I2C controller */
243 writeb(0, i2c_imx->base + IMX_I2C_I2CR); 244 writeb(0, i2c_imx->base + IMX_I2C_I2CR);
@@ -496,22 +497,23 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
496 } 497 }
497 498
498 res_size = resource_size(res); 499 res_size = resource_size(res);
500
501 if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
502 ret = -EBUSY;
503 goto fail0;
504 }
505
499 base = ioremap(res->start, res_size); 506 base = ioremap(res->start, res_size);
500 if (!base) { 507 if (!base) {
501 dev_err(&pdev->dev, "ioremap failed\n"); 508 dev_err(&pdev->dev, "ioremap failed\n");
502 ret = -EIO; 509 ret = -EIO;
503 goto fail0; 510 goto fail1;
504 } 511 }
505 512
506 i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); 513 i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL);
507 if (!i2c_imx) { 514 if (!i2c_imx) {
508 dev_err(&pdev->dev, "can't allocate interface\n"); 515 dev_err(&pdev->dev, "can't allocate interface\n");
509 ret = -ENOMEM; 516 ret = -ENOMEM;
510 goto fail1;
511 }
512
513 if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
514 ret = -EBUSY;
515 goto fail2; 517 goto fail2;
516 } 518 }
517 519
@@ -582,11 +584,11 @@ fail5:
582fail4: 584fail4:
583 clk_put(i2c_imx->clk); 585 clk_put(i2c_imx->clk);
584fail3: 586fail3:
585 release_mem_region(i2c_imx->res->start, resource_size(res));
586fail2:
587 kfree(i2c_imx); 587 kfree(i2c_imx);
588fail1: 588fail2:
589 iounmap(base); 589 iounmap(base);
590fail1:
591 release_mem_region(res->start, resource_size(res));
590fail0: 592fail0:
591 if (pdata && pdata->exit) 593 if (pdata && pdata->exit)
592 pdata->exit(&pdev->dev); 594 pdata->exit(&pdev->dev);
@@ -618,8 +620,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
618 620
619 clk_put(i2c_imx->clk); 621 clk_put(i2c_imx->clk);
620 622
621 release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
622 iounmap(i2c_imx->base); 623 iounmap(i2c_imx->base);
624 release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
623 kfree(i2c_imx); 625 kfree(i2c_imx);
624 return 0; 626 return 0;
625} 627}
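As an aside, the i2c-imx hunks above reorder probe() so the memory region is requested before ioremap() and the failure labels unwind in the reverse order of acquisition. A small stand-alone sketch of that goto-unwind pattern, using made-up acquire/release helpers rather than the kernel APIs:

#include <stdio.h>
#include <stdbool.h>

static bool acquire_region(void)  { puts("region claimed");  return true; }
static void release_region(void)  { puts("region released"); }
static bool map_region(void)      { puts("region mapped");   return true; }
static void unmap_region(void)    { puts("region unmapped"); }
static bool alloc_state(void)     { puts("state allocation failed"); return false; /* simulate failure */ }

static int probe(void)
{
	if (!acquire_region())
		return -1;
	if (!map_region())
		goto fail_release;
	if (!alloc_state())
		goto fail_unmap;
	return 0;

	/* unwind strictly in reverse acquisition order */
fail_unmap:
	unmap_region();
fail_release:
	release_region();
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}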
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 85bc6a685e36..44d2037e9e56 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc)
4330 4330
4331 if (ioc->bus_type == SPI) 4331 if (ioc->bus_type == SPI)
4332 num_chain *= MPT_SCSI_CAN_QUEUE; 4332 num_chain *= MPT_SCSI_CAN_QUEUE;
4333 else if (ioc->bus_type == SAS)
4334 num_chain *= MPT_SAS_CAN_QUEUE;
4333 else 4335 else
4334 num_chain *= MPT_FC_CAN_QUEUE; 4336 num_chain *= MPT_FC_CAN_QUEUE;
4335 4337
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4c364d44ad59..2de0cc823d60 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -549,4 +549,21 @@ config MTD_VMU
549 To build this as a module select M here, the module will be called 549 To build this as a module select M here, the module will be called
550 vmu-flash. 550 vmu-flash.
551 551
552config MTD_PISMO
553 tristate "MTD discovery driver for PISMO modules"
554 depends on I2C
555 depends on ARCH_VERSATILE
556 help
557 This driver allows for discovery of PISMO modules - see
558 <http://www.pismoworld.org/>. These are small modules containing
 559 up to five memory devices (e.g., SRAM, flash, DOC) described by an
560 I2C EEPROM.
561
562 This driver does not create any MTD maps itself; instead it
563 creates MTD physmap and MTD SRAM platform devices. If you
564 enable this option, you should consider enabling MTD_PHYSMAP
565 and/or MTD_PLATRAM according to the devices on your module.
566
567 When built as a module, it will be called pismo.ko
568
552endmenu 569endmenu
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
new file mode 100644
index 000000000000..c48cad271f5d
--- /dev/null
+++ b/drivers/mtd/maps/pismo.c
@@ -0,0 +1,320 @@
1/*
2 * PISMO memory driver - http://www.pismoworld.org/
3 *
4 * For ARM Realview and Versatile platforms
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
9 */
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/i2c.h>
13#include <linux/platform_device.h>
14#include <linux/spinlock.h>
15#include <linux/mutex.h>
16#include <linux/mtd/physmap.h>
17#include <linux/mtd/plat-ram.h>
18#include <linux/mtd/pismo.h>
19
20#define PISMO_NUM_CS 5
21
22struct pismo_cs_block {
23 u8 type;
24 u8 width;
25 __le16 access;
26 __le32 size;
27 u32 reserved[2];
28 char device[32];
29} __packed;
30
31struct pismo_eeprom {
32 struct pismo_cs_block cs[PISMO_NUM_CS];
33 char board[15];
34 u8 sum;
35} __packed;
36
37struct pismo_mem {
38 phys_addr_t base;
39 u32 size;
40 u16 access;
41 u8 width;
42 u8 type;
43};
44
45struct pismo_data {
46 struct i2c_client *client;
47 void (*vpp)(void *, int);
48 void *vpp_data;
49 struct platform_device *dev[PISMO_NUM_CS];
50};
51
52/* FIXME: set_vpp could do with a better calling convention */
53static struct pismo_data *vpp_pismo;
54static DEFINE_MUTEX(pismo_mutex);
55
56static int pismo_setvpp_probe_fix(struct pismo_data *pismo)
57{
58 mutex_lock(&pismo_mutex);
59 if (vpp_pismo) {
60 mutex_unlock(&pismo_mutex);
61 kfree(pismo);
62 return -EBUSY;
63 }
64 vpp_pismo = pismo;
65 mutex_unlock(&pismo_mutex);
66 return 0;
67}
68
69static void pismo_setvpp_remove_fix(struct pismo_data *pismo)
70{
71 mutex_lock(&pismo_mutex);
72 if (vpp_pismo == pismo)
73 vpp_pismo = NULL;
74 mutex_unlock(&pismo_mutex);
75}
76
77static void pismo_set_vpp(struct map_info *map, int on)
78{
79 struct pismo_data *pismo = vpp_pismo;
80
81 pismo->vpp(pismo->vpp_data, on);
82}
83/* end of hack */
84
85
86static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
87{
88 width &= 15;
89 if (width > 2)
90 return 0;
91 return 1 << width;
92}
93
94static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
95 u8 addr, size_t size)
96{
97 int ret;
98 struct i2c_msg msg[] = {
99 {
100 .addr = client->addr,
101 .len = sizeof(addr),
102 .buf = &addr,
103 }, {
104 .addr = client->addr,
105 .flags = I2C_M_RD,
106 .len = size,
107 .buf = buf,
108 },
109 };
110
111 ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
112
113 return ret == ARRAY_SIZE(msg) ? size : -EIO;
114}
115
116static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
117 struct pismo_mem *region, const char *name, void *pdata, size_t psize)
118{
119 struct platform_device *dev;
120 struct resource res = { };
 121 phys_addr_t base = region->base;
122 int ret;
123
124 if (base == ~0)
125 return -ENXIO;
126
127 res.start = base;
128 res.end = base + region->size - 1;
129 res.flags = IORESOURCE_MEM;
130
131 dev = platform_device_alloc(name, i);
132 if (!dev)
133 return -ENOMEM;
134 dev->dev.parent = &pismo->client->dev;
135
136 do {
137 ret = platform_device_add_resources(dev, &res, 1);
138 if (ret)
139 break;
140
141 ret = platform_device_add_data(dev, pdata, psize);
142 if (ret)
143 break;
144
145 ret = platform_device_add(dev);
146 if (ret)
147 break;
148
149 pismo->dev[i] = dev;
150 return 0;
151 } while (0);
152
153 platform_device_put(dev);
154 return ret;
155}
156
157static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
158 struct pismo_mem *region)
159{
160 struct physmap_flash_data data = {
161 .width = region->width,
162 };
163
164 if (pismo->vpp)
165 data.set_vpp = pismo_set_vpp;
166
167 return pismo_add_device(pismo, i, region, "physmap-flash",
168 &data, sizeof(data));
169}
170
171static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
172 struct pismo_mem *region)
173{
174 struct platdata_mtd_ram data = {
175 .bankwidth = region->width,
176 };
177
178 return pismo_add_device(pismo, i, region, "mtd-ram",
179 &data, sizeof(data));
180}
181
182static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
183 const struct pismo_cs_block *cs, phys_addr_t base)
184{
185 struct device *dev = &pismo->client->dev;
186 struct pismo_mem region;
187
188 region.base = base;
189 region.type = cs->type;
190 region.width = pismo_width_to_bytes(cs->width);
191 region.access = le16_to_cpu(cs->access);
192 region.size = le32_to_cpu(cs->size);
193
194 if (region.width == 0) {
195 dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
196 return;
197 }
198
199 /*
200 * FIXME: we may need to program the platform's memory controller
201 * here, but at the moment we assume it has already been set up
202 * correctly; it could also tell us the base address.
203 */
204
205 dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
206 i, cs->device, region.type, region.access, region.size / 1024);
207
208 switch (region.type) {
209 case 0:
210 break;
211 case 1:
212 /* static DOC */
213 break;
214 case 2:
215 /* static NOR */
216 pismo_add_nor(pismo, i, &region);
217 break;
218 case 3:
219 /* static RAM */
220 pismo_add_sram(pismo, i, &region);
221 break;
222 }
223}
224
225static int __devexit pismo_remove(struct i2c_client *client)
226{
227 struct pismo_data *pismo = i2c_get_clientdata(client);
228 int i;
229
230 for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
231 platform_device_unregister(pismo->dev[i]);
232
233 /* FIXME: set_vpp needs saner arguments */
234 pismo_setvpp_remove_fix(pismo);
235
236 kfree(pismo);
237
238 return 0;
239}
240
241static int __devinit pismo_probe(struct i2c_client *client,
242 const struct i2c_device_id *id)
243{
244 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
245 struct pismo_pdata *pdata = client->dev.platform_data;
246 struct pismo_eeprom eeprom;
247 struct pismo_data *pismo;
248 int ret, i;
249
250 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
251 dev_err(&client->dev, "functionality mismatch\n");
252 return -EIO;
253 }
254
255 pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
256 if (!pismo)
257 return -ENOMEM;
258
259 /* FIXME: set_vpp needs saner arguments */
260 ret = pismo_setvpp_probe_fix(pismo);
261 if (ret)
262 return ret;
263
264 pismo->client = client;
265 if (pdata) {
266 pismo->vpp = pdata->set_vpp;
267 pismo->vpp_data = pdata->vpp_data;
268 }
269 i2c_set_clientdata(client, pismo);
270
271 ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
272 if (ret < 0) {
273 dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
274 return ret;
275 }
276
277 dev_info(&client->dev, "%.15s board found\n", eeprom.board);
278
279 for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
280 if (eeprom.cs[i].type != 0xff)
281 pismo_add_one(pismo, i, &eeprom.cs[i],
282 pdata->cs_addrs[i]);
283
284 return 0;
285}
286
287static const struct i2c_device_id pismo_id[] = {
288 { "pismo" },
289 { },
290};
291MODULE_DEVICE_TABLE(i2c, pismo_id);
292
293static struct i2c_driver pismo_driver = {
294 .driver = {
295 .name = "pismo",
296 .owner = THIS_MODULE,
297 },
298 .probe = pismo_probe,
299 .remove = __devexit_p(pismo_remove),
300 .id_table = pismo_id,
301};
302
303static int __init pismo_init(void)
304{
305 BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
306 BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);
307
308 return i2c_add_driver(&pismo_driver);
309}
310module_init(pismo_init);
311
312static void __exit pismo_exit(void)
313{
314 i2c_del_driver(&pismo_driver);
315}
316module_exit(pismo_exit);
317
318MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>");
319MODULE_DESCRIPTION("PISMO memory driver");
320MODULE_LICENSE("GPL");
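
For context, the probe path above expects platform data supplying set_vpp, vpp_data and per-chip-select base addresses. A minimal sketch of how a board file might wire this up, assuming the struct pismo_pdata layout implied by the probe code (the real definition lives in <linux/mtd/pismo.h>, not shown here); the I2C address and chip-select bases are placeholders:

#include <linux/i2c.h>
#include <linux/mtd/pismo.h>

static void board_pismo_set_vpp(void *vpp_data, int on)
{
	/* toggle the board's VPP enable line here */
}

/* Field names follow the probe code above; values are illustrative. */
static struct pismo_pdata board_pismo_pdata = {
	.set_vpp  = board_pismo_set_vpp,
	.vpp_data = NULL,
	.cs_addrs = { 0x40000000, 0x44000000, 0x48000000,
		      0x4c000000, 0x50000000 },
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("pismo", 0x50),	/* example EEPROM address */
		.platform_data = &board_pismo_pdata,
	},
};

The driver then reads the 256-byte EEPROM and registers a physmap-flash or mtd-ram platform device for every chip select whose type byte is not 0xff.
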
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index a714ec482761..92e12df0917f 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -322,7 +322,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
322 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); 322 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
323 323
324 /* Panics must be written immediately */ 324 /* Panics must be written immediately */
325 if (reason == KMSG_DUMP_PANIC) { 325 if (reason != KMSG_DUMP_OOPS) {
326 if (!cxt->mtd->panic_write) 326 if (!cxt->mtd->panic_write)
327 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); 327 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
328 else 328 else
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 79fc4530987b..25c5dd03a837 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -147,6 +147,10 @@ static int scan_for_bad_eraseblocks(void)
147 } 147 }
148 memset(bbt, 0 , ebcnt); 148 memset(bbt, 0 , ebcnt);
149 149
150 /* NOR flash does not implement block_isbad */
151 if (mtd->block_isbad == NULL)
152 return 0;
153
150 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 154 printk(PRINT_PREF "scanning for bad eraseblocks\n");
151 for (i = 0; i < ebcnt; ++i) { 155 for (i = 0; i < ebcnt; ++i) {
152 bbt[i] = is_block_bad(i) ? 1 : 0; 156 bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -184,7 +188,7 @@ static int __init mtd_readtest_init(void)
184 tmp = mtd->size; 188 tmp = mtd->size;
185 do_div(tmp, mtd->erasesize); 189 do_div(tmp, mtd->erasesize);
186 ebcnt = tmp; 190 ebcnt = tmp;
187 pgcnt = mtd->erasesize / mtd->writesize; 191 pgcnt = mtd->erasesize / pgsize;
188 192
189 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 193 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
190 "page size %u, count of eraseblocks %u, pages per " 194 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 141363a7e805..7fbb51d4eabe 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -301,6 +301,10 @@ static int scan_for_bad_eraseblocks(void)
301 } 301 }
302 memset(bbt, 0 , ebcnt); 302 memset(bbt, 0 , ebcnt);
303 303
304 /* NOR flash does not implement block_isbad */
305 if (mtd->block_isbad == NULL)
306 goto out;
307
304 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 308 printk(PRINT_PREF "scanning for bad eraseblocks\n");
305 for (i = 0; i < ebcnt; ++i) { 309 for (i = 0; i < ebcnt; ++i) {
306 bbt[i] = is_block_bad(i) ? 1 : 0; 310 bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -309,6 +313,7 @@ static int scan_for_bad_eraseblocks(void)
309 cond_resched(); 313 cond_resched();
310 } 314 }
311 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 315 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
316out:
312 goodebcnt = ebcnt - bad; 317 goodebcnt = ebcnt - bad;
313 return 0; 318 return 0;
314} 319}
@@ -340,7 +345,7 @@ static int __init mtd_speedtest_init(void)
340 tmp = mtd->size; 345 tmp = mtd->size;
341 do_div(tmp, mtd->erasesize); 346 do_div(tmp, mtd->erasesize);
342 ebcnt = tmp; 347 ebcnt = tmp;
343 pgcnt = mtd->erasesize / mtd->writesize; 348 pgcnt = mtd->erasesize / pgsize;
344 349
345 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 350 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
346 "page size %u, count of eraseblocks %u, pages per " 351 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 63920476b57a..a99d3cd737d8 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -227,6 +227,10 @@ static int scan_for_bad_eraseblocks(void)
227 } 227 }
228 memset(bbt, 0 , ebcnt); 228 memset(bbt, 0 , ebcnt);
229 229
230 /* NOR flash does not implement block_isbad */
231 if (mtd->block_isbad == NULL)
232 return 0;
233
230 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 234 printk(PRINT_PREF "scanning for bad eraseblocks\n");
231 for (i = 0; i < ebcnt; ++i) { 235 for (i = 0; i < ebcnt; ++i) {
232 bbt[i] = is_block_bad(i) ? 1 : 0; 236 bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -265,7 +269,7 @@ static int __init mtd_stresstest_init(void)
265 tmp = mtd->size; 269 tmp = mtd->size;
266 do_div(tmp, mtd->erasesize); 270 do_div(tmp, mtd->erasesize);
267 ebcnt = tmp; 271 ebcnt = tmp;
268 pgcnt = mtd->erasesize / mtd->writesize; 272 pgcnt = mtd->erasesize / pgsize;
269 273
270 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 274 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
271 "page size %u, count of eraseblocks %u, pages per " 275 "page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 277786ebaa2c..1361574e2b00 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -291,8 +291,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
291 */ 291 */
292struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) 292struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
293{ 293{
294 int error, ubi_num, vol_id; 294 int error, ubi_num, vol_id, mod;
295 struct ubi_volume_desc *ret;
296 struct inode *inode; 295 struct inode *inode;
297 struct path path; 296 struct path path;
298 297
@@ -306,16 +305,16 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
306 return ERR_PTR(error); 305 return ERR_PTR(error);
307 306
308 inode = path.dentry->d_inode; 307 inode = path.dentry->d_inode;
308 mod = inode->i_mode;
309 ubi_num = ubi_major2num(imajor(inode)); 309 ubi_num = ubi_major2num(imajor(inode));
310 vol_id = iminor(inode) - 1; 310 vol_id = iminor(inode) - 1;
311 path_put(&path);
311 312
313 if (!S_ISCHR(mod))
314 return ERR_PTR(-EINVAL);
312 if (vol_id >= 0 && ubi_num >= 0) 315 if (vol_id >= 0 && ubi_num >= 0)
313 ret = ubi_open_volume(ubi_num, vol_id, mode); 316 return ubi_open_volume(ubi_num, vol_id, mode);
314 else 317 return ERR_PTR(-ENODEV);
315 ret = ERR_PTR(-ENODEV);
316
317 path_put(&path);
318 return ret;
319} 318}
320EXPORT_SYMBOL_GPL(ubi_open_volume_path); 319EXPORT_SYMBOL_GPL(ubi_open_volume_path);
321 320
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index c1d7b880c795..425bf5a3edd4 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -155,6 +155,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
155 if (err) 155 if (err)
156 return err; 156 return err;
157 vol->updating = 0; 157 vol->updating = 0;
158 return 0;
158 } 159 }
159 160
160 vol->upd_buf = vmalloc(ubi->leb_size); 161 vol->upd_buf = vmalloc(ubi->leb_size);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 1afc61e7455d..40044028d682 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -566,6 +566,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
566 vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); 566 vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
567 vol->alignment = be32_to_cpu(vtbl[i].alignment); 567 vol->alignment = be32_to_cpu(vtbl[i].alignment);
568 vol->data_pad = be32_to_cpu(vtbl[i].data_pad); 568 vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
569 vol->upd_marker = vtbl[i].upd_marker;
569 vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? 570 vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
570 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; 571 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
571 vol->name_len = be16_to_cpu(vtbl[i].name_len); 572 vol->name_len = be16_to_cpu(vtbl[i].name_len);
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 102ade134165..fee6eee7ae5b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
286 MCC_WRB_SGE_CNT_SHIFT; 286 MCC_WRB_SGE_CNT_SHIFT;
287 wrb->payload_length = payload_len; 287 wrb->payload_length = payload_len;
288 wrb->tag0 = opcode; 288 wrb->tag0 = opcode;
289 be_dws_cpu_to_le(wrb, 20); 289 be_dws_cpu_to_le(wrb, 8);
290} 290}
291 291
292/* Don't touch the hdr after it's prepared */ 292/* Don't touch the hdr after it's prepared */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f7902c16d..33ab8c7f14fe 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
910static void be_post_rx_frags(struct be_adapter *adapter) 910static void be_post_rx_frags(struct be_adapter *adapter)
911{ 911{
912 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; 912 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
913 struct be_rx_page_info *page_info = NULL; 913 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
914 struct be_queue_info *rxq = &adapter->rx_obj.q; 914 struct be_queue_info *rxq = &adapter->rx_obj.q;
915 struct page *pagep = NULL; 915 struct page *pagep = NULL;
916 struct be_eth_rx_d *rxd; 916 struct be_eth_rx_d *rxd;
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
941 rxd = queue_head_node(rxq); 941 rxd = queue_head_node(rxq);
942 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); 942 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
943 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); 943 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
944 queue_head_inc(rxq);
945 944
946 /* Any space left in the current big page for another frag? */ 945 /* Any space left in the current big page for another frag? */
947 if ((page_offset + rx_frag_size + rx_frag_size) > 946 if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
949 pagep = NULL; 948 pagep = NULL;
950 page_info->last_page_user = true; 949 page_info->last_page_user = true;
951 } 950 }
951
952 prev_page_info = page_info;
953 queue_head_inc(rxq);
952 page_info = &page_info_tbl[rxq->head]; 954 page_info = &page_info_tbl[rxq->head];
953 } 955 }
954 if (pagep) 956 if (pagep)
955 page_info->last_page_user = true; 957 prev_page_info->last_page_user = true;
956 958
957 if (posted) { 959 if (posted) {
958 atomic_add(posted, &rxq->used); 960 atomic_add(posted, &rxq->used);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 8ffea3990d07..0b23bc4f56c6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <asm/dpmc.h>
36#include <asm/blackfin.h> 37#include <asm/blackfin.h>
37#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
38#include <asm/portmux.h> 39#include <asm/portmux.h>
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
386 u32 sclk, mdc_div; 387 u32 sclk, mdc_div;
387 388
388 /* Enable PHY output early */ 389 /* Enable PHY output early */
389 if (!(bfin_read_VR_CTL() & PHYCLKOE)) 390 if (!(bfin_read_VR_CTL() & CLKBUFOE))
390 bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); 391 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
391 392
392 sclk = get_sclk(); 393 sclk = get_sclk();
393 mdc_div = ((sclk / MDC_CLK) / 2) - 1; 394 mdc_div = ((sclk / MDC_CLK) / 2) - 1;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2a567df3ea71..e8932db7ee77 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -326,6 +326,8 @@ struct e1000_adapter {
326 /* for ioport free */ 326 /* for ioport free */
327 int bars; 327 int bars;
328 int need_ioport; 328 int need_ioport;
329
330 bool discarding;
329}; 331};
330 332
331enum e1000_state_t { 333enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9bbd97..d29bb532eccf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1698 rctl &= ~E1000_RCTL_SZ_4096; 1698 rctl &= ~E1000_RCTL_SZ_4096;
1699 rctl |= E1000_RCTL_BSEX; 1699 rctl |= E1000_RCTL_BSEX;
1700 switch (adapter->rx_buffer_len) { 1700 switch (adapter->rx_buffer_len) {
1701 case E1000_RXBUFFER_256:
1702 rctl |= E1000_RCTL_SZ_256;
1703 rctl &= ~E1000_RCTL_BSEX;
1704 break;
1705 case E1000_RXBUFFER_512:
1706 rctl |= E1000_RCTL_SZ_512;
1707 rctl &= ~E1000_RCTL_BSEX;
1708 break;
1709 case E1000_RXBUFFER_1024:
1710 rctl |= E1000_RCTL_SZ_1024;
1711 rctl &= ~E1000_RCTL_BSEX;
1712 break;
1713 case E1000_RXBUFFER_2048: 1701 case E1000_RXBUFFER_2048:
1714 default: 1702 default:
1715 rctl |= E1000_RCTL_SZ_2048; 1703 rctl |= E1000_RCTL_SZ_2048;
@@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2802dma_error: 2790dma_error:
2803 dev_err(&pdev->dev, "TX DMA map failed\n"); 2791 dev_err(&pdev->dev, "TX DMA map failed\n");
2804 buffer_info->dma = 0; 2792 buffer_info->dma = 0;
2805 count--; 2793 if (count)
2806
2807 while (count >= 0) {
2808 count--; 2794 count--;
2809 i--; 2795
2810 if (i < 0) 2796 while (count--) {
2797 if (i==0)
2811 i += tx_ring->count; 2798 i += tx_ring->count;
2799 i--;
2812 buffer_info = &tx_ring->buffer_info[i]; 2800 buffer_info = &tx_ring->buffer_info[i];
2813 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2801 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2814 } 2802 }
@@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3176 * however with the new *_jumbo_rx* routines, jumbo receives will use 3164 * however with the new *_jumbo_rx* routines, jumbo receives will use
3177 * fragmented skbs */ 3165 * fragmented skbs */
3178 3166
3179 if (max_frame <= E1000_RXBUFFER_256) 3167 if (max_frame <= E1000_RXBUFFER_2048)
3180 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3181 else if (max_frame <= E1000_RXBUFFER_512)
3182 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3183 else if (max_frame <= E1000_RXBUFFER_1024)
3184 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3185 else if (max_frame <= E1000_RXBUFFER_2048)
3186 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3168 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3187 else 3169 else
3188#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3170#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3850 3832
3851 length = le16_to_cpu(rx_desc->length); 3833 length = le16_to_cpu(rx_desc->length);
3852 /* !EOP means multiple descriptors were used to store a single 3834 /* !EOP means multiple descriptors were used to store a single
3853 * packet, also make sure the frame isn't just CRC only */ 3835 * packet, if that's the case we need to toss it. In fact, we
3854 if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { 3836 * need to toss every packet with the EOP bit clear and the next
3837 * frame that _does_ have the EOP bit set, as it is by
3838 * definition only a frame fragment
3839 */
3840 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
3841 adapter->discarding = true;
3842
3843 if (adapter->discarding) {
3855 /* All receives must fit into a single buffer */ 3844 /* All receives must fit into a single buffer */
3856 E1000_DBG("%s: Receive packet consumed multiple" 3845 E1000_DBG("%s: Receive packet consumed multiple"
3857 " buffers\n", netdev->name); 3846 " buffers\n", netdev->name);
3858 /* recycle */ 3847 /* recycle */
3859 buffer_info->skb = skb; 3848 buffer_info->skb = skb;
3849 if (status & E1000_RXD_STAT_EOP)
3850 adapter->discarding = false;
3860 goto next_desc; 3851 goto next_desc;
3861 } 3852 }
3862 3853
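
The new discarding flag makes the drop sticky: once a descriptor arrives without EOP, that descriptor and every following one are recycled until the descriptor that finally carries EOP has also been dropped. In isolation the logic added above looks like the sketch below (rx_should_drop() is an illustrative helper, not a function in the driver; the e1000e hunks that follow keep the same state in FLAG2_IS_DISCARDING instead of a bool):

static bool rx_should_drop(struct e1000_adapter *adapter, u8 status)
{
	/* a descriptor without EOP starts (or continues) a fragment run */
	if (!(status & E1000_RXD_STAT_EOP))
		adapter->discarding = true;

	if (!adapter->discarding)
		return false;

	/* drop this one; the run ends once EOP finally shows up */
	if (status & E1000_RXD_STAT_EOP)
		adapter->discarding = false;
	return true;
}
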
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d6ee28f6ea08..d236efaf7478 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
421/* CRC Stripping defines */ 421/* CRC Stripping defines */
422#define FLAG2_CRC_STRIPPING (1 << 0) 422#define FLAG2_CRC_STRIPPING (1 << 0)
423#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 423#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
424#define FLAG2_IS_DISCARDING (1 << 2)
424 425
425#define E1000_RX_DESC_PS(R, i) \ 426#define E1000_RX_DESC_PS(R, i) \
426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 427 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c45965a256b6..57f149b75fbe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
450 450
451 length = le16_to_cpu(rx_desc->length); 451 length = le16_to_cpu(rx_desc->length);
452 452
453 /* !EOP means multiple descriptors were used to store a single 453 /*
454 * packet, also make sure the frame isn't just CRC only */ 454 * !EOP means multiple descriptors were used to store a single
455 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 455 * packet, if that's the case we need to toss it. In fact, we
456 * need to toss every packet with the EOP bit clear and the
457 * next frame that _does_ have the EOP bit set, as it is by
458 * definition only a frame fragment
459 */
460 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
461 adapter->flags2 |= FLAG2_IS_DISCARDING;
462
463 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
456 /* All receives must fit into a single buffer */ 464 /* All receives must fit into a single buffer */
457 e_dbg("Receive packet consumed multiple buffers\n"); 465 e_dbg("Receive packet consumed multiple buffers\n");
458 /* recycle */ 466 /* recycle */
459 buffer_info->skb = skb; 467 buffer_info->skb = skb;
468 if (status & E1000_RXD_STAT_EOP)
469 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
460 goto next_desc; 470 goto next_desc;
461 } 471 }
462 472
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
745 PCI_DMA_FROMDEVICE); 755 PCI_DMA_FROMDEVICE);
746 buffer_info->dma = 0; 756 buffer_info->dma = 0;
747 757
748 if (!(staterr & E1000_RXD_STAT_EOP)) { 758 /* see !EOP comment in other rx routine */
759 if (!(staterr & E1000_RXD_STAT_EOP))
760 adapter->flags2 |= FLAG2_IS_DISCARDING;
761
762 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
749 e_dbg("Packet Split buffers didn't pick up the full " 763 e_dbg("Packet Split buffers didn't pick up the full "
750 "packet\n"); 764 "packet\n");
751 dev_kfree_skb_irq(skb); 765 dev_kfree_skb_irq(skb);
766 if (staterr & E1000_RXD_STAT_EOP)
767 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
752 goto next_desc; 768 goto next_desc;
753 } 769 }
754 770
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1118 1134
1119 rx_ring->next_to_clean = 0; 1135 rx_ring->next_to_clean = 0;
1120 rx_ring->next_to_use = 0; 1136 rx_ring->next_to_use = 0;
1137 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1121 1138
1122 writel(0, adapter->hw.hw_addr + rx_ring->head); 1139 writel(0, adapter->hw.hw_addr + rx_ring->head);
1123 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1140 writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2333 rctl &= ~E1000_RCTL_SZ_4096; 2350 rctl &= ~E1000_RCTL_SZ_4096;
2334 rctl |= E1000_RCTL_BSEX; 2351 rctl |= E1000_RCTL_BSEX;
2335 switch (adapter->rx_buffer_len) { 2352 switch (adapter->rx_buffer_len) {
2336 case 256:
2337 rctl |= E1000_RCTL_SZ_256;
2338 rctl &= ~E1000_RCTL_BSEX;
2339 break;
2340 case 512:
2341 rctl |= E1000_RCTL_SZ_512;
2342 rctl &= ~E1000_RCTL_BSEX;
2343 break;
2344 case 1024:
2345 rctl |= E1000_RCTL_SZ_1024;
2346 rctl &= ~E1000_RCTL_BSEX;
2347 break;
2348 case 2048: 2353 case 2048:
2349 default: 2354 default:
2350 rctl |= E1000_RCTL_SZ_2048; 2355 rctl |= E1000_RCTL_SZ_2048;
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
3781 0, IPPROTO_TCP, 0); 3786 0, IPPROTO_TCP, 0);
3782 cmd_length = E1000_TXD_CMD_IP; 3787 cmd_length = E1000_TXD_CMD_IP;
3783 ipcse = skb_transport_offset(skb) - 1; 3788 ipcse = skb_transport_offset(skb) - 1;
3784 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3789 } else if (skb_is_gso_v6(skb)) {
3785 ipv6_hdr(skb)->payload_len = 0; 3790 ipv6_hdr(skb)->payload_len = 0;
3786 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3791 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3787 &ipv6_hdr(skb)->daddr, 3792 &ipv6_hdr(skb)->daddr,
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3962dma_error: 3967dma_error:
3963 dev_err(&pdev->dev, "TX DMA map failed\n"); 3968 dev_err(&pdev->dev, "TX DMA map failed\n");
3964 buffer_info->dma = 0; 3969 buffer_info->dma = 0;
3965 count--; 3970 if (count)
3966
3967 while (count >= 0) {
3968 count--; 3971 count--;
3969 i--; 3972
3970 if (i < 0) 3973 while (count--) {
3974 if (i==0)
3971 i += tx_ring->count; 3975 i += tx_ring->count;
3976 i--;
3972 buffer_info = &tx_ring->buffer_info[i]; 3977 buffer_info = &tx_ring->buffer_info[i];
3973 e1000_put_txbuf(adapter, buffer_info);; 3978 e1000_put_txbuf(adapter, buffer_info);;
3974 } 3979 }
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4317 * fragmented skbs 4322 * fragmented skbs
4318 */ 4323 */
4319 4324
4320 if (max_frame <= 256) 4325 if (max_frame <= 2048)
4321 adapter->rx_buffer_len = 256;
4322 else if (max_frame <= 512)
4323 adapter->rx_buffer_len = 512;
4324 else if (max_frame <= 1024)
4325 adapter->rx_buffer_len = 1024;
4326 else if (max_frame <= 2048)
4327 adapter->rx_buffer_len = 2048; 4326 adapter->rx_buffer_len = 2048;
4328 else 4327 else
4329 adapter->rx_buffer_len = 4096; 4328 adapter->rx_buffer_len = 4096;
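
The dma_error rewrite (the same shape appears in the e1000 hunk above and in the igbvf, ixgb and ixgbe hunks below) replaces the old `while (count >= 0)` / `if (i < 0)` tests, which never fire for the unsigned counters these drivers use, with a form that decrements in the loop condition and wraps the index before stepping back. Pulled out on its own, the unwind looks roughly like this (the helper name and explicit parameters are illustrative; types are those used by the driver's tx ring):

static void unwind_tx_map(struct e1000_adapter *adapter,
			  struct e1000_ring *tx_ring,
			  unsigned int i, unsigned int count)
{
	struct e1000_buffer *buffer_info;

	if (count)		/* the failing entry has no mapping to free */
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;	/* wrap backwards over the ring */
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}
}
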
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 933c64ff2465..997124d2992a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3422,7 +3422,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
3422 iph->daddr, 0, 3422 iph->daddr, 0,
3423 IPPROTO_TCP, 3423 IPPROTO_TCP,
3424 0); 3424 0);
3425 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3425 } else if (skb_is_gso_v6(skb)) {
3426 ipv6_hdr(skb)->payload_len = 0; 3426 ipv6_hdr(skb)->payload_len = 0;
3427 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3427 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3428 &ipv6_hdr(skb)->daddr, 3428 &ipv6_hdr(skb)->daddr,
@@ -3584,6 +3584,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3584 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 3584 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3585 struct skb_frag_struct *frag; 3585 struct skb_frag_struct *frag;
3586 3586
3587 count++;
3587 i++; 3588 i++;
3588 if (i == tx_ring->count) 3589 if (i == tx_ring->count)
3589 i = 0; 3590 i = 0;
@@ -3605,7 +3606,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3605 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3606 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3606 goto dma_error; 3607 goto dma_error;
3607 3608
3608 count++;
3609 } 3609 }
3610 3610
3611 tx_ring->buffer_info[i].skb = skb; 3611 tx_ring->buffer_info[i].skb = skb;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 0dbd0320023a..297a5ddd77f0 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1963 iph->daddr, 0, 1963 iph->daddr, 0,
1964 IPPROTO_TCP, 1964 IPPROTO_TCP,
1965 0); 1965 0);
1966 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 1966 } else if (skb_is_gso_v6(skb)) {
1967 ipv6_hdr(skb)->payload_len = 0; 1967 ipv6_hdr(skb)->payload_len = 0;
1968 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1968 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1969 &ipv6_hdr(skb)->daddr, 1969 &ipv6_hdr(skb)->daddr,
@@ -2126,6 +2126,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2126 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2126 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2127 struct skb_frag_struct *frag; 2127 struct skb_frag_struct *frag;
2128 2128
2129 count++;
2129 i++; 2130 i++;
2130 if (i == tx_ring->count) 2131 if (i == tx_ring->count)
2131 i = 0; 2132 i = 0;
@@ -2146,7 +2147,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2146 PCI_DMA_TODEVICE); 2147 PCI_DMA_TODEVICE);
2147 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2148 if (pci_dma_mapping_error(pdev, buffer_info->dma))
2148 goto dma_error; 2149 goto dma_error;
2149 count++;
2150 } 2150 }
2151 2151
2152 tx_ring->buffer_info[i].skb = skb; 2152 tx_ring->buffer_info[i].skb = skb;
@@ -2163,14 +2163,14 @@ dma_error:
2163 buffer_info->length = 0; 2163 buffer_info->length = 0;
2164 buffer_info->next_to_watch = 0; 2164 buffer_info->next_to_watch = 0;
2165 buffer_info->mapped_as_page = false; 2165 buffer_info->mapped_as_page = false;
2166 count--; 2166 if (count)
2167 count--;
2167 2168
2168 /* clear timestamp and dma mappings for remaining portion of packet */ 2169 /* clear timestamp and dma mappings for remaining portion of packet */
2169 while (count >= 0) { 2170 while (count--) {
2170 count--; 2171 if (i==0)
2171 i--;
2172 if (i < 0)
2173 i += tx_ring->count; 2172 i += tx_ring->count;
2173 i--;
2174 buffer_info = &tx_ring->buffer_info[i]; 2174 buffer_info = &tx_ring->buffer_info[i];
2175 igbvf_put_txbuf(adapter, buffer_info); 2175 igbvf_put_txbuf(adapter, buffer_info);
2176 } 2176 }
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01d5feb..593d1a4f217c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1363dma_error: 1363dma_error:
1364 dev_err(&pdev->dev, "TX DMA map failed\n"); 1364 dev_err(&pdev->dev, "TX DMA map failed\n");
1365 buffer_info->dma = 0; 1365 buffer_info->dma = 0;
1366 count--; 1366 if (count)
1367
1368 while (count >= 0) {
1369 count--; 1367 count--;
1370 i--; 1368
1371 if (i < 0) 1369 while (count--) {
1370 if (i==0)
1372 i += tx_ring->count; 1371 i += tx_ring->count;
1372 i--;
1373 buffer_info = &tx_ring->buffer_info[i]; 1373 buffer_info = &tx_ring->buffer_info[i];
1374 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1374 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1375 } 1375 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9c9202f40b10..b5f64ad67975 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4928,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
4928 iph->daddr, 0, 4928 iph->daddr, 0,
4929 IPPROTO_TCP, 4929 IPPROTO_TCP,
4930 0); 4930 0);
4931 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 4931 } else if (skb_is_gso_v6(skb)) {
4932 ipv6_hdr(skb)->payload_len = 0; 4932 ipv6_hdr(skb)->payload_len = 0;
4933 tcp_hdr(skb)->check = 4933 tcp_hdr(skb)->check =
4934 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4934 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -5167,14 +5167,14 @@ dma_error:
5167 tx_buffer_info->dma = 0; 5167 tx_buffer_info->dma = 0;
5168 tx_buffer_info->time_stamp = 0; 5168 tx_buffer_info->time_stamp = 0;
5169 tx_buffer_info->next_to_watch = 0; 5169 tx_buffer_info->next_to_watch = 0;
5170 count--; 5170 if (count)
5171 count--;
5171 5172
5172 /* clear timestamp and dma mappings for remaining portion of packet */ 5173 /* clear timestamp and dma mappings for remaining portion of packet */
5173 while (count >= 0) { 5174 while (count--) {
5174 count--; 5175 if (i==0)
5175 i--;
5176 if (i < 0)
5177 i += tx_ring->count; 5176 i += tx_ring->count;
5177 i--;
5178 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5178 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5180 } 5180 }
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 813aca3fc433..7b17404d0858 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
717 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), 717 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
718 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), 718 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
719 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 719 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
720 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
720 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 721 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
721 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 722 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
722 PCMCIA_DEVICE_NULL, 723 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b0e9f9c51721..0295097d6c44 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg);
410 410
411 411
412static void phy_change(struct work_struct *work); 412static void phy_change(struct work_struct *work);
413static void phy_state_machine(struct work_struct *work);
414 413
415/** 414/**
416 * phy_start_machine - start PHY state machine tracking 415 * phy_start_machine - start PHY state machine tracking
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev,
430{ 429{
431 phydev->adjust_state = handler; 430 phydev->adjust_state = handler;
432 431
433 INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
434 schedule_delayed_work(&phydev->state_queue, HZ); 432 schedule_delayed_work(&phydev->state_queue, HZ);
435} 433}
436 434
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start);
761 * phy_state_machine - Handle the state machine 759 * phy_state_machine - Handle the state machine
762 * @work: work_struct that describes the work to be done 760 * @work: work_struct that describes the work to be done
763 */ 761 */
764static void phy_state_machine(struct work_struct *work) 762void phy_state_machine(struct work_struct *work)
765{ 763{
766 struct delayed_work *dwork = to_delayed_work(work); 764 struct delayed_work *dwork = to_delayed_work(work);
767 struct phy_device *phydev = 765 struct phy_device *phydev =
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8212b2b93422..adbc0fded130 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
177 dev->state = PHY_DOWN; 177 dev->state = PHY_DOWN;
178 178
179 mutex_init(&dev->lock); 179 mutex_init(&dev->lock);
180 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
180 181
181 return dev; 182 return dev;
182} 183}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391afa02..894a7c84faef 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4119 err = pcie_set_readrq(pdev, 4096); 4119 err = pcie_set_readrq(pdev, 4096);
4120 if (err) { 4120 if (err) {
4121 dev_err(&pdev->dev, "Set readrq failed.\n"); 4121 dev_err(&pdev->dev, "Set readrq failed.\n");
4122 goto err_out; 4122 goto err_out1;
4123 } 4123 }
4124 4124
4125 err = pci_request_regions(pdev, DRV_NAME); 4125 err = pci_request_regions(pdev, DRV_NAME);
@@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4140 4140
4141 if (err) { 4141 if (err) {
4142 dev_err(&pdev->dev, "No usable DMA configuration.\n"); 4142 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4143 goto err_out; 4143 goto err_out2;
4144 } 4144 }
4145 4145
4146 /* Set PCIe reset type for EEH to fundamental. */ 4146 /* Set PCIe reset type for EEH to fundamental. */
@@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4152 if (!qdev->reg_base) { 4152 if (!qdev->reg_base) {
4153 dev_err(&pdev->dev, "Register mapping failed.\n"); 4153 dev_err(&pdev->dev, "Register mapping failed.\n");
4154 err = -ENOMEM; 4154 err = -ENOMEM;
4155 goto err_out; 4155 goto err_out2;
4156 } 4156 }
4157 4157
4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3); 4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4162 if (!qdev->doorbell_area) { 4162 if (!qdev->doorbell_area) {
4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); 4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4164 err = -ENOMEM; 4164 err = -ENOMEM;
4165 goto err_out; 4165 goto err_out2;
4166 } 4166 }
4167 4167
4168 err = ql_get_board_info(qdev); 4168 err = ql_get_board_info(qdev);
4169 if (err) { 4169 if (err) {
4170 dev_err(&pdev->dev, "Register access failed.\n"); 4170 dev_err(&pdev->dev, "Register access failed.\n");
4171 err = -EIO; 4171 err = -EIO;
4172 goto err_out; 4172 goto err_out2;
4173 } 4173 }
4174 qdev->msg_enable = netif_msg_init(debug, default_msg); 4174 qdev->msg_enable = netif_msg_init(debug, default_msg);
4175 spin_lock_init(&qdev->hw_lock); 4175 spin_lock_init(&qdev->hw_lock);
@@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4179 err = qdev->nic_ops->get_flash(qdev); 4179 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4180 if (err) {
4181 dev_err(&pdev->dev, "Invalid FLASH.\n"); 4181 dev_err(&pdev->dev, "Invalid FLASH.\n");
4182 goto err_out; 4182 goto err_out2;
4183 } 4183 }
4184 4184
4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
@@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4212 DRV_NAME, DRV_VERSION); 4212 DRV_NAME, DRV_VERSION);
4213 } 4213 }
4214 return 0; 4214 return 0;
4215err_out: 4215err_out2:
4216 ql_release_all(pdev); 4216 ql_release_all(pdev);
4217err_out1:
4217 pci_disable_device(pdev); 4218 pci_disable_device(pdev);
4218 return err; 4219 return err;
4219} 4220}
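
The relabelling splits the single err_out into a two-level unwind so that ql_release_all() only runs once the PCI regions and mappings have actually been acquired; a failure before that point only disables the device. Reduced to its skeleton, using only the gotos visible in the hunks above:

	err = pcie_set_readrq(pdev, 4096);
	if (err)
		goto err_out1;		/* nothing acquired yet */

	/* ... pci_request_regions(), register and doorbell mappings ... */

	err = ql_get_board_info(qdev);
	if (err) {
		err = -EIO;
		goto err_out2;		/* regions and mappings are held */
	}

	return 0;

err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
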
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc4218667cba..3c4836d0898f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3421 break; 3421 break;
3422 } 3422 }
3423 } else { 3423 } else {
3424 if (!(val64 & busy_bit)) { 3424 if (val64 & busy_bit) {
3425 ret = SUCCESS; 3425 ret = SUCCESS;
3426 break; 3426 break;
3427 } 3427 }
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 0d4eba7266ec..9f035b9f0350 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -804,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
804 loff_t offset, u8 *buffer, size_t length) 804 loff_t offset, u8 *buffer, size_t length)
805{ 805{
806 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; 806 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
807 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)]; 807 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
808 size_t outlen; 808 size_t outlen;
809 int rc; 809 int rc;
810 810
@@ -828,7 +828,7 @@ fail:
828int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 828int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
829 loff_t offset, const u8 *buffer, size_t length) 829 loff_t offset, const u8 *buffer, size_t length)
830{ 830{
831 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)]; 831 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
832 int rc; 832 int rc;
833 833
834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@@ -838,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
838 838
839 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 839 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
840 840
841 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf), 841 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
842 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
842 NULL, 0, NULL); 843 NULL, 0, NULL);
843 if (rc) 844 if (rc)
844 goto fail; 845 goto fail;
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index de916728c2e3..10ce98f4c0fb 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
111extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 111extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
112 loff_t offset, const u8 *buffer, 112 loff_t offset, const u8 *buffer,
113 size_t length); 113 size_t length);
114#define EFX_MCDI_NVRAM_LEN_MAX 128
114extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 115extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
115 loff_t offset, size_t length); 116 loff_t offset, size_t length);
116extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, 117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
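
With the on-stack MCDI buffers now sized by the fixed EFX_MCDI_NVRAM_LEN_MAX rather than a variable-length array, callers must keep each NVRAM transfer within that limit. The MTD code later in this patch does so by chunking; roughly (variable names mirror that code):

	size_t done = 0, chunk;
	int rc = 0;

	while (done < len) {
		chunk = min_t(size_t, len - done, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, type, offset + done,
					 buffer + done, chunk);
		if (rc)
			break;
		done += chunk;
	}
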
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360a46f0..73e71f420624 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1090,8 +1090,10 @@
1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
1092#define MC_CMD_MAC_RX_MATCH_FAULT 59 1092#define MC_CMD_MAC_RX_MATCH_FAULT 59
1093#define MC_CMD_GMAC_DMABUF_START 64
1094#define MC_CMD_GMAC_DMABUF_END 95
1093/* Insert new members here. */ 1095/* Insert new members here. */
1094#define MC_CMD_MAC_GENERATION_END 60 1096#define MC_CMD_MAC_GENERATION_END 96
1095#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) 1097#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
1096 1098
1097/* MC_CMD_MAC_STATS: 1099/* MC_CMD_MAC_STATS:
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 3a464529a46b..407bbaddfea6 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -23,7 +23,6 @@
23#include "mcdi_pcol.h" 23#include "mcdi_pcol.h"
24 24
25#define EFX_SPI_VERIFY_BUF_LEN 16 25#define EFX_SPI_VERIFY_BUF_LEN 16
26#define EFX_MCDI_CHUNK_LEN 128
27 26
28struct efx_mtd_partition { 27struct efx_mtd_partition {
29 struct mtd_info mtd; 28 struct mtd_info mtd;
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
428 int rc = 0; 427 int rc = 0;
429 428
430 while (offset < end) { 429 while (offset < end) {
431 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 430 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
432 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, 431 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
433 buffer, chunk); 432 buffer, chunk);
434 if (rc) 433 if (rc)
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
491 } 490 }
492 491
493 while (offset < end) { 492 while (offset < end) {
494 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 493 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
495 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, 494 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
496 buffer, chunk); 495 buffer, chunk);
497 if (rc) 496 if (rc)
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index ff8f0a417fa3..e0d13a451019 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -318,12 +318,6 @@ static int qt202x_reset_phy(struct efx_nic *efx)
318 /* Wait 250ms for the PHY to complete bootup */ 318 /* Wait 250ms for the PHY to complete bootup */
319 msleep(250); 319 msleep(250);
320 320
321 /* Check that all the MMDs we expect are present and responding. We
322 * expect faults on some if the link is down, but not on the PHY XS */
323 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
324 if (rc < 0)
325 goto fail;
326
327 falcon_board(efx)->type->init_phy(efx); 321 falcon_board(efx)->type->init_phy(efx);
328 322
329 return rc; 323 return rc;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 37f486b65f63..d760650c5c04 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
644{ 644{
645 u32 reg1; 645 u32 reg1;
646 646
647 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
647 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 648 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
648 reg1 &= ~phy_power[port]; 649 reg1 &= ~phy_power[port];
649 650
@@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
651 reg1 |= coma_mode[port]; 652 reg1 |= coma_mode[port];
652 653
653 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 654 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
655 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
654 sky2_pci_read32(hw, PCI_DEV_REG1); 656 sky2_pci_read32(hw, PCI_DEV_REG1);
655 657
656 if (hw->chip_id == CHIP_ID_YUKON_FE) 658 if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
707 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); 709 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
708 } 710 }
709 711
712 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
710 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 713 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
711 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ 714 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
712 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 715 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
716 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
713} 717}
714 718
715/* Force a renegotiation */ 719/* Force a renegotiation */
@@ -2149,7 +2153,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
2149 2153
2150 /* reset PHY Link Detect */ 2154 /* reset PHY Link Detect */
2151 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); 2155 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2156 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2152 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); 2157 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2158 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2153 2159
2154 sky2_link_up(sky2); 2160 sky2_link_up(sky2);
2155} 2161}
@@ -2640,6 +2646,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2640 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2646 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2641 u16 pci_err; 2647 u16 pci_err;
2642 2648
2649 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2643 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2650 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2644 if (net_ratelimit()) 2651 if (net_ratelimit())
2645 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", 2652 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2647,12 +2654,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2647 2654
2648 sky2_pci_write16(hw, PCI_STATUS, 2655 sky2_pci_write16(hw, PCI_STATUS,
2649 pci_err | PCI_STATUS_ERROR_BITS); 2656 pci_err | PCI_STATUS_ERROR_BITS);
2657 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2650 } 2658 }
2651 2659
2652 if (status & Y2_IS_PCI_EXP) { 2660 if (status & Y2_IS_PCI_EXP) {
2653 /* PCI-Express uncorrectable Error occurred */ 2661 /* PCI-Express uncorrectable Error occurred */
2654 u32 err; 2662 u32 err;
2655 2663
2664 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2656 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2665 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2657 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 2666 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2658 0xfffffffful); 2667 0xfffffffful);
@@ -2660,6 +2669,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2660 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); 2669 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2661 2670
2662 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2671 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2672 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2663 } 2673 }
2664 2674
2665 if (status & Y2_HWE_L1_MASK) 2675 if (status & Y2_HWE_L1_MASK)
@@ -3038,6 +3048,7 @@ static void sky2_reset(struct sky2_hw *hw)
3038 } 3048 }
3039 3049
3040 sky2_power_on(hw); 3050 sky2_power_on(hw);
3051 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3041 3052
3042 for (i = 0; i < hw->ports; i++) { 3053 for (i = 0; i < hw->ports; i++) {
3043 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 3054 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3074,6 +3085,7 @@ static void sky2_reset(struct sky2_hw *hw)
3074 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; 3085 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3075 3086
3076 /* reset PHY Link Detect */ 3087 /* reset PHY Link Detect */
3088 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3077 sky2_pci_write16(hw, PSM_CONFIG_REG4, 3089 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3078 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); 3090 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3079 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3091 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@@ -3091,6 +3103,7 @@ static void sky2_reset(struct sky2_hw *hw)
3091 /* restore the PCIe Link Control register */ 3103 /* restore the PCIe Link Control register */
3092 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); 3104 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3093 } 3105 }
3106 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3094 3107
3095 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ 3108 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3096 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); 3109 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@@ -3228,6 +3241,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3228 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; 3241 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3229} 3242}
3230 3243
3244static void sky2_hw_set_wol(struct sky2_hw *hw)
3245{
3246 int wol = 0;
3247 int i;
3248
3249 for (i = 0; i < hw->ports; i++) {
3250 struct net_device *dev = hw->dev[i];
3251 struct sky2_port *sky2 = netdev_priv(dev);
3252
3253 if (sky2->wol)
3254 wol = 1;
3255 }
3256
3257 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3258 hw->chip_id == CHIP_ID_YUKON_EX ||
3259 hw->chip_id == CHIP_ID_YUKON_FE_P)
3260 sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3261
3262 device_set_wakeup_enable(&hw->pdev->dev, wol);
3263}
3264
3231static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 3265static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3232{ 3266{
3233 const struct sky2_port *sky2 = netdev_priv(dev); 3267 const struct sky2_port *sky2 = netdev_priv(dev);
@@ -3247,13 +3281,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3247 3281
3248 sky2->wol = wol->wolopts; 3282 sky2->wol = wol->wolopts;
3249 3283
3250 if (hw->chip_id == CHIP_ID_YUKON_EC_U || 3284 sky2_hw_set_wol(hw);
3251 hw->chip_id == CHIP_ID_YUKON_EX ||
3252 hw->chip_id == CHIP_ID_YUKON_FE_P)
3253 sky2_write32(hw, B0_CTST, sky2->wol
3254 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3255
3256 device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
3257 3285
3258 if (!netif_running(dev)) 3286 if (!netif_running(dev))
3259 sky2_wol_init(sky2); 3287 sky2_wol_init(sky2);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 595777dcadb1..20696b5d60a5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -249,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
249 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 249 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ 250 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
251 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ 251 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
252 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
252 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 253 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
253 { } /* terminate list */ 254 { } /* terminate list */
254}; 255};
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 96bdc0b43889..eb8fe7e16c6c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3279,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3279 /* Handle the transmitted buffer and release */ 3279 /* Handle the transmitted buffer and release */
3280 /* the BD to be used with the current frame */ 3280 /* the BD to be used with the current frame */
3281 3281
3282 if (bd == ugeth->txBd[txQ]) /* queue empty? */ 3282 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3283 if (!skb)
3283 break; 3284 break;
3284 3285
3285 dev->stats.tx_packets++; 3286 dev->stats.tx_packets++;
3286 3287
3287 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3288
3289 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3288 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
3290 skb_recycle_check(skb, 3289 skb_recycle_check(skb,
3291 ugeth->ug_info->uf_info.max_rx_buf_length + 3290 ugeth->ug_info->uf_info.max_rx_buf_length +
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c708ecc3cb2e..9ead30bd00c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work)
395 395
396 vi = container_of(work, struct virtnet_info, refill.work); 396 vi = container_of(work, struct virtnet_info, refill.work);
397 napi_disable(&vi->napi); 397 napi_disable(&vi->napi);
398 try_fill_recv(vi, GFP_KERNEL); 398 still_empty = !try_fill_recv(vi, GFP_KERNEL);
399 still_empty = (vi->num == 0);
400 napi_enable(&vi->napi); 399 napi_enable(&vi->napi);
401 400
402 /* In theory, this can happen: if we don't get any buffers in 401 /* In theory, this can happen: if we don't get any buffers in
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 5cc0f279417e..2d7c96d7e865 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -151,6 +151,7 @@ enum {
151 151
152 /* Device IDs */ 152 /* Device IDs */
153 USB_DEVICE_ID_I6050 = 0x0186, 153 USB_DEVICE_ID_I6050 = 0x0186,
154 USB_DEVICE_ID_I6050_2 = 0x0188,
154}; 155};
155 156
156 157
@@ -234,6 +235,7 @@ struct i2400mu {
234 u8 rx_size_auto_shrink; 235 u8 rx_size_auto_shrink;
235 236
236 struct dentry *debugfs_dentry; 237 struct dentry *debugfs_dentry;
238 unsigned i6050:1; /* 1 if this is a 6050 based SKU */
237}; 239};
238 240
239 241
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 3b48681f8a0d..98f4f8c5fb68 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface,
478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; 478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
479 i2400m->bus_bm_mac_addr_impaired = 0; 479 i2400m->bus_bm_mac_addr_impaired = 0;
480 480
481 if (id->idProduct == USB_DEVICE_ID_I6050) { 481 switch (id->idProduct) {
482 case USB_DEVICE_ID_I6050:
483 case USB_DEVICE_ID_I6050_2:
484 i2400mu->i6050 = 1;
485 break;
486 default:
487 break;
488 }
489
490 if (i2400mu->i6050) {
482 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; 491 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
483 i2400mu->endpoint_cfg.bulk_out = 0; 492 i2400mu->endpoint_cfg.bulk_out = 0;
484 i2400mu->endpoint_cfg.notification = 3; 493 i2400mu->endpoint_cfg.notification = 3;
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
719static 728static
720struct usb_device_id i2400mu_id_table[] = { 729struct usb_device_id i2400mu_id_table[] = {
721 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, 730 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
731 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
722 { USB_DEVICE(0x8086, 0x0181) }, 732 { USB_DEVICE(0x8086, 0x0181) },
723 { USB_DEVICE(0x8086, 0x1403) }, 733 { USB_DEVICE(0x8086, 0x1403) },
724 { USB_DEVICE(0x8086, 0x1405) }, 734 { USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 33a5866538e7..de45f308b744 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1598,6 +1598,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
1598 .use_bsm = false, 1598 .use_bsm = false,
1599 .ht_greenfield_support = true, 1599 .ht_greenfield_support = true,
1600 .led_compensation = 51, 1600 .led_compensation = 51,
1601 .use_rts_for_ht = true, /* use rts/cts protection */
1601 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1602 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1603 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1603}; 1604};
@@ -1622,6 +1623,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
1622 .use_bsm = false, 1623 .use_bsm = false,
1623 .ht_greenfield_support = true, 1624 .ht_greenfield_support = true,
1624 .led_compensation = 51, 1625 .led_compensation = 51,
1626 .use_rts_for_ht = true, /* use rts/cts protection */
1625 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1627 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1626}; 1628};
1627 1629
@@ -1667,6 +1669,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
1667 .use_bsm = false, 1669 .use_bsm = false,
1668 .ht_greenfield_support = true, 1670 .ht_greenfield_support = true,
1669 .led_compensation = 51, 1671 .led_compensation = 51,
1672 .use_rts_for_ht = true, /* use rts/cts protection */
1670 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1673 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1671 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1674 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1672}; 1675};
@@ -1691,6 +1694,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
1691 .use_bsm = false, 1694 .use_bsm = false,
1692 .ht_greenfield_support = true, 1695 .ht_greenfield_support = true,
1693 .led_compensation = 51, 1696 .led_compensation = 51,
1697 .use_rts_for_ht = true, /* use rts/cts protection */
1694 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1695 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1699 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1696}; 1700};
@@ -1715,6 +1719,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
1715 .use_bsm = false, 1719 .use_bsm = false,
1716 .ht_greenfield_support = true, 1720 .ht_greenfield_support = true,
1717 .led_compensation = 51, 1721 .led_compensation = 51,
1722 .use_rts_for_ht = true, /* use rts/cts protection */
1718 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1723 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1719 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1724 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1720}; 1725};
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index e7d88d1da15d..83cc4e500a96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,3 +1,29 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
1#include <linux/module.h> 27#include <linux/module.h>
2 28
3/* sparse doesn't like tracepoint macros */ 29/* sparse doesn't like tracepoint macros */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 21361968ab7e..d9c7363b1bbb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,3 +1,29 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
1#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) 27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWLWIFI_DEVICE_TRACE 28#define __IWLWIFI_DEVICE_TRACE
3 29
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 777584d76a88..1e41ad0fcad5 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
973 973
974 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); 974 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
975 975
976 update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
977 update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
978 sizeof(struct iwm_umac_wifi_if));
979
976 update.command = cpu_to_le32(command); 980 update.command = cpu_to_le32(command);
977 if (pmksa->bssid) 981 if (pmksa->bssid)
978 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); 982 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 06af0552cd75..3dfd9f0e9003 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
463#define IWM_CMD_PMKID_FLUSH 3 463#define IWM_CMD_PMKID_FLUSH 3
464 464
465struct iwm_umac_pmkid_update { 465struct iwm_umac_pmkid_update {
466 struct iwm_umac_wifi_if hdr;
466 __le32 command; 467 __le32 command;
467 u8 bssid[ETH_ALEN]; 468 u8 bssid[ETH_ALEN];
468 __le16 reserved; 469 __le16 reserved;
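Note on the two iwmc3200wifi hunks above: the generic wifi_if header is added to the PMKID update message, and its buf_size is computed as the full structure minus that header, so the advertised payload length can never drift from the struct layout. A self-contained sketch of the same sizing rule; the types are cut-down stand-ins, the OID is made up, and host byte order is used for brevity where the driver uses cpu_to_le16():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wifi_if_hdr {		/* stand-in for struct iwm_umac_wifi_if */
	uint8_t  oid;
	uint16_t buf_size;	/* bytes following this header */
} __attribute__((packed));

struct pmkid_update {		/* stand-in for struct iwm_umac_pmkid_update */
	struct wifi_if_hdr hdr;
	uint32_t command;
	uint8_t  bssid[6];
	uint16_t reserved;
	uint8_t  pmkid[16];
} __attribute__((packed));

int main(void)
{
	struct pmkid_update update;

	memset(&update, 0, sizeof(update));
	update.hdr.oid = 0x2a;	/* illustrative OID value */
	update.hdr.buf_size = (uint16_t)(sizeof(update) - sizeof(update.hdr));

	printf("message is %zu bytes, header advertises a %u byte payload\n",
	       sizeof(update), (unsigned int)update.hdr.buf_size);
	return 0;
}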
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a19b2a..a72f7c2577de 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
197 i %= ring_limit; 197 i %= ring_limit;
198 continue; 198 continue;
199 } 199 }
200
201 if (unlikely(len > priv->common.rx_mtu)) {
202 if (net_ratelimit())
203 dev_err(&priv->pdev->dev, "rx'd frame size "
204 "exceeds length threshold.\n");
205
206 len = priv->common.rx_mtu;
207 }
200 skb_put(skb, len); 208 skb_put(skb, len);
201 209
202 if (p54_rx(dev, skb)) { 210 if (p54_rx(dev, skb)) {
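Note on the p54p_check_rx_ring() hunk above: a device-reported frame length larger than what the receive buffer was sized for is no longer trusted; the driver warns (rate-limited) and caps the length before skb_put(). The clamp itself as a tiny standalone sketch, with made-up sizes:

#include <stddef.h>
#include <stdio.h>

/* Cap a device-reported length at what the buffer was allocated for. */
static size_t clamp_rx_len(size_t len, size_t rx_mtu)
{
	if (len > rx_mtu) {
		fprintf(stderr, "rx'd frame size exceeds length threshold\n");
		len = rx_mtu;
	}
	return len;
}

int main(void)
{
	size_t rx_mtu = 3240;	/* made-up buffer size */

	printf("%zu -> %zu\n", (size_t)1500, clamp_rx_len(1500, rx_mtu));
	printf("%zu -> %zu\n", (size_t)9000, clamp_rx_len(9000, rx_mtu));
	return 0;
}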
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ac19ecd19cfe..72d3e437e190 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
62 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
63 /* ZD1211B */ 63 /* ZD1211B */
64 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, 64 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
67 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 68 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fdb2e7c14506..5905936c7c60 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1004,8 +1004,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
1004 if (device == NULL || 1004 if (device == NULL ||
1005 device != dasd_device_from_cdev_locked(cdev) || 1005 device != dasd_device_from_cdev_locked(cdev) ||
1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1006 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1007 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " 1007 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1008 "bus_id %s", dev_name(&cdev->dev)); 1008 "invalid device in request");
1009 return; 1009 return;
1010 } 1010 }
1011 1011
@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1078 device = (struct dasd_device *) cqr->startdev; 1078 device = (struct dasd_device *) cqr->startdev;
1079 if (!device || 1079 if (!device ||
1080 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1080 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1081 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " 1081 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1082 "bus_id %s", dev_name(&cdev->dev)); 1082 "invalid device in request");
1083 return; 1083 return;
1084 } 1084 }
1085 1085
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c500c462225..1cca21aafaba 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3033,7 +3033,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3033 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3033 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3034 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", 3034 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3035 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3035 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3036 scsw_cc(&irb->scsw), req->intrc); 3036 scsw_cc(&irb->scsw), req ? req->intrc : 0);
3037 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3037 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3038 " device %s: Failing CCW: %p\n", 3038 " device %s: Failing CCW: %p\n",
3039 dev_name(&device->cdev->dev), 3039 dev_name(&device->cdev->dev),
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index fc7b30b4a255..7039d9cf0fb4 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
260 struct ccw_dev_id dev_id; 260 struct ccw_dev_id dev_id;
261 261
262 base = block->base; 262 base = block->base;
263 if (!base->discipline->fill_info) 263 if (!base->discipline || !base->discipline->fill_info)
264 return -EINVAL; 264 return -EINVAL;
265 265
266 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); 266 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
@@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
303 dasd_info->features |= 303 dasd_info->features |=
304 ((base->features & DASD_FEATURE_READONLY) != 0); 304 ((base->features & DASD_FEATURE_READONLY) != 0);
305 305
306 if (base->discipline) 306 memcpy(dasd_info->type, base->discipline->name, 4);
307 memcpy(dasd_info->type, base->discipline->name, 4);
308 else
309 memcpy(dasd_info->type, "none", 4);
310 307
311 if (block->request_queue->request_fn) { 308 if (block->request_queue->request_fn) {
312 struct list_head *l; 309 struct list_head *l;
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 6315fbd8e68b..71f95f54866f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -72,7 +72,7 @@ dasd_devices_show(struct seq_file *m, void *v)
72 /* Print device number. */ 72 /* Print device number. */
73 seq_printf(m, "%s", dev_name(&device->cdev->dev)); 73 seq_printf(m, "%s", dev_name(&device->cdev->dev));
74 /* Print discipline string. */ 74 /* Print discipline string. */
75 if (device != NULL && device->discipline != NULL) 75 if (device->discipline != NULL)
76 seq_printf(m, "(%s)", device->discipline->name); 76 seq_printf(m, "(%s)", device->discipline->name);
77 else 77 else
78 seq_printf(m, "(none)"); 78 seq_printf(m, "(none)");
@@ -92,10 +92,7 @@ dasd_devices_show(struct seq_file *m, void *v)
92 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; 92 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
93 seq_printf(m, "%4s: ", substr); 93 seq_printf(m, "%4s: ", substr);
94 /* Print device status information. */ 94 /* Print device status information. */
95 switch ((device != NULL) ? device->state : -1) { 95 switch (device->state) {
96 case -1:
97 seq_printf(m, "unknown");
98 break;
99 case DASD_STATE_NEW: 96 case DASD_STATE_NEW:
100 seq_printf(m, "new"); 97 seq_printf(m, "new");
101 break; 98 break;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index b9d2a007e93b..3796ffdb8479 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -495,6 +495,10 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
495 if (tty->driver_data == NULL) 495 if (tty->driver_data == NULL)
496 return -ENOMEM; 496 return -ENOMEM;
497 tty->low_latency = 0; 497 tty->low_latency = 0;
498 if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
499 tty->winsize.ws_row = 24;
500 tty->winsize.ws_col = 80;
501 }
498 } 502 }
499 return 0; 503 return 0;
500} 504}
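Note on the sclp_vt220_open() hunk above: the 24x80 default is applied only while both window dimensions are still zero, so a size already set on the tty is left alone. A minimal sketch of that default-only-if-unset check, using a stand-in struct rather than the kernel's struct winsize:

#include <stdio.h>

struct win_size {		/* stand-in for the tty's struct winsize */
	unsigned short ws_row;
	unsigned short ws_col;
};

/* Apply 24x80 only when neither dimension has been set yet. */
static void apply_default_winsize(struct win_size *ws)
{
	if (!ws->ws_row && !ws->ws_col) {
		ws->ws_row = 24;
		ws->ws_col = 80;
	}
}

int main(void)
{
	struct win_size unset = { 0, 0 };
	struct win_size preset = { 50, 132 };

	apply_default_winsize(&unset);
	apply_default_winsize(&preset);
	printf("unset -> %hux%hu, preset stays %hux%hu\n",
	       unset.ws_row, unset.ws_col, preset.ws_row, preset.ws_col);
	return 0;
}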
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index a23726a0735c..142f72a2ca5a 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev,
373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; 373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
374 return -EAGAIN; 374 return -EAGAIN;
375 } 375 }
376 if (service_rc == 8 && service_rs == 72)
377 return -EINVAL;
376 zdev->online = 0; 378 zdev->online = 0;
377 return -EAGAIN; /* repeat the request on a different device. */ 379 return -EAGAIN; /* repeat the request on a different device. */
378 } 380 }
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 79c120578e61..68f3e6204db8 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -470,6 +470,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
470 } 470 }
471 if (service_rc == 12 && service_rs == 769) 471 if (service_rc == 12 && service_rs == 769)
472 return -EINVAL; 472 return -EINVAL;
473 if (service_rc == 8 && service_rs == 72)
474 return -EINVAL;
473 zdev->online = 0; 475 zdev->online = 0;
474 return -EAGAIN; /* repeat the request on a different device. */ 476 return -EAGAIN; /* repeat the request on a different device. */
475 } 477 }
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index f932400e980a..0eb6eefd2c1a 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/miscdevice.h> 14#include <linux/miscdevice.h>
15#include <asm/compat.h>
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include "zfcp_def.h" 17#include "zfcp_def.h"
17#include "zfcp_ext.h" 18#include "zfcp_ext.h"
@@ -163,7 +164,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
163} 164}
164 165
165static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, 166static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
166 unsigned long buffer) 167 unsigned long arg)
167{ 168{
168 struct zfcp_cfdc_data *data; 169 struct zfcp_cfdc_data *data;
169 struct zfcp_cfdc_data __user *data_user; 170 struct zfcp_cfdc_data __user *data_user;
@@ -175,7 +176,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
175 if (command != ZFCP_CFDC_IOC) 176 if (command != ZFCP_CFDC_IOC)
176 return -ENOTTY; 177 return -ENOTTY;
177 178
178 data_user = (void __user *) buffer; 179 if (is_compat_task())
180 data_user = compat_ptr(arg);
181 else
182 data_user = (void __user *)arg;
183
179 if (!data_user) 184 if (!data_user)
180 return -EINVAL; 185 return -EINVAL;
181 186
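Note on the zfcp_cfdc_dev_ioctl() change above: for a 32-bit caller the ioctl argument carries a 32-bit user address, so it must go through compat_ptr() rather than a plain cast of the unsigned long. On most 64-bit kernels compat_ptr() amounts to keeping only the low 32 bits (s390's version additionally clears the top bit for 31-bit addressing). A userspace sketch of that conversion only, not kernel code; the address value is invented:

#include <stdint.h>
#include <stdio.h>

/* Roughly what compat_ptr() boils down to: keep only the 32 address bits
 * a 32-bit task could have passed.  Illustrative only. */
static void *compat_ptr_sketch(uint64_t arg)
{
	return (void *)(uintptr_t)(uint32_t)arg;
}

int main(void)
{
	/* A 32-bit caller handed in 0x80041000; the value the 64-bit handler
	 * sees may carry sign-extended upper bits. */
	uint64_t arg = 0xffffffff80041000ull;

	printf("plain cast  : %p\n", (void *)(uintptr_t)arg);
	printf("compat-style: %p\n", compat_ptr_sketch(arg));
	return 0;
}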
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 84450955ae11..7369c8911bcf 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -327,7 +327,7 @@ static void zfcp_dbf_hba_view_response(char **p,
327 break; 327 break;
328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
330 p += sprintf(*p, "\n"); 330 *p += sprintf(*p, "\n");
331 break; 331 break;
332 332
333 case FSF_QTCB_OPEN_PORT_WITH_DID: 333 case FSF_QTCB_OPEN_PORT_WITH_DID:
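Note on the one-character zfcp_dbf fix above: the view helpers append to a shared buffer through a char ** so the write position survives across calls. The old code bumped the local double pointer (p += ...) instead of the cursor it points to (*p += ...), so the newline was written but the cursor never advanced and the next formatter overwrote it. The pattern in isolation:

#include <stdio.h>

/* Each helper appends to a shared buffer and must advance the caller's
 * cursor; that is why it takes a char ** and must bump *p, not p. */
static void emit(char **p, const char *label, int value)
{
	*p += sprintf(*p, "%s=%d\n", label, value);
}

int main(void)
{
	char buf[128];
	char *cur = buf;

	emit(&cur, "scsi_cmnd", 7);
	emit(&cur, "scsi_serial", 42);

	printf("wrote %d bytes:\n%s", (int)(cur - buf), buf);
	return 0;
}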
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 03dec832b465..66bdb34143cb 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -108,6 +108,7 @@ extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
108extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 108extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
109extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 109extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
110extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 110extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
111extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
111 112
112/* zfcp_fsf.c */ 113/* zfcp_fsf.c */
113extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 114extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
@@ -129,9 +130,9 @@ extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
129extern int zfcp_fsf_status_read(struct zfcp_qdio *); 130extern int zfcp_fsf_status_read(struct zfcp_qdio *);
130extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 131extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
131extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, 132extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
132 mempool_t *); 133 mempool_t *, unsigned int);
133extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 134extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
134 struct zfcp_fsf_ct_els *); 135 struct zfcp_fsf_ct_els *, unsigned int);
135extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 136extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
136 struct scsi_cmnd *); 137 struct scsi_cmnd *);
137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 138extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ac5e3b7a3576..0f7b493fb105 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -258,7 +258,8 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; 258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
259 259
260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, 260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
261 adapter->pool.gid_pn_req); 261 adapter->pool.gid_pn_req,
262 ZFCP_FC_CTELS_TMO);
262 if (!ret) { 263 if (!ret) {
263 wait_for_completion(&completion); 264 wait_for_completion(&completion);
264 zfcp_fc_ns_gid_pn_eval(gid_pn); 265 zfcp_fc_ns_gid_pn_eval(gid_pn);
@@ -421,7 +422,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
421 hton24(adisc->adisc_req.adisc_port_id, 422 hton24(adisc->adisc_req.adisc_port_id,
422 fc_host_port_id(adapter->scsi_host)); 423 fc_host_port_id(adapter->scsi_host));
423 424
424 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); 425 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
426 ZFCP_FC_CTELS_TMO);
425 if (ret) 427 if (ret)
426 kmem_cache_free(zfcp_data.adisc_cache, adisc); 428 kmem_cache_free(zfcp_data.adisc_cache, adisc);
427 429
@@ -532,7 +534,8 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
532 ct->req = &gpn_ft->sg_req; 534 ct->req = &gpn_ft->sg_req;
533 ct->resp = gpn_ft->sg_resp; 535 ct->resp = gpn_ft->sg_resp;
534 536
535 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL); 537 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
538 ZFCP_FC_CTELS_TMO);
536 if (!ret) 539 if (!ret)
537 wait_for_completion(&completion); 540 wait_for_completion(&completion);
538 return ret; 541 return ret;
@@ -677,6 +680,44 @@ static void zfcp_fc_ct_els_job_handler(void *data)
677 job->job_done(job); 680 job->job_done(job);
678} 681}
679 682
683static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
684{
685 u32 preamble_word1;
686 u8 gs_type;
687 struct zfcp_adapter *adapter;
688
689 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
690 gs_type = (preamble_word1 & 0xff000000) >> 24;
691
692 adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
693
694 switch (gs_type) {
695 case FC_FST_ALIAS:
696 return &adapter->gs->as;
697 case FC_FST_MGMT:
698 return &adapter->gs->ms;
699 case FC_FST_TIME:
700 return &adapter->gs->ts;
701 break;
702 case FC_FST_DIR:
703 return &adapter->gs->ds;
704 break;
705 default:
706 return NULL;
707 }
708}
709
710static void zfcp_fc_ct_job_handler(void *data)
711{
712 struct fc_bsg_job *job = data;
713 struct zfcp_fc_wka_port *wka_port;
714
715 wka_port = zfcp_fc_job_wka_port(job);
716 zfcp_fc_wka_port_put(wka_port);
717
718 zfcp_fc_ct_els_job_handler(data);
719}
720
680static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, 721static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
681 struct zfcp_adapter *adapter) 722 struct zfcp_adapter *adapter)
682{ 723{
@@ -695,43 +736,27 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
695 } else 736 } else
696 d_id = ntoh24(job->request->rqst_data.h_els.port_id); 737 d_id = ntoh24(job->request->rqst_data.h_els.port_id);
697 738
698 return zfcp_fsf_send_els(adapter, d_id, els); 739 els->handler = zfcp_fc_ct_els_job_handler;
740 return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
699} 741}
700 742
701static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, 743static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
702 struct zfcp_adapter *adapter) 744 struct zfcp_adapter *adapter)
703{ 745{
704 int ret; 746 int ret;
705 u8 gs_type;
706 struct zfcp_fsf_ct_els *ct = job->dd_data; 747 struct zfcp_fsf_ct_els *ct = job->dd_data;
707 struct zfcp_fc_wka_port *wka_port; 748 struct zfcp_fc_wka_port *wka_port;
708 u32 preamble_word1;
709 749
710 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 750 wka_port = zfcp_fc_job_wka_port(job);
711 gs_type = (preamble_word1 & 0xff000000) >> 24; 751 if (!wka_port)
712 752 return -EINVAL;
713 switch (gs_type) {
714 case FC_FST_ALIAS:
715 wka_port = &adapter->gs->as;
716 break;
717 case FC_FST_MGMT:
718 wka_port = &adapter->gs->ms;
719 break;
720 case FC_FST_TIME:
721 wka_port = &adapter->gs->ts;
722 break;
723 case FC_FST_DIR:
724 wka_port = &adapter->gs->ds;
725 break;
726 default:
727 return -EINVAL; /* no such service */
728 }
729 753
730 ret = zfcp_fc_wka_port_get(wka_port); 754 ret = zfcp_fc_wka_port_get(wka_port);
731 if (ret) 755 if (ret)
732 return ret; 756 return ret;
733 757
734 ret = zfcp_fsf_send_ct(wka_port, ct, NULL); 758 ct->handler = zfcp_fc_ct_job_handler;
759 ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
735 if (ret) 760 if (ret)
736 zfcp_fc_wka_port_put(wka_port); 761 zfcp_fc_wka_port_put(wka_port);
737 762
@@ -752,7 +777,6 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
752 777
753 ct_els->req = job->request_payload.sg_list; 778 ct_els->req = job->request_payload.sg_list;
754 ct_els->resp = job->reply_payload.sg_list; 779 ct_els->resp = job->reply_payload.sg_list;
755 ct_els->handler = zfcp_fc_ct_els_job_handler;
756 ct_els->handler_data = job; 780 ct_els->handler_data = job;
757 781
758 switch (job->request->msgcode) { 782 switch (job->request->msgcode) {
@@ -767,6 +791,12 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
767 } 791 }
768} 792}
769 793
794int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
795{
796 /* hardware tracks timeout, reset bsg timeout to not interfere */
797 return -EAGAIN;
798}
799
770int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) 800int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
771{ 801{
772 struct zfcp_fc_wka_ports *wka_ports; 802 struct zfcp_fc_wka_ports *wka_ports;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index cb2a3669a384..0747b087390d 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -27,6 +27,8 @@
27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ 27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) 28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
29 29
30#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
31
30/** 32/**
31 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request 33 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
32 * @ct_hdr: FC GS common transport header 34 * @ct_hdr: FC GS common transport header
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 482dcd97aa5d..e8fb4d9baa8b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1068,20 +1068,20 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1069 struct scatterlist *sg_req, 1069 struct scatterlist *sg_req,
1070 struct scatterlist *sg_resp, 1070 struct scatterlist *sg_resp,
1071 int max_sbals) 1071 int max_sbals, unsigned int timeout)
1072{ 1072{
1073 int ret; 1073 int ret;
1074 unsigned int fcp_chan_timeout;
1075 1074
1076 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); 1075 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
1077 if (ret) 1076 if (ret)
1078 return ret; 1077 return ret;
1079 1078
1080 /* common settings for ct/gs and els requests */ 1079 /* common settings for ct/gs and els requests */
1081 fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000; 1080 if (timeout > 255)
1081 timeout = 255; /* max value accepted by hardware */
1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1083 req->qtcb->bottom.support.timeout = fcp_chan_timeout; 1083 req->qtcb->bottom.support.timeout = timeout;
1084 zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ); 1084 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1085 1085
1086 return 0; 1086 return 0;
1087} 1087}
@@ -1092,7 +1092,8 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1093 */ 1093 */
1094int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1094int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1095 struct zfcp_fsf_ct_els *ct, mempool_t *pool) 1095 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1096 unsigned int timeout)
1096{ 1097{
1097 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1098 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1098 struct zfcp_fsf_req *req; 1099 struct zfcp_fsf_req *req;
@@ -1111,7 +1112,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1111 1112
1112 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1113 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1113 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, 1114 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1114 FSF_MAX_SBALS_PER_REQ); 1115 FSF_MAX_SBALS_PER_REQ, timeout);
1115 if (ret) 1116 if (ret)
1116 goto failed_send; 1117 goto failed_send;
1117 1118
@@ -1188,7 +1189,7 @@ skip_fsfstatus:
1188 * @els: pointer to struct zfcp_send_els with data for the command 1189 * @els: pointer to struct zfcp_send_els with data for the command
1189 */ 1190 */
1190int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1191int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1191 struct zfcp_fsf_ct_els *els) 1192 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1192{ 1193{
1193 struct zfcp_fsf_req *req; 1194 struct zfcp_fsf_req *req;
1194 struct zfcp_qdio *qdio = adapter->qdio; 1195 struct zfcp_qdio *qdio = adapter->qdio;
@@ -1206,7 +1207,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1206 } 1207 }
1207 1208
1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1209 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1209 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2); 1210 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
1210 1211
1211 if (ret) 1212 if (ret)
1212 goto failed_send; 1213 goto failed_send;
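Note on the zfcp_fsf changes above: the CT/ELS timeout is no longer hard-coded to 2 * R_A_TOV; callers (including bsg jobs) pass their own value, which is clamped to the 255-second maximum the channel accepts, and the software recovery timer is armed with 10 seconds of headroom beyond it. A standalone sketch of that clamp-and-arm step; HZ and the struct below are stand-ins, not the zfcp definitions:

#include <stdio.h>

#define HW_TIMEOUT_MAX	255u	/* max value accepted by the hardware */
#define HZ		100u	/* illustrative tick rate */

struct req_timeouts {
	unsigned int hw_timeout;	/* seconds programmed into the request */
	unsigned int sw_timer_ticks;	/* software recovery timer, in ticks */
};

static struct req_timeouts setup_timeouts(unsigned int timeout)
{
	struct req_timeouts t;

	if (timeout > HW_TIMEOUT_MAX)
		timeout = HW_TIMEOUT_MAX;
	t.hw_timeout = timeout;
	t.sw_timer_ticks = (timeout + 10) * HZ;	/* fire only after the channel */
	return t;
}

int main(void)
{
	struct req_timeouts a = setup_timeouts(20);	/* ~2 * R_A_TOV in seconds */
	struct req_timeouts b = setup_timeouts(1000);	/* bsg job asked for too much */

	printf("a: hw=%us, sw timer=%u ticks\n", a.hw_timeout, a.sw_timer_ticks);
	printf("b: hw=%us, sw timer=%u ticks\n", b.hw_timeout, b.sw_timer_ticks);
	return 0;
}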
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 771cc536a989..8e6fc68d6bd4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -652,6 +652,7 @@ struct fc_function_template zfcp_transport_functions = {
652 .show_host_port_state = 1, 652 .show_host_port_state = 1,
653 .show_host_active_fc4s = 1, 653 .show_host_active_fc4s = 1,
654 .bsg_request = zfcp_fc_exec_bsg_job, 654 .bsg_request = zfcp_fc_exec_bsg_job,
655 .bsg_timeout = zfcp_fc_timeout_bsg_job,
655 /* no functions registered for following dynamic attributes but 656 /* no functions registered for following dynamic attributes but
656 directly set by LLDD */ 657 directly set by LLDD */
657 .show_host_port_type = 1, 658 .show_host_port_type = 1,
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 2a889853a106..7e26ebc26661 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
293 status = -EINVAL; 293 status = -EINVAL;
294 } 294 }
295 } 295 }
296 aac_fib_complete(fibptr); 296 /* Do not set XferState to zero unless receives a response from F/W */
297 if (status >= 0)
298 aac_fib_complete(fibptr);
299
297 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 300 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
298 if (status >= 0) { 301 if (status >= 0) {
299 if ((aac_commit == 1) || commit_flag) { 302 if ((aac_commit == 1) || commit_flag) {
@@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
310 FsaNormal, 313 FsaNormal,
311 1, 1, 314 1, 1,
312 NULL, NULL); 315 NULL, NULL);
313 aac_fib_complete(fibptr); 316 /* Do not set XferState to zero unless
317 * receives a response from F/W */
318 if (status >= 0)
319 aac_fib_complete(fibptr);
314 } else if (aac_commit == 0) { 320 } else if (aac_commit == 0) {
315 printk(KERN_WARNING 321 printk(KERN_WARNING
316 "aac_get_config_status: Foreign device configurations are being ignored\n"); 322 "aac_get_config_status: Foreign device configurations are being ignored\n");
317 } 323 }
318 } 324 }
319 aac_fib_free(fibptr); 325 /* FIB should be freed only after getting the response from the F/W */
326 if (status != -ERESTARTSYS)
327 aac_fib_free(fibptr);
320 return status; 328 return status;
321} 329}
322 330
@@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev)
355 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 363 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
356 aac_fib_complete(fibptr); 364 aac_fib_complete(fibptr);
357 } 365 }
358 aac_fib_free(fibptr); 366 /* FIB should be freed only after getting the response from the F/W */
367 if (status != -ERESTARTSYS)
368 aac_fib_free(fibptr);
359 369
360 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 370 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
361 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 371 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
@@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
1245 NULL); 1255 NULL);
1246 1256
1247 if (rcode < 0) { 1257 if (rcode < 0) {
1248 aac_fib_complete(fibptr); 1258 /* FIB should be freed only after
1249 aac_fib_free(fibptr); 1259 * getting the response from the F/W */
1260 if (rcode != -ERESTARTSYS) {
1261 aac_fib_complete(fibptr);
1262 aac_fib_free(fibptr);
1263 }
1250 return rcode; 1264 return rcode;
1251 } 1265 }
1252 memcpy(&dev->adapter_info, info, sizeof(*info)); 1266 memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
1270 1284
1271 if (rcode >= 0) 1285 if (rcode >= 0)
1272 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); 1286 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
1287 if (rcode == -ERESTARTSYS) {
1288 fibptr = aac_fib_alloc(dev);
1289 if (!fibptr)
1290 return -ENOMEM;
1291 }
1292
1273 } 1293 }
1274 1294
1275 1295
@@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
1470 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 1490 (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
1471 } 1491 }
1472 } 1492 }
1473 1493 /* FIB should be freed only after getting the response from the F/W */
1474 aac_fib_complete(fibptr); 1494 if (rcode != -ERESTARTSYS) {
1475 aac_fib_free(fibptr); 1495 aac_fib_complete(fibptr);
1496 aac_fib_free(fibptr);
1497 }
1476 1498
1477 return rcode; 1499 return rcode;
1478} 1500}
@@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
1633 * Alocate and initialize a Fib 1655 * Alocate and initialize a Fib
1634 */ 1656 */
1635 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1657 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1658 printk(KERN_WARNING "aac_read: fib allocation failed\n");
1636 return -1; 1659 return -1;
1637 } 1660 }
1638 1661
@@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1712 * Allocate and initialize a Fib then setup a BlockWrite command 1735 * Allocate and initialize a Fib then setup a BlockWrite command
1713 */ 1736 */
1714 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1737 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
 1715 scsicmd->result = DID_ERROR << 16; 1738 /* FIB temporarily unavailable, not catastrophic failure */
1716 scsicmd->scsi_done(scsicmd); 1739
1717 return 0; 1740 /* scsicmd->result = DID_ERROR << 16;
1741 * scsicmd->scsi_done(scsicmd);
1742 * return 0;
1743 */
1744 printk(KERN_WARNING "aac_write: fib allocation failed\n");
1745 return -1;
1718 } 1746 }
1719 1747
1720 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); 1748 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 83986ed86556..619c02d9c862 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 2461 15# define AAC_DRIVER_BUILD 24702
16# define AAC_DRIVER_BRANCH "-ms" 16# define AAC_DRIVER_BRANCH "-ms"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
@@ -1036,6 +1036,9 @@ struct aac_dev
1036 u8 printf_enabled; 1036 u8 printf_enabled;
1037 u8 in_reset; 1037 u8 in_reset;
1038 u8 msi; 1038 u8 msi;
1039 int management_fib_count;
1040 spinlock_t manage_lock;
1041
1039}; 1042};
1040 1043
1041#define aac_adapter_interrupt(dev) \ 1044#define aac_adapter_interrupt(dev) \
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 0391d759dfdb..9c0c91178538 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -153,7 +153,7 @@ cleanup:
153 fibptr->hw_fib_pa = hw_fib_pa; 153 fibptr->hw_fib_pa = hw_fib_pa;
154 fibptr->hw_fib_va = hw_fib; 154 fibptr->hw_fib_va = hw_fib;
155 } 155 }
156 if (retval != -EINTR) 156 if (retval != -ERESTARTSYS)
157 aac_fib_free(fibptr); 157 aac_fib_free(fibptr);
158 return retval; 158 return retval;
159} 159}
@@ -322,7 +322,7 @@ return_fib:
322 } 322 }
323 if (f.wait) { 323 if (f.wait) {
324 if(down_interruptible(&fibctx->wait_sem) < 0) { 324 if(down_interruptible(&fibctx->wait_sem) < 0) {
325 status = -EINTR; 325 status = -ERESTARTSYS;
326 } else { 326 } else {
327 /* Lock again and retry */ 327 /* Lock again and retry */
328 spin_lock_irqsave(&dev->fib_lock, flags); 328 spin_lock_irqsave(&dev->fib_lock, flags);
@@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
593 u64 addr; 593 u64 addr;
594 void* p; 594 void* p;
595 if (upsg->sg[i].count > 595 if (upsg->sg[i].count >
596 (dev->adapter_info.options & 596 ((dev->adapter_info.options &
597 AAC_OPT_NEW_COMM) ? 597 AAC_OPT_NEW_COMM) ?
598 (dev->scsi_host_ptr->max_sectors << 9) : 598 (dev->scsi_host_ptr->max_sectors << 9) :
599 65536) { 599 65536)) {
600 rcode = -EINVAL; 600 rcode = -EINVAL;
601 goto cleanup; 601 goto cleanup;
602 } 602 }
@@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
645 u64 addr; 645 u64 addr;
646 void* p; 646 void* p;
647 if (usg->sg[i].count > 647 if (usg->sg[i].count >
648 (dev->adapter_info.options & 648 ((dev->adapter_info.options &
649 AAC_OPT_NEW_COMM) ? 649 AAC_OPT_NEW_COMM) ?
650 (dev->scsi_host_ptr->max_sectors << 9) : 650 (dev->scsi_host_ptr->max_sectors << 9) :
651 65536) { 651 65536)) {
652 rcode = -EINVAL; 652 rcode = -EINVAL;
653 goto cleanup; 653 goto cleanup;
654 } 654 }
@@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
695 uintptr_t addr; 695 uintptr_t addr;
696 void* p; 696 void* p;
697 if (usg->sg[i].count > 697 if (usg->sg[i].count >
698 (dev->adapter_info.options & 698 ((dev->adapter_info.options &
699 AAC_OPT_NEW_COMM) ? 699 AAC_OPT_NEW_COMM) ?
700 (dev->scsi_host_ptr->max_sectors << 9) : 700 (dev->scsi_host_ptr->max_sectors << 9) :
701 65536) { 701 65536)) {
702 rcode = -EINVAL; 702 rcode = -EINVAL;
703 goto cleanup; 703 goto cleanup;
704 } 704 }
@@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
734 dma_addr_t addr; 734 dma_addr_t addr;
735 void* p; 735 void* p;
736 if (upsg->sg[i].count > 736 if (upsg->sg[i].count >
737 (dev->adapter_info.options & 737 ((dev->adapter_info.options &
738 AAC_OPT_NEW_COMM) ? 738 AAC_OPT_NEW_COMM) ?
739 (dev->scsi_host_ptr->max_sectors << 9) : 739 (dev->scsi_host_ptr->max_sectors << 9) :
740 65536) { 740 65536)) {
741 rcode = -EINVAL; 741 rcode = -EINVAL;
742 goto cleanup; 742 goto cleanup;
743 } 743 }
@@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
772 psg->count = cpu_to_le32(sg_indx+1); 772 psg->count = cpu_to_le32(sg_indx+1);
773 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 773 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
774 } 774 }
775 if (status == -EINTR) { 775 if (status == -ERESTARTSYS) {
776 rcode = -EINTR; 776 rcode = -ERESTARTSYS;
777 goto cleanup; 777 goto cleanup;
778 } 778 }
779 779
@@ -810,7 +810,7 @@ cleanup:
810 for(i=0; i <= sg_indx; i++){ 810 for(i=0; i <= sg_indx; i++){
811 kfree(sg_list[i]); 811 kfree(sg_list[i]);
812 } 812 }
813 if (rcode != -EINTR) { 813 if (rcode != -ERESTARTSYS) {
814 aac_fib_complete(srbfib); 814 aac_fib_complete(srbfib);
815 aac_fib_free(srbfib); 815 aac_fib_free(srbfib);
816 } 816 }
@@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
848 */ 848 */
849 849
850 status = aac_dev_ioctl(dev, cmd, arg); 850 status = aac_dev_ioctl(dev, cmd, arg);
851 if(status != -ENOTTY) 851 if (status != -ENOTTY)
852 return status; 852 return status;
853 853
854 switch (cmd) { 854 switch (cmd) {
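Note on the repeated sg-count hunks in aac_send_raw_srb() above: the added parentheses matter because the conditional operator binds more loosely than '>'. Without them the expression parses as (count > (options & AAC_OPT_NEW_COMM)) ? limit1 : limit2, which evaluates to one of the two non-zero limits, so the surrounding if () took the -EINVAL path regardless of count instead of comparing count against the selected limit. A compilable illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int count = 1000;	/* well under either limit */
	unsigned int opts = 0;		/* AAC_OPT_NEW_COMM-style flag not set */
	unsigned int big_limit = 1u << 20;
	unsigned int small_limit = 65536;

	/* Old shape: parses as ((count > (opts & 1)) ? big : small), which is
	 * always non-zero, so an if () on it always took the error path. */
	int old_cond = (count > (opts & 1) ? big_limit : small_limit) != 0;

	/* Fixed shape: pick the limit first, then compare count against it. */
	int new_cond = count > ((opts & 1) ? big_limit : small_limit);

	printf("old condition fires: %d, fixed condition fires: %d\n",
	       old_cond, new_cond);
	return 0;
}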
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 666d5151d628..a7261486ccd4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev)
194 194
195 if (status >= 0) 195 if (status >= 0)
196 aac_fib_complete(fibctx); 196 aac_fib_complete(fibctx);
197 aac_fib_free(fibctx); 197 /* FIB should be freed only after getting the response from the F/W */
198 if (status != -ERESTARTSYS)
199 aac_fib_free(fibctx);
198 return status; 200 return status;
199} 201}
200 202
@@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
304 /* 306 /*
305 * Check the preferred comm settings, defaults from template. 307 * Check the preferred comm settings, defaults from template.
306 */ 308 */
309 dev->management_fib_count = 0;
310 spin_lock_init(&dev->manage_lock);
307 dev->max_fib_size = sizeof(struct hw_fib); 311 dev->max_fib_size = sizeof(struct hw_fib);
308 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 312 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
309 - sizeof(struct aac_fibhdr) 313 - sizeof(struct aac_fibhdr)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 956261f25181..94d2954d79ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
189 189
190void aac_fib_free(struct fib *fibptr) 190void aac_fib_free(struct fib *fibptr)
191{ 191{
192 unsigned long flags; 192 unsigned long flags, flagsv;
193
194 spin_lock_irqsave(&fibptr->event_lock, flagsv);
195 if (fibptr->done == 2) {
196 spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
197 return;
198 }
199 spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
193 200
194 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 201 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
195 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 202 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
390 struct hw_fib * hw_fib = fibptr->hw_fib_va; 397 struct hw_fib * hw_fib = fibptr->hw_fib_va;
391 unsigned long flags = 0; 398 unsigned long flags = 0;
392 unsigned long qflags; 399 unsigned long qflags;
400 unsigned long mflags = 0;
401
393 402
394 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 403 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
395 return -EBUSY; 404 return -EBUSY;
@@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
471 if (!dev->queues) 480 if (!dev->queues)
472 return -EBUSY; 481 return -EBUSY;
473 482
474 if(wait) 483 if (wait) {
484
485 spin_lock_irqsave(&dev->manage_lock, mflags);
486 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
487 printk(KERN_INFO "No management Fibs Available:%d\n",
488 dev->management_fib_count);
489 spin_unlock_irqrestore(&dev->manage_lock, mflags);
490 return -EBUSY;
491 }
492 dev->management_fib_count++;
493 spin_unlock_irqrestore(&dev->manage_lock, mflags);
475 spin_lock_irqsave(&fibptr->event_lock, flags); 494 spin_lock_irqsave(&fibptr->event_lock, flags);
476 aac_adapter_deliver(fibptr); 495 }
496
497 if (aac_adapter_deliver(fibptr) != 0) {
498 printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
499 if (wait) {
500 spin_unlock_irqrestore(&fibptr->event_lock, flags);
501 spin_lock_irqsave(&dev->manage_lock, mflags);
502 dev->management_fib_count--;
503 spin_unlock_irqrestore(&dev->manage_lock, mflags);
504 }
505 return -EBUSY;
506 }
507
477 508
478 /* 509 /*
479 * If the caller wanted us to wait for response wait now. 510 * If the caller wanted us to wait for response wait now.
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
516 udelay(5); 547 udelay(5);
517 } 548 }
518 } else if (down_interruptible(&fibptr->event_wait)) { 549 } else if (down_interruptible(&fibptr->event_wait)) {
519 fibptr->done = 2; 550 /* Do nothing ... satisfy
520 up(&fibptr->event_wait); 551 * down_interruptible must_check */
521 } 552 }
553
522 spin_lock_irqsave(&fibptr->event_lock, flags); 554 spin_lock_irqsave(&fibptr->event_lock, flags);
523 if ((fibptr->done == 0) || (fibptr->done == 2)) { 555 if (fibptr->done == 0) {
524 fibptr->done = 2; /* Tell interrupt we aborted */ 556 fibptr->done = 2; /* Tell interrupt we aborted */
525 spin_unlock_irqrestore(&fibptr->event_lock, flags); 557 spin_unlock_irqrestore(&fibptr->event_lock, flags);
526 return -EINTR; 558 return -ERESTARTSYS;
527 } 559 }
528 spin_unlock_irqrestore(&fibptr->event_lock, flags); 560 spin_unlock_irqrestore(&fibptr->event_lock, flags);
529 BUG_ON(fibptr->done == 0); 561 BUG_ON(fibptr->done == 0);
@@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
689 721
690int aac_fib_complete(struct fib *fibptr) 722int aac_fib_complete(struct fib *fibptr)
691{ 723{
724 unsigned long flags;
692 struct hw_fib * hw_fib = fibptr->hw_fib_va; 725 struct hw_fib * hw_fib = fibptr->hw_fib_va;
693 726
694 /* 727 /*
@@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr)
709 * command is complete that we had sent to the adapter and this 742 * command is complete that we had sent to the adapter and this
710 * cdb could be reused. 743 * cdb could be reused.
711 */ 744 */
745 spin_lock_irqsave(&fibptr->event_lock, flags);
746 if (fibptr->done == 2) {
747 spin_unlock_irqrestore(&fibptr->event_lock, flags);
748 return 0;
749 }
750 spin_unlock_irqrestore(&fibptr->event_lock, flags);
751
712 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && 752 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
713 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) 753 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
714 { 754 {
@@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
1355 1395
1356 if (status >= 0) 1396 if (status >= 0)
1357 aac_fib_complete(fibctx); 1397 aac_fib_complete(fibctx);
1358 aac_fib_free(fibctx); 1398 /* FIB should be freed only after getting
1399 * the response from the F/W */
1400 if (status != -ERESTARTSYS)
1401 aac_fib_free(fibctx);
1359 } 1402 }
1360 } 1403 }
1361 1404
@@ -1759,6 +1802,7 @@ int aac_command_thread(void *data)
1759 struct fib *fibptr; 1802 struct fib *fibptr;
1760 1803
1761 if ((fibptr = aac_fib_alloc(dev))) { 1804 if ((fibptr = aac_fib_alloc(dev))) {
1805 int status;
1762 __le32 *info; 1806 __le32 *info;
1763 1807
1764 aac_fib_init(fibptr); 1808 aac_fib_init(fibptr);
@@ -1769,15 +1813,21 @@ int aac_command_thread(void *data)
1769 1813
1770 *info = cpu_to_le32(now.tv_sec); 1814 *info = cpu_to_le32(now.tv_sec);
1771 1815
1772 (void)aac_fib_send(SendHostTime, 1816 status = aac_fib_send(SendHostTime,
1773 fibptr, 1817 fibptr,
1774 sizeof(*info), 1818 sizeof(*info),
1775 FsaNormal, 1819 FsaNormal,
1776 1, 1, 1820 1, 1,
1777 NULL, 1821 NULL,
1778 NULL); 1822 NULL);
1779 aac_fib_complete(fibptr); 1823 /* Do not set XferState to zero unless
1780 aac_fib_free(fibptr); 1824 * receives a response from F/W */
1825 if (status >= 0)
1826 aac_fib_complete(fibptr);
1827 /* FIB should be freed only after
1828 * getting the response from the F/W */
1829 if (status != -ERESTARTSYS)
1830 aac_fib_free(fibptr);
1781 } 1831 }
1782 difference = (long)(unsigned)update_interval*HZ; 1832 difference = (long)(unsigned)update_interval*HZ;
1783 } else { 1833 } else {
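Note on the manage_lock/management_fib_count additions above: a waiting management FIB is delivered only while the count is below AAC_NUM_MGT_FIB, the count is rolled back if delivery fails, and the response paths decrement it on completion. A userspace sketch of that bounded, lock-protected accounting; the cap and names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

#define MGT_FIB_MAX	8	/* illustrative cap, in the role of AAC_NUM_MGT_FIB */

static pthread_mutex_t manage_lock = PTHREAD_MUTEX_INITIALIZER;
static int management_fib_count;

/* Take a slot before delivering a waiting management request. */
static int reserve_mgt_fib(void)
{
	int ok = 0;

	pthread_mutex_lock(&manage_lock);
	if (management_fib_count < MGT_FIB_MAX) {
		management_fib_count++;
		ok = 1;
	}
	pthread_mutex_unlock(&manage_lock);
	return ok;
}

/* Give the slot back on completion or on a failed delivery. */
static void release_mgt_fib(void)
{
	pthread_mutex_lock(&manage_lock);
	management_fib_count--;
	pthread_mutex_unlock(&manage_lock);
}

int main(void)
{
	int issued = 0, refused = 0;
	int i;

	for (i = 0; i < 12; i++) {
		if (reserve_mgt_fib())
			issued++;	/* would deliver the FIB here */
		else
			refused++;	/* caller would see -EBUSY */
	}
	for (i = 0; i < issued; i++)
		release_mgt_fib();	/* response path decrements */

	printf("refused %d of 12, count back to %d\n",
	       refused, management_fib_count);
	return 0;
}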
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index abc9ef5d1b10..9c7408fe8c7d 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q)
57 struct hw_fib * hwfib; 57 struct hw_fib * hwfib;
58 struct fib * fib; 58 struct fib * fib;
59 int consumed = 0; 59 int consumed = 0;
60 unsigned long flags; 60 unsigned long flags, mflags;
61 61
62 spin_lock_irqsave(q->lock, flags); 62 spin_lock_irqsave(q->lock, flags);
63 /* 63 /*
64 * Keep pulling response QEs off the response queue and waking 64 * Keep pulling response QEs off the response queue and waking
65 * up the waiters until there are no more QEs. We then return 65 * up the waiters until there are no more QEs. We then return
@@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q)
125 } else { 125 } else {
126 unsigned long flagv; 126 unsigned long flagv;
127 spin_lock_irqsave(&fib->event_lock, flagv); 127 spin_lock_irqsave(&fib->event_lock, flagv);
128 if (!fib->done) 128 if (!fib->done) {
129 fib->done = 1; 129 fib->done = 1;
130 up(&fib->event_wait); 130 up(&fib->event_wait);
131 }
131 spin_unlock_irqrestore(&fib->event_lock, flagv); 132 spin_unlock_irqrestore(&fib->event_lock, flagv);
133
134 spin_lock_irqsave(&dev->manage_lock, mflags);
135 dev->management_fib_count--;
136 spin_unlock_irqrestore(&dev->manage_lock, mflags);
137
132 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 138 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
133 if (fib->done == 2) { 139 if (fib->done == 2) {
140 spin_lock_irqsave(&fib->event_lock, flagv);
141 fib->done = 0;
142 spin_unlock_irqrestore(&fib->event_lock, flagv);
134 aac_fib_complete(fib); 143 aac_fib_complete(fib);
135 aac_fib_free(fib); 144 aac_fib_free(fib);
136 } 145 }
@@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
232 241
233unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) 242unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
234{ 243{
244 unsigned long mflags;
235 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); 245 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
236 if ((index & 0x00000002L)) { 246 if ((index & 0x00000002L)) {
237 struct hw_fib * hw_fib; 247 struct hw_fib * hw_fib;
@@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
320 unsigned long flagv; 330 unsigned long flagv;
321 dprintk((KERN_INFO "event_wait up\n")); 331 dprintk((KERN_INFO "event_wait up\n"));
322 spin_lock_irqsave(&fib->event_lock, flagv); 332 spin_lock_irqsave(&fib->event_lock, flagv);
323 if (!fib->done) 333 if (!fib->done) {
324 fib->done = 1; 334 fib->done = 1;
325 up(&fib->event_wait); 335 up(&fib->event_wait);
336 }
326 spin_unlock_irqrestore(&fib->event_lock, flagv); 337 spin_unlock_irqrestore(&fib->event_lock, flagv);
338
339 spin_lock_irqsave(&dev->manage_lock, mflags);
340 dev->management_fib_count--;
341 spin_unlock_irqrestore(&dev->manage_lock, mflags);
342
327 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 343 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
344 if (fib->done == 2) {
345 spin_lock_irqsave(&fib->event_lock, flagv);
346 fib->done = 0;
347 spin_unlock_irqrestore(&fib->event_lock, flagv);
348 aac_fib_complete(fib);
349 aac_fib_free(fib);
350 }
351
328 } 352 }
329 return 0; 353 return 0;
330 } 354 }
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 4d419c155ce9..78971db5b60e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3171 tinfo->curr.transport_version = 2; 3171 tinfo->curr.transport_version = 2;
3172 tinfo->goal.transport_version = 2; 3172 tinfo->goal.transport_version = 2;
3173 tinfo->goal.ppr_options = 0; 3173 tinfo->goal.ppr_options = 0;
3174 /* 3174 if (scb != NULL) {
3175 * Remove any SCBs in the waiting for selection 3175 /*
3176 * queue that may also be for this target so 3176 * Remove any SCBs in the waiting
3177 * that command ordering is preserved. 3177 * for selection queue that may
3178 */ 3178 * also be for this target so that
3179 ahd_freeze_devq(ahd, scb); 3179 * command ordering is preserved.
3180 ahd_qinfifo_requeue_tail(ahd, scb); 3180 */
3181 ahd_freeze_devq(ahd, scb);
3182 ahd_qinfifo_requeue_tail(ahd, scb);
3183 }
3181 printerror = 0; 3184 printerror = 0;
3182 } 3185 }
3183 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) 3186 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3194 MSG_EXT_WDTR_BUS_8_BIT, 3197 MSG_EXT_WDTR_BUS_8_BIT,
3195 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3198 AHD_TRANS_CUR|AHD_TRANS_GOAL,
3196 /*paused*/TRUE); 3199 /*paused*/TRUE);
3197 /* 3200 if (scb != NULL) {
3198 * Remove any SCBs in the waiting for selection 3201 /*
3199 * queue that may also be for this target so that 3202 * Remove any SCBs in the waiting for
3200 * command ordering is preserved. 3203 * selection queue that may also be for
3201 */ 3204 * this target so that command ordering
3202 ahd_freeze_devq(ahd, scb); 3205 * is preserved.
3203 ahd_qinfifo_requeue_tail(ahd, scb); 3206 */
3207 ahd_freeze_devq(ahd, scb);
3208 ahd_qinfifo_requeue_tail(ahd, scb);
3209 }
3204 printerror = 0; 3210 printerror = 0;
3205 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) 3211 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
3206 && ppr_busfree == 0) { 3212 && ppr_busfree == 0) {
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3217 /*ppr_options*/0, 3223 /*ppr_options*/0,
3218 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3224 AHD_TRANS_CUR|AHD_TRANS_GOAL,
3219 /*paused*/TRUE); 3225 /*paused*/TRUE);
3220 /* 3226 if (scb != NULL) {
3221 * Remove any SCBs in the waiting for selection 3227 /*
3222 * queue that may also be for this target so that 3228 * Remove any SCBs in the waiting for
3223 * command ordering is preserved. 3229 * selection queue that may also be for
3224 */ 3230 * this target so that command ordering
3225 ahd_freeze_devq(ahd, scb); 3231 * is preserved.
3226 ahd_qinfifo_requeue_tail(ahd, scb); 3232 */
3233 ahd_freeze_devq(ahd, scb);
3234 ahd_qinfifo_requeue_tail(ahd, scb);
3235 }
3227 printerror = 0; 3236 printerror = 0;
3228 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 3237 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
3229 && ahd_sent_msg(ahd, AHDMSG_1B, 3238 && ahd_sent_msg(ahd, AHDMSG_1B,
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
3251 * the message phases. We check it last in case we 3260 * the message phases. We check it last in case we
3252 * had to send some other message that caused a busfree. 3261 * had to send some other message that caused a busfree.
3253 */ 3262 */
3254 if (printerror != 0 3263 if (scb != NULL && printerror != 0
3255 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) 3264 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
3256 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { 3265 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
3257 3266
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2445e399fd60..2445e399fd60 100755..100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8a2a1c5935c6..8a2a1c5935c6 100755..100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 608e675f68c8..1263d9796e89 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1586,8 +1586,7 @@ typedef struct fc_port {
1586 */ 1586 */
1587#define FCF_FABRIC_DEVICE BIT_0 1587#define FCF_FABRIC_DEVICE BIT_0
1588#define FCF_LOGIN_NEEDED BIT_1 1588#define FCF_LOGIN_NEEDED BIT_1
1589#define FCF_TAPE_PRESENT BIT_2 1589#define FCF_FCP2_DEVICE BIT_2
1590#define FCF_FCP2_DEVICE BIT_3
1591 1590
1592/* No loop ID flag. */ 1591/* No loop ID flag. */
1593#define FC_NO_LOOP_ID 0x1000 1592#define FC_NO_LOOP_ID 0x1000
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b4a0eac8f96d..3f8e8495b743 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -205,7 +205,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,

 	switch (data[0]) {
 	case MBS_COMMAND_COMPLETE:
-		if (fcport->flags & FCF_TAPE_PRESENT)
+		if (fcport->flags & FCF_FCP2_DEVICE)
 			opts |= BIT_1;
 		rval = qla2x00_get_port_database(vha, fcport, opts);
 		if (rval != QLA_SUCCESS)
@@ -2726,7 +2726,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)

 		/*
 		 * Logout all previous fabric devices marked lost, except
-		 * tape devices.
+		 * FCP2 devices.
 		 */
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
@@ -2739,7 +2739,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 				qla2x00_mark_device_lost(vha, fcport,
 				    ql2xplogiabsentdevice, 0);
 				if (fcport->loop_id != FC_NO_LOOP_ID &&
-				    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
+				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
 				    fcport->port_type != FCT_INITIATOR &&
 				    fcport->port_type != FCT_BROADCAST) {
 					ha->isp_ops->fabric_logout(vha,
@@ -3018,7 +3018,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 			fcport->d_id.b24 = new_fcport->d_id.b24;
 			fcport->flags |= FCF_LOGIN_NEEDED;
 			if (fcport->loop_id != FC_NO_LOOP_ID &&
-			    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
+			    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
 			    fcport->port_type != FCT_INITIATOR &&
 			    fcport->port_type != FCT_BROADCAST) {
 				ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3272,9 +3272,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,

 	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
 	if (rval == QLA_SUCCESS) {
-		/* Send an ADISC to tape devices.*/
+		/* Send an ADISC to FCP2 devices.*/
 		opts = 0;
-		if (fcport->flags & FCF_TAPE_PRESENT)
+		if (fcport->flags & FCF_FCP2_DEVICE)
 			opts |= BIT_1;
 		rval = qla2x00_get_port_database(vha, fcport, opts);
 		if (rval != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 209f50e788a1..8529eb1f3cd4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1188,7 +1188,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
 	scsi_qla_host_t *vha = shost_priv(sdev->host);
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
-	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
 	struct req_que *req = vha->req;

 	if (sdev->tagged_supported)
@@ -1197,8 +1196,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
 		scsi_deactivate_tcq(sdev, req->max_q_depth);

 	rport->dev_loss_tmo = ha->port_down_retry_count;
-	if (sdev->type == TYPE_TAPE)
-		fcport->flags |= FCF_TAPE_PRESENT;

 	return 0;
 }
@@ -2805,7 +2802,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)

 		fcport->login_retry--;
 		if (fcport->flags & FCF_FABRIC_DEVICE) {
-			if (fcport->flags & FCF_TAPE_PRESENT)
+			if (fcport->flags & FCF_FCP2_DEVICE)
 				ha->isp_ops->fabric_logout(vha,
 						fcport->loop_id,
 						fcport->d_id.b.domain,
@@ -3141,7 +3138,10 @@ qla2x00_timer(scsi_qla_host_t *vha)
 		if (!IS_QLA2100(ha) && vha->link_down_timeout)
 			atomic_set(&vha->loop_state, LOOP_DEAD);

-		/* Schedule an ISP abort to return any tape commands. */
+		/*
+		 * Schedule an ISP abort to return any FCP2-device
+		 * commands.
+		 */
 		/* NPIV - scan physical port only */
 		if (!vha->vp_idx) {
 			spin_lock_irqsave(&ha->hardware_lock,
@@ -3158,7 +3158,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 				if (sp->ctx)
 					continue;
 				sfcp = sp->fcport;
-				if (!(sfcp->flags & FCF_TAPE_PRESENT))
+				if (!(sfcp->flags & FCF_FCP2_DEVICE))
 					continue;

 				set_bit(ISP_ABORT_NEEDED,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 010e69b29afe..371dc895972a 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2292,11 +2292,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 	uint32_t faddr, left, burst;
 	struct qla_hw_data *ha = vha->hw;

+	if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
+		goto try_fast;
 	if (offset & 0xfff)
 		goto slow_read;
 	if (length < OPTROM_BURST_SIZE)
 		goto slow_read;

+try_fast:
 	optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
 	    &optrom_dma, GFP_KERNEL);
 	if (!optrom) {
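
With the two added lines above, ISP25xx/ISP81xx parts jump straight to the burst DMA read via the new try_fast label, while older parts still fall back to slow_read for unaligned offsets or lengths shorter than one burst. A self-contained sketch of that selection logic, with stand-in names for the chip checks and burst size:

    #include <stdbool.h>
    #include <stdio.h>

    #define BURST_SIZE 0x1000   /* stand-in for OPTROM_BURST_SIZE */

    /* true  -> take the burst DMA read ("try_fast")
     * false -> fall back to the byte-wise path ("slow_read") */
    static bool use_burst_read(bool new_isp, unsigned int offset, unsigned int length)
    {
    	if (new_isp)               /* IS_QLA25XX() || IS_QLA81XX() in the driver */
    		return true;
    	if (offset & 0xfff)        /* not 4 KiB aligned */
    		return false;
    	if (length < BURST_SIZE)   /* too small for one burst */
    		return false;
    	return true;
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       use_burst_read(true,  0x10, 0x40),    /* 1: new ISP always tries the fast path */
    	       use_burst_read(false, 0x10, 0x40),    /* 0: unaligned on an older ISP */
    	       use_burst_read(false, 0x0,  0x2000)); /* 1: aligned and large enough */
    	return 0;
    }
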
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a65dd95507c6..ed36279a33c1 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k9"
+#define QLA2XXX_VERSION      "8.03.01-k10"

 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d8927681ec88..c6642423cc67 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			 */
 			req->next_rq->resid_len = scsi_in(cmd)->resid;

+			scsi_release_buffers(cmd);
 			blk_end_request_all(req, 0);

-			scsi_release_buffers(cmd);
 			scsi_next_command(cmd);
 			return;
 		}
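
The scsi_lib.c reorder above frees the command's buffers before completing the bidirectional request instead of after, so completion never runs while the command still owns its scatter-gather resources. A generic user-space sketch of release-before-complete ordering (not the real SCSI midlayer API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Generic sketch: release per-command resources, then signal completion. */
    struct cmd { void *buffers; };

    static void release_buffers(struct cmd *c)
    {
    	free(c->buffers);
    	c->buffers = NULL;
    	printf("buffers released\n");
    }

    static void end_request(struct cmd *c)
    {
    	(void)c;
    	printf("request completed\n");
    }

    int main(void)
    {
    	struct cmd c = { .buffers = malloc(64) };

    	release_buffers(&c);   /* before the patch this ran after end_request() */
    	end_request(&c);
    	return 0;
    }
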
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index ddfcecd5099f..653f22a8deb9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3527,7 +3527,10 @@ fc_bsg_job_timeout(struct request *req)
 	if (!done && i->f->bsg_timeout) {
 		/* call LLDD to abort the i/o as it has timed out */
 		err = i->f->bsg_timeout(job);
-		if (err)
+		if (err == -EAGAIN) {
+			job->ref_cnt--;
+			return BLK_EH_RESET_TIMER;
+		} else if (err)
 			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
 				"abort failed with status %d\n", err);
 	}
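
The fc_bsg_job_timeout() change above treats -EAGAIN from the LLDD's bsg_timeout callback as "abort still in progress": the reference taken for this timeout is dropped and the block layer is asked to re-arm the timer rather than logging a failed abort. A simplified standalone sketch of that shape, with stand-in types and an invented callback (the real function returns enum blk_eh_timer_return):

    #include <errno.h>
    #include <stdio.h>

    enum eh_verdict { EH_HANDLED, EH_RESET_TIMER };   /* stand-in return values */

    struct job { int ref_cnt; };

    /* Invented stand-in for the LLDD abort callback; -EAGAIN means
     * "abort still in progress, ask me again later". */
    static int lld_abort(struct job *job)
    {
    	(void)job;
    	return -EAGAIN;
    }

    static enum eh_verdict job_timeout(struct job *job)
    {
    	int err = lld_abort(job);

    	if (err == -EAGAIN) {
    		job->ref_cnt--;           /* drop the reference taken for this timeout */
    		return EH_RESET_TIMER;    /* re-arm the timer instead of failing */
    	} else if (err) {
    		fprintf(stderr, "abort failed with status %d\n", err);
    	}
    	return EH_HANDLED;
    }

    int main(void)
    {
    	struct job j = { .ref_cnt = 2 };

    	printf("verdict=%d ref_cnt=%d\n", job_timeout(&j), j.ref_cnt);
    	return 0;
    }
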
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index df854401af2d..95421fa3b304 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -758,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = {
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
 	PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 088f32f29a6e..050ee147592f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -396,8 +396,8 @@ config SBC_FITPC2_WATCHDOG
 	tristate "Compulab SBC-FITPC2 watchdog"
 	depends on X86
 	---help---
-	  This is the driver for the built-in watchdog timer on the fit-PC2
-	  Single-board computer made by Compulab.
+	  This is the driver for the built-in watchdog timer on the fit-PC2,
+	  fit-PC2i, CM-iAM single-board computers made by Compulab.

 	  It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux.
 	  When "Watchdog Timer Value" enabled one can set 31-255 s operational range.
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index 4f4b35a20d84..3c79dc587958 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
+#include <linux/timer.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 91430a89107c..e6763d2a567b 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -46,9 +46,9 @@ static DEFINE_SPINLOCK(wdt_lock);
 static void wdt_send_data(unsigned char command, unsigned char data)
 {
 	outb(command, COMMAND_PORT);
-	mdelay(100);
+	msleep(100);
 	outb(data, DATA_PORT);
-	mdelay(200);
+	msleep(200);
 }

 static void wdt_enable(void)
@@ -202,11 +202,10 @@ static int __init fitpc2_wdt_init(void)
 {
 	int err;

-	if (strcmp("SBC-FITPC2", dmi_get_system_info(DMI_BOARD_NAME))) {
-		pr_info("board name is: %s. Should be SBC-FITPC2\n",
-			dmi_get_system_info(DMI_BOARD_NAME));
+	if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
 		return -ENODEV;
-	}
+
+	pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));

 	if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
 		pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
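
The probe change above replaces the exact strcmp() against "SBC-FITPC2" with a substring match, so longer DMI board names containing that string also pass, in line with the fit-PC2i/CM-iAM additions to the Kconfig help text. A plain C illustration of the substring test; the sample board-name strings are made up for the example:

    #include <stdio.h>
    #include <string.h>

    /* Accept any board name containing "SBC-FITPC2"; an exact-only
     * strcmp() would reject variant names. */
    static int board_supported(const char *board_name)
    {
    	return board_name && strstr(board_name, "SBC-FITPC2") != NULL;
    }

    int main(void)
    {
    	const char *names[] = { "SBC-FITPC2", "SBC-FITPC2i", "SBC-OTHER" };

    	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
    		printf("%-12s -> %s\n", names[i],
    		       board_supported(names[i]) ? "supported" : "not supported");
    	return 0;
    }
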