Diffstat (limited to 'drivers')
349 files changed, 4768 insertions, 2448 deletions
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle) | |||
935 | struct platform_device *dd; | 935 | struct platform_device *dd; |
936 | 936 | ||
937 | id = dock_station_count; | 937 | id = dock_station_count; |
938 | memset(&ds, 0, sizeof(ds)); | ||
938 | dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); | 939 | dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); |
939 | if (IS_ERR(dd)) | 940 | if (IS_ERR(dd)) |
940 | return PTR_ERR(dd); | 941 | return PTR_ERR(dd); |
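The dock.c fix above zeroes the on-stack dock station data before platform_device_register_data() copies sizeof(ds) bytes out of it. As a minimal, self-contained sketch of why that matters (hypothetical names, plain userspace C, not the ACPI code):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the dock platform data and for
 * platform_device_register_data(), which copies the whole structure. */
struct dock_data {
	int id;
	int flags;			/* suppose no caller ever assigns this */
};

static void register_data(const struct dock_data *src)
{
	struct dock_data copy;

	memcpy(&copy, src, sizeof(copy));	/* copies whatever src holds */
	printf("id=%d flags=%d\n", copy.id, copy.flags);
}

int main(void)
{
	struct dock_data ds;

	memset(&ds, 0, sizeof(ds));	/* without this, ds.flags is stack garbage */
	ds.id = 0;
	register_data(&ds);
	return 0;
}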
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f63b39..cc978a8c00b7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { | |||
110 | DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), | 110 | DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), |
111 | DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, | 111 | DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, |
112 | (void *)2}, | 112 | (void *)2}, |
113 | { set_max_cstate, "Pavilion zv5000", { | ||
114 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
115 | DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, | ||
116 | (void *)1}, | ||
117 | { set_max_cstate, "Asus L8400B", { | ||
118 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
119 | DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, | ||
120 | (void *)1}, | ||
113 | {}, | 121 | {}, |
114 | }; | 122 | }; |
115 | 123 | ||
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
872 | return(acpi_idle_enter_c1(dev, state)); | 880 | return(acpi_idle_enter_c1(dev, state)); |
873 | 881 | ||
874 | local_irq_disable(); | 882 | local_irq_disable(); |
875 | current_thread_info()->status &= ~TS_POLLING; | 883 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
876 | /* | 884 | current_thread_info()->status &= ~TS_POLLING; |
877 | * TS_POLLING-cleared state must be visible before we test | 885 | /* |
878 | * NEED_RESCHED: | 886 | * TS_POLLING-cleared state must be visible before we test |
879 | */ | 887 | * NEED_RESCHED: |
880 | smp_mb(); | 888 | */ |
889 | smp_mb(); | ||
890 | } | ||
881 | 891 | ||
882 | if (unlikely(need_resched())) { | 892 | if (unlikely(need_resched())) { |
883 | current_thread_info()->status |= TS_POLLING; | 893 | current_thread_info()->status |= TS_POLLING; |
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
957 | } | 967 | } |
958 | 968 | ||
959 | local_irq_disable(); | 969 | local_irq_disable(); |
960 | current_thread_info()->status &= ~TS_POLLING; | 970 | if (cx->entry_method != ACPI_CSTATE_FFH) { |
961 | /* | 971 | current_thread_info()->status &= ~TS_POLLING; |
962 | * TS_POLLING-cleared state must be visible before we test | 972 | /* |
963 | * NEED_RESCHED: | 973 | * TS_POLLING-cleared state must be visible before we test |
964 | */ | 974 | * NEED_RESCHED: |
965 | smp_mb(); | 975 | */ |
976 | smp_mb(); | ||
977 | } | ||
966 | 978 | ||
967 | if (unlikely(need_resched())) { | 979 | if (unlikely(need_resched())) { |
968 | current_thread_info()->status |= TS_POLLING; | 980 | current_thread_info()->status |= TS_POLLING; |
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7247819dbd80..e306ba9aa34e 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) | |||
125 | return status; | 125 | return status; |
126 | } | 126 | } |
127 | 127 | ||
128 | static int early_pdc_done; | ||
129 | |||
128 | void acpi_processor_set_pdc(acpi_handle handle) | 130 | void acpi_processor_set_pdc(acpi_handle handle) |
129 | { | 131 | { |
130 | struct acpi_object_list *obj_list; | 132 | struct acpi_object_list *obj_list; |
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle) | |||
132 | if (arch_has_acpi_pdc() == false) | 134 | if (arch_has_acpi_pdc() == false) |
133 | return; | 135 | return; |
134 | 136 | ||
137 | if (early_pdc_done) | ||
138 | return; | ||
139 | |||
135 | obj_list = acpi_processor_alloc_pdc(); | 140 | obj_list = acpi_processor_alloc_pdc(); |
136 | if (!obj_list) | 141 | if (!obj_list) |
137 | return; | 142 | return; |
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id) | |||
151 | return 0; | 156 | return 0; |
152 | } | 157 | } |
153 | 158 | ||
159 | static int param_early_pdc_optin(char *s) | ||
160 | { | ||
161 | early_pdc_optin = 1; | ||
162 | return 1; | ||
163 | } | ||
164 | __setup("acpi_early_pdc_eval", param_early_pdc_optin); | ||
165 | |||
154 | static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { | 166 | static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { |
155 | { | 167 | { |
156 | set_early_pdc_optin, "HP Envy", { | 168 | set_early_pdc_optin, "HP Envy", { |
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void) | |||
192 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | 204 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, |
193 | ACPI_UINT32_MAX, | 205 | ACPI_UINT32_MAX, |
194 | early_init_pdc, NULL, NULL, NULL); | 206 | early_init_pdc, NULL, NULL, NULL); |
207 | |||
208 | early_pdc_done = 1; | ||
195 | } | 209 | } |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2cabadcc4d8c..a959f6a07508 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr) | |||
413 | if (result) | 413 | if (result) |
414 | goto update_bios; | 414 | goto update_bios; |
415 | 415 | ||
416 | return 0; | 416 | /* We need to call _PPC once when cpufreq starts */ |
417 | if (ignore_ppc != 1) | ||
418 | result = acpi_processor_get_platform_limit(pr); | ||
419 | |||
420 | return result; | ||
417 | 421 | ||
418 | /* | 422 | /* |
419 | * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that | 423 | * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f6226085d..3e009674f333 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, | |||
1336 | 1336 | ||
1337 | if (child) | 1337 | if (child) |
1338 | *child = device; | 1338 | *child = device; |
1339 | return 0; | 1339 | |
1340 | if (device) | ||
1341 | return 0; | ||
1342 | else | ||
1343 | return -ENODEV; | ||
1340 | } | 1344 | } |
1341 | 1345 | ||
1346 | /* | ||
1347 | * acpi_bus_add and acpi_bus_start | ||
1348 | * | ||
1349 | * scan a given ACPI tree and (probably recently hot-plugged) | ||
1350 | * create and add or starts found devices. | ||
1351 | * | ||
1352 | * If no devices were found -ENODEV is returned which does not | ||
1353 | * mean that this is a real error, there just have been no suitable | ||
1354 | * ACPI objects in the table trunk from which the kernel could create | ||
1355 | * a device and add/start an appropriate driver. | ||
1356 | */ | ||
1357 | |||
1342 | int | 1358 | int |
1343 | acpi_bus_add(struct acpi_device **child, | 1359 | acpi_bus_add(struct acpi_device **child, |
1344 | struct acpi_device *parent, acpi_handle handle, int type) | 1360 | struct acpi_device *parent, acpi_handle handle, int type) |
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child, | |||
1348 | memset(&ops, 0, sizeof(ops)); | 1364 | memset(&ops, 0, sizeof(ops)); |
1349 | ops.acpi_op_add = 1; | 1365 | ops.acpi_op_add = 1; |
1350 | 1366 | ||
1351 | acpi_bus_scan(handle, &ops, child); | 1367 | return acpi_bus_scan(handle, &ops, child); |
1352 | return 0; | ||
1353 | } | 1368 | } |
1354 | EXPORT_SYMBOL(acpi_bus_add); | 1369 | EXPORT_SYMBOL(acpi_bus_add); |
1355 | 1370 | ||
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device) | |||
1357 | { | 1372 | { |
1358 | struct acpi_bus_ops ops; | 1373 | struct acpi_bus_ops ops; |
1359 | 1374 | ||
1375 | if (!device) | ||
1376 | return -EINVAL; | ||
1377 | |||
1360 | memset(&ops, 0, sizeof(ops)); | 1378 | memset(&ops, 0, sizeof(ops)); |
1361 | ops.acpi_op_start = 1; | 1379 | ops.acpi_op_start = 1; |
1362 | 1380 | ||
1363 | acpi_bus_scan(device->handle, &ops, NULL); | 1381 | return acpi_bus_scan(device->handle, &ops, NULL); |
1364 | return 0; | ||
1365 | } | 1382 | } |
1366 | EXPORT_SYMBOL(acpi_bus_start); | 1383 | EXPORT_SYMBOL(acpi_bus_start); |
1367 | 1384 | ||
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7c450..8a0ed2800e63 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id, | |||
213 | unsigned long table_end; | 213 | unsigned long table_end; |
214 | acpi_size tbl_size; | 214 | acpi_size tbl_size; |
215 | 215 | ||
216 | if (acpi_disabled) | 216 | if (acpi_disabled && !acpi_ht) |
217 | return -ENODEV; | 217 | return -ENODEV; |
218 | 218 | ||
219 | if (!handler) | 219 | if (!handler) |
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) | |||
280 | struct acpi_table_header *table = NULL; | 280 | struct acpi_table_header *table = NULL; |
281 | acpi_size tbl_size; | 281 | acpi_size tbl_size; |
282 | 282 | ||
283 | if (acpi_disabled) | 283 | if (acpi_disabled && !acpi_ht) |
284 | return -ENODEV; | 284 | return -ENODEV; |
285 | 285 | ||
286 | if (!handler) | 286 | if (!handler) |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) | |||
2868 | }, | 2868 | }, |
2869 | .driver_data = "F.23", /* cutoff BIOS version */ | 2869 | .driver_data = "F.23", /* cutoff BIOS version */ |
2870 | }, | 2870 | }, |
2871 | /* | ||
2872 | * Acer eMachines G725 has the same problem. BIOS | ||
2873 | * V1.03 is known to be broken. V3.04 is known to | ||
2874 | * work. Inbetween, there are V1.06, V2.06 and V3.03 | ||
2875 | * that we don't have much idea about. For now, | ||
2876 | * blacklist anything older than V3.04. | ||
2877 | */ | ||
2878 | { | ||
2879 | .ident = "G725", | ||
2880 | .matches = { | ||
2881 | DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), | ||
2882 | DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), | ||
2883 | }, | ||
2884 | .driver_data = "V3.04", /* cutoff BIOS version */ | ||
2885 | }, | ||
2871 | { } /* terminate list */ | 2886 | { } /* terminate list */ |
2872 | }; | 2887 | }; |
2873 | const struct dmi_system_id *dmi = dmi_first_match(sysids); | 2888 | const struct dmi_system_id *dmi = dmi_first_match(sysids); |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2875 | * write indication (used for PIO/DMA setup), result TF is | 2875 | * write indication (used for PIO/DMA setup), result TF is |
2876 | * copied back and we don't whine too much about its failure. | 2876 | * copied back and we don't whine too much about its failure. |
2877 | */ | 2877 | */ |
2878 | tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 2878 | tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
2879 | if (scmd->sc_data_direction == DMA_TO_DEVICE) | 2879 | if (scmd->sc_data_direction == DMA_TO_DEVICE) |
2880 | tf->flags |= ATA_TFLAG_WRITE; | 2880 | tf->flags |= ATA_TFLAG_WRITE; |
2881 | 2881 | ||
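The single-character change above (= becomes |=) matters because the taskfile flags were already initialized earlier in the request path, and a plain assignment silently discards those bits. A short, self-contained illustration with made-up flag names (not the libata definitions):

#include <stdio.h>

#define FLAG_ISADDR	0x01
#define FLAG_DEVICE	0x02
#define FLAG_EARLIER	0x04	/* pretend earlier setup code set this bit */

int main(void)
{
	unsigned int assigned = FLAG_EARLIER;
	unsigned int orred = FLAG_EARLIER;

	assigned = FLAG_ISADDR | FLAG_DEVICE;	/* overwrites FLAG_EARLIER */
	orred |= FLAG_ISADDR | FLAG_DEVICE;	/* preserves FLAG_EARLIER */

	printf("assigned=0x%x orred=0x%x\n", assigned, orred);
	return 0;
}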
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
893 | do_write); | 893 | do_write); |
894 | } | 894 | } |
895 | 895 | ||
896 | if (!do_write) | ||
897 | flush_dcache_page(page); | ||
898 | |||
896 | qc->curbytes += qc->sect_size; | 899 | qc->curbytes += qc->sect_size; |
897 | qc->cursg_ofs += qc->sect_size; | 900 | qc->cursg_ofs += qc->sect_size; |
898 | 901 | ||
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..6e2c3b064f53 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj) | |||
59 | else | 59 | else |
60 | pr_debug("class '%s' does not have a release() function, " | 60 | pr_debug("class '%s' does not have a release() function, " |
61 | "be careful\n", class->name); | 61 | "be careful\n", class->name); |
62 | |||
63 | kfree(cp); | ||
62 | } | 64 | } |
63 | 65 | ||
64 | static struct sysfs_ops class_sysfs_ops = { | 66 | static struct sysfs_ops class_sysfs_ops = { |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594860d3..9291614ac6b7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v) | |||
337 | if (*pos > h->highest_lun) | 337 | if (*pos > h->highest_lun) |
338 | return 0; | 338 | return 0; |
339 | 339 | ||
340 | if (drv == NULL) /* it's possible for h->drv[] to have holes. */ | ||
341 | return 0; | ||
342 | |||
340 | if (drv->heads == 0) | 343 | if (drv->heads == 0) |
341 | return 0; | 344 | return 0; |
342 | 345 | ||
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e898ad9eb1c3..ab871e00ffc5 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2973,7 +2973,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
2973 | goto out_no_q; | 2973 | goto out_no_q; |
2974 | mdev->rq_queue = q; | 2974 | mdev->rq_queue = q; |
2975 | q->queuedata = mdev; | 2975 | q->queuedata = mdev; |
2976 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); | ||
2977 | 2976 | ||
2978 | disk = alloc_disk(1); | 2977 | disk = alloc_disk(1); |
2979 | if (!disk) | 2978 | if (!disk) |
@@ -2997,6 +2996,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
2997 | q->backing_dev_info.congested_data = mdev; | 2996 | q->backing_dev_info.congested_data = mdev; |
2998 | 2997 | ||
2999 | blk_queue_make_request(q, drbd_make_request_26); | 2998 | blk_queue_make_request(q, drbd_make_request_26); |
2999 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); | ||
3000 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | 3000 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
3001 | blk_queue_merge_bvec(q, drbd_merge_bvec); | 3001 | blk_queue_merge_bvec(q, drbd_merge_bvec); |
3002 | q->queue_lock = &mdev->req_lock; /* needed since we use */ | 3002 | q->queue_lock = &mdev->req_lock; /* needed since we use */ |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index f22a5283128a..d065c646b35a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1224,7 +1224,7 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h) | |||
1224 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); | 1224 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); |
1225 | if (!epoch) { | 1225 | if (!epoch) { |
1226 | dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); | 1226 | dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); |
1227 | issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); | 1227 | issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); |
1228 | drbd_wait_ee_list_empty(mdev, &mdev->active_ee); | 1228 | drbd_wait_ee_list_empty(mdev, &mdev->active_ee); |
1229 | if (issue_flush) { | 1229 | if (issue_flush) { |
1230 | rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); | 1230 | rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2ddf03ae034e..68b5957f107c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) | |||
322 | pkt_kobj_remove(pd->kobj_stat); | 322 | pkt_kobj_remove(pd->kobj_stat); |
323 | pkt_kobj_remove(pd->kobj_wqueue); | 323 | pkt_kobj_remove(pd->kobj_wqueue); |
324 | if (class_pktcdvd) | 324 | if (class_pktcdvd) |
325 | device_destroy(class_pktcdvd, pd->pkt_dev); | 325 | device_unregister(pd->dev); |
326 | } | 326 | } |
327 | 327 | ||
328 | 328 | ||
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 652367aa6546..058fbccf2f52 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -195,5 +195,16 @@ config BT_MRVL_SDIO | |||
195 | Say Y here to compile support for Marvell BT-over-SDIO driver | 195 | Say Y here to compile support for Marvell BT-over-SDIO driver |
196 | into the kernel or say M to compile it as module. | 196 | into the kernel or say M to compile it as module. |
197 | 197 | ||
198 | endmenu | 198 | config BT_ATH3K |
199 | tristate "Atheros firmware download driver" | ||
200 | depends on BT_HCIBTUSB | ||
201 | select FW_LOADER | ||
202 | help | ||
203 | Bluetooth firmware download driver. | ||
204 | This driver loads the firmware into the Atheros Bluetooth | ||
205 | chipset. | ||
199 | 206 | ||
207 | Say Y here to compile support for "Atheros firmware download driver" | ||
208 | into the kernel or say M to compile it as module (ath3k). | ||
209 | |||
210 | endmenu | ||
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b3f57d2d4eb0..7e5aed598121 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o | |||
15 | obj-$(CONFIG_BT_HCIBTUSB) += btusb.o | 15 | obj-$(CONFIG_BT_HCIBTUSB) += btusb.o |
16 | obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o | 16 | obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o |
17 | 17 | ||
18 | obj-$(CONFIG_BT_ATH3K) += ath3k.o | ||
18 | obj-$(CONFIG_BT_MRVL) += btmrvl.o | 19 | obj-$(CONFIG_BT_MRVL) += btmrvl.o |
19 | obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o | 20 | obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o |
20 | 21 | ||
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
new file mode 100644
index 000000000000..add9485ca5b6
--- /dev/null
+++ b/drivers/bluetooth/ath3k.c
@@ -0,0 +1,187 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008-2009 Atheros Communications Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/firmware.h> | ||
29 | #include <linux/usb.h> | ||
30 | #include <net/bluetooth/bluetooth.h> | ||
31 | |||
32 | #define VERSION "1.0" | ||
33 | |||
34 | |||
35 | static struct usb_device_id ath3k_table[] = { | ||
36 | /* Atheros AR3011 */ | ||
37 | { USB_DEVICE(0x0CF3, 0x3000) }, | ||
38 | { } /* Terminating entry */ | ||
39 | }; | ||
40 | |||
41 | MODULE_DEVICE_TABLE(usb, ath3k_table); | ||
42 | |||
43 | #define USB_REQ_DFU_DNLOAD 1 | ||
44 | #define BULK_SIZE 4096 | ||
45 | |||
46 | struct ath3k_data { | ||
47 | struct usb_device *udev; | ||
48 | u8 *fw_data; | ||
49 | u32 fw_size; | ||
50 | u32 fw_sent; | ||
51 | }; | ||
52 | |||
53 | static int ath3k_load_firmware(struct ath3k_data *data, | ||
54 | unsigned char *firmware, | ||
55 | int count) | ||
56 | { | ||
57 | u8 *send_buf; | ||
58 | int err, pipe, len, size, sent = 0; | ||
59 | |||
60 | BT_DBG("ath3k %p udev %p", data, data->udev); | ||
61 | |||
62 | pipe = usb_sndctrlpipe(data->udev, 0); | ||
63 | |||
64 | if ((usb_control_msg(data->udev, pipe, | ||
65 | USB_REQ_DFU_DNLOAD, | ||
66 | USB_TYPE_VENDOR, 0, 0, | ||
67 | firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) { | ||
68 | BT_ERR("Can't change to loading configuration err"); | ||
69 | return -EBUSY; | ||
70 | } | ||
71 | sent += 20; | ||
72 | count -= 20; | ||
73 | |||
74 | send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); | ||
75 | if (!send_buf) { | ||
76 | BT_ERR("Can't allocate memory chunk for firmware"); | ||
77 | return -ENOMEM; | ||
78 | } | ||
79 | |||
80 | while (count) { | ||
81 | size = min_t(uint, count, BULK_SIZE); | ||
82 | pipe = usb_sndbulkpipe(data->udev, 0x02); | ||
83 | memcpy(send_buf, firmware + sent, size); | ||
84 | |||
85 | err = usb_bulk_msg(data->udev, pipe, send_buf, size, | ||
86 | &len, 3000); | ||
87 | |||
88 | if (err || (len != size)) { | ||
89 | BT_ERR("Error in firmware loading err = %d," | ||
90 | "len = %d, size = %d", err, len, size); | ||
91 | goto error; | ||
92 | } | ||
93 | |||
94 | sent += size; | ||
95 | count -= size; | ||
96 | } | ||
97 | |||
98 | kfree(send_buf); | ||
99 | return 0; | ||
100 | |||
101 | error: | ||
102 | kfree(send_buf); | ||
103 | return err; | ||
104 | } | ||
105 | |||
106 | static int ath3k_probe(struct usb_interface *intf, | ||
107 | const struct usb_device_id *id) | ||
108 | { | ||
109 | const struct firmware *firmware; | ||
110 | struct usb_device *udev = interface_to_usbdev(intf); | ||
111 | struct ath3k_data *data; | ||
112 | int size; | ||
113 | |||
114 | BT_DBG("intf %p id %p", intf, id); | ||
115 | |||
116 | if (intf->cur_altsetting->desc.bInterfaceNumber != 0) | ||
117 | return -ENODEV; | ||
118 | |||
119 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
120 | if (!data) | ||
121 | return -ENOMEM; | ||
122 | |||
123 | data->udev = udev; | ||
124 | |||
125 | if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { | ||
126 | kfree(data); | ||
127 | return -EIO; | ||
128 | } | ||
129 | |||
130 | size = max_t(uint, firmware->size, 4096); | ||
131 | data->fw_data = kmalloc(size, GFP_KERNEL); | ||
132 | if (!data->fw_data) { | ||
133 | release_firmware(firmware); | ||
134 | kfree(data); | ||
135 | return -ENOMEM; | ||
136 | } | ||
137 | |||
138 | memcpy(data->fw_data, firmware->data, firmware->size); | ||
139 | data->fw_size = firmware->size; | ||
140 | data->fw_sent = 0; | ||
141 | release_firmware(firmware); | ||
142 | |||
143 | usb_set_intfdata(intf, data); | ||
144 | if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) { | ||
145 | usb_set_intfdata(intf, NULL); | ||
146 | return -EIO; | ||
147 | } | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static void ath3k_disconnect(struct usb_interface *intf) | ||
153 | { | ||
154 | struct ath3k_data *data = usb_get_intfdata(intf); | ||
155 | |||
156 | BT_DBG("ath3k_disconnect intf %p", intf); | ||
157 | |||
158 | kfree(data->fw_data); | ||
159 | kfree(data); | ||
160 | } | ||
161 | |||
162 | static struct usb_driver ath3k_driver = { | ||
163 | .name = "ath3k", | ||
164 | .probe = ath3k_probe, | ||
165 | .disconnect = ath3k_disconnect, | ||
166 | .id_table = ath3k_table, | ||
167 | }; | ||
168 | |||
169 | static int __init ath3k_init(void) | ||
170 | { | ||
171 | BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION); | ||
172 | return usb_register(&ath3k_driver); | ||
173 | } | ||
174 | |||
175 | static void __exit ath3k_exit(void) | ||
176 | { | ||
177 | usb_deregister(&ath3k_driver); | ||
178 | } | ||
179 | |||
180 | module_init(ath3k_init); | ||
181 | module_exit(ath3k_exit); | ||
182 | |||
183 | MODULE_AUTHOR("Atheros Communications"); | ||
184 | MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); | ||
185 | MODULE_VERSION(VERSION); | ||
186 | MODULE_LICENSE("GPL"); | ||
187 | MODULE_FIRMWARE("ath3k-1.fw"); | ||
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 2acdc605cb4b..c2cf81144715 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -503,7 +503,9 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) | |||
503 | unsigned int iobase; | 503 | unsigned int iobase; |
504 | unsigned char reg; | 504 | unsigned char reg; |
505 | 505 | ||
506 | BUG_ON(!info->hdev); | 506 | if (!info || !info->hdev) |
507 | /* our irq handler is shared */ | ||
508 | return IRQ_NONE; | ||
507 | 509 | ||
508 | if (!test_bit(CARD_READY, &(info->hw_state))) | 510 | if (!test_bit(CARD_READY, &(info->hw_state))) |
509 | return IRQ_HANDLED; | 511 | return IRQ_HANDLED; |
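The same defensive check recurs in the bt3c_cs, btuart_cs and dtl1_cs hunks below: these interrupt lines are shared, so the handler can run before the driver's private data is ready and must return IRQ_NONE instead of tripping a BUG_ON. A kernel-style sketch of the convention with a hypothetical private structure (a rough outline, not the bluecard code):

#include <linux/interrupt.h>

struct demo_info {
	void *hdev;		/* set only once the device is fully registered */
};

/* Assumed to be registered with request_irq(irq, demo_interrupt, IRQF_SHARED, "demo", info). */
static irqreturn_t demo_interrupt(int irq, void *dev_inst)
{
	struct demo_info *info = dev_inst;

	if (!info || !info->hdev)
		return IRQ_NONE;	/* shared IRQ: not ours, or not ready yet */

	/* ... read the interrupt status and service the hardware here ... */

	return IRQ_HANDLED;
}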
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index d814a2755ccb..9f5926aaf57f 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -345,7 +345,9 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) | |||
345 | int iir; | 345 | int iir; |
346 | irqreturn_t r = IRQ_NONE; | 346 | irqreturn_t r = IRQ_NONE; |
347 | 347 | ||
348 | BUG_ON(!info->hdev); | 348 | if (!info || !info->hdev) |
349 | /* our irq handler is shared */ | ||
350 | return IRQ_NONE; | ||
349 | 351 | ||
350 | iobase = info->p_dev->io.BasePort1; | 352 | iobase = info->p_dev->io.BasePort1; |
351 | 353 | ||
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, | |||
808 | 808 | ||
809 | exit: | 809 | exit: |
810 | sdio_release_host(card->func); | 810 | sdio_release_host(card->func); |
811 | kfree(tmpbuf); | ||
811 | 812 | ||
812 | return ret; | 813 | return ret; |
813 | } | 814 | } |
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index d339464dc15e..91c523099804 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -295,7 +295,9 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst) | |||
295 | int iir, lsr; | 295 | int iir, lsr; |
296 | irqreturn_t r = IRQ_NONE; | 296 | irqreturn_t r = IRQ_NONE; |
297 | 297 | ||
298 | BUG_ON(!info->hdev); | 298 | if (!info || !info->hdev) |
299 | /* our irq handler is shared */ | ||
300 | return IRQ_NONE; | ||
299 | 301 | ||
300 | iobase = info->p_dev->io.BasePort1; | 302 | iobase = info->p_dev->io.BasePort1; |
301 | 303 | ||
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 4f02a6f3c980..697591941e17 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -299,7 +299,9 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) | |||
299 | int iir, lsr; | 299 | int iir, lsr; |
300 | irqreturn_t r = IRQ_NONE; | 300 | irqreturn_t r = IRQ_NONE; |
301 | 301 | ||
302 | BUG_ON(!info->hdev); | 302 | if (!info || !info->hdev) |
303 | /* our irq handler is shared */ | ||
304 | return IRQ_NONE; | ||
303 | 305 | ||
304 | iobase = info->p_dev->io.BasePort1; | 306 | iobase = info->p_dev->io.BasePort1; |
305 | 307 | ||
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 1afb8968a342..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -729,9 +729,6 @@ int __init agp_amd64_init(void) | |||
729 | if (agp_off) | 729 | if (agp_off) |
730 | return -EINVAL; | 730 | return -EINVAL; |
731 | 731 | ||
732 | if (gart_iommu_aperture) | ||
733 | return agp_bridges_found ? 0 : -ENODEV; | ||
734 | |||
735 | err = pci_register_driver(&agp_amd64_pci_driver); | 732 | err = pci_register_driver(&agp_amd64_pci_driver); |
736 | if (err < 0) | 733 | if (err < 0) |
737 | return err; | 734 | return err; |
@@ -768,16 +765,27 @@ int __init agp_amd64_init(void) | |||
768 | return err; | 765 | return err; |
769 | } | 766 | } |
770 | 767 | ||
768 | static int __init agp_amd64_mod_init(void) | ||
769 | { | ||
770 | #ifndef MODULE | ||
771 | if (gart_iommu_aperture) | ||
772 | return agp_bridges_found ? 0 : -ENODEV; | ||
773 | #endif | ||
774 | return agp_amd64_init(); | ||
775 | } | ||
776 | |||
771 | static void __exit agp_amd64_cleanup(void) | 777 | static void __exit agp_amd64_cleanup(void) |
772 | { | 778 | { |
779 | #ifndef MODULE | ||
773 | if (gart_iommu_aperture) | 780 | if (gart_iommu_aperture) |
774 | return; | 781 | return; |
782 | #endif | ||
775 | if (aperture_resource) | 783 | if (aperture_resource) |
776 | release_resource(aperture_resource); | 784 | release_resource(aperture_resource); |
777 | pci_unregister_driver(&agp_amd64_pci_driver); | 785 | pci_unregister_driver(&agp_amd64_pci_driver); |
778 | } | 786 | } |
779 | 787 | ||
780 | module_init(agp_amd64_init); | 788 | module_init(agp_amd64_mod_init); |
781 | module_exit(agp_amd64_cleanup); | 789 | module_exit(agp_amd64_cleanup); |
782 | 790 | ||
783 | MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); | 791 | MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index be832b6f8279..48788db4e280 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -395,6 +395,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | |||
395 | unsigned long p = *ppos; | 395 | unsigned long p = *ppos; |
396 | ssize_t low_count, read, sz; | 396 | ssize_t low_count, read, sz; |
397 | char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ | 397 | char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ |
398 | int err = 0; | ||
398 | 399 | ||
399 | read = 0; | 400 | read = 0; |
400 | if (p < (unsigned long) high_memory) { | 401 | if (p < (unsigned long) high_memory) { |
@@ -441,12 +442,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | |||
441 | return -ENOMEM; | 442 | return -ENOMEM; |
442 | while (count > 0) { | 443 | while (count > 0) { |
443 | sz = size_inside_page(p, count); | 444 | sz = size_inside_page(p, count); |
445 | if (!is_vmalloc_or_module_addr((void *)p)) { | ||
446 | err = -ENXIO; | ||
447 | break; | ||
448 | } | ||
444 | sz = vread(kbuf, (char *)p, sz); | 449 | sz = vread(kbuf, (char *)p, sz); |
445 | if (!sz) | 450 | if (!sz) |
446 | break; | 451 | break; |
447 | if (copy_to_user(buf, kbuf, sz)) { | 452 | if (copy_to_user(buf, kbuf, sz)) { |
448 | free_page((unsigned long)kbuf); | 453 | err = -EFAULT; |
449 | return -EFAULT; | 454 | break; |
450 | } | 455 | } |
451 | count -= sz; | 456 | count -= sz; |
452 | buf += sz; | 457 | buf += sz; |
@@ -455,8 +460,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | |||
455 | } | 460 | } |
456 | free_page((unsigned long)kbuf); | 461 | free_page((unsigned long)kbuf); |
457 | } | 462 | } |
458 | *ppos = p; | 463 | *ppos = p; |
459 | return read; | 464 | return read ? read : err; |
460 | } | 465 | } |
461 | 466 | ||
462 | 467 | ||
@@ -520,6 +525,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | |||
520 | ssize_t wrote = 0; | 525 | ssize_t wrote = 0; |
521 | ssize_t virtr = 0; | 526 | ssize_t virtr = 0; |
522 | char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ | 527 | char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ |
528 | int err = 0; | ||
523 | 529 | ||
524 | if (p < (unsigned long) high_memory) { | 530 | if (p < (unsigned long) high_memory) { |
525 | unsigned long to_write = min_t(unsigned long, count, | 531 | unsigned long to_write = min_t(unsigned long, count, |
@@ -540,14 +546,16 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | |||
540 | unsigned long sz = size_inside_page(p, count); | 546 | unsigned long sz = size_inside_page(p, count); |
541 | unsigned long n; | 547 | unsigned long n; |
542 | 548 | ||
549 | if (!is_vmalloc_or_module_addr((void *)p)) { | ||
550 | err = -ENXIO; | ||
551 | break; | ||
552 | } | ||
543 | n = copy_from_user(kbuf, buf, sz); | 553 | n = copy_from_user(kbuf, buf, sz); |
544 | if (n) { | 554 | if (n) { |
545 | if (wrote + virtr) | 555 | err = -EFAULT; |
546 | break; | 556 | break; |
547 | free_page((unsigned long)kbuf); | ||
548 | return -EFAULT; | ||
549 | } | 557 | } |
550 | sz = vwrite(kbuf, (char *)p, sz); | 558 | vwrite(kbuf, (char *)p, sz); |
551 | count -= sz; | 559 | count -= sz; |
552 | buf += sz; | 560 | buf += sz; |
553 | virtr += sz; | 561 | virtr += sz; |
@@ -556,8 +564,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | |||
556 | free_page((unsigned long)kbuf); | 564 | free_page((unsigned long)kbuf); |
557 | } | 565 | } |
558 | 566 | ||
559 | *ppos = p; | 567 | *ppos = p; |
560 | return virtr + wrote; | 568 | return virtr + wrote ? : err; |
561 | } | 569 | } |
562 | #endif | 570 | #endif |
563 | 571 | ||
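Beyond the new is_vmalloc_or_module_addr() checks, these mem.c hunks move read_kmem()/write_kmem() to the usual short-transfer convention: remember the first error, but if some bytes already went through, report that count and let the caller hit the error on retry. A self-contained userspace sketch of that return convention (assumed helper name, not the /dev/kmem code):

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Return bytes read if any were transferred, otherwise the first error. */
static ssize_t read_some(int fd, char *buf, size_t count)
{
	ssize_t done = 0;
	int err = 0;

	while (count > 0) {
		ssize_t n = read(fd, buf + done, count);

		if (n < 0) {
			err = -errno;
			break;
		}
		if (n == 0)
			break;			/* end of file */
		done += n;
		count -= n;
	}

	return done ? done : err;
}

int main(void)
{
	char buf[64];

	return read_some(0, buf, sizeof(buf)) < 0 ? 1 : 0;
}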
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8258982b49ec..2849713d2231 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1051,12 +1051,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) | |||
1051 | /* like a named pipe */ | 1051 | /* like a named pipe */ |
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | /* | ||
1055 | * If we gave the user some bytes, update the access time. | ||
1056 | */ | ||
1057 | if (count) | ||
1058 | file_accessed(file); | ||
1059 | |||
1060 | return (count ? count : retval); | 1054 | return (count ? count : retval); |
1061 | } | 1055 | } |
1062 | 1056 | ||
@@ -1107,7 +1101,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer, | |||
1107 | size_t count, loff_t *ppos) | 1101 | size_t count, loff_t *ppos) |
1108 | { | 1102 | { |
1109 | size_t ret; | 1103 | size_t ret; |
1110 | struct inode *inode = file->f_path.dentry->d_inode; | ||
1111 | 1104 | ||
1112 | ret = write_pool(&blocking_pool, buffer, count); | 1105 | ret = write_pool(&blocking_pool, buffer, count); |
1113 | if (ret) | 1106 | if (ret) |
@@ -1116,8 +1109,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer, | |||
1116 | if (ret) | 1109 | if (ret) |
1117 | return ret; | 1110 | return ret; |
1118 | 1111 | ||
1119 | inode->i_mtime = current_fs_time(inode->i_sb); | ||
1120 | mark_inode_dirty(inode); | ||
1121 | return (ssize_t)count; | 1112 | return (ssize_t)count; |
1122 | } | 1113 | } |
1123 | 1114 | ||
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@ | |||
39 | struct tpm_inf_dev { | 39 | struct tpm_inf_dev { |
40 | int iotype; | 40 | int iotype; |
41 | 41 | ||
42 | void __iomem *mem_base; /* MMIO ioremap'd addr */ | 42 | void __iomem *mem_base; /* MMIO ioremap'd addr */ |
43 | unsigned long map_base; /* phys MMIO base */ | 43 | unsigned long map_base; /* phys MMIO base */ |
44 | unsigned long map_size; /* MMIO region size */ | 44 | unsigned long map_size; /* MMIO region size */ |
45 | unsigned int index_off; /* index register offset */ | 45 | unsigned int index_off; /* index register offset */ |
46 | 46 | ||
47 | unsigned int data_regs; /* Data registers */ | 47 | unsigned int data_regs; /* Data registers */ |
48 | unsigned int data_size; | 48 | unsigned int data_size; |
49 | 49 | ||
50 | unsigned int config_port; /* IO Port config index reg */ | 50 | unsigned int config_port; /* IO Port config index reg */ |
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = { | |||
406 | .miscdev = {.fops = &inf_ops,}, | 406 | .miscdev = {.fops = &inf_ops,}, |
407 | }; | 407 | }; |
408 | 408 | ||
409 | static const struct pnp_device_id tpm_pnp_tbl[] = { | 409 | static const struct pnp_device_id tpm_inf_pnp_tbl[] = { |
410 | /* Infineon TPMs */ | 410 | /* Infineon TPMs */ |
411 | {"IFX0101", 0}, | 411 | {"IFX0101", 0}, |
412 | {"IFX0102", 0}, | 412 | {"IFX0102", 0}, |
413 | {"", 0} | 413 | {"", 0} |
414 | }; | 414 | }; |
415 | 415 | ||
416 | MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); | 416 | MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); |
417 | 417 | ||
418 | static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | 418 | static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, |
419 | const struct pnp_device_id *dev_id) | 419 | const struct pnp_device_id *dev_id) |
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | |||
430 | if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && | 430 | if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && |
431 | !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { | 431 | !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { |
432 | 432 | ||
433 | tpm_dev.iotype = TPM_INF_IO_PORT; | 433 | tpm_dev.iotype = TPM_INF_IO_PORT; |
434 | 434 | ||
435 | tpm_dev.config_port = pnp_port_start(dev, 0); | 435 | tpm_dev.config_port = pnp_port_start(dev, 0); |
436 | tpm_dev.config_size = pnp_port_len(dev, 0); | 436 | tpm_dev.config_size = pnp_port_len(dev, 0); |
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | |||
459 | goto err_last; | 459 | goto err_last; |
460 | } | 460 | } |
461 | } else if (pnp_mem_valid(dev, 0) && | 461 | } else if (pnp_mem_valid(dev, 0) && |
462 | !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { | 462 | !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { |
463 | 463 | ||
464 | tpm_dev.iotype = TPM_INF_IO_MEM; | 464 | tpm_dev.iotype = TPM_INF_IO_MEM; |
465 | 465 | ||
466 | tpm_dev.map_base = pnp_mem_start(dev, 0); | 466 | tpm_dev.map_base = pnp_mem_start(dev, 0); |
467 | tpm_dev.map_size = pnp_mem_len(dev, 0); | 467 | tpm_dev.map_size = pnp_mem_len(dev, 0); |
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | |||
563 | "product id 0x%02x%02x" | 563 | "product id 0x%02x%02x" |
564 | "%s\n", | 564 | "%s\n", |
565 | tpm_dev.iotype == TPM_INF_IO_PORT ? | 565 | tpm_dev.iotype == TPM_INF_IO_PORT ? |
566 | tpm_dev.config_port : | 566 | tpm_dev.config_port : |
567 | tpm_dev.map_base + tpm_dev.index_off, | 567 | tpm_dev.map_base + tpm_dev.index_off, |
568 | tpm_dev.iotype == TPM_INF_IO_PORT ? | 568 | tpm_dev.iotype == TPM_INF_IO_PORT ? |
569 | tpm_dev.data_regs : | 569 | tpm_dev.data_regs : |
570 | tpm_dev.map_base + tpm_dev.data_regs, | 570 | tpm_dev.map_base + tpm_dev.data_regs, |
571 | version[0], version[1], | 571 | version[0], version[1], |
572 | vendorid[0], vendorid[1], | 572 | vendorid[0], vendorid[1], |
573 | productid[0], productid[1], chipname); | 573 | productid[0], productid[1], chipname); |
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) | |||
607 | iounmap(tpm_dev.mem_base); | 607 | iounmap(tpm_dev.mem_base); |
608 | release_mem_region(tpm_dev.map_base, tpm_dev.map_size); | 608 | release_mem_region(tpm_dev.map_base, tpm_dev.map_size); |
609 | } | 609 | } |
610 | tpm_dev_vendor_release(chip); | ||
610 | tpm_remove_hardware(chip->dev); | 611 | tpm_remove_hardware(chip->dev); |
611 | } | 612 | } |
612 | } | 613 | } |
613 | 614 | ||
615 | static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state) | ||
616 | { | ||
617 | struct tpm_chip *chip = pnp_get_drvdata(dev); | ||
618 | int rc; | ||
619 | if (chip) { | ||
620 | u8 savestate[] = { | ||
621 | 0, 193, /* TPM_TAG_RQU_COMMAND */ | ||
622 | 0, 0, 0, 10, /* blob length (in bytes) */ | ||
623 | 0, 0, 0, 152 /* TPM_ORD_SaveState */ | ||
624 | }; | ||
625 | dev_info(&dev->dev, "saving TPM state\n"); | ||
626 | rc = tpm_inf_send(chip, savestate, sizeof(savestate)); | ||
627 | if (rc < 0) { | ||
628 | dev_err(&dev->dev, "error while saving TPM state\n"); | ||
629 | return rc; | ||
630 | } | ||
631 | } | ||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | static int tpm_inf_pnp_resume(struct pnp_dev *dev) | ||
636 | { | ||
637 | /* Re-configure TPM after suspending */ | ||
638 | tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); | ||
639 | tpm_config_out(IOLIMH, TPM_INF_ADDR); | ||
640 | tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); | ||
641 | tpm_config_out(IOLIML, TPM_INF_ADDR); | ||
642 | tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); | ||
643 | /* activate register */ | ||
644 | tpm_config_out(TPM_DAR, TPM_INF_ADDR); | ||
645 | tpm_config_out(0x01, TPM_INF_DATA); | ||
646 | tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); | ||
647 | /* disable RESET, LP and IRQC */ | ||
648 | tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); | ||
649 | return tpm_pm_resume(&dev->dev); | ||
650 | } | ||
651 | |||
614 | static struct pnp_driver tpm_inf_pnp_driver = { | 652 | static struct pnp_driver tpm_inf_pnp_driver = { |
615 | .name = "tpm_inf_pnp", | 653 | .name = "tpm_inf_pnp", |
616 | .driver = { | 654 | .id_table = tpm_inf_pnp_tbl, |
617 | .owner = THIS_MODULE, | ||
618 | .suspend = tpm_pm_suspend, | ||
619 | .resume = tpm_pm_resume, | ||
620 | }, | ||
621 | .id_table = tpm_pnp_tbl, | ||
622 | .probe = tpm_inf_pnp_probe, | 655 | .probe = tpm_inf_pnp_probe, |
623 | .remove = __devexit_p(tpm_inf_pnp_remove), | 656 | .suspend = tpm_inf_pnp_suspend, |
657 | .resume = tpm_inf_pnp_resume, | ||
658 | .remove = __devexit_p(tpm_inf_pnp_remove) | ||
624 | }; | 659 | }; |
625 | 660 | ||
626 | static int __init init_inf(void) | 661 | static int __init init_inf(void) |
@@ -638,5 +673,5 @@ module_exit(cleanup_inf); | |||
638 | 673 | ||
639 | MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); | 674 | MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); |
640 | MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); | 675 | MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); |
641 | MODULE_VERSION("1.9"); | 676 | MODULE_VERSION("1.9.2"); |
642 | MODULE_LICENSE("GPL"); | 677 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on) | |||
1951 | pid = task_pid(current); | 1951 | pid = task_pid(current); |
1952 | type = PIDTYPE_PID; | 1952 | type = PIDTYPE_PID; |
1953 | } | 1953 | } |
1954 | retval = __f_setown(filp, pid, type, 0); | 1954 | get_pid(pid); |
1955 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | 1955 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
1956 | retval = __f_setown(filp, pid, type, 0); | ||
1957 | put_pid(pid); | ||
1956 | if (retval) | 1958 | if (retval) |
1957 | goto out; | 1959 | goto out; |
1958 | } else { | 1960 | } else { |
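The tty_fasync() change is a lifetime-versus-locking fix: __f_setown() must not be called with tty->ctrl_lock held, yet the struct pid looked up under that lock is only guaranteed to stay around while the lock is held, so the patch takes a reference with get_pid(), drops the lock, makes the call, then puts the reference. A kernel-style sketch of that shape with a hypothetical lock and caller (not the tty code itself; __f_setown() returned int in this kernel era, as the hunk shows):

#include <linux/fs.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_ctrl_lock);

static int demo_fasync_own(struct file *filp)
{
	struct pid *pid;
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&demo_ctrl_lock, flags);
	pid = task_pid(current);	/* in tty_fasync() this may be tty->pgrp,
					 * which is only stable under the lock */
	get_pid(pid);			/* pin it past the unlock */
	spin_unlock_irqrestore(&demo_ctrl_lock, flags);

	retval = __f_setown(filp, pid, PIDTYPE_PID, 0);	/* lock no longer held */
	put_pid(pid);
	return retval;
}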
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
index 867b67be9f0a..c7072ba14f48 100644
--- a/drivers/char/uv_mmtimer.c
+++ b/drivers/char/uv_mmtimer.c
@@ -89,13 +89,17 @@ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, | |||
89 | switch (cmd) { | 89 | switch (cmd) { |
90 | case MMTIMER_GETOFFSET: /* offset of the counter */ | 90 | case MMTIMER_GETOFFSET: /* offset of the counter */ |
91 | /* | 91 | /* |
92 | * UV RTC register is on its own page | 92 | * Starting with HUB rev 2.0, the UV RTC register is |
93 | * replicated across all cachelines of it's own page. | ||
94 | * This allows faster simultaneous reads from a given socket. | ||
95 | * | ||
96 | * The offset returned is in 64 bit units. | ||
93 | */ | 97 | */ |
94 | if (PAGE_SIZE <= (1 << 16)) | 98 | if (uv_get_min_hub_revision_id() == 1) |
95 | ret = ((UV_LOCAL_MMR_BASE | UVH_RTC) & (PAGE_SIZE-1)) | 99 | ret = 0; |
96 | / 8; | ||
97 | else | 100 | else |
98 | ret = -ENOSYS; | 101 | ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) % |
102 | PAGE_SIZE) / 8; | ||
99 | break; | 103 | break; |
100 | 104 | ||
101 | case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ | 105 | case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */ |
@@ -115,8 +119,8 @@ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd, | |||
115 | ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); | 119 | ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK); |
116 | break; | 120 | break; |
117 | 121 | ||
118 | case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */ | 122 | case MMTIMER_MMAPAVAIL: |
119 | ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0; | 123 | ret = 1; |
120 | break; | 124 | break; |
121 | 125 | ||
122 | case MMTIMER_GETCOUNTER: | 126 | case MMTIMER_GETCOUNTER: |
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fac19d1..b314a999aabe 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #define DRV_NAME "cs5535-clockevt" | 22 | #define DRV_NAME "cs5535-clockevt" |
23 | 23 | ||
24 | static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; | 24 | static int timer_irq; |
25 | module_param_named(irq, timer_irq, int, 0644); | 25 | module_param_named(irq, timer_irq, int, 0644); |
26 | MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); | 26 | MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); |
27 | 27 | ||
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f06024668f99..537c29ac4487 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -36,17 +36,6 @@ MODULE_LICENSE("GPL"); | |||
36 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); | 36 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); |
37 | MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); | 37 | MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); |
38 | 38 | ||
39 | static u32 cn_idx = CN_IDX_CONNECTOR; | ||
40 | static u32 cn_val = CN_VAL_CONNECTOR; | ||
41 | |||
42 | module_param(cn_idx, uint, 0); | ||
43 | module_param(cn_val, uint, 0); | ||
44 | MODULE_PARM_DESC(cn_idx, "Connector's main device idx."); | ||
45 | MODULE_PARM_DESC(cn_val, "Connector's main device val."); | ||
46 | |||
47 | static DEFINE_MUTEX(notify_lock); | ||
48 | static LIST_HEAD(notify_list); | ||
49 | |||
50 | static struct cn_dev cdev; | 39 | static struct cn_dev cdev; |
51 | 40 | ||
52 | static int cn_already_initialized; | 41 | static int cn_already_initialized; |
@@ -210,54 +199,6 @@ static void cn_rx_skb(struct sk_buff *__skb) | |||
210 | } | 199 | } |
211 | 200 | ||
212 | /* | 201 | /* |
213 | * Notification routing. | ||
214 | * | ||
215 | * Gets id and checks if there are notification request for it's idx | ||
216 | * and val. If there are such requests notify the listeners with the | ||
217 | * given notify event. | ||
218 | * | ||
219 | */ | ||
220 | static void cn_notify(struct cb_id *id, u32 notify_event) | ||
221 | { | ||
222 | struct cn_ctl_entry *ent; | ||
223 | |||
224 | mutex_lock(¬ify_lock); | ||
225 | list_for_each_entry(ent, ¬ify_list, notify_entry) { | ||
226 | int i; | ||
227 | struct cn_notify_req *req; | ||
228 | struct cn_ctl_msg *ctl = ent->msg; | ||
229 | int idx_found, val_found; | ||
230 | |||
231 | idx_found = val_found = 0; | ||
232 | |||
233 | req = (struct cn_notify_req *)ctl->data; | ||
234 | for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { | ||
235 | if (id->idx >= req->first && | ||
236 | id->idx < req->first + req->range) { | ||
237 | idx_found = 1; | ||
238 | break; | ||
239 | } | ||
240 | } | ||
241 | |||
242 | for (i = 0; i < ctl->val_notify_num; ++i, ++req) { | ||
243 | if (id->val >= req->first && | ||
244 | id->val < req->first + req->range) { | ||
245 | val_found = 1; | ||
246 | break; | ||
247 | } | ||
248 | } | ||
249 | |||
250 | if (idx_found && val_found) { | ||
251 | struct cn_msg m = { .ack = notify_event, }; | ||
252 | |||
253 | memcpy(&m.id, id, sizeof(m.id)); | ||
254 | cn_netlink_send(&m, ctl->group, GFP_KERNEL); | ||
255 | } | ||
256 | } | ||
257 | mutex_unlock(¬ify_lock); | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * Callback add routing - adds callback with given ID and name. | 202 | * Callback add routing - adds callback with given ID and name. |
262 | * If there is registered callback with the same ID it will not be added. | 203 | * If there is registered callback with the same ID it will not be added. |
263 | * | 204 | * |
@@ -276,8 +217,6 @@ int cn_add_callback(struct cb_id *id, char *name, | |||
276 | if (err) | 217 | if (err) |
277 | return err; | 218 | return err; |
278 | 219 | ||
279 | cn_notify(id, 0); | ||
280 | |||
281 | return 0; | 220 | return 0; |
282 | } | 221 | } |
283 | EXPORT_SYMBOL_GPL(cn_add_callback); | 222 | EXPORT_SYMBOL_GPL(cn_add_callback); |
@@ -295,111 +234,9 @@ void cn_del_callback(struct cb_id *id) | |||
295 | struct cn_dev *dev = &cdev; | 234 | struct cn_dev *dev = &cdev; |
296 | 235 | ||
297 | cn_queue_del_callback(dev->cbdev, id); | 236 | cn_queue_del_callback(dev->cbdev, id); |
298 | cn_notify(id, 1); | ||
299 | } | 237 | } |
300 | EXPORT_SYMBOL_GPL(cn_del_callback); | 238 | EXPORT_SYMBOL_GPL(cn_del_callback); |
301 | 239 | ||
302 | /* | ||
303 | * Checks two connector's control messages to be the same. | ||
304 | * Returns 1 if they are the same or if the first one is corrupted. | ||
305 | */ | ||
306 | static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2) | ||
307 | { | ||
308 | int i; | ||
309 | struct cn_notify_req *req1, *req2; | ||
310 | |||
311 | if (m1->idx_notify_num != m2->idx_notify_num) | ||
312 | return 0; | ||
313 | |||
314 | if (m1->val_notify_num != m2->val_notify_num) | ||
315 | return 0; | ||
316 | |||
317 | if (m1->len != m2->len) | ||
318 | return 0; | ||
319 | |||
320 | if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) != | ||
321 | m1->len) | ||
322 | return 1; | ||
323 | |||
324 | req1 = (struct cn_notify_req *)m1->data; | ||
325 | req2 = (struct cn_notify_req *)m2->data; | ||
326 | |||
327 | for (i = 0; i < m1->idx_notify_num; ++i) { | ||
328 | if (req1->first != req2->first || req1->range != req2->range) | ||
329 | return 0; | ||
330 | req1++; | ||
331 | req2++; | ||
332 | } | ||
333 | |||
334 | for (i = 0; i < m1->val_notify_num; ++i) { | ||
335 | if (req1->first != req2->first || req1->range != req2->range) | ||
336 | return 0; | ||
337 | req1++; | ||
338 | req2++; | ||
339 | } | ||
340 | |||
341 | return 1; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Main connector device's callback. | ||
346 | * | ||
347 | * Used for notification of a request's processing. | ||
348 | */ | ||
349 | static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) | ||
350 | { | ||
351 | struct cn_ctl_msg *ctl; | ||
352 | struct cn_ctl_entry *ent; | ||
353 | u32 size; | ||
354 | |||
355 | if (msg->len < sizeof(*ctl)) | ||
356 | return; | ||
357 | |||
358 | ctl = (struct cn_ctl_msg *)msg->data; | ||
359 | |||
360 | size = (sizeof(*ctl) + ((ctl->idx_notify_num + | ||
361 | ctl->val_notify_num) * | ||
362 | sizeof(struct cn_notify_req))); | ||
363 | |||
364 | if (msg->len != size) | ||
365 | return; | ||
366 | |||
367 | if (ctl->len + sizeof(*ctl) != msg->len) | ||
368 | return; | ||
369 | |||
370 | /* | ||
371 | * Remove notification. | ||
372 | */ | ||
373 | if (ctl->group == 0) { | ||
374 | struct cn_ctl_entry *n; | ||
375 | |||
376 | mutex_lock(¬ify_lock); | ||
377 | list_for_each_entry_safe(ent, n, ¬ify_list, notify_entry) { | ||
378 | if (cn_ctl_msg_equals(ent->msg, ctl)) { | ||
379 | list_del(&ent->notify_entry); | ||
380 | kfree(ent); | ||
381 | } | ||
382 | } | ||
383 | mutex_unlock(¬ify_lock); | ||
384 | |||
385 | return; | ||
386 | } | ||
387 | |||
388 | size += sizeof(*ent); | ||
389 | |||
390 | ent = kzalloc(size, GFP_KERNEL); | ||
391 | if (!ent) | ||
392 | return; | ||
393 | |||
394 | ent->msg = (struct cn_ctl_msg *)(ent + 1); | ||
395 | |||
396 | memcpy(ent->msg, ctl, size - sizeof(*ent)); | ||
397 | |||
398 | mutex_lock(¬ify_lock); | ||
399 | list_add(&ent->notify_entry, ¬ify_list); | ||
400 | mutex_unlock(¬ify_lock); | ||
401 | } | ||
402 | |||
403 | static int cn_proc_show(struct seq_file *m, void *v) | 240 | static int cn_proc_show(struct seq_file *m, void *v) |
404 | { | 241 | { |
405 | struct cn_queue_dev *dev = cdev.cbdev; | 242 | struct cn_queue_dev *dev = cdev.cbdev; |
@@ -437,11 +274,8 @@ static const struct file_operations cn_file_ops = { | |||
437 | static int __devinit cn_init(void) | 274 | static int __devinit cn_init(void) |
438 | { | 275 | { |
439 | struct cn_dev *dev = &cdev; | 276 | struct cn_dev *dev = &cdev; |
440 | int err; | ||
441 | 277 | ||
442 | dev->input = cn_rx_skb; | 278 | dev->input = cn_rx_skb; |
443 | dev->id.idx = cn_idx; | ||
444 | dev->id.val = cn_val; | ||
445 | 279 | ||
446 | dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, | 280 | dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, |
447 | CN_NETLINK_USERS + 0xf, | 281 | CN_NETLINK_USERS + 0xf, |
@@ -457,14 +291,6 @@ static int __devinit cn_init(void) | |||
457 | 291 | ||
458 | cn_already_initialized = 1; | 292 | cn_already_initialized = 1; |
459 | 293 | ||
460 | err = cn_add_callback(&dev->id, "connector", &cn_callback); | ||
461 | if (err) { | ||
462 | cn_already_initialized = 0; | ||
463 | cn_queue_free_dev(dev->cbdev); | ||
464 | netlink_kernel_release(dev->nls); | ||
465 | return -EINVAL; | ||
466 | } | ||
467 | |||
468 | proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); | 294 | proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); |
469 | 295 | ||
470 | return 0; | 296 | return 0; |
@@ -478,7 +304,6 @@ static void __devexit cn_fini(void) | |||
478 | 304 | ||
479 | proc_net_remove(&init_net, "connector"); | 305 | proc_net_remove(&init_net, "connector"); |
480 | 306 | ||
481 | cn_del_callback(&dev->id); | ||
482 | cn_queue_free_dev(dev->cbdev); | 307 | cn_queue_free_dev(dev->cbdev); |
483 | netlink_kernel_release(dev->nls); | 308 | netlink_kernel_release(dev->nls); |
484 | } | 309 | } |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
554 | (dbs_tuners_ins.up_threshold - | 554 | (dbs_tuners_ins.up_threshold - |
555 | dbs_tuners_ins.down_differential); | 555 | dbs_tuners_ins.down_differential); |
556 | 556 | ||
557 | if (freq_next < policy->min) | ||
558 | freq_next = policy->min; | ||
559 | |||
557 | if (!dbs_tuners_ins.powersave_bias) { | 560 | if (!dbs_tuners_ins.powersave_bias) { |
558 | __cpufreq_driver_target(policy, freq_next, | 561 | __cpufreq_driver_target(policy, freq_next, |
559 | CPUFREQ_RELATION_L); | 562 | CPUFREQ_RELATION_L); |
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 0af80577dc7b..d3a27e0119bc 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -57,6 +57,23 @@ static int padlock_sha_update(struct shash_desc *desc, | |||
57 | return crypto_shash_update(&dctx->fallback, data, length); | 57 | return crypto_shash_update(&dctx->fallback, data, length); |
58 | } | 58 | } |
59 | 59 | ||
60 | static int padlock_sha_export(struct shash_desc *desc, void *out) | ||
61 | { | ||
62 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); | ||
63 | |||
64 | return crypto_shash_export(&dctx->fallback, out); | ||
65 | } | ||
66 | |||
67 | static int padlock_sha_import(struct shash_desc *desc, const void *in) | ||
68 | { | ||
69 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); | ||
70 | struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
71 | |||
72 | dctx->fallback.tfm = ctx->fallback; | ||
73 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
74 | return crypto_shash_import(&dctx->fallback, in); | ||
75 | } | ||
76 | |||
60 | static inline void padlock_output_block(uint32_t *src, | 77 | static inline void padlock_output_block(uint32_t *src, |
61 | uint32_t *dst, size_t count) | 78 | uint32_t *dst, size_t count) |
62 | { | 79 | { |
@@ -235,7 +252,10 @@ static struct shash_alg sha1_alg = { | |||
235 | .update = padlock_sha_update, | 252 | .update = padlock_sha_update, |
236 | .finup = padlock_sha1_finup, | 253 | .finup = padlock_sha1_finup, |
237 | .final = padlock_sha1_final, | 254 | .final = padlock_sha1_final, |
255 | .export = padlock_sha_export, | ||
256 | .import = padlock_sha_import, | ||
238 | .descsize = sizeof(struct padlock_sha_desc), | 257 | .descsize = sizeof(struct padlock_sha_desc), |
258 | .statesize = sizeof(struct sha1_state), | ||
239 | .base = { | 259 | .base = { |
240 | .cra_name = "sha1", | 260 | .cra_name = "sha1", |
241 | .cra_driver_name = "sha1-padlock", | 261 | .cra_driver_name = "sha1-padlock", |
@@ -256,7 +276,10 @@ static struct shash_alg sha256_alg = { | |||
256 | .update = padlock_sha_update, | 276 | .update = padlock_sha_update, |
257 | .finup = padlock_sha256_finup, | 277 | .finup = padlock_sha256_finup, |
258 | .final = padlock_sha256_final, | 278 | .final = padlock_sha256_final, |
279 | .export = padlock_sha_export, | ||
280 | .import = padlock_sha_import, | ||
259 | .descsize = sizeof(struct padlock_sha_desc), | 281 | .descsize = sizeof(struct padlock_sha_desc), |
282 | .statesize = sizeof(struct sha256_state), | ||
260 | .base = { | 283 | .base = { |
261 | .cra_name = "sha256", | 284 | .cra_name = "sha256", |
262 | .cra_driver_name = "sha256-padlock", | 285 | .cra_driver_name = "sha256-padlock", |
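The padlock-sha change wires .export/.import through to the fallback shash so a partially hashed state can be saved and later resumed, and declares statesize accordingly. As a rough illustration of that export/import contract only (a toy running checksum, not the crypto API or real SHA state):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy streaming "digest" with an exportable partial state; the real code
 * simply delegates to crypto_shash_export/import on the fallback. */
struct toy_state { uint32_t sum; uint64_t len; };

static void toy_update(struct toy_state *st, const void *data, size_t n)
{
        const uint8_t *p = data;

        while (n--) { st->sum += *p++; st->len++; }
}

static void toy_export(const struct toy_state *st, void *out) { memcpy(out, st, sizeof(*st)); }
static void toy_import(struct toy_state *st, const void *in)  { memcpy(st, in, sizeof(*st)); }

int main(void)
{
        struct toy_state a = { 0, 0 }, b;
        uint8_t saved[sizeof(struct toy_state)];

        toy_update(&a, "hello ", 6);
        toy_export(&a, saved);          /* suspend hashing here...             */
        toy_import(&b, saved);          /* ...and resume in a fresh descriptor */
        toy_update(&b, "world", 5);
        printf("sum=%u len=%llu\n", (unsigned)b.sum, (unsigned long long)b.len);
        return 0;
}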
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index b5f2ee0f8e2c..64a937262a40 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data) | |||
613 | cohd_fin->pending_irqs--; | 613 | cohd_fin->pending_irqs--; |
614 | cohc->completed = cohd_fin->desc.cookie; | 614 | cohc->completed = cohd_fin->desc.cookie; |
615 | 615 | ||
616 | BUG_ON(cohc->nbr_active_done && cohd_fin == NULL); | ||
617 | |||
618 | if (cohc->nbr_active_done == 0) | 616 | if (cohc->nbr_active_done == 0) |
619 | return; | 617 | return; |
620 | 618 | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 6f51a0a7a8bb..e7a3230fb7d5 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device) | |||
826 | chan->dev->chan = NULL; | 826 | chan->dev->chan = NULL; |
827 | mutex_unlock(&dma_list_mutex); | 827 | mutex_unlock(&dma_list_mutex); |
828 | device_unregister(&chan->dev->device); | 828 | device_unregister(&chan->dev->device); |
829 | free_percpu(chan->local); | ||
829 | } | 830 | } |
830 | } | 831 | } |
831 | EXPORT_SYMBOL(dma_async_device_unregister); | 832 | EXPORT_SYMBOL(dma_async_device_unregister); |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 8b905161fbf4..948d563941c9 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -467,7 +467,7 @@ err_srcs: | |||
467 | 467 | ||
468 | if (iterations > 0) | 468 | if (iterations > 0) |
469 | while (!kthread_should_stop()) { | 469 | while (!kthread_should_stop()) { |
470 | DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); | 470 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); |
471 | interruptible_sleep_on(&wait_dmatest_exit); | 471 | interruptible_sleep_on(&wait_dmatest_exit); |
472 | } | 472 | } |
473 | 473 | ||
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5f7a500e18d0..5cc37afe2bc1 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) | |||
249 | if (is_ioat_active(status) || is_ioat_idle(status)) | 249 | if (is_ioat_active(status) || is_ioat_idle(status)) |
250 | ioat_suspend(chan); | 250 | ioat_suspend(chan); |
251 | while (is_ioat_active(status) || is_ioat_idle(status)) { | 251 | while (is_ioat_active(status) || is_ioat_idle(status)) { |
252 | if (end && time_after(jiffies, end)) { | 252 | if (tmo && time_after(jiffies, end)) { |
253 | err = -ETIMEDOUT; | 253 | err = -ETIMEDOUT; |
254 | break; | 254 | break; |
255 | } | 255 | } |
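The ioat2_quiesce fix keys the timeout test off the caller's tmo argument, so tmo == 0 reads as "no deadline" instead of expiring against the derived end value. A generic sketch of that bounded-poll shape (user-space time(), hypothetical cond/busy helpers):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll until cond() clears or, if timeout_s is non-zero, until the
 * deadline passes.  timeout_s == 0 means "no deadline", which is the
 * semantics restored by testing tmo instead of the computed deadline. */
static int poll_until_clear(bool (*cond)(void), unsigned timeout_s)
{
        time_t end = time(NULL) + timeout_s;

        while (cond()) {
                if (timeout_s && time(NULL) > end)
                        return -1;              /* timed out */
        }
        return 0;
}

static int calls;
static bool busy(void) { return ++calls < 1000; }

int main(void)
{
        printf("%d\n", poll_until_clear(busy, 0));      /* waits as long as needed */
        return 0;
}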
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 9a5bc1a7389e..e80bae1673fa 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) | |||
761 | * @buffer_n: buffer number to update. | 761 | * @buffer_n: buffer number to update. |
762 | * 0 or 1 are the only valid values. | 762 | * 0 or 1 are the only valid values. |
763 | * @phyaddr: buffer physical address. | 763 | * @phyaddr: buffer physical address. |
764 | * @return: Returns 0 on success or negative error code on failure. This | ||
765 | * function will fail if the buffer is set to ready. | ||
766 | */ | 764 | */ |
767 | /* Called under spin_lock(_irqsave)(&ichan->lock) */ | 765 | /* Called under spin_lock(_irqsave)(&ichan->lock) */ |
768 | static int ipu_update_channel_buffer(struct idmac_channel *ichan, | 766 | static void ipu_update_channel_buffer(struct idmac_channel *ichan, |
769 | int buffer_n, dma_addr_t phyaddr) | 767 | int buffer_n, dma_addr_t phyaddr) |
770 | { | 768 | { |
771 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 769 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
772 | uint32_t reg; | 770 | uint32_t reg; |
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, | |||
806 | } | 804 | } |
807 | 805 | ||
808 | spin_unlock_irqrestore(&ipu_data.lock, flags); | 806 | spin_unlock_irqrestore(&ipu_data.lock, flags); |
809 | |||
810 | return 0; | ||
811 | } | 807 | } |
812 | 808 | ||
813 | /* Called under spin_lock_irqsave(&ichan->lock) */ | 809 | /* Called under spin_lock_irqsave(&ichan->lock) */ |
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, | |||
816 | { | 812 | { |
817 | unsigned int chan_id = ichan->dma_chan.chan_id; | 813 | unsigned int chan_id = ichan->dma_chan.chan_id; |
818 | struct device *dev = &ichan->dma_chan.dev->device; | 814 | struct device *dev = &ichan->dma_chan.dev->device; |
819 | int ret; | ||
820 | 815 | ||
821 | if (async_tx_test_ack(&desc->txd)) | 816 | if (async_tx_test_ack(&desc->txd)) |
822 | return -EINTR; | 817 | return -EINTR; |
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, | |||
827 | * could make it conditional on status >= IPU_CHANNEL_ENABLED, but | 822 | * could make it conditional on status >= IPU_CHANNEL_ENABLED, but |
828 | * doing it again shouldn't hurt either. | 823 | * doing it again shouldn't hurt either. |
829 | */ | 824 | */ |
830 | ret = ipu_update_channel_buffer(ichan, buf_idx, | 825 | ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); |
831 | sg_dma_address(sg)); | ||
832 | |||
833 | if (ret < 0) { | ||
834 | dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", | ||
835 | sg, chan_id, buf_idx); | ||
836 | return ret; | ||
837 | } | ||
838 | 826 | ||
839 | ipu_select_buffer(chan_id, buf_idx); | 827 | ipu_select_buffer(chan_id, buf_idx); |
840 | dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", | 828 | dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", |
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1379 | 1367 | ||
1380 | if (likely(sgnew) && | 1368 | if (likely(sgnew) && |
1381 | ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { | 1369 | ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { |
1382 | callback = desc->txd.callback; | 1370 | callback = descnew->txd.callback; |
1383 | callback_param = desc->txd.callback_param; | 1371 | callback_param = descnew->txd.callback_param; |
1384 | spin_unlock(&ichan->lock); | 1372 | spin_unlock(&ichan->lock); |
1385 | callback(callback_param); | 1373 | if (callback) |
1374 | callback(callback_param); | ||
1386 | spin_lock(&ichan->lock); | 1375 | spin_lock(&ichan->lock); |
1387 | } | 1376 | } |
1388 | 1377 | ||
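The idmac_interrupt hunk reports a failed resubmission through the callback of the descriptor that actually failed (descnew) and only when a callback was installed. A small sketch of that guard, with a made-up struct desc:

#include <stdio.h>

struct desc {
        void (*callback)(void *arg);    /* optional completion callback */
        void *callback_param;
};

/* Notify the owner of the descriptor that failed to submit, tolerating
 * descriptors that were queued without a callback. */
static void report_submit_failure(struct desc *failed)
{
        if (failed->callback)
                failed->callback(failed->callback_param);
}

static void note(void *arg) { printf("submit of %s failed\n", (const char *)arg); }

int main(void)
{
        struct desc with_cb = { note, "descnew" };
        struct desc without_cb = { NULL, NULL };

        report_submit_failure(&with_cb);
        report_submit_failure(&without_cb);     /* safely does nothing */
        return 0;
}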
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 000dc67b85b7..3391e6739d06 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | |||
2658 | * the memory system completely. A command line option allows to force-enable | 2658 | * the memory system completely. A command line option allows to force-enable |
2659 | * hardware ECC later in amd64_enable_ecc_error_reporting(). | 2659 | * hardware ECC later in amd64_enable_ecc_error_reporting(). |
2660 | */ | 2660 | */ |
2661 | static const char *ecc_warning = | 2661 | static const char *ecc_msg = |
2662 | "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" | 2662 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" |
2663 | " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" | 2663 | " Either enable ECC checking or force module loading by setting " |
2664 | " Also, use of the override can cause unknown side effects.\n"; | 2664 | "'ecc_enable_override'.\n" |
2665 | " (Note that use of the override may cause unknown side effects.)\n"; | ||
2665 | 2666 | ||
2666 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | 2667 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) |
2667 | { | 2668 | { |
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | |||
2673 | 2674 | ||
2674 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | 2675 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); |
2675 | if (!ecc_enabled) | 2676 | if (!ecc_enabled) |
2676 | amd64_printk(KERN_WARNING, "This node reports that Memory ECC " | 2677 | amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " |
2677 | "is currently disabled, set F3x%x[22] (%s).\n", | 2678 | "is currently disabled, set F3x%x[22] (%s).\n", |
2678 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); | 2679 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); |
2679 | else | 2680 | else |
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | |||
2681 | 2682 | ||
2682 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); | 2683 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); |
2683 | if (!nb_mce_en) | 2684 | if (!nb_mce_en) |
2684 | amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " | 2685 | amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " |
2685 | "0x%08x[4] on node %d to enable.\n", | 2686 | "0x%08x[4] on node %d to enable.\n", |
2686 | MSR_IA32_MCG_CTL, pvt->mc_node_id); | 2687 | MSR_IA32_MCG_CTL, pvt->mc_node_id); |
2687 | 2688 | ||
2688 | if (!ecc_enabled || !nb_mce_en) { | 2689 | if (!ecc_enabled || !nb_mce_en) { |
2689 | if (!ecc_enable_override) { | 2690 | if (!ecc_enable_override) { |
2690 | amd64_printk(KERN_WARNING, "%s", ecc_warning); | 2691 | amd64_printk(KERN_NOTICE, "%s", ecc_msg); |
2691 | return -ENODEV; | 2692 | return -ENODEV; |
2692 | } | 2693 | } |
2693 | ecc_enable_override = 0; | 2694 | ecc_enable_override = 0; |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index cf27402af97b..ecd5928d7110 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) | |||
804 | end <<= (24 - PAGE_SHIFT); | 804 | end <<= (24 - PAGE_SHIFT); |
805 | end |= (1 << (24 - PAGE_SHIFT)) - 1; | 805 | end |= (1 << (24 - PAGE_SHIFT)) - 1; |
806 | 806 | ||
807 | csrow->first_page = start >> PAGE_SHIFT; | 807 | csrow->first_page = start; |
808 | csrow->last_page = end >> PAGE_SHIFT; | 808 | csrow->last_page = end; |
809 | csrow->nr_pages = end + 1 - start; | 809 | csrow->nr_pages = end + 1 - start; |
810 | csrow->grain = 8; | 810 | csrow->grain = 8; |
811 | csrow->mtype = mtype; | 811 | csrow->mtype = mtype; |
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op, | |||
892 | 892 | ||
893 | mpc85xx_init_csrows(mci); | 893 | mpc85xx_init_csrows(mci); |
894 | 894 | ||
895 | #ifdef CONFIG_EDAC_DEBUG | ||
896 | edac_mc_register_mcidev_debug((struct attribute **)debug_attr); | ||
897 | #endif | ||
898 | |||
899 | /* store the original error disable bits */ | 895 | /* store the original error disable bits */ |
900 | orig_ddr_err_disable = | 896 | orig_ddr_err_disable = |
901 | in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); | 897 | in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); |
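In the mpc85xx hunk, start and end are already page numbers after the (24 - PAGE_SHIFT) shift, so shifting them again by PAGE_SHIFT scaled them down incorrectly. A tiny sketch of the unit mismatch (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12           /* assumption: 4 KiB pages */

/* A chip-select boundary register counts 16 MiB (2^24) units.  One shift
 * by (24 - PAGE_SHIFT) already yields a page number; shifting again by
 * PAGE_SHIFT -- the bug removed by the hunk -- treats that page number
 * as a byte address. */
int main(void)
{
        unsigned long bound_16m = 3;                            /* 48 MiB boundary */
        unsigned long first_page = bound_16m << (24 - PAGE_SHIFT);

        printf("correct first_page = %lu\n", first_page);              /* 12288 */
        printf("double-shifted     = %lu\n", first_page >> PAGE_SHIFT);/* 3     */
        return 0;
}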
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 7083bcc1b9c7..5045156c5313 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c | |||
@@ -57,6 +57,8 @@ static LIST_HEAD(descriptor_list); | |||
57 | static int descriptor_count; | 57 | static int descriptor_count; |
58 | 58 | ||
59 | static __be32 tmp_config_rom[256]; | 59 | static __be32 tmp_config_rom[256]; |
60 | /* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ | ||
61 | static size_t config_rom_length = 1 + 4 + 1 + 1; | ||
60 | 62 | ||
61 | #define BIB_CRC(v) ((v) << 0) | 63 | #define BIB_CRC(v) ((v) << 0) |
62 | #define BIB_CRC_LENGTH(v) ((v) << 16) | 64 | #define BIB_CRC_LENGTH(v) ((v) << 16) |
@@ -73,7 +75,7 @@ static __be32 tmp_config_rom[256]; | |||
73 | #define BIB_CMC ((1) << 30) | 75 | #define BIB_CMC ((1) << 30) |
74 | #define BIB_IMC ((1) << 31) | 76 | #define BIB_IMC ((1) << 31) |
75 | 77 | ||
76 | static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom) | 78 | static void generate_config_rom(struct fw_card *card, __be32 *config_rom) |
77 | { | 79 | { |
78 | struct fw_descriptor *desc; | 80 | struct fw_descriptor *desc; |
79 | int i, j, k, length; | 81 | int i, j, k, length; |
@@ -130,23 +132,30 @@ static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom) | |||
130 | for (i = 0; i < j; i += length + 1) | 132 | for (i = 0; i < j; i += length + 1) |
131 | length = fw_compute_block_crc(config_rom + i); | 133 | length = fw_compute_block_crc(config_rom + i); |
132 | 134 | ||
133 | return j; | 135 | WARN_ON(j != config_rom_length); |
134 | } | 136 | } |
135 | 137 | ||
136 | static void update_config_roms(void) | 138 | static void update_config_roms(void) |
137 | { | 139 | { |
138 | struct fw_card *card; | 140 | struct fw_card *card; |
139 | size_t length; | ||
140 | 141 | ||
141 | list_for_each_entry (card, &card_list, link) { | 142 | list_for_each_entry (card, &card_list, link) { |
142 | length = generate_config_rom(card, tmp_config_rom); | 143 | generate_config_rom(card, tmp_config_rom); |
143 | card->driver->set_config_rom(card, tmp_config_rom, length); | 144 | card->driver->set_config_rom(card, tmp_config_rom, |
145 | config_rom_length); | ||
144 | } | 146 | } |
145 | } | 147 | } |
146 | 148 | ||
149 | static size_t required_space(struct fw_descriptor *desc) | ||
150 | { | ||
151 | /* descriptor + entry into root dir + optional immediate entry */ | ||
152 | return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); | ||
153 | } | ||
154 | |||
147 | int fw_core_add_descriptor(struct fw_descriptor *desc) | 155 | int fw_core_add_descriptor(struct fw_descriptor *desc) |
148 | { | 156 | { |
149 | size_t i; | 157 | size_t i; |
158 | int ret; | ||
150 | 159 | ||
151 | /* | 160 | /* |
152 | * Check descriptor is valid; the length of all blocks in the | 161 | * Check descriptor is valid; the length of all blocks in the |
@@ -162,15 +171,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc) | |||
162 | 171 | ||
163 | mutex_lock(&card_mutex); | 172 | mutex_lock(&card_mutex); |
164 | 173 | ||
165 | list_add_tail(&desc->link, &descriptor_list); | 174 | if (config_rom_length + required_space(desc) > 256) { |
166 | descriptor_count++; | 175 | ret = -EBUSY; |
167 | if (desc->immediate > 0) | 176 | } else { |
177 | list_add_tail(&desc->link, &descriptor_list); | ||
178 | config_rom_length += required_space(desc); | ||
168 | descriptor_count++; | 179 | descriptor_count++; |
169 | update_config_roms(); | 180 | if (desc->immediate > 0) |
181 | descriptor_count++; | ||
182 | update_config_roms(); | ||
183 | ret = 0; | ||
184 | } | ||
170 | 185 | ||
171 | mutex_unlock(&card_mutex); | 186 | mutex_unlock(&card_mutex); |
172 | 187 | ||
173 | return 0; | 188 | return ret; |
174 | } | 189 | } |
175 | EXPORT_SYMBOL(fw_core_add_descriptor); | 190 | EXPORT_SYMBOL(fw_core_add_descriptor); |
176 | 191 | ||
@@ -179,6 +194,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) | |||
179 | mutex_lock(&card_mutex); | 194 | mutex_lock(&card_mutex); |
180 | 195 | ||
181 | list_del(&desc->link); | 196 | list_del(&desc->link); |
197 | config_rom_length -= required_space(desc); | ||
182 | descriptor_count--; | 198 | descriptor_count--; |
183 | if (desc->immediate > 0) | 199 | if (desc->immediate > 0) |
184 | descriptor_count--; | 200 | descriptor_count--; |
@@ -428,7 +444,6 @@ EXPORT_SYMBOL(fw_card_initialize); | |||
428 | int fw_card_add(struct fw_card *card, | 444 | int fw_card_add(struct fw_card *card, |
429 | u32 max_receive, u32 link_speed, u64 guid) | 445 | u32 max_receive, u32 link_speed, u64 guid) |
430 | { | 446 | { |
431 | size_t length; | ||
432 | int ret; | 447 | int ret; |
433 | 448 | ||
434 | card->max_receive = max_receive; | 449 | card->max_receive = max_receive; |
@@ -437,8 +452,8 @@ int fw_card_add(struct fw_card *card, | |||
437 | 452 | ||
438 | mutex_lock(&card_mutex); | 453 | mutex_lock(&card_mutex); |
439 | 454 | ||
440 | length = generate_config_rom(card, tmp_config_rom); | 455 | generate_config_rom(card, tmp_config_rom); |
441 | ret = card->driver->enable(card, tmp_config_rom, length); | 456 | ret = card->driver->enable(card, tmp_config_rom, config_rom_length); |
442 | if (ret == 0) | 457 | if (ret == 0) |
443 | list_add_tail(&card->link, &card_list); | 458 | list_add_tail(&card->link, &card_list); |
444 | 459 | ||
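The core-card change keeps a running config-ROM length and rejects descriptors that would push it past 256 quadlets, instead of discovering the overflow while regenerating the ROM. A rough user-space sketch of that accounting (plain ints and -1 standing in for the kernel types and -EBUSY):

#include <stddef.h>
#include <stdio.h>

#define ROM_QUADLETS 256

struct descriptor { size_t length; unsigned immediate; };

/* Running length starts at the fixed header/bus-info/root-dir quadlets,
 * matching the 1 + 4 + 1 + 1 initializer in the hunk. */
static size_t rom_length = 1 + 4 + 1 + 1;

static size_t required_space(const struct descriptor *d)
{
        /* descriptor block + its root-directory entry + optional immediate */
        return d->length + 1 + (d->immediate ? 1 : 0);
}

static int add_descriptor(const struct descriptor *d)
{
        if (rom_length + required_space(d) > ROM_QUADLETS)
                return -1;                      /* would overflow the config ROM */
        rom_length += required_space(d);
        return 0;
}

int main(void)
{
        struct descriptor small = { .length = 10,  .immediate = 1 };
        struct descriptor huge  = { .length = 300, .immediate = 0 };

        printf("%d %zu\n", add_descriptor(&small), rom_length);
        printf("%d %zu\n", add_descriptor(&huge), rom_length);  /* rejected */
        return 0;
}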
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index e6d63849e78e..4eeaed57e219 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/preempt.h> | 35 | #include <linux/preempt.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/string.h> | ||
38 | #include <linux/time.h> | 39 | #include <linux/time.h> |
39 | #include <linux/uaccess.h> | 40 | #include <linux/uaccess.h> |
40 | #include <linux/vmalloc.h> | 41 | #include <linux/vmalloc.h> |
@@ -595,13 +596,20 @@ static int ioctl_send_request(struct client *client, void *buffer) | |||
595 | client->device->max_speed); | 596 | client->device->max_speed); |
596 | } | 597 | } |
597 | 598 | ||
599 | static inline bool is_fcp_request(struct fw_request *request) | ||
600 | { | ||
601 | return request == NULL; | ||
602 | } | ||
603 | |||
598 | static void release_request(struct client *client, | 604 | static void release_request(struct client *client, |
599 | struct client_resource *resource) | 605 | struct client_resource *resource) |
600 | { | 606 | { |
601 | struct inbound_transaction_resource *r = container_of(resource, | 607 | struct inbound_transaction_resource *r = container_of(resource, |
602 | struct inbound_transaction_resource, resource); | 608 | struct inbound_transaction_resource, resource); |
603 | 609 | ||
604 | if (r->request) | 610 | if (is_fcp_request(r->request)) |
611 | kfree(r->data); | ||
612 | else | ||
605 | fw_send_response(client->device->card, r->request, | 613 | fw_send_response(client->device->card, r->request, |
606 | RCODE_CONFLICT_ERROR); | 614 | RCODE_CONFLICT_ERROR); |
607 | kfree(r); | 615 | kfree(r); |
@@ -616,6 +624,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
616 | struct address_handler_resource *handler = callback_data; | 624 | struct address_handler_resource *handler = callback_data; |
617 | struct inbound_transaction_resource *r; | 625 | struct inbound_transaction_resource *r; |
618 | struct inbound_transaction_event *e; | 626 | struct inbound_transaction_event *e; |
627 | void *fcp_frame = NULL; | ||
619 | int ret; | 628 | int ret; |
620 | 629 | ||
621 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | 630 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
@@ -627,6 +636,18 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
627 | r->data = payload; | 636 | r->data = payload; |
628 | r->length = length; | 637 | r->length = length; |
629 | 638 | ||
639 | if (is_fcp_request(request)) { | ||
640 | /* | ||
641 | * FIXME: Let core-transaction.c manage a | ||
642 | * single reference-counted copy? | ||
643 | */ | ||
644 | fcp_frame = kmemdup(payload, length, GFP_ATOMIC); | ||
645 | if (fcp_frame == NULL) | ||
646 | goto failed; | ||
647 | |||
648 | r->data = fcp_frame; | ||
649 | } | ||
650 | |||
630 | r->resource.release = release_request; | 651 | r->resource.release = release_request; |
631 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); | 652 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); |
632 | if (ret < 0) | 653 | if (ret < 0) |
@@ -640,13 +661,15 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
640 | e->request.closure = handler->closure; | 661 | e->request.closure = handler->closure; |
641 | 662 | ||
642 | queue_event(handler->client, &e->event, | 663 | queue_event(handler->client, &e->event, |
643 | &e->request, sizeof(e->request), payload, length); | 664 | &e->request, sizeof(e->request), r->data, length); |
644 | return; | 665 | return; |
645 | 666 | ||
646 | failed: | 667 | failed: |
647 | kfree(r); | 668 | kfree(r); |
648 | kfree(e); | 669 | kfree(e); |
649 | if (request) | 670 | kfree(fcp_frame); |
671 | |||
672 | if (!is_fcp_request(request)) | ||
650 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | 673 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); |
651 | } | 674 | } |
652 | 675 | ||
@@ -717,18 +740,17 @@ static int ioctl_send_response(struct client *client, void *buffer) | |||
717 | 740 | ||
718 | r = container_of(resource, struct inbound_transaction_resource, | 741 | r = container_of(resource, struct inbound_transaction_resource, |
719 | resource); | 742 | resource); |
720 | if (r->request) { | 743 | if (is_fcp_request(r->request)) |
721 | if (request->length < r->length) | 744 | goto out; |
722 | r->length = request->length; | 745 | |
723 | if (copy_from_user(r->data, u64_to_uptr(request->data), | 746 | if (request->length < r->length) |
724 | r->length)) { | 747 | r->length = request->length; |
725 | ret = -EFAULT; | 748 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { |
726 | kfree(r->request); | 749 | ret = -EFAULT; |
727 | goto out; | 750 | kfree(r->request); |
728 | } | 751 | goto out; |
729 | fw_send_response(client->device->card, r->request, | ||
730 | request->rcode); | ||
731 | } | 752 | } |
753 | fw_send_response(client->device->card, r->request, request->rcode); | ||
732 | out: | 754 | out: |
733 | kfree(r); | 755 | kfree(r); |
734 | 756 | ||
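In core-cdev, an FCP request arrives with request == NULL and a payload that is only valid for the duration of the handler, so the hunk duplicates it (kmemdup) and frees the copy on release instead of sending a response. A sketch of that ownership rule with plain malloc/memcpy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct inbound { void *data; size_t length; int is_fcp; };

/* If the payload only stays valid for the duration of the callback (the
 * FCP case), keep a private copy; otherwise just reference it. */
static struct inbound *capture(void *payload, size_t length, int is_fcp)
{
        struct inbound *r = malloc(sizeof(*r));

        if (!r)
                return NULL;
        r->is_fcp = is_fcp;
        r->length = length;
        r->data = payload;

        if (is_fcp) {
                r->data = malloc(length);               /* kmemdup() equivalent */
                if (!r->data) { free(r); return NULL; }
                memcpy(r->data, payload, length);
        }
        return r;
}

static void release(struct inbound *r)
{
        if (r->is_fcp)
                free(r->data);          /* we own the copy; no response to send */
        free(r);
}

int main(void)
{
        char frame[8] = "fcp!";
        struct inbound *r = capture(frame, sizeof(frame), 1);

        printf("%s\n", (char *)r->data);
        release(r);
        return 0;
}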
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index cbaf420c36c5..2d3dc7ded0a9 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, | |||
893 | 893 | ||
894 | static struct kmem_cache *fwnet_packet_task_cache; | 894 | static struct kmem_cache *fwnet_packet_task_cache; |
895 | 895 | ||
896 | static void fwnet_free_ptask(struct fwnet_packet_task *ptask) | ||
897 | { | ||
898 | dev_kfree_skb_any(ptask->skb); | ||
899 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
900 | } | ||
901 | |||
896 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); | 902 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); |
897 | 903 | ||
898 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | 904 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) |
899 | { | 905 | { |
900 | struct fwnet_device *dev; | 906 | struct fwnet_device *dev = ptask->dev; |
901 | unsigned long flags; | 907 | unsigned long flags; |
902 | 908 | bool free; | |
903 | dev = ptask->dev; | ||
904 | 909 | ||
905 | spin_lock_irqsave(&dev->lock, flags); | 910 | spin_lock_irqsave(&dev->lock, flags); |
906 | list_del(&ptask->pt_link); | ||
907 | spin_unlock_irqrestore(&dev->lock, flags); | ||
908 | 911 | ||
909 | ptask->outstanding_pkts--; /* FIXME access inside lock */ | 912 | ptask->outstanding_pkts--; |
913 | |||
914 | /* Check whether we or the networking TX soft-IRQ is last user. */ | ||
915 | free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); | ||
916 | |||
917 | if (ptask->outstanding_pkts == 0) | ||
918 | list_del(&ptask->pt_link); | ||
919 | |||
920 | spin_unlock_irqrestore(&dev->lock, flags); | ||
910 | 921 | ||
911 | if (ptask->outstanding_pkts > 0) { | 922 | if (ptask->outstanding_pkts > 0) { |
912 | u16 dg_size; | 923 | u16 dg_size; |
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |||
951 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; | 962 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; |
952 | } | 963 | } |
953 | fwnet_send_packet(ptask); | 964 | fwnet_send_packet(ptask); |
954 | } else { | ||
955 | dev_kfree_skb_any(ptask->skb); | ||
956 | kmem_cache_free(fwnet_packet_task_cache, ptask); | ||
957 | } | 965 | } |
966 | |||
967 | if (free) | ||
968 | fwnet_free_ptask(ptask); | ||
958 | } | 969 | } |
959 | 970 | ||
960 | static void fwnet_write_complete(struct fw_card *card, int rcode, | 971 | static void fwnet_write_complete(struct fw_card *card, int rcode, |
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
977 | unsigned tx_len; | 988 | unsigned tx_len; |
978 | struct rfc2734_header *bufhdr; | 989 | struct rfc2734_header *bufhdr; |
979 | unsigned long flags; | 990 | unsigned long flags; |
991 | bool free; | ||
980 | 992 | ||
981 | dev = ptask->dev; | 993 | dev = ptask->dev; |
982 | tx_len = ptask->max_payload; | 994 | tx_len = ptask->max_payload; |
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
1022 | generation, SCODE_100, 0ULL, ptask->skb->data, | 1034 | generation, SCODE_100, 0ULL, ptask->skb->data, |
1023 | tx_len + 8, fwnet_write_complete, ptask); | 1035 | tx_len + 8, fwnet_write_complete, ptask); |
1024 | 1036 | ||
1025 | /* FIXME race? */ | ||
1026 | spin_lock_irqsave(&dev->lock, flags); | 1037 | spin_lock_irqsave(&dev->lock, flags); |
1027 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | 1038 | |
1039 | /* If the AT tasklet already ran, we may be last user. */ | ||
1040 | free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); | ||
1041 | if (!free) | ||
1042 | list_add_tail(&ptask->pt_link, &dev->broadcasted_list); | ||
1043 | |||
1028 | spin_unlock_irqrestore(&dev->lock, flags); | 1044 | spin_unlock_irqrestore(&dev->lock, flags); |
1029 | 1045 | ||
1030 | return 0; | 1046 | goto out; |
1031 | } | 1047 | } |
1032 | 1048 | ||
1033 | fw_send_request(dev->card, &ptask->transaction, | 1049 | fw_send_request(dev->card, &ptask->transaction, |
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) | |||
1035 | ptask->generation, ptask->speed, ptask->fifo_addr, | 1051 | ptask->generation, ptask->speed, ptask->fifo_addr, |
1036 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); | 1052 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); |
1037 | 1053 | ||
1038 | /* FIXME race? */ | ||
1039 | spin_lock_irqsave(&dev->lock, flags); | 1054 | spin_lock_irqsave(&dev->lock, flags); |
1040 | list_add_tail(&ptask->pt_link, &dev->sent_list); | 1055 | |
1056 | /* If the AT tasklet already ran, we may be last user. */ | ||
1057 | free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); | ||
1058 | if (!free) | ||
1059 | list_add_tail(&ptask->pt_link, &dev->sent_list); | ||
1060 | |||
1041 | spin_unlock_irqrestore(&dev->lock, flags); | 1061 | spin_unlock_irqrestore(&dev->lock, flags); |
1042 | 1062 | ||
1043 | dev->netdev->trans_start = jiffies; | 1063 | dev->netdev->trans_start = jiffies; |
1064 | out: | ||
1065 | if (free) | ||
1066 | fwnet_free_ptask(ptask); | ||
1044 | 1067 | ||
1045 | return 0; | 1068 | return 0; |
1046 | } | 1069 | } |
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) | |||
1298 | spin_unlock_irqrestore(&dev->lock, flags); | 1321 | spin_unlock_irqrestore(&dev->lock, flags); |
1299 | 1322 | ||
1300 | ptask->max_payload = max_payload; | 1323 | ptask->max_payload = max_payload; |
1324 | INIT_LIST_HEAD(&ptask->pt_link); | ||
1325 | |||
1301 | fwnet_send_packet(ptask); | 1326 | fwnet_send_packet(ptask); |
1302 | 1327 | ||
1303 | return NETDEV_TX_OK; | 1328 | return NETDEV_TX_OK; |
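The net.c change closes a race by letting whichever of the submit path and the completion path runs last free the packet task, decided under dev->lock via outstanding_pkts and pt_link membership. A compressed sketch of that "last user frees" handshake (a pthread mutex and an on_list flag stand in for the spinlock and list):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ptask {
        pthread_mutex_t lock;
        int outstanding_pkts;
        int on_list;                    /* stands in for !list_empty(&pt_link) */
};

static void free_ptask(struct ptask *p) { printf("freeing ptask\n"); free(p); }

static void transmit_done(struct ptask *p)      /* completion path */
{
        int last;

        pthread_mutex_lock(&p->lock);
        p->outstanding_pkts--;
        last = (p->outstanding_pkts == 0 && p->on_list);   /* submit already ran */
        if (p->outstanding_pkts == 0)
                p->on_list = 0;
        pthread_mutex_unlock(&p->lock);

        if (last)
                free_ptask(p);
}

static void send_packet(struct ptask *p)        /* submit path */
{
        int last;

        pthread_mutex_lock(&p->lock);
        last = (p->outstanding_pkts == 0 && !p->on_list);  /* completion beat us */
        if (!last)
                p->on_list = 1;
        pthread_mutex_unlock(&p->lock);

        if (last)
                free_ptask(p);
}

int main(void)
{
        struct ptask *p = malloc(sizeof(*p));

        pthread_mutex_init(&p->lock, NULL);
        p->outstanding_pkts = 1;
        p->on_list = 0;

        /* Completion fires before send_packet() finishes: submit frees. */
        transmit_done(p);
        send_packet(p);
        return 0;
}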
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index a61571c63c59..43ebf337b131 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2101 | u32 payload_index, payload_end_index, next_page_index; | 2101 | u32 payload_index, payload_end_index, next_page_index; |
2102 | int page, end_page, i, length, offset; | 2102 | int page, end_page, i, length, offset; |
2103 | 2103 | ||
2104 | /* | ||
2105 | * FIXME: Cycle lost behavior should be configurable: lose | ||
2106 | * packet, retransmit or terminate.. | ||
2107 | */ | ||
2108 | |||
2109 | p = packet; | 2104 | p = packet; |
2110 | payload_index = payload; | 2105 | payload_index = payload; |
2111 | 2106 | ||
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2135 | if (!p->skip) { | 2130 | if (!p->skip) { |
2136 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); | 2131 | d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); |
2137 | d[0].req_count = cpu_to_le16(8); | 2132 | d[0].req_count = cpu_to_le16(8); |
2133 | /* | ||
2134 | * Link the skip address to this descriptor itself. This causes | ||
2135 | * a context to skip a cycle whenever lost cycles or FIFO | ||
2136 | * overruns occur, without dropping the data. The application | ||
2137 | * should then decide whether this is an error condition or not. | ||
2138 | * FIXME: Make the context's cycle-lost behaviour configurable? | ||
2139 | */ | ||
2140 | d[0].branch_address = cpu_to_le32(d_bus | z); | ||
2138 | 2141 | ||
2139 | header = (__le32 *) &d[1]; | 2142 | header = (__le32 *) &d[1]; |
2140 | header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | | 2143 | header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | |
@@ -2420,6 +2423,7 @@ static void ohci_pmac_off(struct pci_dev *dev) | |||
2420 | 2423 | ||
2421 | #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT | 2424 | #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT |
2422 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 | 2425 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 |
2426 | #define PCI_DEVICE_ID_TI_TSB43AB23 0x8024 | ||
2423 | 2427 | ||
2424 | static int __devinit pci_probe(struct pci_dev *dev, | 2428 | static int __devinit pci_probe(struct pci_dev *dev, |
2425 | const struct pci_device_id *ent) | 2429 | const struct pci_device_id *ent) |
@@ -2488,7 +2492,8 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2488 | #if !defined(CONFIG_X86_32) | 2492 | #if !defined(CONFIG_X86_32) |
2489 | /* dual-buffer mode is broken with descriptor addresses above 2G */ | 2493 | /* dual-buffer mode is broken with descriptor addresses above 2G */ |
2490 | if (dev->vendor == PCI_VENDOR_ID_TI && | 2494 | if (dev->vendor == PCI_VENDOR_ID_TI && |
2491 | dev->device == PCI_DEVICE_ID_TI_TSB43AB22) | 2495 | (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 || |
2496 | dev->device == PCI_DEVICE_ID_TI_TSB43AB23)) | ||
2492 | ohci->use_dualbuffer = false; | 2497 | ohci->use_dualbuffer = false; |
2493 | #endif | 2498 | #endif |
2494 | 2499 | ||
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 96eddd17e050..305c59003963 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -66,6 +66,8 @@ config DRM_RADEON | |||
66 | 66 | ||
67 | If M is selected, the module will be called radeon. | 67 | If M is selected, the module will be called radeon. |
68 | 68 | ||
69 | source "drivers/gpu/drm/radeon/Kconfig" | ||
70 | |||
69 | config DRM_I810 | 71 | config DRM_I810 |
70 | tristate "Intel I810" | 72 | tristate "Intel I810" |
71 | depends on DRM && AGP && AGP_INTEL | 73 | depends on DRM && AGP && AGP_INTEL |
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index a1fce68e3bbe..17be051b7aa3 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c | |||
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga | |||
113 | 113 | ||
114 | if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { | 114 | if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { |
115 | DRM_ERROR("fail to set dma mask to 0x%Lx\n", | 115 | DRM_ERROR("fail to set dma mask to 0x%Lx\n", |
116 | gart_info->table_mask); | 116 | (unsigned long long)gart_info->table_mask); |
117 | ret = 1; | 117 | ret = 1; |
118 | goto done; | 118 | goto done; |
119 | } | 119 | } |
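The ati_pcigart hunk is a printf-format fix: %Lx wants an unsigned long long, and table_mask is not necessarily that type on every configuration. Two equivalent ways to print a 64-bit value portably:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t table_mask = 0xffffffffULL;

        /* Either cast to the type the format expects, as the hunk does ... */
        printf("mask 0x%llx\n", (unsigned long long)table_mask);

        /* ... or let <inttypes.h> pick the right format for the type. */
        printf("mask 0x%" PRIx64 "\n", table_mask);
        return 0;
}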
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index defcaf108460..ab6c97330412 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, | |||
598 | return mode; | 598 | return mode; |
599 | } | 599 | } |
600 | 600 | ||
601 | /* | ||
602 | * EDID is delightfully ambiguous about how interlaced modes are to be | ||
603 | * encoded. Our internal representation is of frame height, but some | ||
604 | * HDTV detailed timings are encoded as field height. | ||
605 | * | ||
606 | * The format list here is from CEA, in frame size. Technically we | ||
607 | * should be checking refresh rate too. Whatever. | ||
608 | */ | ||
609 | static void | ||
610 | drm_mode_do_interlace_quirk(struct drm_display_mode *mode, | ||
611 | struct detailed_pixel_timing *pt) | ||
612 | { | ||
613 | int i; | ||
614 | static const struct { | ||
615 | int w, h; | ||
616 | } cea_interlaced[] = { | ||
617 | { 1920, 1080 }, | ||
618 | { 720, 480 }, | ||
619 | { 1440, 480 }, | ||
620 | { 2880, 480 }, | ||
621 | { 720, 576 }, | ||
622 | { 1440, 576 }, | ||
623 | { 2880, 576 }, | ||
624 | }; | ||
625 | static const int n_sizes = | ||
626 | sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); | ||
627 | |||
628 | if (!(pt->misc & DRM_EDID_PT_INTERLACED)) | ||
629 | return; | ||
630 | |||
631 | for (i = 0; i < n_sizes; i++) { | ||
632 | if ((mode->hdisplay == cea_interlaced[i].w) && | ||
633 | (mode->vdisplay == cea_interlaced[i].h / 2)) { | ||
634 | mode->vdisplay *= 2; | ||
635 | mode->vsync_start *= 2; | ||
636 | mode->vsync_end *= 2; | ||
637 | mode->vtotal *= 2; | ||
638 | mode->vtotal |= 1; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
643 | } | ||
644 | |||
601 | /** | 645 | /** |
602 | * drm_mode_detailed - create a new mode from an EDID detailed timing section | 646 | * drm_mode_detailed - create a new mode from an EDID detailed timing section |
603 | * @dev: DRM device (needed to create new mode) | 647 | * @dev: DRM device (needed to create new mode) |
@@ -633,8 +677,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
633 | return NULL; | 677 | return NULL; |
634 | } | 678 | } |
635 | if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { | 679 | if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { |
636 | printk(KERN_WARNING "integrated sync not supported\n"); | 680 | printk(KERN_WARNING "composite sync not supported\n"); |
637 | return NULL; | ||
638 | } | 681 | } |
639 | 682 | ||
640 | /* it is incorrect if hsync/vsync width is zero */ | 683 | /* it is incorrect if hsync/vsync width is zero */ |
@@ -681,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
681 | 724 | ||
682 | drm_mode_set_name(mode); | 725 | drm_mode_set_name(mode); |
683 | 726 | ||
684 | if (pt->misc & DRM_EDID_PT_INTERLACED) | 727 | drm_mode_do_interlace_quirk(mode, pt); |
685 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | ||
686 | 728 | ||
687 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { | 729 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { |
688 | pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; | 730 | pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; |
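The EDID quirk treats certain CEA detailed timings as field-height and converts them to the frame-height form used internally. A sketch of that conversion on illustrative 1080i numbers (made-up struct mode, not the DRM one):

#include <stdio.h>

struct mode { int hdisplay, vdisplay, vsync_start, vsync_end, vtotal; };

/* Double the vertical values and make vtotal odd, as the quirk does for
 * the known CEA interlaced sizes. */
static void field_to_frame(struct mode *m)
{
        m->vdisplay *= 2;
        m->vsync_start *= 2;
        m->vsync_end *= 2;
        m->vtotal *= 2;
        m->vtotal |= 1;
}

int main(void)
{
        /* 1920x540 fields become a 1920x1080 interlaced frame (1080i). */
        struct mode m = { 1920, 540, 542, 547, 562 };

        field_to_frame(&m);
        printf("%dx%d vtotal=%d\n", m.hdisplay, m.vdisplay, m.vtotal);
        return 0;
}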
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1c2b7d44ec05..0f9e90552dc4 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info) | |||
389 | break; | 389 | break; |
390 | /* Display: Off; HSync: On, VSync: On */ | 390 | /* Display: Off; HSync: On, VSync: On */ |
391 | case FB_BLANK_NORMAL: | 391 | case FB_BLANK_NORMAL: |
392 | drm_fb_helper_off(info, DRM_MODE_DPMS_ON); | 392 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); |
393 | break; | 393 | break; |
394 | /* Display: Off; HSync: Off, VSync: On */ | 394 | /* Display: Off; HSync: Off, VSync: On */ |
395 | case FB_BLANK_HSYNC_SUSPEND: | 395 | case FB_BLANK_HSYNC_SUSPEND: |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index e9dbb481c469..8bf3770f294e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) | |||
142 | if (IS_ERR(obj->filp)) | 142 | if (IS_ERR(obj->filp)) |
143 | goto free; | 143 | goto free; |
144 | 144 | ||
145 | /* Basically we want to disable the OOM killer and handle ENOMEM | ||
146 | * ourselves by sacrificing pages from cached buffers. | ||
147 | * XXX shmem_file_[gs]et_gfp_mask() | ||
148 | */ | ||
149 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, | ||
150 | GFP_HIGHUSER | | ||
151 | __GFP_COLD | | ||
152 | __GFP_FS | | ||
153 | __GFP_RECLAIMABLE | | ||
154 | __GFP_NORETRY | | ||
155 | __GFP_NOWARN | | ||
156 | __GFP_NOMEMALLOC); | ||
157 | |||
158 | kref_init(&obj->refcount); | 145 | kref_init(&obj->refcount); |
159 | kref_init(&obj->handlecount); | 146 | kref_init(&obj->handlecount); |
160 | obj->size = size; | 147 | obj->size = size; |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index cdec32977129..2ac074c8f5d2 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, | |||
405 | wasted += alignment - tmp; | 405 | wasted += alignment - tmp; |
406 | } | 406 | } |
407 | 407 | ||
408 | if (entry->size >= size + wasted) { | 408 | if (entry->size >= size + wasted && |
409 | (entry->start + wasted + size) <= end) { | ||
409 | if (!best_match) | 410 | if (!best_match) |
410 | return entry; | 411 | return entry; |
411 | if (entry->size < best_size) { | 412 | if (entry->size < best_size) { |
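drm_mm_search_free_in_range now also checks that the aligned allocation ends at or before the requested range end, not just that the hole is big enough. A small sketch of that predicate (hypothetical struct hole; the real code walks the free list and tracks a best match):

#include <stdio.h>

struct hole { unsigned long start, size; };

/* A hole fits an allocation of 'size' bytes aligned to 'align' inside a
 * range only if the aligned start plus the size stays within range_end --
 * the extra condition the hunk adds. */
static int hole_fits(const struct hole *h, unsigned long size,
                     unsigned long align, unsigned long range_end)
{
        unsigned long wasted = 0;

        if (align) {
                unsigned long tmp = h->start % align;

                if (tmp)
                        wasted = align - tmp;
        }

        return h->size >= size + wasted &&
               h->start + wasted + size <= range_end;
}

int main(void)
{
        struct hole h = { .start = 4096, .size = 8192 };

        printf("%d\n", hole_fits(&h, 4096, 4096, 16384));      /* fits */
        printf("%d\n", hole_fits(&h, 4096, 4096, 6144));       /* big enough, but past end */
        return 0;
}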
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9c9998c4dceb..a894ade03093 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) | |||
290 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | 290 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { |
291 | obj = obj_priv->obj; | 291 | obj = obj_priv->obj; |
292 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | 292 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { |
293 | ret = i915_gem_object_get_pages(obj); | 293 | ret = i915_gem_object_get_pages(obj, 0); |
294 | if (ret) { | 294 | if (ret) { |
295 | DRM_ERROR("Failed to get pages: %d\n", ret); | 295 | DRM_ERROR("Failed to get pages: %d\n", ret); |
296 | spin_unlock(&dev_priv->mm.active_list_lock); | 296 | spin_unlock(&dev_priv->mm.active_list_lock); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e660ac07f3b2..2307f98349f7 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
735 | if (cmdbuf->num_cliprects) { | 735 | if (cmdbuf->num_cliprects) { |
736 | cliprects = kcalloc(cmdbuf->num_cliprects, | 736 | cliprects = kcalloc(cmdbuf->num_cliprects, |
737 | sizeof(struct drm_clip_rect), GFP_KERNEL); | 737 | sizeof(struct drm_clip_rect), GFP_KERNEL); |
738 | if (cliprects == NULL) | 738 | if (cliprects == NULL) { |
739 | ret = -ENOMEM; | ||
739 | goto fail_batch_free; | 740 | goto fail_batch_free; |
741 | } | ||
740 | 742 | ||
741 | ret = copy_from_user(cliprects, cmdbuf->cliprects, | 743 | ret = copy_from_user(cliprects, cmdbuf->cliprects, |
742 | cmdbuf->num_cliprects * | 744 | cmdbuf->num_cliprects * |
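The i915_cmdbuffer fix is the classic goto-unwind pitfall: the error code must be set before jumping to the shared cleanup label, or the function returns a stale "success". A compact sketch of the pattern (hypothetical build_cliprects helper, not the driver function):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int build_cliprects(size_t n, void **out)
{
        int ret = 0;
        void *batch = malloc(64);
        void *cliprects = NULL;

        if (!batch)
                return -ENOMEM;

        if (n) {
                cliprects = calloc(n, 16);
                if (!cliprects) {
                        ret = -ENOMEM;          /* the line the patch adds */
                        goto fail_batch_free;
                }
        }

        *out = cliprects;
        free(batch);
        return 0;

fail_batch_free:
        free(batch);
        return ret;
}

int main(void)
{
        void *p = NULL;

        printf("%d\n", build_cliprects(4, &p));
        free(p);
        return 0;
}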
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 46d88965852a..cf4cb3e9a0c2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = { | |||
120 | 120 | ||
121 | const static struct intel_device_info intel_pineview_info = { | 121 | const static struct intel_device_info intel_pineview_info = { |
122 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | 122 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, |
123 | .has_pipe_cxsr = 1, | 123 | .need_gfx_hws = 1, |
124 | .has_hotplug = 1, | 124 | .has_hotplug = 1, |
125 | }; | 125 | }; |
126 | 126 | ||
@@ -174,26 +174,20 @@ const static struct pci_device_id pciidlist[] = { | |||
174 | MODULE_DEVICE_TABLE(pci, pciidlist); | 174 | MODULE_DEVICE_TABLE(pci, pciidlist); |
175 | #endif | 175 | #endif |
176 | 176 | ||
177 | static int i915_suspend(struct drm_device *dev, pm_message_t state) | 177 | static int i915_drm_freeze(struct drm_device *dev) |
178 | { | 178 | { |
179 | struct drm_i915_private *dev_priv = dev->dev_private; | 179 | struct drm_i915_private *dev_priv = dev->dev_private; |
180 | 180 | ||
181 | if (!dev || !dev_priv) { | ||
182 | DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv); | ||
183 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | ||
184 | return -ENODEV; | ||
185 | } | ||
186 | |||
187 | if (state.event == PM_EVENT_PRETHAW) | ||
188 | return 0; | ||
189 | |||
190 | pci_save_state(dev->pdev); | 181 | pci_save_state(dev->pdev); |
191 | 182 | ||
192 | /* If KMS is active, we do the leavevt stuff here */ | 183 | /* If KMS is active, we do the leavevt stuff here */ |
193 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 184 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
194 | if (i915_gem_idle(dev)) | 185 | int error = i915_gem_idle(dev); |
186 | if (error) { | ||
195 | dev_err(&dev->pdev->dev, | 187 | dev_err(&dev->pdev->dev, |
196 | "GEM idle failed, resume may fail\n"); | 188 | "GEM idle failed, resume might fail\n"); |
189 | return error; | ||
190 | } | ||
197 | drm_irq_uninstall(dev); | 191 | drm_irq_uninstall(dev); |
198 | } | 192 | } |
199 | 193 | ||
@@ -201,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
201 | 195 | ||
202 | intel_opregion_free(dev, 1); | 196 | intel_opregion_free(dev, 1); |
203 | 197 | ||
198 | /* Modeset on resume, not lid events */ | ||
199 | dev_priv->modeset_on_lid = 0; | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int i915_suspend(struct drm_device *dev, pm_message_t state) | ||
205 | { | ||
206 | int error; | ||
207 | |||
208 | if (!dev || !dev->dev_private) { | ||
209 | DRM_ERROR("dev: %p\n", dev); | ||
210 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | ||
211 | return -ENODEV; | ||
212 | } | ||
213 | |||
214 | if (state.event == PM_EVENT_PRETHAW) | ||
215 | return 0; | ||
216 | |||
217 | error = i915_drm_freeze(dev); | ||
218 | if (error) | ||
219 | return error; | ||
220 | |||
204 | if (state.event == PM_EVENT_SUSPEND) { | 221 | if (state.event == PM_EVENT_SUSPEND) { |
205 | /* Shut down the device */ | 222 | /* Shut down the device */ |
206 | pci_disable_device(dev->pdev); | 223 | pci_disable_device(dev->pdev); |
207 | pci_set_power_state(dev->pdev, PCI_D3hot); | 224 | pci_set_power_state(dev->pdev, PCI_D3hot); |
208 | } | 225 | } |
209 | 226 | ||
210 | /* Modeset on resume, not lid events */ | ||
211 | dev_priv->modeset_on_lid = 0; | ||
212 | |||
213 | return 0; | 227 | return 0; |
214 | } | 228 | } |
215 | 229 | ||
216 | static int i915_resume(struct drm_device *dev) | 230 | static int i915_drm_thaw(struct drm_device *dev) |
217 | { | 231 | { |
218 | struct drm_i915_private *dev_priv = dev->dev_private; | 232 | struct drm_i915_private *dev_priv = dev->dev_private; |
219 | int ret = 0; | 233 | int error = 0; |
220 | |||
221 | if (pci_enable_device(dev->pdev)) | ||
222 | return -1; | ||
223 | pci_set_master(dev->pdev); | ||
224 | 234 | ||
225 | i915_restore_state(dev); | 235 | i915_restore_state(dev); |
226 | 236 | ||
@@ -231,21 +241,28 @@ static int i915_resume(struct drm_device *dev) | |||
231 | mutex_lock(&dev->struct_mutex); | 241 | mutex_lock(&dev->struct_mutex); |
232 | dev_priv->mm.suspended = 0; | 242 | dev_priv->mm.suspended = 0; |
233 | 243 | ||
234 | ret = i915_gem_init_ringbuffer(dev); | 244 | error = i915_gem_init_ringbuffer(dev); |
235 | if (ret != 0) | ||
236 | ret = -1; | ||
237 | mutex_unlock(&dev->struct_mutex); | 245 | mutex_unlock(&dev->struct_mutex); |
238 | 246 | ||
239 | drm_irq_install(dev); | 247 | drm_irq_install(dev); |
240 | } | 248 | |
241 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
242 | /* Resume the modeset for every activated CRTC */ | 249 | /* Resume the modeset for every activated CRTC */ |
243 | drm_helper_resume_force_mode(dev); | 250 | drm_helper_resume_force_mode(dev); |
244 | } | 251 | } |
245 | 252 | ||
246 | dev_priv->modeset_on_lid = 0; | 253 | dev_priv->modeset_on_lid = 0; |
247 | 254 | ||
248 | return ret; | 255 | return error; |
256 | } | ||
257 | |||
258 | static int i915_resume(struct drm_device *dev) | ||
259 | { | ||
260 | if (pci_enable_device(dev->pdev)) | ||
261 | return -EIO; | ||
262 | |||
263 | pci_set_master(dev->pdev); | ||
264 | |||
265 | return i915_drm_thaw(dev); | ||
249 | } | 266 | } |
250 | 267 | ||
251 | /** | 268 | /** |
@@ -386,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev) | |||
386 | drm_put_dev(dev); | 403 | drm_put_dev(dev); |
387 | } | 404 | } |
388 | 405 | ||
389 | static int | 406 | static int i915_pm_suspend(struct device *dev) |
390 | i915_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
391 | { | 407 | { |
392 | struct drm_device *dev = pci_get_drvdata(pdev); | 408 | struct pci_dev *pdev = to_pci_dev(dev); |
409 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
410 | int error; | ||
393 | 411 | ||
394 | return i915_suspend(dev, state); | 412 | if (!drm_dev || !drm_dev->dev_private) { |
395 | } | 413 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); |
414 | return -ENODEV; | ||
415 | } | ||
396 | 416 | ||
397 | static int | 417 | error = i915_drm_freeze(drm_dev); |
398 | i915_pci_resume(struct pci_dev *pdev) | 418 | if (error) |
399 | { | 419 | return error; |
400 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
401 | 420 | ||
402 | return i915_resume(dev); | 421 | pci_disable_device(pdev); |
403 | } | 422 | pci_set_power_state(pdev, PCI_D3hot); |
404 | 423 | ||
405 | static int | 424 | return 0; |
406 | i915_pm_suspend(struct device *dev) | ||
407 | { | ||
408 | return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND); | ||
409 | } | 425 | } |
410 | 426 | ||
411 | static int | 427 | static int i915_pm_resume(struct device *dev) |
412 | i915_pm_resume(struct device *dev) | ||
413 | { | 428 | { |
414 | return i915_pci_resume(to_pci_dev(dev)); | 429 | struct pci_dev *pdev = to_pci_dev(dev); |
415 | } | 430 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
416 | 431 | ||
417 | static int | 432 | return i915_resume(drm_dev); |
418 | i915_pm_freeze(struct device *dev) | ||
419 | { | ||
420 | return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE); | ||
421 | } | 433 | } |
422 | 434 | ||
423 | static int | 435 | static int i915_pm_freeze(struct device *dev) |
424 | i915_pm_thaw(struct device *dev) | ||
425 | { | 436 | { |
426 | /* thaw during hibernate, do nothing! */ | 437 | struct pci_dev *pdev = to_pci_dev(dev); |
427 | return 0; | 438 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
439 | |||
440 | if (!drm_dev || !drm_dev->dev_private) { | ||
441 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); | ||
442 | return -ENODEV; | ||
443 | } | ||
444 | |||
445 | return i915_drm_freeze(drm_dev); | ||
428 | } | 446 | } |
429 | 447 | ||
430 | static int | 448 | static int i915_pm_thaw(struct device *dev) |
431 | i915_pm_poweroff(struct device *dev) | ||
432 | { | 449 | { |
433 | return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE); | 450 | struct pci_dev *pdev = to_pci_dev(dev); |
451 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
452 | |||
453 | return i915_drm_thaw(drm_dev); | ||
434 | } | 454 | } |
435 | 455 | ||
436 | static int | 456 | static int i915_pm_poweroff(struct device *dev) |
437 | i915_pm_restore(struct device *dev) | ||
438 | { | 457 | { |
439 | return i915_pci_resume(to_pci_dev(dev)); | 458 | struct pci_dev *pdev = to_pci_dev(dev); |
459 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
460 | |||
461 | return i915_drm_freeze(drm_dev); | ||
440 | } | 462 | } |
441 | 463 | ||
442 | const struct dev_pm_ops i915_pm_ops = { | 464 | const struct dev_pm_ops i915_pm_ops = { |
@@ -445,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = { | |||
445 | .freeze = i915_pm_freeze, | 467 | .freeze = i915_pm_freeze, |
446 | .thaw = i915_pm_thaw, | 468 | .thaw = i915_pm_thaw, |
447 | .poweroff = i915_pm_poweroff, | 469 | .poweroff = i915_pm_poweroff, |
448 | .restore = i915_pm_restore, | 470 | .restore = i915_pm_resume, |
449 | }; | 471 | }; |
450 | 472 | ||
451 | static struct vm_operations_struct i915_gem_vm_ops = { | 473 | static struct vm_operations_struct i915_gem_vm_ops = { |
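The suspend/resume refactor splits the driver work into two primitives, freeze (save state, idle the GPU) and thaw (restore state), and maps the dev_pm_ops callbacks onto them; only suspend/resume also change the PCI power state, and .restore now points at resume. A very rough shape sketch of that mapping (plain function pointers, not the real dev_pm_ops or PCI calls):

#include <stdio.h>

static int drm_freeze(void) { printf("save state, idle GPU\n"); return 0; }
static int drm_thaw(void)   { printf("restore state, re-init rings\n"); return 0; }

static int pm_suspend(void)
{
        int error = drm_freeze();

        if (error)
                return error;
        printf("PCI D3hot\n");                  /* only suspend powers down */
        return 0;
}

static int pm_resume(void)
{
        printf("PCI D0, enable device\n");      /* only resume powers up */
        return drm_thaw();
}

static int pm_freeze(void)   { return drm_freeze(); }  /* hibernate: before image write */
static int pm_thaw(void)     { return drm_thaw(); }    /* hibernate: after image write  */
static int pm_poweroff(void) { return drm_freeze(); }  /* hibernate: final power-off    */

struct dev_pm_ops_sketch {
        int (*suspend)(void), (*resume)(void);
        int (*freeze)(void), (*thaw)(void), (*poweroff)(void), (*restore)(void);
};

static const struct dev_pm_ops_sketch pm = {
        pm_suspend, pm_resume, pm_freeze, pm_thaw, pm_poweroff, pm_resume /* restore */
};

int main(void)
{
        pm.suspend();
        pm.resume();
        return 0;
}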
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c1669488b5a..b99b6a841d95 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -493,6 +493,15 @@ typedef struct drm_i915_private { | |||
493 | struct list_head flushing_list; | 493 | struct list_head flushing_list; |
494 | 494 | ||
495 | /** | 495 | /** |
496 | * List of objects currently pending a GPU write flush. | ||
497 | * | ||
498 | * All elements on this list will belong to either the | ||
499 | * active_list or flushing_list, last_rendering_seqno can | ||
500 | * be used to differentiate between the two elements. | ||
501 | */ | ||
502 | struct list_head gpu_write_list; | ||
503 | |||
504 | /** | ||
496 | * LRU list of objects which are not in the ringbuffer and | 505 | * LRU list of objects which are not in the ringbuffer and |
497 | * are ready to unbind, but are still in the GTT. | 506 | * are ready to unbind, but are still in the GTT. |
498 | * | 507 | * |
@@ -592,6 +601,8 @@ struct drm_i915_gem_object { | |||
592 | 601 | ||
593 | /** This object's place on the active/flushing/inactive lists */ | 602 | /** This object's place on the active/flushing/inactive lists */ |
594 | struct list_head list; | 603 | struct list_head list; |
604 | /** This object's place on GPU write list */ | ||
605 | struct list_head gpu_write_list; | ||
595 | 606 | ||
596 | /** This object's place on the fenced object LRU */ | 607 | /** This object's place on the fenced object LRU */ |
597 | struct list_head fence_list; | 608 | struct list_head fence_list; |
@@ -872,7 +883,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev, | |||
872 | void i915_gem_detach_phys_object(struct drm_device *dev, | 883 | void i915_gem_detach_phys_object(struct drm_device *dev, |
873 | struct drm_gem_object *obj); | 884 | struct drm_gem_object *obj); |
874 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 885 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
875 | int i915_gem_object_get_pages(struct drm_gem_object *obj); | 886 | int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
876 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | 887 | void i915_gem_object_put_pages(struct drm_gem_object *obj); |
877 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 888 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); |
878 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); | 889 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0c67924ca80c..ec8a0d7ffa39 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
277 | 277 | ||
278 | mutex_lock(&dev->struct_mutex); | 278 | mutex_lock(&dev->struct_mutex); |
279 | 279 | ||
280 | ret = i915_gem_object_get_pages(obj); | 280 | ret = i915_gem_object_get_pages(obj, 0); |
281 | if (ret != 0) | 281 | if (ret != 0) |
282 | goto fail_unlock; | 282 | goto fail_unlock; |
283 | 283 | ||
@@ -321,40 +321,24 @@ fail_unlock: | |||
321 | return ret; | 321 | return ret; |
322 | } | 322 | } |
323 | 323 | ||
324 | static inline gfp_t | ||
325 | i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
326 | { | ||
327 | return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
328 | } | ||
329 | |||
330 | static inline void | ||
331 | i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
332 | { | ||
333 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
334 | } | ||
335 | |||
336 | static int | 324 | static int |
337 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | 325 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) |
338 | { | 326 | { |
339 | int ret; | 327 | int ret; |
340 | 328 | ||
341 | ret = i915_gem_object_get_pages(obj); | 329 | ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); |
342 | 330 | ||
343 | /* If we've insufficient memory to map in the pages, attempt | 331 | /* If we've insufficient memory to map in the pages, attempt |
344 | * to make some space by throwing out some old buffers. | 332 | * to make some space by throwing out some old buffers. |
345 | */ | 333 | */ |
346 | if (ret == -ENOMEM) { | 334 | if (ret == -ENOMEM) { |
347 | struct drm_device *dev = obj->dev; | 335 | struct drm_device *dev = obj->dev; |
348 | gfp_t gfp; | ||
349 | 336 | ||
350 | ret = i915_gem_evict_something(dev, obj->size); | 337 | ret = i915_gem_evict_something(dev, obj->size); |
351 | if (ret) | 338 | if (ret) |
352 | return ret; | 339 | return ret; |
353 | 340 | ||
354 | gfp = i915_gem_object_get_page_gfp_mask(obj); | 341 | ret = i915_gem_object_get_pages(obj, 0); |
355 | i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
356 | ret = i915_gem_object_get_pages(obj); | ||
357 | i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
358 | } | 342 | } |
359 | 343 | ||
360 | return ret; | 344 | return ret; |
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
790 | 774 | ||
791 | mutex_lock(&dev->struct_mutex); | 775 | mutex_lock(&dev->struct_mutex); |
792 | 776 | ||
793 | ret = i915_gem_object_get_pages(obj); | 777 | ret = i915_gem_object_get_pages(obj, 0); |
794 | if (ret != 0) | 778 | if (ret != 0) |
795 | goto fail_unlock; | 779 | goto fail_unlock; |
796 | 780 | ||
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1568 | else | 1552 | else |
1569 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1553 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
1570 | 1554 | ||
1555 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
1556 | |||
1571 | obj_priv->last_rendering_seqno = 0; | 1557 | obj_priv->last_rendering_seqno = 0; |
1572 | if (obj_priv->active) { | 1558 | if (obj_priv->active) { |
1573 | obj_priv->active = 0; | 1559 | obj_priv->active = 0; |
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1638 | struct drm_i915_gem_object *obj_priv, *next; | 1624 | struct drm_i915_gem_object *obj_priv, *next; |
1639 | 1625 | ||
1640 | list_for_each_entry_safe(obj_priv, next, | 1626 | list_for_each_entry_safe(obj_priv, next, |
1641 | &dev_priv->mm.flushing_list, list) { | 1627 | &dev_priv->mm.gpu_write_list, |
1628 | gpu_write_list) { | ||
1642 | struct drm_gem_object *obj = obj_priv->obj; | 1629 | struct drm_gem_object *obj = obj_priv->obj; |
1643 | 1630 | ||
1644 | if ((obj->write_domain & flush_domains) == | 1631 | if ((obj->write_domain & flush_domains) == |
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1646 | uint32_t old_write_domain = obj->write_domain; | 1633 | uint32_t old_write_domain = obj->write_domain; |
1647 | 1634 | ||
1648 | obj->write_domain = 0; | 1635 | obj->write_domain = 0; |
1636 | list_del_init(&obj_priv->gpu_write_list); | ||
1649 | i915_gem_object_move_to_active(obj, seqno); | 1637 | i915_gem_object_move_to_active(obj, seqno); |
1650 | 1638 | ||
1651 | trace_i915_gem_object_change_domain(obj, | 1639 | trace_i915_gem_object_change_domain(obj, |
@@ -2100,8 +2088,8 @@ static int | |||
2100 | i915_gem_evict_everything(struct drm_device *dev) | 2088 | i915_gem_evict_everything(struct drm_device *dev) |
2101 | { | 2089 | { |
2102 | drm_i915_private_t *dev_priv = dev->dev_private; | 2090 | drm_i915_private_t *dev_priv = dev->dev_private; |
2103 | uint32_t seqno; | ||
2104 | int ret; | 2091 | int ret; |
2092 | uint32_t seqno; | ||
2105 | bool lists_empty; | 2093 | bool lists_empty; |
2106 | 2094 | ||
2107 | spin_lock(&dev_priv->mm.active_list_lock); | 2095 | spin_lock(&dev_priv->mm.active_list_lock); |
@@ -2123,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
2123 | if (ret) | 2111 | if (ret) |
2124 | return ret; | 2112 | return ret; |
2125 | 2113 | ||
2114 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
2115 | |||
2126 | ret = i915_gem_evict_from_inactive_list(dev); | 2116 | ret = i915_gem_evict_from_inactive_list(dev); |
2127 | if (ret) | 2117 | if (ret) |
2128 | return ret; | 2118 | return ret; |
@@ -2230,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2230 | } | 2220 | } |
2231 | 2221 | ||
2232 | int | 2222 | int |
2233 | i915_gem_object_get_pages(struct drm_gem_object *obj) | 2223 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2224 | gfp_t gfpmask) | ||
2234 | { | 2225 | { |
2235 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2226 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2236 | int page_count, i; | 2227 | int page_count, i; |
@@ -2256,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2256 | inode = obj->filp->f_path.dentry->d_inode; | 2247 | inode = obj->filp->f_path.dentry->d_inode; |
2257 | mapping = inode->i_mapping; | 2248 | mapping = inode->i_mapping; |
2258 | for (i = 0; i < page_count; i++) { | 2249 | for (i = 0; i < page_count; i++) { |
2259 | page = read_mapping_page(mapping, i, NULL); | 2250 | page = read_cache_page_gfp(mapping, i, |
2251 | mapping_gfp_mask (mapping) | | ||
2252 | __GFP_COLD | | ||
2253 | gfpmask); | ||
2260 | if (IS_ERR(page)) { | 2254 | if (IS_ERR(page)) { |
2261 | ret = PTR_ERR(page); | 2255 | ret = PTR_ERR(page); |
2262 | i915_gem_object_put_pages(obj); | 2256 | i915_gem_object_put_pages(obj); |
@@ -2579,7 +2573,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2579 | drm_i915_private_t *dev_priv = dev->dev_private; | 2573 | drm_i915_private_t *dev_priv = dev->dev_private; |
2580 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2574 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2581 | struct drm_mm_node *free_space; | 2575 | struct drm_mm_node *free_space; |
2582 | bool retry_alloc = false; | 2576 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2583 | int ret; | 2577 | int ret; |
2584 | 2578 | ||
2585 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2579 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
@@ -2623,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2623 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2617 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2624 | obj->size, obj_priv->gtt_offset); | 2618 | obj->size, obj_priv->gtt_offset); |
2625 | #endif | 2619 | #endif |
2626 | if (retry_alloc) { | 2620 | ret = i915_gem_object_get_pages(obj, gfpmask); |
2627 | i915_gem_object_set_page_gfp_mask (obj, | ||
2628 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2629 | } | ||
2630 | ret = i915_gem_object_get_pages(obj); | ||
2631 | if (retry_alloc) { | ||
2632 | i915_gem_object_set_page_gfp_mask (obj, | ||
2633 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2634 | } | ||
2635 | if (ret) { | 2621 | if (ret) { |
2636 | drm_mm_put_block(obj_priv->gtt_space); | 2622 | drm_mm_put_block(obj_priv->gtt_space); |
2637 | obj_priv->gtt_space = NULL; | 2623 | obj_priv->gtt_space = NULL; |
@@ -2641,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2641 | ret = i915_gem_evict_something(dev, obj->size); | 2627 | ret = i915_gem_evict_something(dev, obj->size); |
2642 | if (ret) { | 2628 | if (ret) { |
2643 | /* now try to shrink everyone else */ | 2629 | /* now try to shrink everyone else */ |
2644 | if (! retry_alloc) { | 2630 | if (gfpmask) { |
2645 | retry_alloc = true; | 2631 | gfpmask = 0; |
2646 | goto search_free; | 2632 | goto search_free; |
2647 | } | 2633 | } |
2648 | 2634 | ||
2649 | return ret; | 2635 | return ret; |
@@ -2721,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2721 | old_write_domain = obj->write_domain; | 2707 | old_write_domain = obj->write_domain; |
2722 | i915_gem_flush(dev, 0, obj->write_domain); | 2708 | i915_gem_flush(dev, 0, obj->write_domain); |
2723 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2709 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2724 | obj->write_domain = 0; | 2710 | BUG_ON(obj->write_domain); |
2725 | i915_gem_object_move_to_active(obj, seqno); | 2711 | i915_gem_object_move_to_active(obj, seqno); |
2726 | 2712 | ||
2727 | trace_i915_gem_object_change_domain(obj, | 2713 | trace_i915_gem_object_change_domain(obj, |
@@ -3584,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, | |||
3584 | uint32_t reloc_count = 0, i; | 3570 | uint32_t reloc_count = 0, i; |
3585 | int ret = 0; | 3571 | int ret = 0; |
3586 | 3572 | ||
3573 | if (relocs == NULL) | ||
3574 | return 0; | ||
3575 | |||
3587 | for (i = 0; i < buffer_count; i++) { | 3576 | for (i = 0; i < buffer_count; i++) { |
3588 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3577 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3589 | int unwritten; | 3578 | int unwritten; |
@@ -3673,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3673 | struct drm_gem_object *batch_obj; | 3662 | struct drm_gem_object *batch_obj; |
3674 | struct drm_i915_gem_object *obj_priv; | 3663 | struct drm_i915_gem_object *obj_priv; |
3675 | struct drm_clip_rect *cliprects = NULL; | 3664 | struct drm_clip_rect *cliprects = NULL; |
3676 | struct drm_i915_gem_relocation_entry *relocs; | 3665 | struct drm_i915_gem_relocation_entry *relocs = NULL; |
3677 | int ret = 0, ret2, i, pinned = 0; | 3666 | int ret = 0, ret2, i, pinned = 0; |
3678 | uint64_t exec_offset; | 3667 | uint64_t exec_offset; |
3679 | uint32_t seqno, flush_domains, reloc_index; | 3668 | uint32_t seqno, flush_domains, reloc_index; |
@@ -3699,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3699 | if (args->num_cliprects != 0) { | 3688 | if (args->num_cliprects != 0) { |
3700 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 3689 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), |
3701 | GFP_KERNEL); | 3690 | GFP_KERNEL); |
3702 | if (cliprects == NULL) | 3691 | if (cliprects == NULL) { |
3692 | ret = -ENOMEM; | ||
3703 | goto pre_mutex_err; | 3693 | goto pre_mutex_err; |
3694 | } | ||
3704 | 3695 | ||
3705 | ret = copy_from_user(cliprects, | 3696 | ret = copy_from_user(cliprects, |
3706 | (struct drm_clip_rect __user *) | 3697 | (struct drm_clip_rect __user *) |
@@ -3742,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3742 | if (object_list[i] == NULL) { | 3733 | if (object_list[i] == NULL) { |
3743 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3734 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3744 | exec_list[i].handle, i); | 3735 | exec_list[i].handle, i); |
3736 | /* prevent error path from reading uninitialized data */ | ||
3737 | args->buffer_count = i + 1; | ||
3745 | ret = -EBADF; | 3738 | ret = -EBADF; |
3746 | goto err; | 3739 | goto err; |
3747 | } | 3740 | } |
@@ -3750,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3750 | if (obj_priv->in_execbuffer) { | 3743 | if (obj_priv->in_execbuffer) { |
3751 | DRM_ERROR("Object %p appears more than once in object list\n", | 3744 | DRM_ERROR("Object %p appears more than once in object list\n", |
3752 | object_list[i]); | 3745 | object_list[i]); |
3746 | /* prevent error path from reading uninitialized data */ | ||
3747 | args->buffer_count = i + 1; | ||
3753 | ret = -EBADF; | 3748 | ret = -EBADF; |
3754 | goto err; | 3749 | goto err; |
3755 | } | 3750 | } |
@@ -3863,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3863 | i915_gem_flush(dev, | 3858 | i915_gem_flush(dev, |
3864 | dev->invalidate_domains, | 3859 | dev->invalidate_domains, |
3865 | dev->flush_domains); | 3860 | dev->flush_domains); |
3866 | if (dev->flush_domains) | 3861 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) |
3867 | (void)i915_add_request(dev, file_priv, | 3862 | (void)i915_add_request(dev, file_priv, |
3868 | dev->flush_domains); | 3863 | dev->flush_domains); |
3869 | } | 3864 | } |
3870 | 3865 | ||
3871 | for (i = 0; i < args->buffer_count; i++) { | 3866 | for (i = 0; i < args->buffer_count; i++) { |
3872 | struct drm_gem_object *obj = object_list[i]; | 3867 | struct drm_gem_object *obj = object_list[i]; |
3868 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3873 | uint32_t old_write_domain = obj->write_domain; | 3869 | uint32_t old_write_domain = obj->write_domain; |
3874 | 3870 | ||
3875 | obj->write_domain = obj->pending_write_domain; | 3871 | obj->write_domain = obj->pending_write_domain; |
3872 | if (obj->write_domain) | ||
3873 | list_move_tail(&obj_priv->gpu_write_list, | ||
3874 | &dev_priv->mm.gpu_write_list); | ||
3875 | else | ||
3876 | list_del_init(&obj_priv->gpu_write_list); | ||
3877 | |||
3876 | trace_i915_gem_object_change_domain(obj, | 3878 | trace_i915_gem_object_change_domain(obj, |
3877 | obj->read_domains, | 3879 | obj->read_domains, |
3878 | old_write_domain); | 3880 | old_write_domain); |
@@ -3946,6 +3948,7 @@ err: | |||
3946 | 3948 | ||
3947 | mutex_unlock(&dev->struct_mutex); | 3949 | mutex_unlock(&dev->struct_mutex); |
3948 | 3950 | ||
3951 | pre_mutex_err: | ||
3949 | /* Copy the updated relocations out regardless of current error | 3952 | /* Copy the updated relocations out regardless of current error |
3950 | * state. Failure to update the relocs would mean that the next | 3953 | * state. Failure to update the relocs would mean that the next |
3951 | * time userland calls execbuf, it would do so with presumed offset | 3954 | * time userland calls execbuf, it would do so with presumed offset |
@@ -3960,7 +3963,6 @@ err: | |||
3960 | ret = ret2; | 3963 | ret = ret2; |
3961 | } | 3964 | } |
3962 | 3965 | ||
3963 | pre_mutex_err: | ||
3964 | drm_free_large(object_list); | 3966 | drm_free_large(object_list); |
3965 | kfree(cliprects); | 3967 | kfree(cliprects); |
3966 | 3968 | ||
@@ -4383,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4383 | obj_priv->obj = obj; | 4385 | obj_priv->obj = obj; |
4384 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 4386 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
4385 | INIT_LIST_HEAD(&obj_priv->list); | 4387 | INIT_LIST_HEAD(&obj_priv->list); |
4388 | INIT_LIST_HEAD(&obj_priv->gpu_write_list); | ||
4386 | INIT_LIST_HEAD(&obj_priv->fence_list); | 4389 | INIT_LIST_HEAD(&obj_priv->fence_list); |
4387 | obj_priv->madv = I915_MADV_WILLNEED; | 4390 | obj_priv->madv = I915_MADV_WILLNEED; |
4388 | 4391 | ||
@@ -4834,6 +4837,7 @@ i915_gem_load(struct drm_device *dev) | |||
4834 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4837 | spin_lock_init(&dev_priv->mm.active_list_lock); |
4835 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 4838 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
4836 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4839 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4840 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
4837 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4841 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4838 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | 4842 | INIT_LIST_HEAD(&dev_priv->mm.request_list); |
4839 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4843 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
@@ -4946,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4946 | if (!obj_priv->phys_obj) | 4950 | if (!obj_priv->phys_obj) |
4947 | return; | 4951 | return; |
4948 | 4952 | ||
4949 | ret = i915_gem_object_get_pages(obj); | 4953 | ret = i915_gem_object_get_pages(obj, 0); |
4950 | if (ret) | 4954 | if (ret) |
4951 | goto out; | 4955 | goto out; |
4952 | 4956 | ||
@@ -5004,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5004 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 5008 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
5005 | obj_priv->phys_obj->cur_obj = obj; | 5009 | obj_priv->phys_obj->cur_obj = obj; |
5006 | 5010 | ||
5007 | ret = i915_gem_object_get_pages(obj); | 5011 | ret = i915_gem_object_get_pages(obj, 0); |
5008 | if (ret) { | 5012 | if (ret) { |
5009 | DRM_ERROR("failed to get page list\n"); | 5013 | DRM_ERROR("failed to get page list\n"); |
5010 | goto out; | 5014 | goto out; |
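
Taken together, the i915_gem.c hunks introduce a per-object gpu_write_list (initialised in i915_gem_init_object and i915_gem_load) so that flushes only walk buffers with a pending GPU write domain instead of the whole flushing_list. A hedged sketch of that bookkeeping, using the driver's own structures:

	/* Sketch of the list discipline the patch establishes (simplified). */
	static void track_gpu_write_sketch(drm_i915_private_t *dev_priv,
					   struct drm_i915_gem_object *obj_priv,
					   uint32_t write_domain)
	{
		if (write_domain)
			/* pending GPU write: queue it for the next flush */
			list_move_tail(&obj_priv->gpu_write_list,
				       &dev_priv->mm.gpu_write_list);
		else
			/* nothing to flush: take it off the list */
			list_del_init(&obj_priv->gpu_write_list);
	}
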
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 89a071a3e6fb..a17d6bdfe63e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
309 | if (de_iir & DE_GSE) | 309 | if (de_iir & DE_GSE) |
310 | ironlake_opregion_gse_intr(dev); | 310 | ironlake_opregion_gse_intr(dev); |
311 | 311 | ||
312 | if (de_iir & DE_PLANEA_FLIP_DONE) { | ||
313 | intel_prepare_page_flip(dev, 0); | ||
314 | intel_finish_page_flip(dev, 0); | ||
315 | } | ||
316 | |||
317 | if (de_iir & DE_PLANEB_FLIP_DONE) { | ||
318 | intel_prepare_page_flip(dev, 1); | ||
319 | intel_finish_page_flip(dev, 1); | ||
320 | } | ||
321 | |||
322 | if (de_iir & DE_PIPEA_VBLANK) | ||
323 | drm_handle_vblank(dev, 0); | ||
324 | |||
325 | if (de_iir & DE_PIPEB_VBLANK) | ||
326 | drm_handle_vblank(dev, 1); | ||
327 | |||
312 | /* check event from PCH */ | 328 | /* check event from PCH */ |
313 | if ((de_iir & DE_PCH_EVENT) && | 329 | if ((de_iir & DE_PCH_EVENT) && |
314 | (pch_iir & SDE_HOTPLUG_MASK)) { | 330 | (pch_iir & SDE_HOTPLUG_MASK)) { |
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
844 | if (!(pipeconf & PIPEACONF_ENABLE)) | 860 | if (!(pipeconf & PIPEACONF_ENABLE)) |
845 | return -EINVAL; | 861 | return -EINVAL; |
846 | 862 | ||
847 | if (IS_IRONLAKE(dev)) | ||
848 | return 0; | ||
849 | |||
850 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 863 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
851 | if (IS_I965G(dev)) | 864 | if (IS_IRONLAKE(dev)) |
865 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
866 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
867 | else if (IS_I965G(dev)) | ||
852 | i915_enable_pipestat(dev_priv, pipe, | 868 | i915_enable_pipestat(dev_priv, pipe, |
853 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 869 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
854 | else | 870 | else |
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
866 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 882 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
867 | unsigned long irqflags; | 883 | unsigned long irqflags; |
868 | 884 | ||
869 | if (IS_IRONLAKE(dev)) | ||
870 | return; | ||
871 | |||
872 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 885 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
873 | i915_disable_pipestat(dev_priv, pipe, | 886 | if (IS_IRONLAKE(dev)) |
874 | PIPE_VBLANK_INTERRUPT_ENABLE | | 887 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
875 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 888 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
889 | else | ||
890 | i915_disable_pipestat(dev_priv, pipe, | ||
891 | PIPE_VBLANK_INTERRUPT_ENABLE | | ||
892 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||
876 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 893 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
877 | } | 894 | } |
878 | 895 | ||
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1015 | { | 1032 | { |
1016 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1033 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1017 | /* enable kind of interrupts always enabled */ | 1034 | /* enable kind of interrupts always enabled */ |
1018 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; | 1035 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1036 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | ||
1019 | u32 render_mask = GT_USER_INTERRUPT; | 1037 | u32 render_mask = GT_USER_INTERRUPT; |
1020 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1038 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1021 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1039 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1022 | 1040 | ||
1023 | dev_priv->irq_mask_reg = ~display_mask; | 1041 | dev_priv->irq_mask_reg = ~display_mask; |
1024 | dev_priv->de_irq_enable_reg = display_mask; | 1042 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; |
1025 | 1043 | ||
1026 | /* should always can generate irq */ | 1044 | /* should always can generate irq */ |
1027 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1045 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
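
The i915_irq.c hunks stop treating Ironlake vblanks as unsupported: the handler now forwards DE_PIPEA_VBLANK/DE_PIPEB_VBLANK to drm_handle_vblank(), and enable/disable toggle the display-engine mask bits under the user-irq lock. A minimal sketch of the enable path (helper names as they appear in the patch context):

	/* Sketch: unmask the per-pipe vblank bit while holding user_irq_lock. */
	static int ironlake_vblank_on_sketch(drm_i915_private_t *dev_priv, int pipe)
	{
		unsigned long irqflags;
		u32 bit = (pipe == 0) ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK;

		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, bit);
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
		return 0;
	}
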
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 847006c5218e..ab1bd2d3d3b6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -338,6 +338,7 @@ | |||
338 | #define FBC_CTL_PERIODIC (1<<30) | 338 | #define FBC_CTL_PERIODIC (1<<30) |
339 | #define FBC_CTL_INTERVAL_SHIFT (16) | 339 | #define FBC_CTL_INTERVAL_SHIFT (16) |
340 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | 340 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) |
341 | #define FBC_C3_IDLE (1<<13) | ||
341 | #define FBC_CTL_STRIDE_SHIFT (5) | 342 | #define FBC_CTL_STRIDE_SHIFT (5) |
342 | #define FBC_CTL_FENCENO (1<<0) | 343 | #define FBC_CTL_FENCENO (1<<0) |
343 | #define FBC_COMMAND 0x0320c | 344 | #define FBC_COMMAND 0x0320c |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ddefc871edfe..79dd4026586f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
157 | adpa = I915_READ(PCH_ADPA); | 157 | adpa = I915_READ(PCH_ADPA); |
158 | 158 | ||
159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
160 | /* disable HPD first */ | ||
161 | I915_WRITE(PCH_ADPA, adpa); | ||
162 | (void)I915_READ(PCH_ADPA); | ||
160 | 163 | ||
161 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 164 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
162 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 165 | ADPA_CRT_HOTPLUG_WARMUP_10MS | |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 45da78ef4a92..b27202d23ebc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -240,33 +240,86 @@ struct intel_limit { | |||
240 | #define IRONLAKE_DOT_MAX 350000 | 240 | #define IRONLAKE_DOT_MAX 350000 |
241 | #define IRONLAKE_VCO_MIN 1760000 | 241 | #define IRONLAKE_VCO_MIN 1760000 |
242 | #define IRONLAKE_VCO_MAX 3510000 | 242 | #define IRONLAKE_VCO_MAX 3510000 |
243 | #define IRONLAKE_N_MIN 1 | ||
244 | #define IRONLAKE_N_MAX 6 | ||
245 | #define IRONLAKE_M_MIN 79 | ||
246 | #define IRONLAKE_M_MAX 127 | ||
247 | #define IRONLAKE_M1_MIN 12 | 243 | #define IRONLAKE_M1_MIN 12 |
248 | #define IRONLAKE_M1_MAX 22 | 244 | #define IRONLAKE_M1_MAX 22 |
249 | #define IRONLAKE_M2_MIN 5 | 245 | #define IRONLAKE_M2_MIN 5 |
250 | #define IRONLAKE_M2_MAX 9 | 246 | #define IRONLAKE_M2_MAX 9 |
251 | #define IRONLAKE_P_SDVO_DAC_MIN 5 | ||
252 | #define IRONLAKE_P_SDVO_DAC_MAX 80 | ||
253 | #define IRONLAKE_P_LVDS_MIN 28 | ||
254 | #define IRONLAKE_P_LVDS_MAX 112 | ||
255 | #define IRONLAKE_P1_MIN 1 | ||
256 | #define IRONLAKE_P1_MAX 8 | ||
257 | #define IRONLAKE_P2_SDVO_DAC_SLOW 10 | ||
258 | #define IRONLAKE_P2_SDVO_DAC_FAST 5 | ||
259 | #define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */ | ||
260 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ | ||
261 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ | 247 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ |
262 | 248 | ||
263 | #define IRONLAKE_P_DISPLAY_PORT_MIN 10 | 249 | /* We have parameter ranges for different type of outputs. */ |
264 | #define IRONLAKE_P_DISPLAY_PORT_MAX 20 | 250 | |
265 | #define IRONLAKE_P2_DISPLAY_PORT_FAST 10 | 251 | /* DAC & HDMI Refclk 120Mhz */ |
266 | #define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 | 252 | #define IRONLAKE_DAC_N_MIN 1 |
267 | #define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 | 253 | #define IRONLAKE_DAC_N_MAX 5 |
268 | #define IRONLAKE_P1_DISPLAY_PORT_MIN 1 | 254 | #define IRONLAKE_DAC_M_MIN 79 |
269 | #define IRONLAKE_P1_DISPLAY_PORT_MAX 2 | 255 | #define IRONLAKE_DAC_M_MAX 127 |
256 | #define IRONLAKE_DAC_P_MIN 5 | ||
257 | #define IRONLAKE_DAC_P_MAX 80 | ||
258 | #define IRONLAKE_DAC_P1_MIN 1 | ||
259 | #define IRONLAKE_DAC_P1_MAX 8 | ||
260 | #define IRONLAKE_DAC_P2_SLOW 10 | ||
261 | #define IRONLAKE_DAC_P2_FAST 5 | ||
262 | |||
263 | /* LVDS single-channel 120Mhz refclk */ | ||
264 | #define IRONLAKE_LVDS_S_N_MIN 1 | ||
265 | #define IRONLAKE_LVDS_S_N_MAX 3 | ||
266 | #define IRONLAKE_LVDS_S_M_MIN 79 | ||
267 | #define IRONLAKE_LVDS_S_M_MAX 118 | ||
268 | #define IRONLAKE_LVDS_S_P_MIN 28 | ||
269 | #define IRONLAKE_LVDS_S_P_MAX 112 | ||
270 | #define IRONLAKE_LVDS_S_P1_MIN 2 | ||
271 | #define IRONLAKE_LVDS_S_P1_MAX 8 | ||
272 | #define IRONLAKE_LVDS_S_P2_SLOW 14 | ||
273 | #define IRONLAKE_LVDS_S_P2_FAST 14 | ||
274 | |||
275 | /* LVDS dual-channel 120Mhz refclk */ | ||
276 | #define IRONLAKE_LVDS_D_N_MIN 1 | ||
277 | #define IRONLAKE_LVDS_D_N_MAX 3 | ||
278 | #define IRONLAKE_LVDS_D_M_MIN 79 | ||
279 | #define IRONLAKE_LVDS_D_M_MAX 127 | ||
280 | #define IRONLAKE_LVDS_D_P_MIN 14 | ||
281 | #define IRONLAKE_LVDS_D_P_MAX 56 | ||
282 | #define IRONLAKE_LVDS_D_P1_MIN 2 | ||
283 | #define IRONLAKE_LVDS_D_P1_MAX 8 | ||
284 | #define IRONLAKE_LVDS_D_P2_SLOW 7 | ||
285 | #define IRONLAKE_LVDS_D_P2_FAST 7 | ||
286 | |||
287 | /* LVDS single-channel 100Mhz refclk */ | ||
288 | #define IRONLAKE_LVDS_S_SSC_N_MIN 1 | ||
289 | #define IRONLAKE_LVDS_S_SSC_N_MAX 2 | ||
290 | #define IRONLAKE_LVDS_S_SSC_M_MIN 79 | ||
291 | #define IRONLAKE_LVDS_S_SSC_M_MAX 126 | ||
292 | #define IRONLAKE_LVDS_S_SSC_P_MIN 28 | ||
293 | #define IRONLAKE_LVDS_S_SSC_P_MAX 112 | ||
294 | #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 | ||
295 | #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 | ||
296 | #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 | ||
297 | #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 | ||
298 | |||
299 | /* LVDS dual-channel 100Mhz refclk */ | ||
300 | #define IRONLAKE_LVDS_D_SSC_N_MIN 1 | ||
301 | #define IRONLAKE_LVDS_D_SSC_N_MAX 3 | ||
302 | #define IRONLAKE_LVDS_D_SSC_M_MIN 79 | ||
303 | #define IRONLAKE_LVDS_D_SSC_M_MAX 126 | ||
304 | #define IRONLAKE_LVDS_D_SSC_P_MIN 14 | ||
305 | #define IRONLAKE_LVDS_D_SSC_P_MAX 42 | ||
306 | #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 | ||
307 | #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 | ||
308 | #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 | ||
309 | #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 | ||
310 | |||
311 | /* DisplayPort */ | ||
312 | #define IRONLAKE_DP_N_MIN 1 | ||
313 | #define IRONLAKE_DP_N_MAX 2 | ||
314 | #define IRONLAKE_DP_M_MIN 81 | ||
315 | #define IRONLAKE_DP_M_MAX 90 | ||
316 | #define IRONLAKE_DP_P_MIN 10 | ||
317 | #define IRONLAKE_DP_P_MAX 20 | ||
318 | #define IRONLAKE_DP_P2_FAST 10 | ||
319 | #define IRONLAKE_DP_P2_SLOW 10 | ||
320 | #define IRONLAKE_DP_P2_LIMIT 0 | ||
321 | #define IRONLAKE_DP_P1_MIN 1 | ||
322 | #define IRONLAKE_DP_P1_MAX 2 | ||
270 | 323 | ||
271 | static bool | 324 | static bool |
272 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 325 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = { | |||
474 | .find_pll = intel_find_best_PLL, | 527 | .find_pll = intel_find_best_PLL, |
475 | }; | 528 | }; |
476 | 529 | ||
477 | static const intel_limit_t intel_limits_ironlake_sdvo = { | 530 | static const intel_limit_t intel_limits_ironlake_dac = { |
478 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 531 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
479 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 532 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
480 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, | 533 | .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, |
481 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, | 534 | .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, |
482 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 535 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
483 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 536 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
484 | .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, | 537 | .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, |
485 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, | 538 | .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, |
486 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 539 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
487 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, | 540 | .p2_slow = IRONLAKE_DAC_P2_SLOW, |
488 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, | 541 | .p2_fast = IRONLAKE_DAC_P2_FAST }, |
489 | .find_pll = intel_g4x_find_best_PLL, | 542 | .find_pll = intel_g4x_find_best_PLL, |
490 | }; | 543 | }; |
491 | 544 | ||
492 | static const intel_limit_t intel_limits_ironlake_lvds = { | 545 | static const intel_limit_t intel_limits_ironlake_single_lvds = { |
493 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 546 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
494 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 547 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
495 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, | 548 | .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, |
496 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, | 549 | .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, |
497 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 550 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
498 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 551 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
499 | .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, | 552 | .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, |
500 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, | 553 | .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, |
501 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 554 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
502 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, | 555 | .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, |
503 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, | 556 | .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, |
557 | .find_pll = intel_g4x_find_best_PLL, | ||
558 | }; | ||
559 | |||
560 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { | ||
561 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
562 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
563 | .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, | ||
564 | .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, | ||
565 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
566 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
567 | .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, | ||
568 | .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, | ||
569 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
570 | .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, | ||
571 | .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, | ||
572 | .find_pll = intel_g4x_find_best_PLL, | ||
573 | }; | ||
574 | |||
575 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { | ||
576 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
577 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
578 | .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, | ||
579 | .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, | ||
580 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
581 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
582 | .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, | ||
583 | .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, | ||
584 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
585 | .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, | ||
586 | .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, | ||
587 | .find_pll = intel_g4x_find_best_PLL, | ||
588 | }; | ||
589 | |||
590 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | ||
591 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
592 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
593 | .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, | ||
594 | .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, | ||
595 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
596 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
597 | .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, | ||
598 | .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, | ||
599 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
600 | .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, | ||
601 | .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, | ||
504 | .find_pll = intel_g4x_find_best_PLL, | 602 | .find_pll = intel_g4x_find_best_PLL, |
505 | }; | 603 | }; |
506 | 604 | ||
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = { | |||
509 | .max = IRONLAKE_DOT_MAX }, | 607 | .max = IRONLAKE_DOT_MAX }, |
510 | .vco = { .min = IRONLAKE_VCO_MIN, | 608 | .vco = { .min = IRONLAKE_VCO_MIN, |
511 | .max = IRONLAKE_VCO_MAX}, | 609 | .max = IRONLAKE_VCO_MAX}, |
512 | .n = { .min = IRONLAKE_N_MIN, | 610 | .n = { .min = IRONLAKE_DP_N_MIN, |
513 | .max = IRONLAKE_N_MAX }, | 611 | .max = IRONLAKE_DP_N_MAX }, |
514 | .m = { .min = IRONLAKE_M_MIN, | 612 | .m = { .min = IRONLAKE_DP_M_MIN, |
515 | .max = IRONLAKE_M_MAX }, | 613 | .max = IRONLAKE_DP_M_MAX }, |
516 | .m1 = { .min = IRONLAKE_M1_MIN, | 614 | .m1 = { .min = IRONLAKE_M1_MIN, |
517 | .max = IRONLAKE_M1_MAX }, | 615 | .max = IRONLAKE_M1_MAX }, |
518 | .m2 = { .min = IRONLAKE_M2_MIN, | 616 | .m2 = { .min = IRONLAKE_M2_MIN, |
519 | .max = IRONLAKE_M2_MAX }, | 617 | .max = IRONLAKE_M2_MAX }, |
520 | .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, | 618 | .p = { .min = IRONLAKE_DP_P_MIN, |
521 | .max = IRONLAKE_P_DISPLAY_PORT_MAX }, | 619 | .max = IRONLAKE_DP_P_MAX }, |
522 | .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, | 620 | .p1 = { .min = IRONLAKE_DP_P1_MIN, |
523 | .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, | 621 | .max = IRONLAKE_DP_P1_MAX}, |
524 | .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, | 622 | .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, |
525 | .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, | 623 | .p2_slow = IRONLAKE_DP_P2_SLOW, |
526 | .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, | 624 | .p2_fast = IRONLAKE_DP_P2_FAST }, |
527 | .find_pll = intel_find_pll_ironlake_dp, | 625 | .find_pll = intel_find_pll_ironlake_dp, |
528 | }; | 626 | }; |
529 | 627 | ||
530 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 628 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) |
531 | { | 629 | { |
630 | struct drm_device *dev = crtc->dev; | ||
631 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
532 | const intel_limit_t *limit; | 632 | const intel_limit_t *limit; |
533 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 633 | int refclk = 120; |
534 | limit = &intel_limits_ironlake_lvds; | 634 | |
535 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | 635 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
636 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
637 | refclk = 100; | ||
638 | |||
639 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
640 | LVDS_CLKB_POWER_UP) { | ||
641 | /* LVDS dual channel */ | ||
642 | if (refclk == 100) | ||
643 | limit = &intel_limits_ironlake_dual_lvds_100m; | ||
644 | else | ||
645 | limit = &intel_limits_ironlake_dual_lvds; | ||
646 | } else { | ||
647 | if (refclk == 100) | ||
648 | limit = &intel_limits_ironlake_single_lvds_100m; | ||
649 | else | ||
650 | limit = &intel_limits_ironlake_single_lvds; | ||
651 | } | ||
652 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | ||
536 | HAS_eDP) | 653 | HAS_eDP) |
537 | limit = &intel_limits_ironlake_display_port; | 654 | limit = &intel_limits_ironlake_display_port; |
538 | else | 655 | else |
539 | limit = &intel_limits_ironlake_sdvo; | 656 | limit = &intel_limits_ironlake_dac; |
540 | 657 | ||
541 | return limit; | 658 | return limit; |
542 | } | 659 | } |
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
914 | 1031 | ||
915 | /* enable it... */ | 1032 | /* enable it... */ |
916 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | 1033 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; |
1034 | if (IS_I945GM(dev)) | ||
1035 | fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ | ||
917 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1036 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
918 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1037 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
919 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1038 | if (obj_priv->tiling_mode != I915_TILING_NONE) |
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1638 | case DRM_MODE_DPMS_OFF: | 1757 | case DRM_MODE_DPMS_OFF: |
1639 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 1758 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
1640 | 1759 | ||
1760 | drm_vblank_off(dev, pipe); | ||
1641 | /* Disable display plane */ | 1761 | /* Disable display plane */ |
1642 | temp = I915_READ(dspcntr_reg); | 1762 | temp = I915_READ(dspcntr_reg); |
1643 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 1763 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { |
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2519 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 2639 | sr_entries = roundup(sr_entries / cacheline_size, 1); |
2520 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 2640 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); |
2521 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2641 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2642 | } else { | ||
2643 | /* Turn off self refresh if both pipes are enabled */ | ||
2644 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2645 | & ~FW_BLC_SELF_EN); | ||
2522 | } | 2646 | } |
2523 | 2647 | ||
2524 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 2648 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", |
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2562 | srwm = 1; | 2686 | srwm = 1; |
2563 | srwm &= 0x3f; | 2687 | srwm &= 0x3f; |
2564 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2688 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2689 | } else { | ||
2690 | /* Turn off self refresh if both pipes are enabled */ | ||
2691 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2692 | & ~FW_BLC_SELF_EN); | ||
2565 | } | 2693 | } |
2566 | 2694 | ||
2567 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 2695 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2630 | if (srwm < 0) | 2758 | if (srwm < 0) |
2631 | srwm = 1; | 2759 | srwm = 1; |
2632 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | 2760 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); |
2761 | } else { | ||
2762 | /* Turn off self refresh if both pipes are enabled */ | ||
2763 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2764 | & ~FW_BLC_SELF_EN); | ||
2633 | } | 2765 | } |
2634 | 2766 | ||
2635 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 2767 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
3949 | struct intel_unpin_work { | 4081 | struct intel_unpin_work { |
3950 | struct work_struct work; | 4082 | struct work_struct work; |
3951 | struct drm_device *dev; | 4083 | struct drm_device *dev; |
3952 | struct drm_gem_object *obj; | 4084 | struct drm_gem_object *old_fb_obj; |
4085 | struct drm_gem_object *pending_flip_obj; | ||
3953 | struct drm_pending_vblank_event *event; | 4086 | struct drm_pending_vblank_event *event; |
3954 | int pending; | 4087 | int pending; |
3955 | }; | 4088 | }; |
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
3960 | container_of(__work, struct intel_unpin_work, work); | 4093 | container_of(__work, struct intel_unpin_work, work); |
3961 | 4094 | ||
3962 | mutex_lock(&work->dev->struct_mutex); | 4095 | mutex_lock(&work->dev->struct_mutex); |
3963 | i915_gem_object_unpin(work->obj); | 4096 | i915_gem_object_unpin(work->old_fb_obj); |
3964 | drm_gem_object_unreference(work->obj); | 4097 | drm_gem_object_unreference(work->pending_flip_obj); |
4098 | drm_gem_object_unreference(work->old_fb_obj); | ||
3965 | mutex_unlock(&work->dev->struct_mutex); | 4099 | mutex_unlock(&work->dev->struct_mutex); |
3966 | kfree(work); | 4100 | kfree(work); |
3967 | } | 4101 | } |
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
3984 | spin_lock_irqsave(&dev->event_lock, flags); | 4118 | spin_lock_irqsave(&dev->event_lock, flags); |
3985 | work = intel_crtc->unpin_work; | 4119 | work = intel_crtc->unpin_work; |
3986 | if (work == NULL || !work->pending) { | 4120 | if (work == NULL || !work->pending) { |
4121 | if (work && !work->pending) { | ||
4122 | obj_priv = work->pending_flip_obj->driver_private; | ||
4123 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | ||
4124 | obj_priv, | ||
4125 | atomic_read(&obj_priv->pending_flip)); | ||
4126 | } | ||
3987 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4127 | spin_unlock_irqrestore(&dev->event_lock, flags); |
3988 | return; | 4128 | return; |
3989 | } | 4129 | } |
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4004 | 4144 | ||
4005 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4145 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4006 | 4146 | ||
4007 | obj_priv = work->obj->driver_private; | 4147 | obj_priv = work->pending_flip_obj->driver_private; |
4008 | if (atomic_dec_and_test(&obj_priv->pending_flip)) | 4148 | |
4149 | /* Initial scanout buffer will have a 0 pending flip count */ | ||
4150 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | ||
4151 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4009 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | 4152 | DRM_WAKEUP(&dev_priv->pending_flip_queue); |
4010 | schedule_work(&work->work); | 4153 | schedule_work(&work->work); |
4011 | } | 4154 | } |
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
4018 | unsigned long flags; | 4161 | unsigned long flags; |
4019 | 4162 | ||
4020 | spin_lock_irqsave(&dev->event_lock, flags); | 4163 | spin_lock_irqsave(&dev->event_lock, flags); |
4021 | if (intel_crtc->unpin_work) | 4164 | if (intel_crtc->unpin_work) { |
4022 | intel_crtc->unpin_work->pending = 1; | 4165 | intel_crtc->unpin_work->pending = 1; |
4166 | } else { | ||
4167 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | ||
4168 | } | ||
4023 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4169 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4024 | } | 4170 | } |
4025 | 4171 | ||
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4035 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4181 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4036 | struct intel_unpin_work *work; | 4182 | struct intel_unpin_work *work; |
4037 | unsigned long flags; | 4183 | unsigned long flags; |
4038 | int ret; | 4184 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
4185 | int ret, pipesrc; | ||
4039 | RING_LOCALS; | 4186 | RING_LOCALS; |
4040 | 4187 | ||
4041 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4188 | work = kzalloc(sizeof *work, GFP_KERNEL); |
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4047 | work->event = event; | 4194 | work->event = event; |
4048 | work->dev = crtc->dev; | 4195 | work->dev = crtc->dev; |
4049 | intel_fb = to_intel_framebuffer(crtc->fb); | 4196 | intel_fb = to_intel_framebuffer(crtc->fb); |
4050 | work->obj = intel_fb->obj; | 4197 | work->old_fb_obj = intel_fb->obj; |
4051 | INIT_WORK(&work->work, intel_unpin_work_fn); | 4198 | INIT_WORK(&work->work, intel_unpin_work_fn); |
4052 | 4199 | ||
4053 | /* We borrow the event spin lock for protecting unpin_work */ | 4200 | /* We borrow the event spin lock for protecting unpin_work */ |
4054 | spin_lock_irqsave(&dev->event_lock, flags); | 4201 | spin_lock_irqsave(&dev->event_lock, flags); |
4055 | if (intel_crtc->unpin_work) { | 4202 | if (intel_crtc->unpin_work) { |
4203 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
4056 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4204 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4057 | kfree(work); | 4205 | kfree(work); |
4058 | mutex_unlock(&dev->struct_mutex); | 4206 | mutex_unlock(&dev->struct_mutex); |
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4066 | 4214 | ||
4067 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4215 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4068 | if (ret != 0) { | 4216 | if (ret != 0) { |
4217 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
4218 | obj->driver_private); | ||
4069 | kfree(work); | 4219 | kfree(work); |
4220 | intel_crtc->unpin_work = NULL; | ||
4070 | mutex_unlock(&dev->struct_mutex); | 4221 | mutex_unlock(&dev->struct_mutex); |
4071 | return ret; | 4222 | return ret; |
4072 | } | 4223 | } |
4073 | 4224 | ||
4074 | /* Reference the old fb object for the scheduled work. */ | 4225 | /* Reference the objects for the scheduled work. */ |
4075 | drm_gem_object_reference(work->obj); | 4226 | drm_gem_object_reference(work->old_fb_obj); |
4227 | drm_gem_object_reference(obj); | ||
4076 | 4228 | ||
4077 | crtc->fb = fb; | 4229 | crtc->fb = fb; |
4078 | i915_gem_object_flush_write_domain(obj); | 4230 | i915_gem_object_flush_write_domain(obj); |
4079 | drm_vblank_get(dev, intel_crtc->pipe); | 4231 | drm_vblank_get(dev, intel_crtc->pipe); |
4080 | obj_priv = obj->driver_private; | 4232 | obj_priv = obj->driver_private; |
4081 | atomic_inc(&obj_priv->pending_flip); | 4233 | atomic_inc(&obj_priv->pending_flip); |
4234 | work->pending_flip_obj = obj; | ||
4082 | 4235 | ||
4083 | BEGIN_LP_RING(4); | 4236 | BEGIN_LP_RING(4); |
4084 | OUT_RING(MI_DISPLAY_FLIP | | 4237 | OUT_RING(MI_DISPLAY_FLIP | |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4086 | OUT_RING(fb->pitch); | 4239 | OUT_RING(fb->pitch); |
4087 | if (IS_I965G(dev)) { | 4240 | if (IS_I965G(dev)) { |
4088 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 4241 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); |
4089 | OUT_RING((fb->width << 16) | fb->height); | 4242 | pipesrc = I915_READ(pipesrc_reg); |
4243 | OUT_RING(pipesrc & 0x0fff0fff); | ||
4090 | } else { | 4244 | } else { |
4091 | OUT_RING(obj_priv->gtt_offset); | 4245 | OUT_RING(obj_priv->gtt_offset); |
4092 | OUT_RING(MI_NOOP); | 4246 | OUT_RING(MI_NOOP); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 371d753e362b..aaabbcbe5905 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
148 | 148 | ||
149 | mutex_lock(&dev->struct_mutex); | 149 | mutex_lock(&dev->struct_mutex); |
150 | 150 | ||
151 | ret = i915_gem_object_pin(fbo, PAGE_SIZE); | 151 | ret = i915_gem_object_pin(fbo, 64*1024); |
152 | if (ret) { | 152 | if (ret) { |
153 | DRM_ERROR("failed to pin fb: %d\n", ret); | 153 | DRM_ERROR("failed to pin fb: %d\n", ret); |
154 | goto out_unref; | 154 | goto out_unref; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index aa74e59bec61..c2e8a45780d5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
611 | { | 611 | { |
612 | .ident = "Samsung SX20S", | 612 | .ident = "Samsung SX20S", |
613 | .matches = { | 613 | .matches = { |
614 | DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), | 614 | DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), |
615 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), | 615 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), |
616 | }, | 616 | }, |
617 | }, | 617 | }, |
@@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
623 | }, | 623 | }, |
624 | }, | 624 | }, |
625 | { | 625 | { |
626 | .ident = "Aspire 1810T", | ||
627 | .matches = { | ||
628 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
629 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), | ||
630 | }, | ||
631 | }, | ||
632 | { | ||
626 | .ident = "PC-81005", | 633 | .ident = "PC-81005", |
627 | .matches = { | 634 | .matches = { |
628 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | 635 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), |
629 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | 636 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), |
630 | }, | 637 | }, |
631 | }, | 638 | }, |
639 | { | ||
640 | .ident = "Clevo M5x0N", | ||
641 | .matches = { | ||
642 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), | ||
643 | DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), | ||
644 | }, | ||
645 | }, | ||
632 | { } | 646 | { } |
633 | }; | 647 | }; |
634 | 648 | ||
@@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
643 | { | 657 | { |
644 | enum drm_connector_status status = connector_status_connected; | 658 | enum drm_connector_status status = connector_status_connected; |
645 | 659 | ||
646 | if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) | 660 | if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) |
647 | status = connector_status_disconnected; | 661 | status = connector_status_disconnected; |
648 | 662 | ||
649 | return status; | 663 | return status; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index eaacfd0920df..82678d30ab06 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2345 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2345 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2346 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2346 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2347 | (1 << INTEL_ANALOG_CLONE_BIT); | 2347 | (1 << INTEL_ANALOG_CLONE_BIT); |
2348 | } else if (flags & SDVO_OUTPUT_CVBS0) { | ||
2349 | |||
2350 | sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; | ||
2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2353 | sdvo_priv->is_tv = true; | ||
2354 | intel_output->needs_tv_clock = true; | ||
2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2348 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2356 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2349 | 2357 | ||
2350 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 1cf488247a16..48227e744753 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev) | |||
90 | { | 90 | { |
91 | int result; | 91 | int result; |
92 | 92 | ||
93 | if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, | 93 | if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, |
94 | &result)) | 94 | &result)) |
95 | return -ENODEV; | 95 | return -ENODEV; |
96 | 96 | ||
97 | NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); | 97 | NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); |
98 | 98 | ||
99 | if (result & 0x1) { /* Stamina mode - disable the external GPU */ | 99 | if (result) { /* Ensure that the external GPU is enabled */ |
100 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); | ||
101 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, | ||
102 | NULL); | ||
103 | } else { /* Stamina mode - disable the external GPU */ | ||
100 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, | 104 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, |
101 | NULL); | 105 | NULL); |
102 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, | 106 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, |
103 | NULL); | 107 | NULL); |
104 | } else { /* Ensure that the external GPU is enabled */ | ||
105 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); | ||
106 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, | ||
107 | NULL); | ||
108 | } | 108 | } |
109 | 109 | ||
110 | return 0; | 110 | return 0; |
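
The nouveau_acpi.c hunk inverts the _DSM test: the query now asks for NOUVEAU_DSM_POWER_STATE, and a non-zero result means the discrete GPU should be driven at full speed, with stamina mode as the fallback. A compact restatement of that branch (illustrative only, same DSM helpers as the driver):

	/* Sketch: map the reported _DSM power state onto LED/power calls. */
	static void apply_hybrid_state_sketch(struct drm_device *dev, int result)
	{
		if (result) {	/* external GPU enabled: run at full speed */
			nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
			nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
				    NULL);
		} else {	/* stamina mode: external GPU stays off */
			nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
				    NULL);
			nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
				    NULL);
		}
	}
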
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index ba143972769f..0e9cd1d49130 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg) | |||
310 | struct drm_device *dev = bios->dev; | 310 | struct drm_device *dev = bios->dev; |
311 | 311 | ||
312 | /* C51 has misaligned regs on purpose. Marvellous */ | 312 | /* C51 has misaligned regs on purpose. Marvellous */ |
313 | if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) { | 313 | if (reg & 0x2 || |
314 | NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n", | 314 | (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) |
315 | reg); | 315 | NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg); |
316 | return 0; | 316 | |
317 | } | 317 | /* warn on C51 regs that haven't been verified accessible in tracing */ |
318 | /* | ||
319 | * Warn on C51 regs that have not been verified accessible in | ||
320 | * mmiotracing | ||
321 | */ | ||
322 | if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && | 318 | if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && |
323 | reg != 0x130d && reg != 0x1311 && reg != 0x60081d) | 319 | reg != 0x130d && reg != 0x1311 && reg != 0x60081d) |
324 | NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", | 320 | NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", |
325 | reg); | 321 | reg); |
326 | 322 | ||
327 | /* Trust the init scripts on G80 */ | 323 | if (reg >= (8*1024*1024)) { |
328 | if (dev_priv->card_type >= NV_50) | 324 | NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg); |
329 | return 1; | 325 | return 0; |
330 | |||
331 | #define WITHIN(x, y, z) ((x >= y) && (x < y + z)) | ||
332 | if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE)) | ||
333 | return 1; | ||
334 | if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE)) | ||
335 | return 1; | ||
336 | if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE)) | ||
337 | return 1; | ||
338 | if (dev_priv->VBIOS.pub.chip_version >= 0x30 && | ||
339 | (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600)) | ||
340 | return 1; | ||
341 | if (dev_priv->VBIOS.pub.chip_version >= 0x40 && | ||
342 | WITHIN(reg, 0xc000, 0x48)) | ||
343 | return 1; | ||
344 | if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204) | ||
345 | return 1; | ||
346 | if (dev_priv->VBIOS.pub.chip_version >= 0x40) { | ||
347 | if (reg == 0x00011014 || reg == 0x00020328) | ||
348 | return 1; | ||
349 | if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */ | ||
350 | return 1; | ||
351 | } | 326 | } |
352 | if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE)) | ||
353 | return 1; | ||
354 | if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE)) | ||
355 | return 1; | ||
356 | if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2)) | ||
357 | return 1; | ||
358 | if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2)) | ||
359 | return 1; | ||
360 | if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0) | ||
361 | return 1; | ||
362 | if (dev_priv->VBIOS.pub.chip_version == 0x51 && | ||
363 | WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE)) | ||
364 | return 1; | ||
365 | #undef WITHIN | ||
366 | |||
367 | NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg); | ||
368 | 327 | ||
369 | return 0; | 328 | return 1; |
370 | } | 329 | } |
371 | 330 | ||
372 | static bool | 331 | static bool |
@@ -1906,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
1906 | 1865 | ||
1907 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; | 1866 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; |
1908 | 1867 | ||
1909 | if (dev_priv->card_type >= NV_50) | 1868 | if (dev_priv->card_type >= NV_40) |
1910 | return 1; | 1869 | return 1; |
1911 | 1870 | ||
1912 | /* | 1871 | /* |
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr | |||
3196 | } | 3155 | } |
3197 | #ifdef __powerpc__ | 3156 | #ifdef __powerpc__ |
3198 | /* Powerbook specific quirks */ | 3157 | /* Powerbook specific quirks */ |
3199 | if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329)) | 3158 | if ((dev->pci_device & 0xffff) == 0x0179 || |
3200 | nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); | 3159 | (dev->pci_device & 0xffff) == 0x0189 || |
3201 | if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) { | 3160 | (dev->pci_device & 0xffff) == 0x0329) { |
3202 | if (script == LVDS_PANEL_ON) { | 3161 | if (script == LVDS_RESET) { |
3203 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31)); | 3162 | nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); |
3204 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); | 3163 | |
3205 | } | 3164 | } else if (script == LVDS_PANEL_ON) { |
3206 | if (script == LVDS_PANEL_OFF) { | 3165 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, |
3207 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31)); | 3166 | bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) |
3208 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); | 3167 | | (1 << 31)); |
3168 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, | ||
3169 | bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); | ||
3170 | |||
3171 | } else if (script == LVDS_PANEL_OFF) { | ||
3172 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, | ||
3173 | bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | ||
3174 | & ~(1 << 31)); | ||
3175 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, | ||
3176 | bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); | ||
3209 | } | 3177 | } |
3210 | } | 3178 | } |
3211 | #endif | 3179 | #endif |
@@ -3797,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3797 | */ | 3765 | */ |
3798 | 3766 | ||
3799 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 3767 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
3800 | struct init_exec iexec = {true, false}; | ||
3801 | struct nvbios *bios = &dev_priv->VBIOS; | 3768 | struct nvbios *bios = &dev_priv->VBIOS; |
3802 | uint8_t *table = &bios->data[bios->display.script_table_ptr]; | 3769 | uint8_t *table = &bios->data[bios->display.script_table_ptr]; |
3803 | uint8_t *otable = NULL; | 3770 | uint8_t *otable = NULL; |
@@ -3877,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3877 | } | 3844 | } |
3878 | } | 3845 | } |
3879 | 3846 | ||
3880 | bios->display.output = dcbent; | ||
3881 | |||
3882 | if (pxclk == 0) { | 3847 | if (pxclk == 0) { |
3883 | script = ROM16(otable[6]); | 3848 | script = ROM16(otable[6]); |
3884 | if (!script) { | 3849 | if (!script) { |
@@ -3887,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3887 | } | 3852 | } |
3888 | 3853 | ||
3889 | NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); | 3854 | NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); |
3890 | parse_init_table(bios, script, &iexec); | 3855 | nouveau_bios_run_init_table(dev, script, dcbent); |
3891 | } else | 3856 | } else |
3892 | if (pxclk == -1) { | 3857 | if (pxclk == -1) { |
3893 | script = ROM16(otable[8]); | 3858 | script = ROM16(otable[8]); |
@@ -3897,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3897 | } | 3862 | } |
3898 | 3863 | ||
3899 | NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); | 3864 | NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); |
3900 | parse_init_table(bios, script, &iexec); | 3865 | nouveau_bios_run_init_table(dev, script, dcbent); |
3901 | } else | 3866 | } else |
3902 | if (pxclk == -2) { | 3867 | if (pxclk == -2) { |
3903 | if (table[4] >= 12) | 3868 | if (table[4] >= 12) |
@@ -3910,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3910 | } | 3875 | } |
3911 | 3876 | ||
3912 | NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); | 3877 | NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); |
3913 | parse_init_table(bios, script, &iexec); | 3878 | nouveau_bios_run_init_table(dev, script, dcbent); |
3914 | } else | 3879 | } else |
3915 | if (pxclk > 0) { | 3880 | if (pxclk > 0) { |
3916 | script = ROM16(otable[table[4] + i*6 + 2]); | 3881 | script = ROM16(otable[table[4] + i*6 + 2]); |
@@ -3922,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3922 | } | 3887 | } |
3923 | 3888 | ||
3924 | NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); | 3889 | NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); |
3925 | parse_init_table(bios, script, &iexec); | 3890 | nouveau_bios_run_init_table(dev, script, dcbent); |
3926 | } else | 3891 | } else |
3927 | if (pxclk < 0) { | 3892 | if (pxclk < 0) { |
3928 | script = ROM16(otable[table[4] + i*6 + 4]); | 3893 | script = ROM16(otable[table[4] + i*6 + 4]); |
@@ -3934,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3934 | } | 3899 | } |
3935 | 3900 | ||
3936 | NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); | 3901 | NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); |
3937 | parse_init_table(bios, script, &iexec); | 3902 | nouveau_bios_run_init_table(dev, script, dcbent); |
3938 | } | 3903 | } |
3939 | 3904 | ||
3940 | return 0; | 3905 | return 0; |
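The display-table hunks above keep one dispatch rule: pxclk values of 0, -1 and -2 pick the fixed output scripts 0, 1 and 2 from the table header, while positive and negative pixel clocks pick the per-mode clock scripts. A standalone sketch of that dispatch follows; it only shows the selection, not the ROM16() table reads, and the enum names are illustrative.

    #include <stdio.h>

    /* Sketch of the pxclk-based dispatch in nouveau_bios_run_display_table().
     * Slots 0/1/2 stand for the fixed "output" scripts, 3/4 for the
     * per-entry clock scripts chosen for pxclk > 0 / < 0. */
    enum script_slot { OUT_SCRIPT_0, OUT_SCRIPT_1, OUT_SCRIPT_2,
                       CLK_SCRIPT_0, CLK_SCRIPT_1 };

    static enum script_slot pick_script(int pxclk)
    {
            if (pxclk == 0)
                    return OUT_SCRIPT_0;
            if (pxclk == -1)
                    return OUT_SCRIPT_1;
            if (pxclk == -2)
                    return OUT_SCRIPT_2;
            if (pxclk > 0)
                    return CLK_SCRIPT_0;
            return CLK_SCRIPT_1;    /* any other negative pxclk */
    }

    int main(void)
    {
            printf("%d %d %d\n", pick_script(0), pick_script(-2), pick_script(135000));
            return 0;
    }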
@@ -5434,52 +5399,49 @@ static bool | |||
5434 | parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, | 5399 | parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, |
5435 | uint32_t conn, uint32_t conf, struct dcb_entry *entry) | 5400 | uint32_t conn, uint32_t conf, struct dcb_entry *entry) |
5436 | { | 5401 | { |
5437 | if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 && | 5402 | switch (conn & 0x0000000f) { |
5438 | conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 && | 5403 | case 0: |
5439 | conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 && | 5404 | entry->type = OUTPUT_ANALOG; |
5440 | conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 && | 5405 | break; |
5441 | conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 && | 5406 | case 1: |
5442 | conn != 0xf2205004 && conn != 0xf2209004) { | 5407 | entry->type = OUTPUT_TV; |
5443 | NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n"); | 5408 | break; |
5444 | 5409 | case 2: | |
5445 | /* cause output setting to fail for !TV, so message is seen */ | 5410 | case 3: |
5446 | if ((conn & 0xf) != 0x1) | ||
5447 | dcb->entries = 0; | ||
5448 | |||
5449 | return false; | ||
5450 | } | ||
5451 | /* most of the below is a "best guess" atm */ | ||
5452 | entry->type = conn & 0xf; | ||
5453 | if (entry->type == 2) | ||
5454 | /* another way of specifying straps based lvds... */ | ||
5455 | entry->type = OUTPUT_LVDS; | 5411 | entry->type = OUTPUT_LVDS; |
5456 | if (entry->type == 4) { /* digital */ | 5412 | break; |
5457 | if (conn & 0x10) | 5413 | case 4: |
5458 | entry->type = OUTPUT_LVDS; | 5414 | switch ((conn & 0x000000f0) >> 4) { |
5459 | else | 5415 | case 0: |
5460 | entry->type = OUTPUT_TMDS; | 5416 | entry->type = OUTPUT_TMDS; |
5417 | break; | ||
5418 | case 1: | ||
5419 | entry->type = OUTPUT_LVDS; | ||
5420 | break; | ||
5421 | default: | ||
5422 | NV_ERROR(dev, "Unknown DCB subtype 4/%d\n", | ||
5423 | (conn & 0x000000f0) >> 4); | ||
5424 | return false; | ||
5425 | } | ||
5426 | break; | ||
5427 | default: | ||
5428 | NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); | ||
5429 | return false; | ||
5461 | } | 5430 | } |
5462 | /* what's in bits 5-13? could be some encoder maker thing, in tv case */ | 5431 | |
5463 | entry->i2c_index = (conn >> 14) & 0xf; | 5432 | entry->i2c_index = (conn & 0x0003c000) >> 14; |
5464 | /* raw heads field is in range 0-1, so move to 1-2 */ | 5433 | entry->heads = ((conn & 0x001c0000) >> 18) + 1; |
5465 | entry->heads = ((conn >> 18) & 0x7) + 1; | 5434 | entry->or = entry->heads; /* same as heads, hopefully safe enough */ |
5466 | entry->location = (conn >> 21) & 0xf; | 5435 | entry->location = (conn & 0x01e00000) >> 21; |
5467 | /* unused: entry->bus = (conn >> 25) & 0x7; */ | 5436 | entry->bus = (conn & 0x0e000000) >> 25; |
5468 | /* set or to be same as heads -- hopefully safe enough */ | ||
5469 | entry->or = entry->heads; | ||
5470 | entry->duallink_possible = false; | 5437 | entry->duallink_possible = false; |
5471 | 5438 | ||
5472 | switch (entry->type) { | 5439 | switch (entry->type) { |
5473 | case OUTPUT_ANALOG: | 5440 | case OUTPUT_ANALOG: |
5474 | entry->crtconf.maxfreq = (conf & 0xffff) * 10; | 5441 | entry->crtconf.maxfreq = (conf & 0xffff) * 10; |
5475 | break; | 5442 | break; |
5476 | case OUTPUT_LVDS: | 5443 | case OUTPUT_TV: |
5477 | /* | 5444 | entry->tvconf.has_component_output = false; |
5478 | * This is probably buried in conn's unknown bits. | ||
5479 | * This will upset EDID-ful models, if they exist | ||
5480 | */ | ||
5481 | entry->lvdsconf.use_straps_for_mode = true; | ||
5482 | entry->lvdsconf.use_power_scripts = true; | ||
5483 | break; | 5445 | break; |
5484 | case OUTPUT_TMDS: | 5446 | case OUTPUT_TMDS: |
5485 | /* | 5447 | /* |
@@ -5488,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, | |||
5488 | */ | 5450 | */ |
5489 | fabricate_vga_output(dcb, entry->i2c_index, entry->heads); | 5451 | fabricate_vga_output(dcb, entry->i2c_index, entry->heads); |
5490 | break; | 5452 | break; |
5491 | case OUTPUT_TV: | 5453 | case OUTPUT_LVDS: |
5492 | entry->tvconf.has_component_output = false; | 5454 | if ((conn & 0x00003f00) != 0x10) |
5455 | entry->lvdsconf.use_straps_for_mode = true; | ||
5456 | entry->lvdsconf.use_power_scripts = true; | ||
5457 | break; | ||
5458 | default: | ||
5493 | break; | 5459 | break; |
5494 | } | 5460 | } |
5495 | 5461 | ||
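The rewritten parse_dcb15_entry() decodes the DCB 1.5 "conn" word field by field instead of matching a list of known whole values. A standalone sketch of that bitfield decode is below, using the same shifts and masks; the struct and enum names are illustrative, not the driver's.

    #include <stdint.h>
    #include <stdio.h>

    enum out_type { OUT_ANALOG, OUT_TV, OUT_LVDS, OUT_TMDS };

    struct dcb15_sketch {
            enum out_type type;
            unsigned i2c_index;     /* bits 14..17 */
            unsigned heads;         /* bits 18..20, stored as 1..8 */
            unsigned location;      /* bits 21..24 */
            unsigned bus;           /* bits 25..27 */
    };

    static int decode_dcb15(uint32_t conn, struct dcb15_sketch *e)
    {
            switch (conn & 0x0000000f) {
            case 0: e->type = OUT_ANALOG; break;
            case 1: e->type = OUT_TV; break;
            case 2:
            case 3: e->type = OUT_LVDS; break;
            case 4:
                    /* digital: subtype in bits 4..7 picks TMDS vs LVDS */
                    switch ((conn & 0x000000f0) >> 4) {
                    case 0: e->type = OUT_TMDS; break;
                    case 1: e->type = OUT_LVDS; break;
                    default: return -1;
                    }
                    break;
            default:
                    return -1;
            }

            e->i2c_index = (conn & 0x0003c000) >> 14;
            e->heads     = ((conn & 0x001c0000) >> 18) + 1;
            e->location  = (conn & 0x01e00000) >> 21;
            e->bus       = (conn & 0x0e000000) >> 25;
            return 0;
    }

    int main(void)
    {
            struct dcb15_sketch e;

            if (decode_dcb15(0xf2204301, &e) == 0)
                    printf("type=%d i2c=%u heads=%u\n", e.type, e.i2c_index, e.heads);
            return 0;
    }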
@@ -5564,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) | |||
5564 | dcb->entries = newentries; | 5530 | dcb->entries = newentries; |
5565 | } | 5531 | } |
5566 | 5532 | ||
5567 | static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) | 5533 | static int |
5534 | parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) | ||
5568 | { | 5535 | { |
5536 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
5569 | struct bios_parsed_dcb *bdcb = &bios->bdcb; | 5537 | struct bios_parsed_dcb *bdcb = &bios->bdcb; |
5570 | struct parsed_dcb *dcb; | 5538 | struct parsed_dcb *dcb; |
5571 | uint16_t dcbptr, i2ctabptr = 0; | 5539 | uint16_t dcbptr = 0, i2ctabptr = 0; |
5572 | uint8_t *dcbtable; | 5540 | uint8_t *dcbtable; |
5573 | uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; | 5541 | uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; |
5574 | bool configblock = true; | 5542 | bool configblock = true; |
@@ -5579,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two | |||
5579 | dcb->entries = 0; | 5547 | dcb->entries = 0; |
5580 | 5548 | ||
5581 | /* get the offset from 0x36 */ | 5549 | /* get the offset from 0x36 */ |
5582 | dcbptr = ROM16(bios->data[0x36]); | 5550 | if (dev_priv->card_type > NV_04) { |
5551 | dcbptr = ROM16(bios->data[0x36]); | ||
5552 | if (dcbptr == 0x0000) | ||
5553 | NV_WARN(dev, "No output data (DCB) found in BIOS\n"); | ||
5554 | } | ||
5583 | 5555 | ||
5556 | /* this situation likely means a really old card, pre DCB */ | ||
5584 | if (dcbptr == 0x0) { | 5557 | if (dcbptr == 0x0) { |
5585 | NV_WARN(dev, "No output data (DCB) found in BIOS, " | 5558 | NV_INFO(dev, "Assuming a CRT output exists\n"); |
5586 | "assuming a CRT output exists\n"); | ||
5587 | /* this situation likely means a really old card, pre DCB */ | ||
5588 | fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); | 5559 | fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); |
5589 | 5560 | ||
5590 | if (nv04_tv_identify(dev, | 5561 | if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) |
5591 | bios->legacy.i2c_indices.tv) >= 0) | ||
5592 | fabricate_tv_output(dcb, twoHeads); | 5562 | fabricate_tv_output(dcb, twoHeads); |
5593 | 5563 | ||
5594 | return 0; | 5564 | return 0; |
@@ -5892,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, | |||
5892 | struct nvbios *bios = &dev_priv->VBIOS; | 5862 | struct nvbios *bios = &dev_priv->VBIOS; |
5893 | struct init_exec iexec = { true, false }; | 5863 | struct init_exec iexec = { true, false }; |
5894 | 5864 | ||
5865 | mutex_lock(&bios->lock); | ||
5895 | bios->display.output = dcbent; | 5866 | bios->display.output = dcbent; |
5896 | parse_init_table(bios, table, &iexec); | 5867 | parse_init_table(bios, table, &iexec); |
5897 | bios->display.output = NULL; | 5868 | bios->display.output = NULL; |
5869 | mutex_unlock(&bios->lock); | ||
5898 | } | 5870 | } |
5899 | 5871 | ||
5900 | static bool NVInitVBIOS(struct drm_device *dev) | 5872 | static bool NVInitVBIOS(struct drm_device *dev) |
@@ -5903,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev) | |||
5903 | struct nvbios *bios = &dev_priv->VBIOS; | 5875 | struct nvbios *bios = &dev_priv->VBIOS; |
5904 | 5876 | ||
5905 | memset(bios, 0, sizeof(struct nvbios)); | 5877 | memset(bios, 0, sizeof(struct nvbios)); |
5878 | mutex_init(&bios->lock); | ||
5906 | bios->dev = dev; | 5879 | bios->dev = dev; |
5907 | 5880 | ||
5908 | if (!NVShadowVBIOS(dev, bios->data)) | 5881 | if (!NVShadowVBIOS(dev, bios->data)) |
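The new bios->lock serializes whole init-table runs: nouveau_bios_run_init_table() takes the mutex, points bios->display.output at the caller's DCB entry for the duration of parse_init_table(), then clears it before unlocking. A userspace sketch of that "set context, run, clear, all under one lock" shape follows; pthreads stands in for the kernel's struct mutex and the structure is purely illustrative.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bios_sketch {
            pthread_mutex_t lock;
            const void *output;     /* stands in for bios->display.output */
    };

    static void parse_init_table_sketch(struct bios_sketch *bios, uint16_t table)
    {
            /* would interpret the script at 'table'; here we only show the context */
            printf("running table 0x%04x, output context %s\n",
                   (unsigned)table, bios->output ? "set" : "unset");
    }

    static void run_init_table(struct bios_sketch *bios, uint16_t table,
                               const void *dcbent)
    {
            pthread_mutex_lock(&bios->lock);
            bios->output = dcbent;                  /* context visible to the parser */
            parse_init_table_sketch(bios, table);
            bios->output = NULL;                    /* never leak a stale pointer */
            pthread_mutex_unlock(&bios->lock);
    }

    int main(void)
    {
            struct bios_sketch bios = { .output = NULL };
            int dummy_dcbent;

            pthread_mutex_init(&bios.lock, NULL);
            run_init_table(&bios, 0x1234, &dummy_dcbent);
            return 0;
    }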
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 058e98c76d89..fd94bd6dc264 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -205,6 +205,8 @@ struct nvbios { | |||
205 | struct drm_device *dev; | 205 | struct drm_device *dev; |
206 | struct nouveau_bios_info pub; | 206 | struct nouveau_bios_info pub; |
207 | 207 | ||
208 | struct mutex lock; | ||
209 | |||
208 | uint8_t data[NV_PROM_SIZE]; | 210 | uint8_t data[NV_PROM_SIZE]; |
209 | unsigned int length; | 211 | unsigned int length; |
210 | bool execute; | 212 | bool execute; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index e342a418d434..028719fddf76 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev, | |||
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Some of the tile_flags have a periodic structure of N*4096 bytes, | 67 | * Some of the tile_flags have a periodic structure of N*4096 bytes, |
68 | * align to that as well as the page size. Overallocate memory to | 68 | * align to that as well as the page size. Align the size to the |
69 | * avoid corruption of other buffer objects. | 69 | * appropriate boundaries. This does imply that sizes are rounded up |
70 | * 3-7 pages, so be aware of this and do not waste memory by allocating | ||
71 | * many small buffers. | ||
70 | */ | 72 | */ |
71 | if (dev_priv->card_type == NV_50) { | 73 | if (dev_priv->card_type == NV_50) { |
72 | uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; | 74 | uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; |
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev, | |||
77 | case 0x2800: | 79 | case 0x2800: |
78 | case 0x4800: | 80 | case 0x4800: |
79 | case 0x7a00: | 81 | case 0x7a00: |
80 | *size = roundup(*size, block_size); | ||
81 | if (is_power_of_2(block_size)) { | 82 | if (is_power_of_2(block_size)) { |
82 | *size += 3 * block_size; | ||
83 | for (i = 1; i < 10; i++) { | 83 | for (i = 1; i < 10; i++) { |
84 | *align = 12 * i * block_size; | 84 | *align = 12 * i * block_size; |
85 | if (!(*align % 65536)) | 85 | if (!(*align % 65536)) |
86 | break; | 86 | break; |
87 | } | 87 | } |
88 | } else { | 88 | } else { |
89 | *size += 6 * block_size; | ||
90 | for (i = 1; i < 10; i++) { | 89 | for (i = 1; i < 10; i++) { |
91 | *align = 8 * i * block_size; | 90 | *align = 8 * i * block_size; |
92 | if (!(*align % 65536)) | 91 | if (!(*align % 65536)) |
93 | break; | 92 | break; |
94 | } | 93 | } |
95 | } | 94 | } |
95 | *size = roundup(*size, *align); | ||
96 | break; | 96 | break; |
97 | default: | 97 | default: |
98 | break; | 98 | break; |
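After this change the size is no longer padded by a few extra blocks; instead the code searches for an alignment that is a multiple of both the tile block size (12·i or 8·i of it, depending on whether block_size is a power of two) and 64KiB, then rounds the size up to that alignment. A standalone sketch of that search, with roundup() spelled out; the block_size used in main() is just an example value.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t roundup_u32(uint32_t x, uint32_t y)
    {
            return ((x + y - 1) / y) * y;
    }

    static bool is_pow2(uint32_t x)
    {
            return x && !(x & (x - 1));
    }

    /* Mirror of the alignment search in nouveau_bo_fixup_align() for the
     * tiled NV50 memory types: pick the smallest k*block_size (k = 12*i or
     * 8*i) that is also a multiple of 65536, then round the size up to it. */
    static void fixup_align(uint32_t block_size, uint32_t *size, uint32_t *align)
    {
            int i;

            if (is_pow2(block_size)) {
                    for (i = 1; i < 10; i++) {
                            *align = 12 * i * block_size;
                            if (!(*align % 65536))
                                    break;
                    }
            } else {
                    for (i = 1; i < 10; i++) {
                            *align = 8 * i * block_size;
                            if (!(*align % 65536))
                                    break;
                    }
            }
            *size = roundup_u32(*size, *align);
    }

    int main(void)
    {
            uint32_t size = 300000, align = 0;

            fixup_align(16384, &size, &align);      /* e.g. 16KiB blocks */
            printf("align=%u size=%u\n", (unsigned)align, (unsigned)size);
            return 0;
    }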
@@ -469,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, | |||
469 | 469 | ||
470 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, | 470 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, |
471 | evict, no_wait, new_mem); | 471 | evict, no_wait, new_mem); |
472 | if (nvbo->channel && nvbo->channel != chan) | ||
473 | ret = nouveau_fence_wait(fence, NULL, false, false); | ||
472 | nouveau_fence_unref((void *)&fence); | 474 | nouveau_fence_unref((void *)&fence); |
473 | return ret; | 475 | return ret; |
474 | } | 476 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 343d718a9667..2281f99da7fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan) | |||
278 | /* Ensure the channel is no longer active on the GPU */ | 278 | /* Ensure the channel is no longer active on the GPU */ |
279 | pfifo->reassign(dev, false); | 279 | pfifo->reassign(dev, false); |
280 | 280 | ||
281 | if (pgraph->channel(dev) == chan) { | 281 | pgraph->fifo_access(dev, false); |
282 | pgraph->fifo_access(dev, false); | 282 | if (pgraph->channel(dev) == chan) |
283 | pgraph->unload_context(dev); | 283 | pgraph->unload_context(dev); |
284 | pgraph->fifo_access(dev, true); | ||
285 | } | ||
286 | pgraph->destroy_context(chan); | 284 | pgraph->destroy_context(chan); |
285 | pgraph->fifo_access(dev, true); | ||
287 | 286 | ||
288 | if (pfifo->channel_id(dev) == chan->id) { | 287 | if (pfifo->channel_id(dev) == chan->id) { |
289 | pfifo->disable(dev); | 288 | pfifo->disable(dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 5a10deb8bdbd..d2f63353ea97 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -24,9 +24,12 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <acpi/button.h> | ||
28 | |||
27 | #include "drmP.h" | 29 | #include "drmP.h" |
28 | #include "drm_edid.h" | 30 | #include "drm_edid.h" |
29 | #include "drm_crtc_helper.h" | 31 | #include "drm_crtc_helper.h" |
32 | |||
30 | #include "nouveau_reg.h" | 33 | #include "nouveau_reg.h" |
31 | #include "nouveau_drv.h" | 34 | #include "nouveau_drv.h" |
32 | #include "nouveau_encoder.h" | 35 | #include "nouveau_encoder.h" |
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder) | |||
83 | static void | 86 | static void |
84 | nouveau_connector_destroy(struct drm_connector *drm_connector) | 87 | nouveau_connector_destroy(struct drm_connector *drm_connector) |
85 | { | 88 | { |
86 | struct nouveau_connector *connector = nouveau_connector(drm_connector); | 89 | struct nouveau_connector *nv_connector = |
87 | struct drm_device *dev = connector->base.dev; | 90 | nouveau_connector(drm_connector); |
91 | struct drm_device *dev; | ||
88 | 92 | ||
89 | NV_DEBUG_KMS(dev, "\n"); | 93 | if (!nv_connector) |
90 | |||
91 | if (!connector) | ||
92 | return; | 94 | return; |
93 | 95 | ||
96 | dev = nv_connector->base.dev; | ||
97 | NV_DEBUG_KMS(dev, "\n"); | ||
98 | |||
99 | kfree(nv_connector->edid); | ||
94 | drm_sysfs_connector_remove(drm_connector); | 100 | drm_sysfs_connector_remove(drm_connector); |
95 | drm_connector_cleanup(drm_connector); | 101 | drm_connector_cleanup(drm_connector); |
96 | kfree(drm_connector); | 102 | kfree(drm_connector); |
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
233 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | 239 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) |
234 | nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); | 240 | nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); |
235 | if (nv_encoder && nv_connector->native_mode) { | 241 | if (nv_encoder && nv_connector->native_mode) { |
242 | #ifdef CONFIG_ACPI | ||
243 | if (!nouveau_ignorelid && !acpi_lid_open()) | ||
244 | return connector_status_disconnected; | ||
245 | #endif | ||
236 | nouveau_connector_set_encoder(connector, nv_encoder); | 246 | nouveau_connector_set_encoder(connector, nv_encoder); |
237 | return connector_status_connected; | 247 | return connector_status_connected; |
238 | } | 248 | } |
239 | 249 | ||
250 | /* Cleanup the previous EDID block. */ | ||
251 | if (nv_connector->edid) { | ||
252 | drm_mode_connector_update_edid_property(connector, NULL); | ||
253 | kfree(nv_connector->edid); | ||
254 | nv_connector->edid = NULL; | ||
255 | } | ||
256 | |||
240 | i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); | 257 | i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); |
241 | if (i2c) { | 258 | if (i2c) { |
242 | nouveau_connector_ddc_prepare(connector, &flags); | 259 | nouveau_connector_ddc_prepare(connector, &flags); |
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
247 | if (!nv_connector->edid) { | 264 | if (!nv_connector->edid) { |
248 | NV_ERROR(dev, "DDC responded, but no EDID for %s\n", | 265 | NV_ERROR(dev, "DDC responded, but no EDID for %s\n", |
249 | drm_get_connector_name(connector)); | 266 | drm_get_connector_name(connector)); |
250 | return connector_status_disconnected; | 267 | goto detect_analog; |
251 | } | 268 | } |
252 | 269 | ||
253 | if (nv_encoder->dcb->type == OUTPUT_DP && | 270 | if (nv_encoder->dcb->type == OUTPUT_DP && |
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
281 | return connector_status_connected; | 298 | return connector_status_connected; |
282 | } | 299 | } |
283 | 300 | ||
301 | detect_analog: | ||
284 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); | 302 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); |
285 | if (!nv_encoder) | 303 | if (!nv_encoder) |
286 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); | 304 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); |
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev, | |||
687 | */ | 705 | */ |
688 | if (!nv_connector->edid && !nv_connector->native_mode && | 706 | if (!nv_connector->edid && !nv_connector->native_mode && |
689 | !dev_priv->VBIOS.pub.fp_no_ddc) { | 707 | !dev_priv->VBIOS.pub.fp_no_ddc) { |
690 | nv_connector->edid = | 708 | struct edid *edid = |
691 | (struct edid *)nouveau_bios_embedded_edid(dev); | 709 | (struct edid *)nouveau_bios_embedded_edid(dev); |
710 | if (edid) { | ||
711 | nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
712 | *(nv_connector->edid) = *edid; | ||
713 | } | ||
692 | } | 714 | } |
693 | 715 | ||
694 | if (!nv_connector->edid) | 716 | if (!nv_connector->edid) |
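The connector now owns its EDID: the embedded BIOS EDID is copied into a freshly allocated buffer instead of being pointed at directly, which is what makes the unconditional kfree(nv_connector->edid) in nouveau_connector_destroy() and in the detect path safe. A userspace sketch of that duplicate-then-own pattern follows; malloc/free stand in for kmalloc/kfree and EDID_LENGTH is the usual 128-byte base block.

    #include <stdlib.h>
    #include <string.h>

    #define EDID_LENGTH 128

    /* Return a heap copy of a (possibly read-only) EDID block, or NULL.
     * The caller owns the copy and may free() it without caring whether
     * the original lived in a ROM image or a BIOS shadow. */
    static unsigned char *edid_dup(const unsigned char *src)
    {
            unsigned char *copy;

            if (!src)
                    return NULL;
            copy = malloc(EDID_LENGTH);
            if (copy)
                    memcpy(copy, src, EDID_LENGTH);
            return copy;
    }

    int main(void)
    {
            unsigned char rom_edid[EDID_LENGTH] = { 0x00, 0xff, 0xff, 0xff };
            unsigned char *edid = edid_dup(rom_edid);

            free(edid);     /* always safe, regardless of where rom_edid came from */
            return 0;
    }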
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 7afbe8b40d51..50d9e67745af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -126,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) | |||
126 | chan->dma.cur += nr_dwords; | 126 | chan->dma.cur += nr_dwords; |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline bool | 129 | /* Fetch and adjust GPU GET pointer |
130 | READ_GET(struct nouveau_channel *chan, uint32_t *get) | 130 | * |
131 | * Returns: | ||
132 | * value >= 0, the adjusted GET pointer | ||
133 | * -EINVAL if GET pointer currently outside main push buffer | ||
134 | * -EBUSY if timeout exceeded | ||
135 | */ | ||
136 | static inline int | ||
137 | READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout) | ||
131 | { | 138 | { |
132 | uint32_t val; | 139 | uint32_t val; |
133 | 140 | ||
134 | val = nvchan_rd32(chan, chan->user_get); | 141 | val = nvchan_rd32(chan, chan->user_get); |
135 | if (val < chan->pushbuf_base || | 142 | |
136 | val > chan->pushbuf_base + (chan->dma.max << 2)) { | 143 | /* reset counter as long as GET is still advancing, this is |
137 | /* meaningless to dma_wait() except to know whether the | 144 | * to avoid misdetecting a GPU lockup if the GPU happens to |
138 | * GPU has stalled or not | 145 | * just be processing an operation that takes a long time |
139 | */ | 146 | */ |
140 | *get = val; | 147 | if (val != *prev_get) { |
141 | return false; | 148 | *prev_get = val; |
149 | *timeout = 0; | ||
150 | } | ||
151 | |||
152 | if ((++*timeout & 0xff) == 0) { | ||
153 | DRM_UDELAY(1); | ||
154 | if (*timeout > 100000) | ||
155 | return -EBUSY; | ||
142 | } | 156 | } |
143 | 157 | ||
144 | *get = (val - chan->pushbuf_base) >> 2; | 158 | if (val < chan->pushbuf_base || |
145 | return true; | 159 | val > chan->pushbuf_base + (chan->dma.max << 2)) |
160 | return -EINVAL; | ||
161 | |||
162 | return (val - chan->pushbuf_base) >> 2; | ||
146 | } | 163 | } |
147 | 164 | ||
148 | int | 165 | int |
149 | nouveau_dma_wait(struct nouveau_channel *chan, int size) | 166 | nouveau_dma_wait(struct nouveau_channel *chan, int size) |
150 | { | 167 | { |
151 | uint32_t get, prev_get = 0, cnt = 0; | 168 | uint32_t prev_get = 0, cnt = 0; |
152 | bool get_valid; | 169 | int get; |
153 | 170 | ||
154 | while (chan->dma.free < size) { | 171 | while (chan->dma.free < size) { |
155 | /* reset counter as long as GET is still advancing, this is | 172 | get = READ_GET(chan, &prev_get, &cnt); |
156 | * to avoid misdetecting a GPU lockup if the GPU happens to | 173 | if (unlikely(get == -EBUSY)) |
157 | * just be processing an operation that takes a long time | 174 | return -EBUSY; |
158 | */ | ||
159 | get_valid = READ_GET(chan, &get); | ||
160 | if (get != prev_get) { | ||
161 | prev_get = get; | ||
162 | cnt = 0; | ||
163 | } | ||
164 | |||
165 | if ((++cnt & 0xff) == 0) { | ||
166 | DRM_UDELAY(1); | ||
167 | if (cnt > 100000) | ||
168 | return -EBUSY; | ||
169 | } | ||
170 | 175 | ||
171 | /* loop until we have a usable GET pointer. the value | 176 | /* loop until we have a usable GET pointer. the value |
172 | * we read from the GPU may be outside the main ring if | 177 | * we read from the GPU may be outside the main ring if |
@@ -177,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size) | |||
177 | * from the SKIPS area, so the code below doesn't have to deal | 182 | * from the SKIPS area, so the code below doesn't have to deal |
178 | * with some fun corner cases. | 183 | * with some fun corner cases. |
179 | */ | 184 | */ |
180 | if (!get_valid || get < NOUVEAU_DMA_SKIPS) | 185 | if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS) |
181 | continue; | 186 | continue; |
182 | 187 | ||
183 | if (get <= chan->dma.cur) { | 188 | if (get <= chan->dma.cur) { |
@@ -203,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size) | |||
203 | * after processing the currently pending commands. | 208 | * after processing the currently pending commands. |
204 | */ | 209 | */ |
205 | OUT_RING(chan, chan->pushbuf_base | 0x20000000); | 210 | OUT_RING(chan, chan->pushbuf_base | 0x20000000); |
211 | |||
212 | /* wait for GET to depart from the skips area. | ||
213 | * prevents writing GET==PUT and causing a race | ||
214 | * condition that causes us to think the GPU is | ||
215 | * idle when it's not. | ||
216 | */ | ||
217 | do { | ||
218 | get = READ_GET(chan, &prev_get, &cnt); | ||
219 | if (unlikely(get == -EBUSY)) | ||
220 | return -EBUSY; | ||
221 | if (unlikely(get == -EINVAL)) | ||
222 | continue; | ||
223 | } while (get <= NOUVEAU_DMA_SKIPS); | ||
206 | WRITE_PUT(NOUVEAU_DMA_SKIPS); | 224 | WRITE_PUT(NOUVEAU_DMA_SKIPS); |
207 | 225 | ||
208 | /* we're now submitting commands at the start of | 226 | /* we're now submitting commands at the start of |
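The reworked READ_GET() folds the lockup detection into the helper: the timeout counter only advances while the GPU's GET pointer stands still, there is a 1µs delay every 256 polls, and a hard -EBUSY after roughly 100000 counted polls; an out-of-ring value now reports -EINVAL instead of a bool. Below is a standalone sketch of just the counter logic; the hardware read is replaced by a stub that pretends GET is stuck, and the sketch skips the pushbuf-base remapping and -EINVAL case.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stub for nvchan_rd32(chan, chan->user_get): pretend GET never moves. */
    static uint32_t read_hw_get(void)
    {
            return 0x1000;
    }

    /* Sketch of the reworked READ_GET(): returns the GET value, or -EBUSY
     * once it has been observed unchanged for ~100000 polls. */
    static int poll_get(uint32_t *prev_get, uint32_t *timeout)
    {
            uint32_t val = read_hw_get();

            if (val != *prev_get) {         /* still advancing: not a lockup */
                    *prev_get = val;
                    *timeout = 0;
            }

            if ((++*timeout & 0xff) == 0) {
                    /* the driver does DRM_UDELAY(1) here */
                    if (*timeout > 100000)
                            return -EBUSY;
            }

            return (int)val;
    }

    int main(void)
    {
            uint32_t prev = 0, cnt = 0;
            int ret;

            do {
                    ret = poll_get(&prev, &cnt);
            } while (ret >= 0);

            printf("ret=%d after %u polls\n", ret, (unsigned)cnt);
            return 0;
    }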
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 9e2926c48579..f954ad93e81f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
490 | if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { | 490 | if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { |
491 | NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", | 491 | NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", |
492 | nv_rd32(dev, NV50_AUXCH_CTRL(index))); | 492 | nv_rd32(dev, NV50_AUXCH_CTRL(index))); |
493 | return -EBUSY; | 493 | ret = -EBUSY; |
494 | goto out; | ||
494 | } | 495 | } |
495 | 496 | ||
496 | udelay(400); | 497 | udelay(400); |
@@ -502,6 +503,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
502 | } | 503 | } |
503 | 504 | ||
504 | if (cmd & 1) { | 505 | if (cmd & 1) { |
506 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { | ||
507 | ret = -EREMOTEIO; | ||
508 | goto out; | ||
509 | } | ||
510 | |||
505 | for (i = 0; i < 4; i++) { | 511 | for (i = 0; i < 4; i++) { |
506 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); | 512 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); |
507 | NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); | 513 | NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 06eb993e0883..da3b93b84502 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf; | |||
56 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); | 56 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); |
57 | 57 | ||
58 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); | 58 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); |
59 | int nouveau_vram_notify; | 59 | int nouveau_vram_notify = 1; |
60 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); | 60 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); |
61 | 61 | ||
62 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); | 62 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); |
@@ -71,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)"); | |||
71 | int nouveau_uscript_tmds = -1; | 71 | int nouveau_uscript_tmds = -1; |
72 | module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); | 72 | module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); |
73 | 73 | ||
74 | MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); | ||
75 | int nouveau_ignorelid = 0; | ||
76 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); | ||
77 | |||
78 | MODULE_PARM_DESC(noaccel, "Disable all acceleration"); | ||
79 | int nouveau_noaccel = 0; | ||
80 | module_param_named(noaccel, nouveau_noaccel, int, 0400); | ||
81 | |||
82 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | ||
83 | int nouveau_nofbaccel = 0; | ||
84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | ||
85 | |||
74 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 86 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
75 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 87 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
76 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 88 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 026419fe8791..1c15ef37b71c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -509,6 +509,8 @@ struct drm_nouveau_private { | |||
509 | void __iomem *ramin; | 509 | void __iomem *ramin; |
510 | uint32_t ramin_size; | 510 | uint32_t ramin_size; |
511 | 511 | ||
512 | struct nouveau_bo *vga_ram; | ||
513 | |||
512 | struct workqueue_struct *wq; | 514 | struct workqueue_struct *wq; |
513 | struct work_struct irq_work; | 515 | struct work_struct irq_work; |
514 | 516 | ||
@@ -581,6 +583,7 @@ struct drm_nouveau_private { | |||
581 | uint64_t vm_end; | 583 | uint64_t vm_end; |
582 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; | 584 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; |
583 | int vm_vram_pt_nr; | 585 | int vm_vram_pt_nr; |
586 | uint64_t vram_sys_base; | ||
584 | 587 | ||
585 | /* the mtrr covering the FB */ | 588 | /* the mtrr covering the FB */ |
586 | int fb_mtrr; | 589 | int fb_mtrr; |
@@ -675,6 +678,9 @@ extern char *nouveau_tv_norm; | |||
675 | extern int nouveau_reg_debug; | 678 | extern int nouveau_reg_debug; |
676 | extern char *nouveau_vbios; | 679 | extern char *nouveau_vbios; |
677 | extern int nouveau_ctxfw; | 680 | extern int nouveau_ctxfw; |
681 | extern int nouveau_ignorelid; | ||
682 | extern int nouveau_nofbaccel; | ||
683 | extern int nouveau_noaccel; | ||
678 | 684 | ||
679 | /* nouveau_state.c */ | 685 | /* nouveau_state.c */ |
680 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | 686 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0b05c869e0e7..ea879a2efef3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = { | |||
107 | .fb_setcmap = drm_fb_helper_setcmap, | 107 | .fb_setcmap = drm_fb_helper_setcmap, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | static struct fb_ops nv04_fbcon_ops = { | ||
111 | .owner = THIS_MODULE, | ||
112 | .fb_check_var = drm_fb_helper_check_var, | ||
113 | .fb_set_par = drm_fb_helper_set_par, | ||
114 | .fb_setcolreg = drm_fb_helper_setcolreg, | ||
115 | .fb_fillrect = nv04_fbcon_fillrect, | ||
116 | .fb_copyarea = nv04_fbcon_copyarea, | ||
117 | .fb_imageblit = nv04_fbcon_imageblit, | ||
118 | .fb_sync = nouveau_fbcon_sync, | ||
119 | .fb_pan_display = drm_fb_helper_pan_display, | ||
120 | .fb_blank = drm_fb_helper_blank, | ||
121 | .fb_setcmap = drm_fb_helper_setcmap, | ||
122 | }; | ||
123 | |||
124 | static struct fb_ops nv50_fbcon_ops = { | ||
125 | .owner = THIS_MODULE, | ||
126 | .fb_check_var = drm_fb_helper_check_var, | ||
127 | .fb_set_par = drm_fb_helper_set_par, | ||
128 | .fb_setcolreg = drm_fb_helper_setcolreg, | ||
129 | .fb_fillrect = nv50_fbcon_fillrect, | ||
130 | .fb_copyarea = nv50_fbcon_copyarea, | ||
131 | .fb_imageblit = nv50_fbcon_imageblit, | ||
132 | .fb_sync = nouveau_fbcon_sync, | ||
133 | .fb_pan_display = drm_fb_helper_pan_display, | ||
134 | .fb_blank = drm_fb_helper_blank, | ||
135 | .fb_setcmap = drm_fb_helper_setcmap, | ||
136 | }; | ||
137 | |||
110 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 138 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
111 | u16 blue, int regno) | 139 | u16 blue, int regno) |
112 | { | 140 | { |
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
267 | dev_priv->fbdev_info = info; | 295 | dev_priv->fbdev_info = info; |
268 | 296 | ||
269 | strcpy(info->fix.id, "nouveaufb"); | 297 | strcpy(info->fix.id, "nouveaufb"); |
270 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | 298 | if (nouveau_nofbaccel) |
271 | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; | 299 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; |
300 | else | ||
301 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | ||
302 | FBINFO_HWACCEL_FILLRECT | | ||
303 | FBINFO_HWACCEL_IMAGEBLIT; | ||
272 | info->fbops = &nouveau_fbcon_ops; | 304 | info->fbops = &nouveau_fbcon_ops; |
273 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - | 305 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - |
274 | dev_priv->vm_vram_base; | 306 | dev_priv->vm_vram_base; |
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
316 | par->nouveau_fb = nouveau_fb; | 348 | par->nouveau_fb = nouveau_fb; |
317 | par->dev = dev; | 349 | par->dev = dev; |
318 | 350 | ||
319 | if (dev_priv->channel) { | 351 | if (dev_priv->channel && !nouveau_nofbaccel) { |
320 | switch (dev_priv->card_type) { | 352 | switch (dev_priv->card_type) { |
321 | case NV_50: | 353 | case NV_50: |
322 | nv50_fbcon_accel_init(info); | 354 | nv50_fbcon_accel_init(info); |
355 | info->fbops = &nv50_fbcon_ops; | ||
323 | break; | 356 | break; |
324 | default: | 357 | default: |
325 | nv04_fbcon_accel_init(info); | 358 | nv04_fbcon_accel_init(info); |
359 | info->fbops = &nv04_fbcon_ops; | ||
326 | break; | 360 | break; |
327 | }; | 361 | }; |
328 | } | 362 | } |
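The fbcon hunks split the accelerated fb_ops out per generation and add a nofbaccel escape hatch, keeping the helper-only default table for the unaccelerated case. A standalone sketch of selecting a function-pointer table that way is below; the types and names are illustrative, not the real fb_ops structures.

    #include <stdbool.h>
    #include <stdio.h>

    enum card_type { NV_04, NV_40, NV_50 };

    struct fb_ops_sketch {
            const char *name;
            void (*fillrect)(void);
    };

    static void sw_fillrect(void)   { puts("software fillrect"); }
    static void nv04_fillrect(void) { puts("nv04 accelerated fillrect"); }
    static void nv50_fillrect(void) { puts("nv50 accelerated fillrect"); }

    static const struct fb_ops_sketch sw_ops   = { "soft", sw_fillrect };
    static const struct fb_ops_sketch nv04_ops = { "nv04", nv04_fillrect };
    static const struct fb_ops_sketch nv50_ops = { "nv50", nv50_fillrect };

    /* Mirrors the fbcon setup: only use an accelerated ops table when a
     * channel exists and the nofbaccel option is not set, and pick the
     * table by card generation. */
    static const struct fb_ops_sketch *
    pick_fbcon_ops(bool have_channel, bool nofbaccel, enum card_type card)
    {
            if (!have_channel || nofbaccel)
                    return &sw_ops;
            return card == NV_50 ? &nv50_ops : &nv04_ops;
    }

    int main(void)
    {
            pick_fbcon_ops(true, false, NV_50)->fillrect();
            pick_fbcon_ops(true, true, NV_40)->fillrect();
            return 0;
    }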
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 462e0b87b4bd..f9c34e1a8c11 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); | |||
40 | void nouveau_fbcon_restore(void); | 40 | void nouveau_fbcon_restore(void); |
41 | void nouveau_fbcon_zfill(struct drm_device *dev); | 41 | void nouveau_fbcon_zfill(struct drm_device *dev); |
42 | 42 | ||
43 | void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | ||
44 | void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | ||
45 | void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | ||
43 | int nv04_fbcon_accel_init(struct fb_info *info); | 46 | int nv04_fbcon_accel_init(struct fb_info *info); |
47 | void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | ||
48 | void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | ||
49 | void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | ||
44 | int nv50_fbcon_accel_init(struct fb_info *info); | 50 | int nv50_fbcon_accel_init(struct fb_info *info); |
45 | 51 | ||
46 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); | 52 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2009db2426c3..70cc30803e3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -321,6 +321,7 @@ retry: | |||
321 | else { | 321 | else { |
322 | NV_ERROR(dev, "invalid valid domains: 0x%08x\n", | 322 | NV_ERROR(dev, "invalid valid domains: 0x%08x\n", |
323 | b->valid_domains); | 323 | b->valid_domains); |
324 | list_add_tail(&nvbo->entry, &op->both_list); | ||
324 | validate_fini(op, NULL); | 325 | validate_fini(op, NULL); |
325 | return -EINVAL; | 326 | return -EINVAL; |
326 | } | 327 | } |
@@ -466,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size) | |||
466 | static int | 467 | static int |
467 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, | 468 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, |
468 | struct drm_nouveau_gem_pushbuf_bo *bo, | 469 | struct drm_nouveau_gem_pushbuf_bo *bo, |
469 | int nr_relocs, uint64_t ptr_relocs, | 470 | unsigned nr_relocs, uint64_t ptr_relocs, |
470 | int nr_dwords, int first_dword, | 471 | unsigned nr_dwords, unsigned first_dword, |
471 | uint32_t *pushbuf, bool is_iomem) | 472 | uint32_t *pushbuf, bool is_iomem) |
472 | { | 473 | { |
473 | struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; | 474 | struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; |
474 | struct drm_device *dev = chan->dev; | 475 | struct drm_device *dev = chan->dev; |
475 | int ret = 0, i; | 476 | int ret = 0; |
477 | unsigned i; | ||
476 | 478 | ||
477 | reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); | 479 | reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); |
478 | if (IS_ERR(reloc)) | 480 | if (IS_ERR(reloc)) |
@@ -667,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, | |||
667 | } | 669 | } |
668 | pbbo = nouveau_gem_object(gem); | 670 | pbbo = nouveau_gem_object(gem); |
669 | 671 | ||
672 | if ((req->offset & 3) || req->nr_dwords < 2 || | ||
673 | (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size || | ||
674 | (unsigned long)req->nr_dwords > | ||
675 | ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) { | ||
676 | NV_ERROR(dev, "pb call misaligned or out of bounds: " | ||
677 | "%d + %d * 4 > %ld\n", | ||
678 | req->offset, req->nr_dwords, pbbo->bo.mem.size); | ||
679 | ret = -EINVAL; | ||
680 | drm_gem_object_unreference(gem); | ||
681 | goto out; | ||
682 | } | ||
683 | |||
670 | ret = ttm_bo_reserve(&pbbo->bo, false, false, true, | 684 | ret = ttm_bo_reserve(&pbbo->bo, false, false, true, |
671 | chan->fence.sequence); | 685 | chan->fence.sequence); |
672 | if (ret) { | 686 | if (ret) { |
@@ -911,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |||
911 | } | 925 | } |
912 | 926 | ||
913 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { | 927 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { |
928 | spin_lock(&nvbo->bo.lock); | ||
914 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); | 929 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); |
930 | spin_unlock(&nvbo->bo.lock); | ||
915 | } else { | 931 | } else { |
916 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); | 932 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); |
917 | if (ret == 0) | 933 | if (ret == 0) |
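The new pushbuf-call check rejects a misaligned offset, fewer than two dwords, and any offset/nr_dwords pair that would run past the buffer object, phrasing the comparison so it cannot overflow. A standalone sketch of that validation follows; the sizes in main() are arbitrary.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Validate a "call" into a push buffer of bo_size bytes: the offset must
     * be dword-aligned and in range, and nr_dwords must fit between offset
     * and the end of the buffer. Comparing nr_dwords against the remaining
     * space (rather than offset + nr_dwords*4 against the size) avoids any
     * integer overflow in the check itself. */
    static bool pushbuf_call_ok(uint32_t offset, uint32_t nr_dwords,
                                unsigned long bo_size)
    {
            if (offset & 3)
                    return false;
            if (nr_dwords < 2)
                    return false;
            if ((unsigned long)offset > bo_size)
                    return false;
            if ((unsigned long)nr_dwords > (bo_size - offset) >> 2)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d\n", pushbuf_call_ok(0x100, 16, 4096));   /* 1 */
            printf("%d\n", pushbuf_call_ok(0x102, 16, 4096));   /* 0: misaligned */
            printf("%d\n", pushbuf_call_ok(4000, 4096, 4096));  /* 0: too long */
            return 0;
    }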
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index 419f4c2b3b89..c7ebec696747 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c | |||
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); | 99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); |
100 | if (!pgraph->ctxprog) { | 100 | if (!pgraph->ctxvals) { |
101 | NV_ERROR(dev, "OOM copying ctxprog\n"); | 101 | NV_ERROR(dev, "OOM copying ctxvals\n"); |
102 | release_firmware(fw); | 102 | release_firmware(fw); |
103 | nouveau_grctx_fini(dev); | 103 | nouveau_grctx_fini(dev); |
104 | return -ENOMEM; | 104 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 919a619ca7fa..447f9f69d6b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
211 | get + 4); | 211 | get + 4); |
212 | } | 212 | } |
213 | 213 | ||
214 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | ||
215 | uint32_t sem; | ||
216 | |||
217 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | ||
218 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
219 | NV_PFIFO_INTR_SEMAPHORE); | ||
220 | |||
221 | sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); | ||
222 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | ||
223 | |||
224 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
225 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
226 | } | ||
227 | |||
214 | if (status) { | 228 | if (status) { |
215 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", | 229 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", |
216 | status, chid); | 230 | status, chid); |
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) | |||
483 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | 497 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { |
484 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | 498 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) |
485 | unhandled = 1; | 499 | unhandled = 1; |
500 | } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { | ||
501 | uint32_t v = nv_rd32(dev, 0x402000); | ||
502 | nv_wr32(dev, 0x402000, v); | ||
503 | |||
504 | /* dump the error anyway for now: it's useful for | ||
505 | Gallium development */ | ||
506 | unhandled = 1; | ||
486 | } else { | 507 | } else { |
487 | unhandled = 1; | 508 | unhandled = 1; |
488 | } | 509 | } |
@@ -559,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) | |||
559 | static void | 580 | static void |
560 | nv50_pgraph_irq_handler(struct drm_device *dev) | 581 | nv50_pgraph_irq_handler(struct drm_device *dev) |
561 | { | 582 | { |
562 | uint32_t status, nsource; | 583 | uint32_t status; |
563 | 584 | ||
564 | status = nv_rd32(dev, NV03_PGRAPH_INTR); | 585 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { |
565 | nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | 586 | uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); |
566 | 587 | ||
567 | if (status & 0x00000001) { | 588 | if (status & 0x00000001) { |
568 | nouveau_pgraph_intr_notify(dev, nsource); | 589 | nouveau_pgraph_intr_notify(dev, nsource); |
569 | status &= ~0x00000001; | 590 | status &= ~0x00000001; |
570 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); | 591 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); |
571 | } | 592 | } |
572 | 593 | ||
573 | if (status & 0x00000010) { | 594 | if (status & 0x00000010) { |
574 | nouveau_pgraph_intr_error(dev, nsource | | 595 | nouveau_pgraph_intr_error(dev, nsource | |
575 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); | 596 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); |
576 | 597 | ||
577 | status &= ~0x00000010; | 598 | status &= ~0x00000010; |
578 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); | 599 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); |
579 | } | 600 | } |
580 | 601 | ||
581 | if (status & 0x00001000) { | 602 | if (status & 0x00001000) { |
582 | nv_wr32(dev, 0x400500, 0x00000000); | 603 | nv_wr32(dev, 0x400500, 0x00000000); |
583 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); | 604 | nv_wr32(dev, NV03_PGRAPH_INTR, |
584 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, | 605 | NV_PGRAPH_INTR_CONTEXT_SWITCH); |
585 | NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | 606 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, |
586 | nv_wr32(dev, 0x400500, 0x00010001); | 607 | NV40_PGRAPH_INTR_EN) & |
608 | ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
609 | nv_wr32(dev, 0x400500, 0x00010001); | ||
587 | 610 | ||
588 | nv50_graph_context_switch(dev); | 611 | nv50_graph_context_switch(dev); |
589 | 612 | ||
590 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | 613 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; |
591 | } | 614 | } |
592 | 615 | ||
593 | if (status & 0x00100000) { | 616 | if (status & 0x00100000) { |
594 | nouveau_pgraph_intr_error(dev, nsource | | 617 | nouveau_pgraph_intr_error(dev, nsource | |
595 | NV03_PGRAPH_NSOURCE_DATA_ERROR); | 618 | NV03_PGRAPH_NSOURCE_DATA_ERROR); |
596 | 619 | ||
597 | status &= ~0x00100000; | 620 | status &= ~0x00100000; |
598 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); | 621 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); |
599 | } | 622 | } |
600 | 623 | ||
601 | if (status & 0x00200000) { | 624 | if (status & 0x00200000) { |
602 | int r; | 625 | int r; |
603 | 626 | ||
604 | nouveau_pgraph_intr_error(dev, nsource | | 627 | nouveau_pgraph_intr_error(dev, nsource | |
605 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); | 628 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); |
606 | 629 | ||
607 | NV_ERROR(dev, "magic set 1:\n"); | 630 | NV_ERROR(dev, "magic set 1:\n"); |
608 | for (r = 0x408900; r <= 0x408910; r += 4) | 631 | for (r = 0x408900; r <= 0x408910; r += 4) |
609 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 632 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
610 | nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); | 633 | nv_rd32(dev, r)); |
611 | for (r = 0x408e08; r <= 0x408e24; r += 4) | 634 | nv_wr32(dev, 0x408900, |
612 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 635 | nv_rd32(dev, 0x408904) | 0xc0000000); |
613 | nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); | 636 | for (r = 0x408e08; r <= 0x408e24; r += 4) |
614 | 637 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | |
615 | NV_ERROR(dev, "magic set 2:\n"); | 638 | nv_rd32(dev, r)); |
616 | for (r = 0x409900; r <= 0x409910; r += 4) | 639 | nv_wr32(dev, 0x408e08, |
617 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 640 | nv_rd32(dev, 0x408e08) | 0xc0000000); |
618 | nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); | 641 | |
619 | for (r = 0x409e08; r <= 0x409e24; r += 4) | 642 | NV_ERROR(dev, "magic set 2:\n"); |
620 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 643 | for (r = 0x409900; r <= 0x409910; r += 4) |
621 | nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); | 644 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
622 | 645 | nv_rd32(dev, r)); | |
623 | status &= ~0x00200000; | 646 | nv_wr32(dev, 0x409900, |
624 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | 647 | nv_rd32(dev, 0x409904) | 0xc0000000); |
625 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | 648 | for (r = 0x409e08; r <= 0x409e24; r += 4) |
626 | } | 649 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
650 | nv_rd32(dev, r)); | ||
651 | nv_wr32(dev, 0x409e08, | ||
652 | nv_rd32(dev, 0x409e08) | 0xc0000000); | ||
653 | |||
654 | status &= ~0x00200000; | ||
655 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | ||
656 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | ||
657 | } | ||
627 | 658 | ||
628 | if (status) { | 659 | if (status) { |
629 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); | 660 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", |
630 | nv_wr32(dev, NV03_PGRAPH_INTR, status); | 661 | status); |
631 | } | 662 | nv_wr32(dev, NV03_PGRAPH_INTR, status); |
663 | } | ||
632 | 664 | ||
633 | { | 665 | { |
634 | const int isb = (1 << 16) | (1 << 0); | 666 | const int isb = (1 << 16) | (1 << 0); |
635 | 667 | ||
636 | if ((nv_rd32(dev, 0x400500) & isb) != isb) | 668 | if ((nv_rd32(dev, 0x400500) & isb) != isb) |
637 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); | 669 | nv_wr32(dev, 0x400500, |
638 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | 670 | nv_rd32(dev, 0x400500) | isb); |
671 | } | ||
639 | } | 672 | } |
640 | 673 | ||
641 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 674 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); |
675 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
642 | } | 676 | } |
643 | 677 | ||
644 | static void | 678 | static void |
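nv50_pgraph_irq_handler() is now a drain loop: it re-reads NV03_PGRAPH_INTR and keeps handling until the status word reads back zero, acknowledging each source bit as it goes, so interrupts that arrive while earlier ones are being serviced are not dropped. A stripped-down sketch of that loop shape is below; register access is stubbed with a small fake "pending" word and the bit meanings are only suggestive.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_intr = 0x00000011; /* pretend NOTIFY and an error are pending */

    static uint32_t rd_intr(void)           { return fake_intr; }
    static void     ack_intr(uint32_t bits) { fake_intr &= ~bits; }

    /* Drain-style IRQ handler: loop until the status register reads zero,
     * acknowledging each recognised bit; unknown bits are acked in one go
     * so the loop always terminates. */
    static void pgraph_irq_sketch(void)
    {
            uint32_t status;

            while ((status = rd_intr())) {
                    if (status & 0x00000001) {      /* NOTIFY */
                            puts("notify");
                            ack_intr(0x00000001);
                            status &= ~0x00000001;
                    }
                    if (status & 0x00000010) {      /* ILLEGAL_MTHD error */
                            puts("error");
                            ack_intr(0x00000010);
                            status &= ~0x00000010;
                    }
                    if (status) {                   /* anything unrecognised */
                            printf("unhandled 0x%08x\n", status);
                            ack_intr(status);
                    }
            }
    }

    int main(void)
    {
            pgraph_irq_sketch();
            return 0;
    }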
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index fb9bdd6edf1f..2dc09dbd817d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
285 | uint32_t flags, uint64_t phys) | 285 | uint32_t flags, uint64_t phys) |
286 | { | 286 | { |
287 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 287 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
288 | struct nouveau_gpuobj **pgt; | 288 | struct nouveau_gpuobj *pgt; |
289 | unsigned psz, pfl, pages; | 289 | unsigned block; |
290 | 290 | int i; | |
291 | if (virt >= dev_priv->vm_gart_base && | ||
292 | (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { | ||
293 | psz = 12; | ||
294 | pgt = &dev_priv->gart_info.sg_ctxdma; | ||
295 | pfl = 0x21; | ||
296 | virt -= dev_priv->vm_gart_base; | ||
297 | } else | ||
298 | if (virt >= dev_priv->vm_vram_base && | ||
299 | (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { | ||
300 | psz = 16; | ||
301 | pgt = dev_priv->vm_vram_pt; | ||
302 | pfl = 0x01; | ||
303 | virt -= dev_priv->vm_vram_base; | ||
304 | } else { | ||
305 | NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", | ||
306 | virt, virt + size - 1); | ||
307 | return -EINVAL; | ||
308 | } | ||
309 | 291 | ||
310 | pages = size >> psz; | 292 | virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; |
293 | size = (size >> 16) << 1; | ||
294 | |||
295 | phys |= ((uint64_t)flags << 32); | ||
296 | phys |= 1; | ||
297 | if (dev_priv->vram_sys_base) { | ||
298 | phys += dev_priv->vram_sys_base; | ||
299 | phys |= 0x30; | ||
300 | } | ||
311 | 301 | ||
312 | dev_priv->engine.instmem.prepare_access(dev, true); | 302 | dev_priv->engine.instmem.prepare_access(dev, true); |
313 | if (flags & 0x80000000) { | 303 | while (size) { |
314 | while (pages--) { | 304 | unsigned offset_h = upper_32_bits(phys); |
315 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | 305 | unsigned offset_l = lower_32_bits(phys); |
316 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | 306 | unsigned pte, end; |
307 | |||
308 | for (i = 7; i >= 0; i--) { | ||
309 | block = 1 << (i + 1); | ||
310 | if (size >= block && !(virt & (block - 1))) | ||
311 | break; | ||
312 | } | ||
313 | offset_l |= (i << 7); | ||
317 | 314 | ||
318 | nv_wo32(dev, pt, pte++, 0x00000000); | 315 | phys += block << 15; |
319 | nv_wo32(dev, pt, pte++, 0x00000000); | 316 | size -= block; |
320 | 317 | ||
321 | virt += (1 << psz); | 318 | while (block) { |
322 | } | 319 | pgt = dev_priv->vm_vram_pt[virt >> 14]; |
323 | } else { | 320 | pte = virt & 0x3ffe; |
324 | while (pages--) { | ||
325 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | ||
326 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | ||
327 | unsigned offset_h = upper_32_bits(phys) & 0xff; | ||
328 | unsigned offset_l = lower_32_bits(phys); | ||
329 | 321 | ||
330 | nv_wo32(dev, pt, pte++, offset_l | pfl); | 322 | end = pte + block; |
331 | nv_wo32(dev, pt, pte++, offset_h | flags); | 323 | if (end > 16384) |
324 | end = 16384; | ||
325 | block -= (end - pte); | ||
326 | virt += (end - pte); | ||
332 | 327 | ||
333 | phys += (1 << psz); | 328 | while (pte < end) { |
334 | virt += (1 << psz); | 329 | nv_wo32(dev, pgt, pte++, offset_l); |
330 | nv_wo32(dev, pgt, pte++, offset_h); | ||
331 | } | ||
335 | } | 332 | } |
336 | } | 333 | } |
337 | dev_priv->engine.instmem.finish_access(dev); | 334 | dev_priv->engine.instmem.finish_access(dev); |
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
356 | void | 353 | void |
357 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) | 354 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) |
358 | { | 355 | { |
359 | nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); | 356 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
357 | struct nouveau_gpuobj *pgt; | ||
358 | unsigned pages, pte, end; | ||
359 | |||
360 | virt -= dev_priv->vm_vram_base; | ||
361 | pages = (size >> 16) << 1; | ||
362 | |||
363 | dev_priv->engine.instmem.prepare_access(dev, true); | ||
364 | while (pages) { | ||
365 | pgt = dev_priv->vm_vram_pt[virt >> 29]; | ||
366 | pte = (virt & 0x1ffe0000ULL) >> 15; | ||
367 | |||
368 | end = pte + pages; | ||
369 | if (end > 16384) | ||
370 | end = 16384; | ||
371 | pages -= (end - pte); | ||
372 | virt += (end - pte) << 15; | ||
373 | |||
374 | while (pte < end) | ||
375 | nv_wo32(dev, pgt, pte++, 0); | ||
376 | } | ||
377 | dev_priv->engine.instmem.finish_access(dev); | ||
378 | |||
379 | nv_wr32(dev, 0x100c80, 0x00050001); | ||
380 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
381 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
382 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
383 | return; | ||
384 | } | ||
385 | |||
386 | nv_wr32(dev, 0x100c80, 0x00000001); | ||
387 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
388 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
389 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
390 | } | ||
360 | } | 391 | } |
361 | 392 | ||
362 | /* | 393 | /* |
@@ -383,9 +414,8 @@ void nouveau_mem_close(struct drm_device *dev) | |||
383 | { | 414 | { |
384 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 415 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
385 | 416 | ||
386 | if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) | 417 | nouveau_bo_unpin(dev_priv->vga_ram); |
387 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); | 418 | nouveau_bo_ref(NULL, &dev_priv->vga_ram); |
388 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
389 | 419 | ||
390 | ttm_bo_device_release(&dev_priv->ttm.bdev); | 420 | ttm_bo_device_release(&dev_priv->ttm.bdev); |
391 | 421 | ||
@@ -622,6 +652,15 @@ nouveau_mem_init(struct drm_device *dev) | |||
622 | return ret; | 652 | return ret; |
623 | } | 653 | } |
624 | 654 | ||
655 | ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, | ||
656 | 0, 0, true, true, &dev_priv->vga_ram); | ||
657 | if (ret == 0) | ||
658 | ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM); | ||
659 | if (ret) { | ||
660 | NV_WARN(dev, "failed to reserve VGA memory\n"); | ||
661 | nouveau_bo_ref(NULL, &dev_priv->vga_ram); | ||
662 | } | ||
663 | |||
625 | /* GART */ | 664 | /* GART */ |
626 | #if !defined(__powerpc__) && !defined(__ia64__) | 665 | #if !defined(__powerpc__) && !defined(__ia64__) |
627 | if (drm_device_is_agp(dev) && dev->agp) { | 666 | if (drm_device_is_agp(dev) && dev->agp) { |
@@ -653,6 +692,7 @@ nouveau_mem_init(struct drm_device *dev) | |||
653 | dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), | 692 | dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), |
654 | drm_get_resource_len(dev, 1), | 693 | drm_get_resource_len(dev, 1), |
655 | DRM_MTRR_WC); | 694 | DRM_MTRR_WC); |
695 | |||
656 | return 0; | 696 | return 0; |
657 | } | 697 | } |
658 | 698 | ||
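The rewritten nv50_mem_vm_bind_linear() walks the mapping in variable-sized blocks: at each position it picks the largest block (2 to 256 PTE slots, i.e. 1 to 128 64KiB pages) that both fits in the remaining size and is naturally aligned at the current virtual offset, tags the low PTE word with that size in bits 7..9, and then fills the covered PTEs. A standalone sketch of just the block-size selection follows; positions and sizes are in PTE slots, matching the driver's "<< 1" bookkeeping, and the PTE writes themselves are omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the largest block (in PTE slots, 2 slots per 64KiB page) that
     * still fits in 'size' and is naturally aligned at 'virt'. Returns the
     * exponent i the driver ORs into the PTE (bits 7..9) and the chosen
     * block via *block. Mirrors the loop in nv50_mem_vm_bind_linear(). */
    static int pick_block(uint32_t virt, uint32_t size, uint32_t *block)
    {
            int i;

            for (i = 7; i >= 0; i--) {
                    *block = 1u << (i + 1);
                    if (size >= *block && !(virt & (*block - 1)))
                            break;
            }
            return i;
    }

    int main(void)
    {
            uint32_t virt = 0, size = 1024, block;  /* 512 pages of 64KiB */

            while (size) {
                    int i = pick_block(virt, size, &block);

                    printf("virt=%u: block of %u slots (i=%d)\n",
                           (unsigned)virt, (unsigned)block, i);
                    virt += block;
                    size -= block;
            }
            return 0;
    }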
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 6c66a34b6345..d99dc087f9b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
34 | { | 34 | { |
35 | struct drm_device *dev = chan->dev; | 35 | struct drm_device *dev = chan->dev; |
36 | struct nouveau_bo *ntfy = NULL; | 36 | struct nouveau_bo *ntfy = NULL; |
37 | uint32_t flags; | ||
37 | int ret; | 38 | int ret; |
38 | 39 | ||
39 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? | 40 | if (nouveau_vram_notify) |
40 | TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, | 41 | flags = TTM_PL_FLAG_VRAM; |
42 | else | ||
43 | flags = TTM_PL_FLAG_TT; | ||
44 | |||
45 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, | ||
41 | 0, 0x0000, false, true, &ntfy); | 46 | 0, 0x0000, false, true, &ntfy); |
42 | if (ret) | 47 | if (ret) |
43 | return ret; | 48 | return ret; |
44 | 49 | ||
45 | ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); | 50 | ret = nouveau_bo_pin(ntfy, flags); |
46 | if (ret) | 51 | if (ret) |
47 | goto out_err; | 52 | goto out_err; |
48 | 53 | ||
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
128 | target = NV_DMA_TARGET_PCI; | 133 | target = NV_DMA_TARGET_PCI; |
129 | } else { | 134 | } else { |
130 | target = NV_DMA_TARGET_AGP; | 135 | target = NV_DMA_TARGET_AGP; |
136 | if (dev_priv->card_type >= NV_50) | ||
137 | offset += dev_priv->vm_gart_base; | ||
131 | } | 138 | } |
132 | } else { | 139 | } else { |
133 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", | 140 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", |
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 6c2cf81716df..e7c100ba63a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -885,11 +885,12 @@ int | |||
885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | 885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, |
886 | struct nouveau_gpuobj **gpuobj_ret) | 886 | struct nouveau_gpuobj **gpuobj_ret) |
887 | { | 887 | { |
888 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 888 | struct drm_nouveau_private *dev_priv; |
889 | struct nouveau_gpuobj *gpuobj; | 889 | struct nouveau_gpuobj *gpuobj; |
890 | 890 | ||
891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) | 891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) |
892 | return -EINVAL; | 892 | return -EINVAL; |
893 | dev_priv = chan->dev->dev_private; | ||
893 | 894 | ||
894 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | 895 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); |
895 | if (!gpuobj) | 896 | if (!gpuobj) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 251f1b3b38b9..aa9b310e41be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h | |||
@@ -99,6 +99,7 @@ | |||
99 | * the card will hang early on in the X init process. | 99 | * the card will hang early on in the X init process. |
100 | */ | 100 | */ |
101 | # define NV_PMC_ENABLE_UNK13 (1<<13) | 101 | # define NV_PMC_ENABLE_UNK13 (1<<13) |
102 | #define NV40_PMC_GRAPH_UNITS 0x00001540 | ||
102 | #define NV40_PMC_BACKLIGHT 0x000015f0 | 103 | #define NV40_PMC_BACKLIGHT 0x000015f0 |
103 | # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 | 104 | # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 |
104 | #define NV40_PMC_1700 0x00001700 | 105 | #define NV40_PMC_1700 0x00001700 |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4c7f1e403e80..ed1590577b6c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -54,11 +54,12 @@ static void | |||
54 | nouveau_sgdma_clear(struct ttm_backend *be) | 54 | nouveau_sgdma_clear(struct ttm_backend *be) |
55 | { | 55 | { |
56 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | 56 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; |
57 | struct drm_device *dev = nvbe->dev; | 57 | struct drm_device *dev; |
58 | |||
59 | NV_DEBUG(nvbe->dev, "\n"); | ||
60 | 58 | ||
61 | if (nvbe && nvbe->pages) { | 59 | if (nvbe && nvbe->pages) { |
60 | dev = nvbe->dev; | ||
61 | NV_DEBUG(dev, "\n"); | ||
62 | |||
62 | if (nvbe->bound) | 63 | if (nvbe->bound) |
63 | be->func->unbind(be); | 64 | be->func->unbind(be); |
64 | 65 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 09b9a46dfc0e..a4851af5b05e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
310 | static unsigned int | 310 | static unsigned int |
311 | nouveau_vga_set_decode(void *priv, bool state) | 311 | nouveau_vga_set_decode(void *priv, bool state) |
312 | { | 312 | { |
313 | struct drm_device *dev = priv; | ||
314 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
315 | |||
316 | if (dev_priv->chipset >= 0x40) | ||
317 | nv_wr32(dev, 0x88054, state); | ||
318 | else | ||
319 | nv_wr32(dev, 0x1854, state); | ||
320 | |||
313 | if (state) | 321 | if (state) |
314 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 322 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
315 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 323 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
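The new body of nouveau_vga_set_decode() above now also writes the chipset's VGA-decode enable register (0x88054 on chipsets >= 0x40, 0x1854 otherwise) before returning the resources the device still decodes. For context, a callback with this signature is what gets handed to the VGA arbiter; a hedged sketch of that registration, assuming the vga_client_register() interface of this kernel generation (the actual call site is not part of this diff):

    #include <linux/vgaarb.h>

    /* Assumption: this is illustrative of how nouveau_vga_set_decode()
     * would be wired up; the helper name and call site are not from the
     * patch.
     */
    static int hook_vga_arbiter(struct drm_device *dev)
    {
            /* 'dev' comes back as the 'priv' argument of the callback */
            return vga_client_register(dev->pdev, dev, NULL,
                                       nouveau_vga_set_decode);
    }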
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev) | |||
427 | if (ret) | 435 | if (ret) |
428 | goto out_timer; | 436 | goto out_timer; |
429 | 437 | ||
430 | /* PGRAPH */ | 438 | if (nouveau_noaccel) |
431 | ret = engine->graph.init(dev); | 439 | engine->graph.accel_blocked = true; |
432 | if (ret) | 440 | else { |
433 | goto out_fb; | 441 | /* PGRAPH */ |
442 | ret = engine->graph.init(dev); | ||
443 | if (ret) | ||
444 | goto out_fb; | ||
434 | 445 | ||
435 | /* PFIFO */ | 446 | /* PFIFO */ |
436 | ret = engine->fifo.init(dev); | 447 | ret = engine->fifo.init(dev); |
437 | if (ret) | 448 | if (ret) |
438 | goto out_graph; | 449 | goto out_graph; |
450 | } | ||
439 | 451 | ||
440 | /* this calls irq_preinstall, registers the irq handler and | 452 |
441 | * calls irq_postinstall | 453 |

@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev) | |||
479 | out_irq: | 491 | out_irq: |
480 | drm_irq_uninstall(dev); | 492 | drm_irq_uninstall(dev); |
481 | out_fifo: | 493 | out_fifo: |
482 | engine->fifo.takedown(dev); | 494 | if (!nouveau_noaccel) |
495 | engine->fifo.takedown(dev); | ||
483 | out_graph: | 496 | out_graph: |
484 | engine->graph.takedown(dev); | 497 | if (!nouveau_noaccel) |
498 | engine->graph.takedown(dev); | ||
485 | out_fb: | 499 | out_fb: |
486 | engine->fb.takedown(dev); | 500 | engine->fb.takedown(dev); |
487 | out_timer: | 501 | out_timer: |
@@ -518,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
518 | dev_priv->channel = NULL; | 532 | dev_priv->channel = NULL; |
519 | } | 533 | } |
520 | 534 | ||
521 | engine->fifo.takedown(dev); | 535 | if (!nouveau_noaccel) { |
522 | engine->graph.takedown(dev); | 536 | engine->fifo.takedown(dev); |
537 | engine->graph.takedown(dev); | ||
538 | } | ||
523 | engine->fb.takedown(dev); | 539 | engine->fb.takedown(dev); |
524 | engine->timer.takedown(dev); | 540 | engine->timer.takedown(dev); |
525 | engine->mc.takedown(dev); | 541 | engine->mc.takedown(dev); |
526 | 542 | ||
527 | mutex_lock(&dev->struct_mutex); | 543 | mutex_lock(&dev->struct_mutex); |
544 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
528 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); | 545 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); |
529 | mutex_unlock(&dev->struct_mutex); | 546 | mutex_unlock(&dev->struct_mutex); |
530 | nouveau_sgdma_takedown(dev); | 547 | nouveau_sgdma_takedown(dev); |
@@ -816,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
816 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: | 833 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: |
817 | getparam->value = dev_priv->vm_vram_base; | 834 | getparam->value = dev_priv->vm_vram_base; |
818 | break; | 835 | break; |
836 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | ||
837 | /* NV40 and NV50 versions are quite different, but register | ||
838 | * address is the same. User is supposed to know the card | ||
839 | * family anyway... */ | ||
840 | if (dev_priv->chipset >= 0x40) { | ||
841 | getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); | ||
842 | break; | ||
843 | } | ||
844 | /* FALLTHRU */ | ||
819 | default: | 845 | default: |
820 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); | 846 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); |
821 | return -EINVAL; | 847 | return -EINVAL; |
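The new NOUVEAU_GETPARAM_GRAPH_UNITS case above hands the raw NV40_PMC_GRAPH_UNITS register value to userspace, falling through to the error path on pre-NV40 parts. A hedged sketch of how a libdrm client might query it; the struct layout and request name are assumptions based on the existing nouveau getparam ioctl rather than anything added by this diff:

    #include <stdio.h>
    #include <xf86drm.h>
    #include <drm/nouveau_drm.h>

    /* Userspace sketch: read the graph-units word from an open DRM fd. */
    static int print_graph_units(int fd)
    {
            struct drm_nouveau_getparam gp = {
                    .param = NOUVEAU_GETPARAM_GRAPH_UNITS,
            };
            int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
                                          &gp, sizeof(gp));

            if (ret == 0)
                    printf("graph units: 0x%08llx\n",
                           (unsigned long long)gp.value);
            return ret;
    }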
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index d0e038d28948..1d73b15d70da 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
119 | struct drm_connector *connector) | 119 | struct drm_connector *connector) |
120 | { | 120 | { |
121 | struct drm_device *dev = encoder->dev; | 121 | struct drm_device *dev = encoder->dev; |
122 | uint8_t saved_seq1, saved_pi, saved_rpc1; | 122 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; |
123 | uint8_t saved_palette0[3], saved_palette_mask; | 123 | uint8_t saved_palette0[3], saved_palette_mask; |
124 | uint32_t saved_rtest_ctrl, saved_rgen_ctrl; | 124 | uint32_t saved_rtest_ctrl, saved_rgen_ctrl; |
125 | int i; | 125 | int i; |
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
135 | /* only implemented for head A for now */ | 135 | /* only implemented for head A for now */ |
136 | NVSetOwner(dev, 0); | 136 | NVSetOwner(dev, 0); |
137 | 137 | ||
138 | saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX); | ||
139 | NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80); | ||
140 | |||
138 | saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); | 141 | saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); |
139 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); | 142 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); |
140 | 143 | ||
@@ -203,6 +206,7 @@ out: | |||
203 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); | 206 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); |
204 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); | 207 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); |
205 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); | 208 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); |
209 | NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); | ||
206 | 210 | ||
207 | if (blue == 0x18) { | 211 | if (blue == 0x18) { |
208 | NV_INFO(dev, "Load detected on head A\n"); | 212 | NV_INFO(dev, "Load detected on head A\n"); |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index d910873c1368..fd01caabd5c3 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "nouveau_dma.h" | 27 | #include "nouveau_dma.h" |
28 | #include "nouveau_fbcon.h" | 28 | #include "nouveau_fbcon.h" |
29 | 29 | ||
30 | static void | 30 | void |
31 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 31 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
32 | { | 32 | { |
33 | struct nouveau_fbcon_par *par = info->par; | 33 | struct nouveau_fbcon_par *par = info->par; |
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
54 | FIRE_RING(chan); | 54 | FIRE_RING(chan); |
55 | } | 55 | } |
56 | 56 | ||
57 | static void | 57 | void |
58 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 58 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
59 | { | 59 | { |
60 | struct nouveau_fbcon_par *par = info->par; | 60 | struct nouveau_fbcon_par *par = info->par; |
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
88 | FIRE_RING(chan); | 88 | FIRE_RING(chan); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void | 91 | void |
92 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 92 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
93 | { | 93 | { |
94 | struct nouveau_fbcon_par *par = info->par; | 94 | struct nouveau_fbcon_par *par = info->par; |
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
307 | 307 | ||
308 | FIRE_RING(chan); | 308 | FIRE_RING(chan); |
309 | 309 | ||
310 | info->fbops->fb_fillrect = nv04_fbcon_fillrect; | ||
311 | info->fbops->fb_copyarea = nv04_fbcon_copyarea; | ||
312 | info->fbops->fb_imageblit = nv04_fbcon_imageblit; | ||
313 | return 0; | 310 | return 0; |
314 | } | 311 | } |
315 | 312 | ||
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index a20c206625a2..a3b9563a6f60 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c | |||
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev) | |||
30 | * of vram. For now, only reserve a small piece until we know | 30 | * of vram. For now, only reserve a small piece until we know |
31 | * more about what each chipset requires. | 31 | * more about what each chipset requires. |
32 | */ | 32 | */ |
33 | switch (dev_priv->chipset & 0xf0) { | 33 | switch (dev_priv->chipset) { |
34 | case 0x40: | 34 | case 0x40: |
35 | case 0x47: | 35 | case 0x47: |
36 | case 0x49: | 36 | case 0x49: |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 58b917c3341b..21ac6e49b6ee 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder) | |||
579 | nouveau_encoder(encoder)->restore.output); | 579 | nouveau_encoder(encoder)->restore.output); |
580 | 580 | ||
581 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); | 581 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); |
582 | |||
583 | nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; | ||
582 | } | 584 | } |
583 | 585 | ||
584 | static int nv17_tv_create_resources(struct drm_encoder *encoder, | 586 | static int nv17_tv_create_resources(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 118d3285fd8c..d1a651e3400c 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) | |||
298 | static void | 298 | static void |
299 | nv50_crtc_destroy(struct drm_crtc *crtc) | 299 | nv50_crtc_destroy(struct drm_crtc *crtc) |
300 | { | 300 | { |
301 | struct drm_device *dev = crtc->dev; | 301 | struct drm_device *dev; |
302 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 302 | struct nouveau_crtc *nv_crtc; |
303 | |||
304 | NV_DEBUG_KMS(dev, "\n"); | ||
305 | 303 | ||
306 | if (!crtc) | 304 | if (!crtc) |
307 | return; | 305 | return; |
308 | 306 | ||
307 | dev = crtc->dev; | ||
308 | nv_crtc = nouveau_crtc(crtc); | ||
309 | |||
310 | NV_DEBUG_KMS(dev, "\n"); | ||
311 | |||
309 | drm_crtc_cleanup(&nv_crtc->base); | 312 | drm_crtc_cleanup(&nv_crtc->base); |
310 | 313 | ||
311 | nv50_cursor_fini(nv_crtc); | 314 | nv50_cursor_fini(nv_crtc); |
@@ -432,6 +435,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
432 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 435 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
433 | struct drm_device *dev = crtc->dev; | 436 | struct drm_device *dev = crtc->dev; |
434 | struct drm_encoder *encoder; | 437 | struct drm_encoder *encoder; |
438 | uint32_t dac = 0, sor = 0; | ||
435 | 439 | ||
436 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | 440 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
437 | 441 | ||
@@ -439,9 +443,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
439 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 443 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
440 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 444 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
441 | 445 | ||
442 | if (drm_helper_encoder_in_use(encoder)) | 446 | if (!drm_helper_encoder_in_use(encoder)) |
443 | continue; | 447 | continue; |
444 | 448 | ||
449 | if (nv_encoder->dcb->type == OUTPUT_ANALOG || | ||
450 | nv_encoder->dcb->type == OUTPUT_TV) | ||
451 | dac |= (1 << nv_encoder->or); | ||
452 | else | ||
453 | sor |= (1 << nv_encoder->or); | ||
454 | } | ||
455 | |||
456 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
457 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
458 | |||
459 | if (nv_encoder->dcb->type == OUTPUT_ANALOG || | ||
460 | nv_encoder->dcb->type == OUTPUT_TV) { | ||
461 | if (dac & (1 << nv_encoder->or)) | ||
462 | continue; | ||
463 | } else { | ||
464 | if (sor & (1 << nv_encoder->or)) | ||
465 | continue; | ||
466 | } | ||
467 | |||
445 | nv_encoder->disconnect(nv_encoder); | 468 | nv_encoder->disconnect(nv_encoder); |
446 | } | 469 | } |
447 | 470 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index e4f279ee61cf..0f57cdf7ccb2 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -3,7 +3,7 @@ | |||
3 | #include "nouveau_dma.h" | 3 | #include "nouveau_dma.h" |
4 | #include "nouveau_fbcon.h" | 4 | #include "nouveau_fbcon.h" |
5 | 5 | ||
6 | static void | 6 | void |
7 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 7 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
8 | { | 8 | { |
9 | struct nouveau_fbcon_par *par = info->par; | 9 | struct nouveau_fbcon_par *par = info->par; |
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
46 | FIRE_RING(chan); | 46 | FIRE_RING(chan); |
47 | } | 47 | } |
48 | 48 | ||
49 | static void | 49 | void |
50 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 50 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
51 | { | 51 | { |
52 | struct nouveau_fbcon_par *par = info->par; | 52 | struct nouveau_fbcon_par *par = info->par; |
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
81 | FIRE_RING(chan); | 81 | FIRE_RING(chan); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void | 84 | void |
85 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 85 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
86 | { | 86 | { |
87 | struct nouveau_fbcon_par *par = info->par; | 87 | struct nouveau_fbcon_par *par = info->par; |
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + | 262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + |
263 | dev_priv->vm_vram_base); | 263 | dev_priv->vm_vram_base); |
264 | 264 | ||
265 | info->fbops->fb_fillrect = nv50_fbcon_fillrect; | ||
266 | info->fbops->fb_copyarea = nv50_fbcon_copyarea; | ||
267 | info->fbops->fb_imageblit = nv50_fbcon_imageblit; | ||
268 | return 0; | 265 | return 0; |
269 | } | 266 | } |
270 | 267 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 39caf167587d..204a79ff10f4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) | |||
272 | return ret; | 272 | return ret; |
273 | ramfc = chan->ramfc->gpuobj; | 273 | ramfc = chan->ramfc->gpuobj; |
274 | 274 | ||
275 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, | 275 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024, |
276 | 0, &chan->cache); | 276 | 0, &chan->cache); |
277 | if (ret) | 277 | if (ret) |
278 | return ret; | 278 | return ret; |
@@ -317,17 +317,20 @@ void | |||
317 | nv50_fifo_destroy_context(struct nouveau_channel *chan) | 317 | nv50_fifo_destroy_context(struct nouveau_channel *chan) |
318 | { | 318 | { |
319 | struct drm_device *dev = chan->dev; | 319 | struct drm_device *dev = chan->dev; |
320 | struct nouveau_gpuobj_ref *ramfc = chan->ramfc; | ||
320 | 321 | ||
321 | NV_DEBUG(dev, "ch%d\n", chan->id); | 322 | NV_DEBUG(dev, "ch%d\n", chan->id); |
322 | 323 | ||
323 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | 324 | /* This will ensure the channel is seen as disabled. */ |
324 | nouveau_gpuobj_ref_del(dev, &chan->cache); | 325 | chan->ramfc = NULL; |
325 | |||
326 | nv50_fifo_channel_disable(dev, chan->id, false); | 326 | nv50_fifo_channel_disable(dev, chan->id, false); |
327 | 327 | ||
328 | /* Dummy channel, also used on ch 127 */ | 328 | /* Dummy channel, also used on ch 127 */ |
329 | if (chan->id == 0) | 329 | if (chan->id == 0) |
330 | nv50_fifo_channel_disable(dev, 127, false); | 330 | nv50_fifo_channel_disable(dev, 127, false); |
331 | |||
332 | nouveau_gpuobj_ref_del(dev, &ramfc); | ||
333 | nouveau_gpuobj_ref_del(dev, &chan->cache); | ||
331 | } | 334 | } |
332 | 335 | ||
333 | int | 336 | int |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index ca79f32be44c..6d504801b514 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev) | |||
84 | nv_wr32(dev, 0x400804, 0xc0000000); | 84 | nv_wr32(dev, 0x400804, 0xc0000000); |
85 | nv_wr32(dev, 0x406800, 0xc0000000); | 85 | nv_wr32(dev, 0x406800, 0xc0000000); |
86 | nv_wr32(dev, 0x400c04, 0xc0000000); | 86 | nv_wr32(dev, 0x400c04, 0xc0000000); |
87 | nv_wr32(dev, 0x401804, 0xc0000000); | 87 | nv_wr32(dev, 0x401800, 0xc0000000); |
88 | nv_wr32(dev, 0x405018, 0xc0000000); | 88 | nv_wr32(dev, 0x405018, 0xc0000000); |
89 | nv_wr32(dev, 0x402000, 0xc0000000); | 89 | nv_wr32(dev, 0x402000, 0xc0000000); |
90 | 90 | ||
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev) | |||
165 | uint32_t inst; | 165 | uint32_t inst; |
166 | int i; | 166 | int i; |
167 | 167 | ||
168 | /* Be sure we're not in the middle of a context switch or bad things | ||
169 | * will happen, such as unloading the wrong pgraph context. | ||
170 | */ | ||
171 | if (!nv_wait(0x400300, 0x00000001, 0x00000000)) | ||
172 | NV_ERROR(dev, "Ctxprog is still running\n"); | ||
173 | |||
168 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); | 174 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); |
169 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) | 175 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) |
170 | return NULL; | 176 | return NULL; |
@@ -275,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan) | |||
275 | int | 281 | int |
276 | nv50_graph_unload_context(struct drm_device *dev) | 282 | nv50_graph_unload_context(struct drm_device *dev) |
277 | { | 283 | { |
278 | uint32_t inst, fifo = nv_rd32(dev, 0x400500); | 284 | uint32_t inst; |
279 | 285 | ||
280 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); | 286 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); |
281 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) | 287 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) |
282 | return 0; | 288 | return 0; |
283 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; | 289 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; |
284 | 290 | ||
285 | nv_wr32(dev, 0x400500, fifo & ~1); | 291 | nouveau_wait_for_idle(dev); |
286 | nv_wr32(dev, 0x400784, inst); | 292 | nv_wr32(dev, 0x400784, inst); |
287 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); | 293 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); |
288 | nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); | 294 | nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); |
289 | nouveau_wait_for_idle(dev); | 295 | nouveau_wait_for_idle(dev); |
290 | nv_wr32(dev, 0x400500, fifo); | ||
291 | 296 | ||
292 | nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); | 297 | nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); |
293 | return 0; | 298 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 94400f777e7f..f0dc4e36ef05 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev) | |||
76 | for (i = 0x1700; i <= 0x1710; i += 4) | 76 | for (i = 0x1700; i <= 0x1710; i += 4) |
77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); | 77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); |
78 | 78 | ||
79 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) | ||
80 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; | ||
81 | else | ||
82 | dev_priv->vram_sys_base = 0; | ||
83 | |||
79 | /* Reserve the last MiB of VRAM; we should probably try to avoid | 84 |
80 | * setting up the below tables over the top of the VBIOS image at | 85 | * setting up the below tables over the top of the VBIOS image at |
81 | * some point. | 86 | * some point. |
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev) | |||
172 | * We map the entire fake channel into the start of the PRAMIN BAR | 177 | * We map the entire fake channel into the start of the PRAMIN BAR |
173 | */ | 178 | */ |
174 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, | 179 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, |
175 | 0, &priv->pramin_pt); | 180 | 0, &priv->pramin_pt); |
176 | if (ret) | 181 | if (ret) |
177 | return ret; | 182 | return ret; |
178 | 183 | ||
179 | for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { | 184 | v = c_offset | 1; |
180 | if (v < (c_offset + c_size)) | 185 | if (dev_priv->vram_sys_base) { |
181 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); | 186 | v += dev_priv->vram_sys_base; |
182 | else | 187 | v |= 0x30; |
183 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); | 188 | } |
189 | |||
190 | i = 0; | ||
191 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { | ||
192 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); | ||
193 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | ||
194 | v += 0x1000; | ||
195 | i += 8; | ||
196 | } | ||
197 | |||
198 | while (i < pt_size) { | ||
199 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); | ||
184 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | 200 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); |
201 | i += 8; | ||
185 | } | 202 | } |
186 | 203 | ||
187 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); | 204 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); |
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
416 | { | 433 | { |
417 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 434 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
418 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 435 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
419 | uint32_t pte, pte_end, vram; | 436 | struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; |
437 | uint32_t pte, pte_end; | ||
438 | uint64_t vram; | ||
420 | 439 | ||
421 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) | 440 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) |
422 | return -EINVAL; | 441 | return -EINVAL; |
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
424 | NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", | 443 | NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", |
425 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); | 444 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); |
426 | 445 | ||
427 | pte = (gpuobj->im_pramin->start >> 12) << 3; | 446 | pte = (gpuobj->im_pramin->start >> 12) << 1; |
428 | pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; | 447 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
429 | vram = gpuobj->im_backing_start; | 448 | vram = gpuobj->im_backing_start; |
430 | 449 | ||
431 | NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", | 450 | NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", |
432 | gpuobj->im_pramin->start, pte, pte_end); | 451 | gpuobj->im_pramin->start, pte, pte_end); |
433 | NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); | 452 | NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); |
434 | 453 | ||
454 | vram |= 1; | ||
455 | if (dev_priv->vram_sys_base) { | ||
456 | vram += dev_priv->vram_sys_base; | ||
457 | vram |= 0x30; | ||
458 | } | ||
459 | |||
435 | dev_priv->engine.instmem.prepare_access(dev, true); | 460 | dev_priv->engine.instmem.prepare_access(dev, true); |
436 | while (pte < pte_end) { | 461 | while (pte < pte_end) { |
437 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); | 462 | nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); |
438 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); | 463 | nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); |
439 | |||
440 | pte += 8; | ||
441 | vram += NV50_INSTMEM_PAGE_SIZE; | 464 | vram += NV50_INSTMEM_PAGE_SIZE; |
442 | } | 465 | } |
443 | dev_priv->engine.instmem.finish_access(dev); | 466 | dev_priv->engine.instmem.finish_access(dev); |
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
470 | if (gpuobj->im_bound == 0) | 493 | if (gpuobj->im_bound == 0) |
471 | return -EINVAL; | 494 | return -EINVAL; |
472 | 495 | ||
473 | pte = (gpuobj->im_pramin->start >> 12) << 3; | 496 | pte = (gpuobj->im_pramin->start >> 12) << 1; |
474 | pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; | 497 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
475 | 498 | ||
476 | dev_priv->engine.instmem.prepare_access(dev, true); | 499 | dev_priv->engine.instmem.prepare_access(dev, true); |
477 | while (pte < pte_end) { | 500 | while (pte < pte_end) { |
478 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); | 501 | nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); |
479 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); | 502 | nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); |
480 | pte += 8; | ||
481 | } | 503 | } |
482 | dev_priv->engine.instmem.finish_access(dev); | 504 | dev_priv->engine.instmem.finish_access(dev); |
483 | 505 | ||
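The nv50_instmem changes above move the PRAMIN page tables from one packed 32-bit word per page to a low/high pair per entry, and on the IGP chipsets (0xaa/0xac) offset the address by dev_priv->vram_sys_base and set extra flag bits, since their "VRAM" actually lives in stolen system memory. A small sketch of how each pair of words is derived; the meaning of bit 0 and of 0x30 is inferred from the hunk, not from documentation:

    /* Sketch only: pack one PRAMIN PTE the way the loops above do.
     * Assumes lower_32_bits()/upper_32_bits() from linux/kernel.h; bit 0
     * presumably marks the entry valid, and 0x30 covers the IGP
     * stolen-system-memory case.
     */
    static void pramin_pte_pack(uint64_t phys, uint64_t vram_sys_base,
                                uint32_t *lo, uint32_t *hi)
    {
            uint64_t v = phys | 1;

            if (vram_sys_base) {
                    v += vram_sys_base;
                    v |= 0x30;
            }

            *lo = lower_32_bits(v);
            *hi = upper_32_bits(v);
    }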
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index e395c16d30f5..c2fff543b06f 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
@@ -90,11 +90,25 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
90 | { | 90 | { |
91 | struct drm_device *dev = encoder->dev; | 91 | struct drm_device *dev = encoder->dev; |
92 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 92 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
93 | struct drm_encoder *enc; | ||
93 | uint32_t val; | 94 | uint32_t val; |
94 | int or = nv_encoder->or; | 95 | int or = nv_encoder->or; |
95 | 96 | ||
96 | NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); | 97 | NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); |
97 | 98 | ||
99 | nv_encoder->last_dpms = mode; | ||
100 | list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { | ||
101 | struct nouveau_encoder *nvenc = nouveau_encoder(enc); | ||
102 | |||
103 | if (nvenc == nv_encoder || | ||
104 | nvenc->disconnect != nv50_sor_disconnect || | ||
105 | nvenc->dcb->or != nv_encoder->dcb->or) | ||
106 | continue; | ||
107 | |||
108 | if (nvenc->last_dpms == DRM_MODE_DPMS_ON) | ||
109 | return; | ||
110 | } | ||
111 | |||
98 | /* wait for it to be done */ | 112 | /* wait for it to be done */ |
99 | if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), | 113 | if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), |
100 | NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { | 114 | NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { |
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 5982321be4d5..1c02d23f6fcc 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig | |||
@@ -1,10 +1,14 @@ | |||
1 | config DRM_RADEON_KMS | 1 | config DRM_RADEON_KMS |
2 | bool "Enable modesetting on radeon by default" | 2 | bool "Enable modesetting on radeon by default - NEW DRIVER" |
3 | depends on DRM_RADEON | 3 | depends on DRM_RADEON |
4 | help | 4 | help |
5 | Choose this option if you want kernel modesetting enabled by default, | 5 | Choose this option if you want kernel modesetting enabled by default. |
6 | and you have a new enough userspace to support this. Running old | 6 | |
7 | userspaces with this enabled will cause pain. | 7 | This is a completely new driver. It's only part of the existing drm |
8 | for compatibility reasons. It requires an entirely different graphics | ||
9 | stack above it and works very differently from the old drm stack. | ||
10 | i.e. don't enable this unless you know what you are doing; it may | ||
11 | cause issues or bugs compared to the previous userspace driver stack. | ||
8 | 12 | ||
9 | When kernel modesetting is enabled the IOCTL of radeon/drm | 13 | When kernel modesetting is enabled the IOCTL of radeon/drm |
10 | driver are considered as invalid and an error message is printed | 14 | driver are considered as invalid and an error message is printed |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 388140a7e651..7f152f66f196 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <asm/unaligned.h> | ||
27 | 28 | ||
28 | #define ATOM_DEBUG | 29 | #define ATOM_DEBUG |
29 | 30 | ||
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
212 | case ATOM_ARG_PS: | 213 | case ATOM_ARG_PS: |
213 | idx = U8(*ptr); | 214 | idx = U8(*ptr); |
214 | (*ptr)++; | 215 | (*ptr)++; |
215 | val = le32_to_cpu(ctx->ps[idx]); | 216 | /* get_unaligned_le32 avoids unaligned accesses from atombios |
217 | * tables, which were noticed on a DEC Alpha. */ | ||
218 | val = get_unaligned_le32((u32 *)&ctx->ps[idx]); | ||
216 | if (print) | 219 | if (print) |
217 | DEBUG("PS[0x%02X,0x%04X]", idx, val); | 220 | DEBUG("PS[0x%02X,0x%04X]", idx, val); |
218 | break; | 221 | break; |
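The atom.c hunk above replaces a direct le32_to_cpu() load of a parameter-space entry with get_unaligned_le32(), because the parameter block passed in by callers is not guaranteed to be 4-byte aligned and unaligned loads were observed to cause trouble on a DEC Alpha, per the comment in the hunk. A minimal illustration of the helper; the buffer and offset here are hypothetical:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Reads a little-endian u32 that may sit at any byte offset.  A plain
     * *(const __le32 *)(blob + off) load can trap on Alpha when 'off' is
     * not a multiple of 4; get_unaligned_le32() does byte-wise loads plus
     * the endian conversion.
     */
    static u32 read_le32_param(const u8 *blob, unsigned int off)
    {
            return get_unaligned_le32(blob + off);
    }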
@@ -246,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
246 | case ATOM_WS_ATTRIBUTES: | 249 | case ATOM_WS_ATTRIBUTES: |
247 | val = gctx->io_attr; | 250 | val = gctx->io_attr; |
248 | break; | 251 | break; |
252 | case ATOM_WS_REGPTR: | ||
253 | val = gctx->reg_block; | ||
254 | break; | ||
249 | default: | 255 | default: |
250 | val = ctx->ws[idx]; | 256 | val = ctx->ws[idx]; |
251 | } | 257 | } |
@@ -385,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) | |||
385 | return atom_get_src_int(ctx, attr, ptr, NULL, 1); | 391 | return atom_get_src_int(ctx, attr, ptr, NULL, 1); |
386 | } | 392 | } |
387 | 393 | ||
394 | static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) | ||
395 | { | ||
396 | uint32_t val = 0xCDCDCDCD; | ||
397 | |||
398 | switch (align) { | ||
399 | case ATOM_SRC_DWORD: | ||
400 | val = U32(*ptr); | ||
401 | (*ptr) += 4; | ||
402 | break; | ||
403 | case ATOM_SRC_WORD0: | ||
404 | case ATOM_SRC_WORD8: | ||
405 | case ATOM_SRC_WORD16: | ||
406 | val = U16(*ptr); | ||
407 | (*ptr) += 2; | ||
408 | break; | ||
409 | case ATOM_SRC_BYTE0: | ||
410 | case ATOM_SRC_BYTE8: | ||
411 | case ATOM_SRC_BYTE16: | ||
412 | case ATOM_SRC_BYTE24: | ||
413 | val = U8(*ptr); | ||
414 | (*ptr)++; | ||
415 | break; | ||
416 | } | ||
417 | return val; | ||
418 | } | ||
419 | |||
388 | static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, | 420 | static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, |
389 | int *ptr, uint32_t *saved, int print) | 421 | int *ptr, uint32_t *saved, int print) |
390 | { | 422 | { |
@@ -482,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, | |||
482 | case ATOM_WS_ATTRIBUTES: | 514 | case ATOM_WS_ATTRIBUTES: |
483 | gctx->io_attr = val; | 515 | gctx->io_attr = val; |
484 | break; | 516 | break; |
517 | case ATOM_WS_REGPTR: | ||
518 | gctx->reg_block = val; | ||
519 | break; | ||
485 | default: | 520 | default: |
486 | ctx->ws[idx] = val; | 521 | ctx->ws[idx] = val; |
487 | } | 522 | } |
@@ -608,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) | |||
608 | uint8_t count = U8((*ptr)++); | 643 | uint8_t count = U8((*ptr)++); |
609 | SDEBUG(" count: %d\n", count); | 644 | SDEBUG(" count: %d\n", count); |
610 | if (arg == ATOM_UNIT_MICROSEC) | 645 | if (arg == ATOM_UNIT_MICROSEC) |
611 | schedule_timeout_uninterruptible(usecs_to_jiffies(count)); | 646 | udelay(count); |
612 | else | 647 | else |
613 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); | 648 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); |
614 | } | 649 | } |
@@ -677,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | |||
677 | SDEBUG(" dst: "); | 712 | SDEBUG(" dst: "); |
678 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 713 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
679 | SDEBUG(" src1: "); | 714 | SDEBUG(" src1: "); |
680 | src1 = atom_get_src(ctx, attr, ptr); | 715 | src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); |
681 | SDEBUG(" src2: "); | 716 | SDEBUG(" src2: "); |
682 | src2 = atom_get_src(ctx, attr, ptr); | 717 | src2 = atom_get_src(ctx, attr, ptr); |
683 | dst &= src1; | 718 | dst &= src1; |
@@ -809,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) | |||
809 | SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); | 844 | SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); |
810 | } | 845 | } |
811 | 846 | ||
847 | static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) | ||
848 | { | ||
849 | uint8_t attr = U8((*ptr)++), shift; | ||
850 | uint32_t saved, dst; | ||
851 | int dptr = *ptr; | ||
852 | attr &= 0x38; | ||
853 | attr |= atom_def_dst[attr >> 3] << 6; | ||
854 | SDEBUG(" dst: "); | ||
855 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
856 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); | ||
857 | SDEBUG(" shift: %d\n", shift); | ||
858 | dst <<= shift; | ||
859 | SDEBUG(" dst: "); | ||
860 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
861 | } | ||
862 | |||
863 | static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) | ||
864 | { | ||
865 | uint8_t attr = U8((*ptr)++), shift; | ||
866 | uint32_t saved, dst; | ||
867 | int dptr = *ptr; | ||
868 | attr &= 0x38; | ||
869 | attr |= atom_def_dst[attr >> 3] << 6; | ||
870 | SDEBUG(" dst: "); | ||
871 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
872 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); | ||
873 | SDEBUG(" shift: %d\n", shift); | ||
874 | dst >>= shift; | ||
875 | SDEBUG(" dst: "); | ||
876 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
877 | } | ||
878 | |||
812 | static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | 879 | static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) |
813 | { | 880 | { |
814 | uint8_t attr = U8((*ptr)++), shift; | 881 | uint8_t attr = U8((*ptr)++), shift; |
@@ -818,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
818 | attr |= atom_def_dst[attr >> 3] << 6; | 885 | attr |= atom_def_dst[attr >> 3] << 6; |
819 | SDEBUG(" dst: "); | 886 | SDEBUG(" dst: "); |
820 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 887 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
821 | shift = U8((*ptr)++); | 888 | shift = atom_get_src(ctx, attr, ptr); |
822 | SDEBUG(" shift: %d\n", shift); | 889 | SDEBUG(" shift: %d\n", shift); |
823 | dst <<= shift; | 890 | dst <<= shift; |
824 | SDEBUG(" dst: "); | 891 | SDEBUG(" dst: "); |
@@ -834,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
834 | attr |= atom_def_dst[attr >> 3] << 6; | 901 | attr |= atom_def_dst[attr >> 3] << 6; |
835 | SDEBUG(" dst: "); | 902 | SDEBUG(" dst: "); |
836 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 903 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
837 | shift = U8((*ptr)++); | 904 | shift = atom_get_src(ctx, attr, ptr); |
838 | SDEBUG(" shift: %d\n", shift); | 905 | SDEBUG(" shift: %d\n", shift); |
839 | dst >>= shift; | 906 | dst >>= shift; |
840 | SDEBUG(" dst: "); | 907 | SDEBUG(" dst: "); |
@@ -937,18 +1004,18 @@ static struct { | |||
937 | atom_op_or, ATOM_ARG_FB}, { | 1004 | atom_op_or, ATOM_ARG_FB}, { |
938 | atom_op_or, ATOM_ARG_PLL}, { | 1005 | atom_op_or, ATOM_ARG_PLL}, { |
939 | atom_op_or, ATOM_ARG_MC}, { | 1006 | atom_op_or, ATOM_ARG_MC}, { |
940 | atom_op_shl, ATOM_ARG_REG}, { | 1007 | atom_op_shift_left, ATOM_ARG_REG}, { |
941 | atom_op_shl, ATOM_ARG_PS}, { | 1008 | atom_op_shift_left, ATOM_ARG_PS}, { |
942 | atom_op_shl, ATOM_ARG_WS}, { | 1009 | atom_op_shift_left, ATOM_ARG_WS}, { |
943 | atom_op_shl, ATOM_ARG_FB}, { | 1010 | atom_op_shift_left, ATOM_ARG_FB}, { |
944 | atom_op_shl, ATOM_ARG_PLL}, { | 1011 | atom_op_shift_left, ATOM_ARG_PLL}, { |
945 | atom_op_shl, ATOM_ARG_MC}, { | 1012 | atom_op_shift_left, ATOM_ARG_MC}, { |
946 | atom_op_shr, ATOM_ARG_REG}, { | 1013 | atom_op_shift_right, ATOM_ARG_REG}, { |
947 | atom_op_shr, ATOM_ARG_PS}, { | 1014 | atom_op_shift_right, ATOM_ARG_PS}, { |
948 | atom_op_shr, ATOM_ARG_WS}, { | 1015 | atom_op_shift_right, ATOM_ARG_WS}, { |
949 | atom_op_shr, ATOM_ARG_FB}, { | 1016 | atom_op_shift_right, ATOM_ARG_FB}, { |
950 | atom_op_shr, ATOM_ARG_PLL}, { | 1017 | atom_op_shift_right, ATOM_ARG_PLL}, { |
951 | atom_op_shr, ATOM_ARG_MC}, { | 1018 | atom_op_shift_right, ATOM_ARG_MC}, { |
952 | atom_op_mul, ATOM_ARG_REG}, { | 1019 | atom_op_mul, ATOM_ARG_REG}, { |
953 | atom_op_mul, ATOM_ARG_PS}, { | 1020 | atom_op_mul, ATOM_ARG_PS}, { |
954 | atom_op_mul, ATOM_ARG_WS}, { | 1021 | atom_op_mul, ATOM_ARG_WS}, { |
@@ -1058,8 +1125,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1058 | 1125 | ||
1059 | SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); | 1126 | SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); |
1060 | 1127 | ||
1061 | /* reset reg block */ | ||
1062 | ctx->reg_block = 0; | ||
1063 | ectx.ctx = ctx; | 1128 | ectx.ctx = ctx; |
1064 | ectx.ps_shift = ps / 4; | 1129 | ectx.ps_shift = ps / 4; |
1065 | ectx.start = base; | 1130 | ectx.start = base; |
@@ -1096,6 +1161,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1096 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1161 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1097 | { | 1162 | { |
1098 | mutex_lock(&ctx->mutex); | 1163 | mutex_lock(&ctx->mutex); |
1164 | /* reset reg block */ | ||
1165 | ctx->reg_block = 0; | ||
1166 | /* reset fb window */ | ||
1167 | ctx->fb_base = 0; | ||
1168 | /* reset io mode */ | ||
1169 | ctx->io_mode = ATOM_IO_MM; | ||
1099 | atom_execute_table_locked(ctx, index, params); | 1170 | atom_execute_table_locked(ctx, index, params); |
1100 | mutex_unlock(&ctx->mutex); | 1171 | mutex_unlock(&ctx->mutex); |
1101 | } | 1172 | } |
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 47fd943f6d14..bc73781423a1 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -91,6 +91,7 @@ | |||
91 | #define ATOM_WS_AND_MASK 0x45 | 91 | #define ATOM_WS_AND_MASK 0x45 |
92 | #define ATOM_WS_FB_WINDOW 0x46 | 92 | #define ATOM_WS_FB_WINDOW 0x46 |
93 | #define ATOM_WS_ATTRIBUTES 0x47 | 93 | #define ATOM_WS_ATTRIBUTES 0x47 |
94 | #define ATOM_WS_REGPTR 0x48 | ||
94 | 95 | ||
95 | #define ATOM_IIO_NOP 0 | 96 | #define ATOM_IIO_NOP 0 |
96 | #define ATOM_IIO_START 1 | 97 | #define ATOM_IIO_START 1 |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 260fcf59f00c..af464e351fbd 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, | |||
307 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | 307 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
308 | args.ucCRTC = radeon_crtc->crtc_id; | 308 | args.ucCRTC = radeon_crtc->crtc_id; |
309 | 309 | ||
310 | printk("executing set crtc dtd timing\n"); | ||
311 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 310 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
312 | } | 311 | } |
313 | 312 | ||
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
347 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | 346 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
348 | args.ucCRTC = radeon_crtc->crtc_id; | 347 | args.ucCRTC = radeon_crtc->crtc_id; |
349 | 348 | ||
350 | printk("executing set crtc timing\n"); | ||
351 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 349 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
352 | } | 350 | } |
353 | 351 | ||
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) | |||
409 | } | 407 | } |
410 | } | 408 | } |
411 | 409 | ||
412 | void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | 410 | union adjust_pixel_clock { |
411 | ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; | ||
412 | }; | ||
413 | |||
414 | static u32 atombios_adjust_pll(struct drm_crtc *crtc, | ||
415 | struct drm_display_mode *mode, | ||
416 | struct radeon_pll *pll) | ||
413 | { | 417 | { |
414 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
415 | struct drm_device *dev = crtc->dev; | 418 | struct drm_device *dev = crtc->dev; |
416 | struct radeon_device *rdev = dev->dev_private; | 419 | struct radeon_device *rdev = dev->dev_private; |
417 | struct drm_encoder *encoder = NULL; | 420 | struct drm_encoder *encoder = NULL; |
418 | struct radeon_encoder *radeon_encoder = NULL; | 421 | struct radeon_encoder *radeon_encoder = NULL; |
419 | uint8_t frev, crev; | 422 | u32 adjusted_clock = mode->clock; |
420 | int index; | ||
421 | SET_PIXEL_CLOCK_PS_ALLOCATION args; | ||
422 | PIXEL_CLOCK_PARAMETERS *spc1_ptr; | ||
423 | PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; | ||
424 | PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; | ||
425 | uint32_t pll_clock = mode->clock; | ||
426 | uint32_t adjusted_clock; | ||
427 | uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; | ||
428 | struct radeon_pll *pll; | ||
429 | int pll_flags = 0; | ||
430 | 423 | ||
431 | memset(&args, 0, sizeof(args)); | 424 | /* reset the pll flags */ |
425 | pll->flags = 0; | ||
432 | 426 | ||
433 | if (ASIC_IS_AVIVO(rdev)) { | 427 | if (ASIC_IS_AVIVO(rdev)) { |
434 | if ((rdev->family == CHIP_RS600) || | 428 | if ((rdev->family == CHIP_RS600) || |
435 | (rdev->family == CHIP_RS690) || | 429 | (rdev->family == CHIP_RS690) || |
436 | (rdev->family == CHIP_RS740)) | 430 | (rdev->family == CHIP_RS740)) |
437 | pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | 431 | pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | |
438 | RADEON_PLL_PREFER_CLOSEST_LOWER); | 432 | RADEON_PLL_PREFER_CLOSEST_LOWER); |
439 | 433 | ||
440 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 434 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
441 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 435 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
442 | else | 436 | else |
443 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 437 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
444 | } else { | 438 | } else { |
445 | pll_flags |= RADEON_PLL_LEGACY; | 439 | pll->flags |= RADEON_PLL_LEGACY; |
446 | 440 | ||
447 | if (mode->clock > 200000) /* range limits??? */ | 441 | if (mode->clock > 200000) /* range limits??? */ |
448 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 442 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
449 | else | 443 | else |
450 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 444 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
451 | 445 | ||
452 | } | 446 | } |
453 | 447 | ||
454 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 448 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
455 | if (encoder->crtc == crtc) { | 449 | if (encoder->crtc == crtc) { |
456 | if (!ASIC_IS_AVIVO(rdev)) { | ||
457 | if (encoder->encoder_type != | ||
458 | DRM_MODE_ENCODER_DAC) | ||
459 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | ||
460 | if (encoder->encoder_type == | ||
461 | DRM_MODE_ENCODER_LVDS) | ||
462 | pll_flags |= RADEON_PLL_USE_REF_DIV; | ||
463 | } | ||
464 | radeon_encoder = to_radeon_encoder(encoder); | 450 | radeon_encoder = to_radeon_encoder(encoder); |
451 | if (ASIC_IS_AVIVO(rdev)) { | ||
452 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | ||
453 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | ||
454 | adjusted_clock = mode->clock * 2; | ||
455 | } else { | ||
456 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | ||
457 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | ||
458 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) | ||
459 | pll->flags |= RADEON_PLL_USE_REF_DIV; | ||
460 | } | ||
465 | break; | 461 | break; |
466 | } | 462 | } |
467 | } | 463 | } |
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
471 | * special hw requirements. | 467 | * special hw requirements. |
472 | */ | 468 | */ |
473 | if (ASIC_IS_DCE3(rdev)) { | 469 | if (ASIC_IS_DCE3(rdev)) { |
474 | ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; | 470 | union adjust_pixel_clock args; |
471 | struct radeon_encoder_atom_dig *dig; | ||
472 | u8 frev, crev; | ||
473 | int index; | ||
475 | 474 | ||
476 | if (!encoder) | 475 | if (!radeon_encoder->enc_priv) |
477 | return; | 476 | return adjusted_clock; |
478 | 477 | dig = radeon_encoder->enc_priv; | |
479 | memset(&adjust_pll_args, 0, sizeof(adjust_pll_args)); | ||
480 | adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10); | ||
481 | adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id; | ||
482 | adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
483 | 478 | ||
484 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); | 479 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); |
485 | atom_execute_table(rdev->mode_info.atom_context, | 480 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
486 | index, (uint32_t *)&adjust_pll_args); | 481 | &crev); |
487 | adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; | 482 | |
488 | } else { | 483 | memset(&args, 0, sizeof(args)); |
489 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 484 | |
490 | if (ASIC_IS_AVIVO(rdev) && | 485 | switch (frev) { |
491 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) | 486 | case 1: |
492 | adjusted_clock = mode->clock * 2; | 487 | switch (crev) { |
493 | else | 488 | case 1: |
494 | adjusted_clock = mode->clock; | 489 | case 2: |
490 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | ||
491 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | ||
492 | args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
493 | |||
494 | atom_execute_table(rdev->mode_info.atom_context, | ||
495 | index, (uint32_t *)&args); | ||
496 | adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; | ||
497 | break; | ||
498 | default: | ||
499 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
500 | return adjusted_clock; | ||
501 | } | ||
502 | break; | ||
503 | default: | ||
504 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
505 | return adjusted_clock; | ||
506 | } | ||
495 | } | 507 | } |
508 | return adjusted_clock; | ||
509 | } | ||
510 | |||
511 | union set_pixel_clock { | ||
512 | SET_PIXEL_CLOCK_PS_ALLOCATION base; | ||
513 | PIXEL_CLOCK_PARAMETERS v1; | ||
514 | PIXEL_CLOCK_PARAMETERS_V2 v2; | ||
515 | PIXEL_CLOCK_PARAMETERS_V3 v3; | ||
516 | }; | ||
517 | |||
518 | void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | ||
519 | { | ||
520 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
521 | struct drm_device *dev = crtc->dev; | ||
522 | struct radeon_device *rdev = dev->dev_private; | ||
523 | struct drm_encoder *encoder = NULL; | ||
524 | struct radeon_encoder *radeon_encoder = NULL; | ||
525 | u8 frev, crev; | ||
526 | int index; | ||
527 | union set_pixel_clock args; | ||
528 | u32 pll_clock = mode->clock; | ||
529 | u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; | ||
530 | struct radeon_pll *pll; | ||
531 | u32 adjusted_clock; | ||
532 | |||
533 | memset(&args, 0, sizeof(args)); | ||
534 | |||
535 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
536 | if (encoder->crtc == crtc) { | ||
537 | radeon_encoder = to_radeon_encoder(encoder); | ||
538 | break; | ||
539 | } | ||
540 | } | ||
541 | |||
542 | if (!radeon_encoder) | ||
543 | return; | ||
496 | 544 | ||
497 | if (radeon_crtc->crtc_id == 0) | 545 | if (radeon_crtc->crtc_id == 0) |
498 | pll = &rdev->clock.p1pll; | 546 | pll = &rdev->clock.p1pll; |
499 | else | 547 | else |
500 | pll = &rdev->clock.p2pll; | 548 | pll = &rdev->clock.p2pll; |
501 | 549 | ||
550 | /* adjust pixel clock as needed */ | ||
551 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll); | ||
552 | |||
502 | if (ASIC_IS_AVIVO(rdev)) { | 553 | if (ASIC_IS_AVIVO(rdev)) { |
503 | if (radeon_new_pll) | 554 | if (radeon_new_pll) |
504 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, | 555 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, |
505 | &fb_div, &frac_fb_div, | 556 | &fb_div, &frac_fb_div, |
506 | &ref_div, &post_div, pll_flags); | 557 | &ref_div, &post_div); |
507 | else | 558 | else |
508 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, | 559 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, |
509 | &fb_div, &frac_fb_div, | 560 | &fb_div, &frac_fb_div, |
510 | &ref_div, &post_div, pll_flags); | 561 | &ref_div, &post_div); |
511 | } else | 562 | } else |
512 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 563 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
513 | &ref_div, &post_div, pll_flags); | 564 | &ref_div, &post_div); |
514 | 565 | ||
515 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 566 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); |
516 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 567 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
520 | case 1: | 571 | case 1: |
521 | switch (crev) { | 572 | switch (crev) { |
522 | case 1: | 573 | case 1: |
523 | spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; | 574 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
524 | spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); | 575 | args.v1.usRefDiv = cpu_to_le16(ref_div); |
525 | spc1_ptr->usRefDiv = cpu_to_le16(ref_div); | 576 | args.v1.usFbDiv = cpu_to_le16(fb_div); |
526 | spc1_ptr->usFbDiv = cpu_to_le16(fb_div); | 577 | args.v1.ucFracFbDiv = frac_fb_div; |
527 | spc1_ptr->ucFracFbDiv = frac_fb_div; | 578 | args.v1.ucPostDiv = post_div; |
528 | spc1_ptr->ucPostDiv = post_div; | 579 | args.v1.ucPpll = |
529 | spc1_ptr->ucPpll = | ||
530 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 580 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
531 | spc1_ptr->ucCRTC = radeon_crtc->crtc_id; | 581 | args.v1.ucCRTC = radeon_crtc->crtc_id; |
532 | spc1_ptr->ucRefDivSrc = 1; | 582 | args.v1.ucRefDivSrc = 1; |
533 | break; | 583 | break; |
534 | case 2: | 584 | case 2: |
535 | spc2_ptr = | 585 | args.v2.usPixelClock = cpu_to_le16(mode->clock / 10); |
536 | (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; | 586 | args.v2.usRefDiv = cpu_to_le16(ref_div); |
537 | spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); | 587 | args.v2.usFbDiv = cpu_to_le16(fb_div); |
538 | spc2_ptr->usRefDiv = cpu_to_le16(ref_div); | 588 | args.v2.ucFracFbDiv = frac_fb_div; |
539 | spc2_ptr->usFbDiv = cpu_to_le16(fb_div); | 589 | args.v2.ucPostDiv = post_div; |
540 | spc2_ptr->ucFracFbDiv = frac_fb_div; | 590 | args.v2.ucPpll = |
541 | spc2_ptr->ucPostDiv = post_div; | ||
542 | spc2_ptr->ucPpll = | ||
543 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 591 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
544 | spc2_ptr->ucCRTC = radeon_crtc->crtc_id; | 592 | args.v2.ucCRTC = radeon_crtc->crtc_id; |
545 | spc2_ptr->ucRefDivSrc = 1; | 593 | args.v2.ucRefDivSrc = 1; |
546 | break; | 594 | break; |
547 | case 3: | 595 | case 3: |
548 | if (!encoder) | 596 | args.v3.usPixelClock = cpu_to_le16(mode->clock / 10); |
549 | return; | 597 | args.v3.usRefDiv = cpu_to_le16(ref_div); |
550 | spc3_ptr = | 598 | args.v3.usFbDiv = cpu_to_le16(fb_div); |
551 | (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; | 599 | args.v3.ucFracFbDiv = frac_fb_div; |
552 | spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); | 600 | args.v3.ucPostDiv = post_div; |
553 | spc3_ptr->usRefDiv = cpu_to_le16(ref_div); | 601 | args.v3.ucPpll = |
554 | spc3_ptr->usFbDiv = cpu_to_le16(fb_div); | ||
555 | spc3_ptr->ucFracFbDiv = frac_fb_div; | ||
556 | spc3_ptr->ucPostDiv = post_div; | ||
557 | spc3_ptr->ucPpll = | ||
558 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 602 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
559 | spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); | 603 | args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2); |
560 | spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; | 604 | args.v3.ucTransmitterId = radeon_encoder->encoder_id; |
561 | spc3_ptr->ucEncoderMode = | 605 | args.v3.ucEncoderMode = |
562 | atombios_get_encoder_mode(encoder); | 606 | atombios_get_encoder_mode(encoder); |
563 | break; | 607 | break; |
564 | default: | 608 | default: |
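
The rewrite above drops the PIXEL_CLOCK_PARAMETERS* pointer casts and writes straight into the versioned members of union set_pixel_clock, selecting the layout by the crev value returned from the table header. A minimal standalone sketch of that "one union, pick the member by revision" pattern; the structs and fields here are invented stand-ins, not the AtomBIOS layouts:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pclk_v1 {
	uint16_t pixel_clock;	/* in 10 kHz units */
	uint16_t ref_div, fb_div;
	uint8_t post_div, ppll;
};

struct pclk_v3 {
	uint16_t pixel_clock;
	uint16_t ref_div, fb_div;
	uint8_t post_div, ppll;
	uint8_t transmitter, misc;
};

union set_pixel_clock_args {
	struct pclk_v1 v1;
	struct pclk_v3 v3;
};

static void fill_args(union set_pixel_clock_args *args, int crev, unsigned clock_khz)
{
	memset(args, 0, sizeof(*args));
	switch (crev) {
	case 1:
		args->v1.pixel_clock = (uint16_t)(clock_khz / 10);
		args->v1.post_div = 2;
		break;
	case 3:
		args->v3.pixel_clock = (uint16_t)(clock_khz / 10);
		args->v3.transmitter = 1;	/* whichever encoder drives this CRTC */
		break;
	default:
		fprintf(stderr, "unknown table revision %d\n", crev);
		break;
	}
}

int main(void)
{
	union set_pixel_clock_args args;

	fill_args(&args, 3, 148500);
	printf("v3 pixel clock field: %u (10 kHz units)\n", (unsigned)args.v3.pixel_clock);
	return 0;
}

Because every member shares the same storage, the zeroed union can be handed to a single execute call regardless of which revision was filled in.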
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
571 | return; | 615 | return; |
572 | } | 616 | } |
573 | 617 | ||
574 | printk("executing set pll\n"); | ||
575 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 618 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
576 | } | 619 | } |
577 | 620 | ||
578 | int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 621 | static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
579 | struct drm_framebuffer *old_fb) | 622 | struct drm_framebuffer *old_fb) |
580 | { | 623 | { |
581 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 624 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
582 | struct drm_device *dev = crtc->dev; | 625 | struct drm_device *dev = crtc->dev; |
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
706 | return 0; | 749 | return 0; |
707 | } | 750 | } |
708 | 751 | ||
752 | int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | ||
753 | struct drm_framebuffer *old_fb) | ||
754 | { | ||
755 | struct drm_device *dev = crtc->dev; | ||
756 | struct radeon_device *rdev = dev->dev_private; | ||
757 | |||
758 | if (ASIC_IS_AVIVO(rdev)) | ||
759 | return avivo_crtc_set_base(crtc, x, y, old_fb); | ||
760 | else | ||
761 | return radeon_crtc_set_base(crtc, x, y, old_fb); | ||
762 | } | ||
763 | |||
764 | /* properly set additional regs when using atombios */ | ||
765 | static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) | ||
766 | { | ||
767 | struct drm_device *dev = crtc->dev; | ||
768 | struct radeon_device *rdev = dev->dev_private; | ||
769 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
770 | u32 disp_merge_cntl; | ||
771 | |||
772 | switch (radeon_crtc->crtc_id) { | ||
773 | case 0: | ||
774 | disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); | ||
775 | disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; | ||
776 | WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); | ||
777 | break; | ||
778 | case 1: | ||
779 | disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); | ||
780 | disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; | ||
781 | WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); | ||
782 | WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); | ||
783 | WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); | ||
784 | break; | ||
785 | } | ||
786 | } | ||
787 | |||
709 | int atombios_crtc_mode_set(struct drm_crtc *crtc, | 788 | int atombios_crtc_mode_set(struct drm_crtc *crtc, |
710 | struct drm_display_mode *mode, | 789 | struct drm_display_mode *mode, |
711 | struct drm_display_mode *adjusted_mode, | 790 | struct drm_display_mode *adjusted_mode, |
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
727 | else { | 806 | else { |
728 | if (radeon_crtc->crtc_id == 0) | 807 | if (radeon_crtc->crtc_id == 0) |
729 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 808 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
730 | radeon_crtc_set_base(crtc, x, y, old_fb); | 809 | atombios_crtc_set_base(crtc, x, y, old_fb); |
731 | radeon_legacy_atom_set_surface(crtc); | 810 | radeon_legacy_atom_fixup(crtc); |
732 | } | 811 | } |
733 | atombios_overscan_setup(crtc, mode, adjusted_mode); | 812 | atombios_overscan_setup(crtc, mode, adjusted_mode); |
734 | atombios_scaler_setup(crtc); | 813 | atombios_scaler_setup(crtc); |
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
746 | 825 | ||
747 | static void atombios_crtc_prepare(struct drm_crtc *crtc) | 826 | static void atombios_crtc_prepare(struct drm_crtc *crtc) |
748 | { | 827 | { |
749 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
750 | atombios_lock_crtc(crtc, 1); | 828 | atombios_lock_crtc(crtc, 1); |
829 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
751 | } | 830 | } |
752 | 831 | ||
753 | static void atombios_crtc_commit(struct drm_crtc *crtc) | 832 | static void atombios_crtc_commit(struct drm_crtc *crtc) |
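
atombios_crtc_set_base() above becomes a thin wrapper that picks the AVIVO or legacy implementation per device. A sketch of that dispatch shape, with an invented device struct and flag rather than struct radeon_device and ASIC_IS_AVIVO():

#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool is_avivo;
};

static int avivo_set_base(struct fake_dev *d, int x, int y)
{
	(void)d;
	printf("AVIVO path, base %d,%d\n", x, y);
	return 0;
}

static int legacy_set_base(struct fake_dev *d, int x, int y)
{
	(void)d;
	printf("legacy path, base %d,%d\n", x, y);
	return 0;
}

static int crtc_set_base(struct fake_dev *d, int x, int y)
{
	return d->is_avivo ? avivo_set_base(d, x, y) : legacy_set_base(d, x, y);
}

int main(void)
{
	struct fake_dev old_chip = { .is_avivo = false };
	struct fake_dev new_chip = { .is_avivo = true };

	crtc_set_base(&old_chip, 0, 0);
	crtc_set_base(&new_chip, 0, 0);
	return 0;
}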
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 3eb0ca5b3d73..99915a682d59 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | |||
332 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; | 332 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; |
333 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | 333 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); |
334 | unsigned char *base; | 334 | unsigned char *base; |
335 | int retry_count = 0; | ||
335 | 336 | ||
336 | memset(&args, 0, sizeof(args)); | 337 | memset(&args, 0, sizeof(args)); |
337 | 338 | ||
338 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; | 339 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; |
339 | 340 | ||
341 | retry: | ||
340 | memcpy(base, req_bytes, num_bytes); | 342 | memcpy(base, req_bytes, num_bytes); |
341 | 343 | ||
342 | args.lpAuxRequest = 0; | 344 | args.lpAuxRequest = 0; |
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | |||
347 | 349 | ||
348 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 350 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
349 | 351 | ||
350 | if (args.ucReplyStatus) { | 352 | if (args.ucReplyStatus && !args.ucDataOutLen) { |
351 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", | 353 | if (args.ucReplyStatus == 0x20 && retry_count++ < 10) |
354 | goto retry; | ||
355 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", | ||
352 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], | 356 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], |
353 | chan->rec.i2c_id, args.ucReplyStatus); | 357 | chan->rec.i2c_id, args.ucReplyStatus, retry_count); |
354 | return false; | 358 | return false; |
355 | } | 359 | } |
356 | 360 | ||
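
The hunk above adds a bounded retry: when the AUX reply status signals a defer-like condition (0x20 here) and no data came back, the transaction is replayed up to ten times before being reported as a failure. A self-contained sketch of the same pattern; the transfer function and status codes are stand-ins, not the AtomBIOS reply values:

#include <stdbool.h>
#include <stdio.h>

#define STATUS_OK	0x00
#define STATUS_DEFER	0x20	/* "try again later" style reply */
#define MAX_RETRIES	10

static int fake_aux_transfer(int attempt)
{
	return attempt < 3 ? STATUS_DEFER : STATUS_OK;	/* succeed on the 4th try */
}

static bool aux_transaction(void)
{
	int retry_count = 0;
	int status;

retry:
	status = fake_aux_transfer(retry_count);
	if (status != STATUS_OK) {
		if (status == STATUS_DEFER && retry_count++ < MAX_RETRIES)
			goto retry;
		fprintf(stderr, "aux failed, status 0x%02x after %d retries\n",
			status, retry_count);
		return false;
	}
	printf("aux ok after %d retries\n", retry_count);
	return true;
}

int main(void)
{
	return aux_transaction() ? 0 : 1;
}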
@@ -468,7 +472,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector, | |||
468 | struct radeon_connector *radeon_connector; | 472 | struct radeon_connector *radeon_connector; |
469 | struct radeon_connector_atom_dig *dig_connector; | 473 | struct radeon_connector_atom_dig *dig_connector; |
470 | 474 | ||
471 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || | 475 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && |
472 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) | 476 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) |
473 | return; | 477 | return; |
474 | 478 | ||
@@ -583,7 +587,7 @@ void dp_link_train(struct drm_encoder *encoder, | |||
583 | u8 train_set[4]; | 587 | u8 train_set[4]; |
584 | int i; | 588 | int i; |
585 | 589 | ||
586 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || | 590 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) && |
587 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) | 591 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) |
588 | return; | 592 | return; |
589 | 593 | ||
@@ -596,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder, | |||
596 | return; | 600 | return; |
597 | dig_connector = radeon_connector->con_priv; | 601 | dig_connector = radeon_connector->con_priv; |
598 | 602 | ||
599 | if (ASIC_IS_DCE32(rdev)) { | 603 | if (dig->dig_encoder) |
600 | if (dig->dig_block) | 604 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; |
601 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | 605 | else |
602 | else | 606 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; |
603 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; | 607 | if (dig_connector->linkb) |
604 | if (dig_connector->linkb) | 608 | enc_id |= ATOM_DP_CONFIG_LINK_B; |
605 | enc_id |= ATOM_DP_CONFIG_LINK_B; | 609 | else |
606 | else | 610 | enc_id |= ATOM_DP_CONFIG_LINK_A; |
607 | enc_id |= ATOM_DP_CONFIG_LINK_A; | ||
608 | } else { | ||
609 | if (dig_connector->linkb) | ||
610 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B; | ||
611 | else | ||
612 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A; | ||
613 | } | ||
614 | 611 | ||
615 | memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 612 | memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
616 | if (dig_connector->dp_clock == 270000) | 613 | if (dig_connector->dp_clock == 270000) |
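
The two || -> && changes above are De Morgan fixes: "return unless the connector is DP or eDP" must be written as (type != DP && type != eDP); with || the guard is true for every connector, including DP and eDP. A tiny standalone check of both forms (connector constants are made up; only the boolean shape matters):

#include <stdio.h>

enum conn { CONN_VGA = 1, CONN_DP = 10, CONN_EDP = 14 };

static int rejected_wrong(enum conn t)
{
	return (t != CONN_DP) || (t != CONN_EDP);	/* always true */
}

static int rejected_right(enum conn t)
{
	return (t != CONN_DP) && (t != CONN_EDP);	/* true only for non-DP/eDP */
}

int main(void)
{
	enum conn types[] = { CONN_VGA, CONN_DP, CONN_EDP };

	for (unsigned i = 0; i < 3; i++)
		printf("type %d: wrong=%d right=%d\n",
		       (int)types[i], rejected_wrong(types[i]), rejected_right(types[i]));
	return 0;
}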
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 8760d66e058a..c0d4650cdb79 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
354 | return RREG32(RADEON_CRTC2_CRNT_FRAME); | 354 | return RREG32(RADEON_CRTC2_CRNT_FRAME); |
355 | } | 355 | } |
356 | 356 | ||
357 | /* Whoever calls radeon_fence_emit should call ring_lock and ask | ||
358 | * for enough space (today the callers are ib schedule and buffer move) */ | ||
357 | void r100_fence_ring_emit(struct radeon_device *rdev, | 359 | void r100_fence_ring_emit(struct radeon_device *rdev, |
358 | struct radeon_fence *fence) | 360 | struct radeon_fence *fence) |
359 | { | 361 | { |
360 | /* Who ever call radeon_fence_emit should call ring_lock and ask | 362 | /* We have to make sure that caches are flushed before |
361 | * for enough space (today caller are ib schedule and buffer move) */ | 363 | * CPU might read something from VRAM. */ |
364 | radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
365 | radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL); | ||
366 | radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); | ||
367 | radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); | ||
362 | /* Wait until IDLE & CLEAN */ | 368 | /* Wait until IDLE & CLEAN */ |
363 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); | 369 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); |
364 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); | 370 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); |
@@ -1504,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1504 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1510 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1505 | return -EINVAL; | 1511 | return -EINVAL; |
1506 | } | 1512 | } |
1513 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); | ||
1507 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); | 1514 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1508 | track->immd_dwords = pkt->count - 1; | 1515 | track->immd_dwords = pkt->count - 1; |
1509 | r = r100_cs_track_check(p->rdev, track); | 1516 | r = r100_cs_track_check(p->rdev, track); |
@@ -3368,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev) | |||
3368 | 3375 | ||
3369 | void r100_fini(struct radeon_device *rdev) | 3376 | void r100_fini(struct radeon_device *rdev) |
3370 | { | 3377 | { |
3371 | r100_suspend(rdev); | ||
3372 | r100_cp_fini(rdev); | 3378 | r100_cp_fini(rdev); |
3373 | r100_wb_fini(rdev); | 3379 | r100_wb_fini(rdev); |
3374 | r100_ib_fini(rdev); | 3380 | r100_ib_fini(rdev); |
@@ -3399,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev) | |||
3399 | if (rdev->flags & RADEON_IS_AGP) { | 3405 | if (rdev->flags & RADEON_IS_AGP) { |
3400 | r = radeon_agp_init(rdev); | 3406 | r = radeon_agp_init(rdev); |
3401 | if (r) { | 3407 | if (r) { |
3402 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | 3408 | radeon_agp_disable(rdev); |
3403 | rdev->flags &= ~RADEON_IS_AGP; | ||
3404 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
3405 | } else { | 3409 | } else { |
3406 | rdev->mc.gtt_location = rdev->mc.agp_base; | 3410 | rdev->mc.gtt_location = rdev->mc.agp_base; |
3407 | } | 3411 | } |
@@ -3482,13 +3486,12 @@ int r100_init(struct radeon_device *rdev) | |||
3482 | if (r) { | 3486 | if (r) { |
3483 | /* Something went wrong with the accel init, stop accel */ | 3487 | /* Something went wrong with the accel init, stop accel */ |
3484 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 3488 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
3485 | r100_suspend(rdev); | ||
3486 | r100_cp_fini(rdev); | 3489 | r100_cp_fini(rdev); |
3487 | r100_wb_fini(rdev); | 3490 | r100_wb_fini(rdev); |
3488 | r100_ib_fini(rdev); | 3491 | r100_ib_fini(rdev); |
3492 | radeon_irq_kms_fini(rdev); | ||
3489 | if (rdev->flags & RADEON_IS_PCI) | 3493 | if (rdev->flags & RADEON_IS_PCI) |
3490 | r100_pci_gart_fini(rdev); | 3494 | r100_pci_gart_fini(rdev); |
3491 | radeon_irq_kms_fini(rdev); | ||
3492 | rdev->accel_working = false; | 3495 | rdev->accel_working = false; |
3493 | } | 3496 | } |
3494 | return 0; | 3497 | return 0; |
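
The r100 error path above now tears subsystems down without the extra suspend call and with the IRQ teardown moved ahead of the GART teardown, i.e. roughly the reverse of bring-up order. A generic sketch of keeping that property by construction with the goto-unwind idiom (the init/fini functions are placeholders, not the radeon ones):

#include <stdio.h>

static int  cp_init(void)  { puts("cp init");  return 0; }
static void cp_fini(void)  { puts("cp fini"); }
static int  wb_init(void)  { puts("wb init");  return 0; }
static void wb_fini(void)  { puts("wb fini"); }
static int  irq_init(void) { puts("irq init"); return -1; }	/* simulate failure */

static int accel_init(void)
{
	int r;

	r = cp_init();
	if (r)
		return r;
	r = wb_init();
	if (r)
		goto err_cp;
	r = irq_init();
	if (r)
		goto err_wb;
	return 0;

err_wb:
	wb_fini();
err_cp:
	cp_fini();
	return r;
}

int main(void)
{
	if (accel_init())
		puts("disabling GPU acceleration");
	return 0;
}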
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 20942127c46b..ff1e0cd608bf 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
371 | case 5: | 371 | case 5: |
372 | case 6: | 372 | case 6: |
373 | case 7: | 373 | case 7: |
374 | /* 1D/2D */ | ||
374 | track->textures[i].tex_coord_type = 0; | 375 | track->textures[i].tex_coord_type = 0; |
375 | break; | 376 | break; |
376 | case 1: | 377 | case 1: |
377 | track->textures[i].tex_coord_type = 1; | 378 | /* CUBE */ |
379 | track->textures[i].tex_coord_type = 2; | ||
378 | break; | 380 | break; |
379 | case 2: | 381 | case 2: |
380 | track->textures[i].tex_coord_type = 2; | 382 | /* 3D */ |
383 | track->textures[i].tex_coord_type = 1; | ||
381 | break; | 384 | break; |
382 | } | 385 | } |
383 | break; | 386 | break; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 0051d11b907c..43b55a030b4d 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev) | |||
506 | 506 | ||
507 | /* DDR for all card after R300 & IGP */ | 507 | /* DDR for all card after R300 & IGP */ |
508 | rdev->mc.vram_is_ddr = true; | 508 | rdev->mc.vram_is_ddr = true; |
509 | |||
509 | tmp = RREG32(RADEON_MEM_CNTL); | 510 | tmp = RREG32(RADEON_MEM_CNTL); |
510 | if (tmp & R300_MEM_NUM_CHANNELS_MASK) { | 511 | tmp &= R300_MEM_NUM_CHANNELS_MASK; |
511 | rdev->mc.vram_width = 128; | 512 | switch (tmp) { |
512 | } else { | 513 | case 0: rdev->mc.vram_width = 64; break; |
513 | rdev->mc.vram_width = 64; | 514 | case 1: rdev->mc.vram_width = 128; break; |
515 | case 2: rdev->mc.vram_width = 256; break; | ||
516 | default: rdev->mc.vram_width = 128; break; | ||
514 | } | 517 | } |
515 | 518 | ||
516 | r100_vram_init_sizes(rdev); | 519 | r100_vram_init_sizes(rdev); |
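
The MEM_CNTL decode above replaces a boolean 64/128 test with a switch that also recognizes a 256-bit configuration and falls back to 128 bits for unknown values. A standalone version of that decode; the mask value and widths are illustrative, not the R300 register layout:

#include <stdio.h>

#define MEM_NUM_CHANNELS_MASK 0x3u

static unsigned vram_width_bits(unsigned mem_cntl)
{
	switch (mem_cntl & MEM_NUM_CHANNELS_MASK) {
	case 0:
		return 64;
	case 1:
		return 128;
	case 2:
		return 256;
	default:
		return 128;	/* conservative fallback */
	}
}

int main(void)
{
	for (unsigned v = 0; v < 4; v++)
		printf("field %u -> %u-bit VRAM interface\n", v, vram_width_bits(v));
	return 0;
}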
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev) | |||
1327 | 1330 | ||
1328 | void r300_fini(struct radeon_device *rdev) | 1331 | void r300_fini(struct radeon_device *rdev) |
1329 | { | 1332 | { |
1330 | r300_suspend(rdev); | ||
1331 | r100_cp_fini(rdev); | 1333 | r100_cp_fini(rdev); |
1332 | r100_wb_fini(rdev); | 1334 | r100_wb_fini(rdev); |
1333 | r100_ib_fini(rdev); | 1335 | r100_ib_fini(rdev); |
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev) | |||
1418 | if (r) { | 1420 | if (r) { |
1419 | /* Something went wrong with the accel init, stop accel */ | 1421 | /* Something went wrong with the accel init, stop accel */ |
1420 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 1422 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
1421 | r300_suspend(rdev); | ||
1422 | r100_cp_fini(rdev); | 1423 | r100_cp_fini(rdev); |
1423 | r100_wb_fini(rdev); | 1424 | r100_wb_fini(rdev); |
1424 | r100_ib_fini(rdev); | 1425 | r100_ib_fini(rdev); |
1426 | radeon_irq_kms_fini(rdev); | ||
1425 | if (rdev->flags & RADEON_IS_PCIE) | 1427 | if (rdev->flags & RADEON_IS_PCIE) |
1426 | rv370_pcie_gart_fini(rdev); | 1428 | rv370_pcie_gart_fini(rdev); |
1427 | if (rdev->flags & RADEON_IS_PCI) | 1429 | if (rdev->flags & RADEON_IS_PCI) |
1428 | r100_pci_gart_fini(rdev); | 1430 | r100_pci_gart_fini(rdev); |
1429 | radeon_irq_kms_fini(rdev); | 1431 | radeon_agp_fini(rdev); |
1430 | rdev->accel_working = false; | 1432 | rdev->accel_working = false; |
1431 | } | 1433 | } |
1432 | return 0; | 1434 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 053404e71a9d..d9373246c97f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -50,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev) | |||
50 | if (rdev->flags & RADEON_IS_AGP) { | 50 | if (rdev->flags & RADEON_IS_AGP) { |
51 | r = radeon_agp_init(rdev); | 51 | r = radeon_agp_init(rdev); |
52 | if (r) { | 52 | if (r) { |
53 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | 53 | radeon_agp_disable(rdev); |
54 | rdev->flags &= ~RADEON_IS_AGP; | ||
55 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
56 | } else { | 54 | } else { |
57 | rdev->mc.gtt_location = rdev->mc.agp_base; | 55 | rdev->mc.gtt_location = rdev->mc.agp_base; |
58 | } | 56 | } |
@@ -391,16 +389,15 @@ int r420_init(struct radeon_device *rdev) | |||
391 | if (r) { | 389 | if (r) { |
392 | /* Something went wrong with the accel init, stop accel */ | 390 | /* Something went wrong with the accel init, stop accel */ |
393 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 391 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
394 | r420_suspend(rdev); | ||
395 | r100_cp_fini(rdev); | 392 | r100_cp_fini(rdev); |
396 | r100_wb_fini(rdev); | 393 | r100_wb_fini(rdev); |
397 | r100_ib_fini(rdev); | 394 | r100_ib_fini(rdev); |
395 | radeon_irq_kms_fini(rdev); | ||
398 | if (rdev->flags & RADEON_IS_PCIE) | 396 | if (rdev->flags & RADEON_IS_PCIE) |
399 | rv370_pcie_gart_fini(rdev); | 397 | rv370_pcie_gart_fini(rdev); |
400 | if (rdev->flags & RADEON_IS_PCI) | 398 | if (rdev->flags & RADEON_IS_PCI) |
401 | r100_pci_gart_fini(rdev); | 399 | r100_pci_gart_fini(rdev); |
402 | radeon_agp_fini(rdev); | 400 | radeon_agp_fini(rdev); |
403 | radeon_irq_kms_fini(rdev); | ||
404 | rdev->accel_working = false; | 401 | rdev->accel_working = false; |
405 | } | 402 | } |
406 | return 0; | 403 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 9a189072f2b9..ddf5731eba0d 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev) | |||
294 | if (r) { | 294 | if (r) { |
295 | /* Something went wrong with the accel init, stop accel */ | 295 | /* Something went wrong with the accel init, stop accel */ |
296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 296 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
297 | rv515_suspend(rdev); | ||
298 | r100_cp_fini(rdev); | 297 | r100_cp_fini(rdev); |
299 | r100_wb_fini(rdev); | 298 | r100_wb_fini(rdev); |
300 | r100_ib_fini(rdev); | 299 | r100_ib_fini(rdev); |
300 | radeon_irq_kms_fini(rdev); | ||
301 | rv370_pcie_gart_fini(rdev); | 301 | rv370_pcie_gart_fini(rdev); |
302 | radeon_agp_fini(rdev); | 302 | radeon_agp_fini(rdev); |
303 | radeon_irq_kms_fini(rdev); | ||
304 | rdev->accel_working = false; | 303 | rdev->accel_working = false; |
305 | } | 304 | } |
306 | return 0; | 305 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f5ff3490929f..2ffcf5a03551 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -624,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
624 | fixed20_12 a; | 624 | fixed20_12 a; |
625 | u32 tmp; | 625 | u32 tmp; |
626 | int chansize, numchan; | 626 | int chansize, numchan; |
627 | int r; | ||
628 | 627 | ||
629 | /* Get VRAM informations */ | 628 | /* Get VRAM informations */ |
630 | rdev->mc.vram_is_ddr = true; | 629 | rdev->mc.vram_is_ddr = true; |
@@ -667,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
667 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 666 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
668 | 667 | ||
669 | if (rdev->flags & RADEON_IS_AGP) { | 668 | if (rdev->flags & RADEON_IS_AGP) { |
670 | r = radeon_agp_init(rdev); | ||
671 | if (r) | ||
672 | return r; | ||
673 | /* gtt_size is setup by radeon_agp_init */ | 669 | /* gtt_size is setup by radeon_agp_init */ |
674 | rdev->mc.gtt_location = rdev->mc.agp_base; | 670 | rdev->mc.gtt_location = rdev->mc.agp_base; |
675 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; | 671 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; |
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
1658 | rdev->cp.align_mask = 16 - 1; | 1654 | rdev->cp.align_mask = 16 - 1; |
1659 | } | 1655 | } |
1660 | 1656 | ||
1657 | void r600_cp_fini(struct radeon_device *rdev) | ||
1658 | { | ||
1659 | r600_cp_stop(rdev); | ||
1660 | radeon_ring_fini(rdev); | ||
1661 | } | ||
1662 | |||
1661 | 1663 | ||
1662 | /* | 1664 | /* |
1663 | * GPU scratch registers helpers function. | 1665 | * GPU scratch registers helpers function. |
@@ -1792,23 +1794,24 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
1792 | radeon_ring_write(rdev, RB_INT_STAT); | 1794 | radeon_ring_write(rdev, RB_INT_STAT); |
1793 | } | 1795 | } |
1794 | 1796 | ||
1795 | int r600_copy_dma(struct radeon_device *rdev, | ||
1796 | uint64_t src_offset, | ||
1797 | uint64_t dst_offset, | ||
1798 | unsigned num_pages, | ||
1799 | struct radeon_fence *fence) | ||
1800 | { | ||
1801 | /* FIXME: implement */ | ||
1802 | return 0; | ||
1803 | } | ||
1804 | |||
1805 | int r600_copy_blit(struct radeon_device *rdev, | 1797 | int r600_copy_blit(struct radeon_device *rdev, |
1806 | uint64_t src_offset, uint64_t dst_offset, | 1798 | uint64_t src_offset, uint64_t dst_offset, |
1807 | unsigned num_pages, struct radeon_fence *fence) | 1799 | unsigned num_pages, struct radeon_fence *fence) |
1808 | { | 1800 | { |
1809 | r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | 1801 | int r; |
1802 | |||
1803 | mutex_lock(&rdev->r600_blit.mutex); | ||
1804 | rdev->r600_blit.vb_ib = NULL; | ||
1805 | r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | ||
1806 | if (r) { | ||
1807 | if (rdev->r600_blit.vb_ib) | ||
1808 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | ||
1809 | mutex_unlock(&rdev->r600_blit.mutex); | ||
1810 | return r; | ||
1811 | } | ||
1810 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | 1812 | r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); |
1811 | r600_blit_done_copy(rdev, fence); | 1813 | r600_blit_done_copy(rdev, fence); |
1814 | mutex_unlock(&rdev->r600_blit.mutex); | ||
1812 | return 0; | 1815 | return 0; |
1813 | } | 1816 | } |
1814 | 1817 | ||
@@ -1864,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev) | |||
1864 | return r; | 1867 | return r; |
1865 | } | 1868 | } |
1866 | r600_gpu_init(rdev); | 1869 | r600_gpu_init(rdev); |
1867 | 1870 | r = r600_blit_init(rdev); | |
1868 | if (!rdev->r600_blit.shader_obj) { | 1871 | if (r) { |
1869 | r = r600_blit_init(rdev); | 1872 | r600_blit_fini(rdev); |
1873 | rdev->asic->copy = NULL; | ||
1874 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
1875 | } | ||
1876 | /* pin copy shader into vram */ | ||
1877 | if (rdev->r600_blit.shader_obj) { | ||
1878 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
1879 | if (unlikely(r != 0)) | ||
1880 | return r; | ||
1881 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
1882 | &rdev->r600_blit.shader_gpu_addr); | ||
1883 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1870 | if (r) { | 1884 | if (r) { |
1871 | DRM_ERROR("radeon: failed blitter (%d).\n", r); | 1885 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
1872 | return r; | 1886 | return r; |
1873 | } | 1887 | } |
1874 | } | 1888 | } |
1875 | |||
1876 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
1877 | if (unlikely(r != 0)) | ||
1878 | return r; | ||
1879 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
1880 | &rdev->r600_blit.shader_gpu_addr); | ||
1881 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1882 | if (r) { | ||
1883 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
1884 | return r; | ||
1885 | } | ||
1886 | |||
1887 | /* Enable IRQ */ | 1889 | /* Enable IRQ */ |
1888 | r = r600_irq_init(rdev); | 1890 | r = r600_irq_init(rdev); |
1889 | if (r) { | 1891 | if (r) { |
@@ -1948,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev) | |||
1948 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1950 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
1949 | return r; | 1951 | return r; |
1950 | } | 1952 | } |
1953 | |||
1954 | r = r600_audio_init(rdev); | ||
1955 | if (r) { | ||
1956 | DRM_ERROR("radeon: audio resume failed\n"); | ||
1957 | return r; | ||
1958 | } | ||
1959 | |||
1951 | return r; | 1960 | return r; |
1952 | } | 1961 | } |
1953 | 1962 | ||
@@ -1955,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev) | |||
1955 | { | 1964 | { |
1956 | int r; | 1965 | int r; |
1957 | 1966 | ||
1967 | r600_audio_fini(rdev); | ||
1958 | /* FIXME: we should wait for ring to be empty */ | 1968 | /* FIXME: we should wait for ring to be empty */ |
1959 | r600_cp_stop(rdev); | 1969 | r600_cp_stop(rdev); |
1960 | rdev->cp.ready = false; | 1970 | rdev->cp.ready = false; |
1971 | r600_irq_suspend(rdev); | ||
1961 | r600_wb_disable(rdev); | 1972 | r600_wb_disable(rdev); |
1962 | r600_pcie_gart_disable(rdev); | 1973 | r600_pcie_gart_disable(rdev); |
1963 | /* unpin shaders bo */ | 1974 | /* unpin shaders bo */ |
1964 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 1975 | if (rdev->r600_blit.shader_obj) { |
1965 | if (unlikely(r != 0)) | 1976 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
1966 | return r; | 1977 | if (!r) { |
1967 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | 1978 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
1968 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 1979 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
1980 | } | ||
1981 | } | ||
1969 | return 0; | 1982 | return 0; |
1970 | } | 1983 | } |
1971 | 1984 | ||
@@ -2026,6 +2039,11 @@ int r600_init(struct radeon_device *rdev) | |||
2026 | r = radeon_fence_driver_init(rdev); | 2039 | r = radeon_fence_driver_init(rdev); |
2027 | if (r) | 2040 | if (r) |
2028 | return r; | 2041 | return r; |
2042 | if (rdev->flags & RADEON_IS_AGP) { | ||
2043 | r = radeon_agp_init(rdev); | ||
2044 | if (r) | ||
2045 | radeon_agp_disable(rdev); | ||
2046 | } | ||
2029 | r = r600_mc_init(rdev); | 2047 | r = r600_mc_init(rdev); |
2030 | if (r) | 2048 | if (r) |
2031 | return r; | 2049 | return r; |
@@ -2051,22 +2069,25 @@ int r600_init(struct radeon_device *rdev) | |||
2051 | rdev->accel_working = true; | 2069 | rdev->accel_working = true; |
2052 | r = r600_startup(rdev); | 2070 | r = r600_startup(rdev); |
2053 | if (r) { | 2071 | if (r) { |
2054 | r600_suspend(rdev); | 2072 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2073 | r600_cp_fini(rdev); | ||
2055 | r600_wb_fini(rdev); | 2074 | r600_wb_fini(rdev); |
2056 | radeon_ring_fini(rdev); | 2075 | r600_irq_fini(rdev); |
2076 | radeon_irq_kms_fini(rdev); | ||
2057 | r600_pcie_gart_fini(rdev); | 2077 | r600_pcie_gart_fini(rdev); |
2058 | rdev->accel_working = false; | 2078 | rdev->accel_working = false; |
2059 | } | 2079 | } |
2060 | if (rdev->accel_working) { | 2080 | if (rdev->accel_working) { |
2061 | r = radeon_ib_pool_init(rdev); | 2081 | r = radeon_ib_pool_init(rdev); |
2062 | if (r) { | 2082 | if (r) { |
2063 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | 2083 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
2064 | rdev->accel_working = false; | ||
2065 | } | ||
2066 | r = r600_ib_test(rdev); | ||
2067 | if (r) { | ||
2068 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
2069 | rdev->accel_working = false; | 2084 | rdev->accel_working = false; |
2085 | } else { | ||
2086 | r = r600_ib_test(rdev); | ||
2087 | if (r) { | ||
2088 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
2089 | rdev->accel_working = false; | ||
2090 | } | ||
2070 | } | 2091 | } |
2071 | } | 2092 | } |
2072 | 2093 | ||
@@ -2078,20 +2099,17 @@ int r600_init(struct radeon_device *rdev) | |||
2078 | 2099 | ||
2079 | void r600_fini(struct radeon_device *rdev) | 2100 | void r600_fini(struct radeon_device *rdev) |
2080 | { | 2101 | { |
2081 | /* Suspend operations */ | ||
2082 | r600_suspend(rdev); | ||
2083 | |||
2084 | r600_audio_fini(rdev); | 2102 | r600_audio_fini(rdev); |
2085 | r600_blit_fini(rdev); | 2103 | r600_blit_fini(rdev); |
2104 | r600_cp_fini(rdev); | ||
2105 | r600_wb_fini(rdev); | ||
2086 | r600_irq_fini(rdev); | 2106 | r600_irq_fini(rdev); |
2087 | radeon_irq_kms_fini(rdev); | 2107 | radeon_irq_kms_fini(rdev); |
2088 | radeon_ring_fini(rdev); | ||
2089 | r600_wb_fini(rdev); | ||
2090 | r600_pcie_gart_fini(rdev); | 2108 | r600_pcie_gart_fini(rdev); |
2109 | radeon_agp_fini(rdev); | ||
2091 | radeon_gem_fini(rdev); | 2110 | radeon_gem_fini(rdev); |
2092 | radeon_fence_driver_fini(rdev); | 2111 | radeon_fence_driver_fini(rdev); |
2093 | radeon_clocks_fini(rdev); | 2112 | radeon_clocks_fini(rdev); |
2094 | radeon_agp_fini(rdev); | ||
2095 | radeon_bo_fini(rdev); | 2113 | radeon_bo_fini(rdev); |
2096 | radeon_atombios_fini(rdev); | 2114 | radeon_atombios_fini(rdev); |
2097 | kfree(rdev->bios); | 2115 | kfree(rdev->bios); |
@@ -2197,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
2197 | rb_bufsz = drm_order(ring_size / 4); | 2215 | rb_bufsz = drm_order(ring_size / 4); |
2198 | ring_size = (1 << rb_bufsz) * 4; | 2216 | ring_size = (1 << rb_bufsz) * 4; |
2199 | rdev->ih.ring_size = ring_size; | 2217 | rdev->ih.ring_size = ring_size; |
2200 | rdev->ih.align_mask = 4 - 1; | 2218 | rdev->ih.ptr_mask = rdev->ih.ring_size - 1; |
2219 | rdev->ih.rptr = 0; | ||
2201 | } | 2220 | } |
2202 | 2221 | ||
2203 | static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) | 2222 | static int r600_ih_ring_alloc(struct radeon_device *rdev) |
2204 | { | 2223 | { |
2205 | int r; | 2224 | int r; |
2206 | 2225 | ||
2207 | rdev->ih.ring_size = ring_size; | ||
2208 | /* Allocate ring buffer */ | 2226 | /* Allocate ring buffer */ |
2209 | if (rdev->ih.ring_obj == NULL) { | 2227 | if (rdev->ih.ring_obj == NULL) { |
2210 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | 2228 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, |
@@ -2234,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) | |||
2234 | return r; | 2252 | return r; |
2235 | } | 2253 | } |
2236 | } | 2254 | } |
2237 | rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1; | ||
2238 | rdev->ih.rptr = 0; | ||
2239 | |||
2240 | return 0; | 2255 | return 0; |
2241 | } | 2256 | } |
2242 | 2257 | ||
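
The IH ring bookkeeping above switches from an align_mask to a ptr_mask derived from a power-of-two ring size, so pointers wrap with a single AND. A small standalone illustration; the entry size and requested ring size are arbitrary:

#include <stdio.h>

static unsigned roundup_pow2(unsigned v)
{
	unsigned p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned ring_size = roundup_pow2(60000);	/* bytes */
	unsigned ptr_mask = ring_size - 1;
	unsigned rptr = ring_size - 16;

	printf("ring_size %u, ptr_mask 0x%x\n", ring_size, ptr_mask);
	for (int i = 0; i < 4; i++) {
		rptr = (rptr + 16) & ptr_mask;	/* each IH entry is 16 bytes */
		printf("rptr -> %u\n", rptr);
	}
	return 0;
}

The same mask shows up later in this patch where the read pointer is advanced with "rptr += 16; rptr &= rdev->ih.ptr_mask;" instead of an explicit end-of-ring comparison.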
@@ -2386,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2386 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | 2401 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; |
2387 | 2402 | ||
2388 | /* allocate ring */ | 2403 | /* allocate ring */ |
2389 | ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); | 2404 | ret = r600_ih_ring_alloc(rdev); |
2390 | if (ret) | 2405 | if (ret) |
2391 | return ret; | 2406 | return ret; |
2392 | 2407 | ||
@@ -2449,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2449 | return ret; | 2464 | return ret; |
2450 | } | 2465 | } |
2451 | 2466 | ||
2452 | void r600_irq_fini(struct radeon_device *rdev) | 2467 | void r600_irq_suspend(struct radeon_device *rdev) |
2453 | { | 2468 | { |
2454 | r600_disable_interrupts(rdev); | 2469 | r600_disable_interrupts(rdev); |
2455 | r600_rlc_stop(rdev); | 2470 | r600_rlc_stop(rdev); |
2471 | } | ||
2472 | |||
2473 | void r600_irq_fini(struct radeon_device *rdev) | ||
2474 | { | ||
2475 | r600_irq_suspend(rdev); | ||
2456 | r600_ih_ring_fini(rdev); | 2476 | r600_ih_ring_fini(rdev); |
2457 | } | 2477 | } |
2458 | 2478 | ||
@@ -2467,8 +2487,12 @@ int r600_irq_set(struct radeon_device *rdev) | |||
2467 | return -EINVAL; | 2487 | return -EINVAL; |
2468 | } | 2488 | } |
2469 | /* don't enable anything if the ih is disabled */ | 2489 | /* don't enable anything if the ih is disabled */ |
2470 | if (!rdev->ih.enabled) | 2490 | if (!rdev->ih.enabled) { |
2491 | r600_disable_interrupts(rdev); | ||
2492 | /* force the active interrupt state to all disabled */ | ||
2493 | r600_disable_interrupt_state(rdev); | ||
2471 | return 0; | 2494 | return 0; |
2495 | } | ||
2472 | 2496 | ||
2473 | if (ASIC_IS_DCE3(rdev)) { | 2497 | if (ASIC_IS_DCE3(rdev)) { |
2474 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | 2498 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
@@ -2638,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
2638 | wptr = RREG32(IH_RB_WPTR); | 2662 | wptr = RREG32(IH_RB_WPTR); |
2639 | 2663 | ||
2640 | if (wptr & RB_OVERFLOW) { | 2664 | if (wptr & RB_OVERFLOW) { |
2641 | WARN_ON(1); | 2665 | /* When a ring buffer overflow happens, start parsing interrupts |
2642 | /* XXX deal with overflow */ | 2666 | * from the last vector that was not overwritten (wptr + 16). |
2643 | DRM_ERROR("IH RB overflow\n"); | 2667 | * Hopefully this should allow us to catch up. |
2668 | */ | ||
2669 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | ||
2670 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | ||
2671 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | ||
2644 | tmp = RREG32(IH_RB_CNTL); | 2672 | tmp = RREG32(IH_RB_CNTL); |
2645 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 2673 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
2646 | WREG32(IH_RB_CNTL, tmp); | 2674 | WREG32(IH_RB_CNTL, tmp); |
2647 | } | 2675 | } |
2648 | wptr = wptr & WPTR_OFFSET_MASK; | 2676 | return (wptr & rdev->ih.ptr_mask); |
2649 | |||
2650 | return wptr; | ||
2651 | } | 2677 | } |
2652 | 2678 | ||
2653 | /* r600 IV Ring | 2679 | /* r600 IV Ring |
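
On overflow, the new code above resumes reading at the oldest entry that has not been overwritten (wptr plus one 16-byte vector) rather than warning and continuing with a stale read pointer. A sketch of that recovery; the ring size, entry size and status bit are invented constants:

#include <stdio.h>

#define ENTRY_SIZE	16u
#define RING_SIZE	1024u			/* power of two */
#define PTR_MASK	(RING_SIZE - 1)
#define RB_OVERFLOW	(1u << 31)		/* pretend status bit */

static unsigned get_wptr(unsigned hw_wptr, unsigned *rptr)
{
	if (hw_wptr & RB_OVERFLOW) {
		unsigned wptr = hw_wptr & PTR_MASK;

		/* everything between the old rptr and wptr was overwritten */
		*rptr = (wptr + ENTRY_SIZE) & PTR_MASK;
		fprintf(stderr, "ring overflow, rptr moved to %u\n", *rptr);
		return wptr;
	}
	return hw_wptr & PTR_MASK;
}

int main(void)
{
	unsigned rptr = 32;
	unsigned wptr = get_wptr(RB_OVERFLOW | 512, &rptr);

	printf("wptr %u rptr %u\n", wptr, rptr);
	return 0;
}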
@@ -2683,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev) | |||
2683 | u32 wptr = r600_get_ih_wptr(rdev); | 2709 | u32 wptr = r600_get_ih_wptr(rdev); |
2684 | u32 rptr = rdev->ih.rptr; | 2710 | u32 rptr = rdev->ih.rptr; |
2685 | u32 src_id, src_data; | 2711 | u32 src_id, src_data; |
2686 | u32 last_entry = rdev->ih.ring_size - 16; | ||
2687 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; | 2712 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; |
2688 | unsigned long flags; | 2713 | unsigned long flags; |
2689 | bool queue_hotplug = false; | 2714 | bool queue_hotplug = false; |
2690 | 2715 | ||
2691 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | 2716 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); |
2717 | if (!rdev->ih.enabled) | ||
2718 | return IRQ_NONE; | ||
2692 | 2719 | ||
2693 | spin_lock_irqsave(&rdev->ih.lock, flags); | 2720 | spin_lock_irqsave(&rdev->ih.lock, flags); |
2694 | 2721 | ||
@@ -2817,10 +2844,8 @@ restart_ih: | |||
2817 | } | 2844 | } |
2818 | 2845 | ||
2819 | /* wptr/rptr are in bytes! */ | 2846 | /* wptr/rptr are in bytes! */ |
2820 | if (rptr == last_entry) | 2847 | rptr += 16; |
2821 | rptr = 0; | 2848 | rptr &= rdev->ih.ptr_mask; |
2822 | else | ||
2823 | rptr += 16; | ||
2824 | } | 2849 | } |
2825 | /* make sure wptr hasn't changed while processing */ | 2850 | /* make sure wptr hasn't changed while processing */ |
2826 | wptr = r600_get_ih_wptr(rdev); | 2851 | wptr = r600_get_ih_wptr(rdev); |
@@ -2888,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) | |||
2888 | return 0; | 2913 | return 0; |
2889 | #endif | 2914 | #endif |
2890 | } | 2915 | } |
2916 | |||
2917 | /** | ||
2918 | * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl | ||
2919 | * rdev: radeon device structure | ||
2920 | * bo: buffer object struct which userspace is waiting for idle | ||
2921 | * | ||
2922 | * Some R6XX/R7XX hardware doesn't seem to take an HDP flush performed | ||
2923 | * through the ring buffer into account, which leads to rendering | ||
2924 | * corruption; see http://bugzilla.kernel.org/show_bug.cgi?id=15186. | ||
2925 | * To avoid this we perform the HDP flush directly via an MMIO register write. | ||
2926 | */ | ||
2927 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | ||
2928 | { | ||
2929 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
2930 | } | ||
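
r600_ioctl_wait_idle() above backs the new per-ASIC ioctl_wait_idle hook declared in radeon.h further down in this patch. A sketch of how such an optional hook is typically wired up and called only when present; the device struct, ops table and "register" here are invented for the example:

#include <stdint.h>
#include <stdio.h>

struct fake_dev;

struct asic_ops {
	void (*ioctl_wait_idle)(struct fake_dev *dev);	/* may be NULL */
};

struct fake_dev {
	const struct asic_ops *ops;
	uint32_t hdp_flush_reg;
};

static void r6xx_wait_idle_flush(struct fake_dev *dev)
{
	dev->hdp_flush_reg = 0x1;	/* stands in for the MMIO register write */
	puts("HDP flush via MMIO");
}

static const struct asic_ops r6xx_ops = { .ioctl_wait_idle = r6xx_wait_idle_flush };
static const struct asic_ops r1xx_ops = { .ioctl_wait_idle = NULL };

static void wait_idle_ioctl(struct fake_dev *dev)
{
	/* ... wait for the buffer object to go idle ... */
	if (dev->ops->ioctl_wait_idle)
		dev->ops->ioctl_wait_idle(dev);
}

int main(void)
{
	struct fake_dev r600 = { .ops = &r6xx_ops };
	struct fake_dev r100 = { .ops = &r1xx_ops };

	wait_idle_ioctl(&r600);
	wait_idle_ioctl(&r100);
	return 0;
}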
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 99e2c3891a7d..0dcb6904c4ff 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
37 | { | 37 | { |
38 | return rdev->family >= CHIP_R600 | 38 | return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) |
39 | || rdev->family == CHIP_RS600 | 39 | || rdev->family == CHIP_RS600 |
40 | || rdev->family == CHIP_RS690 | 40 | || rdev->family == CHIP_RS690 |
41 | || rdev->family == CHIP_RS740; | 41 | || rdev->family == CHIP_RS740; |
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev) | |||
261 | if (!r600_audio_chipset_supported(rdev)) | 261 | if (!r600_audio_chipset_supported(rdev)) |
262 | return; | 262 | return; |
263 | 263 | ||
264 | WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); | ||
265 | |||
266 | del_timer(&rdev->audio_timer); | 264 | del_timer(&rdev->audio_timer); |
265 | WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); | ||
267 | } | 266 | } |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 8787ea89dc6e..446b765ac72a 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
449 | u32 packet2s[16]; | 449 | u32 packet2s[16]; |
450 | int num_packet2s = 0; | 450 | int num_packet2s = 0; |
451 | 451 | ||
452 | mutex_init(&rdev->r600_blit.mutex); | ||
452 | rdev->r600_blit.state_offset = 0; | 453 | rdev->r600_blit.state_offset = 0; |
453 | 454 | ||
454 | if (rdev->family >= CHIP_RV770) | 455 | if (rdev->family >= CHIP_RV770) |
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
512 | { | 513 | { |
513 | int r; | 514 | int r; |
514 | 515 | ||
516 | if (rdev->r600_blit.shader_obj == NULL) | ||
517 | return; | ||
518 | /* If we can't reserve the bo, unref should be enough to destroy | ||
519 | * it when it becomes idle. | ||
520 | */ | ||
515 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 521 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
516 | if (unlikely(r != 0)) { | 522 | if (!r) { |
517 | dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); | 523 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
518 | goto out_unref; | 524 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
519 | } | 525 | } |
520 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
521 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
522 | out_unref: | ||
523 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | 526 | radeon_bo_unref(&rdev->r600_blit.shader_obj); |
524 | } | 527 | } |
525 | 528 | ||
@@ -540,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev) | |||
540 | void r600_vb_ib_put(struct radeon_device *rdev) | 543 | void r600_vb_ib_put(struct radeon_device *rdev) |
541 | { | 544 | { |
542 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); | 545 | radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); |
543 | mutex_lock(&rdev->ib_pool.mutex); | ||
544 | list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); | ||
545 | mutex_unlock(&rdev->ib_pool.mutex); | ||
546 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | 546 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); |
547 | } | 547 | } |
548 | 548 | ||
@@ -555,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
555 | int dwords_per_loop = 76, num_loops; | 555 | int dwords_per_loop = 76, num_loops; |
556 | 556 | ||
557 | r = r600_vb_ib_get(rdev); | 557 | r = r600_vb_ib_get(rdev); |
558 | WARN_ON(r); | 558 | if (r) |
559 | return r; | ||
559 | 560 | ||
560 | /* set_render_target emits 2 extra dwords on rv6xx */ | 561 | /* set_render_target emits 2 extra dwords on rv6xx */ |
561 | if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) | 562 | if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) |
@@ -581,7 +582,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
581 | ring_size += 5; /* done copy */ | 582 | ring_size += 5; /* done copy */ |
582 | ring_size += 7; /* fence emit for done copy */ | 583 | ring_size += 7; /* fence emit for done copy */ |
583 | r = radeon_ring_lock(rdev, ring_size); | 584 | r = radeon_ring_lock(rdev, ring_size); |
584 | WARN_ON(r); | 585 | if (r) |
586 | return r; | ||
585 | 587 | ||
586 | set_default_state(rdev); /* 14 */ | 588 | set_default_state(rdev); /* 14 */ |
587 | set_shaders(rdev); /* 26 */ | 589 | set_shaders(rdev); /* 26 */ |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 6d5a711c2e91..75bcf35a0931 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev, | |||
1428 | 1428 | ||
1429 | gb_tiling_config |= R600_BANK_SWAPS(1); | 1429 | gb_tiling_config |= R600_BANK_SWAPS(1); |
1430 | 1430 | ||
1431 | backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, | 1431 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) |
1432 | dev_priv->r600_max_backends, | 1432 | backend_map = 0x28; |
1433 | (0xff << dev_priv->r600_max_backends) & 0xff); | 1433 | else |
1434 | backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, | ||
1435 | dev_priv->r600_max_backends, | ||
1436 | (0xff << dev_priv->r600_max_backends) & 0xff); | ||
1434 | gb_tiling_config |= R600_BACKEND_MAP(backend_map); | 1437 | gb_tiling_config |= R600_BACKEND_MAP(backend_map); |
1435 | 1438 | ||
1436 | cc_gc_shader_pipe_config = | 1439 | cc_gc_shader_pipe_config = |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 44060b92d9e6..e4c45ec16507 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
36 | typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); | 36 | typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); |
37 | static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; | 37 | static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; |
38 | 38 | ||
39 | struct r600_cs_track { | ||
40 | u32 cb_color0_base_last; | ||
41 | }; | ||
42 | |||
39 | /** | 43 | /** |
40 | * r600_cs_packet_parse() - parse cp packet and point ib index to next packet | 44 | * r600_cs_packet_parse() - parse cp packet and point ib index to next packet |
41 | * @parser: parser structure holding parsing context. | 45 | * @parser: parser structure holding parsing context. |
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
177 | } | 181 | } |
178 | 182 | ||
179 | /** | 183 | /** |
184 | * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc | ||
185 | * @parser: parser structure holding parsing context. | ||
186 | * | ||
187 | * Check whether the next packet is a relocation packet3 (NOP); returns 1 | ||
188 | * if it is, 0 otherwise, without advancing the parser index. | ||
189 | **/ | ||
190 | static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) | ||
191 | { | ||
192 | struct radeon_cs_packet p3reloc; | ||
193 | int r; | ||
194 | |||
195 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); | ||
196 | if (r) { | ||
197 | return 0; | ||
198 | } | ||
199 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
200 | return 0; | ||
201 | } | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /** | ||
180 | * r600_cs_packet_next_vline() - parse userspace VLINE packet | 206 | * r600_cs_packet_next_vline() - parse userspace VLINE packet |
181 | * @parser: parser structure holding parsing context. | 207 | * @parser: parser structure holding parsing context. |
182 | * | 208 | * |
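
r600_cs_packet_next_is_pkt3_nop() above peeks at the upcoming packet by parsing it into a local descriptor and never advances the parser, so r600_packet3_check() can branch on "is a relocation NOP next?" before deciding how to treat a register write. A simplified standalone model of that peek; the packet encoding here is invented, not the real PM4 format:

#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE3	3u
#define OPCODE_NOP	0x10u

struct packet {
	unsigned type, opcode;
};

static int parse_packet(const uint32_t *stream, unsigned idx, unsigned len,
			struct packet *pkt)
{
	if (idx >= len)
		return -1;
	pkt->type = stream[idx] >> 30;
	pkt->opcode = stream[idx] & 0xff;
	return 0;
}

/* returns 1 if the packet at 'idx' is a type-3 NOP, without consuming it */
static int next_is_pkt3_nop(const uint32_t *stream, unsigned idx, unsigned len)
{
	struct packet pkt;

	if (parse_packet(stream, idx, len, &pkt))
		return 0;
	return pkt.type == PACKET_TYPE3 && pkt.opcode == OPCODE_NOP;
}

int main(void)
{
	uint32_t stream[] = { (PACKET_TYPE3 << 30) | OPCODE_NOP, 0x0 };

	printf("idx 0: %d, idx 1: %d\n",
	       next_is_pkt3_nop(stream, 0, 2), next_is_pkt3_nop(stream, 1, 2));
	return 0;
}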
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
337 | struct radeon_cs_packet *pkt) | 363 | struct radeon_cs_packet *pkt) |
338 | { | 364 | { |
339 | struct radeon_cs_reloc *reloc; | 365 | struct radeon_cs_reloc *reloc; |
366 | struct r600_cs_track *track; | ||
340 | volatile u32 *ib; | 367 | volatile u32 *ib; |
341 | unsigned idx; | 368 | unsigned idx; |
342 | unsigned i; | 369 | unsigned i; |
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
344 | int r; | 371 | int r; |
345 | u32 idx_value; | 372 | u32 idx_value; |
346 | 373 | ||
374 | track = (struct r600_cs_track *)p->track; | ||
347 | ib = p->ib->ptr; | 375 | ib = p->ib->ptr; |
348 | idx = pkt->idx + 1; | 376 | idx = pkt->idx + 1; |
349 | idx_value = radeon_get_ib_value(p, idx); | 377 | idx_value = radeon_get_ib_value(p, idx); |
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
503 | for (i = 0; i < pkt->count; i++) { | 531 | for (i = 0; i < pkt->count; i++) { |
504 | reg = start_reg + (4 * i); | 532 | reg = start_reg + (4 * i); |
505 | switch (reg) { | 533 | switch (reg) { |
534 | /* These registers were added late, and there is userspace | ||
535 | * which does provide a relocation for them but sets a | ||
536 | * 0 offset. In order to avoid breaking old userspace | ||
537 | * we detect this and set the address to point to the last | ||
538 | * CB_COLOR0_BASE. Note that if userspace doesn't set | ||
539 | * CB_COLOR0_BASE before this register we will report an | ||
540 | * error. Old userspace always sets CB_COLOR0_BASE | ||
541 | * before any of this. | ||
542 | */ | ||
543 | case R_0280E0_CB_COLOR0_FRAG: | ||
544 | case R_0280E4_CB_COLOR1_FRAG: | ||
545 | case R_0280E8_CB_COLOR2_FRAG: | ||
546 | case R_0280EC_CB_COLOR3_FRAG: | ||
547 | case R_0280F0_CB_COLOR4_FRAG: | ||
548 | case R_0280F4_CB_COLOR5_FRAG: | ||
549 | case R_0280F8_CB_COLOR6_FRAG: | ||
550 | case R_0280FC_CB_COLOR7_FRAG: | ||
551 | case R_0280C0_CB_COLOR0_TILE: | ||
552 | case R_0280C4_CB_COLOR1_TILE: | ||
553 | case R_0280C8_CB_COLOR2_TILE: | ||
554 | case R_0280CC_CB_COLOR3_TILE: | ||
555 | case R_0280D0_CB_COLOR4_TILE: | ||
556 | case R_0280D4_CB_COLOR5_TILE: | ||
557 | case R_0280D8_CB_COLOR6_TILE: | ||
558 | case R_0280DC_CB_COLOR7_TILE: | ||
559 | if (!r600_cs_packet_next_is_pkt3_nop(p)) { | ||
560 | if (!track->cb_color0_base_last) { | ||
561 | dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); | ||
562 | return -EINVAL; | ||
563 | } | ||
564 | ib[idx+1+i] = track->cb_color0_base_last; | ||
565 | printk_once(KERN_WARNING "radeon: You have old & broken userspace " | ||
566 | "please consider updating mesa & xf86-video-ati\n"); | ||
567 | } else { | ||
568 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
569 | if (r) { | ||
570 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | ||
571 | return -EINVAL; | ||
572 | } | ||
573 | ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
574 | } | ||
575 | break; | ||
506 | case DB_DEPTH_BASE: | 576 | case DB_DEPTH_BASE: |
507 | case DB_HTILE_DATA_BASE: | 577 | case DB_HTILE_DATA_BASE: |
508 | case CB_COLOR0_BASE: | 578 | case CB_COLOR0_BASE: |
579 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
580 | if (r) { | ||
581 | DRM_ERROR("bad SET_CONTEXT_REG " | ||
582 | "0x%04X\n", reg); | ||
583 | return -EINVAL; | ||
584 | } | ||
585 | ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
586 | track->cb_color0_base_last = ib[idx+1+i]; | ||
587 | break; | ||
509 | case CB_COLOR1_BASE: | 588 | case CB_COLOR1_BASE: |
510 | case CB_COLOR2_BASE: | 589 | case CB_COLOR2_BASE: |
511 | case CB_COLOR3_BASE: | 590 | case CB_COLOR3_BASE: |
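
The FRAG/TILE handling above is a backward-compatibility fallback: when no relocation follows, the value is patched to the last CB_COLOR0_BASE that was properly relocated and a one-time warning is printed, instead of failing the whole command stream. A standalone sketch of that "remember the last good value, fall back and warn once" pattern; all names and the register handling are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct cs_track {
	unsigned color0_base_last;
};

static void warn_once(const char *msg)
{
	static bool warned;

	if (!warned) {
		fprintf(stderr, "%s\n", msg);
		warned = true;
	}
}

/* has_reloc: whether userspace supplied a relocation for this register */
static int check_frag_reg(struct cs_track *t, unsigned *value, bool has_reloc,
			  unsigned reloc_offset)
{
	if (!has_reloc) {
		if (!t->color0_base_last)
			return -1;		/* truly broken stream, reject */
		*value = t->color0_base_last;	/* old userspace: reuse the known base */
		warn_once("old userspace detected, please update mesa");
		return 0;
	}
	*value += reloc_offset;			/* normal path: apply the relocation */
	return 0;
}

int main(void)
{
	struct cs_track track = { .color0_base_last = 0x1000 };
	unsigned v = 0;
	int r;

	r = check_frag_reg(&track, &v, false, 0);
	printf("no reloc: r=%d value=0x%x\n", r, v);
	v = 0x10;
	r = check_frag_reg(&track, &v, true, 0x2000);
	printf("reloc:    r=%d value=0x%x\n", r, v);
	return 0;
}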
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
678 | int r600_cs_parse(struct radeon_cs_parser *p) | 757 | int r600_cs_parse(struct radeon_cs_parser *p) |
679 | { | 758 | { |
680 | struct radeon_cs_packet pkt; | 759 | struct radeon_cs_packet pkt; |
760 | struct r600_cs_track *track; | ||
681 | int r; | 761 | int r; |
682 | 762 | ||
763 | track = kzalloc(sizeof(*track), GFP_KERNEL); | ||
764 | p->track = track; | ||
683 | do { | 765 | do { |
684 | r = r600_cs_packet_parse(p, &pkt, p->idx); | 766 | r = r600_cs_packet_parse(p, &pkt, p->idx); |
685 | if (r) { | 767 | if (r) { |
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
757 | /* initialize parser */ | 839 | /* initialize parser */ |
758 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); | 840 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
759 | parser.filp = filp; | 841 | parser.filp = filp; |
842 | parser.dev = &dev->pdev->dev; | ||
760 | parser.rdev = NULL; | 843 | parser.rdev = NULL; |
761 | parser.family = family; | 844 | parser.family = family; |
762 | parser.ib = &fake_ib; | 845 | parser.ib = &fake_ib; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 05894edadab4..30480881aed1 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -882,4 +882,29 @@ | |||
882 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) | 882 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) |
883 | 883 | ||
884 | #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | 884 | #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 |
885 | |||
886 | #define R_0280E0_CB_COLOR0_FRAG 0x0280E0 | ||
887 | #define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) | ||
888 | #define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) | ||
889 | #define C_0280E0_BASE_256B 0x00000000 | ||
890 | #define R_0280E4_CB_COLOR1_FRAG 0x0280E4 | ||
891 | #define R_0280E8_CB_COLOR2_FRAG 0x0280E8 | ||
892 | #define R_0280EC_CB_COLOR3_FRAG 0x0280EC | ||
893 | #define R_0280F0_CB_COLOR4_FRAG 0x0280F0 | ||
894 | #define R_0280F4_CB_COLOR5_FRAG 0x0280F4 | ||
895 | #define R_0280F8_CB_COLOR6_FRAG 0x0280F8 | ||
896 | #define R_0280FC_CB_COLOR7_FRAG 0x0280FC | ||
897 | #define R_0280C0_CB_COLOR0_TILE 0x0280C0 | ||
898 | #define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) | ||
899 | #define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) | ||
900 | #define C_0280C0_BASE_256B 0x00000000 | ||
901 | #define R_0280C4_CB_COLOR1_TILE 0x0280C4 | ||
902 | #define R_0280C8_CB_COLOR2_TILE 0x0280C8 | ||
903 | #define R_0280CC_CB_COLOR3_TILE 0x0280CC | ||
904 | #define R_0280D0_CB_COLOR4_TILE 0x0280D0 | ||
905 | #define R_0280D4_CB_COLOR5_TILE 0x0280D4 | ||
906 | #define R_0280D8_CB_COLOR6_TILE 0x0280D8 | ||
907 | #define R_0280DC_CB_COLOR7_TILE 0x0280DC | ||
908 | |||
909 | |||
885 | #endif | 910 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index eb5f99b9469d..c0356bb193e5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -96,6 +96,7 @@ extern int radeon_audio; | |||
96 | * symbol; | 96 | * symbol; |
97 | */ | 97 | */ |
98 | #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 98 | #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
99 | /* RADEON_IB_POOL_SIZE must be a power of 2 */ | ||
99 | #define RADEON_IB_POOL_SIZE 16 | 100 | #define RADEON_IB_POOL_SIZE 16 |
100 | #define RADEON_DEBUGFS_MAX_NUM_FILES 32 | 101 | #define RADEON_DEBUGFS_MAX_NUM_FILES 32 |
101 | #define RADEONFB_CONN_LIMIT 4 | 102 | #define RADEONFB_CONN_LIMIT 4 |
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); | |||
363 | */ | 364 | */ |
364 | struct radeon_ib { | 365 | struct radeon_ib { |
365 | struct list_head list; | 366 | struct list_head list; |
366 | unsigned long idx; | 367 | unsigned idx; |
367 | uint64_t gpu_addr; | 368 | uint64_t gpu_addr; |
368 | struct radeon_fence *fence; | 369 | struct radeon_fence *fence; |
369 | uint32_t *ptr; | 370 | uint32_t *ptr; |
370 | uint32_t length_dw; | 371 | uint32_t length_dw; |
372 | bool free; | ||
371 | }; | 373 | }; |
372 | 374 | ||
373 | /* | 375 | /* |
@@ -377,10 +379,9 @@ struct radeon_ib { | |||
377 | struct radeon_ib_pool { | 379 | struct radeon_ib_pool { |
378 | struct mutex mutex; | 380 | struct mutex mutex; |
379 | struct radeon_bo *robj; | 381 | struct radeon_bo *robj; |
380 | struct list_head scheduled_ibs; | ||
381 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; | 382 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; |
382 | bool ready; | 383 | bool ready; |
383 | DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); | 384 | unsigned head_id; |
384 | }; | 385 | }; |
385 | 386 | ||
386 | struct radeon_cp { | 387 | struct radeon_cp { |
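
The radeon_ib/radeon_ib_pool changes above drop the allocation bitmap in favour of a per-IB free flag plus a remembered head index (the comment added earlier notes that RADEON_IB_POOL_SIZE must stay a power of two so indices can wrap with a mask). A rough user-space model of that kind of bookkeeping; the scan policy and pool size are illustrative, and the real pool must also respect fences before a slot can be reused:

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16	/* power of two so the index wraps with & (POOL_SIZE - 1) */

struct ib {
	unsigned idx;
	bool free;
};

struct ib_pool {
	struct ib ibs[POOL_SIZE];
	unsigned head_id;
};

static struct ib *ib_get(struct ib_pool *pool)
{
	for (unsigned i = 0; i < POOL_SIZE; i++) {
		unsigned idx = (pool->head_id + i) & (POOL_SIZE - 1);

		if (pool->ibs[idx].free) {
			pool->ibs[idx].free = false;
			pool->head_id = (idx + 1) & (POOL_SIZE - 1);
			return &pool->ibs[idx];
		}
	}
	return NULL;	/* pool exhausted */
}

static void ib_free(struct ib_pool *pool, struct ib *ib)
{
	(void)pool;
	ib->free = true;
}

int main(void)
{
	struct ib_pool pool = { .head_id = 0 };
	struct ib *a, *b, *c;

	for (unsigned i = 0; i < POOL_SIZE; i++) {
		pool.ibs[i].idx = i;
		pool.ibs[i].free = true;
	}
	a = ib_get(&pool);
	b = ib_get(&pool);
	printf("got %u then %u\n", a->idx, b->idx);
	ib_free(&pool, a);
	c = ib_get(&pool);
	printf("next after freeing %u: %u\n", a->idx, c->idx);
	return 0;
}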
@@ -410,13 +411,13 @@ struct r600_ih { | |||
410 | unsigned wptr_old; | 411 | unsigned wptr_old; |
411 | unsigned ring_size; | 412 | unsigned ring_size; |
412 | uint64_t gpu_addr; | 413 | uint64_t gpu_addr; |
413 | uint32_t align_mask; | ||
414 | uint32_t ptr_mask; | 414 | uint32_t ptr_mask; |
415 | spinlock_t lock; | 415 | spinlock_t lock; |
416 | bool enabled; | 416 | bool enabled; |
417 | }; | 417 | }; |
418 | 418 | ||
419 | struct r600_blit { | 419 | struct r600_blit { |
420 | struct mutex mutex; | ||
420 | struct radeon_bo *shader_obj; | 421 | struct radeon_bo *shader_obj; |
421 | u64 shader_gpu_addr; | 422 | u64 shader_gpu_addr; |
422 | u32 vs_offset, ps_offset; | 423 | u32 vs_offset, ps_offset; |
@@ -465,6 +466,7 @@ struct radeon_cs_chunk { | |||
465 | }; | 466 | }; |
466 | 467 | ||
467 | struct radeon_cs_parser { | 468 | struct radeon_cs_parser { |
469 | struct device *dev; | ||
468 | struct radeon_device *rdev; | 470 | struct radeon_device *rdev; |
469 | struct drm_file *filp; | 471 | struct drm_file *filp; |
470 | /* chunks */ | 472 | /* chunks */ |
@@ -660,6 +662,13 @@ struct radeon_asic { | |||
660 | void (*hpd_fini)(struct radeon_device *rdev); | 662 | void (*hpd_fini)(struct radeon_device *rdev); |
661 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 663 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
662 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 664 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
665 | /* ioctl hw specific callback. Some hw might want to perform a special | ||
666 | * operation on a specific ioctl. For instance, on wait idle some hw | ||
667 | * might want to perform an HDP flush through MMIO, as it seems that | ||
668 | * some R6XX/R7XX hw doesn't take the HDP flush into account if it is | ||
669 | * programmed through the ring. | ||
670 | */ | ||
671 | void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); | ||
663 | }; | 672 | }; |
664 | 673 | ||
665 | /* | 674 | /* |
@@ -847,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
847 | 856 | ||
848 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 857 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
849 | { | 858 | { |
850 | if (reg < 0x10000) | 859 | if (reg < rdev->rmmio_size) |
851 | return readl(((void __iomem *)rdev->rmmio) + reg); | 860 | return readl(((void __iomem *)rdev->rmmio) + reg); |
852 | else { | 861 | else { |
853 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | 862 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
@@ -857,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | |||
857 | 866 | ||
858 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 867 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
859 | { | 868 | { |
860 | if (reg < 0x10000) | 869 | if (reg < rdev->rmmio_size) |
861 | writel(v, ((void __iomem *)rdev->rmmio) + reg); | 870 | writel(v, ((void __iomem *)rdev->rmmio) + reg); |
862 | else { | 871 | else { |
863 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | 872 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
@@ -1017,6 +1026,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
1017 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) | 1026 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) |
1018 | 1027 | ||
1019 | /* Common functions */ | 1028 | /* Common functions */ |
1029 | /* AGP */ | ||
1030 | extern void radeon_agp_disable(struct radeon_device *rdev); | ||
1020 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); | 1031 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
1021 | extern int radeon_modeset_init(struct radeon_device *rdev); | 1032 | extern int radeon_modeset_init(struct radeon_device *rdev); |
1022 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 1033 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
@@ -1140,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev); | |||
1140 | extern void r600_cp_stop(struct radeon_device *rdev); | 1151 | extern void r600_cp_stop(struct radeon_device *rdev); |
1141 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | 1152 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); |
1142 | extern int r600_cp_resume(struct radeon_device *rdev); | 1153 | extern int r600_cp_resume(struct radeon_device *rdev); |
1154 | extern void r600_cp_fini(struct radeon_device *rdev); | ||
1143 | extern int r600_count_pipe_bits(uint32_t val); | 1155 | extern int r600_count_pipe_bits(uint32_t val); |
1144 | extern int r600_gart_clear_page(struct radeon_device *rdev, int i); | 1156 | extern int r600_gart_clear_page(struct radeon_device *rdev, int i); |
1145 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); | 1157 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); |
@@ -1160,7 +1172,8 @@ extern int r600_irq_init(struct radeon_device *rdev); | |||
1160 | extern void r600_irq_fini(struct radeon_device *rdev); | 1172 | extern void r600_irq_fini(struct radeon_device *rdev); |
1161 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | 1173 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); |
1162 | extern int r600_irq_set(struct radeon_device *rdev); | 1174 | extern int r600_irq_set(struct radeon_device *rdev); |
1163 | 1175 | extern void r600_irq_suspend(struct radeon_device *rdev); | |
1176 | /* r600 audio */ | ||
1164 | extern int r600_audio_init(struct radeon_device *rdev); | 1177 | extern int r600_audio_init(struct radeon_device *rdev); |
1165 | extern int r600_audio_tmds_index(struct drm_encoder *encoder); | 1178 | extern int r600_audio_tmds_index(struct drm_encoder *encoder); |
1166 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | 1179 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); |
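The radeon.h hunk above adds an optional ioctl_wait_idle hook to struct radeon_asic: pre-R600 parts leave it NULL, while R600/RV770 use it to force an HDP flush through MMIO after a wait-idle ioctl. Below is a minimal stand-alone sketch of that optional-callback pattern; the types and function names are invented for illustration and are not the driver's own.

/* Sketch only: a user-space model of an ASIC table with an optional hook. */
#include <stdio.h>

struct fake_bo { int id; };

struct fake_asic {
    const char *name;
    /* NULL means the chip needs no extra work after the wait */
    void (*ioctl_wait_idle)(struct fake_bo *bo);
};

static void r600_style_hdp_flush(struct fake_bo *bo)
{
    printf("bo %d: flushing HDP cache through MMIO\n", bo->id);
}

static void wait_idle(const struct fake_asic *asic, struct fake_bo *bo)
{
    printf("%s: waiting for bo %d to go idle\n", asic->name, bo->id);
    if (asic->ioctl_wait_idle)          /* only call the hook when the ASIC provides one */
        asic->ioctl_wait_idle(bo);
}

int main(void)
{
    struct fake_bo bo = { .id = 1 };
    struct fake_asic r100 = { "r100", NULL };
    struct fake_asic r600 = { "r600", r600_style_hdp_flush };

    wait_idle(&r100, &bo);   /* no-op hook */
    wait_idle(&r600, &bo);   /* triggers the extra MMIO flush */
    return 0;
}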
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 220f454ea9fa..c0681a5556dc 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev) | |||
144 | 144 | ||
145 | ret = drm_agp_info(rdev->ddev, &info); | 145 | ret = drm_agp_info(rdev->ddev, &info); |
146 | if (ret) { | 146 | if (ret) { |
147 | drm_agp_release(rdev->ddev); | ||
147 | DRM_ERROR("Unable to get AGP info: %d\n", ret); | 148 | DRM_ERROR("Unable to get AGP info: %d\n", ret); |
148 | return ret; | 149 | return ret; |
149 | } | 150 | } |
151 | |||
152 | if (rdev->ddev->agp->agp_info.aper_size < 32) { | ||
153 | drm_agp_release(rdev->ddev); | ||
154 | dev_warn(rdev->dev, "AGP aperture too small (%zuM) " | ||
155 | "need at least 32M, disabling AGP\n", | ||
156 | rdev->ddev->agp->agp_info.aper_size); | ||
157 | return -EINVAL; | ||
158 | } | ||
159 | |||
150 | mode.mode = info.mode; | 160 | mode.mode = info.mode; |
151 | agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; | 161 | agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; |
152 | is_v3 = !!(agp_status & RADEON_AGPv3_MODE); | 162 | is_v3 = !!(agp_status & RADEON_AGPv3_MODE); |
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev) | |||
221 | ret = drm_agp_enable(rdev->ddev, mode); | 231 | ret = drm_agp_enable(rdev->ddev, mode); |
222 | if (ret) { | 232 | if (ret) { |
223 | DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); | 233 | DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); |
234 | drm_agp_release(rdev->ddev); | ||
224 | return ret; | 235 | return ret; |
225 | } | 236 | } |
226 | 237 | ||
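The radeon_agp.c changes above make every early exit release the AGP backend again and reject apertures smaller than 32M. A hedged sketch of that acquire/validate/release-on-error shape follows, using made-up helpers rather than the real drm_agp_* API.

/* Sketch only: once the resource is acquired, every failure exit releases it. */
#include <stdio.h>
#include <errno.h>

static int agp_acquired;

static int fake_agp_acquire(void) { agp_acquired = 1; return 0; }
static void fake_agp_release(void) { agp_acquired = 0; }

static int fake_agp_init(size_t aperture_mb)
{
    int ret = fake_agp_acquire();
    if (ret)
        return ret;

    if (aperture_mb < 32) {
        /* too small to be useful: undo the acquire before bailing out */
        fake_agp_release();
        fprintf(stderr, "AGP aperture too small (%zuM), need at least 32M\n",
                aperture_mb);
        return -EINVAL;
    }
    return 0;
}

int main(void)
{
    printf("init 16M -> %d (acquired=%d)\n", fake_agp_init(16), agp_acquired);
    printf("init 64M -> %d (acquired=%d)\n", fake_agp_init(64), agp_acquired);
    return 0;
}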
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2fbd2e4e9df..05ee1aeac3fd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = { | |||
117 | .hpd_fini = &r100_hpd_fini, | 117 | .hpd_fini = &r100_hpd_fini, |
118 | .hpd_sense = &r100_hpd_sense, | 118 | .hpd_sense = &r100_hpd_sense, |
119 | .hpd_set_polarity = &r100_hpd_set_polarity, | 119 | .hpd_set_polarity = &r100_hpd_set_polarity, |
120 | .ioctl_wait_idle = NULL, | ||
120 | }; | 121 | }; |
121 | 122 | ||
122 | 123 | ||
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = { | |||
176 | .hpd_fini = &r100_hpd_fini, | 177 | .hpd_fini = &r100_hpd_fini, |
177 | .hpd_sense = &r100_hpd_sense, | 178 | .hpd_sense = &r100_hpd_sense, |
178 | .hpd_set_polarity = &r100_hpd_set_polarity, | 179 | .hpd_set_polarity = &r100_hpd_set_polarity, |
180 | .ioctl_wait_idle = NULL, | ||
179 | }; | 181 | }; |
180 | 182 | ||
181 | /* | 183 | /* |
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = { | |||
219 | .hpd_fini = &r100_hpd_fini, | 221 | .hpd_fini = &r100_hpd_fini, |
220 | .hpd_sense = &r100_hpd_sense, | 222 | .hpd_sense = &r100_hpd_sense, |
221 | .hpd_set_polarity = &r100_hpd_set_polarity, | 223 | .hpd_set_polarity = &r100_hpd_set_polarity, |
224 | .ioctl_wait_idle = NULL, | ||
222 | }; | 225 | }; |
223 | 226 | ||
224 | 227 | ||
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = { | |||
267 | .hpd_fini = &r100_hpd_fini, | 270 | .hpd_fini = &r100_hpd_fini, |
268 | .hpd_sense = &r100_hpd_sense, | 271 | .hpd_sense = &r100_hpd_sense, |
269 | .hpd_set_polarity = &r100_hpd_set_polarity, | 272 | .hpd_set_polarity = &r100_hpd_set_polarity, |
273 | .ioctl_wait_idle = NULL, | ||
270 | }; | 274 | }; |
271 | 275 | ||
272 | 276 | ||
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = { | |||
323 | .hpd_fini = &rs600_hpd_fini, | 327 | .hpd_fini = &rs600_hpd_fini, |
324 | .hpd_sense = &rs600_hpd_sense, | 328 | .hpd_sense = &rs600_hpd_sense, |
325 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 329 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
330 | .ioctl_wait_idle = NULL, | ||
326 | }; | 331 | }; |
327 | 332 | ||
328 | 333 | ||
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = { | |||
370 | .hpd_fini = &rs600_hpd_fini, | 375 | .hpd_fini = &rs600_hpd_fini, |
371 | .hpd_sense = &rs600_hpd_sense, | 376 | .hpd_sense = &rs600_hpd_sense, |
372 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 377 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
378 | .ioctl_wait_idle = NULL, | ||
373 | }; | 379 | }; |
374 | 380 | ||
375 | 381 | ||
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = { | |||
421 | .hpd_fini = &rs600_hpd_fini, | 427 | .hpd_fini = &rs600_hpd_fini, |
422 | .hpd_sense = &rs600_hpd_sense, | 428 | .hpd_sense = &rs600_hpd_sense, |
423 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 429 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
430 | .ioctl_wait_idle = NULL, | ||
424 | }; | 431 | }; |
425 | 432 | ||
426 | 433 | ||
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = { | |||
463 | .hpd_fini = &rs600_hpd_fini, | 470 | .hpd_fini = &rs600_hpd_fini, |
464 | .hpd_sense = &rs600_hpd_sense, | 471 | .hpd_sense = &rs600_hpd_sense, |
465 | .hpd_set_polarity = &rs600_hpd_set_polarity, | 472 | .hpd_set_polarity = &rs600_hpd_set_polarity, |
473 | .ioctl_wait_idle = NULL, | ||
466 | }; | 474 | }; |
467 | 475 | ||
468 | /* | 476 | /* |
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev); | |||
504 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 512 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
505 | void r600_hpd_set_polarity(struct radeon_device *rdev, | 513 | void r600_hpd_set_polarity(struct radeon_device *rdev, |
506 | enum radeon_hpd_id hpd); | 514 | enum radeon_hpd_id hpd); |
515 | extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); | ||
507 | 516 | ||
508 | static struct radeon_asic r600_asic = { | 517 | static struct radeon_asic r600_asic = { |
509 | .init = &r600_init, | 518 | .init = &r600_init, |
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = { | |||
538 | .hpd_fini = &r600_hpd_fini, | 547 | .hpd_fini = &r600_hpd_fini, |
539 | .hpd_sense = &r600_hpd_sense, | 548 | .hpd_sense = &r600_hpd_sense, |
540 | .hpd_set_polarity = &r600_hpd_set_polarity, | 549 | .hpd_set_polarity = &r600_hpd_set_polarity, |
550 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
541 | }; | 551 | }; |
542 | 552 | ||
543 | /* | 553 | /* |
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = { | |||
582 | .hpd_fini = &r600_hpd_fini, | 592 | .hpd_fini = &r600_hpd_fini, |
583 | .hpd_sense = &r600_hpd_sense, | 593 | .hpd_sense = &r600_hpd_sense, |
584 | .hpd_set_polarity = &r600_hpd_set_polarity, | 594 | .hpd_set_polarity = &r600_hpd_set_polarity, |
595 | .ioctl_wait_idle = r600_ioctl_wait_idle, | ||
585 | }; | 596 | }; |
586 | 597 | ||
587 | #endif | 598 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fa82ca74324e..4d8831548a5f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
206 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 206 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
207 | } | 207 | } |
208 | 208 | ||
209 | /* Asrock RS600 board lists the DVI port as HDMI */ | ||
210 | if ((dev->pdev->device == 0x7941) && | ||
211 | (dev->pdev->subsystem_vendor == 0x1849) && | ||
212 | (dev->pdev->subsystem_device == 0x7941)) { | ||
213 | if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
214 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
215 | *connector_type = DRM_MODE_CONNECTOR_DVID; | ||
216 | } | ||
217 | |||
209 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 218 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
210 | if ((dev->pdev->device == 0x7941) && | 219 | if ((dev->pdev->device == 0x7941) && |
211 | (dev->pdev->subsystem_vendor == 0x147b) && | 220 | (dev->pdev->subsystem_vendor == 0x147b) && |
@@ -287,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
287 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 296 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
288 | } | 297 | } |
289 | 298 | ||
299 | /* XFX Pine Group device rv730 reports no VGA DDC lines | ||
300 | * even though they are wired up to i2c gpio record 0x93 | ||
301 | */ | ||
302 | if ((dev->pdev->device == 0x9498) && | ||
303 | (dev->pdev->subsystem_vendor == 0x1682) && | ||
304 | (dev->pdev->subsystem_device == 0x2452)) { | ||
305 | struct radeon_device *rdev = dev->dev_private; | ||
306 | *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); | ||
307 | } | ||
290 | return true; | 308 | return true; |
291 | } | 309 | } |
292 | 310 | ||
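Both quirks added above follow the same pattern: a board is identified by its PCI device and subsystem IDs, and a BIOS mistake is patched over (a mislabelled connector, or a missing DDC record). A small table-driven illustration of that idea is sketched below; the simplified types are not the driver's own.

/* Sketch only: match PCI IDs and override a connector type. */
#include <stdio.h>
#include <stdint.h>

enum conn_type { CONN_DVID, CONN_HDMIA };

struct pci_ids { uint16_t device, sub_vendor, sub_device; };

struct quirk {
    struct pci_ids ids;
    enum conn_type from, to;
};

static const struct quirk quirks[] = {
    /* e.g. a board that lists its DVI port as HDMI */
    { { 0x7941, 0x1849, 0x7941 }, CONN_HDMIA, CONN_DVID },
};

static enum conn_type apply_quirks(struct pci_ids ids, enum conn_type type)
{
    for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
        const struct quirk *q = &quirks[i];
        if (ids.device == q->ids.device &&
            ids.sub_vendor == q->ids.sub_vendor &&
            ids.sub_device == q->ids.sub_device &&
            type == q->from)
            return q->to;
    }
    return type;
}

int main(void)
{
    struct pci_ids board = { 0x7941, 0x1849, 0x7941 };
    /* 0 = CONN_DVID after the override */
    printf("connector after quirks: %d\n", apply_quirks(board, CONN_HDMIA));
    return 0;
}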
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 4ddfd4b5bc51..7932dc4d6b90 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
65 | if (r) { | 65 | if (r) { |
66 | goto out_cleanup; | 66 | goto out_cleanup; |
67 | } | 67 | } |
68 | start_jiffies = jiffies; | 68 | |
69 | for (i = 0; i < n; i++) { | 69 | /* r100 doesn't have a dma engine, so skip the test */ |
70 | r = radeon_fence_create(rdev, &fence); | 70 | if (rdev->asic->copy_dma) { |
71 | if (r) { | 71 | |
72 | goto out_cleanup; | 72 | start_jiffies = jiffies; |
73 | for (i = 0; i < n; i++) { | ||
74 | r = radeon_fence_create(rdev, &fence); | ||
75 | if (r) { | ||
76 | goto out_cleanup; | ||
77 | } | ||
78 | |||
79 | r = radeon_copy_dma(rdev, saddr, daddr, | ||
80 | size / RADEON_GPU_PAGE_SIZE, fence); | ||
81 | |||
82 | if (r) { | ||
83 | goto out_cleanup; | ||
84 | } | ||
85 | r = radeon_fence_wait(fence, false); | ||
86 | if (r) { | ||
87 | goto out_cleanup; | ||
88 | } | ||
89 | radeon_fence_unref(&fence); | ||
73 | } | 90 | } |
74 | r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); | 91 | end_jiffies = jiffies; |
75 | if (r) { | 92 | time = end_jiffies - start_jiffies; |
76 | goto out_cleanup; | 93 | time = jiffies_to_msecs(time); |
94 | if (time > 0) { | ||
95 | i = ((n * size) >> 10) / time; | ||
96 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from" | ||
97 | " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n", | ||
98 | n, size >> 10, | ||
99 | sdomain, ddomain, time, | ||
100 | i, i * 1000, (i * 1000) / 1024); | ||
77 | } | 101 | } |
78 | r = radeon_fence_wait(fence, false); | ||
79 | if (r) { | ||
80 | goto out_cleanup; | ||
81 | } | ||
82 | radeon_fence_unref(&fence); | ||
83 | } | ||
84 | end_jiffies = jiffies; | ||
85 | time = end_jiffies - start_jiffies; | ||
86 | time = jiffies_to_msecs(time); | ||
87 | if (time > 0) { | ||
88 | i = ((n * size) >> 10) / time; | ||
89 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d" | ||
90 | " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10, | ||
91 | sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024); | ||
92 | } | 102 | } |
103 | |||
93 | start_jiffies = jiffies; | 104 | start_jiffies = jiffies; |
94 | for (i = 0; i < n; i++) { | 105 | for (i = 0; i < n; i++) { |
95 | r = radeon_fence_create(rdev, &fence); | 106 | r = radeon_fence_create(rdev, &fence); |
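The reworked benchmark above still reports throughput the same way: total data moved is converted to KiB and divided by the elapsed milliseconds. A quick stand-alone rendition of that arithmetic, with made-up numbers, is shown below.

/* Sketch only: the kb/ms, kb/s and M/s figures printed by the benchmark. */
#include <stdio.h>

int main(void)
{
    unsigned n = 1024;                 /* number of buffer moves */
    unsigned size = 1024 * 1024;       /* bytes per move */
    unsigned long time_ms = 512;       /* measured duration */

    unsigned kb_per_ms = (unsigned)(((unsigned long long)n * size >> 10) / time_ms);
    printf("%u moves of %ukb in %lums (%ukb/ms %ukb/s %uM/s)\n",
           n, size >> 10, time_ms, kb_per_ms, kb_per_ms * 1000,
           (kb_per_ms * 1000) / 1024);
    return 0;
}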
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 812f24dbc2a8..73c4405bf42f 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) | |||
56 | else if (post_div == 3) | 56 | else if (post_div == 3) |
57 | sclk >>= 2; | 57 | sclk >>= 2; |
58 | else if (post_div == 4) | 58 | else if (post_div == 4) |
59 | sclk >>= 4; | 59 | sclk >>= 3; |
60 | 60 | ||
61 | return sclk; | 61 | return sclk; |
62 | } | 62 | } |
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) | |||
86 | else if (post_div == 3) | 86 | else if (post_div == 3) |
87 | mclk >>= 2; | 87 | mclk >>= 2; |
88 | else if (post_div == 4) | 88 | else if (post_div == 4) |
89 | mclk >>= 4; | 89 | mclk >>= 3; |
90 | 90 | ||
91 | return mclk; | 91 | return mclk; |
92 | } | 92 | } |
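The one-character change above corrects the decode of the legacy PLL post-divider field: code 4 stands for a divide-by-8, which is a shift of 3, not 4. A tiny sketch of the decode as it reads after the fix; only the codes visible in the hunk are shown, the surrounding cases of the real function are omitted.

/* Sketch only: post-divider code to shift mapping. */
#include <stdio.h>
#include <stdint.h>

static uint32_t apply_post_div(uint32_t sclk, unsigned code)
{
    if (code == 3)
        return sclk >> 2;   /* divide by 4 */
    else if (code == 4)
        return sclk >> 3;   /* divide by 8 (previously a wrong >> 4) */
    return sclk;
}

int main(void)
{
    printf("code 3: %u -> %u\n", 800u, apply_post_div(800, 3)); /* 200 */
    printf("code 4: %u -> %u\n", 800u, apply_post_div(800, 4)); /* 100 */
    return 0;
}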
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 579c8920e081..e7b19440102e 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder | |||
971 | lvds->native_mode.vdisplay); | 971 | lvds->native_mode.vdisplay); |
972 | 972 | ||
973 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); | 973 | lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); |
974 | if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) | 974 | lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000); |
975 | lvds->panel_vcc_delay = 2000; | ||
976 | 975 | ||
977 | lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); | 976 | lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); |
978 | lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; | 977 | lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 55266416fa47..65f81942f399 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
580 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 580 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
581 | struct drm_encoder *encoder; | 581 | struct drm_encoder *encoder; |
582 | struct drm_encoder_helper_funcs *encoder_funcs; | 582 | struct drm_encoder_helper_funcs *encoder_funcs; |
583 | bool dret; | 583 | bool dret = false; |
584 | enum drm_connector_status ret = connector_status_disconnected; | 584 | enum drm_connector_status ret = connector_status_disconnected; |
585 | 585 | ||
586 | encoder = radeon_best_single_encoder(connector); | 586 | encoder = radeon_best_single_encoder(connector); |
587 | if (!encoder) | 587 | if (!encoder) |
588 | ret = connector_status_disconnected; | 588 | ret = connector_status_disconnected; |
589 | 589 | ||
590 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); | 590 | if (radeon_connector->ddc_bus) { |
591 | dret = radeon_ddc_probe(radeon_connector); | 591 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
592 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | 592 | dret = radeon_ddc_probe(radeon_connector); |
593 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | ||
594 | } | ||
593 | if (dret) { | 595 | if (dret) { |
594 | if (radeon_connector->edid) { | 596 | if (radeon_connector->edid) { |
595 | kfree(radeon_connector->edid); | 597 | kfree(radeon_connector->edid); |
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
740 | struct drm_mode_object *obj; | 742 | struct drm_mode_object *obj; |
741 | int i; | 743 | int i; |
742 | enum drm_connector_status ret = connector_status_disconnected; | 744 | enum drm_connector_status ret = connector_status_disconnected; |
743 | bool dret; | 745 | bool dret = false; |
744 | 746 | ||
745 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); | 747 | if (radeon_connector->ddc_bus) { |
746 | dret = radeon_ddc_probe(radeon_connector); | 748 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
747 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | 749 | dret = radeon_ddc_probe(radeon_connector); |
750 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | ||
751 | } | ||
748 | if (dret) { | 752 | if (dret) { |
749 | if (radeon_connector->edid) { | 753 | if (radeon_connector->edid) { |
750 | kfree(radeon_connector->edid); | 754 | kfree(radeon_connector->edid); |
@@ -776,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
776 | * connected and the DVI port disconnected. If the edid doesn't | 780 | * connected and the DVI port disconnected. If the edid doesn't |
777 | * say HDMI, vice versa. | 781 | * say HDMI, vice versa. |
778 | */ | 782 | */ |
779 | if (radeon_connector->shared_ddc && connector_status_connected) { | 783 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
780 | struct drm_device *dev = connector->dev; | 784 | struct drm_device *dev = connector->dev; |
781 | struct drm_connector *list_connector; | 785 | struct drm_connector *list_connector; |
782 | struct radeon_connector *list_radeon_connector; | 786 | struct radeon_connector *list_radeon_connector; |
@@ -1056,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1056 | return; | 1060 | return; |
1057 | } | 1061 | } |
1058 | if (radeon_connector->ddc_bus && i2c_bus->valid) { | 1062 | if (radeon_connector->ddc_bus && i2c_bus->valid) { |
1059 | if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, | 1063 | if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { |
1060 | sizeof(struct radeon_i2c_bus_rec)) == 0) { | ||
1061 | radeon_connector->shared_ddc = true; | 1064 | radeon_connector->shared_ddc = true; |
1062 | shared_ddc = true; | 1065 | shared_ddc = true; |
1063 | } | 1066 | } |
@@ -1343,7 +1346,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1343 | radeon_connector->dac_load_detect = false; | 1346 | radeon_connector->dac_load_detect = false; |
1344 | drm_connector_attach_property(&radeon_connector->base, | 1347 | drm_connector_attach_property(&radeon_connector->base, |
1345 | rdev->mode_info.load_detect_property, | 1348 | rdev->mode_info.load_detect_property, |
1346 | 1); | 1349 | radeon_connector->dac_load_detect); |
1347 | drm_connector_attach_property(&radeon_connector->base, | 1350 | drm_connector_attach_property(&radeon_connector->base, |
1348 | rdev->mode_info.tv_std_property, | 1351 | rdev->mode_info.tv_std_property, |
1349 | radeon_combios_get_tv_info(rdev)); | 1352 | radeon_combios_get_tv_info(rdev)); |
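One of the fixes above replaces "if (radeon_connector->shared_ddc && connector_status_connected)" with a comparison against ret: a bare enum constant is always non-zero, so the old condition never looked at the detection result. A minimal demonstration of that pitfall follows, using a stand-in enum rather than the DRM one.

/* Sketch only: testing an enum constant vs. comparing the result to it. */
#include <stdio.h>
#include <stdbool.h>

enum status { status_disconnected = 0, status_connected = 1, status_unknown = 2 };

int main(void)
{
    enum status ret = status_disconnected;
    bool shared_ddc = true;

    if (shared_ddc && status_connected)          /* buggy: constant is non-zero, branch always taken */
        printf("buggy check runs even though ret=%d\n", ret);

    if (shared_ddc && ret == status_connected)   /* fixed: actually looks at ret */
        printf("fixed check runs\n");
    else
        printf("fixed check correctly skipped (ret=%d)\n", ret);

    return 0;
}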
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 65590a0f1d93..e9d085021c1f 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
86 | &p->validated); | 86 | &p->validated); |
87 | } | 87 | } |
88 | } | 88 | } |
89 | return radeon_bo_list_validate(&p->validated, p->ib->fence); | 89 | return radeon_bo_list_validate(&p->validated); |
90 | } | 90 | } |
91 | 91 | ||
92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | 92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) |
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
189 | { | 189 | { |
190 | unsigned i; | 190 | unsigned i; |
191 | 191 | ||
192 | if (error) { | 192 | if (!error && parser->ib) { |
193 | radeon_bo_list_unvalidate(&parser->validated, | 193 | radeon_bo_list_fence(&parser->validated, parser->ib->fence); |
194 | parser->ib->fence); | ||
195 | } else { | ||
196 | radeon_bo_list_unreserve(&parser->validated); | ||
197 | } | 194 | } |
195 | radeon_bo_list_unreserve(&parser->validated); | ||
198 | for (i = 0; i < parser->nrelocs; i++) { | 196 | for (i = 0; i < parser->nrelocs; i++) { |
199 | if (parser->relocs[i].gobj) { | 197 | if (parser->relocs[i].gobj) { |
200 | mutex_lock(&parser->rdev->ddev->struct_mutex); | 198 | mutex_lock(&parser->rdev->ddev->struct_mutex); |
@@ -231,6 +229,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
231 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); | 229 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
232 | parser.filp = filp; | 230 | parser.filp = filp; |
233 | parser.rdev = rdev; | 231 | parser.rdev = rdev; |
232 | parser.dev = rdev->dev; | ||
234 | r = radeon_cs_parser_init(&parser, data); | 233 | r = radeon_cs_parser_init(&parser, data); |
235 | if (r) { | 234 | if (r) { |
236 | DRM_ERROR("Failed to initialize parser !\n"); | 235 | DRM_ERROR("Failed to initialize parser !\n"); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0c51f8e46613..768b1509fa03 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
544 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | 544 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; |
545 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | 545 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; |
546 | } | 546 | } |
547 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
547 | } | 548 | } |
548 | 549 | ||
549 | void radeon_check_arguments(struct radeon_device *rdev) | 550 | void radeon_check_arguments(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0ec491ead2ff..7e17a362b54b 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
278 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); | 278 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); |
279 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | 279 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) |
280 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); | 280 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); |
281 | if (radeon_connector->ddc_bus) | 281 | if (radeon_connector->ddc_bus) { |
282 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", | 282 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", |
283 | radeon_connector->ddc_bus->rec.mask_clk_reg, | 283 | radeon_connector->ddc_bus->rec.mask_clk_reg, |
284 | radeon_connector->ddc_bus->rec.mask_data_reg, | 284 | radeon_connector->ddc_bus->rec.mask_data_reg, |
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
288 | radeon_connector->ddc_bus->rec.en_data_reg, | 288 | radeon_connector->ddc_bus->rec.en_data_reg, |
289 | radeon_connector->ddc_bus->rec.y_clk_reg, | 289 | radeon_connector->ddc_bus->rec.y_clk_reg, |
290 | radeon_connector->ddc_bus->rec.y_data_reg); | 290 | radeon_connector->ddc_bus->rec.y_data_reg); |
291 | } else { | ||
292 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | ||
293 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | ||
294 | connector->connector_type == DRM_MODE_CONNECTOR_DVID || | ||
295 | connector->connector_type == DRM_MODE_CONNECTOR_DVIA || | ||
296 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || | ||
297 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
298 | DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); | ||
299 | } | ||
291 | DRM_INFO(" Encoders:\n"); | 300 | DRM_INFO(" Encoders:\n"); |
292 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 301 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
293 | radeon_encoder = to_radeon_encoder(encoder); | 302 | radeon_encoder = to_radeon_encoder(encoder); |
@@ -357,7 +366,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
357 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 366 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
358 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 367 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { |
359 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | 368 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
360 | if (dig->dp_i2c_bus) | 369 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || |
370 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) | ||
361 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); | 371 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); |
362 | } | 372 | } |
363 | if (!radeon_connector->ddc_bus) | 373 | if (!radeon_connector->ddc_bus) |
@@ -410,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
410 | uint32_t *fb_div_p, | 420 | uint32_t *fb_div_p, |
411 | uint32_t *frac_fb_div_p, | 421 | uint32_t *frac_fb_div_p, |
412 | uint32_t *ref_div_p, | 422 | uint32_t *ref_div_p, |
413 | uint32_t *post_div_p, | 423 | uint32_t *post_div_p) |
414 | int flags) | ||
415 | { | 424 | { |
416 | uint32_t min_ref_div = pll->min_ref_div; | 425 | uint32_t min_ref_div = pll->min_ref_div; |
417 | uint32_t max_ref_div = pll->max_ref_div; | 426 | uint32_t max_ref_div = pll->max_ref_div; |
427 | uint32_t min_post_div = pll->min_post_div; | ||
428 | uint32_t max_post_div = pll->max_post_div; | ||
418 | uint32_t min_fractional_feed_div = 0; | 429 | uint32_t min_fractional_feed_div = 0; |
419 | uint32_t max_fractional_feed_div = 0; | 430 | uint32_t max_fractional_feed_div = 0; |
420 | uint32_t best_vco = pll->best_vco; | 431 | uint32_t best_vco = pll->best_vco; |
@@ -430,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
430 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); | 441 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); |
431 | freq = freq * 1000; | 442 | freq = freq * 1000; |
432 | 443 | ||
433 | if (flags & RADEON_PLL_USE_REF_DIV) | 444 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
434 | min_ref_div = max_ref_div = pll->reference_div; | 445 | min_ref_div = max_ref_div = pll->reference_div; |
435 | else { | 446 | else { |
436 | while (min_ref_div < max_ref_div-1) { | 447 | while (min_ref_div < max_ref_div-1) { |
@@ -445,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
445 | } | 456 | } |
446 | } | 457 | } |
447 | 458 | ||
448 | if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { | 459 | if (pll->flags & RADEON_PLL_USE_POST_DIV) |
460 | min_post_div = max_post_div = pll->post_div; | ||
461 | |||
462 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
449 | min_fractional_feed_div = pll->min_frac_feedback_div; | 463 | min_fractional_feed_div = pll->min_frac_feedback_div; |
450 | max_fractional_feed_div = pll->max_frac_feedback_div; | 464 | max_fractional_feed_div = pll->max_frac_feedback_div; |
451 | } | 465 | } |
452 | 466 | ||
453 | for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { | 467 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { |
454 | uint32_t ref_div; | 468 | uint32_t ref_div; |
455 | 469 | ||
456 | if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 470 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
457 | continue; | 471 | continue; |
458 | 472 | ||
459 | /* legacy radeons only have a few post_divs */ | 473 | /* legacy radeons only have a few post_divs */ |
460 | if (flags & RADEON_PLL_LEGACY) { | 474 | if (pll->flags & RADEON_PLL_LEGACY) { |
461 | if ((post_div == 5) || | 475 | if ((post_div == 5) || |
462 | (post_div == 7) || | 476 | (post_div == 7) || |
463 | (post_div == 9) || | 477 | (post_div == 9) || |
@@ -504,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
504 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; | 518 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
505 | current_freq = radeon_div(tmp, ref_div * post_div); | 519 | current_freq = radeon_div(tmp, ref_div * post_div); |
506 | 520 | ||
507 | if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { | 521 | if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
508 | error = freq - current_freq; | 522 | error = freq - current_freq; |
509 | error = error < 0 ? 0xffffffff : error; | 523 | error = error < 0 ? 0xffffffff : error; |
510 | } else | 524 | } else |
@@ -531,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
531 | best_freq = current_freq; | 545 | best_freq = current_freq; |
532 | best_error = error; | 546 | best_error = error; |
533 | best_vco_diff = vco_diff; | 547 | best_vco_diff = vco_diff; |
534 | } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || | 548 | } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || |
535 | ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || | 549 | ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || |
536 | ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || | 550 | ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || |
537 | ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || | 551 | ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || |
538 | ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || | 552 | ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || |
539 | ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { | 553 | ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { |
540 | best_post_div = post_div; | 554 | best_post_div = post_div; |
541 | best_ref_div = ref_div; | 555 | best_ref_div = ref_div; |
542 | best_feedback_div = feedback_div; | 556 | best_feedback_div = feedback_div; |
@@ -572,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
572 | uint32_t *fb_div_p, | 586 | uint32_t *fb_div_p, |
573 | uint32_t *frac_fb_div_p, | 587 | uint32_t *frac_fb_div_p, |
574 | uint32_t *ref_div_p, | 588 | uint32_t *ref_div_p, |
575 | uint32_t *post_div_p, | 589 | uint32_t *post_div_p) |
576 | int flags) | ||
577 | { | 590 | { |
578 | fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; | 591 | fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; |
579 | fixed20_12 pll_out_max, pll_out_min; | 592 | fixed20_12 pll_out_max, pll_out_min; |
@@ -667,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
667 | radeonfb_remove(dev, fb); | 680 | radeonfb_remove(dev, fb); |
668 | 681 | ||
669 | if (radeon_fb->obj) { | 682 | if (radeon_fb->obj) { |
670 | radeon_gem_object_unpin(radeon_fb->obj); | ||
671 | mutex_lock(&dev->struct_mutex); | 683 | mutex_lock(&dev->struct_mutex); |
672 | drm_gem_object_unreference(radeon_fb->obj); | 684 | drm_gem_object_unreference(radeon_fb->obj); |
673 | mutex_unlock(&dev->struct_mutex); | 685 | mutex_unlock(&dev->struct_mutex); |
@@ -715,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev, | |||
715 | struct drm_gem_object *obj; | 727 | struct drm_gem_object *obj; |
716 | 728 | ||
717 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); | 729 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); |
718 | 730 | if (obj == NULL) { | |
731 | dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " | ||
732 | "can't create framebuffer\n", mode_cmd->handle); | ||
733 | return NULL; | ||
734 | } | ||
719 | return radeon_framebuffer_create(dev, mode_cmd, obj); | 735 | return radeon_framebuffer_create(dev, mode_cmd, obj); |
720 | } | 736 | } |
721 | 737 | ||
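Throughout radeon_display.c the per-call flags argument disappears and the code reads pll->flags instead; callers configure the pll object once (see the radeon_legacy_crtc.c hunk further down) and every helper then sees the same constraints. A simplified sketch of that shape is given below, with invented flag and field names.

/* Sketch only: per-call flags folded into the object they describe. */
#include <stdio.h>
#include <stdint.h>

#define PLL_USE_REF_DIV  (1u << 0)
#define PLL_USE_POST_DIV (1u << 1)

struct pll {
    uint32_t reference_div;
    uint32_t post_div;
    uint32_t flags;          /* previously an extra "int flags" parameter */
};

static void compute(const struct pll *pll, uint32_t *ref_div, uint32_t *post_div)
{
    /* honour fixed dividers when the corresponding flag is set;
     * without the flag the real code searches a range, a fixed
     * fallback stands in for that here */
    *ref_div  = (pll->flags & PLL_USE_REF_DIV)  ? pll->reference_div : 12;
    *post_div = (pll->flags & PLL_USE_POST_DIV) ? pll->post_div      : 2;
}

int main(void)
{
    struct pll pll = { .reference_div = 4, .post_div = 8,
                       .flags = PLL_USE_REF_DIV | PLL_USE_POST_DIV };
    uint32_t r, p;
    compute(&pll, &r, &p);
    printf("ref_div=%u post_div=%u\n", r, p);
    return 0;
}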
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index e13785282a82..c57ad606504d 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -106,9 +106,10 @@ | |||
106 | * 1.29- R500 3D cmd buffer support | 106 | * 1.29- R500 3D cmd buffer support |
107 | * 1.30- Add support for occlusion queries | 107 | * 1.30- Add support for occlusion queries |
108 | * 1.31- Add support for num Z pipes from GET_PARAM | 108 | * 1.31- Add support for num Z pipes from GET_PARAM |
109 | * 1.32- fixes for rv740 setup | ||
109 | */ | 110 | */ |
110 | #define DRIVER_MAJOR 1 | 111 | #define DRIVER_MAJOR 1 |
111 | #define DRIVER_MINOR 31 | 112 | #define DRIVER_MINOR 32 |
112 | #define DRIVER_PATCHLEVEL 0 | 113 | #define DRIVER_PATCHLEVEL 0 |
113 | 114 | ||
114 | enum radeon_cp_microcode_version { | 115 | enum radeon_cp_microcode_version { |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 82eb551970b9..3c91724457ca 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t | |||
156 | return ret; | 156 | return ret; |
157 | } | 157 | } |
158 | 158 | ||
159 | static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) | ||
160 | { | ||
161 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
162 | switch (radeon_encoder->encoder_id) { | ||
163 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
164 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
165 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
166 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
167 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
168 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
169 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
170 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
171 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
172 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
173 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
174 | return true; | ||
175 | default: | ||
176 | return false; | ||
177 | } | ||
178 | } | ||
159 | void | 179 | void |
160 | radeon_link_encoder_connector(struct drm_device *dev) | 180 | radeon_link_encoder_connector(struct drm_device *dev) |
161 | { | 181 | { |
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
202 | 222 | ||
203 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 223 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
204 | radeon_connector = to_radeon_connector(connector); | 224 | radeon_connector = to_radeon_connector(connector); |
205 | if (radeon_encoder->devices & radeon_connector->devices) | 225 | if (radeon_encoder->active_device & radeon_connector->devices) |
206 | return connector; | 226 | return connector; |
207 | } | 227 | } |
208 | return NULL; | 228 | return NULL; |
@@ -676,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
676 | 696 | ||
677 | memset(&args, 0, sizeof(args)); | 697 | memset(&args, 0, sizeof(args)); |
678 | 698 | ||
679 | if (ASIC_IS_DCE32(rdev)) { | 699 | if (dig->dig_encoder) |
680 | if (dig->dig_block) | 700 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); |
681 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | 701 | else |
682 | else | 702 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); |
683 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | 703 | num = dig->dig_encoder + 1; |
684 | num = dig->dig_block + 1; | ||
685 | } else { | ||
686 | switch (radeon_encoder->encoder_id) { | ||
687 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
688 | /* XXX doesn't really matter which dig encoder we pick as long as it's | ||
689 | * not already in use | ||
690 | */ | ||
691 | if (dig_connector->linkb) | ||
692 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | ||
693 | else | ||
694 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | ||
695 | num = 1; | ||
696 | break; | ||
697 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
698 | /* Only dig2 encoder can drive LVTMA */ | ||
699 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | ||
700 | num = 2; | ||
701 | break; | ||
702 | } | ||
703 | } | ||
704 | 704 | ||
705 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); | 705 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); |
706 | 706 | ||
@@ -822,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
822 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 822 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
823 | } | 823 | } |
824 | if (ASIC_IS_DCE32(rdev)) { | 824 | if (ASIC_IS_DCE32(rdev)) { |
825 | if (dig->dig_block) | 825 | if (dig->dig_encoder == 1) |
826 | args.v2.acConfig.ucEncoderSel = 1; | 826 | args.v2.acConfig.ucEncoderSel = 1; |
827 | if (dig_connector->linkb) | 827 | if (dig_connector->linkb) |
828 | args.v2.acConfig.ucLinkSel = 1; | 828 | args.v2.acConfig.ucLinkSel = 1; |
@@ -849,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
849 | args.v2.acConfig.fCoherentMode = 1; | 849 | args.v2.acConfig.fCoherentMode = 1; |
850 | } | 850 | } |
851 | } else { | 851 | } else { |
852 | |||
852 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | 853 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; |
853 | 854 | ||
855 | if (dig->dig_encoder) | ||
856 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | ||
857 | else | ||
858 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | ||
859 | |||
854 | switch (radeon_encoder->encoder_id) { | 860 | switch (radeon_encoder->encoder_id) { |
855 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 861 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
856 | /* XXX doesn't really matter which dig encoder we pick as long as it's | ||
857 | * not already in use | ||
858 | */ | ||
859 | if (dig_connector->linkb) | ||
860 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | ||
861 | else | ||
862 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | ||
863 | if (rdev->flags & RADEON_IS_IGP) { | 862 | if (rdev->flags & RADEON_IS_IGP) { |
864 | if (radeon_encoder->pixel_clock > 165000) { | 863 | if (radeon_encoder->pixel_clock > 165000) { |
865 | if (dig_connector->igp_lane_info & 0x3) | 864 | if (dig_connector->igp_lane_info & 0x3) |
@@ -878,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
878 | } | 877 | } |
879 | } | 878 | } |
880 | break; | 879 | break; |
881 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
882 | /* Only dig2 encoder can drive LVTMA */ | ||
883 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | ||
884 | break; | ||
885 | } | 880 | } |
886 | 881 | ||
887 | if (radeon_encoder->pixel_clock > 165000) | 882 | if (radeon_encoder->pixel_clock > 165000) |
@@ -1046,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1046 | union crtc_sourc_param args; | 1041 | union crtc_sourc_param args; |
1047 | int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); | 1042 | int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); |
1048 | uint8_t frev, crev; | 1043 | uint8_t frev, crev; |
1044 | struct radeon_encoder_atom_dig *dig; | ||
1049 | 1045 | ||
1050 | memset(&args, 0, sizeof(args)); | 1046 | memset(&args, 0, sizeof(args)); |
1051 | 1047 | ||
@@ -1109,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1109 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1105 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1110 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1106 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1111 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1107 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1112 | if (ASIC_IS_DCE32(rdev)) { | 1108 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
1113 | if (radeon_crtc->crtc_id) | 1109 | dig = radeon_encoder->enc_priv; |
1114 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | 1110 | if (dig->dig_encoder) |
1115 | else | 1111 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; |
1116 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | 1112 | else |
1117 | } else { | 1113 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; |
1118 | struct drm_connector *connector; | ||
1119 | struct radeon_connector *radeon_connector; | ||
1120 | struct radeon_connector_atom_dig *dig_connector; | ||
1121 | |||
1122 | connector = radeon_get_connector_for_encoder(encoder); | ||
1123 | if (!connector) | ||
1124 | return; | ||
1125 | radeon_connector = to_radeon_connector(connector); | ||
1126 | if (!radeon_connector->con_priv) | ||
1127 | return; | ||
1128 | dig_connector = radeon_connector->con_priv; | ||
1129 | |||
1130 | /* XXX doesn't really matter which dig encoder we pick as long as it's | ||
1131 | * not already in use | ||
1132 | */ | ||
1133 | if (dig_connector->linkb) | ||
1134 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | ||
1135 | else | ||
1136 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | ||
1137 | } | ||
1138 | break; | 1114 | break; |
1139 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1115 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1140 | args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; | 1116 | args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; |
1141 | break; | 1117 | break; |
1142 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
1143 | /* Only dig2 encoder can drive LVTMA */ | ||
1144 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | ||
1145 | break; | ||
1146 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1118 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
1147 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | 1119 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
1148 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | 1120 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; |
@@ -1202,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, | |||
1202 | } | 1174 | } |
1203 | } | 1175 | } |
1204 | 1176 | ||
1177 | static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | ||
1178 | { | ||
1179 | struct drm_device *dev = encoder->dev; | ||
1180 | struct radeon_device *rdev = dev->dev_private; | ||
1181 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1182 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1183 | struct drm_encoder *test_encoder; | ||
1184 | struct radeon_encoder_atom_dig *dig; | ||
1185 | uint32_t dig_enc_in_use = 0; | ||
1186 | /* on DCE32 and encoder can driver any block so just crtc id */ | ||
1187 | if (ASIC_IS_DCE32(rdev)) { | ||
1188 | return radeon_crtc->crtc_id; | ||
1189 | } | ||
1190 | |||
1191 | /* on DCE3 - LVTMA can only be driven by DIGB */ | ||
1192 | list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { | ||
1193 | struct radeon_encoder *radeon_test_encoder; | ||
1194 | |||
1195 | if (encoder == test_encoder) | ||
1196 | continue; | ||
1197 | |||
1198 | if (!radeon_encoder_is_digital(test_encoder)) | ||
1199 | continue; | ||
1200 | |||
1201 | radeon_test_encoder = to_radeon_encoder(test_encoder); | ||
1202 | dig = radeon_test_encoder->enc_priv; | ||
1203 | |||
1204 | if (dig->dig_encoder >= 0) | ||
1205 | dig_enc_in_use |= (1 << dig->dig_encoder); | ||
1206 | } | ||
1207 | |||
1208 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { | ||
1209 | if (dig_enc_in_use & 0x2) | ||
1210 | DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n"); | ||
1211 | return 1; | ||
1212 | } | ||
1213 | if (!(dig_enc_in_use & 1)) | ||
1214 | return 0; | ||
1215 | return 1; | ||
1216 | } | ||
1217 | |||
1205 | static void | 1218 | static void |
1206 | radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | 1219 | radeon_atom_encoder_mode_set(struct drm_encoder *encoder, |
1207 | struct drm_display_mode *mode, | 1220 | struct drm_display_mode *mode, |
@@ -1214,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1214 | 1227 | ||
1215 | if (radeon_encoder->active_device & | 1228 | if (radeon_encoder->active_device & |
1216 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | 1229 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { |
1217 | if (radeon_encoder->enc_priv) { | 1230 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
1218 | struct radeon_encoder_atom_dig *dig; | 1231 | if (dig) |
1219 | 1232 | dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); | |
1220 | dig = radeon_encoder->enc_priv; | ||
1221 | dig->dig_block = radeon_crtc->crtc_id; | ||
1222 | } | ||
1223 | } | 1233 | } |
1224 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1234 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1225 | 1235 | ||
@@ -1379,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder) | |||
1379 | static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | 1389 | static void radeon_atom_encoder_disable(struct drm_encoder *encoder) |
1380 | { | 1390 | { |
1381 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1391 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1392 | struct radeon_encoder_atom_dig *dig; | ||
1382 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1393 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1394 | |||
1395 | if (radeon_encoder_is_digital(encoder)) { | ||
1396 | dig = radeon_encoder->enc_priv; | ||
1397 | dig->dig_encoder = -1; | ||
1398 | } | ||
1383 | radeon_encoder->active_device = 0; | 1399 | radeon_encoder->active_device = 0; |
1384 | } | 1400 | } |
1385 | 1401 | ||
@@ -1436,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
1436 | 1452 | ||
1437 | /* coherent mode by default */ | 1453 | /* coherent mode by default */ |
1438 | dig->coherent_mode = true; | 1454 | dig->coherent_mode = true; |
1455 | dig->dig_encoder = -1; | ||
1439 | 1456 | ||
1440 | return dig; | 1457 | return dig; |
1441 | } | 1458 | } |
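radeon_atom_pick_dig_encoder() above collects the DIG blocks already claimed by other digital encoders into a bitmask, then hands out a free one, pinning LVTMA to the second DIG on DCE3. A small stand-alone model of that selection follows, with invented data in place of the encoder list walk.

/* Sketch only: pick a free DIG block from a bitmask of blocks in use. */
#include <stdio.h>
#include <stdbool.h>

static int pick_dig_encoder(const int *others_in_use, int n_others, bool is_lvtma)
{
    unsigned in_use = 0;

    for (int i = 0; i < n_others; i++)
        if (others_in_use[i] >= 0)
            in_use |= 1u << others_in_use[i];

    if (is_lvtma)                 /* LVTMA can only be driven by the second DIG */
        return 1;
    return (in_use & 1u) ? 1 : 0; /* otherwise prefer DIG1 if it is free */
}

int main(void)
{
    int none[] = { -1 };          /* no other digital encoder active */
    int dig1_busy[] = { 0 };      /* another encoder already owns DIG1 */

    printf("free system: tmds gets DIG%d\n", pick_dig_encoder(none, 1, false) + 1);
    printf("DIG1 busy:   tmds gets DIG%d\n", pick_dig_encoder(dig1_busy, 1, false) + 1);
    printf("any system:  lvtma gets DIG%d\n", pick_dig_encoder(dig1_busy, 1, true) + 1);
    return 0;
}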
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 3ba213d1b06c..d71e346e9ab5 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev, | |||
248 | if (ret) | 248 | if (ret) |
249 | goto out_unref; | 249 | goto out_unref; |
250 | 250 | ||
251 | memset_io(fbptr, 0xff, aligned_size); | 251 | memset_io(fbptr, 0x0, aligned_size); |
252 | 252 | ||
253 | strcpy(info->fix.id, "radeondrmfb"); | 253 | strcpy(info->fix.id, "radeondrmfb"); |
254 | 254 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0e1325e18534..db8e9a355a01 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
308 | } | 308 | } |
309 | robj = gobj->driver_private; | 309 | robj = gobj->driver_private; |
310 | r = radeon_bo_wait(robj, NULL, false); | 310 | r = radeon_bo_wait(robj, NULL, false); |
311 | /* call the hw specific callback, if any */ | ||
312 | if (robj->rdev->asic->ioctl_wait_idle) | ||
313 | robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); | ||
311 | mutex_lock(&dev->struct_mutex); | 314 | mutex_lock(&dev->struct_mutex); |
312 | drm_gem_object_unreference(gobj); | 315 | drm_gem_object_unreference(gobj); |
313 | mutex_unlock(&dev->struct_mutex); | 316 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index cc27485a07ad..b6d8081e1246 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
339 | } | 339 | } |
340 | } | 340 | } |
341 | 341 | ||
342 | /* properly set crtc bpp when using atombios */ | ||
343 | void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) | ||
344 | { | ||
345 | struct drm_device *dev = crtc->dev; | ||
346 | struct radeon_device *rdev = dev->dev_private; | ||
347 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
348 | int format; | ||
349 | uint32_t crtc_gen_cntl; | ||
350 | uint32_t disp_merge_cntl; | ||
351 | uint32_t crtc_pitch; | ||
352 | |||
353 | switch (crtc->fb->bits_per_pixel) { | ||
354 | case 8: | ||
355 | format = 2; | ||
356 | break; | ||
357 | case 15: /* 555 */ | ||
358 | format = 3; | ||
359 | break; | ||
360 | case 16: /* 565 */ | ||
361 | format = 4; | ||
362 | break; | ||
363 | case 24: /* RGB */ | ||
364 | format = 5; | ||
365 | break; | ||
366 | case 32: /* xRGB */ | ||
367 | format = 6; | ||
368 | break; | ||
369 | default: | ||
370 | return; | ||
371 | } | ||
372 | |||
373 | crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + | ||
374 | ((crtc->fb->bits_per_pixel * 8) - 1)) / | ||
375 | (crtc->fb->bits_per_pixel * 8)); | ||
376 | crtc_pitch |= crtc_pitch << 16; | ||
377 | |||
378 | WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); | ||
379 | |||
380 | switch (radeon_crtc->crtc_id) { | ||
381 | case 0: | ||
382 | disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); | ||
383 | disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; | ||
384 | WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); | ||
385 | |||
386 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff; | ||
387 | crtc_gen_cntl |= (format << 8); | ||
388 | crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN; | ||
389 | WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); | ||
390 | break; | ||
391 | case 1: | ||
392 | disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); | ||
393 | disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; | ||
394 | WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); | ||
395 | |||
396 | crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff; | ||
397 | crtc_gen_cntl |= (format << 8); | ||
398 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl); | ||
399 | WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); | ||
400 | WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 342 | int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
406 | struct drm_framebuffer *old_fb) | 343 | struct drm_framebuffer *old_fb) |
407 | { | 344 | { |
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
755 | uint32_t post_divider = 0; | 692 | uint32_t post_divider = 0; |
756 | uint32_t freq = 0; | 693 | uint32_t freq = 0; |
757 | uint8_t pll_gain; | 694 | uint8_t pll_gain; |
758 | int pll_flags = RADEON_PLL_LEGACY; | ||
759 | bool use_bios_divs = false; | 695 | bool use_bios_divs = false; |
760 | /* PLL registers */ | 696 | /* PLL registers */ |
761 | uint32_t pll_ref_div = 0; | 697 | uint32_t pll_ref_div = 0; |
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
789 | else | 725 | else |
790 | pll = &rdev->clock.p1pll; | 726 | pll = &rdev->clock.p1pll; |
791 | 727 | ||
728 | pll->flags = RADEON_PLL_LEGACY; | ||
729 | |||
792 | if (mode->clock > 200000) /* range limits??? */ | 730 | if (mode->clock > 200000) /* range limits??? */ |
793 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 731 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
794 | else | 732 | else |
795 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 733 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
796 | 734 | ||
797 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 735 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
798 | if (encoder->crtc == crtc) { | 736 | if (encoder->crtc == crtc) { |
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
804 | } | 742 | } |
805 | 743 | ||
806 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 744 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
807 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | 745 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
808 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { | 746 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { |
809 | if (!rdev->is_atom_bios) { | 747 | if (!rdev->is_atom_bios) { |
810 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 748 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
819 | } | 757 | } |
820 | } | 758 | } |
821 | } | 759 | } |
822 | pll_flags |= RADEON_PLL_USE_REF_DIV; | 760 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
823 | } | 761 | } |
824 | } | 762 | } |
825 | } | 763 | } |
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
829 | if (!use_bios_divs) { | 767 | if (!use_bios_divs) { |
830 | radeon_compute_pll(pll, mode->clock, | 768 | radeon_compute_pll(pll, mode->clock, |
831 | &freq, &feedback_div, &frac_fb_div, | 769 | &freq, &feedback_div, &frac_fb_div, |
832 | &reference_div, &post_divider, | 770 | &reference_div, &post_divider); |
833 | pll_flags); | ||
834 | 771 | ||
835 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { | 772 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { |
836 | if (post_div->divider == post_divider) | 773 | if (post_div->divider == post_divider) |
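The hunks above fold the per-call pll_flags argument into struct radeon_pll itself, so radeon_compute_pll() now reads pll->flags rather than taking a flags parameter. Below is a minimal user-space sketch of that pattern only; the flag names, values and helpers are hypothetical and this is not the kernel code.

/* Sketch: selection flags carried inside the PLL descriptor instead of
 * being threaded through every helper call. */
#include <stdint.h>
#include <stdio.h>

#define PLL_LEGACY              (1u << 0)
#define PLL_PREFER_HIGH_FB_DIV  (1u << 1)
#define PLL_PREFER_LOW_REF_DIV  (1u << 2)

struct pll {
	uint32_t reference_freq;
	uint32_t flags;   /* flags for the current clock, seeded by the caller */
};

/* The compute helper reads pll->flags instead of a flags argument. */
static void compute_dividers(const struct pll *pll, uint32_t clock)
{
	if (pll->flags & PLL_PREFER_HIGH_FB_DIV)
		printf("clock %u: bias the search toward a high feedback divider\n", clock);
	else
		printf("clock %u: bias the search toward a low reference divider\n", clock);
}

int main(void)
{
	struct pll p = { .reference_freq = 27000, .flags = PLL_LEGACY };
	uint32_t mode_clock = 250000;

	/* Caller seeds the flags once per mode set, then calls the helper. */
	p.flags |= (mode_clock > 200000) ? PLL_PREFER_HIGH_FB_DIV : PLL_PREFER_LOW_REF_DIV;
	compute_dividers(&p, mode_clock);
	return 0;
}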
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 91cb041cb40d..e81b2aeb6a8f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -125,16 +125,24 @@ struct radeon_tmds_pll { | |||
125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) | 125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) |
126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
127 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 127 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
128 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | ||
128 | 129 | ||
129 | struct radeon_pll { | 130 | struct radeon_pll { |
130 | uint16_t reference_freq; | 131 | /* reference frequency */ |
131 | uint16_t reference_div; | 132 | uint32_t reference_freq; |
133 | |||
134 | /* fixed dividers */ | ||
135 | uint32_t reference_div; | ||
136 | uint32_t post_div; | ||
137 | |||
138 | /* pll in/out limits */ | ||
132 | uint32_t pll_in_min; | 139 | uint32_t pll_in_min; |
133 | uint32_t pll_in_max; | 140 | uint32_t pll_in_max; |
134 | uint32_t pll_out_min; | 141 | uint32_t pll_out_min; |
135 | uint32_t pll_out_max; | 142 | uint32_t pll_out_max; |
136 | uint16_t xclk; | 143 | uint32_t best_vco; |
137 | 144 | ||
145 | /* divider limits */ | ||
138 | uint32_t min_ref_div; | 146 | uint32_t min_ref_div; |
139 | uint32_t max_ref_div; | 147 | uint32_t max_ref_div; |
140 | uint32_t min_post_div; | 148 | uint32_t min_post_div; |
@@ -143,7 +151,12 @@ struct radeon_pll { | |||
143 | uint32_t max_feedback_div; | 151 | uint32_t max_feedback_div; |
144 | uint32_t min_frac_feedback_div; | 152 | uint32_t min_frac_feedback_div; |
145 | uint32_t max_frac_feedback_div; | 153 | uint32_t max_frac_feedback_div; |
146 | uint32_t best_vco; | 154 | |
155 | /* flags for the current clock */ | ||
156 | uint32_t flags; | ||
157 | |||
158 | /* pll id */ | ||
159 | uint32_t id; | ||
147 | }; | 160 | }; |
148 | 161 | ||
149 | struct radeon_i2c_chan { | 162 | struct radeon_i2c_chan { |
@@ -286,7 +299,7 @@ struct radeon_atom_ss { | |||
286 | struct radeon_encoder_atom_dig { | 299 | struct radeon_encoder_atom_dig { |
287 | /* atom dig */ | 300 | /* atom dig */ |
288 | bool coherent_mode; | 301 | bool coherent_mode; |
289 | int dig_block; | 302 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ |
290 | /* atom lvds */ | 303 | /* atom lvds */ |
291 | uint32_t lvds_misc; | 304 | uint32_t lvds_misc; |
292 | uint16_t panel_pwr_delay; | 305 | uint16_t panel_pwr_delay; |
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll, | |||
417 | uint32_t *fb_div_p, | 430 | uint32_t *fb_div_p, |
418 | uint32_t *frac_fb_div_p, | 431 | uint32_t *frac_fb_div_p, |
419 | uint32_t *ref_div_p, | 432 | uint32_t *ref_div_p, |
420 | uint32_t *post_div_p, | 433 | uint32_t *post_div_p); |
421 | int flags); | ||
422 | 434 | ||
423 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | 435 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, |
424 | uint64_t freq, | 436 | uint64_t freq, |
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
426 | uint32_t *fb_div_p, | 438 | uint32_t *fb_div_p, |
427 | uint32_t *frac_fb_div_p, | 439 | uint32_t *frac_fb_div_p, |
428 | uint32_t *ref_div_p, | 440 | uint32_t *ref_div_p, |
429 | uint32_t *post_div_p, | 441 | uint32_t *post_div_p); |
430 | int flags); | ||
431 | 442 | ||
432 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | 443 | extern void radeon_setup_encoder_clones(struct drm_device *dev); |
433 | 444 | ||
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); | |||
453 | 464 | ||
454 | extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 465 | extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
455 | struct drm_framebuffer *old_fb); | 466 | struct drm_framebuffer *old_fb); |
456 | extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc); | ||
457 | 467 | ||
458 | extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, | 468 | extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
459 | struct drm_file *file_priv, | 469 | struct drm_file *file_priv, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 4e636de877b2..f1da370928eb 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo) | |||
220 | 220 | ||
221 | int radeon_bo_evict_vram(struct radeon_device *rdev) | 221 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
222 | { | 222 | { |
223 | if (rdev->flags & RADEON_IS_IGP) { | 223 | /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ |
224 | if (0 && (rdev->flags & RADEON_IS_IGP)) { | ||
224 | if (rdev->mc.igp_sideport_enabled == false) | 225 | if (rdev->mc.igp_sideport_enabled == false) |
225 | /* Useless to evict on IGP chips */ | 226 | /* Useless to evict on IGP chips */ |
226 | return 0; | 227 | return 0; |
@@ -305,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head) | |||
305 | } | 306 | } |
306 | } | 307 | } |
307 | 308 | ||
308 | int radeon_bo_list_validate(struct list_head *head, void *fence) | 309 | int radeon_bo_list_validate(struct list_head *head) |
309 | { | 310 | { |
310 | struct radeon_bo_list *lobj; | 311 | struct radeon_bo_list *lobj; |
311 | struct radeon_bo *bo; | 312 | struct radeon_bo *bo; |
312 | struct radeon_fence *old_fence = NULL; | ||
313 | int r; | 313 | int r; |
314 | 314 | ||
315 | r = radeon_bo_list_reserve(head); | 315 | r = radeon_bo_list_reserve(head); |
@@ -333,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence) | |||
333 | } | 333 | } |
334 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); | 334 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); |
335 | lobj->tiling_flags = bo->tiling_flags; | 335 | lobj->tiling_flags = bo->tiling_flags; |
336 | if (fence) { | ||
337 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; | ||
338 | bo->tbo.sync_obj = radeon_fence_ref(fence); | ||
339 | bo->tbo.sync_obj_arg = NULL; | ||
340 | } | ||
341 | if (old_fence) { | ||
342 | radeon_fence_unref(&old_fence); | ||
343 | } | ||
344 | } | 336 | } |
345 | return 0; | 337 | return 0; |
346 | } | 338 | } |
347 | 339 | ||
348 | void radeon_bo_list_unvalidate(struct list_head *head, void *fence) | 340 | void radeon_bo_list_fence(struct list_head *head, void *fence) |
349 | { | 341 | { |
350 | struct radeon_bo_list *lobj; | 342 | struct radeon_bo_list *lobj; |
351 | struct radeon_fence *old_fence; | 343 | struct radeon_bo *bo; |
352 | 344 | struct radeon_fence *old_fence = NULL; | |
353 | if (fence) | 345 | |
354 | list_for_each_entry(lobj, head, list) { | 346 | list_for_each_entry(lobj, head, list) { |
355 | old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); | 347 | bo = lobj->bo; |
356 | if (old_fence == fence) { | 348 | spin_lock(&bo->tbo.lock); |
357 | lobj->bo->tbo.sync_obj = NULL; | 349 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; |
358 | radeon_fence_unref(&old_fence); | 350 | bo->tbo.sync_obj = radeon_fence_ref(fence); |
359 | } | 351 | bo->tbo.sync_obj_arg = NULL; |
352 | spin_unlock(&bo->tbo.lock); | ||
353 | if (old_fence) { | ||
354 | radeon_fence_unref(&old_fence); | ||
360 | } | 355 | } |
361 | radeon_bo_list_unreserve(head); | 356 | } |
362 | } | 357 | } |
363 | 358 | ||
364 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 359 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
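The radeon_object.c hunk above splits validation from fencing: radeon_bo_list_validate() no longer attaches a fence, and the new radeon_bo_list_fence() walks the list after command submission, installing the fresh fence under each buffer's lock and dropping the old reference outside it. A rough user-space sketch of that swap, assuming a plain integer reference count; all names are hypothetical, not the kernel API.

/* Sketch: attach one refcounted fence to every object in a list, replacing
 * and releasing any previous fence under a per-object lock. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
	int seq;
};

struct object {
	pthread_mutex_t lock;
	struct fence *sync_obj;   /* fence this object currently waits on */
	struct object *next;
};

static struct fence *fence_ref(struct fence *f) { f->refcount++; return f; }

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

/* Install the new fence under the object lock; drop the old one outside. */
static void list_fence(struct object *head, struct fence *fence)
{
	for (struct object *o = head; o; o = o->next) {
		struct fence *old;

		pthread_mutex_lock(&o->lock);
		old = o->sync_obj;
		o->sync_obj = fence_ref(fence);
		pthread_mutex_unlock(&o->lock);
		if (old)
			fence_unref(&old);
	}
}

int main(void)
{
	struct object a = { .sync_obj = NULL, .next = NULL };
	struct fence *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	pthread_mutex_init(&a.lock, NULL);
	f->refcount = 1;   /* creator's reference */
	f->seq = 42;
	list_fence(&a, f);
	printf("object waits on fence %d (refcount %d)\n", a.sync_obj->seq, a.sync_obj->refcount);
	fence_unref(&f);           /* drop the creator's reference */
	fence_unref(&a.sync_obj);  /* drop the list's reference */
	return 0;
}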
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index a02f18011ad1..7ab43de1e244 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | |||
156 | struct list_head *head); | 156 | struct list_head *head); |
157 | extern int radeon_bo_list_reserve(struct list_head *head); | 157 | extern int radeon_bo_list_reserve(struct list_head *head); |
158 | extern void radeon_bo_list_unreserve(struct list_head *head); | 158 | extern void radeon_bo_list_unreserve(struct list_head *head); |
159 | extern int radeon_bo_list_validate(struct list_head *head, void *fence); | 159 | extern int radeon_bo_list_validate(struct list_head *head); |
160 | extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); | 160 | extern void radeon_bo_list_fence(struct list_head *head, void *fence); |
161 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | 161 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
162 | struct vm_area_struct *vma); | 162 | struct vm_area_struct *vma); |
163 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | 163 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 4d12b2d17b4d..6579eb4c1f28 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) | |||
41 | { | 41 | { |
42 | struct radeon_fence *fence; | 42 | struct radeon_fence *fence; |
43 | struct radeon_ib *nib; | 43 | struct radeon_ib *nib; |
44 | unsigned long i; | 44 | int r = 0, i, c; |
45 | int r = 0; | ||
46 | 45 | ||
47 | *ib = NULL; | 46 | *ib = NULL; |
48 | r = radeon_fence_create(rdev, &fence); | 47 | r = radeon_fence_create(rdev, &fence); |
49 | if (r) { | 48 | if (r) { |
50 | DRM_ERROR("failed to create fence for new IB\n"); | 49 | dev_err(rdev->dev, "failed to create fence for new IB\n"); |
51 | return r; | 50 | return r; |
52 | } | 51 | } |
53 | mutex_lock(&rdev->ib_pool.mutex); | 52 | mutex_lock(&rdev->ib_pool.mutex); |
54 | i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | 53 | for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) { |
55 | if (i < RADEON_IB_POOL_SIZE) { | 54 | i &= (RADEON_IB_POOL_SIZE - 1); |
56 | set_bit(i, rdev->ib_pool.alloc_bm); | 55 | if (rdev->ib_pool.ibs[i].free) { |
57 | rdev->ib_pool.ibs[i].length_dw = 0; | 56 | nib = &rdev->ib_pool.ibs[i]; |
58 | *ib = &rdev->ib_pool.ibs[i]; | 57 | break; |
59 | mutex_unlock(&rdev->ib_pool.mutex); | 58 | } |
60 | goto out; | ||
61 | } | 59 | } |
62 | if (list_empty(&rdev->ib_pool.scheduled_ibs)) { | 60 | if (nib == NULL) { |
63 | /* we go do nothings here */ | 61 | /* This should never happen, it means we allocated all |
62 | * IB and haven't scheduled one yet, return EBUSY to | ||
63 | * userspace hoping that on ioctl recall we get better | ||
64 | * luck | ||
65 | */ | ||
66 | dev_err(rdev->dev, "no free indirect buffer !\n"); | ||
64 | mutex_unlock(&rdev->ib_pool.mutex); | 67 | mutex_unlock(&rdev->ib_pool.mutex); |
65 | DRM_ERROR("all IB allocated none scheduled.\n"); | 68 | radeon_fence_unref(&fence); |
66 | r = -EINVAL; | 69 | return -EBUSY; |
67 | goto out; | ||
68 | } | 70 | } |
69 | /* get the first ib on the scheduled list */ | 71 | rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1); |
70 | nib = list_entry(rdev->ib_pool.scheduled_ibs.next, | 72 | nib->free = false; |
71 | struct radeon_ib, list); | 73 | if (nib->fence) { |
72 | if (nib->fence == NULL) { | ||
73 | /* we go do nothings here */ | ||
74 | mutex_unlock(&rdev->ib_pool.mutex); | 74 | mutex_unlock(&rdev->ib_pool.mutex); |
75 | DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); | 75 | r = radeon_fence_wait(nib->fence, false); |
76 | r = -EINVAL; | 76 | if (r) { |
77 | goto out; | 77 | dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n", |
78 | } | 78 | nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw); |
79 | mutex_unlock(&rdev->ib_pool.mutex); | 79 | mutex_lock(&rdev->ib_pool.mutex); |
80 | 80 | nib->free = true; | |
81 | r = radeon_fence_wait(nib->fence, false); | 81 | mutex_unlock(&rdev->ib_pool.mutex); |
82 | if (r) { | 82 | radeon_fence_unref(&fence); |
83 | DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, | 83 | return r; |
84 | (unsigned long)nib->gpu_addr, nib->length_dw); | 84 | } |
85 | DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); | 85 | mutex_lock(&rdev->ib_pool.mutex); |
86 | goto out; | ||
87 | } | 86 | } |
88 | radeon_fence_unref(&nib->fence); | 87 | radeon_fence_unref(&nib->fence); |
89 | 88 | nib->fence = fence; | |
90 | nib->length_dw = 0; | 89 | nib->length_dw = 0; |
91 | |||
92 | /* scheduled list is accessed here */ | ||
93 | mutex_lock(&rdev->ib_pool.mutex); | ||
94 | list_del(&nib->list); | ||
95 | INIT_LIST_HEAD(&nib->list); | ||
96 | mutex_unlock(&rdev->ib_pool.mutex); | 90 | mutex_unlock(&rdev->ib_pool.mutex); |
97 | |||
98 | *ib = nib; | 91 | *ib = nib; |
99 | out: | 92 | return 0; |
100 | if (r) { | ||
101 | radeon_fence_unref(&fence); | ||
102 | } else { | ||
103 | (*ib)->fence = fence; | ||
104 | } | ||
105 | return r; | ||
106 | } | 93 | } |
107 | 94 | ||
108 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | 95 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) |
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | |||
113 | if (tmp == NULL) { | 100 | if (tmp == NULL) { |
114 | return; | 101 | return; |
115 | } | 102 | } |
116 | mutex_lock(&rdev->ib_pool.mutex); | 103 | if (!tmp->fence->emited) |
117 | if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) { | ||
118 | /* IB is scheduled & not signaled don't do anythings */ | ||
119 | mutex_unlock(&rdev->ib_pool.mutex); | ||
120 | return; | ||
121 | } | ||
122 | list_del(&tmp->list); | ||
123 | INIT_LIST_HEAD(&tmp->list); | ||
124 | if (tmp->fence) | ||
125 | radeon_fence_unref(&tmp->fence); | 104 | radeon_fence_unref(&tmp->fence); |
126 | 105 | mutex_lock(&rdev->ib_pool.mutex); | |
127 | tmp->length_dw = 0; | 106 | tmp->free = true; |
128 | clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); | ||
129 | mutex_unlock(&rdev->ib_pool.mutex); | 107 | mutex_unlock(&rdev->ib_pool.mutex); |
130 | } | 108 | } |
131 | 109 | ||
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
135 | 113 | ||
136 | if (!ib->length_dw || !rdev->cp.ready) { | 114 | if (!ib->length_dw || !rdev->cp.ready) { |
137 | /* TODO: Nothings in the ib we should report. */ | 115 | /* TODO: Nothings in the ib we should report. */ |
138 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); | 116 | DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); |
139 | return -EINVAL; | 117 | return -EINVAL; |
140 | } | 118 | } |
141 | 119 | ||
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
148 | radeon_ring_ib_execute(rdev, ib); | 126 | radeon_ring_ib_execute(rdev, ib); |
149 | radeon_fence_emit(rdev, ib->fence); | 127 | radeon_fence_emit(rdev, ib->fence); |
150 | mutex_lock(&rdev->ib_pool.mutex); | 128 | mutex_lock(&rdev->ib_pool.mutex); |
151 | list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); | 129 | /* once scheduled IB is considered free and protected by the fence */ |
130 | ib->free = true; | ||
152 | mutex_unlock(&rdev->ib_pool.mutex); | 131 | mutex_unlock(&rdev->ib_pool.mutex); |
153 | radeon_ring_unlock_commit(rdev); | 132 | radeon_ring_unlock_commit(rdev); |
154 | return 0; | 133 | return 0; |
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
164 | if (rdev->ib_pool.robj) | 143 | if (rdev->ib_pool.robj) |
165 | return 0; | 144 | return 0; |
166 | /* Allocate 1M object buffer */ | 145 | /* Allocate 1M object buffer */ |
167 | INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); | ||
168 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 146 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
169 | true, RADEON_GEM_DOMAIN_GTT, | 147 | true, RADEON_GEM_DOMAIN_GTT, |
170 | &rdev->ib_pool.robj); | 148 | &rdev->ib_pool.robj); |
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
195 | rdev->ib_pool.ibs[i].ptr = ptr + offset; | 173 | rdev->ib_pool.ibs[i].ptr = ptr + offset; |
196 | rdev->ib_pool.ibs[i].idx = i; | 174 | rdev->ib_pool.ibs[i].idx = i; |
197 | rdev->ib_pool.ibs[i].length_dw = 0; | 175 | rdev->ib_pool.ibs[i].length_dw = 0; |
198 | INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); | 176 | rdev->ib_pool.ibs[i].free = true; |
199 | } | 177 | } |
200 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | 178 | rdev->ib_pool.head_id = 0; |
201 | rdev->ib_pool.ready = true; | 179 | rdev->ib_pool.ready = true; |
202 | DRM_INFO("radeon: ib pool ready.\n"); | 180 | DRM_INFO("radeon: ib pool ready.\n"); |
203 | if (radeon_debugfs_ib_init(rdev)) { | 181 | if (radeon_debugfs_ib_init(rdev)) { |
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
214 | return; | 192 | return; |
215 | } | 193 | } |
216 | mutex_lock(&rdev->ib_pool.mutex); | 194 | mutex_lock(&rdev->ib_pool.mutex); |
217 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | ||
218 | if (rdev->ib_pool.robj) { | 195 | if (rdev->ib_pool.robj) { |
219 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); | 196 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
220 | if (likely(r == 0)) { | 197 | if (likely(r == 0)) { |
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) | |||
363 | if (ib == NULL) { | 340 | if (ib == NULL) { |
364 | return 0; | 341 | return 0; |
365 | } | 342 | } |
366 | seq_printf(m, "IB %04lu\n", ib->idx); | 343 | seq_printf(m, "IB %04u\n", ib->idx); |
367 | seq_printf(m, "IB fence %p\n", ib->fence); | 344 | seq_printf(m, "IB fence %p\n", ib->fence); |
368 | seq_printf(m, "IB size %05u dwords\n", ib->length_dw); | 345 | seq_printf(m, "IB size %05u dwords\n", ib->length_dw); |
369 | for (i = 0; i < ib->length_dw; i++) { | 346 | for (i = 0; i < ib->length_dw; i++) { |
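The radeon_ring.c hunks above replace the bitmap allocator and scheduled-IB list with a fixed pool scanned circularly from head_id: each slot carries a free flag, and a scheduled IB is marked free again as soon as its fence protects it. A standalone sketch of that circular scan, assuming a power-of-two pool size so the index can be masked; the names are made up.

/* Sketch: claim the first free slot in a power-of-two pool, starting the
 * search at a rotating head index. */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 8   /* must stay a power of two for the masking below */

struct slot {
	unsigned idx;
	bool free;
};

struct pool {
	struct slot slots[POOL_SIZE];
	unsigned head_id;   /* where the next search starts */
};

static struct slot *pool_get(struct pool *p)
{
	for (unsigned c = 0, i = p->head_id; c < POOL_SIZE; c++, i++) {
		i &= POOL_SIZE - 1;
		if (p->slots[i].free) {
			p->slots[i].free = false;
			p->head_id = (i + 1) & (POOL_SIZE - 1);
			return &p->slots[i];
		}
	}
	return NULL;   /* every slot is in flight; the caller retries later */
}

int main(void)
{
	struct pool p = { .head_id = 0 };
	struct slot *s;

	for (unsigned i = 0; i < POOL_SIZE; i++) {
		p.slots[i].idx = i;
		p.slots[i].free = true;
	}
	s = pool_get(&p);
	if (s)
		printf("claimed slot %u, next search starts at %u\n", s->idx, p.head_id);
	return 0;
}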
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200 index 6021c8849a16..c29ac434ac9c 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r200 +++ b/drivers/gpu/drm/radeon/reg_srcs/r200 | |||
@@ -91,6 +91,8 @@ r200 0x3294 | |||
91 | 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL | 91 | 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL |
92 | 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL | 92 | 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL |
93 | 0x22c4 SE_TCL_POINT_SPRITE_CNTL | 93 | 0x22c4 SE_TCL_POINT_SPRITE_CNTL |
94 | 0x22d0 SE_PVS_CNTL | ||
95 | 0x22d4 SE_PVS_CONST_CNTL | ||
94 | 0x2648 RE_POINTSIZE | 96 | 0x2648 RE_POINTSIZE |
95 | 0x26c0 RE_TOP_LEFT | 97 | 0x26c0 RE_TOP_LEFT |
96 | 0x26c4 RE_MISC | 98 | 0x26c4 RE_MISC |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 9f5418983e2a..287fcebfb4e6 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
223 | return 0; | 223 | return 0; |
224 | } | 224 | } |
225 | 225 | ||
226 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) | ||
227 | { | ||
228 | unsigned i; | ||
229 | uint32_t tmp; | ||
230 | |||
231 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
232 | /* read MC_STATUS */ | ||
233 | tmp = RREG32(0x0150); | ||
234 | if (tmp & (1 << 2)) { | ||
235 | return 0; | ||
236 | } | ||
237 | DRM_UDELAY(1); | ||
238 | } | ||
239 | return -1; | ||
240 | } | ||
241 | |||
226 | void rs400_gpu_init(struct radeon_device *rdev) | 242 | void rs400_gpu_init(struct radeon_device *rdev) |
227 | { | 243 | { |
228 | /* FIXME: HDP same place on rs400 ? */ | 244 | /* FIXME: HDP same place on rs400 ? */ |
229 | r100_hdp_reset(rdev); | 245 | r100_hdp_reset(rdev); |
230 | /* FIXME: is this correct ? */ | 246 | /* FIXME: is this correct ? */ |
231 | r420_pipes_init(rdev); | 247 | r420_pipes_init(rdev); |
232 | if (r300_mc_wait_for_idle(rdev)) { | 248 | if (rs400_mc_wait_for_idle(rdev)) { |
233 | printk(KERN_WARNING "Failed to wait MC idle while " | 249 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " |
234 | "programming pipes. Bad things might happen.\n"); | 250 | "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); |
235 | } | 251 | } |
236 | } | 252 | } |
237 | 253 | ||
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev) | |||
370 | r100_mc_stop(rdev, &save); | 386 | r100_mc_stop(rdev, &save); |
371 | 387 | ||
372 | /* Wait for mc idle */ | 388 | /* Wait for mc idle */ |
373 | if (r300_mc_wait_for_idle(rdev)) | 389 | if (rs400_mc_wait_for_idle(rdev)) |
374 | dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); | 390 | dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n"); |
375 | WREG32(R_000148_MC_FB_LOCATION, | 391 | WREG32(R_000148_MC_FB_LOCATION, |
376 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | | 392 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | |
377 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); | 393 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev) | |||
448 | 464 | ||
449 | void rs400_fini(struct radeon_device *rdev) | 465 | void rs400_fini(struct radeon_device *rdev) |
450 | { | 466 | { |
451 | rs400_suspend(rdev); | ||
452 | r100_cp_fini(rdev); | 467 | r100_cp_fini(rdev); |
453 | r100_wb_fini(rdev); | 468 | r100_wb_fini(rdev); |
454 | r100_ib_fini(rdev); | 469 | r100_ib_fini(rdev); |
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev) | |||
527 | if (r) { | 542 | if (r) { |
528 | /* Somethings want wront with the accel init stop accel */ | 543 | /* Somethings want wront with the accel init stop accel */ |
529 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 544 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
530 | rs400_suspend(rdev); | ||
531 | r100_cp_fini(rdev); | 545 | r100_cp_fini(rdev); |
532 | r100_wb_fini(rdev); | 546 | r100_wb_fini(rdev); |
533 | r100_ib_fini(rdev); | 547 | r100_ib_fini(rdev); |
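The rs400.c hunk above gives the chip its own rs400_mc_wait_for_idle(), a bounded poll of MC_STATUS with a one-microsecond delay per iteration up to rdev->usec_timeout. A small sketch of that bounded-poll shape with a faked status read; none of this is the real driver code and the bit position is only an example.

/* Sketch: poll a status word for an idle bit, giving up after a fixed
 * number of short delays. */
#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define STATUS_IDLE_BIT (1u << 2)

static uint32_t read_status(void)
{
	static int calls;   /* stand-in for a memory-mapped register read */
	return (++calls > 3) ? STATUS_IDLE_BIT : 0;
}

static void delay_us(unsigned us)
{
	struct timespec ts = { 0, (long)us * 1000 };
	nanosleep(&ts, NULL);
}

/* Returns 0 once the idle bit is seen, -1 if the timeout expires first. */
static int wait_for_idle(unsigned timeout_us)
{
	for (unsigned i = 0; i < timeout_us; i++) {
		if (read_status() & STATUS_IDLE_BIT)
			return 0;
		delay_us(1);
	}
	return -1;
}

int main(void)
{
	printf("wait_for_idle -> %d\n", wait_for_idle(100000));
	return 0;
}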
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d5255751e7b3..c3818562a13e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev) | |||
610 | 610 | ||
611 | void rs600_fini(struct radeon_device *rdev) | 611 | void rs600_fini(struct radeon_device *rdev) |
612 | { | 612 | { |
613 | rs600_suspend(rdev); | ||
614 | r100_cp_fini(rdev); | 613 | r100_cp_fini(rdev); |
615 | r100_wb_fini(rdev); | 614 | r100_wb_fini(rdev); |
616 | r100_ib_fini(rdev); | 615 | r100_ib_fini(rdev); |
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev) | |||
689 | if (r) { | 688 | if (r) { |
690 | /* Somethings want wront with the accel init stop accel */ | 689 | /* Somethings want wront with the accel init stop accel */ |
691 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 690 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
692 | rs600_suspend(rdev); | ||
693 | r100_cp_fini(rdev); | 691 | r100_cp_fini(rdev); |
694 | r100_wb_fini(rdev); | 692 | r100_wb_fini(rdev); |
695 | r100_ib_fini(rdev); | 693 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index cd31da913771..06e2771aee5a 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev) | |||
676 | 676 | ||
677 | void rs690_fini(struct radeon_device *rdev) | 677 | void rs690_fini(struct radeon_device *rdev) |
678 | { | 678 | { |
679 | rs690_suspend(rdev); | ||
680 | r100_cp_fini(rdev); | 679 | r100_cp_fini(rdev); |
681 | r100_wb_fini(rdev); | 680 | r100_wb_fini(rdev); |
682 | r100_ib_fini(rdev); | 681 | r100_ib_fini(rdev); |
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev) | |||
756 | if (r) { | 755 | if (r) { |
757 | /* Somethings want wront with the accel init stop accel */ | 756 | /* Somethings want wront with the accel init stop accel */ |
758 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 757 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
759 | rs690_suspend(rdev); | ||
760 | r100_cp_fini(rdev); | 758 | r100_cp_fini(rdev); |
761 | r100_wb_fini(rdev); | 759 | r100_wb_fini(rdev); |
762 | r100_ib_fini(rdev); | 760 | r100_ib_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 62756717b044..0e1e6b8632b8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev) | |||
537 | 537 | ||
538 | void rv515_fini(struct radeon_device *rdev) | 538 | void rv515_fini(struct radeon_device *rdev) |
539 | { | 539 | { |
540 | rv515_suspend(rdev); | ||
541 | r100_cp_fini(rdev); | 540 | r100_cp_fini(rdev); |
542 | r100_wb_fini(rdev); | 541 | r100_wb_fini(rdev); |
543 | r100_ib_fini(rdev); | 542 | r100_ib_fini(rdev); |
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev) | |||
615 | if (r) { | 614 | if (r) { |
616 | /* Somethings want wront with the accel init stop accel */ | 615 | /* Somethings want wront with the accel init stop accel */ |
617 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); | 616 | dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
618 | rv515_suspend(rdev); | ||
619 | r100_cp_fini(rdev); | 617 | r100_cp_fini(rdev); |
620 | r100_wb_fini(rdev); | 618 | r100_wb_fini(rdev); |
621 | r100_ib_fini(rdev); | 619 | r100_ib_fini(rdev); |
620 | radeon_irq_kms_fini(rdev); | ||
622 | rv370_pcie_gart_fini(rdev); | 621 | rv370_pcie_gart_fini(rdev); |
623 | radeon_agp_fini(rdev); | 622 | radeon_agp_fini(rdev); |
624 | radeon_irq_kms_fini(rdev); | ||
625 | rdev->accel_working = false; | 623 | rdev->accel_working = false; |
626 | } | 624 | } |
627 | return 0; | 625 | return 0; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 59c71245fb91..03021674d097 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
549 | 549 | ||
550 | gb_tiling_config |= BANK_SWAPS(1); | 550 | gb_tiling_config |= BANK_SWAPS(1); |
551 | 551 | ||
552 | backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, | 552 | if (rdev->family == CHIP_RV740) |
553 | rdev->config.rv770.max_backends, | 553 | backend_map = 0x28; |
554 | (0xff << rdev->config.rv770.max_backends) & 0xff); | 554 | else |
555 | backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, | ||
556 | rdev->config.rv770.max_backends, | ||
557 | (0xff << rdev->config.rv770.max_backends) & 0xff); | ||
555 | gb_tiling_config |= BACKEND_MAP(backend_map); | 558 | gb_tiling_config |= BACKEND_MAP(backend_map); |
556 | 559 | ||
557 | cc_gc_shader_pipe_config = | 560 | cc_gc_shader_pipe_config = |
@@ -779,7 +782,6 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
779 | fixed20_12 a; | 782 | fixed20_12 a; |
780 | u32 tmp; | 783 | u32 tmp; |
781 | int chansize, numchan; | 784 | int chansize, numchan; |
782 | int r; | ||
783 | 785 | ||
784 | /* Get VRAM informations */ | 786 | /* Get VRAM informations */ |
785 | rdev->mc.vram_is_ddr = true; | 787 | rdev->mc.vram_is_ddr = true; |
@@ -822,9 +824,6 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
822 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 824 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
823 | 825 | ||
824 | if (rdev->flags & RADEON_IS_AGP) { | 826 | if (rdev->flags & RADEON_IS_AGP) { |
825 | r = radeon_agp_init(rdev); | ||
826 | if (r) | ||
827 | return r; | ||
828 | /* gtt_size is setup by radeon_agp_init */ | 827 | /* gtt_size is setup by radeon_agp_init */ |
829 | rdev->mc.gtt_location = rdev->mc.agp_base; | 828 | rdev->mc.gtt_location = rdev->mc.agp_base; |
830 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; | 829 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; |
@@ -891,26 +890,25 @@ static int rv770_startup(struct radeon_device *rdev) | |||
891 | return r; | 890 | return r; |
892 | } | 891 | } |
893 | rv770_gpu_init(rdev); | 892 | rv770_gpu_init(rdev); |
894 | 893 | r = r600_blit_init(rdev); | |
895 | if (!rdev->r600_blit.shader_obj) { | 894 | if (r) { |
896 | r = r600_blit_init(rdev); | 895 | r600_blit_fini(rdev); |
896 | rdev->asic->copy = NULL; | ||
897 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | ||
898 | } | ||
899 | /* pin copy shader into vram */ | ||
900 | if (rdev->r600_blit.shader_obj) { | ||
901 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
902 | if (unlikely(r != 0)) | ||
903 | return r; | ||
904 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
905 | &rdev->r600_blit.shader_gpu_addr); | ||
906 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
897 | if (r) { | 907 | if (r) { |
898 | DRM_ERROR("radeon: failed blitter (%d).\n", r); | 908 | DRM_ERROR("failed to pin blit object %d\n", r); |
899 | return r; | 909 | return r; |
900 | } | 910 | } |
901 | } | 911 | } |
902 | |||
903 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
904 | if (unlikely(r != 0)) | ||
905 | return r; | ||
906 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
907 | &rdev->r600_blit.shader_gpu_addr); | ||
908 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
909 | if (r) { | ||
910 | DRM_ERROR("failed to pin blit object %d\n", r); | ||
911 | return r; | ||
912 | } | ||
913 | |||
914 | /* Enable IRQ */ | 912 | /* Enable IRQ */ |
915 | r = r600_irq_init(rdev); | 913 | r = r600_irq_init(rdev); |
916 | if (r) { | 914 | if (r) { |
@@ -972,13 +970,16 @@ int rv770_suspend(struct radeon_device *rdev) | |||
972 | /* FIXME: we should wait for ring to be empty */ | 970 | /* FIXME: we should wait for ring to be empty */ |
973 | r700_cp_stop(rdev); | 971 | r700_cp_stop(rdev); |
974 | rdev->cp.ready = false; | 972 | rdev->cp.ready = false; |
973 | r600_irq_suspend(rdev); | ||
975 | r600_wb_disable(rdev); | 974 | r600_wb_disable(rdev); |
976 | rv770_pcie_gart_disable(rdev); | 975 | rv770_pcie_gart_disable(rdev); |
977 | /* unpin shaders bo */ | 976 | /* unpin shaders bo */ |
978 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 977 | if (rdev->r600_blit.shader_obj) { |
979 | if (likely(r == 0)) { | 978 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
980 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | 979 | if (likely(r == 0)) { |
981 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 980 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
981 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
982 | } | ||
982 | } | 983 | } |
983 | return 0; | 984 | return 0; |
984 | } | 985 | } |
@@ -1037,6 +1038,11 @@ int rv770_init(struct radeon_device *rdev) | |||
1037 | r = radeon_fence_driver_init(rdev); | 1038 | r = radeon_fence_driver_init(rdev); |
1038 | if (r) | 1039 | if (r) |
1039 | return r; | 1040 | return r; |
1041 | if (rdev->flags & RADEON_IS_AGP) { | ||
1042 | r = radeon_agp_init(rdev); | ||
1043 | if (r) | ||
1044 | radeon_agp_disable(rdev); | ||
1045 | } | ||
1040 | r = rv770_mc_init(rdev); | 1046 | r = rv770_mc_init(rdev); |
1041 | if (r) | 1047 | if (r) |
1042 | return r; | 1048 | return r; |
@@ -1062,22 +1068,25 @@ int rv770_init(struct radeon_device *rdev) | |||
1062 | rdev->accel_working = true; | 1068 | rdev->accel_working = true; |
1063 | r = rv770_startup(rdev); | 1069 | r = rv770_startup(rdev); |
1064 | if (r) { | 1070 | if (r) { |
1065 | rv770_suspend(rdev); | 1071 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1072 | r600_cp_fini(rdev); | ||
1066 | r600_wb_fini(rdev); | 1073 | r600_wb_fini(rdev); |
1067 | radeon_ring_fini(rdev); | 1074 | r600_irq_fini(rdev); |
1075 | radeon_irq_kms_fini(rdev); | ||
1068 | rv770_pcie_gart_fini(rdev); | 1076 | rv770_pcie_gart_fini(rdev); |
1069 | rdev->accel_working = false; | 1077 | rdev->accel_working = false; |
1070 | } | 1078 | } |
1071 | if (rdev->accel_working) { | 1079 | if (rdev->accel_working) { |
1072 | r = radeon_ib_pool_init(rdev); | 1080 | r = radeon_ib_pool_init(rdev); |
1073 | if (r) { | 1081 | if (r) { |
1074 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | 1082 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
1075 | rdev->accel_working = false; | ||
1076 | } | ||
1077 | r = r600_ib_test(rdev); | ||
1078 | if (r) { | ||
1079 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
1080 | rdev->accel_working = false; | 1083 | rdev->accel_working = false; |
1084 | } else { | ||
1085 | r = r600_ib_test(rdev); | ||
1086 | if (r) { | ||
1087 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
1088 | rdev->accel_working = false; | ||
1089 | } | ||
1081 | } | 1090 | } |
1082 | } | 1091 | } |
1083 | return 0; | 1092 | return 0; |
@@ -1085,13 +1094,11 @@ int rv770_init(struct radeon_device *rdev) | |||
1085 | 1094 | ||
1086 | void rv770_fini(struct radeon_device *rdev) | 1095 | void rv770_fini(struct radeon_device *rdev) |
1087 | { | 1096 | { |
1088 | rv770_suspend(rdev); | ||
1089 | |||
1090 | r600_blit_fini(rdev); | 1097 | r600_blit_fini(rdev); |
1098 | r600_cp_fini(rdev); | ||
1099 | r600_wb_fini(rdev); | ||
1091 | r600_irq_fini(rdev); | 1100 | r600_irq_fini(rdev); |
1092 | radeon_irq_kms_fini(rdev); | 1101 | radeon_irq_kms_fini(rdev); |
1093 | radeon_ring_fini(rdev); | ||
1094 | r600_wb_fini(rdev); | ||
1095 | rv770_pcie_gart_fini(rdev); | 1102 | rv770_pcie_gart_fini(rdev); |
1096 | radeon_gem_fini(rdev); | 1103 | radeon_gem_fini(rdev); |
1097 | radeon_fence_driver_fini(rdev); | 1104 | radeon_fence_driver_fini(rdev); |
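The rv770.c hunks above make the blitter optional at startup: when r600_blit_init() fails the code tears it down, clears the asic copy hook and warns, so the device keeps working through a slower memcpy path instead of aborting. A toy sketch of that degrade-gracefully shape; the names and the simulated failure are hypothetical, not the driver's API.

/* Sketch: if an optional fast-path component fails to initialise, clear its
 * hook and fall back to plain memcpy rather than failing the whole device. */
#include <stdio.h>
#include <string.h>

struct device {
	int (*copy)(struct device *dev, void *dst, const void *src, size_t n);
};

static int blit_copy(struct device *dev, void *dst, const void *src, size_t n)
{
	(void)dev;
	memcpy(dst, src, n);   /* pretend this went through the accelerator */
	return 0;
}

static int blit_init(struct device *dev)
{
	(void)dev;
	return -1;             /* simulate a failed shader upload */
}

static void device_startup(struct device *dev)
{
	dev->copy = blit_copy;
	if (blit_init(dev)) {
		dev->copy = NULL;   /* callers now take the memcpy path */
		fprintf(stderr, "blitter init failed, falling back to memcpy\n");
	}
}

int main(void)
{
	struct device dev = { .copy = NULL };
	char dst[8], src[8] = "abcdefg";

	device_startup(&dev);
	if (dev.copy)
		dev.copy(&dev, dst, src, sizeof(src));
	else
		memcpy(dst, src, sizeof(src));
	printf("copied: %s\n", dst);
	return 0;
}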
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 2920f9a279e1..c7320ce4567d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -426,7 +426,8 @@ moved: | |||
426 | bdev->man[bo->mem.mem_type].gpu_offset; | 426 | bdev->man[bo->mem.mem_type].gpu_offset; |
427 | bo->cur_placement = bo->mem.placement; | 427 | bo->cur_placement = bo->mem.placement; |
428 | spin_unlock(&bo->lock); | 428 | spin_unlock(&bo->lock); |
429 | } | 429 | } else |
430 | bo->offset = 0; | ||
430 | 431 | ||
431 | return 0; | 432 | return 0; |
432 | 433 | ||
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
523 | static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) | 524 | static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) |
524 | { | 525 | { |
525 | struct ttm_bo_global *glob = bdev->glob; | 526 | struct ttm_bo_global *glob = bdev->glob; |
526 | struct ttm_buffer_object *entry, *nentry; | 527 | struct ttm_buffer_object *entry = NULL; |
527 | struct list_head *list, *next; | 528 | int ret = 0; |
528 | int ret; | ||
529 | 529 | ||
530 | spin_lock(&glob->lru_lock); | 530 | spin_lock(&glob->lru_lock); |
531 | list_for_each_safe(list, next, &bdev->ddestroy) { | 531 | if (list_empty(&bdev->ddestroy)) |
532 | entry = list_entry(list, struct ttm_buffer_object, ddestroy); | 532 | goto out_unlock; |
533 | nentry = NULL; | ||
534 | 533 | ||
535 | /* | 534 | entry = list_first_entry(&bdev->ddestroy, |
536 | * Protect the next list entry from destruction while we | 535 | struct ttm_buffer_object, ddestroy); |
537 | * unlock the lru_lock. | 536 | kref_get(&entry->list_kref); |
538 | */ | ||
539 | 537 | ||
540 | if (next != &bdev->ddestroy) { | 538 | for (;;) { |
541 | nentry = list_entry(next, struct ttm_buffer_object, | 539 | struct ttm_buffer_object *nentry = NULL; |
542 | ddestroy); | 540 | |
541 | if (entry->ddestroy.next != &bdev->ddestroy) { | ||
542 | nentry = list_first_entry(&entry->ddestroy, | ||
543 | struct ttm_buffer_object, ddestroy); | ||
543 | kref_get(&nentry->list_kref); | 544 | kref_get(&nentry->list_kref); |
544 | } | 545 | } |
545 | kref_get(&entry->list_kref); | ||
546 | 546 | ||
547 | spin_unlock(&glob->lru_lock); | 547 | spin_unlock(&glob->lru_lock); |
548 | ret = ttm_bo_cleanup_refs(entry, remove_all); | 548 | ret = ttm_bo_cleanup_refs(entry, remove_all); |
549 | kref_put(&entry->list_kref, ttm_bo_release_list); | 549 | kref_put(&entry->list_kref, ttm_bo_release_list); |
550 | entry = nentry; | ||
551 | |||
552 | if (ret || !entry) | ||
553 | goto out; | ||
550 | 554 | ||
551 | spin_lock(&glob->lru_lock); | 555 | spin_lock(&glob->lru_lock); |
552 | if (nentry) { | 556 | if (list_empty(&entry->ddestroy)) |
553 | bool next_onlist = !list_empty(next); | ||
554 | spin_unlock(&glob->lru_lock); | ||
555 | kref_put(&nentry->list_kref, ttm_bo_release_list); | ||
556 | spin_lock(&glob->lru_lock); | ||
557 | /* | ||
558 | * Someone might have raced us and removed the | ||
559 | * next entry from the list. We don't bother restarting | ||
560 | * list traversal. | ||
561 | */ | ||
562 | |||
563 | if (!next_onlist) | ||
564 | break; | ||
565 | } | ||
566 | if (ret) | ||
567 | break; | 557 | break; |
568 | } | 558 | } |
569 | ret = !list_empty(&bdev->ddestroy); | ||
570 | spin_unlock(&glob->lru_lock); | ||
571 | 559 | ||
560 | out_unlock: | ||
561 | spin_unlock(&glob->lru_lock); | ||
562 | out: | ||
563 | if (entry) | ||
564 | kref_put(&entry->list_kref, ttm_bo_release_list); | ||
572 | return ret; | 565 | return ret; |
573 | } | 566 | } |
574 | 567 | ||
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
950 | ttm_flag_masked(&cur_flags, placement->busy_placement[i], | 943 | ttm_flag_masked(&cur_flags, placement->busy_placement[i], |
951 | ~TTM_PL_MASK_MEMTYPE); | 944 | ~TTM_PL_MASK_MEMTYPE); |
952 | 945 | ||
946 | |||
947 | if (mem_type == TTM_PL_SYSTEM) { | ||
948 | mem->mem_type = mem_type; | ||
949 | mem->placement = cur_flags; | ||
950 | mem->mm_node = NULL; | ||
951 | return 0; | ||
952 | } | ||
953 | |||
953 | ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, | 954 | ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, |
954 | interruptible, no_wait); | 955 | interruptible, no_wait); |
955 | if (ret == 0 && mem->mm_node) { | 956 | if (ret == 0 && mem->mm_node) { |
@@ -1019,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, | |||
1019 | struct ttm_mem_reg *mem) | 1020 | struct ttm_mem_reg *mem) |
1020 | { | 1021 | { |
1021 | int i; | 1022 | int i; |
1023 | struct drm_mm_node *node = mem->mm_node; | ||
1024 | |||
1025 | if (node && placement->lpfn != 0 && | ||
1026 | (node->start < placement->fpfn || | ||
1027 | node->start + node->size > placement->lpfn)) | ||
1028 | return -1; | ||
1022 | 1029 | ||
1023 | for (i = 0; i < placement->num_placement; i++) { | 1030 | for (i = 0; i < placement->num_placement; i++) { |
1024 | if ((placement->placement[i] & mem->placement & | 1031 | if ((placement->placement[i] & mem->placement & |
@@ -1844,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1844 | * anyone tries to access a ttm page. | 1851 | * anyone tries to access a ttm page. |
1845 | */ | 1852 | */ |
1846 | 1853 | ||
1854 | if (bo->bdev->driver->swap_notify) | ||
1855 | bo->bdev->driver->swap_notify(bo); | ||
1856 | |||
1847 | ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); | 1857 | ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); |
1848 | out: | 1858 | out: |
1849 | 1859 | ||
@@ -1864,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev) | |||
1864 | while (ttm_bo_swapout(&bdev->glob->shrink) == 0) | 1874 | while (ttm_bo_swapout(&bdev->glob->shrink) == 0) |
1865 | ; | 1875 | ; |
1866 | } | 1876 | } |
1877 | EXPORT_SYMBOL(ttm_bo_swapout_all); | ||
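The ttm_bo.c hunk above rewrites delayed destruction so the walker takes a kref on the current entry (and on the next one) before dropping lru_lock, does the cleanup with the lock released, then re-takes the lock to advance. A user-space sketch of that pin-before-unlock traversal, with a plain integer standing in for kref; purely illustrative.

/* Sketch: walk a lock-protected list while processing each element with the
 * lock dropped, pinning entries with a refcount so they cannot vanish. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcount;   /* stands in for kref */
	int id;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void node_get(struct node *n) { n->refcount++; }

static void node_put(struct node *n)
{
	if (--n->refcount == 0)
		free(n);
}

static void cleanup_outside_lock(struct node *n)
{
	printf("cleaning up node %d\n", n->id);   /* may sleep; lock is dropped */
}

static void walk_and_cleanup(void)
{
	struct node *entry;

	pthread_mutex_lock(&list_lock);
	entry = head;
	if (entry)
		node_get(entry);               /* pin the first entry */
	while (entry) {
		struct node *next = entry->next;

		if (next)
			node_get(next);        /* pin the next entry as well */
		pthread_mutex_unlock(&list_lock);

		cleanup_outside_lock(entry);
		node_put(entry);               /* drop our pin */

		pthread_mutex_lock(&list_lock);
		entry = next;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 3; i > 0; i--) {
		struct node *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->refcount = 1;   /* the list's own reference */
		n->id = i;
		n->next = head;
		head = n;
	}
	walk_and_cleanup();
	return 0;
}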
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 2ecf7d0c64f6..5ca37a58a98c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, | |||
53 | { | 53 | { |
54 | struct ttm_tt *ttm = bo->ttm; | 54 | struct ttm_tt *ttm = bo->ttm; |
55 | struct ttm_mem_reg *old_mem = &bo->mem; | 55 | struct ttm_mem_reg *old_mem = &bo->mem; |
56 | uint32_t save_flags = old_mem->placement; | ||
57 | int ret; | 56 | int ret; |
58 | 57 | ||
59 | if (old_mem->mem_type != TTM_PL_SYSTEM) { | 58 | if (old_mem->mem_type != TTM_PL_SYSTEM) { |
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, | |||
62 | ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, | 61 | ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, |
63 | TTM_PL_MASK_MEM); | 62 | TTM_PL_MASK_MEM); |
64 | old_mem->mem_type = TTM_PL_SYSTEM; | 63 | old_mem->mem_type = TTM_PL_SYSTEM; |
65 | save_flags = old_mem->placement; | ||
66 | } | 64 | } |
67 | 65 | ||
68 | ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); | 66 | ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); |
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, | |||
77 | 75 | ||
78 | *old_mem = *new_mem; | 76 | *old_mem = *new_mem; |
79 | new_mem->mm_node = NULL; | 77 | new_mem->mm_node = NULL; |
80 | ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); | 78 | |
81 | return 0; | 79 | return 0; |
82 | } | 80 | } |
83 | EXPORT_SYMBOL(ttm_bo_move_ttm); | 81 | EXPORT_SYMBOL(ttm_bo_move_ttm); |
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
219 | void *old_iomap; | 217 | void *old_iomap; |
220 | void *new_iomap; | 218 | void *new_iomap; |
221 | int ret; | 219 | int ret; |
222 | uint32_t save_flags = old_mem->placement; | ||
223 | unsigned long i; | 220 | unsigned long i; |
224 | unsigned long page; | 221 | unsigned long page; |
225 | unsigned long add = 0; | 222 | unsigned long add = 0; |
@@ -270,7 +267,6 @@ out2: | |||
270 | 267 | ||
271 | *old_mem = *new_mem; | 268 | *old_mem = *new_mem; |
272 | new_mem->mm_node = NULL; | 269 | new_mem->mm_node = NULL; |
273 | ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); | ||
274 | 270 | ||
275 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { | 271 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { |
276 | ttm_tt_unbind(ttm); | 272 | ttm_tt_unbind(ttm); |
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
537 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; | 533 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; |
538 | struct ttm_mem_reg *old_mem = &bo->mem; | 534 | struct ttm_mem_reg *old_mem = &bo->mem; |
539 | int ret; | 535 | int ret; |
540 | uint32_t save_flags = old_mem->placement; | ||
541 | struct ttm_buffer_object *ghost_obj; | 536 | struct ttm_buffer_object *ghost_obj; |
542 | void *tmp_obj = NULL; | 537 | void *tmp_obj = NULL; |
543 | 538 | ||
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
598 | 593 | ||
599 | *old_mem = *new_mem; | 594 | *old_mem = *new_mem; |
600 | new_mem->mm_node = NULL; | 595 | new_mem->mm_node = NULL; |
601 | ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); | 596 | |
602 | return 0; | 597 | return 0; |
603 | } | 598 | } |
604 | EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); | 599 | EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); |
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index f619ebcaa4ec..3d172ef04ee1 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock) | |||
288 | wake_up_all(&lock->queue); | 288 | wake_up_all(&lock->queue); |
289 | spin_unlock(&lock->lock); | 289 | spin_unlock(&lock->lock); |
290 | } | 290 | } |
291 | EXPORT_SYMBOL(ttm_suspend_unlock); | ||
291 | 292 | ||
292 | static bool __ttm_suspend_lock(struct ttm_lock *lock) | 293 | static bool __ttm_suspend_lock(struct ttm_lock *lock) |
293 | { | 294 | { |
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock) | |||
309 | { | 310 | { |
310 | wait_event(lock->queue, __ttm_suspend_lock(lock)); | 311 | wait_event(lock->queue, __ttm_suspend_lock(lock)); |
311 | } | 312 | } |
313 | EXPORT_SYMBOL(ttm_suspend_lock); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 1099abac824b..75e9d6f86ba4 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -109,8 +109,8 @@ struct ttm_ref_object { | |||
109 | struct drm_hash_item hash; | 109 | struct drm_hash_item hash; |
110 | struct list_head head; | 110 | struct list_head head; |
111 | struct kref kref; | 111 | struct kref kref; |
112 | struct ttm_base_object *obj; | ||
113 | enum ttm_ref_type ref_type; | 112 | enum ttm_ref_type ref_type; |
113 | struct ttm_base_object *obj; | ||
114 | struct ttm_object_file *tfile; | 114 | struct ttm_object_file *tfile; |
115 | }; | 115 | }; |
116 | 116 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 9c2b1cc5dba5..3d47a2c12322 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -196,23 +196,34 @@ EXPORT_SYMBOL(ttm_tt_populate); | |||
196 | 196 | ||
197 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
198 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |
199 | enum ttm_caching_state c_state) | 199 | enum ttm_caching_state c_old, |
200 | enum ttm_caching_state c_new) | ||
200 | { | 201 | { |
202 | int ret = 0; | ||
203 | |||
201 | if (PageHighMem(p)) | 204 | if (PageHighMem(p)) |
202 | return 0; | 205 | return 0; |
203 | 206 | ||
204 | switch (c_state) { | 207 | if (c_old != tt_cached) { |
205 | case tt_cached: | 208 | /* p isn't in the default caching state, set it to |
206 | return set_pages_wb(p, 1); | 209 | * writeback first to free its current memtype. */ |
207 | case tt_wc: | 210 | |
208 | return set_memory_wc((unsigned long) page_address(p), 1); | 211 | ret = set_pages_wb(p, 1); |
209 | default: | 212 | if (ret) |
210 | return set_pages_uc(p, 1); | 213 | return ret; |
211 | } | 214 | } |
215 | |||
216 | if (c_new == tt_wc) | ||
217 | ret = set_memory_wc((unsigned long) page_address(p), 1); | ||
218 | else if (c_new == tt_uncached) | ||
219 | ret = set_pages_uc(p, 1); | ||
220 | |||
221 | return ret; | ||
212 | } | 222 | } |
213 | #else /* CONFIG_X86 */ | 223 | #else /* CONFIG_X86 */ |
214 | static inline int ttm_tt_set_page_caching(struct page *p, | 224 | static inline int ttm_tt_set_page_caching(struct page *p, |
215 | enum ttm_caching_state c_state) | 225 | enum ttm_caching_state c_old, |
226 | enum ttm_caching_state c_new) | ||
216 | { | 227 | { |
217 | return 0; | 228 | return 0; |
218 | } | 229 | } |
@@ -245,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, | |||
245 | for (i = 0; i < ttm->num_pages; ++i) { | 256 | for (i = 0; i < ttm->num_pages; ++i) { |
246 | cur_page = ttm->pages[i]; | 257 | cur_page = ttm->pages[i]; |
247 | if (likely(cur_page != NULL)) { | 258 | if (likely(cur_page != NULL)) { |
248 | ret = ttm_tt_set_page_caching(cur_page, c_state); | 259 | ret = ttm_tt_set_page_caching(cur_page, |
260 | ttm->caching_state, | ||
261 | c_state); | ||
249 | if (unlikely(ret != 0)) | 262 | if (unlikely(ret != 0)) |
250 | goto out_err; | 263 | goto out_err; |
251 | } | 264 | } |
@@ -259,7 +272,7 @@ out_err: | |||
259 | for (j = 0; j < i; ++j) { | 272 | for (j = 0; j < i; ++j) { |
260 | cur_page = ttm->pages[j]; | 273 | cur_page = ttm->pages[j]; |
261 | if (likely(cur_page != NULL)) { | 274 | if (likely(cur_page != NULL)) { |
262 | (void)ttm_tt_set_page_caching(cur_page, | 275 | (void)ttm_tt_set_page_caching(cur_page, c_state, |
263 | ttm->caching_state); | 276 | ttm->caching_state); |
264 | } | 277 | } |
265 | } | 278 | } |
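The ttm_tt.c hunk above makes page-caching changes two-step: if a page is not already in the default write-back state it is first returned to write-back, freeing its current memtype, and only then is the new attribute applied. A minimal sketch of that ordering with stand-in setters; these are not the real set_memory_*/set_pages_* calls.

/* Sketch: go through the default (write-back) state before applying a new
 * caching attribute, so the old attribute is released first. */
#include <stdio.h>

enum caching { CACHED, WRITE_COMBINED, UNCACHED };

static const char *name(enum caching c)
{
	return c == CACHED ? "cached" : c == WRITE_COMBINED ? "wc" : "uncached";
}

static int set_writeback(void)      { puts("  -> write-back (release old attribute)"); return 0; }
static int set_write_combined(void) { puts("  -> write-combined"); return 0; }
static int set_uncached(void)       { puts("  -> uncached"); return 0; }

static int set_caching(enum caching old, enum caching new)
{
	int ret = 0;

	printf("transition %s -> %s\n", name(old), name(new));
	if (old != CACHED) {
		/* Not in the default state: drop back to write-back first. */
		ret = set_writeback();
		if (ret)
			return ret;
	}
	if (new == WRITE_COMBINED)
		ret = set_write_combined();
	else if (new == UNCACHED)
		ret = set_uncached();
	return ret;
}

int main(void)
{
	set_caching(WRITE_COMBINED, UNCACHED);
	set_caching(CACHED, WRITE_COMBINED);
	return 0;
}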
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index d6f2d2b882e9..825ebe3d89d5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = { | |||
48 | .busy_placement = &vram_placement_flags | 48 | .busy_placement = &vram_placement_flags |
49 | }; | 49 | }; |
50 | 50 | ||
51 | struct ttm_placement vmw_vram_sys_placement = { | ||
52 | .fpfn = 0, | ||
53 | .lpfn = 0, | ||
54 | .num_placement = 1, | ||
55 | .placement = &vram_placement_flags, | ||
56 | .num_busy_placement = 1, | ||
57 | .busy_placement = &sys_placement_flags | ||
58 | }; | ||
59 | |||
51 | struct ttm_placement vmw_vram_ne_placement = { | 60 | struct ttm_placement vmw_vram_ne_placement = { |
52 | .fpfn = 0, | 61 | .fpfn = 0, |
53 | .lpfn = 0, | 62 | .lpfn = 0, |
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |||
172 | return 0; | 181 | return 0; |
173 | } | 182 | } |
174 | 183 | ||
184 | static void vmw_move_notify(struct ttm_buffer_object *bo, | ||
185 | struct ttm_mem_reg *new_mem) | ||
186 | { | ||
187 | if (new_mem->mem_type != TTM_PL_SYSTEM) | ||
188 | vmw_dmabuf_gmr_unbind(bo); | ||
189 | } | ||
190 | |||
191 | static void vmw_swap_notify(struct ttm_buffer_object *bo) | ||
192 | { | ||
193 | vmw_dmabuf_gmr_unbind(bo); | ||
194 | } | ||
195 | |||
175 | /** | 196 | /** |
176 | * FIXME: We're using the old vmware polling method to sync. | 197 | * FIXME: We're using the old vmware polling method to sync. |
177 | * Do this with fences instead. | 198 | * Do this with fences instead. |
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
225 | .sync_obj_wait = vmw_sync_obj_wait, | 246 | .sync_obj_wait = vmw_sync_obj_wait, |
226 | .sync_obj_flush = vmw_sync_obj_flush, | 247 | .sync_obj_flush = vmw_sync_obj_flush, |
227 | .sync_obj_unref = vmw_sync_obj_unref, | 248 | .sync_obj_unref = vmw_sync_obj_unref, |
228 | .sync_obj_ref = vmw_sync_obj_ref | 249 | .sync_obj_ref = vmw_sync_obj_ref, |
250 | .move_notify = vmw_move_notify, | ||
251 | .swap_notify = vmw_swap_notify | ||
229 | }; | 252 | }; |
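The vmwgfx_buffer.c hunk above adds optional move_notify/swap_notify hooks to the driver table, and the earlier ttm_bo.c change only invokes swap_notify when the pointer is set. A short sketch of that optional-callback convention; every name here is hypothetical.

/* Sketch: an ops table whose notification hooks are optional and therefore
 * NULL-checked by the core before being called. */
#include <stdio.h>

struct buffer {
	const char *name;
};

struct driver_ops {
	/* Optional: called before a buffer is swapped out to system memory. */
	void (*swap_notify)(struct buffer *bo);
	/* Optional: called when a buffer changes memory type. */
	void (*move_notify)(struct buffer *bo, int new_mem_type);
};

static void my_swap_notify(struct buffer *bo)
{
	printf("%s: releasing device bindings before swap-out\n", bo->name);
}

static const struct driver_ops ops = {
	.swap_notify = my_swap_notify,
	.move_notify = NULL,   /* this driver opts out of move events */
};

static void core_swapout(const struct driver_ops *drv, struct buffer *bo)
{
	if (drv->swap_notify)   /* the hook is optional, so test it first */
		drv->swap_notify(bo);
	printf("%s: pages swapped out\n", bo->name);
}

int main(void)
{
	struct buffer bo = { .name = "scanout" };

	core_swapout(&ops, &bo);
	return 0;
}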
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 1db1ef30be2b..0c9c0811f42d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx"; | |||
147 | 147 | ||
148 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 148 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
149 | static void vmw_master_init(struct vmw_master *); | 149 | static void vmw_master_init(struct vmw_master *); |
150 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | ||
151 | void *ptr); | ||
150 | 152 | ||
151 | static void vmw_print_capabilities(uint32_t capabilities) | 153 | static void vmw_print_capabilities(uint32_t capabilities) |
152 | { | 154 | { |
@@ -207,6 +209,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
207 | { | 209 | { |
208 | struct vmw_private *dev_priv; | 210 | struct vmw_private *dev_priv; |
209 | int ret; | 211 | int ret; |
212 | uint32_t svga_id; | ||
210 | 213 | ||
211 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | 214 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
212 | if (unlikely(dev_priv == NULL)) { | 215 | if (unlikely(dev_priv == NULL)) { |
@@ -217,6 +220,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
217 | 220 | ||
218 | dev_priv->dev = dev; | 221 | dev_priv->dev = dev; |
219 | dev_priv->vmw_chipset = chipset; | 222 | dev_priv->vmw_chipset = chipset; |
223 | dev_priv->last_read_sequence = (uint32_t) -100; | ||
220 | mutex_init(&dev_priv->hw_mutex); | 224 | mutex_init(&dev_priv->hw_mutex); |
221 | mutex_init(&dev_priv->cmdbuf_mutex); | 225 | mutex_init(&dev_priv->cmdbuf_mutex); |
222 | rwlock_init(&dev_priv->resource_lock); | 226 | rwlock_init(&dev_priv->resource_lock); |
@@ -236,6 +240,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
236 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); | 240 | dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); |
237 | 241 | ||
238 | mutex_lock(&dev_priv->hw_mutex); | 242 | mutex_lock(&dev_priv->hw_mutex); |
243 | |||
244 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | ||
245 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); | ||
246 | if (svga_id != SVGA_ID_2) { | ||
247 | ret = -ENOSYS; | ||
248 | DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id); | ||
249 | mutex_unlock(&dev_priv->hw_mutex); | ||
250 | goto out_err0; | ||
251 | } | ||
252 | |||
239 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); | 253 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
240 | 254 | ||
241 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 255 | if (dev_priv->capabilities & SVGA_CAP_GMR) { |
@@ -334,22 +348,24 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
334 | */ | 348 | */ |
335 | 349 | ||
336 | DRM_INFO("It appears like vesafb is loaded. " | 350 | DRM_INFO("It appears like vesafb is loaded. " |
337 | "Ignore above error if any. Entering stealth mode.\n"); | 351 | "Ignore above error if any.\n"); |
338 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); | 352 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); |
339 | if (unlikely(ret != 0)) { | 353 | if (unlikely(ret != 0)) { |
340 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); | 354 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); |
341 | goto out_no_device; | 355 | goto out_no_device; |
342 | } | 356 | } |
343 | vmw_kms_init(dev_priv); | ||
344 | vmw_overlay_init(dev_priv); | ||
345 | } else { | ||
346 | ret = vmw_request_device(dev_priv); | ||
347 | if (unlikely(ret != 0)) | ||
348 | goto out_no_device; | ||
349 | vmw_kms_init(dev_priv); | ||
350 | vmw_overlay_init(dev_priv); | ||
351 | vmw_fb_init(dev_priv); | ||
352 | } | 357 | } |
358 | ret = vmw_request_device(dev_priv); | ||
359 | if (unlikely(ret != 0)) | ||
360 | goto out_no_device; | ||
361 | vmw_kms_init(dev_priv); | ||
362 | vmw_overlay_init(dev_priv); | ||
363 | vmw_fb_init(dev_priv); | ||
364 | |||
365 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | ||
366 | register_pm_notifier(&dev_priv->pm_nb); | ||
367 | |||
368 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); | ||
353 | 369 | ||
354 | return 0; | 370 | return 0; |
355 | 371 | ||
@@ -385,17 +401,17 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
385 | 401 | ||
386 | DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); | 402 | DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); |
387 | 403 | ||
388 | if (!dev_priv->stealth) { | 404 | unregister_pm_notifier(&dev_priv->pm_nb); |
389 | vmw_fb_close(dev_priv); | 405 | |
390 | vmw_kms_close(dev_priv); | 406 | vmw_fb_close(dev_priv); |
391 | vmw_overlay_close(dev_priv); | 407 | vmw_kms_close(dev_priv); |
392 | vmw_release_device(dev_priv); | 408 | vmw_overlay_close(dev_priv); |
393 | pci_release_regions(dev->pdev); | 409 | vmw_release_device(dev_priv); |
394 | } else { | 410 | if (dev_priv->stealth) |
395 | vmw_kms_close(dev_priv); | ||
396 | vmw_overlay_close(dev_priv); | ||
397 | pci_release_region(dev->pdev, 2); | 411 | pci_release_region(dev->pdev, 2); |
398 | } | 412 | else |
413 | pci_release_regions(dev->pdev); | ||
414 | |||
399 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 415 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
400 | drm_irq_uninstall(dev_priv->dev); | 416 | drm_irq_uninstall(dev_priv->dev); |
401 | if (dev->devname == vmw_devname) | 417 | if (dev->devname == vmw_devname) |
@@ -564,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev, | |||
564 | int ret = 0; | 580 | int ret = 0; |
565 | 581 | ||
566 | DRM_INFO("Master set.\n"); | 582 | DRM_INFO("Master set.\n"); |
567 | if (dev_priv->stealth) { | ||
568 | ret = vmw_request_device(dev_priv); | ||
569 | if (unlikely(ret != 0)) | ||
570 | return ret; | ||
571 | } | ||
572 | 583 | ||
573 | if (active) { | 584 | if (active) { |
574 | BUG_ON(active != &dev_priv->fbdev_master); | 585 | BUG_ON(active != &dev_priv->fbdev_master); |
@@ -628,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev, | |||
628 | 639 | ||
629 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 640 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
630 | 641 | ||
631 | if (dev_priv->stealth) { | ||
632 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
633 | if (unlikely(ret != 0)) | ||
634 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
635 | vmw_release_device(dev_priv); | ||
636 | } | ||
637 | dev_priv->active_master = &dev_priv->fbdev_master; | 642 | dev_priv->active_master = &dev_priv->fbdev_master; |
638 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 643 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
639 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 644 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
640 | 645 | ||
641 | if (!dev_priv->stealth) | 646 | vmw_fb_on(dev_priv); |
642 | vmw_fb_on(dev_priv); | ||
643 | } | 647 | } |
644 | 648 | ||
645 | 649 | ||
@@ -650,6 +654,57 @@ static void vmw_remove(struct pci_dev *pdev) | |||
650 | drm_put_dev(dev); | 654 | drm_put_dev(dev); |
651 | } | 655 | } |
652 | 656 | ||
657 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | ||
658 | void *ptr) | ||
659 | { | ||
660 | struct vmw_private *dev_priv = | ||
661 | container_of(nb, struct vmw_private, pm_nb); | ||
662 | struct vmw_master *vmaster = dev_priv->active_master; | ||
663 | |||
664 | switch (val) { | ||
665 | case PM_HIBERNATION_PREPARE: | ||
666 | case PM_SUSPEND_PREPARE: | ||
667 | ttm_suspend_lock(&vmaster->lock); | ||
668 | |||
669 | /** | ||
670 | * This empties VRAM and unbinds all GMR bindings. | ||
671 | * Buffer contents are moved to swappable memory. | ||
672 | */ | ||
673 | ttm_bo_swapout_all(&dev_priv->bdev); | ||
674 | break; | ||
675 | case PM_POST_HIBERNATION: | ||
676 | case PM_POST_SUSPEND: | ||
677 | ttm_suspend_unlock(&vmaster->lock); | ||
678 | break; | ||
679 | case PM_RESTORE_PREPARE: | ||
680 | break; | ||
681 | case PM_POST_RESTORE: | ||
682 | break; | ||
683 | default: | ||
684 | break; | ||
685 | } | ||
686 | return 0; | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * These might not be needed with the virtual SVGA device. | ||
691 | */ | ||
692 | |||
693 | int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
694 | { | ||
695 | pci_save_state(pdev); | ||
696 | pci_disable_device(pdev); | ||
697 | pci_set_power_state(pdev, PCI_D3hot); | ||
698 | return 0; | ||
699 | } | ||
700 | |||
701 | int vmw_pci_resume(struct pci_dev *pdev) | ||
702 | { | ||
703 | pci_set_power_state(pdev, PCI_D0); | ||
704 | pci_restore_state(pdev); | ||
705 | return pci_enable_device(pdev); | ||
706 | } | ||
707 | |||
653 | static struct drm_driver driver = { | 708 | static struct drm_driver driver = { |
654 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | | 709 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | |
655 | DRIVER_MODESET, | 710 | DRIVER_MODESET, |
@@ -689,7 +744,9 @@ static struct drm_driver driver = { | |||
689 | .name = VMWGFX_DRIVER_NAME, | 744 | .name = VMWGFX_DRIVER_NAME, |
690 | .id_table = vmw_pci_id_list, | 745 | .id_table = vmw_pci_id_list, |
691 | .probe = vmw_probe, | 746 | .probe = vmw_probe, |
692 | .remove = vmw_remove | 747 | .remove = vmw_remove, |
748 | .suspend = vmw_pci_suspend, | ||
749 | .resume = vmw_pci_resume | ||
693 | }, | 750 | }, |
694 | .name = VMWGFX_DRIVER_NAME, | 751 | .name = VMWGFX_DRIVER_NAME, |
695 | .desc = VMWGFX_DRIVER_DESC, | 752 | .desc = VMWGFX_DRIVER_DESC, |
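The hunks above add a PM-notifier plus legacy PCI suspend/resume handlers to vmwgfx. The block below is only an illustrative sketch of that pattern, not the driver's code: a notifier_block embedded in the private structure, registered with register_pm_notifier() at load and removed at unload. The my_* names and helpers are hypothetical; in the patch itself the prepare case takes the TTM suspend lock and calls ttm_bo_swapout_all(), and the PCI hooks use the stock pci_save_state()/pci_set_power_state() sequence shown above.

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/suspend.h>

    struct my_private {
            struct notifier_block pm_nb;
            /* ... rest of the driver's private state ... */
    };

    /* Hypothetical helpers standing in for driver-specific work. */
    static void my_device_quiesce(struct my_private *priv) { }
    static void my_device_resume(struct my_private *priv) { }

    static int my_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
    {
            struct my_private *priv =
                    container_of(nb, struct my_private, pm_nb);

            switch (val) {
            case PM_HIBERNATION_PREPARE:
            case PM_SUSPEND_PREPARE:
                    /* Block new work; push buffer contents to swappable memory. */
                    my_device_quiesce(priv);
                    break;
            case PM_POST_HIBERNATION:
            case PM_POST_SUSPEND:
                    my_device_resume(priv);
                    break;
            default:
                    break;
            }
            return 0;
    }

    /*
     * At load:   priv->pm_nb.notifier_call = my_pm_notifier;
     *            register_pm_notifier(&priv->pm_nb);
     * At unload: unregister_pm_notifier(&priv->pm_nb);
     */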
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e61bd85b6975..356dc935ec13 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -32,16 +32,17 @@ | |||
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | #include "vmwgfx_drm.h" | 33 | #include "vmwgfx_drm.h" |
34 | #include "drm_hashtab.h" | 34 | #include "drm_hashtab.h" |
35 | #include "linux/suspend.h" | ||
35 | #include "ttm/ttm_bo_driver.h" | 36 | #include "ttm/ttm_bo_driver.h" |
36 | #include "ttm/ttm_object.h" | 37 | #include "ttm/ttm_object.h" |
37 | #include "ttm/ttm_lock.h" | 38 | #include "ttm/ttm_lock.h" |
38 | #include "ttm/ttm_execbuf_util.h" | 39 | #include "ttm/ttm_execbuf_util.h" |
39 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
40 | 41 | ||
41 | #define VMWGFX_DRIVER_DATE "20090724" | 42 | #define VMWGFX_DRIVER_DATE "20100209" |
42 | #define VMWGFX_DRIVER_MAJOR 0 | 43 | #define VMWGFX_DRIVER_MAJOR 1 |
43 | #define VMWGFX_DRIVER_MINOR 1 | 44 | #define VMWGFX_DRIVER_MINOR 0 |
44 | #define VMWGFX_DRIVER_PATCHLEVEL 2 | 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
45 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
46 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
47 | #define VMWGFX_MAX_RELOCATIONS 2048 | 48 | #define VMWGFX_MAX_RELOCATIONS 2048 |
@@ -95,6 +96,8 @@ struct vmw_surface { | |||
95 | struct drm_vmw_size *sizes; | 96 | struct drm_vmw_size *sizes; |
96 | uint32_t num_sizes; | 97 | uint32_t num_sizes; |
97 | 98 | ||
99 | bool scanout; | ||
100 | |||
98 | /* TODO so far just a extra pointer */ | 101 | /* TODO so far just a extra pointer */ |
99 | struct vmw_cursor_snooper snooper; | 102 | struct vmw_cursor_snooper snooper; |
100 | }; | 103 | }; |
@@ -110,6 +113,7 @@ struct vmw_fifo_state { | |||
110 | unsigned long static_buffer_size; | 113 | unsigned long static_buffer_size; |
111 | bool using_bounce_buffer; | 114 | bool using_bounce_buffer; |
112 | uint32_t capabilities; | 115 | uint32_t capabilities; |
116 | struct mutex fifo_mutex; | ||
113 | struct rw_semaphore rwsem; | 117 | struct rw_semaphore rwsem; |
114 | }; | 118 | }; |
115 | 119 | ||
@@ -210,7 +214,7 @@ struct vmw_private { | |||
210 | * Fencing and IRQs. | 214 | * Fencing and IRQs. |
211 | */ | 215 | */ |
212 | 216 | ||
213 | uint32_t fence_seq; | 217 | atomic_t fence_seq; |
214 | wait_queue_head_t fence_queue; | 218 | wait_queue_head_t fence_queue; |
215 | wait_queue_head_t fifo_queue; | 219 | wait_queue_head_t fifo_queue; |
216 | atomic_t fence_queue_waiters; | 220 | atomic_t fence_queue_waiters; |
@@ -258,6 +262,7 @@ struct vmw_private { | |||
258 | 262 | ||
259 | struct vmw_master *active_master; | 263 | struct vmw_master *active_master; |
260 | struct vmw_master fbdev_master; | 264 | struct vmw_master fbdev_master; |
265 | struct notifier_block pm_nb; | ||
261 | }; | 266 | }; |
262 | 267 | ||
263 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 268 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -353,6 +358,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
353 | struct vmw_dma_buffer *bo); | 358 | struct vmw_dma_buffer *bo); |
354 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | 359 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, |
355 | struct vmw_dma_buffer *bo); | 360 | struct vmw_dma_buffer *bo); |
361 | extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo); | ||
356 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 362 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
357 | struct drm_file *file_priv); | 363 | struct drm_file *file_priv); |
358 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 364 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
@@ -386,6 +392,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | |||
386 | uint32_t *sequence); | 392 | uint32_t *sequence); |
387 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); | 393 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); |
388 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); | 394 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); |
395 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); | ||
389 | 396 | ||
390 | /** | 397 | /** |
391 | * TTM glue - vmwgfx_ttm_glue.c | 398 | * TTM glue - vmwgfx_ttm_glue.c |
@@ -401,6 +408,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); | |||
401 | 408 | ||
402 | extern struct ttm_placement vmw_vram_placement; | 409 | extern struct ttm_placement vmw_vram_placement; |
403 | extern struct ttm_placement vmw_vram_ne_placement; | 410 | extern struct ttm_placement vmw_vram_ne_placement; |
411 | extern struct ttm_placement vmw_vram_sys_placement; | ||
404 | extern struct ttm_placement vmw_sys_placement; | 412 | extern struct ttm_placement vmw_sys_placement; |
405 | extern struct ttm_bo_driver vmw_bo_driver; | 413 | extern struct ttm_bo_driver vmw_bo_driver; |
406 | extern int vmw_dma_quiescent(struct drm_device *dev); | 414 | extern int vmw_dma_quiescent(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 2e92da567403..0897359b3e4e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
183 | } | 183 | } |
184 | 184 | ||
185 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 185 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
186 | struct vmw_sw_context *sw_context, | 186 | struct vmw_sw_context *sw_context, |
187 | SVGA3dCmdHeader *header) | 187 | SVGAGuestPtr *ptr, |
188 | struct vmw_dma_buffer **vmw_bo_p) | ||
188 | { | 189 | { |
189 | uint32_t handle; | ||
190 | struct vmw_dma_buffer *vmw_bo = NULL; | 190 | struct vmw_dma_buffer *vmw_bo = NULL; |
191 | struct ttm_buffer_object *bo; | 191 | struct ttm_buffer_object *bo; |
192 | struct vmw_surface *srf = NULL; | 192 | uint32_t handle = ptr->gmrId; |
193 | struct vmw_dma_cmd { | ||
194 | SVGA3dCmdHeader header; | ||
195 | SVGA3dCmdSurfaceDMA dma; | ||
196 | } *cmd; | ||
197 | struct vmw_relocation *reloc; | 193 | struct vmw_relocation *reloc; |
198 | int ret; | ||
199 | uint32_t cur_validate_node; | 194 | uint32_t cur_validate_node; |
200 | struct ttm_validate_buffer *val_buf; | 195 | struct ttm_validate_buffer *val_buf; |
196 | int ret; | ||
201 | 197 | ||
202 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
203 | handle = cmd->dma.guest.ptr.gmrId; | ||
204 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 198 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
205 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
206 | DRM_ERROR("Could not find or use GMR region.\n"); | 200 | DRM_ERROR("Could not find or use GMR region.\n"); |
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
209 | bo = &vmw_bo->base; | 203 | bo = &vmw_bo->base; |
210 | 204 | ||
211 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | 205 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
212 | DRM_ERROR("Max number of DMA commands per submission" | 206 | DRM_ERROR("Max number relocations per submission" |
213 | " exceeded\n"); | 207 | " exceeded\n"); |
214 | ret = -EINVAL; | 208 | ret = -EINVAL; |
215 | goto out_no_reloc; | 209 | goto out_no_reloc; |
216 | } | 210 | } |
217 | 211 | ||
218 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 212 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
219 | reloc->location = &cmd->dma.guest.ptr; | 213 | reloc->location = ptr; |
220 | 214 | ||
221 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 215 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); |
222 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { | 216 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { |
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
234 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 228 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
235 | ++sw_context->cur_val_buf; | 229 | ++sw_context->cur_val_buf; |
236 | } | 230 | } |
231 | *vmw_bo_p = vmw_bo; | ||
232 | return 0; | ||
233 | |||
234 | out_no_reloc: | ||
235 | vmw_dmabuf_unreference(&vmw_bo); | ||
236 | vmw_bo_p = NULL; | ||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | ||
241 | struct vmw_sw_context *sw_context, | ||
242 | SVGA3dCmdHeader *header) | ||
243 | { | ||
244 | struct vmw_dma_buffer *vmw_bo; | ||
245 | struct vmw_query_cmd { | ||
246 | SVGA3dCmdHeader header; | ||
247 | SVGA3dCmdEndQuery q; | ||
248 | } *cmd; | ||
249 | int ret; | ||
237 | 250 | ||
251 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
252 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
253 | if (unlikely(ret != 0)) | ||
254 | return ret; | ||
255 | |||
256 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
257 | &cmd->q.guestResult, | ||
258 | &vmw_bo); | ||
259 | if (unlikely(ret != 0)) | ||
260 | return ret; | ||
261 | |||
262 | vmw_dmabuf_unreference(&vmw_bo); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | ||
267 | struct vmw_sw_context *sw_context, | ||
268 | SVGA3dCmdHeader *header) | ||
269 | { | ||
270 | struct vmw_dma_buffer *vmw_bo; | ||
271 | struct vmw_query_cmd { | ||
272 | SVGA3dCmdHeader header; | ||
273 | SVGA3dCmdWaitForQuery q; | ||
274 | } *cmd; | ||
275 | int ret; | ||
276 | |||
277 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
278 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
279 | if (unlikely(ret != 0)) | ||
280 | return ret; | ||
281 | |||
282 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
283 | &cmd->q.guestResult, | ||
284 | &vmw_bo); | ||
285 | if (unlikely(ret != 0)) | ||
286 | return ret; | ||
287 | |||
288 | vmw_dmabuf_unreference(&vmw_bo); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | |||
293 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | ||
294 | struct vmw_sw_context *sw_context, | ||
295 | SVGA3dCmdHeader *header) | ||
296 | { | ||
297 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
298 | struct ttm_buffer_object *bo; | ||
299 | struct vmw_surface *srf = NULL; | ||
300 | struct vmw_dma_cmd { | ||
301 | SVGA3dCmdHeader header; | ||
302 | SVGA3dCmdSurfaceDMA dma; | ||
303 | } *cmd; | ||
304 | int ret; | ||
305 | |||
306 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
307 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
308 | &cmd->dma.guest.ptr, | ||
309 | &vmw_bo); | ||
310 | if (unlikely(ret != 0)) | ||
311 | return ret; | ||
312 | |||
313 | bo = &vmw_bo->base; | ||
238 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 314 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, |
239 | cmd->dma.host.sid, &srf); | 315 | cmd->dma.host.sid, &srf); |
240 | if (ret) { | 316 | if (ret) { |
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
379 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 455 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
380 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 456 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
381 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 457 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), |
382 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), | 458 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
383 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), | 459 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
384 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 460 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
385 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 461 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
386 | &vmw_cmd_blt_surf_screen_check) | 462 | &vmw_cmd_blt_surf_screen_check) |
@@ -490,10 +566,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
490 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | 566 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) |
491 | return 0; | 567 | return 0; |
492 | 568 | ||
569 | /** | ||
570 | * Put BO in VRAM, only if there is space. | ||
571 | */ | ||
572 | |||
573 | ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); | ||
574 | if (unlikely(ret == -ERESTARTSYS)) | ||
575 | return ret; | ||
576 | |||
577 | /** | ||
578 | * Otherwise, set it up as GMR. | ||
579 | */ | ||
580 | |||
581 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | ||
582 | return 0; | ||
583 | |||
493 | ret = vmw_gmr_bind(dev_priv, bo); | 584 | ret = vmw_gmr_bind(dev_priv, bo); |
494 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | 585 | if (likely(ret == 0 || ret == -ERESTARTSYS)) |
495 | return ret; | 586 | return ret; |
496 | 587 | ||
588 | /** | ||
589 | * If that failed, try VRAM again, this time evicting | ||
590 | * previous contents. | ||
591 | */ | ||
497 | 592 | ||
498 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); | 593 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); |
499 | return ret; | 594 | return ret; |
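The buffer-validation hunk above amounts to a three-step placement strategy. The following is a condensed, illustrative sketch of that same order, not a drop-in replacement; it assumes the driver's existing helpers (vmw_dmabuf_gmr(), vmw_gmr_bind()), the new vmw_vram_sys_placement declared in the header change below, and the 2.6.33-era four-argument ttm_bo_validate().

    static int vmw_place_buffer_sketch(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *bo)
    {
            int ret;

            if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
                    return 0;       /* already bound through a GMR */

            /* 1. Prefer VRAM, but only if there is space; the placement
             *    also allows falling back to system memory.
             */
            ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
            if (unlikely(ret == -ERESTARTSYS))
                    return ret;
            if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
                    return 0;

            /* 2. Otherwise try to set the buffer up as a GMR. */
            ret = vmw_gmr_bind(dev_priv, bo);
            if (likely(ret == 0 || ret == -ERESTARTSYS))
                    return ret;

            /* 3. Last resort: force VRAM, evicting previous contents. */
            return ttm_bo_validate(bo, &vmw_vram_placement, true, false);
    }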
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 641dde76ada1..a93367041cdc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
559 | info->pixmap.scan_align = 1; | 559 | info->pixmap.scan_align = 1; |
560 | #endif | 560 | #endif |
561 | 561 | ||
562 | info->aperture_base = vmw_priv->vram_start; | ||
563 | info->aperture_size = vmw_priv->vram_size; | ||
564 | |||
562 | /* | 565 | /* |
563 | * Dirty & Deferred IO | 566 | * Dirty & Deferred IO |
564 | */ | 567 | */ |
@@ -649,14 +652,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
649 | if (unlikely(ret != 0)) | 652 | if (unlikely(ret != 0)) |
650 | goto err_unlock; | 653 | goto err_unlock; |
651 | 654 | ||
652 | if (vmw_bo->gmr_bound) { | ||
653 | vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id); | ||
654 | spin_lock(&bo->glob->lru_lock); | ||
655 | ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id); | ||
656 | spin_unlock(&bo->glob->lru_lock); | ||
657 | vmw_bo->gmr_bound = NULL; | ||
658 | } | ||
659 | |||
660 | ret = ttm_bo_validate(bo, &ne_placement, false, false); | 655 | ret = ttm_bo_validate(bo, &ne_placement, false, false); |
661 | ttm_bo_unreserve(bo); | 656 | ttm_bo_unreserve(bo); |
662 | err_unlock: | 657 | err_unlock: |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 01feb48af333..39d43a01d846 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -29,6 +29,25 @@ | |||
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "ttm/ttm_placement.h" | 30 | #include "ttm/ttm_placement.h" |
31 | 31 | ||
32 | bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | ||
33 | { | ||
34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
35 | uint32_t fifo_min, hwversion; | ||
36 | |||
37 | fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); | ||
38 | if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) | ||
39 | return false; | ||
40 | |||
41 | hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION); | ||
42 | if (hwversion == 0) | ||
43 | return false; | ||
44 | |||
45 | if (hwversion < SVGA3D_HWVERSION_WS65_B1) | ||
46 | return false; | ||
47 | |||
48 | return true; | ||
49 | } | ||
50 | |||
32 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | 51 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) |
33 | { | 52 | { |
34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 53 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
@@ -55,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
55 | fifo->reserved_size = 0; | 74 | fifo->reserved_size = 0; |
56 | fifo->using_bounce_buffer = false; | 75 | fifo->using_bounce_buffer = false; |
57 | 76 | ||
77 | mutex_init(&fifo->fifo_mutex); | ||
58 | init_rwsem(&fifo->rwsem); | 78 | init_rwsem(&fifo->rwsem); |
59 | 79 | ||
60 | /* | 80 | /* |
@@ -98,8 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
98 | (unsigned int) min, | 118 | (unsigned int) min, |
99 | (unsigned int) fifo->capabilities); | 119 | (unsigned int) fifo->capabilities); |
100 | 120 | ||
101 | dev_priv->fence_seq = (uint32_t) -100; | 121 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); |
102 | dev_priv->last_read_sequence = (uint32_t) -100; | ||
103 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 122 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); |
104 | 123 | ||
105 | return vmw_fifo_send_fence(dev_priv, &dummy); | 124 | return vmw_fifo_send_fence(dev_priv, &dummy); |
@@ -265,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | |||
265 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | 284 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; |
266 | int ret; | 285 | int ret; |
267 | 286 | ||
268 | down_write(&fifo_state->rwsem); | 287 | mutex_lock(&fifo_state->fifo_mutex); |
269 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | 288 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); |
270 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 289 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
271 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); | 290 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); |
@@ -333,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | |||
333 | } | 352 | } |
334 | out_err: | 353 | out_err: |
335 | fifo_state->reserved_size = 0; | 354 | fifo_state->reserved_size = 0; |
336 | up_write(&fifo_state->rwsem); | 355 | mutex_unlock(&fifo_state->fifo_mutex); |
337 | return NULL; | 356 | return NULL; |
338 | } | 357 | } |
339 | 358 | ||
@@ -408,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
408 | 427 | ||
409 | } | 428 | } |
410 | 429 | ||
430 | down_write(&fifo_state->rwsem); | ||
411 | if (fifo_state->using_bounce_buffer || reserveable) { | 431 | if (fifo_state->using_bounce_buffer || reserveable) { |
412 | next_cmd += bytes; | 432 | next_cmd += bytes; |
413 | if (next_cmd >= max) | 433 | if (next_cmd >= max) |
@@ -419,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
419 | if (reserveable) | 439 | if (reserveable) |
420 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); | 440 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); |
421 | mb(); | 441 | mb(); |
422 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
423 | up_write(&fifo_state->rwsem); | 442 | up_write(&fifo_state->rwsem); |
443 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
444 | mutex_unlock(&fifo_state->fifo_mutex); | ||
424 | } | 445 | } |
425 | 446 | ||
426 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | 447 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) |
@@ -433,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
433 | 454 | ||
434 | fm = vmw_fifo_reserve(dev_priv, bytes); | 455 | fm = vmw_fifo_reserve(dev_priv, bytes); |
435 | if (unlikely(fm == NULL)) { | 456 | if (unlikely(fm == NULL)) { |
436 | down_write(&fifo_state->rwsem); | 457 | *sequence = atomic_read(&dev_priv->fence_seq); |
437 | *sequence = dev_priv->fence_seq; | ||
438 | up_write(&fifo_state->rwsem); | ||
439 | ret = -ENOMEM; | 458 | ret = -ENOMEM; |
440 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, | 459 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, |
441 | false, 3*HZ); | 460 | false, 3*HZ); |
@@ -443,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
443 | } | 462 | } |
444 | 463 | ||
445 | do { | 464 | do { |
446 | *sequence = dev_priv->fence_seq++; | 465 | *sequence = atomic_add_return(1, &dev_priv->fence_seq); |
447 | } while (*sequence == 0); | 466 | } while (*sequence == 0); |
448 | 467 | ||
449 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { | 468 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { |
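With fence_seq converted to an atomic_t, emitting a fence no longer has to take the FIFO rwsem for write just to advance the counter. The sketch below isolates the allocation idiom used in the do/while above (sequence 0 is treated as reserved); it is illustrative only.

    #include <linux/types.h>
    #include <asm/atomic.h>         /* <linux/atomic.h> on later kernels */

    /* Return the next non-zero 32-bit fence sequence number. */
    static u32 fence_seq_next(atomic_t *seq)
    {
            u32 s;

            do {
                    s = atomic_add_return(1, seq); /* new value, atomically */
            } while (s == 0);                      /* 0 means "no fence" */

            return s;
    }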
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 5fa6a4ed238a..1c7a316454d8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -43,11 +43,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
43 | param->value = vmw_overlay_num_free_overlays(dev_priv); | 43 | param->value = vmw_overlay_num_free_overlays(dev_priv); |
44 | break; | 44 | break; |
45 | case DRM_VMW_PARAM_3D: | 45 | case DRM_VMW_PARAM_3D: |
46 | param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0; | 46 | param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0; |
47 | break; | 47 | break; |
48 | case DRM_VMW_PARAM_FIFO_OFFSET: | 48 | case DRM_VMW_PARAM_FIFO_OFFSET: |
49 | param->value = dev_priv->mmio_start; | 49 | param->value = dev_priv->mmio_start; |
50 | break; | 50 | break; |
51 | case DRM_VMW_PARAM_HW_CAPS: | ||
52 | param->value = dev_priv->capabilities; | ||
53 | break; | ||
54 | case DRM_VMW_PARAM_FIFO_CAPS: | ||
55 | param->value = dev_priv->fifo.capabilities; | ||
56 | break; | ||
51 | default: | 57 | default: |
52 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 58 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
53 | param->param); | 59 | param->param); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index d40086fc8647..4d7cb5393860 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, | |||
85 | return true; | 85 | return true; |
86 | 86 | ||
87 | /** | 87 | /** |
88 | * Below is to signal stale fences that have wrapped. | ||
89 | * First, block fence submission. | ||
90 | */ | ||
91 | |||
92 | down_read(&fifo_state->rwsem); | ||
93 | |||
94 | /** | ||
95 | * Then check if the sequence is higher than what we've actually | 88 | * Then check if the sequence is higher than what we've actually |
96 | * emitted. Then the fence is stale and signaled. | 89 | * emitted. Then the fence is stale and signaled. |
97 | */ | 90 | */ |
98 | 91 | ||
99 | ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); | 92 | ret = ((atomic_read(&dev_priv->fence_seq) - sequence) |
100 | up_read(&fifo_state->rwsem); | 93 | > VMW_FENCE_WRAP); |
101 | 94 | ||
102 | return ret; | 95 | return ret; |
103 | } | 96 | } |
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
127 | 120 | ||
128 | if (fifo_idle) | 121 | if (fifo_idle) |
129 | down_read(&fifo_state->rwsem); | 122 | down_read(&fifo_state->rwsem); |
130 | signal_seq = dev_priv->fence_seq; | 123 | signal_seq = atomic_read(&dev_priv->fence_seq); |
131 | ret = 0; | 124 | ret = 0; |
132 | 125 | ||
133 | for (;;) { | 126 | for (;;) { |
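Because the counter is now atomic, the read-side rwsem around the stale-fence test is dropped. The test itself relies on unsigned wrap-around arithmetic; here is a standalone, illustrative sketch of just that check (VMW_FENCE_WRAP is the driver's wrap window, defined elsewhere).

    static bool fence_is_stale(const atomic_t *fence_seq, u32 sequence)
    {
            u32 emitted = atomic_read(fence_seq);

            /*
             * Unsigned subtraction: if 'sequence' lies far "ahead" of the
             * last emitted value, it must be a stale fence from before a
             * 32-bit wrap, so treat it as signaled.
             */
            return (emitted - sequence) > VMW_FENCE_WRAP;
    }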
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b1af76e371c3..31f9afed0a63 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
553 | } *cmd; | 553 | } *cmd; |
554 | int i, increment = 1; | 554 | int i, increment = 1; |
555 | 555 | ||
556 | if (!num_clips || | 556 | if (!num_clips) { |
557 | !(dev_priv->fifo.capabilities & | ||
558 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
559 | num_clips = 1; | 557 | num_clips = 1; |
560 | clips = &norect; | 558 | clips = &norect; |
561 | norect.x1 = norect.y1 = 0; | 559 | norect.x1 = norect.y1 = 0; |
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
574 | 572 | ||
575 | for (i = 0; i < num_clips; i++, clips += increment) { | 573 | for (i = 0; i < num_clips; i++, clips += increment) { |
576 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); | 574 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); |
577 | cmd[i].body.x = cpu_to_le32(clips[i].x1); | 575 | cmd[i].body.x = cpu_to_le32(clips->x1); |
578 | cmd[i].body.y = cpu_to_le32(clips[i].y1); | 576 | cmd[i].body.y = cpu_to_le32(clips->y1); |
579 | cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); | 577 | cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); |
580 | cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); | 578 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); |
581 | } | 579 | } |
582 | 580 | ||
583 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | 581 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); |
@@ -709,6 +707,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
709 | if (ret) | 707 | if (ret) |
710 | goto try_dmabuf; | 708 | goto try_dmabuf; |
711 | 709 | ||
710 | if (!surface->scanout) | ||
711 | goto err_not_scanout; | ||
712 | |||
712 | ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, | 713 | ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, |
713 | mode_cmd->width, mode_cmd->height); | 714 | mode_cmd->width, mode_cmd->height); |
714 | 715 | ||
@@ -742,6 +743,13 @@ try_dmabuf: | |||
742 | } | 743 | } |
743 | 744 | ||
744 | return &vfb->base; | 745 | return &vfb->base; |
746 | |||
747 | err_not_scanout: | ||
748 | DRM_ERROR("surface not marked as scanout\n"); | ||
749 | /* vmw_user_surface_lookup takes one ref */ | ||
750 | vmw_surface_unreference(&surface); | ||
751 | |||
752 | return NULL; | ||
745 | } | 753 | } |
746 | 754 | ||
747 | static int vmw_kms_fb_changed(struct drm_device *dev) | 755 | static int vmw_kms_fb_changed(struct drm_device *dev) |
@@ -761,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
761 | 769 | ||
762 | drm_mode_config_init(dev); | 770 | drm_mode_config_init(dev); |
763 | dev->mode_config.funcs = &vmw_kms_funcs; | 771 | dev->mode_config.funcs = &vmw_kms_funcs; |
764 | dev->mode_config.min_width = 640; | 772 | dev->mode_config.min_width = 1; |
765 | dev->mode_config.min_height = 480; | 773 | dev->mode_config.min_height = 1; |
766 | dev->mode_config.max_width = 2048; | 774 | dev->mode_config.max_width = dev_priv->fb_max_width; |
767 | dev->mode_config.max_height = 2048; | 775 | dev->mode_config.max_height = dev_priv->fb_max_height; |
768 | 776 | ||
769 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 777 | ret = vmw_kms_init_legacy_display_system(dev_priv); |
770 | 778 | ||
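The dirty-clip change in vmwgfx_kms.c fixes an indexing bug: the old loop advanced the clips pointer by 'increment' each iteration and indexed it with [i] as well, giving an effective stride of increment + 1 elements, so it emitted the wrong rectangles and could read past the clip array supplied by userspace. A standalone sketch of the corrected pattern (the struct and emit_update() helper are hypothetical, not the driver's types):

    struct clip_rect { int x1, y1, x2, y2; };

    static void emit_update(int x, int y, int w, int h) { /* hypothetical */ }

    static void emit_dirty_clips(const struct clip_rect *clips, int num_clips,
                                 int increment)
    {
            int i;

            for (i = 0; i < num_clips; i++, clips += increment) {
                    /*
                     * Use the element the pointer currently references.
                     * Writing clips[i] here would skip ahead by another i
                     * elements on top of the pointer advance.
                     */
                    emit_update(clips->x1, clips->y1,
                                clips->x2 - clips->x1,
                                clips->y2 - clips->y1);
            }
    }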
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index bb6e6a096d25..5b6eabeb7f51 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | |||
104 | bool pin, bool interruptible) | 104 | bool pin, bool interruptible) |
105 | { | 105 | { |
106 | struct ttm_buffer_object *bo = &buf->base; | 106 | struct ttm_buffer_object *bo = &buf->base; |
107 | struct ttm_bo_global *glob = bo->glob; | ||
108 | struct ttm_placement *overlay_placement = &vmw_vram_placement; | 107 | struct ttm_placement *overlay_placement = &vmw_vram_placement; |
109 | int ret; | 108 | int ret; |
110 | 109 | ||
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | |||
116 | if (unlikely(ret != 0)) | 115 | if (unlikely(ret != 0)) |
117 | goto err; | 116 | goto err; |
118 | 117 | ||
119 | if (buf->gmr_bound) { | ||
120 | vmw_gmr_unbind(dev_priv, buf->gmr_id); | ||
121 | spin_lock(&glob->lru_lock); | ||
122 | ida_remove(&dev_priv->gmr_ida, buf->gmr_id); | ||
123 | spin_unlock(&glob->lru_lock); | ||
124 | buf->gmr_bound = NULL; | ||
125 | } | ||
126 | |||
127 | if (pin) | 118 | if (pin) |
128 | overlay_placement = &vmw_vram_ne_placement; | 119 | overlay_placement = &vmw_vram_ne_placement; |
129 | 120 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c012d5927f65..f8fbbc67a406 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -574,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
574 | 574 | ||
575 | srf->flags = req->flags; | 575 | srf->flags = req->flags; |
576 | srf->format = req->format; | 576 | srf->format = req->format; |
577 | srf->scanout = req->scanout; | ||
577 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 578 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
578 | srf->num_sizes = 0; | 579 | srf->num_sizes = 0; |
579 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | 580 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) |
@@ -599,6 +600,26 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
599 | if (unlikely(ret != 0)) | 600 | if (unlikely(ret != 0)) |
600 | goto out_err1; | 601 | goto out_err1; |
601 | 602 | ||
603 | if (srf->scanout && | ||
604 | srf->num_sizes == 1 && | ||
605 | srf->sizes[0].width == 64 && | ||
606 | srf->sizes[0].height == 64 && | ||
607 | srf->format == SVGA3D_A8R8G8B8) { | ||
608 | |||
609 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
610 | /* clear the image */ | ||
611 | if (srf->snooper.image) { | ||
612 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
613 | } else { | ||
614 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
615 | ret = -ENOMEM; | ||
616 | goto out_err1; | ||
617 | } | ||
618 | } else { | ||
619 | srf->snooper.image = NULL; | ||
620 | } | ||
621 | srf->snooper.crtc = NULL; | ||
622 | |||
602 | user_srf->base.shareable = false; | 623 | user_srf->base.shareable = false; |
603 | user_srf->base.tfile = NULL; | 624 | user_srf->base.tfile = NULL; |
604 | 625 | ||
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
622 | return ret; | 643 | return ret; |
623 | } | 644 | } |
624 | 645 | ||
625 | if (srf->flags & (1 << 9) && | ||
626 | srf->num_sizes == 1 && | ||
627 | srf->sizes[0].width == 64 && | ||
628 | srf->sizes[0].height == 64 && | ||
629 | srf->format == SVGA3D_A8R8G8B8) { | ||
630 | |||
631 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
632 | /* clear the image */ | ||
633 | if (srf->snooper.image) | ||
634 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
635 | else | ||
636 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
637 | |||
638 | } else { | ||
639 | srf->snooper.image = NULL; | ||
640 | } | ||
641 | srf->snooper.crtc = NULL; | ||
642 | |||
643 | rep->sid = user_srf->base.hash.key; | 646 | rep->sid = user_srf->base.hash.key; |
644 | if (rep->sid == SVGA3D_INVALID_ID) | 647 | if (rep->sid == SVGA3D_INVALID_ID) |
645 | DRM_ERROR("Created bad Surface ID.\n"); | 648 | DRM_ERROR("Created bad Surface ID.\n"); |
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, | |||
754 | return bo_user_size + page_array_size; | 757 | return bo_user_size + page_array_size; |
755 | } | 758 | } |
756 | 759 | ||
757 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | 760 | void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo) |
758 | { | 761 | { |
759 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | 762 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); |
760 | struct ttm_bo_global *glob = bo->glob; | 763 | struct ttm_bo_global *glob = bo->glob; |
761 | struct vmw_private *dev_priv = | 764 | struct vmw_private *dev_priv = |
762 | container_of(bo->bdev, struct vmw_private, bdev); | 765 | container_of(bo->bdev, struct vmw_private, bdev); |
763 | 766 | ||
764 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
765 | if (vmw_bo->gmr_bound) { | 767 | if (vmw_bo->gmr_bound) { |
766 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | 768 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); |
767 | spin_lock(&glob->lru_lock); | 769 | spin_lock(&glob->lru_lock); |
768 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | 770 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); |
769 | spin_unlock(&glob->lru_lock); | 771 | spin_unlock(&glob->lru_lock); |
772 | vmw_bo->gmr_bound = false; | ||
770 | } | 773 | } |
774 | } | ||
775 | |||
776 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | ||
777 | { | ||
778 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
779 | struct ttm_bo_global *glob = bo->glob; | ||
780 | |||
781 | vmw_dmabuf_gmr_unbind(bo); | ||
782 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
771 | kfree(vmw_bo); | 783 | kfree(vmw_bo); |
772 | } | 784 | } |
773 | 785 | ||
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
813 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | 825 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) |
814 | { | 826 | { |
815 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | 827 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
816 | struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma; | ||
817 | struct ttm_bo_global *glob = bo->glob; | 828 | struct ttm_bo_global *glob = bo->glob; |
818 | struct vmw_private *dev_priv = | ||
819 | container_of(bo->bdev, struct vmw_private, bdev); | ||
820 | 829 | ||
830 | vmw_dmabuf_gmr_unbind(bo); | ||
821 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | 831 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); |
822 | if (vmw_bo->gmr_bound) { | ||
823 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
824 | spin_lock(&glob->lru_lock); | ||
825 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
826 | spin_unlock(&glob->lru_lock); | ||
827 | } | ||
828 | kfree(vmw_user_bo); | 832 | kfree(vmw_user_bo); |
829 | } | 833 | } |
830 | 834 | ||
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
868 | } | 872 | } |
869 | 873 | ||
870 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, | 874 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, |
871 | &vmw_vram_placement, true, | 875 | &vmw_vram_sys_placement, true, |
872 | &vmw_user_dmabuf_destroy); | 876 | &vmw_user_dmabuf_destroy); |
873 | if (unlikely(ret != 0)) | 877 | if (unlikely(ret != 0)) |
874 | return ret; | 878 | return ret; |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 1ac0c93603c9..2f6cf69ecb39 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
961 | remaining -= 7; | 961 | remaining -= 7; |
962 | pr_devel("client 0x%p called 'target'\n", priv); | 962 | pr_devel("client 0x%p called 'target'\n", priv); |
963 | /* if target is default */ | 963 | /* if target is default */ |
964 | if (!strncmp(buf, "default", 7)) | 964 | if (!strncmp(curr_pos, "default", 7)) |
965 | pdev = pci_dev_get(vga_default_device()); | 965 | pdev = pci_dev_get(vga_default_device()); |
966 | else { | 966 | else { |
967 | if (!vga_pci_str_to_vars(curr_pos, remaining, | 967 | if (!vga_pci_str_to_vars(curr_pos, remaining, |
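The one-line vgaarb change fixes a parse-cursor mix-up: after consuming the "target " keyword the code compared the original buffer start (buf) against "default" instead of the advanced cursor (curr_pos), so the "default" case was effectively never taken. A minimal, illustrative sketch of cursor-based parsing (the helpers are hypothetical):

    #include <linux/string.h>
    #include <linux/types.h>

    static void use_default_device(void) { /* hypothetical */ }
    static void parse_pci_target(const char *s, size_t len) { /* hypothetical */ }

    static void handle_target_cmd(const char *buf, size_t count)
    {
            const char *curr_pos = buf;
            size_t remaining = count;

            if (remaining > 7 && strncmp(curr_pos, "target ", 7) == 0) {
                    curr_pos += 7;          /* advance the cursor ... */
                    remaining -= 7;

                    /* ... and compare at the cursor, not at buf. */
                    if (remaining >= 7 && !strncmp(curr_pos, "default", 7))
                            use_default_device();
                    else
                            parse_pci_target(curr_pos, remaining);
            }
    }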
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index a31e77c776ae..b8156b4893bb 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c | |||
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; | |||
179 | * | 179 | * |
180 | * Some, but not all, of these voltages have low/high limits. | 180 | * Some, but not all, of these voltages have low/high limits. |
181 | */ | 181 | */ |
182 | #define ADT7462_VOLT_COUNT 12 | 182 | #define ADT7462_VOLT_COUNT 13 |
183 | 183 | ||
184 | #define ADT7462_VENDOR 0x41 | 184 | #define ADT7462_VENDOR 0x41 |
185 | #define ADT7462_DEVICE 0x62 | 185 | #define ADT7462_DEVICE 0x62 |
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 1c89d922d619..fa9708c2d723 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c | |||
@@ -686,7 +686,6 @@ static ssize_t set_fan1_div( | |||
686 | data->fan1_div = 4; | 686 | data->fan1_div = 4; |
687 | break; | 687 | break; |
688 | default: | 688 | default: |
689 | mutex_unlock(&data->update_lock); | ||
690 | count = -EINVAL; | 689 | count = -EINVAL; |
691 | goto EXIT; | 690 | goto EXIT; |
692 | } | 691 | } |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 6811346c1c62..028284f544e3 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -1329,17 +1329,16 @@ static int atk_add(struct acpi_device *device) | |||
1329 | &buf, ACPI_TYPE_PACKAGE); | 1329 | &buf, ACPI_TYPE_PACKAGE); |
1330 | if (ret != AE_OK) { | 1330 | if (ret != AE_OK) { |
1331 | dev_dbg(&device->dev, "atk: method MBIF not found\n"); | 1331 | dev_dbg(&device->dev, "atk: method MBIF not found\n"); |
1332 | err = -ENODEV; | 1332 | } else { |
1333 | goto out; | 1333 | obj = buf.pointer; |
1334 | } | 1334 | if (obj->package.count >= 2) { |
1335 | 1335 | union acpi_object *id = &obj->package.elements[1]; | |
1336 | obj = buf.pointer; | 1336 | if (id->type == ACPI_TYPE_STRING) |
1337 | if (obj->package.count >= 2 && | 1337 | dev_dbg(&device->dev, "board ID = %s\n", |
1338 | obj->package.elements[1].type == ACPI_TYPE_STRING) { | 1338 | id->string.pointer); |
1339 | dev_dbg(&device->dev, "board ID = %s\n", | 1339 | } |
1340 | obj->package.elements[1].string.pointer); | 1340 | ACPI_FREE(buf.pointer); |
1341 | } | 1341 | } |
1342 | ACPI_FREE(buf.pointer); | ||
1343 | 1342 | ||
1344 | err = atk_probe_if(data); | 1343 | err = atk_probe_if(data); |
1345 | if (err) { | 1344 | if (err) { |
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index bd0fc67e804b..fa0728232e71 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c | |||
@@ -768,6 +768,7 @@ leave: | |||
768 | static int watchdog_open(struct inode *inode, struct file *filp) | 768 | static int watchdog_open(struct inode *inode, struct file *filp) |
769 | { | 769 | { |
770 | struct fschmd_data *pos, *data = NULL; | 770 | struct fschmd_data *pos, *data = NULL; |
771 | int watchdog_is_open; | ||
771 | 772 | ||
772 | /* We get called from drivers/char/misc.c with misc_mtx hold, and we | 773 | /* We get called from drivers/char/misc.c with misc_mtx hold, and we |
773 | call misc_register() from fschmd_probe() with watchdog_data_mutex | 774 | call misc_register() from fschmd_probe() with watchdog_data_mutex |
@@ -782,10 +783,12 @@ static int watchdog_open(struct inode *inode, struct file *filp) | |||
782 | } | 783 | } |
783 | } | 784 | } |
784 | /* Note we can never not have found data, so we don't check for this */ | 785 | /* Note we can never not have found data, so we don't check for this */ |
785 | kref_get(&data->kref); | 786 | watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open); |
787 | if (!watchdog_is_open) | ||
788 | kref_get(&data->kref); | ||
786 | mutex_unlock(&watchdog_data_mutex); | 789 | mutex_unlock(&watchdog_data_mutex); |
787 | 790 | ||
788 | if (test_and_set_bit(0, &data->watchdog_is_open)) | 791 | if (watchdog_is_open) |
789 | return -EBUSY; | 792 | return -EBUSY; |
790 | 793 | ||
791 | /* Start the watchdog */ | 794 | /* Start the watchdog */ |
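The fschmd change closes a small leak in watchdog_open(): a reference was taken unconditionally and the single-open bit tested only after dropping the mutex, so a losing opener returned -EBUSY while still holding a kref that was never put. The idiom after the patch, in isolation (names are illustrative, not the driver's):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/kref.h>
    #include <linux/mutex.h>

    struct wd_data {
            struct kref kref;
            unsigned long watchdog_is_open;
    };

    static DEFINE_MUTEX(wd_data_mutex);

    static int wd_open_sketch(struct wd_data *data)
    {
            int already_open;

            mutex_lock(&wd_data_mutex);
            /*
             * Claim the single-open bit and the reference atomically with
             * respect to other openers: only the thread that actually
             * becomes the opener ends up holding a reference.
             */
            already_open = test_and_set_bit(0, &data->watchdog_is_open);
            if (!already_open)
                    kref_get(&data->kref);
            mutex_unlock(&wd_data_mutex);

            if (already_open)
                    return -EBUSY;

            /* ... start the watchdog ... */
            return 0;
    }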
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index cadcbd90ff3b..72ff2c4e757d 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c | |||
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev) | |||
851 | static int __init lm78_isa_found(unsigned short address) | 851 | static int __init lm78_isa_found(unsigned short address) |
852 | { | 852 | { |
853 | int val, save, found = 0; | 853 | int val, save, found = 0; |
854 | 854 | int port; | |
855 | /* We have to request the region in two parts because some | 855 | |
856 | boards declare base+4 to base+7 as a PNP device */ | 856 | /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
857 | if (!request_region(address, 4, "lm78")) { | 857 | * to base+7 and some base+5 to base+6. So we better request each port |
858 | pr_debug("lm78: Failed to request low part of region\n"); | 858 | * individually for the probing phase. */ |
859 | return 0; | 859 | for (port = address; port < address + LM78_EXTENT; port++) { |
860 | } | 860 | if (!request_region(port, 1, "lm78")) { |
861 | if (!request_region(address + 4, 4, "lm78")) { | 861 | pr_debug("lm78: Failed to request port 0x%x\n", port); |
862 | pr_debug("lm78: Failed to request high part of region\n"); | 862 | goto release; |
863 | release_region(address, 4); | 863 | } |
864 | return 0; | ||
865 | } | 864 | } |
866 | 865 | ||
867 | #define REALLY_SLOW_IO | 866 | #define REALLY_SLOW_IO |
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address) | |||
925 | val & 0x80 ? "LM79" : "LM78", (int)address); | 924 | val & 0x80 ? "LM79" : "LM78", (int)address); |
926 | 925 | ||
927 | release: | 926 | release: |
928 | release_region(address + 4, 4); | 927 | for (port--; port >= address; port--) |
929 | release_region(address, 4); | 928 | release_region(port, 1); |
930 | return found; | 929 | return found; |
931 | } | 930 | } |
932 | 931 | ||
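Both this lm78 hunk and the matching w83781d hunk further down switch from two fixed 4-port requests to requesting each port individually, because boards declare varying sub-ranges of the chip's I/O window as PnP devices (see the comment added in the patch). The general acquire-with-rollback shape, as an illustrative sketch with placeholder names:

    #include <linux/ioport.h>

    #define MY_EXTENT 8     /* placeholder for LM78_EXTENT / W83781D_EXTENT */

    static int isa_probe_sketch(unsigned short address)
    {
            int found = 0;
            int port;

            /* Request each port on its own for the probing phase. */
            for (port = address; port < address + MY_EXTENT; port++) {
                    if (!request_region(port, 1, "my-isa-driver"))
                            goto release;
            }

            /* ... poke the chip registers and set 'found' ... */

    release:
            /* Release exactly the ports obtained, in reverse order. */
            for (port--; port >= address; port--)
                    release_region(port, 1);
            return found;
    }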
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index 9ca97818bd4b..8fa462f2b570 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c | |||
@@ -488,7 +488,7 @@ static int __init smsc47m1_find(unsigned short *addr, | |||
488 | } | 488 | } |
489 | 489 | ||
490 | /* Restore device to its initial state */ | 490 | /* Restore device to its initial state */ |
491 | static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) | 491 | static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) |
492 | { | 492 | { |
493 | if ((sio_data->activate & 0x01) == 0) { | 493 | if ((sio_data->activate & 0x01) == 0) { |
494 | superio_enter(); | 494 | superio_enter(); |
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 05f9225b6f94..32d4adee73db 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c | |||
@@ -1793,17 +1793,17 @@ static int __init | |||
1793 | w83781d_isa_found(unsigned short address) | 1793 | w83781d_isa_found(unsigned short address) |
1794 | { | 1794 | { |
1795 | int val, save, found = 0; | 1795 | int val, save, found = 0; |
1796 | 1796 | int port; | |
1797 | /* We have to request the region in two parts because some | 1797 | |
1798 | boards declare base+4 to base+7 as a PNP device */ | 1798 | /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
1799 | if (!request_region(address, 4, "w83781d")) { | 1799 | * to base+7 and some base+5 to base+6. So we better request each port |
1800 | pr_debug("w83781d: Failed to request low part of region\n"); | 1800 | * individually for the probing phase. */ |
1801 | return 0; | 1801 | for (port = address; port < address + W83781D_EXTENT; port++) { |
1802 | } | 1802 | if (!request_region(port, 1, "w83781d")) { |
1803 | if (!request_region(address + 4, 4, "w83781d")) { | 1803 | pr_debug("w83781d: Failed to request port 0x%x\n", |
1804 | pr_debug("w83781d: Failed to request high part of region\n"); | 1804 | port); |
1805 | release_region(address, 4); | 1805 | goto release; |
1806 | return 0; | 1806 | } |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | #define REALLY_SLOW_IO | 1809 | #define REALLY_SLOW_IO |
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address) | |||
1877 | val == 0x30 ? "W83782D" : "W83781D", (int)address); | 1877 | val == 0x30 ? "W83782D" : "W83781D", (int)address); |
1878 | 1878 | ||
1879 | release: | 1879 | release: |
1880 | release_region(address + 4, 4); | 1880 | for (port--; port >= address; port--) |
1881 | release_region(address, 4); | 1881 | release_region(port, 1); |
1882 | return found; | 1882 | return found; |
1883 | } | 1883 | } |
1884 | 1884 | ||
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index e3654d683e15..75bf820e7ccb 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -226,7 +226,6 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) | |||
226 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); | 226 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); |
227 | temp &= ~(I2CR_MSTA | I2CR_MTX); | 227 | temp &= ~(I2CR_MSTA | I2CR_MTX); |
228 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); | 228 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); |
229 | i2c_imx->stopped = 1; | ||
230 | } | 229 | } |
231 | if (cpu_is_mx1()) { | 230 | if (cpu_is_mx1()) { |
232 | /* | 231 | /* |
@@ -236,8 +235,10 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) | |||
236 | udelay(i2c_imx->disable_delay); | 235 | udelay(i2c_imx->disable_delay); |
237 | } | 236 | } |
238 | 237 | ||
239 | if (!i2c_imx->stopped) | 238 | if (!i2c_imx->stopped) { |
240 | i2c_imx_bus_busy(i2c_imx, 0); | 239 | i2c_imx_bus_busy(i2c_imx, 0); |
240 | i2c_imx->stopped = 1; | ||
241 | } | ||
241 | 242 | ||
242 | /* Disable I2C controller */ | 243 | /* Disable I2C controller */ |
243 | writeb(0, i2c_imx->base + IMX_I2C_I2CR); | 244 | writeb(0, i2c_imx->base + IMX_I2C_I2CR); |
@@ -496,22 +497,23 @@ static int __init i2c_imx_probe(struct platform_device *pdev) | |||
496 | } | 497 | } |
497 | 498 | ||
498 | res_size = resource_size(res); | 499 | res_size = resource_size(res); |
500 | |||
501 | if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { | ||
502 | ret = -EBUSY; | ||
503 | goto fail0; | ||
504 | } | ||
505 | |||
499 | base = ioremap(res->start, res_size); | 506 | base = ioremap(res->start, res_size); |
500 | if (!base) { | 507 | if (!base) { |
501 | dev_err(&pdev->dev, "ioremap failed\n"); | 508 | dev_err(&pdev->dev, "ioremap failed\n"); |
502 | ret = -EIO; | 509 | ret = -EIO; |
503 | goto fail0; | 510 | goto fail1; |
504 | } | 511 | } |
505 | 512 | ||
506 | i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); | 513 | i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); |
507 | if (!i2c_imx) { | 514 | if (!i2c_imx) { |
508 | dev_err(&pdev->dev, "can't allocate interface\n"); | 515 | dev_err(&pdev->dev, "can't allocate interface\n"); |
509 | ret = -ENOMEM; | 516 | ret = -ENOMEM; |
510 | goto fail1; | ||
511 | } | ||
512 | |||
513 | if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { | ||
514 | ret = -EBUSY; | ||
515 | goto fail2; | 517 | goto fail2; |
516 | } | 518 | } |
517 | 519 | ||
@@ -582,11 +584,11 @@ fail5: | |||
582 | fail4: | 584 | fail4: |
583 | clk_put(i2c_imx->clk); | 585 | clk_put(i2c_imx->clk); |
584 | fail3: | 586 | fail3: |
585 | release_mem_region(i2c_imx->res->start, resource_size(res)); | ||
586 | fail2: | ||
587 | kfree(i2c_imx); | 587 | kfree(i2c_imx); |
588 | fail1: | 588 | fail2: |
589 | iounmap(base); | 589 | iounmap(base); |
590 | fail1: | ||
591 | release_mem_region(res->start, resource_size(res)); | ||
590 | fail0: | 592 | fail0: |
591 | if (pdata && pdata->exit) | 593 | if (pdata && pdata->exit) |
592 | pdata->exit(&pdev->dev); | 594 | pdata->exit(&pdev->dev); |
@@ -618,8 +620,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev) | |||
618 | 620 | ||
619 | clk_put(i2c_imx->clk); | 621 | clk_put(i2c_imx->clk); |
620 | 622 | ||
621 | release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); | ||
622 | iounmap(i2c_imx->base); | 623 | iounmap(i2c_imx->base); |
624 | release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); | ||
623 | kfree(i2c_imx); | 625 | kfree(i2c_imx); |
624 | return 0; | 626 | return 0; |
625 | } | 627 | } |
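The i2c-imx rework claims the memory region before mapping it and makes the error and remove paths unwind strictly in reverse order of acquisition (unmap before releasing the region, which the old code had inverted). A generic sketch of that shape, illustrative only, with my_* placeholders and the 2.6.33-era platform API:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct my_priv {
            void __iomem *base;
    };

    static int my_probe_sketch(struct platform_device *pdev)
    {
            struct resource *res;
            struct my_priv *priv;
            void __iomem *base;
            int ret;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -ENOENT;

            if (!request_mem_region(res->start, resource_size(res), "my-drv"))
                    return -EBUSY;

            base = ioremap(res->start, resource_size(res));
            if (!base) {
                    ret = -EIO;
                    goto err_release;
            }

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv) {
                    ret = -ENOMEM;
                    goto err_unmap;
            }
            priv->base = base;
            platform_set_drvdata(pdev, priv);

            /* ... finish setup and register the adapter ... */
            return 0;

    err_unmap:
            iounmap(base);                  /* undo in reverse order */
    err_release:
            release_mem_region(res->start, resource_size(res));
            return ret;
    }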
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index b1c050ff311d..e29b6d5ba8ef 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/types.h> | ||
16 | 17 | ||
17 | /* include interfaces to usb layer */ | 18 | /* include interfaces to usb layer */ |
18 | #include <linux/usb.h> | 19 | #include <linux/usb.h> |
@@ -31,8 +32,8 @@ | |||
31 | #define CMD_I2C_IO_END (1<<1) | 32 | #define CMD_I2C_IO_END (1<<1) |
32 | 33 | ||
33 | /* i2c bit delay, default is 10us -> 100kHz */ | 34 | /* i2c bit delay, default is 10us -> 100kHz */ |
34 | static int delay = 10; | 35 | static unsigned short delay = 10; |
35 | module_param(delay, int, 0); | 36 | module_param(delay, ushort, 0); |
36 | MODULE_PARM_DESC(delay, "bit delay in microseconds, " | 37 | MODULE_PARM_DESC(delay, "bit delay in microseconds, " |
37 | "e.g. 10 for 100kHz (default is 100kHz)"); | 38 | "e.g. 10 for 100kHz (default is 100kHz)"); |
38 | 39 | ||
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) | |||
109 | 110 | ||
110 | static u32 usb_func(struct i2c_adapter *adapter) | 111 | static u32 usb_func(struct i2c_adapter *adapter) |
111 | { | 112 | { |
112 | u32 func; | 113 | __le32 func; |
113 | 114 | ||
114 | /* get functionality from adapter */ | 115 | /* get functionality from adapter */ |
115 | if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != | 116 | if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != |
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter) | |||
118 | return 0; | 119 | return 0; |
119 | } | 120 | } |
120 | 121 | ||
121 | return func; | 122 | return le32_to_cpu(func); |
122 | } | 123 | } |
123 | 124 | ||
124 | /* This is the actual algorithm we define */ | 125 | /* This is the actual algorithm we define */ |
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, | |||
216 | "i2c-tiny-usb at bus %03d device %03d", | 217 | "i2c-tiny-usb at bus %03d device %03d", |
217 | dev->usb_dev->bus->busnum, dev->usb_dev->devnum); | 218 | dev->usb_dev->bus->busnum, dev->usb_dev->devnum); |
218 | 219 | ||
219 | if (usb_write(&dev->adapter, CMD_SET_DELAY, | 220 | if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { |
220 | cpu_to_le16(delay), 0, NULL, 0) != 0) { | ||
221 | dev_err(&dev->adapter.dev, | 221 | dev_err(&dev->adapter.dev, |
222 | "failure setting delay to %dus\n", delay); | 222 | "failure setting delay to %dus\n", delay); |
223 | retval = -EIO; | 223 | retval = -EIO; |
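The i2c-tiny-usb hunks are endianness fixes: the functionality word returned by CMD_GET_FUNC arrives little-endian, so it is read into a __le32 and converted once with le32_to_cpu(); conversely the delay must not be pre-swapped with cpu_to_le16(), because usb_control_msg() takes wValue in CPU order and the USB core serializes the setup packet as little-endian itself. A hedged sketch of the two patterns; the request constants, recipient flags and 2000 ms timeout are illustrative, and the read uses a kmalloc'ed buffer so it stays DMA-safe:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/usb.h>

    /* Read a little-endian 32-bit word from the device. */
    static u32 read_le32_word(struct usb_device *udev, u8 request)
    {
            __le32 *raw;            /* raw wire format */
            u32 val = 0;
            int ret;

            raw = kmalloc(sizeof(*raw), GFP_KERNEL); /* DMA-safe buffer */
            if (!raw)
                    return 0;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                                  USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
                                  USB_DIR_IN,
                                  0, 0, raw, sizeof(*raw), 2000);
            if (ret == sizeof(*raw))
                    val = le32_to_cpu(*raw); /* convert once, at the boundary */

            kfree(raw);
            return val;
    }

    /* Send a 16-bit parameter in wValue: pass it in CPU order; the USB
     * core encodes the setup packet as little-endian on the wire.
     */
    static int send_value_word(struct usb_device *udev, u8 request, u16 value)
    {
            return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
                                   USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                                   value, 0, NULL, 0, 2000);
    }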
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cc9b5940fa97..875e34e0b235 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
2115 | if (ret) | 2115 | if (ret) |
2116 | goto err1; | 2116 | goto err1; |
2117 | 2117 | ||
2118 | if (cma_loopback_addr(addr)) { | 2118 | if (!cma_any_addr(addr)) { |
2119 | ret = cma_bind_loopback(id_priv); | ||
2120 | } else if (!cma_zero_addr(addr)) { | ||
2121 | ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); | 2119 | ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); |
2122 | if (ret) | 2120 | if (ret) |
2123 | goto err1; | 2121 | goto err1; |
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index b3684060465e..100da8542bba 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | |||
@@ -346,10 +346,8 @@ static int ipathfs_fill_super(struct super_block *sb, void *data, | |||
346 | list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { | 346 | list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { |
347 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | 347 | spin_unlock_irqrestore(&ipath_devs_lock, flags); |
348 | ret = create_device_files(sb, dd); | 348 | ret = create_device_files(sb, dd); |
349 | if (ret) { | 349 | if (ret) |
350 | deactivate_locked_super(sb); | ||
351 | goto bail; | 350 | goto bail; |
352 | } | ||
353 | spin_lock_irqsave(&ipath_devs_lock, flags); | 351 | spin_lock_irqsave(&ipath_devs_lock, flags); |
354 | } | 352 | } |
355 | 353 | ||
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c index aa6713b4a988..291d9393d359 100644 --- a/drivers/input/input-polldev.c +++ b/drivers/input/input-polldev.c | |||
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input) | |||
100 | struct input_polled_dev *dev = input_get_drvdata(input); | 100 | struct input_polled_dev *dev = input_get_drvdata(input); |
101 | 101 | ||
102 | cancel_delayed_work_sync(&dev->work); | 102 | cancel_delayed_work_sync(&dev->work); |
103 | /* | ||
104 | * Clean up work struct to remove references to the workqueue. | ||
105 | * It may be destroyed by the next call. This causes problems | ||
106 | * at next device open-close in case of poll_interval == 0. | ||
107 | */ | ||
108 | INIT_DELAYED_WORK(&dev->work, dev->work.work.func); | ||
103 | input_polldev_stop_workqueue(); | 109 | input_polldev_stop_workqueue(); |
104 | 110 | ||
105 | if (dev->close) | 111 | if (dev->close) |
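The comment added above explains the pattern: after cancelling the delayed work, the work item is re-initialized so it carries no stale state once the shared workqueue is torn down. A stripped-down sketch of that close-path pattern, with illustrative names only (not the input-polldev code):

    #include <linux/workqueue.h>

    struct my_poll_dev {
            struct delayed_work work;
    };

    static void my_poll_fn(struct work_struct *w)
    {
            /* ... poll the hardware and requeue as needed ... */
    }

    static void my_close(struct my_poll_dev *dev)
    {
            cancel_delayed_work_sync(&dev->work);
            /* Keep the same handler, but start from a clean work item so no
             * reference to the (soon to be destroyed) workqueue survives. */
            INIT_DELAYED_WORK(&dev->work, my_poll_fn);
            /* drop the workqueue reference here; it may now be destroyed */
    }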
diff --git a/drivers/input/input.c b/drivers/input/input.c index 30b503b8d67b..86cb2d2196ff 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -46,6 +46,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = { | |||
46 | ABS_MT_TOOL_TYPE, | 46 | ABS_MT_TOOL_TYPE, |
47 | ABS_MT_BLOB_ID, | 47 | ABS_MT_BLOB_ID, |
48 | ABS_MT_TRACKING_ID, | 48 | ABS_MT_TRACKING_ID, |
49 | ABS_MT_PRESSURE, | ||
49 | 0 | 50 | 0 |
50 | }; | 51 | }; |
51 | static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; | 52 | static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; |
diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c index 33309fe44e20..c8f5a9a3fa14 100644 --- a/drivers/input/misc/winbond-cir.c +++ b/drivers/input/misc/winbond-cir.c | |||
@@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data) | |||
768 | return; | 768 | return; |
769 | } | 769 | } |
770 | 770 | ||
771 | dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X " | 771 | dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X " |
772 | "toggle %u mode %u scan 0x%08X\n", | 772 | "toggle %u mode %u scan 0x%08X\n", |
773 | address, | 773 | address, |
774 | command, | 774 | command, |
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c index 6d7aa10d10f0..7c1d7d420ae3 100644 --- a/drivers/input/mouse/lifebook.c +++ b/drivers/input/mouse/lifebook.c | |||
@@ -53,6 +53,12 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = { | |||
53 | { | 53 | { |
54 | /* LifeBook B */ | 54 | /* LifeBook B */ |
55 | .matches = { | 55 | .matches = { |
56 | DMI_MATCH(DMI_PRODUCT_NAME, "Lifebook B Series"), | ||
57 | }, | ||
58 | }, | ||
59 | { | ||
60 | /* LifeBook B */ | ||
61 | .matches = { | ||
56 | DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), | 62 | DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), |
57 | }, | 63 | }, |
58 | }, | 64 | }, |
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 9774bdfaa482..d8c0c8d6992c 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio) | |||
1141 | psmouse_deactivate(parent); | 1141 | psmouse_deactivate(parent); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | psmouse_deactivate(psmouse); | 1144 | psmouse_set_state(psmouse, PSMOUSE_INITIALIZING); |
1145 | |||
1146 | /* | ||
1147 | * Disable stream mode so cleanup routine can proceed undisturbed. | ||
1148 | */ | ||
1149 | if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) | ||
1150 | printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n", | ||
1151 | psmouse->ps2dev.serio->phys); | ||
1145 | 1152 | ||
1146 | if (psmouse->cleanup) | 1153 | if (psmouse->cleanup) |
1147 | psmouse->cleanup(psmouse); | 1154 | psmouse->cleanup(psmouse); |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index d84a36e545f6..b54aee7cd9e3 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev) | |||
1161 | return 0; | 1161 | return 0; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static int i8042_pm_thaw(struct device *dev) | ||
1165 | { | ||
1166 | i8042_interrupt(0, NULL); | ||
1167 | |||
1168 | return 0; | ||
1169 | } | ||
1170 | |||
1164 | static const struct dev_pm_ops i8042_pm_ops = { | 1171 | static const struct dev_pm_ops i8042_pm_ops = { |
1165 | .suspend = i8042_pm_reset, | 1172 | .suspend = i8042_pm_reset, |
1166 | .resume = i8042_pm_restore, | 1173 | .resume = i8042_pm_restore, |
1174 | .thaw = i8042_pm_thaw, | ||
1167 | .poweroff = i8042_pm_reset, | 1175 | .poweroff = i8042_pm_reset, |
1168 | .restore = i8042_pm_restore, | 1176 | .restore = i8042_pm_restore, |
1169 | }; | 1177 | }; |
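The new i8042_pm_thaw() hook runs in the hibernation path after the memory image has been written, while the system is still running. As a reminder of where each dev_pm_ops callback fires, here is an illustrative (non-i8042) skeleton; the comments describe the standard PM phases, not anything specific to this driver:

    #include <linux/pm.h>

    static int demo_suspend(struct device *dev)  { return 0; }
    static int demo_resume(struct device *dev)   { return 0; }
    static int demo_freeze(struct device *dev)   { return 0; }
    static int demo_thaw(struct device *dev)     { return 0; }
    static int demo_poweroff(struct device *dev) { return 0; }
    static int demo_restore(struct device *dev)  { return 0; }

    static const struct dev_pm_ops demo_pm_ops = {
            .suspend  = demo_suspend,   /* suspend-to-RAM entry */
            .resume   = demo_resume,    /* suspend-to-RAM exit */
            .freeze   = demo_freeze,    /* before the hibernation image is made */
            .thaw     = demo_thaw,      /* after the image is made, still running */
            .poweroff = demo_poweroff,  /* before powering off for hibernation */
            .restore  = demo_restore,   /* after the saved image has been loaded */
    };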
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c index c21e6d3a8844..794d070c6900 100644 --- a/drivers/input/touchscreen/ad7879.c +++ b/drivers/input/touchscreen/ad7879.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/workqueue.h> | 47 | #include <linux/workqueue.h> |
48 | #include <linux/spi/spi.h> | 48 | #include <linux/spi/spi.h> |
49 | #include <linux/i2c.h> | 49 | #include <linux/i2c.h> |
50 | #include <linux/gpio.h> | ||
50 | 51 | ||
51 | #include <linux/spi/ad7879.h> | 52 | #include <linux/spi/ad7879.h> |
52 | 53 | ||
@@ -132,7 +133,9 @@ struct ad7879 { | |||
132 | struct input_dev *input; | 133 | struct input_dev *input; |
133 | struct work_struct work; | 134 | struct work_struct work; |
134 | struct timer_list timer; | 135 | struct timer_list timer; |
135 | 136 | #ifdef CONFIG_GPIOLIB | |
137 | struct gpio_chip gc; | ||
138 | #endif | ||
136 | struct mutex mutex; | 139 | struct mutex mutex; |
137 | unsigned disabled:1; /* P: mutex */ | 140 | unsigned disabled:1; /* P: mutex */ |
138 | 141 | ||
@@ -150,11 +153,9 @@ struct ad7879 { | |||
150 | u8 median; | 153 | u8 median; |
151 | u16 x_plate_ohms; | 154 | u16 x_plate_ohms; |
152 | u16 pressure_max; | 155 | u16 pressure_max; |
153 | u16 gpio_init; | ||
154 | u16 cmd_crtl1; | 156 | u16 cmd_crtl1; |
155 | u16 cmd_crtl2; | 157 | u16 cmd_crtl2; |
156 | u16 cmd_crtl3; | 158 | u16 cmd_crtl3; |
157 | unsigned gpio:1; | ||
158 | }; | 159 | }; |
159 | 160 | ||
160 | static int ad7879_read(bus_device *, u8); | 161 | static int ad7879_read(bus_device *, u8); |
@@ -237,24 +238,6 @@ static irqreturn_t ad7879_irq(int irq, void *handle) | |||
237 | 238 | ||
238 | static void ad7879_setup(struct ad7879 *ts) | 239 | static void ad7879_setup(struct ad7879 *ts) |
239 | { | 240 | { |
240 | ts->cmd_crtl3 = AD7879_YPLUS_BIT | | ||
241 | AD7879_XPLUS_BIT | | ||
242 | AD7879_Z2_BIT | | ||
243 | AD7879_Z1_BIT | | ||
244 | AD7879_TEMPMASK_BIT | | ||
245 | AD7879_AUXVBATMASK_BIT | | ||
246 | AD7879_GPIOALERTMASK_BIT; | ||
247 | |||
248 | ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR | | ||
249 | AD7879_AVG(ts->averaging) | | ||
250 | AD7879_MFS(ts->median) | | ||
251 | AD7879_FCD(ts->first_conversion_delay) | | ||
252 | ts->gpio_init; | ||
253 | |||
254 | ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 | | ||
255 | AD7879_ACQ(ts->acquisition_time) | | ||
256 | AD7879_TMR(ts->pen_down_acc_interval); | ||
257 | |||
258 | ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); | 241 | ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); |
259 | ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3); | 242 | ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3); |
260 | ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1); | 243 | ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1); |
@@ -324,48 +307,132 @@ static ssize_t ad7879_disable_store(struct device *dev, | |||
324 | 307 | ||
325 | static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store); | 308 | static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store); |
326 | 309 | ||
327 | static ssize_t ad7879_gpio_show(struct device *dev, | 310 | static struct attribute *ad7879_attributes[] = { |
328 | struct device_attribute *attr, char *buf) | 311 | &dev_attr_disable.attr, |
312 | NULL | ||
313 | }; | ||
314 | |||
315 | static const struct attribute_group ad7879_attr_group = { | ||
316 | .attrs = ad7879_attributes, | ||
317 | }; | ||
318 | |||
319 | #ifdef CONFIG_GPIOLIB | ||
320 | static int ad7879_gpio_direction_input(struct gpio_chip *chip, | ||
321 | unsigned gpio) | ||
329 | { | 322 | { |
330 | struct ad7879 *ts = dev_get_drvdata(dev); | 323 | struct ad7879 *ts = container_of(chip, struct ad7879, gc); |
324 | int err; | ||
331 | 325 | ||
332 | return sprintf(buf, "%u\n", ts->gpio); | 326 | mutex_lock(&ts->mutex); |
327 | ts->cmd_crtl2 |= AD7879_GPIO_EN | AD7879_GPIODIR | AD7879_GPIOPOL; | ||
328 | err = ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); | ||
329 | mutex_unlock(&ts->mutex); | ||
330 | |||
331 | return err; | ||
333 | } | 332 | } |
334 | 333 | ||
335 | static ssize_t ad7879_gpio_store(struct device *dev, | 334 | static int ad7879_gpio_direction_output(struct gpio_chip *chip, |
336 | struct device_attribute *attr, | 335 | unsigned gpio, int level) |
337 | const char *buf, size_t count) | ||
338 | { | 336 | { |
339 | struct ad7879 *ts = dev_get_drvdata(dev); | 337 | struct ad7879 *ts = container_of(chip, struct ad7879, gc); |
340 | unsigned long val; | 338 | int err; |
341 | int error; | ||
342 | 339 | ||
343 | error = strict_strtoul(buf, 10, &val); | 340 | mutex_lock(&ts->mutex); |
344 | if (error) | 341 | ts->cmd_crtl2 &= ~AD7879_GPIODIR; |
345 | return error; | 342 | ts->cmd_crtl2 |= AD7879_GPIO_EN | AD7879_GPIOPOL; |
343 | if (level) | ||
344 | ts->cmd_crtl2 |= AD7879_GPIO_DATA; | ||
345 | else | ||
346 | ts->cmd_crtl2 &= ~AD7879_GPIO_DATA; | ||
347 | |||
348 | err = ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); | ||
349 | mutex_unlock(&ts->mutex); | ||
350 | |||
351 | return err; | ||
352 | } | ||
353 | |||
354 | static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio) | ||
355 | { | ||
356 | struct ad7879 *ts = container_of(chip, struct ad7879, gc); | ||
357 | u16 val; | ||
346 | 358 | ||
347 | mutex_lock(&ts->mutex); | 359 | mutex_lock(&ts->mutex); |
348 | ts->gpio = !!val; | 360 | val = ad7879_read(ts->bus, AD7879_REG_CTRL2); |
349 | error = ad7879_write(ts->bus, AD7879_REG_CTRL2, | ||
350 | ts->gpio ? | ||
351 | ts->cmd_crtl2 & ~AD7879_GPIO_DATA : | ||
352 | ts->cmd_crtl2 | AD7879_GPIO_DATA); | ||
353 | mutex_unlock(&ts->mutex); | 361 | mutex_unlock(&ts->mutex); |
354 | 362 | ||
355 | return error ? : count; | 363 | return !!(val & AD7879_GPIO_DATA); |
356 | } | 364 | } |
357 | 365 | ||
358 | static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store); | 366 | static void ad7879_gpio_set_value(struct gpio_chip *chip, |
367 | unsigned gpio, int value) | ||
368 | { | ||
369 | struct ad7879 *ts = container_of(chip, struct ad7879, gc); | ||
359 | 370 | ||
360 | static struct attribute *ad7879_attributes[] = { | 371 | mutex_lock(&ts->mutex); |
361 | &dev_attr_disable.attr, | 372 | if (value) |
362 | &dev_attr_gpio.attr, | 373 | ts->cmd_crtl2 |= AD7879_GPIO_DATA; |
363 | NULL | 374 | else |
364 | }; | 375 | ts->cmd_crtl2 &= ~AD7879_GPIO_DATA; |
365 | 376 | ||
366 | static const struct attribute_group ad7879_attr_group = { | 377 | ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); |
367 | .attrs = ad7879_attributes, | 378 | mutex_unlock(&ts->mutex); |
368 | }; | 379 | } |
380 | |||
381 | static int __devinit ad7879_gpio_add(struct device *dev) | ||
382 | { | ||
383 | struct ad7879 *ts = dev_get_drvdata(dev); | ||
384 | struct ad7879_platform_data *pdata = dev->platform_data; | ||
385 | int ret = 0; | ||
386 | |||
387 | if (pdata->gpio_export) { | ||
388 | ts->gc.direction_input = ad7879_gpio_direction_input; | ||
389 | ts->gc.direction_output = ad7879_gpio_direction_output; | ||
390 | ts->gc.get = ad7879_gpio_get_value; | ||
391 | ts->gc.set = ad7879_gpio_set_value; | ||
392 | ts->gc.can_sleep = 1; | ||
393 | ts->gc.base = pdata->gpio_base; | ||
394 | ts->gc.ngpio = 1; | ||
395 | ts->gc.label = "AD7879-GPIO"; | ||
396 | ts->gc.owner = THIS_MODULE; | ||
397 | ts->gc.dev = dev; | ||
398 | |||
399 | ret = gpiochip_add(&ts->gc); | ||
400 | if (ret) | ||
401 | dev_err(dev, "failed to register gpio %d\n", | ||
402 | ts->gc.base); | ||
403 | } | ||
404 | |||
405 | return ret; | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * We mark ad7879_gpio_remove inline so there is a chance the code | ||
410 | * gets discarded when not needed. We can't do __devinit/__devexit | ||
411 | * markup since it is used in both probe and remove methods. | ||
412 | */ | ||
413 | static inline void ad7879_gpio_remove(struct device *dev) | ||
414 | { | ||
415 | struct ad7879 *ts = dev_get_drvdata(dev); | ||
416 | struct ad7879_platform_data *pdata = dev->platform_data; | ||
417 | int ret; | ||
418 | |||
419 | if (pdata->gpio_export) { | ||
420 | ret = gpiochip_remove(&ts->gc); | ||
421 | if (ret) | ||
422 | dev_err(dev, "failed to remove gpio %d\n", | ||
423 | ts->gc.base); | ||
424 | } | ||
425 | } | ||
426 | #else | ||
427 | static inline int ad7879_gpio_add(struct device *dev) | ||
428 | { | ||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static inline void ad7879_gpio_remove(struct device *dev) | ||
433 | { | ||
434 | } | ||
435 | #endif | ||
369 | 436 | ||
370 | static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) | 437 | static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) |
371 | { | 438 | { |
@@ -403,12 +470,6 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) | |||
403 | ts->pen_down_acc_interval = pdata->pen_down_acc_interval; | 470 | ts->pen_down_acc_interval = pdata->pen_down_acc_interval; |
404 | ts->median = pdata->median; | 471 | ts->median = pdata->median; |
405 | 472 | ||
406 | if (pdata->gpio_output) | ||
407 | ts->gpio_init = AD7879_GPIO_EN | | ||
408 | (pdata->gpio_default ? 0 : AD7879_GPIO_DATA); | ||
409 | else | ||
410 | ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR; | ||
411 | |||
412 | snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev)); | 473 | snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev)); |
413 | 474 | ||
414 | input_dev->name = "AD7879 Touchscreen"; | 475 | input_dev->name = "AD7879 Touchscreen"; |
@@ -446,6 +507,23 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) | |||
446 | goto err_free_mem; | 507 | goto err_free_mem; |
447 | } | 508 | } |
448 | 509 | ||
510 | ts->cmd_crtl3 = AD7879_YPLUS_BIT | | ||
511 | AD7879_XPLUS_BIT | | ||
512 | AD7879_Z2_BIT | | ||
513 | AD7879_Z1_BIT | | ||
514 | AD7879_TEMPMASK_BIT | | ||
515 | AD7879_AUXVBATMASK_BIT | | ||
516 | AD7879_GPIOALERTMASK_BIT; | ||
517 | |||
518 | ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR | | ||
519 | AD7879_AVG(ts->averaging) | | ||
520 | AD7879_MFS(ts->median) | | ||
521 | AD7879_FCD(ts->first_conversion_delay); | ||
522 | |||
523 | ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 | | ||
524 | AD7879_ACQ(ts->acquisition_time) | | ||
525 | AD7879_TMR(ts->pen_down_acc_interval); | ||
526 | |||
449 | ad7879_setup(ts); | 527 | ad7879_setup(ts); |
450 | 528 | ||
451 | err = request_irq(bus->irq, ad7879_irq, | 529 | err = request_irq(bus->irq, ad7879_irq, |
@@ -460,15 +538,21 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) | |||
460 | if (err) | 538 | if (err) |
461 | goto err_free_irq; | 539 | goto err_free_irq; |
462 | 540 | ||
463 | err = input_register_device(input_dev); | 541 | err = ad7879_gpio_add(&bus->dev); |
464 | if (err) | 542 | if (err) |
465 | goto err_remove_attr; | 543 | goto err_remove_attr; |
466 | 544 | ||
545 | err = input_register_device(input_dev); | ||
546 | if (err) | ||
547 | goto err_remove_gpio; | ||
548 | |||
467 | dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n", | 549 | dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n", |
468 | revid >> 8, bus->irq); | 550 | revid >> 8, bus->irq); |
469 | 551 | ||
470 | return 0; | 552 | return 0; |
471 | 553 | ||
554 | err_remove_gpio: | ||
555 | ad7879_gpio_remove(&bus->dev); | ||
472 | err_remove_attr: | 556 | err_remove_attr: |
473 | sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group); | 557 | sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group); |
474 | err_free_irq: | 558 | err_free_irq: |
@@ -481,6 +565,7 @@ err_free_mem: | |||
481 | 565 | ||
482 | static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts) | 566 | static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts) |
483 | { | 567 | { |
568 | ad7879_gpio_remove(&bus->dev); | ||
484 | ad7879_disable(ts); | 569 | ad7879_disable(ts); |
485 | sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group); | 570 | sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group); |
486 | free_irq(ts->bus->irq, ts); | 571 | free_irq(ts->bus->irq, ts); |
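The ad7879 rework above replaces the ad-hoc sysfs "gpio" attribute with a proper gpio_chip embedded in the driver's private data, recovered in each callback via container_of(). A minimal sketch of that pattern under the gpiolib API of this era (gpiochip_add(), a .dev member rather than the later .parent); register access is stubbed out and all names are illustrative:

    #include <linux/gpio.h>
    #include <linux/bitops.h>
    #include <linux/module.h>
    #include <linux/mutex.h>

    struct demo_chip {
            struct gpio_chip gc;
            struct mutex lock;
            u16 ctrl_shadow;        /* cached control register */
    };

    static int demo_gpio_get(struct gpio_chip *chip, unsigned offset)
    {
            struct demo_chip *d = container_of(chip, struct demo_chip, gc);

            /* a real driver would read the data bit from hardware here */
            return !!(d->ctrl_shadow & BIT(6));
    }

    static void demo_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
    {
            struct demo_chip *d = container_of(chip, struct demo_chip, gc);

            mutex_lock(&d->lock);
            if (value)
                    d->ctrl_shadow |= BIT(6);
            else
                    d->ctrl_shadow &= ~BIT(6);
            /* write d->ctrl_shadow back to the device here */
            mutex_unlock(&d->lock);
    }

    static int demo_register(struct demo_chip *d, struct device *dev, int base)
    {
            d->gc.label = "demo-gpio";
            d->gc.owner = THIS_MODULE;
            d->gc.dev = dev;        /* ".parent" in later kernels */
            d->gc.base = base;      /* -1 lets gpiolib choose a base */
            d->gc.ngpio = 1;
            d->gc.can_sleep = 1;    /* accesses go over I2C/SPI */
            d->gc.get = demo_gpio_get;
            d->gc.set = demo_gpio_set;
            return gpiochip_add(&d->gc);
    }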
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 09a5e7341bd5..5256123a5228 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c | |||
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt) | |||
618 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH | 618 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH |
619 | static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) | 619 | static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) |
620 | { | 620 | { |
621 | dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ; | 621 | dev->x = (pkt[2] << 8) | pkt[1]; |
622 | dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ; | 622 | dev->y = (pkt[4] << 8) | pkt[3]; |
623 | dev->press = pkt[5] & 0xff; | 623 | dev->press = pkt[5] & 0xff; |
624 | dev->touch = pkt[0] & 0x01; | 624 | dev->touch = pkt[0] & 0x01; |
625 | 625 | ||
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { | |||
809 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH | 809 | #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH |
810 | [DEVTYPE_GENERAL_TOUCH] = { | 810 | [DEVTYPE_GENERAL_TOUCH] = { |
811 | .min_xc = 0x0, | 811 | .min_xc = 0x0, |
812 | .max_xc = 0x0500, | 812 | .max_xc = 0x7fff, |
813 | .min_yc = 0x0, | 813 | .min_yc = 0x0, |
814 | .max_yc = 0x0500, | 814 | .max_yc = 0x7fff, |
815 | .rept_size = 7, | 815 | .rept_size = 7, |
816 | .read_data = general_touch_read_data, | 816 | .read_data = general_touch_read_data, |
817 | }, | 817 | }, |
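The usbtouchscreen change above drops the "& 0x0F" masks that clipped GeneralTouch coordinates to 12 bits and raises max_xc/max_yc to 0x7fff so the full reported range reaches the input layer. A tiny standalone demonstration of the difference, with made-up packet bytes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical X-coordinate bytes from a GeneralTouch packet. */
            uint8_t lo = 0x34, hi = 0x51;

            /* Old decode: the 0x0F mask discarded everything above 12 bits. */
            unsigned old_x = ((hi & 0x0F) << 8) | lo;       /* 0x0134 */
            /* New decode: keep the whole high byte; values up to the new
             * max_xc/max_yc of 0x7fff pass through unclipped. */
            unsigned new_x = (hi << 8) | lo;                /* 0x5134 */

            printf("old=0x%04x new=0x%04x\n", old_x, new_x);
            return 0;
    }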
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 54abf9e303b7..f1c8cae70b4b 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c | |||
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, | |||
172 | { | 172 | { |
173 | int r = 0; | 173 | int r = 0; |
174 | size_t dummy = 0; | 174 | size_t dummy = 0; |
175 | int overhead_size = | 175 | int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg); |
176 | sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg); | ||
177 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; | 176 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; |
178 | struct receiving_pkg pkg; | 177 | struct receiving_pkg pkg; |
179 | 178 | ||
179 | /* | ||
180 | * Given the space needed to hold the 'struct cn_msg' and | ||
181 | * 'struct dm_ulog_request' - do we have enough payload | ||
182 | * space remaining? | ||
183 | */ | ||
180 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { | 184 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { |
181 | DMINFO("Size of tfr exceeds preallocated size"); | 185 | DMINFO("Size of tfr exceeds preallocated size"); |
182 | return -EINVAL; | 186 | return -EINVAL; |
@@ -191,7 +195,7 @@ resend: | |||
191 | */ | 195 | */ |
192 | mutex_lock(&dm_ulog_lock); | 196 | mutex_lock(&dm_ulog_lock); |
193 | 197 | ||
194 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); | 198 | memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); |
195 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); | 199 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); |
196 | tfr->luid = luid; | 200 | tfr->luid = luid; |
197 | tfr->seq = dm_ulog_seq++; | 201 | tfr->seq = dm_ulog_seq++; |
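The dm-log-userspace fix above replaces sizeof(struct dm_ulog_request *) with sizeof(struct dm_ulog_request): the former is only the size of a pointer, so the overhead was undercounted and oversized requests could slip past the preallocation check. A runnable illustration of the pitfall, using a stand-in struct rather than the real dm_ulog_request layout:

    #include <stdio.h>

    /* Stand-in only; the real struct dm_ulog_request is different. */
    struct demo_request {
            char uuid[129];
            unsigned long long luid;
            int request_type;
            unsigned data_size;
            char data[];
    };

    int main(void)
    {
            printf("sizeof(struct demo_request *) = %zu\n",
                   sizeof(struct demo_request *));  /* just a pointer: 4 or 8 */
            printf("sizeof(struct demo_request)   = %zu\n",
                   sizeof(struct demo_request));    /* the whole header */
            return 0;
    }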
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ad779bd13aec..6c1046df81f6 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
724 | /* | 724 | /* |
725 | * Dispatch io. | 725 | * Dispatch io. |
726 | */ | 726 | */ |
727 | if (unlikely(ms->log_failure)) { | 727 | if (unlikely(ms->log_failure) && errors_handled(ms)) { |
728 | spin_lock_irq(&ms->lock); | 728 | spin_lock_irq(&ms->lock); |
729 | bio_list_merge(&ms->failures, &sync); | 729 | bio_list_merge(&ms->failures, &sync); |
730 | spin_unlock_irq(&ms->lock); | 730 | spin_unlock_irq(&ms->lock); |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 5f19ceb6fe91..168bd38f5006 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success) | |||
660 | spin_lock_irq(&rh->region_lock); | 660 | spin_lock_irq(&rh->region_lock); |
661 | if (success) | 661 | if (success) |
662 | list_add(®->list, ®->rh->recovered_regions); | 662 | list_add(®->list, ®->rh->recovered_regions); |
663 | else { | 663 | else |
664 | reg->state = DM_RH_NOSYNC; | ||
665 | list_add(®->list, ®->rh->failed_recovered_regions); | 664 | list_add(®->list, ®->rh->failed_recovered_regions); |
666 | } | 665 | |
667 | spin_unlock_irq(&rh->region_lock); | 666 | spin_unlock_irq(&rh->region_lock); |
668 | 667 | ||
669 | rh->wakeup_workers(rh->context); | 668 | rh->wakeup_workers(rh->context); |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 7d08879689ac..c097d8a4823d 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, | |||
254 | * Issue the synchronous I/O from a different thread | 254 | * Issue the synchronous I/O from a different thread |
255 | * to avoid generic_make_request recursion. | 255 | * to avoid generic_make_request recursion. |
256 | */ | 256 | */ |
257 | INIT_WORK(&req.work, do_metadata); | 257 | INIT_WORK_ON_STACK(&req.work, do_metadata); |
258 | queue_work(ps->metadata_wq, &req.work); | 258 | queue_work(ps->metadata_wq, &req.work); |
259 | flush_workqueue(ps->metadata_wq); | 259 | flush_workqueue(ps->metadata_wq); |
260 | 260 | ||
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index e0efc1adcaff..bd58703ee8f6 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
110 | } | 110 | } |
111 | 111 | ||
112 | stripes = simple_strtoul(argv[0], &end, 10); | 112 | stripes = simple_strtoul(argv[0], &end, 10); |
113 | if (*end) { | 113 | if (!stripes || *end) { |
114 | ti->error = "Invalid stripe count"; | 114 | ti->error = "Invalid stripe count"; |
115 | return -EINVAL; | 115 | return -EINVAL; |
116 | } | 116 | } |
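The dm-stripe change above rejects a stripe count of zero in addition to trailing garbage after the number. A userspace analogue of the same validation, using strtoul's end pointer the way the target uses simple_strtoul's:

    #include <stdio.h>
    #include <stdlib.h>

    /* The argument must be a pure, non-zero decimal number. */
    static int parse_stripes(const char *arg, unsigned long *out)
    {
            char *end;
            unsigned long v = strtoul(arg, &end, 10);

            if (end == arg || *end != '\0' || v == 0)
                    return -1;      /* "", "8x" and "0" are all rejected */
            *out = v;
            return 0;
    }

    int main(void)
    {
            unsigned long n;
            const char *tests[] = { "4", "0", "8x", "" };

            for (int i = 0; i < 4; i++)
                    printf("%-4s -> %s\n", tests[i],
                           parse_stripes(tests[i], &n) ? "invalid" : "ok");
            return 0;
    }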
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index f53392df7b97..f91b40942e07 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c | |||
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = { | |||
80 | }; | 80 | }; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The sysfs structure is embedded in md struct, nothing to do here | ||
84 | */ | ||
85 | static void dm_sysfs_release(struct kobject *kobj) | ||
86 | { | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * dm kobject is embedded in mapped_device structure | 83 | * dm kobject is embedded in mapped_device structure |
91 | * no need to define release function here | 84 | * no need to define release function here |
92 | */ | 85 | */ |
93 | static struct kobj_type dm_ktype = { | 86 | static struct kobj_type dm_ktype = { |
94 | .sysfs_ops = &dm_sysfs_ops, | 87 | .sysfs_ops = &dm_sysfs_ops, |
95 | .default_attrs = dm_attrs, | 88 | .default_attrs = dm_attrs, |
96 | .release = dm_sysfs_release | ||
97 | }; | 89 | }; |
98 | 90 | ||
99 | /* | 91 | /* |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3167480b532c..aa4e2aa86d49 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) | |||
1595 | return BLKPREP_OK; | 1595 | return BLKPREP_OK; |
1596 | } | 1596 | } |
1597 | 1597 | ||
1598 | static void map_request(struct dm_target *ti, struct request *clone, | 1598 | /* |
1599 | struct mapped_device *md) | 1599 | * Returns: |
1600 | * 0 : the request has been processed (not requeued) | ||
1601 | * !0 : the request has been requeued | ||
1602 | */ | ||
1603 | static int map_request(struct dm_target *ti, struct request *clone, | ||
1604 | struct mapped_device *md) | ||
1600 | { | 1605 | { |
1601 | int r; | 1606 | int r, requeued = 0; |
1602 | struct dm_rq_target_io *tio = clone->end_io_data; | 1607 | struct dm_rq_target_io *tio = clone->end_io_data; |
1603 | 1608 | ||
1604 | /* | 1609 | /* |
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone, | |||
1625 | case DM_MAPIO_REQUEUE: | 1630 | case DM_MAPIO_REQUEUE: |
1626 | /* The target wants to requeue the I/O */ | 1631 | /* The target wants to requeue the I/O */ |
1627 | dm_requeue_unmapped_request(clone); | 1632 | dm_requeue_unmapped_request(clone); |
1633 | requeued = 1; | ||
1628 | break; | 1634 | break; |
1629 | default: | 1635 | default: |
1630 | if (r > 0) { | 1636 | if (r > 0) { |
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone, | |||
1636 | dm_kill_unmapped_request(clone, r); | 1642 | dm_kill_unmapped_request(clone, r); |
1637 | break; | 1643 | break; |
1638 | } | 1644 | } |
1645 | |||
1646 | return requeued; | ||
1639 | } | 1647 | } |
1640 | 1648 | ||
1641 | /* | 1649 | /* |
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q) | |||
1677 | atomic_inc(&md->pending[rq_data_dir(clone)]); | 1685 | atomic_inc(&md->pending[rq_data_dir(clone)]); |
1678 | 1686 | ||
1679 | spin_unlock(q->queue_lock); | 1687 | spin_unlock(q->queue_lock); |
1680 | map_request(ti, clone, md); | 1688 | if (map_request(ti, clone, md)) |
1689 | goto requeued; | ||
1690 | |||
1681 | spin_lock_irq(q->queue_lock); | 1691 | spin_lock_irq(q->queue_lock); |
1682 | } | 1692 | } |
1683 | 1693 | ||
1684 | goto out; | 1694 | goto out; |
1685 | 1695 | ||
1696 | requeued: | ||
1697 | spin_lock_irq(q->queue_lock); | ||
1698 | |||
1686 | plug_and_out: | 1699 | plug_and_out: |
1687 | if (!elv_queue_empty(q)) | 1700 | if (!elv_queue_empty(q)) |
1688 | /* Some requests still remain, retry later */ | 1701 | /* Some requests still remain, retry later */ |
diff --git a/drivers/md/md.c b/drivers/md/md.c index dd3dfe42d5a9..a20a71e5efd3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws) | |||
4075 | { | 4075 | { |
4076 | mddev_t *mddev = container_of(ws, mddev_t, del_work); | 4076 | mddev_t *mddev = container_of(ws, mddev_t, del_work); |
4077 | 4077 | ||
4078 | if (mddev->private == &md_redundancy_group) { | 4078 | if (mddev->private) { |
4079 | sysfs_remove_group(&mddev->kobj, &md_redundancy_group); | 4079 | sysfs_remove_group(&mddev->kobj, &md_redundancy_group); |
4080 | if (mddev->private != (void*)1) | ||
4081 | sysfs_remove_group(&mddev->kobj, mddev->private); | ||
4080 | if (mddev->sysfs_action) | 4082 | if (mddev->sysfs_action) |
4081 | sysfs_put(mddev->sysfs_action); | 4083 | sysfs_put(mddev->sysfs_action); |
4082 | mddev->sysfs_action = NULL; | 4084 | mddev->sysfs_action = NULL; |
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev) | |||
4287 | sysfs_notify_dirent(rdev->sysfs_state); | 4289 | sysfs_notify_dirent(rdev->sysfs_state); |
4288 | } | 4290 | } |
4289 | 4291 | ||
4290 | md_probe(mddev->unit, NULL, NULL); | ||
4291 | disk = mddev->gendisk; | 4292 | disk = mddev->gendisk; |
4292 | if (!disk) | ||
4293 | return -ENOMEM; | ||
4294 | 4293 | ||
4295 | spin_lock(&pers_lock); | 4294 | spin_lock(&pers_lock); |
4296 | pers = find_pers(mddev->level, mddev->clevel); | 4295 | pers = find_pers(mddev->level, mddev->clevel); |
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4530 | mddev->queue->unplug_fn = NULL; | 4529 | mddev->queue->unplug_fn = NULL; |
4531 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4530 | mddev->queue->backing_dev_info.congested_fn = NULL; |
4532 | module_put(mddev->pers->owner); | 4531 | module_put(mddev->pers->owner); |
4533 | if (mddev->pers->sync_request) | 4532 | if (mddev->pers->sync_request && mddev->private == NULL) |
4534 | mddev->private = &md_redundancy_group; | 4533 | mddev->private = (void*)1; |
4535 | mddev->pers = NULL; | 4534 | mddev->pers = NULL; |
4536 | /* tell userspace to handle 'inactive' */ | 4535 | /* tell userspace to handle 'inactive' */ |
4537 | sysfs_notify_dirent(mddev->sysfs_state); | 4536 | sysfs_notify_dirent(mddev->sysfs_state); |
@@ -4578,9 +4577,6 @@ out: | |||
4578 | } | 4577 | } |
4579 | mddev->bitmap_info.offset = 0; | 4578 | mddev->bitmap_info.offset = 0; |
4580 | 4579 | ||
4581 | /* make sure all md_delayed_delete calls have finished */ | ||
4582 | flush_scheduled_work(); | ||
4583 | |||
4584 | export_array(mddev); | 4580 | export_array(mddev); |
4585 | 4581 | ||
4586 | mddev->array_sectors = 0; | 4582 | mddev->array_sectors = 0; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e84204eb12df..ceb24afdc147 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev) | |||
5136 | mddev->thread = NULL; | 5136 | mddev->thread = NULL; |
5137 | mddev->queue->backing_dev_info.congested_fn = NULL; | 5137 | mddev->queue->backing_dev_info.congested_fn = NULL; |
5138 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 5138 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
5139 | sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); | ||
5140 | free_conf(conf); | 5139 | free_conf(conf); |
5141 | mddev->private = NULL; | 5140 | mddev->private = &raid5_attrs_group; |
5142 | return 0; | 5141 | return 0; |
5143 | } | 5142 | } |
5144 | 5143 | ||
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5464 | !test_bit(Faulty, &rdev->flags)) { | 5463 | !test_bit(Faulty, &rdev->flags)) { |
5465 | if (raid5_add_disk(mddev, rdev) == 0) { | 5464 | if (raid5_add_disk(mddev, rdev) == 0) { |
5466 | char nm[20]; | 5465 | char nm[20]; |
5467 | if (rdev->raid_disk >= conf->previous_raid_disks) | 5466 | if (rdev->raid_disk >= conf->previous_raid_disks) { |
5468 | set_bit(In_sync, &rdev->flags); | 5467 | set_bit(In_sync, &rdev->flags); |
5469 | else | 5468 | added_devices++; |
5469 | } else | ||
5470 | rdev->recovery_offset = 0; | 5470 | rdev->recovery_offset = 0; |
5471 | added_devices++; | ||
5472 | sprintf(nm, "rd%d", rdev->raid_disk); | 5471 | sprintf(nm, "rd%d", rdev->raid_disk); |
5473 | if (sysfs_create_link(&mddev->kobj, | 5472 | if (sysfs_create_link(&mddev->kobj, |
5474 | &rdev->kobj, nm)) | 5473 | &rdev->kobj, nm)) |
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5480 | break; | 5479 | break; |
5481 | } | 5480 | } |
5482 | 5481 | ||
5482 | /* When a reshape changes the number of devices, ->degraded | ||
5483 | * is measured against the larger of the pre and post number of | ||
5484 | * devices.*/ | ||
5483 | if (mddev->delta_disks > 0) { | 5485 | if (mddev->delta_disks > 0) { |
5484 | spin_lock_irqsave(&conf->device_lock, flags); | 5486 | spin_lock_irqsave(&conf->device_lock, flags); |
5485 | mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) | 5487 | mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) |
5486 | - added_devices; | 5488 | - added_devices; |
5487 | spin_unlock_irqrestore(&conf->device_lock, flags); | 5489 | spin_unlock_irqrestore(&conf->device_lock, flags); |
5488 | } | 5490 | } |
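The comment added in the raid5 hunk explains why ->degraded is now incremented rather than overwritten: during a grow it is measured against the larger (post-reshape) device count, so the shortfall from the reshape has to be added to whatever degradation already existed. A small worked example with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            /* Grow from 4 to 6 devices where only one spare could be added
             * and one member was already missing before the reshape. */
            int previous_raid_disks = 4, raid_disks = 6;
            int added_devices = 1, degraded_before = 1;

            int degraded = degraded_before +
                    (raid_disks - previous_raid_disks) - added_devices;

            /* 1 + (6 - 4) - 1 = 2: the pre-existing missing device plus the
             * one grown slot that has no disk yet. */
            printf("degraded = %d\n", degraded);
            return 0;
    }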
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c index becbaadb3b77..5ed75263340a 100644 --- a/drivers/media/common/saa7146_video.c +++ b/drivers/media/common/saa7146_video.c | |||
@@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) | |||
1333 | 1333 | ||
1334 | DEB_CAP(("vbuf:%p\n",vb)); | 1334 | DEB_CAP(("vbuf:%p\n",vb)); |
1335 | 1335 | ||
1336 | release_all_pagetables(dev, buf); | ||
1337 | |||
1338 | saa7146_dma_free(dev,q,buf); | 1336 | saa7146_dma_free(dev,q,buf); |
1337 | |||
1338 | release_all_pagetables(dev, buf); | ||
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | static struct videobuf_queue_ops video_qops = { | 1341 | static struct videobuf_queue_ops video_qops = { |
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index c37790ad92d0..9ddc57909d49 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c | |||
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file) | |||
761 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); | 761 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); |
762 | dmxdevfilter->type = DMXDEV_TYPE_NONE; | 762 | dmxdevfilter->type = DMXDEV_TYPE_NONE; |
763 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); | 763 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); |
764 | INIT_LIST_HEAD(&dmxdevfilter->feed.ts); | ||
765 | init_timer(&dmxdevfilter->timer); | 764 | init_timer(&dmxdevfilter->timer); |
766 | 765 | ||
767 | dvbdev->users++; | 766 | dvbdev->users++; |
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, | |||
887 | dmxdevfilter->type = DMXDEV_TYPE_PES; | 886 | dmxdevfilter->type = DMXDEV_TYPE_PES; |
888 | memcpy(&dmxdevfilter->params, params, | 887 | memcpy(&dmxdevfilter->params, params, |
889 | sizeof(struct dmx_pes_filter_params)); | 888 | sizeof(struct dmx_pes_filter_params)); |
889 | INIT_LIST_HEAD(&dmxdevfilter->feed.ts); | ||
890 | 890 | ||
891 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); | 891 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); |
892 | 892 | ||
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c index b78cfb7d1897..67f189b7aa1f 100644 --- a/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/drivers/media/dvb/dvb-core/dvb_demux.c | |||
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
426 | }; | 426 | }; |
427 | }; | 427 | }; |
428 | 428 | ||
429 | if (dvb_demux_tscheck) { | 429 | if (demux->cnt_storage) { |
430 | if (!demux->cnt_storage) | ||
431 | demux->cnt_storage = vmalloc(MAX_PID + 1); | ||
432 | |||
433 | if (!demux->cnt_storage) { | ||
434 | printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); | ||
435 | dvb_demux_tscheck = 0; | ||
436 | goto no_dvb_demux_tscheck; | ||
437 | } | ||
438 | |||
439 | /* check pkt counter */ | 430 | /* check pkt counter */ |
440 | if (pid < MAX_PID) { | 431 | if (pid < MAX_PID) { |
441 | if (buf[1] & 0x80) | 432 | if (buf[1] & 0x80) |
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
454 | }; | 445 | }; |
455 | /* end check */ | 446 | /* end check */ |
456 | }; | 447 | }; |
457 | no_dvb_demux_tscheck: | ||
458 | 448 | ||
459 | list_for_each_entry(feed, &demux->feed_list, list_head) { | 449 | list_for_each_entry(feed, &demux->feed_list, list_head) { |
460 | if ((feed->pid != pid) && (feed->pid != 0x2000)) | 450 | if ((feed->pid != pid) && (feed->pid != 0x2000)) |
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) | |||
1246 | dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); | 1236 | dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); |
1247 | if (!dvbdemux->feed) { | 1237 | if (!dvbdemux->feed) { |
1248 | vfree(dvbdemux->filter); | 1238 | vfree(dvbdemux->filter); |
1239 | dvbdemux->filter = NULL; | ||
1249 | return -ENOMEM; | 1240 | return -ENOMEM; |
1250 | } | 1241 | } |
1251 | for (i = 0; i < dvbdemux->filternum; i++) { | 1242 | for (i = 0; i < dvbdemux->filternum; i++) { |
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) | |||
1257 | dvbdemux->feed[i].index = i; | 1248 | dvbdemux->feed[i].index = i; |
1258 | } | 1249 | } |
1259 | 1250 | ||
1251 | if (dvb_demux_tscheck) { | ||
1252 | dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); | ||
1253 | |||
1254 | if (!dvbdemux->cnt_storage) | ||
1255 | printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n"); | ||
1256 | } | ||
1257 | |||
1260 | INIT_LIST_HEAD(&dvbdemux->frontend_list); | 1258 | INIT_LIST_HEAD(&dvbdemux->frontend_list); |
1261 | 1259 | ||
1262 | for (i = 0; i < DMX_TS_PES_OTHER; i++) { | 1260 | for (i = 0; i < DMX_TS_PES_OTHER; i++) { |
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 1b249897c9fb..465295b1d14b 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig | |||
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB | |||
112 | select DVB_MT352 if !DVB_FE_CUSTOMISE | 112 | select DVB_MT352 if !DVB_FE_CUSTOMISE |
113 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE | 113 | select DVB_ZL10353 if !DVB_FE_CUSTOMISE |
114 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE | 114 | select DVB_DIB7000P if !DVB_FE_CUSTOMISE |
115 | select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE | ||
116 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE | 115 | select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE |
116 | select DVB_ATBM8830 if !DVB_FE_CUSTOMISE | ||
117 | select DVB_LGS8GXX if !DVB_FE_CUSTOMISE | ||
117 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE | 118 | select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE |
118 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE | 119 | select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE |
119 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE | 120 | select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE |
121 | select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE | ||
120 | help | 122 | help |
121 | Say Y here to support the Conexant USB2.0 hybrid reference design. | 123 | Say Y here to support the Conexant USB2.0 hybrid reference design. |
122 | Currently, only DVB and ATSC modes are supported, analog mode | 124 | Currently, only DVB and ATSC modes are supported, analog mode |
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c index 3051b64aa17c..445fa1068064 100644 --- a/drivers/media/dvb/frontends/l64781.c +++ b/drivers/media/dvb/frontends/l64781.c | |||
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa | |||
192 | spi_bias *= qam_tab[p->constellation]; | 192 | spi_bias *= qam_tab[p->constellation]; |
193 | spi_bias /= p->code_rate_HP + 1; | 193 | spi_bias /= p->code_rate_HP + 1; |
194 | spi_bias /= (guard_tab[p->guard_interval] + 32); | 194 | spi_bias /= (guard_tab[p->guard_interval] + 32); |
195 | spi_bias *= 1000ULL; | 195 | spi_bias *= 1000; |
196 | spi_bias /= 1000ULL + ppm/1000; | 196 | spi_bias /= 1000 + ppm/1000; |
197 | spi_bias *= p->code_rate_HP; | 197 | spi_bias *= p->code_rate_HP; |
198 | 198 | ||
199 | val0x04 = (p->transmission_mode << 2) | p->guard_interval; | 199 | val0x04 = (p->transmission_mode << 2) | p->guard_interval; |
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 3182a406bdd1..ae08b077fd04 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c | |||
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev, | |||
4461 | request_modules(btv); | 4461 | request_modules(btv); |
4462 | } | 4462 | } |
4463 | 4463 | ||
4464 | init_bttv_i2c_ir(btv); | ||
4464 | bttv_input_init(btv); | 4465 | bttv_input_init(btv); |
4465 | 4466 | ||
4466 | /* everything is fine */ | 4467 | /* everything is fine */ |
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c index 63aa31a041e8..407fa61e4cda 100644 --- a/drivers/media/video/bt8xx/bttv-i2c.c +++ b/drivers/media/video/bt8xx/bttv-i2c.c | |||
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv) | |||
388 | if (0 == btv->i2c_rc && i2c_scan) | 388 | if (0 == btv->i2c_rc && i2c_scan) |
389 | do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); | 389 | do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); |
390 | 390 | ||
391 | /* Instantiate the IR receiver device, if present */ | 391 | return btv->i2c_rc; |
392 | } | ||
393 | |||
394 | /* Instantiate the I2C IR receiver device, if present */ | ||
395 | void __devinit init_bttv_i2c_ir(struct bttv *btv) | ||
396 | { | ||
392 | if (0 == btv->i2c_rc) { | 397 | if (0 == btv->i2c_rc) { |
393 | struct i2c_board_info info; | 398 | struct i2c_board_info info; |
394 | /* The external IR receiver is at i2c address 0x34 (0x35 for | 399 | /* The external IR receiver is at i2c address 0x34 (0x35 for |
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv) | |||
408 | strlcpy(info.type, "ir_video", I2C_NAME_SIZE); | 413 | strlcpy(info.type, "ir_video", I2C_NAME_SIZE); |
409 | i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); | 414 | i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); |
410 | } | 415 | } |
411 | return btv->i2c_rc; | ||
412 | } | 416 | } |
413 | 417 | ||
414 | int __devexit fini_bttv_i2c(struct bttv *btv) | 418 | int __devexit fini_bttv_i2c(struct bttv *btv) |
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h index a1d0e9c9f286..6cccc2a17eee 100644 --- a/drivers/media/video/bt8xx/bttvp.h +++ b/drivers/media/video/bt8xx/bttvp.h | |||
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug; | |||
279 | extern unsigned int bttv_gpio; | 279 | extern unsigned int bttv_gpio; |
280 | extern void bttv_gpio_tracking(struct bttv *btv, char *comment); | 280 | extern void bttv_gpio_tracking(struct bttv *btv, char *comment); |
281 | extern int init_bttv_i2c(struct bttv *btv); | 281 | extern int init_bttv_i2c(struct bttv *btv); |
282 | extern void init_bttv_i2c_ir(struct bttv *btv); | ||
282 | extern int fini_bttv_i2c(struct bttv *btv); | 283 | extern int fini_bttv_i2c(struct bttv *btv); |
283 | 284 | ||
284 | #define bttv_printk if (bttv_verbose) printk | 285 | #define bttv_printk if (bttv_verbose) printk |
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c index fc4dd6045720..7438f8d775ba 100644 --- a/drivers/media/video/mt9t112.c +++ b/drivers/media/video/mt9t112.c | |||
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client) | |||
514 | /* poll to verify out of standby. Must Poll this bit */ | 514 | /* poll to verify out of standby. Must Poll this bit */ |
515 | for (i = 0; i < 100; i++) { | 515 | for (i = 0; i < 100; i++) { |
516 | mt9t112_reg_read(data, client, 0x0018); | 516 | mt9t112_reg_read(data, client, 0x0018); |
517 | if (0x4000 & data) | 517 | if (!(0x4000 & data)) |
518 | break; | 518 | break; |
519 | 519 | ||
520 | mdelay(10); | 520 | mdelay(10); |
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c index 50b415e07eda..f7f7e04cf485 100644 --- a/drivers/media/video/pwc/pwc-ctrl.c +++ b/drivers/media/video/pwc/pwc-ctrl.c | |||
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value) | |||
753 | buf[0] = 0xff; /* fixed */ | 753 | buf[0] = 0xff; /* fixed */ |
754 | 754 | ||
755 | ret = send_control_msg(pdev, | 755 | ret = send_control_msg(pdev, |
756 | SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf)); | 756 | SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1); |
757 | 757 | ||
758 | if (!mode && ret >= 0) { | 758 | if (!mode && ret >= 0) { |
759 | if (value < 0) | 759 | if (value < 0) |
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index 7dfecfc6017c..ee5bff02a92c 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c | |||
@@ -93,9 +93,9 @@ static int ts_open(struct file *file) | |||
93 | dprintk("open dev=%s\n", video_device_node_name(vdev)); | 93 | dprintk("open dev=%s\n", video_device_node_name(vdev)); |
94 | err = -EBUSY; | 94 | err = -EBUSY; |
95 | if (!mutex_trylock(&dev->empress_tsq.vb_lock)) | 95 | if (!mutex_trylock(&dev->empress_tsq.vb_lock)) |
96 | goto done; | 96 | return err; |
97 | if (atomic_read(&dev->empress_users)) | 97 | if (atomic_read(&dev->empress_users)) |
98 | goto done_up; | 98 | goto done; |
99 | 99 | ||
100 | /* Unmute audio */ | 100 | /* Unmute audio */ |
101 | saa_writeb(SAA7134_AUDIO_MUTE_CTRL, | 101 | saa_writeb(SAA7134_AUDIO_MUTE_CTRL, |
@@ -105,10 +105,8 @@ static int ts_open(struct file *file) | |||
105 | file->private_data = dev; | 105 | file->private_data = dev; |
106 | err = 0; | 106 | err = 0; |
107 | 107 | ||
108 | done_up: | ||
109 | mutex_unlock(&dev->empress_tsq.vb_lock); | ||
110 | done: | 108 | done: |
111 | unlock_kernel(); | 109 | mutex_unlock(&dev->empress_tsq.vb_lock); |
112 | return err; | 110 | return err; |
113 | } | 111 | } |
114 | 112 | ||
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 85bc6a685e36..44d2037e9e56 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc) | |||
4330 | 4330 | ||
4331 | if (ioc->bus_type == SPI) | 4331 | if (ioc->bus_type == SPI) |
4332 | num_chain *= MPT_SCSI_CAN_QUEUE; | 4332 | num_chain *= MPT_SCSI_CAN_QUEUE; |
4333 | else if (ioc->bus_type == SAS) | ||
4334 | num_chain *= MPT_SAS_CAN_QUEUE; | ||
4333 | else | 4335 | else |
4334 | num_chain *= MPT_FC_CAN_QUEUE; | 4336 | num_chain *= MPT_FC_CAN_QUEUE; |
4335 | 4337 | ||
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 57752751712b..81279b3d694c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) | |||
1796 | dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " | 1796 | dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " |
1797 | "Command not in the active list! (sc=%p)\n", ioc->name, | 1797 | "Command not in the active list! (sc=%p)\n", ioc->name, |
1798 | SCpnt)); | 1798 | SCpnt)); |
1799 | retval = 0; | 1799 | retval = SUCCESS; |
1800 | goto out; | 1800 | goto out; |
1801 | } | 1801 | } |
1802 | 1802 | ||
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 8f0d18409ede..e09eb4870db6 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_MFD_SM501) += sm501.o | 5 | obj-$(CONFIG_MFD_SM501) += sm501.o |
6 | obj-$(CONFIG_MFD_ASIC3) += asic3.o | 6 | obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o |
7 | obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o | 7 | obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o |
8 | 8 | ||
9 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o | 9 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o |
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index b9f1e84897cc..e7f8027165e6 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, | |||
74 | } | 74 | } |
75 | 75 | ||
76 | mrq->cmd->arg = dev_addr; | 76 | mrq->cmd->arg = dev_addr; |
77 | if (!mmc_card_blockaddr(test->card)) | ||
78 | mrq->cmd->arg <<= 9; | ||
79 | |||
77 | mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; | 80 | mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; |
78 | 81 | ||
79 | if (blocks == 1) | 82 | if (blocks == 1) |
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write) | |||
190 | } | 193 | } |
191 | 194 | ||
192 | for (i = 0;i < BUFFER_SIZE / 512;i++) { | 195 | for (i = 0;i < BUFFER_SIZE / 512;i++) { |
193 | ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); | 196 | ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); |
194 | if (ret) | 197 | if (ret) |
195 | return ret; | 198 | return ret; |
196 | } | 199 | } |
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test) | |||
219 | memset(test->buffer, 0, 512); | 222 | memset(test->buffer, 0, 512); |
220 | 223 | ||
221 | for (i = 0;i < BUFFER_SIZE / 512;i++) { | 224 | for (i = 0;i < BUFFER_SIZE / 512;i++) { |
222 | ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); | 225 | ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); |
223 | if (ret) | 226 | if (ret) |
224 | return ret; | 227 | return ret; |
225 | } | 228 | } |
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test, | |||
426 | for (i = 0;i < sectors;i++) { | 429 | for (i = 0;i < sectors;i++) { |
427 | ret = mmc_test_buffer_transfer(test, | 430 | ret = mmc_test_buffer_transfer(test, |
428 | test->buffer + i * 512, | 431 | test->buffer + i * 512, |
429 | dev_addr + i * 512, 512, 0); | 432 | dev_addr + i, 512, 0); |
430 | if (ret) | 433 | if (ret) |
431 | return ret; | 434 | return ret; |
432 | } | 435 | } |
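The mmc_test changes above account for card addressing: standard-capacity cards take byte offsets in the data-command argument, while high-capacity (block-addressed) cards take sector numbers, so the sector index is shifted left by 9 only when mmc_card_blockaddr() is false. A minimal sketch of that conversion with an assumed helper name:

    #include <stdio.h>

    /* Convert a sector number into the command argument the card expects.
     * Mirrors the mmc_card_blockaddr() test in the patch; illustrative only. */
    static unsigned int mmc_arg(unsigned int sector, int card_is_blockaddr)
    {
            return card_is_blockaddr ? sector : sector << 9;
    }

    int main(void)
    {
            unsigned int sector = 3;

            printf("block-addressed card: arg = %u\n", mmc_arg(sector, 1));
            printf("byte-addressed card:  arg = %u\n", mmc_arg(sector, 0));
            return 0;
    }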
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 4c364d44ad59..2de0cc823d60 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -549,4 +549,21 @@ config MTD_VMU | |||
549 | To build this as a module select M here, the module will be called | 549 | To build this as a module select M here, the module will be called |
550 | vmu-flash. | 550 | vmu-flash. |
551 | 551 | ||
552 | config MTD_PISMO | ||
553 | tristate "MTD discovery driver for PISMO modules" | ||
554 | depends on I2C | ||
555 | depends on ARCH_VERSATILE | ||
556 | help | ||
557 | This driver allows for discovery of PISMO modules - see | ||
558 | <http://www.pismoworld.org/>. These are small modules containing | ||
559 | up to five memory devices (eg, SRAM, flash, DOC) described by an | ||
560 | I2C EEPROM. | ||
561 | |||
562 | This driver does not create any MTD maps itself; instead it | ||
563 | creates MTD physmap and MTD SRAM platform devices. If you | ||
564 | enable this option, you should consider enabling MTD_PHYSMAP | ||
565 | and/or MTD_PLATRAM according to the devices on your module. | ||
566 | |||
567 | When built as a module, it will be called pismo.ko | ||
568 | |||
552 | endmenu | 569 | endmenu |
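As the help text notes, the driver only discovers the devices described by the module's EEPROM; the board code still has to say where each chip-select window lives and how to drive VPP. A hedged board-file sketch of registering the I2C device with platform data: the struct pismo_pdata field names are inferred from how pismo_probe() below dereferences its platform_data, and the "pismo" device name, 0x50 address, bus number and chip-select addresses are assumptions for illustration only.

    #include <linux/i2c.h>
    #include <linux/init.h>
    #include <linux/mtd/pismo.h>

    static void board_pismo_set_vpp(void *data, int on)
    {
            /* toggle the board's VPP supply (regulator or GPIO) here */
    }

    static struct pismo_pdata board_pismo_pdata = {
            .set_vpp  = board_pismo_set_vpp,
            .vpp_data = NULL,
            .cs_addrs = { 0x40000000, 0x44000000, 0x48000000,
                          0x4c000000, 0x50000000 },
    };

    static struct i2c_board_info board_i2c_devs[] __initdata = {
            {
                    I2C_BOARD_INFO("pismo", 0x50),
                    .platform_data = &board_pismo_pdata,
            },
    };

    /* In the board init code:
     *      i2c_register_board_info(0, board_i2c_devs,
     *                              ARRAY_SIZE(board_i2c_devs));
     */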
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c new file mode 100644 index 000000000000..c48cad271f5d --- /dev/null +++ b/drivers/mtd/maps/pismo.c | |||
@@ -0,0 +1,320 @@ | |||
1 | /* | ||
2 | * PISMO memory driver - http://www.pismoworld.org/ | ||
3 | * | ||
4 | * For ARM Realview and Versatile platforms | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/i2c.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/mtd/physmap.h> | ||
17 | #include <linux/mtd/plat-ram.h> | ||
18 | #include <linux/mtd/pismo.h> | ||
19 | |||
20 | #define PISMO_NUM_CS 5 | ||
21 | |||
22 | struct pismo_cs_block { | ||
23 | u8 type; | ||
24 | u8 width; | ||
25 | __le16 access; | ||
26 | __le32 size; | ||
27 | u32 reserved[2]; | ||
28 | char device[32]; | ||
29 | } __packed; | ||
30 | |||
31 | struct pismo_eeprom { | ||
32 | struct pismo_cs_block cs[PISMO_NUM_CS]; | ||
33 | char board[15]; | ||
34 | u8 sum; | ||
35 | } __packed; | ||
36 | |||
37 | struct pismo_mem { | ||
38 | phys_addr_t base; | ||
39 | u32 size; | ||
40 | u16 access; | ||
41 | u8 width; | ||
42 | u8 type; | ||
43 | }; | ||
44 | |||
45 | struct pismo_data { | ||
46 | struct i2c_client *client; | ||
47 | void (*vpp)(void *, int); | ||
48 | void *vpp_data; | ||
49 | struct platform_device *dev[PISMO_NUM_CS]; | ||
50 | }; | ||
51 | |||
52 | /* FIXME: set_vpp could do with a better calling convention */ | ||
53 | static struct pismo_data *vpp_pismo; | ||
54 | static DEFINE_MUTEX(pismo_mutex); | ||
55 | |||
56 | static int pismo_setvpp_probe_fix(struct pismo_data *pismo) | ||
57 | { | ||
58 | mutex_lock(&pismo_mutex); | ||
59 | if (vpp_pismo) { | ||
60 | mutex_unlock(&pismo_mutex); | ||
61 | kfree(pismo); | ||
62 | return -EBUSY; | ||
63 | } | ||
64 | vpp_pismo = pismo; | ||
65 | mutex_unlock(&pismo_mutex); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void pismo_setvpp_remove_fix(struct pismo_data *pismo) | ||
70 | { | ||
71 | mutex_lock(&pismo_mutex); | ||
72 | if (vpp_pismo == pismo) | ||
73 | vpp_pismo = NULL; | ||
74 | mutex_unlock(&pismo_mutex); | ||
75 | } | ||
76 | |||
77 | static void pismo_set_vpp(struct map_info *map, int on) | ||
78 | { | ||
79 | struct pismo_data *pismo = vpp_pismo; | ||
80 | |||
81 | pismo->vpp(pismo->vpp_data, on); | ||
82 | } | ||
83 | /* end of hack */ | ||
84 | |||
85 | |||
86 | static unsigned int __devinit pismo_width_to_bytes(unsigned int width) | ||
87 | { | ||
88 | width &= 15; | ||
89 | if (width > 2) | ||
90 | return 0; | ||
91 | return 1 << width; | ||
92 | } | ||
93 | |||
94 | static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf, | ||
95 | u8 addr, size_t size) | ||
96 | { | ||
97 | int ret; | ||
98 | struct i2c_msg msg[] = { | ||
99 | { | ||
100 | .addr = client->addr, | ||
101 | .len = sizeof(addr), | ||
102 | .buf = &addr, | ||
103 | }, { | ||
104 | .addr = client->addr, | ||
105 | .flags = I2C_M_RD, | ||
106 | .len = size, | ||
107 | .buf = buf, | ||
108 | }, | ||
109 | }; | ||
110 | |||
111 | ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); | ||
112 | |||
113 | return ret == ARRAY_SIZE(msg) ? size : -EIO; | ||
114 | } | ||
115 | |||
116 | static int __devinit pismo_add_device(struct pismo_data *pismo, int i, | ||
117 | struct pismo_mem *region, const char *name, void *pdata, size_t psize) | ||
118 | { | ||
119 | struct platform_device *dev; | ||
120 | struct resource res = { }; | ||
121 | phys_addr_t base = region->base; | ||
122 | int ret; | ||
123 | |||
124 | if (base == ~0) | ||
125 | return -ENXIO; | ||
126 | |||
127 | res.start = base; | ||
128 | res.end = base + region->size - 1; | ||
129 | res.flags = IORESOURCE_MEM; | ||
130 | |||
131 | dev = platform_device_alloc(name, i); | ||
132 | if (!dev) | ||
133 | return -ENOMEM; | ||
134 | dev->dev.parent = &pismo->client->dev; | ||
135 | |||
136 | do { | ||
137 | ret = platform_device_add_resources(dev, &res, 1); | ||
138 | if (ret) | ||
139 | break; | ||
140 | |||
141 | ret = platform_device_add_data(dev, pdata, psize); | ||
142 | if (ret) | ||
143 | break; | ||
144 | |||
145 | ret = platform_device_add(dev); | ||
146 | if (ret) | ||
147 | break; | ||
148 | |||
149 | pismo->dev[i] = dev; | ||
150 | return 0; | ||
151 | } while (0); | ||
152 | |||
153 | platform_device_put(dev); | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static int __devinit pismo_add_nor(struct pismo_data *pismo, int i, | ||
158 | struct pismo_mem *region) | ||
159 | { | ||
160 | struct physmap_flash_data data = { | ||
161 | .width = region->width, | ||
162 | }; | ||
163 | |||
164 | if (pismo->vpp) | ||
165 | data.set_vpp = pismo_set_vpp; | ||
166 | |||
167 | return pismo_add_device(pismo, i, region, "physmap-flash", | ||
168 | &data, sizeof(data)); | ||
169 | } | ||
170 | |||
171 | static int __devinit pismo_add_sram(struct pismo_data *pismo, int i, | ||
172 | struct pismo_mem *region) | ||
173 | { | ||
174 | struct platdata_mtd_ram data = { | ||
175 | .bankwidth = region->width, | ||
176 | }; | ||
177 | |||
178 | return pismo_add_device(pismo, i, region, "mtd-ram", | ||
179 | &data, sizeof(data)); | ||
180 | } | ||
181 | |||
182 | static void __devinit pismo_add_one(struct pismo_data *pismo, int i, | ||
183 | const struct pismo_cs_block *cs, phys_addr_t base) | ||
184 | { | ||
185 | struct device *dev = &pismo->client->dev; | ||
186 | struct pismo_mem region; | ||
187 | |||
188 | region.base = base; | ||
189 | region.type = cs->type; | ||
190 | region.width = pismo_width_to_bytes(cs->width); | ||
191 | region.access = le16_to_cpu(cs->access); | ||
192 | region.size = le32_to_cpu(cs->size); | ||
193 | |||
194 | if (region.width == 0) { | ||
195 | dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width); | ||
196 | return; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * FIXME: may need to program the platform's memory controller here, but | ||
201 | * at the moment we assume that it has already been set up correctly. | ||
202 | * The memory controller could also tell us the base address. | ||
203 | */ | ||
204 | |||
205 | dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n", | ||
206 | i, cs->device, region.type, region.access, region.size / 1024); | ||
207 | |||
208 | switch (region.type) { | ||
209 | case 0: | ||
210 | break; | ||
211 | case 1: | ||
212 | /* static DOC */ | ||
213 | break; | ||
214 | case 2: | ||
215 | /* static NOR */ | ||
216 | pismo_add_nor(pismo, i, ®ion); | ||
217 | break; | ||
218 | case 3: | ||
219 | /* static RAM */ | ||
220 | pismo_add_sram(pismo, i, ®ion); | ||
221 | break; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static int __devexit pismo_remove(struct i2c_client *client) | ||
226 | { | ||
227 | struct pismo_data *pismo = i2c_get_clientdata(client); | ||
228 | int i; | ||
229 | |||
230 | for (i = 0; i < ARRAY_SIZE(pismo->dev); i++) | ||
231 | platform_device_unregister(pismo->dev[i]); | ||
232 | |||
233 | /* FIXME: set_vpp needs saner arguments */ | ||
234 | pismo_setvpp_remove_fix(pismo); | ||
235 | |||
236 | kfree(pismo); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int __devinit pismo_probe(struct i2c_client *client, | ||
242 | const struct i2c_device_id *id) | ||
243 | { | ||
244 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); | ||
245 | struct pismo_pdata *pdata = client->dev.platform_data; | ||
246 | struct pismo_eeprom eeprom; | ||
247 | struct pismo_data *pismo; | ||
248 | int ret, i; | ||
249 | |||
250 | if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { | ||
251 | dev_err(&client->dev, "functionality mismatch\n"); | ||
252 | return -EIO; | ||
253 | } | ||
254 | |||
255 | pismo = kzalloc(sizeof(*pismo), GFP_KERNEL); | ||
256 | if (!pismo) | ||
257 | return -ENOMEM; | ||
258 | |||
259 | /* FIXME: set_vpp needs saner arguments */ | ||
260 | ret = pismo_setvpp_probe_fix(pismo); | ||
261 | if (ret) | ||
262 | return ret; | ||
263 | |||
264 | pismo->client = client; | ||
265 | if (pdata) { | ||
266 | pismo->vpp = pdata->set_vpp; | ||
267 | pismo->vpp_data = pdata->vpp_data; | ||
268 | } | ||
269 | i2c_set_clientdata(client, pismo); | ||
270 | |||
271 | ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom)); | ||
272 | if (ret < 0) { | ||
273 | dev_err(&client->dev, "error reading EEPROM: %d\n", ret); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | dev_info(&client->dev, "%.15s board found\n", eeprom.board); | ||
278 | |||
279 | for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++) | ||
280 | if (eeprom.cs[i].type != 0xff) | ||
281 | pismo_add_one(pismo, i, &eeprom.cs[i], | ||
282 | pdata->cs_addrs[i]); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static const struct i2c_device_id pismo_id[] = { | ||
288 | { "pismo" }, | ||
289 | { }, | ||
290 | }; | ||
291 | MODULE_DEVICE_TABLE(i2c, pismo_id); | ||
292 | |||
293 | static struct i2c_driver pismo_driver = { | ||
294 | .driver = { | ||
295 | .name = "pismo", | ||
296 | .owner = THIS_MODULE, | ||
297 | }, | ||
298 | .probe = pismo_probe, | ||
299 | .remove = __devexit_p(pismo_remove), | ||
300 | .id_table = pismo_id, | ||
301 | }; | ||
302 | |||
303 | static int __init pismo_init(void) | ||
304 | { | ||
305 | BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48); | ||
306 | BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256); | ||
307 | |||
308 | return i2c_add_driver(&pismo_driver); | ||
309 | } | ||
310 | module_init(pismo_init); | ||
311 | |||
312 | static void __exit pismo_exit(void) | ||
313 | { | ||
314 | i2c_del_driver(&pismo_driver); | ||
315 | } | ||
316 | module_exit(pismo_exit); | ||
317 | |||
318 | MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>"); | ||
319 | MODULE_DESCRIPTION("PISMO memory driver"); | ||
320 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index a714ec482761..92e12df0917f 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -322,7 +322,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, | |||
322 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); | 322 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); |
323 | 323 | ||
324 | /* Panics must be written immediately */ | 324 | /* Panics must be written immediately */ |
325 | if (reason == KMSG_DUMP_PANIC) { | 325 | if (reason != KMSG_DUMP_OOPS) { |
326 | if (!cxt->mtd->panic_write) | 326 | if (!cxt->mtd->panic_write) |
327 | printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); | 327 | printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); |
328 | else | 328 | else |
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c index 79fc4530987b..25c5dd03a837 100644 --- a/drivers/mtd/tests/mtd_readtest.c +++ b/drivers/mtd/tests/mtd_readtest.c | |||
@@ -147,6 +147,10 @@ static int scan_for_bad_eraseblocks(void) | |||
147 | } | 147 | } |
148 | memset(bbt, 0 , ebcnt); | 148 | memset(bbt, 0 , ebcnt); |
149 | 149 | ||
150 | /* NOR flash does not implement block_isbad */ | ||
151 | if (mtd->block_isbad == NULL) | ||
152 | return 0; | ||
153 | |||
150 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 154 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
151 | for (i = 0; i < ebcnt; ++i) { | 155 | for (i = 0; i < ebcnt; ++i) { |
152 | bbt[i] = is_block_bad(i) ? 1 : 0; | 156 | bbt[i] = is_block_bad(i) ? 1 : 0; |
@@ -184,7 +188,7 @@ static int __init mtd_readtest_init(void) | |||
184 | tmp = mtd->size; | 188 | tmp = mtd->size; |
185 | do_div(tmp, mtd->erasesize); | 189 | do_div(tmp, mtd->erasesize); |
186 | ebcnt = tmp; | 190 | ebcnt = tmp; |
187 | pgcnt = mtd->erasesize / mtd->writesize; | 191 | pgcnt = mtd->erasesize / pgsize; |
188 | 192 | ||
189 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 193 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
190 | "page size %u, count of eraseblocks %u, pages per " | 194 | "page size %u, count of eraseblocks %u, pages per " |
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 141363a7e805..7fbb51d4eabe 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -301,6 +301,10 @@ static int scan_for_bad_eraseblocks(void) | |||
301 | } | 301 | } |
302 | memset(bbt, 0 , ebcnt); | 302 | memset(bbt, 0 , ebcnt); |
303 | 303 | ||
304 | /* NOR flash does not implement block_isbad */ | ||
305 | if (mtd->block_isbad == NULL) | ||
306 | goto out; | ||
307 | |||
304 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 308 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
305 | for (i = 0; i < ebcnt; ++i) { | 309 | for (i = 0; i < ebcnt; ++i) { |
306 | bbt[i] = is_block_bad(i) ? 1 : 0; | 310 | bbt[i] = is_block_bad(i) ? 1 : 0; |
@@ -309,6 +313,7 @@ static int scan_for_bad_eraseblocks(void) | |||
309 | cond_resched(); | 313 | cond_resched(); |
310 | } | 314 | } |
311 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | 315 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); |
316 | out: | ||
312 | goodebcnt = ebcnt - bad; | 317 | goodebcnt = ebcnt - bad; |
313 | return 0; | 318 | return 0; |
314 | } | 319 | } |
@@ -340,7 +345,7 @@ static int __init mtd_speedtest_init(void) | |||
340 | tmp = mtd->size; | 345 | tmp = mtd->size; |
341 | do_div(tmp, mtd->erasesize); | 346 | do_div(tmp, mtd->erasesize); |
342 | ebcnt = tmp; | 347 | ebcnt = tmp; |
343 | pgcnt = mtd->erasesize / mtd->writesize; | 348 | pgcnt = mtd->erasesize / pgsize; |
344 | 349 | ||
345 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 350 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
346 | "page size %u, count of eraseblocks %u, pages per " | 351 | "page size %u, count of eraseblocks %u, pages per " |
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c index 63920476b57a..a99d3cd737d8 100644 --- a/drivers/mtd/tests/mtd_stresstest.c +++ b/drivers/mtd/tests/mtd_stresstest.c | |||
@@ -227,6 +227,10 @@ static int scan_for_bad_eraseblocks(void) | |||
227 | } | 227 | } |
228 | memset(bbt, 0 , ebcnt); | 228 | memset(bbt, 0 , ebcnt); |
229 | 229 | ||
230 | /* NOR flash does not implement block_isbad */ | ||
231 | if (mtd->block_isbad == NULL) | ||
232 | return 0; | ||
233 | |||
230 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 234 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
231 | for (i = 0; i < ebcnt; ++i) { | 235 | for (i = 0; i < ebcnt; ++i) { |
232 | bbt[i] = is_block_bad(i) ? 1 : 0; | 236 | bbt[i] = is_block_bad(i) ? 1 : 0; |
@@ -265,7 +269,7 @@ static int __init mtd_stresstest_init(void) | |||
265 | tmp = mtd->size; | 269 | tmp = mtd->size; |
266 | do_div(tmp, mtd->erasesize); | 270 | do_div(tmp, mtd->erasesize); |
267 | ebcnt = tmp; | 271 | ebcnt = tmp; |
268 | pgcnt = mtd->erasesize / mtd->writesize; | 272 | pgcnt = mtd->erasesize / pgsize; |
269 | 273 | ||
270 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 274 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
271 | "page size %u, count of eraseblocks %u, pages per " | 275 | "page size %u, count of eraseblocks %u, pages per " |
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index f237ddbb2713..111ea41c4ecd 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c | |||
@@ -853,7 +853,6 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | |||
853 | break; | 853 | break; |
854 | } | 854 | } |
855 | 855 | ||
856 | req.name[req.name_len] = '\0'; | ||
857 | err = verify_mkvol_req(ubi, &req); | 856 | err = verify_mkvol_req(ubi, &req); |
858 | if (err) | 857 | if (err) |
859 | break; | 858 | break; |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 277786ebaa2c..1361574e2b00 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -291,8 +291,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm); | |||
291 | */ | 291 | */ |
292 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | 292 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) |
293 | { | 293 | { |
294 | int error, ubi_num, vol_id; | 294 | int error, ubi_num, vol_id, mod; |
295 | struct ubi_volume_desc *ret; | ||
296 | struct inode *inode; | 295 | struct inode *inode; |
297 | struct path path; | 296 | struct path path; |
298 | 297 | ||
@@ -306,16 +305,16 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | |||
306 | return ERR_PTR(error); | 305 | return ERR_PTR(error); |
307 | 306 | ||
308 | inode = path.dentry->d_inode; | 307 | inode = path.dentry->d_inode; |
308 | mod = inode->i_mode; | ||
309 | ubi_num = ubi_major2num(imajor(inode)); | 309 | ubi_num = ubi_major2num(imajor(inode)); |
310 | vol_id = iminor(inode) - 1; | 310 | vol_id = iminor(inode) - 1; |
311 | path_put(&path); | ||
311 | 312 | ||
313 | if (!S_ISCHR(mod)) | ||
314 | return ERR_PTR(-EINVAL); | ||
312 | if (vol_id >= 0 && ubi_num >= 0) | 315 | if (vol_id >= 0 && ubi_num >= 0) |
313 | ret = ubi_open_volume(ubi_num, vol_id, mode); | 316 | return ubi_open_volume(ubi_num, vol_id, mode); |
314 | else | 317 | return ERR_PTR(-ENODEV); |
315 | ret = ERR_PTR(-ENODEV); | ||
316 | |||
317 | path_put(&path); | ||
318 | return ret; | ||
319 | } | 318 | } |
320 | EXPORT_SYMBOL_GPL(ubi_open_volume_path); | 319 | EXPORT_SYMBOL_GPL(ubi_open_volume_path); |
321 | 320 | ||
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index c1d7b880c795..425bf5a3edd4 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c | |||
@@ -155,6 +155,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, | |||
155 | if (err) | 155 | if (err) |
156 | return err; | 156 | return err; |
157 | vol->updating = 0; | 157 | vol->updating = 0; |
158 | return 0; | ||
158 | } | 159 | } |
159 | 160 | ||
160 | vol->upd_buf = vmalloc(ubi->leb_size); | 161 | vol->upd_buf = vmalloc(ubi->leb_size); |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 1afc61e7455d..40044028d682 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -566,6 +566,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | |||
566 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); | 566 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); |
567 | vol->alignment = be32_to_cpu(vtbl[i].alignment); | 567 | vol->alignment = be32_to_cpu(vtbl[i].alignment); |
568 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); | 568 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); |
569 | vol->upd_marker = vtbl[i].upd_marker; | ||
569 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? | 570 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? |
570 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | 571 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; |
571 | vol->name_len = be16_to_cpu(vtbl[i].name_len); | 572 | vol->name_len = be16_to_cpu(vtbl[i].name_len); |
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 62d9c9cc5671..1dd4403247ca 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c | |||
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev) | |||
921 | size = (res->end - res->start) + 1; | 921 | size = (res->end - res->start) + 1; |
922 | 922 | ||
923 | ax->mem2 = request_mem_region(res->start, size, pdev->name); | 923 | ax->mem2 = request_mem_region(res->start, size, pdev->name); |
924 | if (ax->mem == NULL) { | 924 | if (ax->mem2 == NULL) { |
925 | dev_err(&pdev->dev, "cannot reserve registers\n"); | 925 | dev_err(&pdev->dev, "cannot reserve registers\n"); |
926 | ret = -ENXIO; | 926 | ret = -ENXIO; |
927 | goto exit_mem1; | 927 | goto exit_mem1; |
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 9fd8e5ecd5d7..5bc74590c73e 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
@@ -276,8 +276,13 @@ struct be_adapter { | |||
276 | int link_speed; | 276 | int link_speed; |
277 | u8 port_type; | 277 | u8 port_type; |
278 | u8 transceiver; | 278 | u8 transceiver; |
279 | u8 generation; /* BladeEngine ASIC generation */ | ||
279 | }; | 280 | }; |
280 | 281 | ||
282 | /* BladeEngine Generation numbers */ | ||
283 | #define BE_GEN2 2 | ||
284 | #define BE_GEN3 3 | ||
285 | |||
281 | extern const struct ethtool_ops be_ethtool_ops; | 286 | extern const struct ethtool_ops be_ethtool_ops; |
282 | 287 | ||
283 | #define drvr_stats(adapter) (&adapter->stats.drvr_stats) | 288 | #define drvr_stats(adapter) (&adapter->stats.drvr_stats) |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 102ade134165..006cb2efcd22 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, | |||
286 | MCC_WRB_SGE_CNT_SHIFT; | 286 | MCC_WRB_SGE_CNT_SHIFT; |
287 | wrb->payload_length = payload_len; | 287 | wrb->payload_length = payload_len; |
288 | wrb->tag0 = opcode; | 288 | wrb->tag0 = opcode; |
289 | be_dws_cpu_to_le(wrb, 20); | 289 | be_dws_cpu_to_le(wrb, 8); |
290 | } | 290 | } |
291 | 291 | ||
292 | /* Don't touch the hdr after it's prepared */ | 292 | /* Don't touch the hdr after it's prepared */ |
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, | |||
296 | req_hdr->opcode = opcode; | 296 | req_hdr->opcode = opcode; |
297 | req_hdr->subsystem = subsystem; | 297 | req_hdr->subsystem = subsystem; |
298 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); | 298 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); |
299 | req_hdr->version = 0; | ||
299 | } | 300 | } |
300 | 301 | ||
301 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, | 302 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, |
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index c002b8391b4d..13b33c841083 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
@@ -164,7 +164,8 @@ struct be_cmd_req_hdr { | |||
164 | u8 domain; /* dword 0 */ | 164 | u8 domain; /* dword 0 */ |
165 | u32 timeout; /* dword 1 */ | 165 | u32 timeout; /* dword 1 */ |
166 | u32 request_length; /* dword 2 */ | 166 | u32 request_length; /* dword 2 */ |
167 | u32 rsvd; /* dword 3 */ | 167 | u8 version; /* dword 3 */ |
168 | u8 rsvd[3]; /* dword 3 */ | ||
168 | }; | 169 | }; |
169 | 170 | ||
170 | #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ | 171 | #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 3a1f7902c16d..626b76c0ebc7 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size) | |||
910 | static void be_post_rx_frags(struct be_adapter *adapter) | 910 | static void be_post_rx_frags(struct be_adapter *adapter) |
911 | { | 911 | { |
912 | struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; | 912 | struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; |
913 | struct be_rx_page_info *page_info = NULL; | 913 | struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; |
914 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 914 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
915 | struct page *pagep = NULL; | 915 | struct page *pagep = NULL; |
916 | struct be_eth_rx_d *rxd; | 916 | struct be_eth_rx_d *rxd; |
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter) | |||
941 | rxd = queue_head_node(rxq); | 941 | rxd = queue_head_node(rxq); |
942 | rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); | 942 | rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); |
943 | rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); | 943 | rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); |
944 | queue_head_inc(rxq); | ||
945 | 944 | ||
946 | /* Any space left in the current big page for another frag? */ | 945 | /* Any space left in the current big page for another frag? */ |
947 | if ((page_offset + rx_frag_size + rx_frag_size) > | 946 | if ((page_offset + rx_frag_size + rx_frag_size) > |
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter) | |||
949 | pagep = NULL; | 948 | pagep = NULL; |
950 | page_info->last_page_user = true; | 949 | page_info->last_page_user = true; |
951 | } | 950 | } |
951 | |||
952 | prev_page_info = page_info; | ||
953 | queue_head_inc(rxq); | ||
952 | page_info = &page_info_tbl[rxq->head]; | 954 | page_info = &page_info_tbl[rxq->head]; |
953 | } | 955 | } |
954 | if (pagep) | 956 | if (pagep) |
955 | page_info->last_page_user = true; | 957 | prev_page_info->last_page_user = true; |
956 | 958 | ||
957 | if (posted) { | 959 | if (posted) { |
958 | atomic_add(posted, &rxq->used); | 960 | atomic_add(posted, &rxq->used); |
@@ -1348,7 +1350,7 @@ static irqreturn_t be_intx(int irq, void *dev) | |||
1348 | int isr; | 1350 | int isr; |
1349 | 1351 | ||
1350 | isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + | 1352 | isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + |
1351 | be_pci_func(adapter) * CEV_ISR_SIZE); | 1353 | (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); |
1352 | if (!isr) | 1354 | if (!isr) |
1353 | return IRQ_NONE; | 1355 | return IRQ_NONE; |
1354 | 1356 | ||
@@ -2049,6 +2051,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) | |||
2049 | static int be_map_pci_bars(struct be_adapter *adapter) | 2051 | static int be_map_pci_bars(struct be_adapter *adapter) |
2050 | { | 2052 | { |
2051 | u8 __iomem *addr; | 2053 | u8 __iomem *addr; |
2054 | int pcicfg_reg; | ||
2052 | 2055 | ||
2053 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), | 2056 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), |
2054 | pci_resource_len(adapter->pdev, 2)); | 2057 | pci_resource_len(adapter->pdev, 2)); |
@@ -2062,8 +2065,13 @@ static int be_map_pci_bars(struct be_adapter *adapter) | |||
2062 | goto pci_map_err; | 2065 | goto pci_map_err; |
2063 | adapter->db = addr; | 2066 | adapter->db = addr; |
2064 | 2067 | ||
2065 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), | 2068 | if (adapter->generation == BE_GEN2) |
2066 | pci_resource_len(adapter->pdev, 1)); | 2069 | pcicfg_reg = 1; |
2070 | else | ||
2071 | pcicfg_reg = 0; | ||
2072 | |||
2073 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), | ||
2074 | pci_resource_len(adapter->pdev, pcicfg_reg)); | ||
2067 | if (addr == NULL) | 2075 | if (addr == NULL) |
2068 | goto pci_map_err; | 2076 | goto pci_map_err; |
2069 | adapter->pcicfg = addr; | 2077 | adapter->pcicfg = addr; |
@@ -2160,6 +2168,7 @@ static int be_stats_init(struct be_adapter *adapter) | |||
2160 | cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); | 2168 | cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); |
2161 | if (cmd->va == NULL) | 2169 | if (cmd->va == NULL) |
2162 | return -1; | 2170 | return -1; |
2171 | memset(cmd->va, 0, cmd->size); | ||
2163 | return 0; | 2172 | return 0; |
2164 | } | 2173 | } |
2165 | 2174 | ||
@@ -2238,6 +2247,20 @@ static int __devinit be_probe(struct pci_dev *pdev, | |||
2238 | goto rel_reg; | 2247 | goto rel_reg; |
2239 | } | 2248 | } |
2240 | adapter = netdev_priv(netdev); | 2249 | adapter = netdev_priv(netdev); |
2250 | |||
2251 | switch (pdev->device) { | ||
2252 | case BE_DEVICE_ID1: | ||
2253 | case OC_DEVICE_ID1: | ||
2254 | adapter->generation = BE_GEN2; | ||
2255 | break; | ||
2256 | case BE_DEVICE_ID2: | ||
2257 | case OC_DEVICE_ID2: | ||
2258 | adapter->generation = BE_GEN3; | ||
2259 | break; | ||
2260 | default: | ||
2261 | adapter->generation = 0; | ||
2262 | } | ||
2263 | |||
2241 | adapter->pdev = pdev; | 2264 | adapter->pdev = pdev; |
2242 | pci_set_drvdata(pdev, adapter); | 2265 | pci_set_drvdata(pdev, adapter); |
2243 | adapter->netdev = netdev; | 2266 | adapter->netdev = netdev; |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 8ffea3990d07..0b23bc4f56c6 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/dma.h> | 33 | #include <asm/dma.h> |
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | 35 | ||
36 | #include <asm/dpmc.h> | ||
36 | #include <asm/blackfin.h> | 37 | #include <asm/blackfin.h> |
37 | #include <asm/cacheflush.h> | 38 | #include <asm/cacheflush.h> |
38 | #include <asm/portmux.h> | 39 | #include <asm/portmux.h> |
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev) | |||
386 | u32 sclk, mdc_div; | 387 | u32 sclk, mdc_div; |
387 | 388 | ||
388 | /* Enable PHY output early */ | 389 | /* Enable PHY output early */ |
389 | if (!(bfin_read_VR_CTL() & PHYCLKOE)) | 390 | if (!(bfin_read_VR_CTL() & CLKBUFOE)) |
390 | bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); | 391 | bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE); |
391 | 392 | ||
392 | sclk = get_sclk(); | 393 | sclk = get_sclk(); |
393 | mdc_div = ((sclk / MDC_CLK) / 2) - 1; | 394 | mdc_div = ((sclk / MDC_CLK) / 2) - 1; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3f0071cfe56b..efa0e41bf3ec 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3639,7 +3639,7 @@ static int bond_open(struct net_device *bond_dev) | |||
3639 | */ | 3639 | */ |
3640 | if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { | 3640 | if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { |
3641 | /* something went wrong - fail the open operation */ | 3641 | /* something went wrong - fail the open operation */ |
3642 | return -1; | 3642 | return -ENOMEM; |
3643 | } | 3643 | } |
3644 | 3644 | ||
3645 | INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); | 3645 | INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); |
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index bdbd14727e4b..318a018ca7c5 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c | |||
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2079 | struct sge_fl *fl, int len, int complete) | 2079 | struct sge_fl *fl, int len, int complete) |
2080 | { | 2080 | { |
2081 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; | 2081 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; |
2082 | struct port_info *pi = netdev_priv(qs->netdev); | ||
2082 | struct sk_buff *skb = NULL; | 2083 | struct sk_buff *skb = NULL; |
2083 | struct cpl_rx_pkt *cpl; | 2084 | struct cpl_rx_pkt *cpl; |
2084 | struct skb_frag_struct *rx_frag; | 2085 | struct skb_frag_struct *rx_frag; |
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2116 | 2117 | ||
2117 | if (!nr_frags) { | 2118 | if (!nr_frags) { |
2118 | offset = 2 + sizeof(struct cpl_rx_pkt); | 2119 | offset = 2 + sizeof(struct cpl_rx_pkt); |
2119 | qs->lro_va = sd->pg_chunk.va + 2; | 2120 | cpl = qs->lro_va = sd->pg_chunk.va + 2; |
2120 | } | ||
2121 | len -= offset; | ||
2122 | 2121 | ||
2123 | prefetch(qs->lro_va); | 2122 | if ((pi->rx_offload & T3_RX_CSUM) && |
2123 | cpl->csum_valid && cpl->csum == htons(0xffff)) { | ||
2124 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2125 | qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; | ||
2126 | } else | ||
2127 | skb->ip_summed = CHECKSUM_NONE; | ||
2128 | } else | ||
2129 | cpl = qs->lro_va; | ||
2130 | |||
2131 | len -= offset; | ||
2124 | 2132 | ||
2125 | rx_frag += nr_frags; | 2133 | rx_frag += nr_frags; |
2126 | rx_frag->page = sd->pg_chunk.page; | 2134 | rx_frag->page = sd->pg_chunk.page; |
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2136 | return; | 2144 | return; |
2137 | 2145 | ||
2138 | skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); | 2146 | skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); |
2139 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2140 | cpl = qs->lro_va; | ||
2141 | 2147 | ||
2142 | if (unlikely(cpl->vlan_valid)) { | 2148 | if (unlikely(cpl->vlan_valid)) { |
2143 | struct net_device *dev = qs->netdev; | ||
2144 | struct port_info *pi = netdev_priv(dev); | ||
2145 | struct vlan_group *grp = pi->vlan_grp; | 2149 | struct vlan_group *grp = pi->vlan_grp; |
2146 | 2150 | ||
2147 | if (likely(grp != NULL)) { | 2151 | if (likely(grp != NULL)) { |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 2a567df3ea71..e8932db7ee77 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -326,6 +326,8 @@ struct e1000_adapter { | |||
326 | /* for ioport free */ | 326 | /* for ioport free */ |
327 | int bars; | 327 | int bars; |
328 | int need_ioport; | 328 | int need_ioport; |
329 | |||
330 | bool discarding; | ||
329 | }; | 331 | }; |
330 | 332 | ||
331 | enum e1000_state_t { | 333 | enum e1000_state_t { |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 7e855f9bbd97..765543663a4f 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1698 | rctl &= ~E1000_RCTL_SZ_4096; | 1698 | rctl &= ~E1000_RCTL_SZ_4096; |
1699 | rctl |= E1000_RCTL_BSEX; | 1699 | rctl |= E1000_RCTL_BSEX; |
1700 | switch (adapter->rx_buffer_len) { | 1700 | switch (adapter->rx_buffer_len) { |
1701 | case E1000_RXBUFFER_256: | ||
1702 | rctl |= E1000_RCTL_SZ_256; | ||
1703 | rctl &= ~E1000_RCTL_BSEX; | ||
1704 | break; | ||
1705 | case E1000_RXBUFFER_512: | ||
1706 | rctl |= E1000_RCTL_SZ_512; | ||
1707 | rctl &= ~E1000_RCTL_BSEX; | ||
1708 | break; | ||
1709 | case E1000_RXBUFFER_1024: | ||
1710 | rctl |= E1000_RCTL_SZ_1024; | ||
1711 | rctl &= ~E1000_RCTL_BSEX; | ||
1712 | break; | ||
1713 | case E1000_RXBUFFER_2048: | 1701 | case E1000_RXBUFFER_2048: |
1714 | default: | 1702 | default: |
1715 | rctl |= E1000_RCTL_SZ_2048; | 1703 | rctl |= E1000_RCTL_SZ_2048; |
@@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2802 | dma_error: | 2790 | dma_error: |
2803 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 2791 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
2804 | buffer_info->dma = 0; | 2792 | buffer_info->dma = 0; |
2805 | count--; | 2793 | if (count) |
2806 | |||
2807 | while (count >= 0) { | ||
2808 | count--; | 2794 | count--; |
2809 | i--; | 2795 | |
2810 | if (i < 0) | 2796 | while (count--) { |
2797 | if (i==0) | ||
2811 | i += tx_ring->count; | 2798 | i += tx_ring->count; |
2799 | i--; | ||
2812 | buffer_info = &tx_ring->buffer_info[i]; | 2800 | buffer_info = &tx_ring->buffer_info[i]; |
2813 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 2801 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
2814 | } | 2802 | } |
@@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3176 | * however with the new *_jumbo_rx* routines, jumbo receives will use | 3164 | * however with the new *_jumbo_rx* routines, jumbo receives will use |
3177 | * fragmented skbs */ | 3165 | * fragmented skbs */ |
3178 | 3166 | ||
3179 | if (max_frame <= E1000_RXBUFFER_256) | 3167 | if (max_frame <= E1000_RXBUFFER_2048) |
3180 | adapter->rx_buffer_len = E1000_RXBUFFER_256; | ||
3181 | else if (max_frame <= E1000_RXBUFFER_512) | ||
3182 | adapter->rx_buffer_len = E1000_RXBUFFER_512; | ||
3183 | else if (max_frame <= E1000_RXBUFFER_1024) | ||
3184 | adapter->rx_buffer_len = E1000_RXBUFFER_1024; | ||
3185 | else if (max_frame <= E1000_RXBUFFER_2048) | ||
3186 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | 3168 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; |
3187 | else | 3169 | else |
3188 | #if (PAGE_SIZE >= E1000_RXBUFFER_16384) | 3170 | #if (PAGE_SIZE >= E1000_RXBUFFER_16384) |
@@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3850 | 3832 | ||
3851 | length = le16_to_cpu(rx_desc->length); | 3833 | length = le16_to_cpu(rx_desc->length); |
3852 | /* !EOP means multiple descriptors were used to store a single | 3834 | /* !EOP means multiple descriptors were used to store a single |
3853 | * packet, also make sure the frame isn't just CRC only */ | 3835 | * packet, if thats the case we need to toss it. In fact, we |
3854 | if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { | 3836 | * to toss every packet with the EOP bit clear and the next |
3837 | * frame that _does_ have the EOP bit set, as it is by | ||
3838 | * definition only a frame fragment | ||
3839 | */ | ||
3840 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
3841 | adapter->discarding = true; | ||
3842 | |||
3843 | if (adapter->discarding) { | ||
3855 | /* All receives must fit into a single buffer */ | 3844 | /* All receives must fit into a single buffer */ |
3856 | E1000_DBG("%s: Receive packet consumed multiple" | 3845 | E1000_DBG("%s: Receive packet consumed multiple" |
3857 | " buffers\n", netdev->name); | 3846 | " buffers\n", netdev->name); |
3858 | /* recycle */ | 3847 | /* recycle */ |
3859 | buffer_info->skb = skb; | 3848 | buffer_info->skb = skb; |
3849 | if (status & E1000_RXD_STAT_EOP) | ||
3850 | adapter->discarding = false; | ||
3860 | goto next_desc; | 3851 | goto next_desc; |
3861 | } | 3852 | } |
3862 | 3853 | ||
@@ -4015,11 +4006,21 @@ check_page: | |||
4015 | } | 4006 | } |
4016 | } | 4007 | } |
4017 | 4008 | ||
4018 | if (!buffer_info->dma) | 4009 | if (!buffer_info->dma) { |
4019 | buffer_info->dma = pci_map_page(pdev, | 4010 | buffer_info->dma = pci_map_page(pdev, |
4020 | buffer_info->page, 0, | 4011 | buffer_info->page, 0, |
4021 | buffer_info->length, | 4012 | buffer_info->length, |
4022 | PCI_DMA_FROMDEVICE); | 4013 | PCI_DMA_FROMDEVICE); |
4014 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { | ||
4015 | put_page(buffer_info->page); | ||
4016 | dev_kfree_skb(skb); | ||
4017 | buffer_info->page = NULL; | ||
4018 | buffer_info->skb = NULL; | ||
4019 | buffer_info->dma = 0; | ||
4020 | adapter->alloc_rx_buff_failed++; | ||
4021 | break; /* while !buffer_info->skb */ | ||
4022 | } | ||
4023 | } | ||
4023 | 4024 | ||
4024 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 4025 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
4025 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 4026 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
@@ -4110,6 +4111,13 @@ map_skb: | |||
4110 | skb->data, | 4111 | skb->data, |
4111 | buffer_info->length, | 4112 | buffer_info->length, |
4112 | PCI_DMA_FROMDEVICE); | 4113 | PCI_DMA_FROMDEVICE); |
4114 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { | ||
4115 | dev_kfree_skb(skb); | ||
4116 | buffer_info->skb = NULL; | ||
4117 | buffer_info->dma = 0; | ||
4118 | adapter->alloc_rx_buff_failed++; | ||
4119 | break; /* while !buffer_info->skb */ | ||
4120 | } | ||
4113 | 4121 | ||
4114 | /* | 4122 | /* |
4115 | * XXX if it was allocated cleanly it will never map to a | 4123 | * XXX if it was allocated cleanly it will never map to a |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index d6ee28f6ea08..d236efaf7478 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -421,6 +421,7 @@ struct e1000_info { | |||
421 | /* CRC Stripping defines */ | 421 | /* CRC Stripping defines */ |
422 | #define FLAG2_CRC_STRIPPING (1 << 0) | 422 | #define FLAG2_CRC_STRIPPING (1 << 0) |
423 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) | 423 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) |
424 | #define FLAG2_IS_DISCARDING (1 << 2) | ||
424 | 425 | ||
425 | #define E1000_RX_DESC_PS(R, i) \ | 426 | #define E1000_RX_DESC_PS(R, i) \ |
426 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 427 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index c45965a256b6..57f149b75fbe 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
450 | 450 | ||
451 | length = le16_to_cpu(rx_desc->length); | 451 | length = le16_to_cpu(rx_desc->length); |
452 | 452 | ||
453 | /* !EOP means multiple descriptors were used to store a single | 453 | /* |
454 | * packet, also make sure the frame isn't just CRC only */ | 454 | * !EOP means multiple descriptors were used to store a single |
455 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 455 | * packet, if that's the case we need to toss it. In fact, we |
456 | * need to toss every packet with the EOP bit clear and the | ||
457 | * next frame that _does_ have the EOP bit set, as it is by | ||
458 | * definition only a frame fragment | ||
459 | */ | ||
460 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
461 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
462 | |||
463 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
456 | /* All receives must fit into a single buffer */ | 464 | /* All receives must fit into a single buffer */ |
457 | e_dbg("Receive packet consumed multiple buffers\n"); | 465 | e_dbg("Receive packet consumed multiple buffers\n"); |
458 | /* recycle */ | 466 | /* recycle */ |
459 | buffer_info->skb = skb; | 467 | buffer_info->skb = skb; |
468 | if (status & E1000_RXD_STAT_EOP) | ||
469 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
460 | goto next_desc; | 470 | goto next_desc; |
461 | } | 471 | } |
462 | 472 | ||
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
745 | PCI_DMA_FROMDEVICE); | 755 | PCI_DMA_FROMDEVICE); |
746 | buffer_info->dma = 0; | 756 | buffer_info->dma = 0; |
747 | 757 | ||
748 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 758 | /* see !EOP comment in other rx routine */ |
759 | if (!(staterr & E1000_RXD_STAT_EOP)) | ||
760 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
761 | |||
762 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
749 | e_dbg("Packet Split buffers didn't pick up the full " | 763 | e_dbg("Packet Split buffers didn't pick up the full " |
750 | "packet\n"); | 764 | "packet\n"); |
751 | dev_kfree_skb_irq(skb); | 765 | dev_kfree_skb_irq(skb); |
766 | if (staterr & E1000_RXD_STAT_EOP) | ||
767 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
752 | goto next_desc; | 768 | goto next_desc; |
753 | } | 769 | } |
754 | 770 | ||
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1118 | 1134 | ||
1119 | rx_ring->next_to_clean = 0; | 1135 | rx_ring->next_to_clean = 0; |
1120 | rx_ring->next_to_use = 0; | 1136 | rx_ring->next_to_use = 0; |
1137 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
1121 | 1138 | ||
1122 | writel(0, adapter->hw.hw_addr + rx_ring->head); | 1139 | writel(0, adapter->hw.hw_addr + rx_ring->head); |
1123 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | 1140 | writel(0, adapter->hw.hw_addr + rx_ring->tail); |
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2333 | rctl &= ~E1000_RCTL_SZ_4096; | 2350 | rctl &= ~E1000_RCTL_SZ_4096; |
2334 | rctl |= E1000_RCTL_BSEX; | 2351 | rctl |= E1000_RCTL_BSEX; |
2335 | switch (adapter->rx_buffer_len) { | 2352 | switch (adapter->rx_buffer_len) { |
2336 | case 256: | ||
2337 | rctl |= E1000_RCTL_SZ_256; | ||
2338 | rctl &= ~E1000_RCTL_BSEX; | ||
2339 | break; | ||
2340 | case 512: | ||
2341 | rctl |= E1000_RCTL_SZ_512; | ||
2342 | rctl &= ~E1000_RCTL_BSEX; | ||
2343 | break; | ||
2344 | case 1024: | ||
2345 | rctl |= E1000_RCTL_SZ_1024; | ||
2346 | rctl &= ~E1000_RCTL_BSEX; | ||
2347 | break; | ||
2348 | case 2048: | 2353 | case 2048: |
2349 | default: | 2354 | default: |
2350 | rctl |= E1000_RCTL_SZ_2048; | 2355 | rctl |= E1000_RCTL_SZ_2048; |
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
3781 | 0, IPPROTO_TCP, 0); | 3786 | 0, IPPROTO_TCP, 0); |
3782 | cmd_length = E1000_TXD_CMD_IP; | 3787 | cmd_length = E1000_TXD_CMD_IP; |
3783 | ipcse = skb_transport_offset(skb) - 1; | 3788 | ipcse = skb_transport_offset(skb) - 1; |
3784 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 3789 | } else if (skb_is_gso_v6(skb)) { |
3785 | ipv6_hdr(skb)->payload_len = 0; | 3790 | ipv6_hdr(skb)->payload_len = 0; |
3786 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 3791 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3787 | &ipv6_hdr(skb)->daddr, | 3792 | &ipv6_hdr(skb)->daddr, |
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3962 | dma_error: | 3967 | dma_error: |
3963 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 3968 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
3964 | buffer_info->dma = 0; | 3969 | buffer_info->dma = 0; |
3965 | count--; | 3970 | if (count) |
3966 | |||
3967 | while (count >= 0) { | ||
3968 | count--; | 3971 | count--; |
3969 | i--; | 3972 | |
3970 | if (i < 0) | 3973 | while (count--) { |
3974 | if (i==0) | ||
3971 | i += tx_ring->count; | 3975 | i += tx_ring->count; |
3976 | i--; | ||
3972 | buffer_info = &tx_ring->buffer_info[i]; | 3977 | buffer_info = &tx_ring->buffer_info[i]; |
3973 | e1000_put_txbuf(adapter, buffer_info);; | 3978 | e1000_put_txbuf(adapter, buffer_info);; |
3974 | } | 3979 | } |
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4317 | * fragmented skbs | 4322 | * fragmented skbs |
4318 | */ | 4323 | */ |
4319 | 4324 | ||
4320 | if (max_frame <= 256) | 4325 | if (max_frame <= 2048) |
4321 | adapter->rx_buffer_len = 256; | ||
4322 | else if (max_frame <= 512) | ||
4323 | adapter->rx_buffer_len = 512; | ||
4324 | else if (max_frame <= 1024) | ||
4325 | adapter->rx_buffer_len = 1024; | ||
4326 | else if (max_frame <= 2048) | ||
4327 | adapter->rx_buffer_len = 2048; | 4326 | adapter->rx_buffer_len = 2048; |
4328 | else | 4327 | else |
4329 | adapter->rx_buffer_len = 4096; | 4328 | adapter->rx_buffer_len = 4096; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 933c64ff2465..c881347cb26d 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) | |||
421 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; | 421 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; |
422 | if (tx_queue > IGB_N0_QUEUE) | 422 | if (tx_queue > IGB_N0_QUEUE) |
423 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; | 423 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; |
424 | if (!adapter->msix_entries && msix_vector == 0) | ||
425 | msixbm |= E1000_EIMS_OTHER; | ||
424 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); | 426 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); |
425 | q_vector->eims_value = msixbm; | 427 | q_vector->eims_value = msixbm; |
426 | break; | 428 | break; |
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
877 | { | 879 | { |
878 | struct net_device *netdev = adapter->netdev; | 880 | struct net_device *netdev = adapter->netdev; |
879 | struct pci_dev *pdev = adapter->pdev; | 881 | struct pci_dev *pdev = adapter->pdev; |
880 | struct e1000_hw *hw = &adapter->hw; | ||
881 | int err = 0; | 882 | int err = 0; |
882 | 883 | ||
883 | if (adapter->msix_entries) { | 884 | if (adapter->msix_entries) { |
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
909 | igb_setup_all_tx_resources(adapter); | 910 | igb_setup_all_tx_resources(adapter); |
910 | igb_setup_all_rx_resources(adapter); | 911 | igb_setup_all_rx_resources(adapter); |
911 | } else { | 912 | } else { |
912 | switch (hw->mac.type) { | 913 | igb_assign_vector(adapter->q_vector[0], 0); |
913 | case e1000_82575: | ||
914 | wr32(E1000_MSIXBM(0), | ||
915 | (E1000_EICR_RX_QUEUE0 | | ||
916 | E1000_EICR_TX_QUEUE0 | | ||
917 | E1000_EIMS_OTHER)); | ||
918 | break; | ||
919 | case e1000_82580: | ||
920 | case e1000_82576: | ||
921 | wr32(E1000_IVAR0, E1000_IVAR_VALID); | ||
922 | break; | ||
923 | default: | ||
924 | break; | ||
925 | } | ||
926 | } | 914 | } |
927 | 915 | ||
928 | if (adapter->flags & IGB_FLAG_HAS_MSI) { | 916 | if (adapter->flags & IGB_FLAG_HAS_MSI) { |
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter) | |||
1140 | } | 1128 | } |
1141 | if (adapter->msix_entries) | 1129 | if (adapter->msix_entries) |
1142 | igb_configure_msix(adapter); | 1130 | igb_configure_msix(adapter); |
1131 | else | ||
1132 | igb_assign_vector(adapter->q_vector[0], 0); | ||
1143 | 1133 | ||
1144 | /* Clear any pending interrupts. */ | 1134 | /* Clear any pending interrupts. */ |
1145 | rd32(E1000_ICR); | 1135 | rd32(E1000_ICR); |
@@ -3422,7 +3412,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring, | |||
3422 | iph->daddr, 0, | 3412 | iph->daddr, 0, |
3423 | IPPROTO_TCP, | 3413 | IPPROTO_TCP, |
3424 | 0); | 3414 | 0); |
3425 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 3415 | } else if (skb_is_gso_v6(skb)) { |
3426 | ipv6_hdr(skb)->payload_len = 0; | 3416 | ipv6_hdr(skb)->payload_len = 0; |
3427 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 3417 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3428 | &ipv6_hdr(skb)->daddr, | 3418 | &ipv6_hdr(skb)->daddr, |
@@ -3584,6 +3574,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, | |||
3584 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 3574 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
3585 | struct skb_frag_struct *frag; | 3575 | struct skb_frag_struct *frag; |
3586 | 3576 | ||
3577 | count++; | ||
3587 | i++; | 3578 | i++; |
3588 | if (i == tx_ring->count) | 3579 | if (i == tx_ring->count) |
3589 | i = 0; | 3580 | i = 0; |
@@ -3605,7 +3596,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, | |||
3605 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | 3596 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) |
3606 | goto dma_error; | 3597 | goto dma_error; |
3607 | 3598 | ||
3608 | count++; | ||
3609 | } | 3599 | } |
3610 | 3600 | ||
3611 | tx_ring->buffer_info[i].skb = skb; | 3601 | tx_ring->buffer_info[i].skb = skb; |
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 0dbd0320023a..2aa71a766c35 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1963 | iph->daddr, 0, | 1963 | iph->daddr, 0, |
1964 | IPPROTO_TCP, | 1964 | IPPROTO_TCP, |
1965 | 0); | 1965 | 0); |
1966 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 1966 | } else if (skb_is_gso_v6(skb)) { |
1967 | ipv6_hdr(skb)->payload_len = 0; | 1967 | ipv6_hdr(skb)->payload_len = 0; |
1968 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 1968 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
1969 | &ipv6_hdr(skb)->daddr, | 1969 | &ipv6_hdr(skb)->daddr, |
@@ -2117,6 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2117 | /* set time_stamp *before* dma to help avoid a possible race */ | 2117 | /* set time_stamp *before* dma to help avoid a possible race */ |
2118 | buffer_info->time_stamp = jiffies; | 2118 | buffer_info->time_stamp = jiffies; |
2119 | buffer_info->next_to_watch = i; | 2119 | buffer_info->next_to_watch = i; |
2120 | buffer_info->mapped_as_page = false; | ||
2120 | buffer_info->dma = pci_map_single(pdev, skb->data, len, | 2121 | buffer_info->dma = pci_map_single(pdev, skb->data, len, |
2121 | PCI_DMA_TODEVICE); | 2122 | PCI_DMA_TODEVICE); |
2122 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | 2123 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) |
@@ -2126,6 +2127,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2126 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 2127 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
2127 | struct skb_frag_struct *frag; | 2128 | struct skb_frag_struct *frag; |
2128 | 2129 | ||
2130 | count++; | ||
2129 | i++; | 2131 | i++; |
2130 | if (i == tx_ring->count) | 2132 | if (i == tx_ring->count) |
2131 | i = 0; | 2133 | i = 0; |
@@ -2146,7 +2148,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2146 | PCI_DMA_TODEVICE); | 2148 | PCI_DMA_TODEVICE); |
2147 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | 2149 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) |
2148 | goto dma_error; | 2150 | goto dma_error; |
2149 | count++; | ||
2150 | } | 2151 | } |
2151 | 2152 | ||
2152 | tx_ring->buffer_info[i].skb = skb; | 2153 | tx_ring->buffer_info[i].skb = skb; |
@@ -2163,14 +2164,14 @@ dma_error: | |||
2163 | buffer_info->length = 0; | 2164 | buffer_info->length = 0; |
2164 | buffer_info->next_to_watch = 0; | 2165 | buffer_info->next_to_watch = 0; |
2165 | buffer_info->mapped_as_page = false; | 2166 | buffer_info->mapped_as_page = false; |
2166 | count--; | 2167 | if (count) |
2168 | count--; | ||
2167 | 2169 | ||
2168 | /* clear timestamp and dma mappings for remaining portion of packet */ | 2170 | /* clear timestamp and dma mappings for remaining portion of packet */ |
2169 | while (count >= 0) { | 2171 | while (count--) { |
2170 | count--; | 2172 | if (i==0) |
2171 | i--; | ||
2172 | if (i < 0) | ||
2173 | i += tx_ring->count; | 2173 | i += tx_ring->count; |
2174 | i--; | ||
2174 | buffer_info = &tx_ring->buffer_info[i]; | 2175 | buffer_info = &tx_ring->buffer_info[i]; |
2175 | igbvf_put_txbuf(adapter, buffer_info); | 2176 | igbvf_put_txbuf(adapter, buffer_info); |
2176 | } | 2177 | } |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index bcd0f01d5feb..593d1a4f217c 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1363 | dma_error: | 1363 | dma_error: |
1364 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 1364 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
1365 | buffer_info->dma = 0; | 1365 | buffer_info->dma = 0; |
1366 | count--; | 1366 | if (count) |
1367 | |||
1368 | while (count >= 0) { | ||
1369 | count--; | 1367 | count--; |
1370 | i--; | 1368 | |
1371 | if (i < 0) | 1369 | while (count--) { |
1370 | if (i==0) | ||
1372 | i += tx_ring->count; | 1371 | i += tx_ring->count; |
1372 | i--; | ||
1373 | buffer_info = &tx_ring->buffer_info[i]; | 1373 | buffer_info = &tx_ring->buffer_info[i]; |
1374 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); | 1374 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); |
1375 | } | 1375 | } |
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 3103f4165311..35a06b47587b 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) | |||
357 | u32 fctrl_reg; | 357 | u32 fctrl_reg; |
358 | u32 rmcs_reg; | 358 | u32 rmcs_reg; |
359 | u32 reg; | 359 | u32 reg; |
360 | u32 link_speed = 0; | ||
361 | bool link_up; | ||
360 | 362 | ||
361 | #ifdef CONFIG_DCB | 363 | #ifdef CONFIG_DCB |
362 | if (hw->fc.requested_mode == ixgbe_fc_pfc) | 364 | if (hw->fc.requested_mode == ixgbe_fc_pfc) |
363 | goto out; | 365 | goto out; |
364 | 366 | ||
365 | #endif /* CONFIG_DCB */ | 367 | #endif /* CONFIG_DCB */ |
368 | /* | ||
369 | * On 82598 having Rx FC on causes resets while doing 1G | ||
370 | * so if it's on turn it off once we know link_speed. For | ||
371 | * more details see 82598 Specification update. | ||
372 | */ | ||
373 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | ||
374 | if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { | ||
375 | switch (hw->fc.requested_mode) { | ||
376 | case ixgbe_fc_full: | ||
377 | hw->fc.requested_mode = ixgbe_fc_tx_pause; | ||
378 | break; | ||
379 | case ixgbe_fc_rx_pause: | ||
380 | hw->fc.requested_mode = ixgbe_fc_none; | ||
381 | break; | ||
382 | default: | ||
383 | /* no change */ | ||
384 | break; | ||
385 | } | ||
386 | } | ||
387 | |||
366 | /* Negotiate the fc mode to use */ | 388 | /* Negotiate the fc mode to use */ |
367 | ret_val = ixgbe_fc_autoneg(hw); | 389 | ret_val = ixgbe_fc_autoneg(hw); |
368 | if (ret_val) | 390 | if (ret_val) |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 56f37f66b696..dd4883f642be 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -223,7 +223,7 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, | |||
223 | 223 | ||
224 | if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != | 224 | if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != |
225 | adapter->dcb_cfg.bw_percentage[0][bwg_id]) { | 225 | adapter->dcb_cfg.bw_percentage[0][bwg_id]) { |
226 | adapter->dcb_set_bitmap |= BIT_PG_RX; | 226 | adapter->dcb_set_bitmap |= BIT_PG_TX; |
227 | adapter->dcb_set_bitmap |= BIT_RESETLINK; | 227 | adapter->dcb_set_bitmap |= BIT_RESETLINK; |
228 | } | 228 | } |
229 | } | 229 | } |
@@ -341,6 +341,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | |||
341 | if (!adapter->dcb_set_bitmap) | 341 | if (!adapter->dcb_set_bitmap) |
342 | return DCB_NO_HW_CHG; | 342 | return DCB_NO_HW_CHG; |
343 | 343 | ||
344 | ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, | ||
345 | adapter->ring_feature[RING_F_DCB].indices); | ||
346 | |||
347 | if (ret) | ||
348 | return DCB_NO_HW_CHG; | ||
349 | |||
344 | /* | 350 | /* |
345 | * Only take down the adapter if the configuration change | 351 | * Only take down the adapter if the configuration change |
346 | * requires a reset. | 352 | * requires a reset. |
@@ -359,14 +365,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | |||
359 | } | 365 | } |
360 | } | 366 | } |
361 | 367 | ||
362 | ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, | ||
363 | adapter->ring_feature[RING_F_DCB].indices); | ||
364 | if (ret) { | ||
365 | if (adapter->dcb_set_bitmap & BIT_RESETLINK) | ||
366 | clear_bit(__IXGBE_RESETTING, &adapter->state); | ||
367 | return DCB_NO_HW_CHG; | ||
368 | } | ||
369 | |||
370 | if (adapter->dcb_cfg.pfc_mode_enable) { | 368 | if (adapter->dcb_cfg.pfc_mode_enable) { |
371 | if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && | 369 | if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && |
372 | (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) | 370 | (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9c9202f40b10..951b73cf5ca2 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -4928,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
4928 | iph->daddr, 0, | 4928 | iph->daddr, 0, |
4929 | IPPROTO_TCP, | 4929 | IPPROTO_TCP, |
4930 | 0); | 4930 | 0); |
4931 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 4931 | } else if (skb_is_gso_v6(skb)) { |
4932 | ipv6_hdr(skb)->payload_len = 0; | 4932 | ipv6_hdr(skb)->payload_len = 0; |
4933 | tcp_hdr(skb)->check = | 4933 | tcp_hdr(skb)->check = |
4934 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 4934 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
@@ -5167,19 +5167,19 @@ dma_error: | |||
5167 | tx_buffer_info->dma = 0; | 5167 | tx_buffer_info->dma = 0; |
5168 | tx_buffer_info->time_stamp = 0; | 5168 | tx_buffer_info->time_stamp = 0; |
5169 | tx_buffer_info->next_to_watch = 0; | 5169 | tx_buffer_info->next_to_watch = 0; |
5170 | count--; | 5170 | if (count) |
5171 | count--; | ||
5171 | 5172 | ||
5172 | /* clear timestamp and dma mappings for remaining portion of packet */ | 5173 | /* clear timestamp and dma mappings for remaining portion of packet */ |
5173 | while (count >= 0) { | 5174 | while (count--) { |
5174 | count--; | 5175 | if (i==0) |
5175 | i--; | ||
5176 | if (i < 0) | ||
5177 | i += tx_ring->count; | 5176 | i += tx_ring->count; |
5177 | i--; | ||
5178 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 5178 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
5179 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 5179 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); |
5180 | } | 5180 | } |
5181 | 5181 | ||
5182 | return count; | 5182 | return 0; |
5183 | } | 5183 | } |
5184 | 5184 | ||
5185 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | 5185 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, |
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
5329 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 5329 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
5330 | int txq = smp_processor_id(); | 5330 | int txq = smp_processor_id(); |
5331 | 5331 | ||
5332 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | 5332 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
5333 | while (unlikely(txq >= dev->real_num_tx_queues)) | ||
5334 | txq -= dev->real_num_tx_queues; | ||
5333 | return txq; | 5335 | return txq; |
5336 | } | ||
5334 | 5337 | ||
5335 | #ifdef IXGBE_FCOE | 5338 | #ifdef IXGBE_FCOE |
5336 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5339 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
@@ -5760,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5760 | if (err) | 5763 | if (err) |
5761 | goto err_sw_init; | 5764 | goto err_sw_init; |
5762 | 5765 | ||
5766 | /* Make it possible the adapter to be woken up via WOL */ | ||
5767 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
5768 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | ||
5769 | |||
5763 | /* | 5770 | /* |
5764 | * If there is a fan on this device and it has failed log the | 5771 | * If there is a fan on this device and it has failed log the |
5765 | * failure. | 5772 | * failure. |
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c index c146304d8d6c..c0ceebccaa49 100644 --- a/drivers/net/ks8851_mll.c +++ b/drivers/net/ks8851_mll.c | |||
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks) | |||
854 | 854 | ||
855 | static irqreturn_t ks_irq(int irq, void *pw) | 855 | static irqreturn_t ks_irq(int irq, void *pw) |
856 | { | 856 | { |
857 | struct ks_net *ks = pw; | 857 | struct net_device *netdev = pw; |
858 | struct net_device *netdev = ks->netdev; | 858 | struct ks_net *ks = netdev_priv(netdev); |
859 | u16 status; | 859 | u16 status; |
860 | 860 | ||
861 | /*this should be the first in IRQ handler */ | 861 | /*this should be the first in IRQ handler */ |
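The ks8851_mll change swaps the IRQ cookie: the net_device itself is registered as dev_id, and the handler recovers the private state with netdev_priv(). A kernel-style sketch of that pattern with stand-in names rather than the driver's own (a sketch, assuming a shared interrupt line):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct example_priv {            /* stand-in for the driver private struct */
        int dummy_state;
    };

    static irqreturn_t example_irq(int irq, void *pw)
    {
        struct net_device *netdev = pw;
        struct example_priv *priv = netdev_priv(netdev);

        (void)priv;                  /* ... acknowledge and handle the event ... */
        return IRQ_HANDLED;
    }

    static int example_request(struct net_device *netdev, int irq)
    {
        /* dev_id is the net_device, so the handler and free_irq() agree */
        return request_irq(irq, example_irq, IRQF_SHARED, netdev->name, netdev);
    }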
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9f9d6081959b..24279e6e55f5 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) | |||
1941 | netif_wake_queue(adapter->netdev); | 1941 | netif_wake_queue(adapter->netdev); |
1942 | 1942 | ||
1943 | clear_bit(__NX_RESETTING, &adapter->state); | 1943 | clear_bit(__NX_RESETTING, &adapter->state); |
1944 | 1944 | return; | |
1945 | } else { | 1945 | } else { |
1946 | clear_bit(__NX_RESETTING, &adapter->state); | 1946 | clear_bit(__NX_RESETTING, &adapter->state); |
1947 | if (!netxen_nic_reset_context(adapter)) { | 1947 | if (!netxen_nic_reset_context(adapter)) { |
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work) | |||
2240 | 2240 | ||
2241 | netxen_nic_down(adapter, netdev); | 2241 | netxen_nic_down(adapter, netdev); |
2242 | 2242 | ||
2243 | rtnl_lock(); | ||
2243 | netxen_nic_detach(adapter); | 2244 | netxen_nic_detach(adapter); |
2245 | rtnl_unlock(); | ||
2244 | 2246 | ||
2245 | status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); | 2247 | status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); |
2246 | 2248 | ||
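The netxen hunk brackets the hardware detach with rtnl_lock()/rtnl_unlock() so a teardown running from a workqueue cannot race rtnl-protected netdev operations. A hedged sketch of the shape of that change; the detach helper is hypothetical:

    #include <linux/rtnetlink.h>

    /* Hypothetical teardown helper; stands in for the driver's detach call. */
    static void example_detach_hw(void *adapter);

    /* Runs from a workqueue: the interface is brought down first, then the
     * hardware detach is done under the RTNL so it cannot race ndo_open,
     * ndo_stop or other rtnl-protected paths. */
    static void example_detach_work(void *adapter)
    {
        rtnl_lock();
        example_detach_hw(adapter);
        rtnl_unlock();
    }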
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 813aca3fc433..7b17404d0858 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { | |||
717 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), | 717 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), |
718 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), | 718 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), |
719 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), | 719 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), |
720 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), | ||
720 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), | 721 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), |
721 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), | 722 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), |
722 | PCMCIA_DEVICE_NULL, | 723 | PCMCIA_DEVICE_NULL, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b0e9f9c51721..0295097d6c44 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg); | |||
410 | 410 | ||
411 | 411 | ||
412 | static void phy_change(struct work_struct *work); | 412 | static void phy_change(struct work_struct *work); |
413 | static void phy_state_machine(struct work_struct *work); | ||
414 | 413 | ||
415 | /** | 414 | /** |
416 | * phy_start_machine - start PHY state machine tracking | 415 | * phy_start_machine - start PHY state machine tracking |
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev, | |||
430 | { | 429 | { |
431 | phydev->adjust_state = handler; | 430 | phydev->adjust_state = handler; |
432 | 431 | ||
433 | INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); | ||
434 | schedule_delayed_work(&phydev->state_queue, HZ); | 432 | schedule_delayed_work(&phydev->state_queue, HZ); |
435 | } | 433 | } |
436 | 434 | ||
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start); | |||
761 | * phy_state_machine - Handle the state machine | 759 | * phy_state_machine - Handle the state machine |
762 | * @work: work_struct that describes the work to be done | 760 | * @work: work_struct that describes the work to be done |
763 | */ | 761 | */ |
764 | static void phy_state_machine(struct work_struct *work) | 762 | void phy_state_machine(struct work_struct *work) |
765 | { | 763 | { |
766 | struct delayed_work *dwork = to_delayed_work(work); | 764 | struct delayed_work *dwork = to_delayed_work(work); |
767 | struct phy_device *phydev = | 765 | struct phy_device *phydev = |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8212b2b93422..adbc0fded130 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) | |||
177 | dev->state = PHY_DOWN; | 177 | dev->state = PHY_DOWN; |
178 | 178 | ||
179 | mutex_init(&dev->lock); | 179 | mutex_init(&dev->lock); |
180 | INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); | ||
180 | 181 | ||
181 | return dev; | 182 | return dev; |
182 | } | 183 | } |
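Taken together, the phy.c and phy_device.c hunks move INIT_DELAYED_WORK() into phy_device_create(), so the state-machine work item is initialised exactly once at device creation and phy_start_machine() only has to schedule it. A sketch of that split using illustrative names, not the phylib API itself:

    #include <linux/jiffies.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct example_dev {
        struct delayed_work state_queue;
    };

    static void example_state_machine(struct work_struct *work)
    {
        struct example_dev *dev =
            container_of(to_delayed_work(work), struct example_dev, state_queue);

        (void)dev;                    /* ... run one state-machine step ... */
    }

    static struct example_dev *example_create(void)
    {
        struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (dev)                      /* one-time init, at creation */
            INIT_DELAYED_WORK(&dev->state_queue, example_state_machine);
        return dev;
    }

    static void example_start(struct example_dev *dev)
    {
        schedule_delayed_work(&dev->state_queue, HZ);   /* just arm it */
    }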
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 707b391afa02..894a7c84faef 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4119 | err = pcie_set_readrq(pdev, 4096); | 4119 | err = pcie_set_readrq(pdev, 4096); |
4120 | if (err) { | 4120 | if (err) { |
4121 | dev_err(&pdev->dev, "Set readrq failed.\n"); | 4121 | dev_err(&pdev->dev, "Set readrq failed.\n"); |
4122 | goto err_out; | 4122 | goto err_out1; |
4123 | } | 4123 | } |
4124 | 4124 | ||
4125 | err = pci_request_regions(pdev, DRV_NAME); | 4125 | err = pci_request_regions(pdev, DRV_NAME); |
@@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4140 | 4140 | ||
4141 | if (err) { | 4141 | if (err) { |
4142 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); | 4142 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); |
4143 | goto err_out; | 4143 | goto err_out2; |
4144 | } | 4144 | } |
4145 | 4145 | ||
4146 | /* Set PCIe reset type for EEH to fundamental. */ | 4146 | /* Set PCIe reset type for EEH to fundamental. */ |
@@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4152 | if (!qdev->reg_base) { | 4152 | if (!qdev->reg_base) { |
4153 | dev_err(&pdev->dev, "Register mapping failed.\n"); | 4153 | dev_err(&pdev->dev, "Register mapping failed.\n"); |
4154 | err = -ENOMEM; | 4154 | err = -ENOMEM; |
4155 | goto err_out; | 4155 | goto err_out2; |
4156 | } | 4156 | } |
4157 | 4157 | ||
4158 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); | 4158 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); |
@@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4162 | if (!qdev->doorbell_area) { | 4162 | if (!qdev->doorbell_area) { |
4163 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); | 4163 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); |
4164 | err = -ENOMEM; | 4164 | err = -ENOMEM; |
4165 | goto err_out; | 4165 | goto err_out2; |
4166 | } | 4166 | } |
4167 | 4167 | ||
4168 | err = ql_get_board_info(qdev); | 4168 | err = ql_get_board_info(qdev); |
4169 | if (err) { | 4169 | if (err) { |
4170 | dev_err(&pdev->dev, "Register access failed.\n"); | 4170 | dev_err(&pdev->dev, "Register access failed.\n"); |
4171 | err = -EIO; | 4171 | err = -EIO; |
4172 | goto err_out; | 4172 | goto err_out2; |
4173 | } | 4173 | } |
4174 | qdev->msg_enable = netif_msg_init(debug, default_msg); | 4174 | qdev->msg_enable = netif_msg_init(debug, default_msg); |
4175 | spin_lock_init(&qdev->hw_lock); | 4175 | spin_lock_init(&qdev->hw_lock); |
@@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4179 | err = qdev->nic_ops->get_flash(qdev); | 4179 | err = qdev->nic_ops->get_flash(qdev); |
4180 | if (err) { | 4180 | if (err) { |
4181 | dev_err(&pdev->dev, "Invalid FLASH.\n"); | 4181 | dev_err(&pdev->dev, "Invalid FLASH.\n"); |
4182 | goto err_out; | 4182 | goto err_out2; |
4183 | } | 4183 | } |
4184 | 4184 | ||
4185 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | 4185 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); |
@@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4212 | DRV_NAME, DRV_VERSION); | 4212 | DRV_NAME, DRV_VERSION); |
4213 | } | 4213 | } |
4214 | return 0; | 4214 | return 0; |
4215 | err_out: | 4215 | err_out2: |
4216 | ql_release_all(pdev); | 4216 | ql_release_all(pdev); |
4217 | err_out1: | ||
4217 | pci_disable_device(pdev); | 4218 | pci_disable_device(pdev); |
4218 | return err; | 4219 | return err; |
4219 | } | 4220 | } |
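The qlge hunks split the single err_out label into err_out1/err_out2 so each failure path unwinds only what has actually been acquired (PCI regions are not released if they were never requested). A minimal userspace sketch of that staged-unwind pattern; every helper below is a stand-in:

    #include <stdio.h>

    static int acquire_a(void) { return 0; }          /* e.g. enable the device */
    static int acquire_b(void) { return 0; }          /* e.g. request regions */
    static int acquire_c(void) { return -1; }         /* e.g. map registers -- fails */
    static void release_b(void) { puts("release b"); }
    static void release_a(void) { puts("release a"); }

    static int init_device(void)
    {
        int err;

        err = acquire_a();
        if (err)
            goto err_out0;            /* nothing to undo yet */
        err = acquire_b();
        if (err)
            goto err_out1;            /* undo only A */
        err = acquire_c();
        if (err)
            goto err_out2;            /* undo B, then fall through to A */
        return 0;

    err_out2:
        release_b();
    err_out1:
        release_a();
    err_out0:
        return err;
    }

    int main(void) { return init_device() ? 1 : 0; }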
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index cc4218667cba..3c4836d0898f 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, | |||
3421 | break; | 3421 | break; |
3422 | } | 3422 | } |
3423 | } else { | 3423 | } else { |
3424 | if (!(val64 & busy_bit)) { | 3424 | if (val64 & busy_bit) { |
3425 | ret = SUCCESS; | 3425 | ret = SUCCESS; |
3426 | break; | 3426 | break; |
3427 | } | 3427 | } |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 103e8b0e2a0d..46997e177ee3 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2284 | fail2: | 2284 | fail2: |
2285 | efx_fini_struct(efx); | 2285 | efx_fini_struct(efx); |
2286 | fail1: | 2286 | fail1: |
2287 | WARN_ON(rc > 0); | ||
2287 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); | 2288 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); |
2288 | free_netdev(net_dev); | 2289 | free_netdev(net_dev); |
2289 | return rc; | 2290 | return rc; |
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index bf0b96af5334..5712fddd72f2 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -29,6 +29,15 @@ | |||
29 | #define FALCON_BOARD_SFN4111T 0x51 | 29 | #define FALCON_BOARD_SFN4111T 0x51 |
30 | #define FALCON_BOARD_SFN4112F 0x52 | 30 | #define FALCON_BOARD_SFN4112F 0x52 |
31 | 31 | ||
32 | /* Board temperature is about 15°C above ambient when air flow is | ||
33 | * limited. */ | ||
34 | #define FALCON_BOARD_TEMP_BIAS 15 | ||
35 | |||
36 | /* SFC4000 datasheet says: 'The maximum permitted junction temperature | ||
37 | * is 125°C; the thermal design of the environment for the SFC4000 | ||
38 | * should aim to keep this well below 100°C.' */ | ||
39 | #define FALCON_JUNC_TEMP_MAX 90 | ||
40 | |||
32 | /***************************************************************************** | 41 | /***************************************************************************** |
33 | * Support for LM87 sensor chip used on several boards | 42 | * Support for LM87 sensor chip used on several boards |
34 | */ | 43 | */ |
@@ -548,16 +557,16 @@ fail_hwmon: | |||
548 | static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ | 557 | static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ |
549 | 558 | ||
550 | static const u8 sfe4002_lm87_regs[] = { | 559 | static const u8 sfe4002_lm87_regs[] = { |
551 | LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ | 560 | LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ |
552 | LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ | 561 | LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ |
553 | LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ | 562 | LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ |
554 | LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */ | 563 | LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ |
555 | LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ | 564 | LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ |
556 | LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ | 565 | LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ |
557 | LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */ | 566 | LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ |
558 | LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ | 567 | LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ |
559 | LM87_TEMP_INT_LIMITS(10, 60), /* board */ | 568 | LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), |
560 | LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ | 569 | LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), |
561 | 0 | 570 | 0 |
562 | }; | 571 | }; |
563 | 572 | ||
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx) | |||
619 | static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ | 628 | static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ |
620 | 629 | ||
621 | static const u8 sfn4112f_lm87_regs[] = { | 630 | static const u8 sfn4112f_lm87_regs[] = { |
622 | LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ | 631 | LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ |
623 | LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ | 632 | LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ |
624 | LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ | 633 | LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ |
625 | LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ | 634 | LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ |
626 | LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ | 635 | LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ |
627 | LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ | 636 | LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ |
628 | LM87_TEMP_INT_LIMITS(10, 60), /* board */ | 637 | LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), |
629 | LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ | 638 | LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), |
630 | 0 | 639 | 0 |
631 | }; | 640 | }; |
632 | 641 | ||
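The falcon_boards hunks replace hard-coded temperature limits with values derived from two named constants, a board-above-ambient bias and a maximum junction temperature, and widen the supply-rail windows to +/-10%. A small sketch of the derived-limit idea with illustrative numbers (not the board specification itself):

    #include <stdio.h>

    #define BOARD_TEMP_BIAS   15   /* board runs ~15 degC above ambient */
    #define JUNC_TEMP_MAX     90   /* keep the controller junction below this */

    struct temp_limits { int low; int high; };

    static struct temp_limits board_limits(int ambient_max)
    {
        struct temp_limits t = { 0, ambient_max + BOARD_TEMP_BIAS };
        return t;
    }

    int main(void)
    {
        struct temp_limits board = board_limits(80);   /* assumed ambient spec */
        struct temp_limits junction = { 0, JUNC_TEMP_MAX };

        printf("board:    %d..%d degC\n", board.low, board.high);
        printf("junction: %d..%d degC\n", junction.low, junction.high);
        return 0;
    }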
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 0d4eba7266ec..f66b3da6ddff 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c | |||
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
127 | efx_dword_t reg; | 127 | efx_dword_t reg; |
128 | 128 | ||
129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | 129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ |
130 | rc = efx_mcdi_poll_reboot(efx); | 130 | rc = -efx_mcdi_poll_reboot(efx); |
131 | if (rc) | 131 | if (rc) |
132 | goto out; | 132 | goto out; |
133 | 133 | ||
@@ -804,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | |||
804 | loff_t offset, u8 *buffer, size_t length) | 804 | loff_t offset, u8 *buffer, size_t length) |
805 | { | 805 | { |
806 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | 806 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; |
807 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)]; | 807 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
808 | size_t outlen; | 808 | size_t outlen; |
809 | int rc; | 809 | int rc; |
810 | 810 | ||
@@ -828,7 +828,7 @@ fail: | |||
828 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 828 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
829 | loff_t offset, const u8 *buffer, size_t length) | 829 | loff_t offset, const u8 *buffer, size_t length) |
830 | { | 830 | { |
831 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)]; | 831 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
832 | int rc; | 832 | int rc; |
833 | 833 | ||
834 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | 834 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); |
@@ -838,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | |||
838 | 838 | ||
839 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | 839 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); |
840 | 840 | ||
841 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf), | 841 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, |
842 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | ||
842 | NULL, 0, NULL); | 843 | NULL, 0, NULL); |
843 | if (rc) | 844 | if (rc) |
844 | goto fail; | 845 | goto fail; |
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h index de916728c2e3..10ce98f4c0fb 100644 --- a/drivers/net/sfc/mcdi.h +++ b/drivers/net/sfc/mcdi.h | |||
@@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | |||
111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
112 | loff_t offset, const u8 *buffer, | 112 | loff_t offset, const u8 *buffer, |
113 | size_t length); | 113 | size_t length); |
114 | #define EFX_MCDI_NVRAM_LEN_MAX 128 | ||
114 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | 115 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, |
115 | loff_t offset, size_t length); | 116 | loff_t offset, size_t length); |
116 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, | 117 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, |
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h index 2a85360a46f0..73e71f420624 100644 --- a/drivers/net/sfc/mcdi_pcol.h +++ b/drivers/net/sfc/mcdi_pcol.h | |||
@@ -1090,8 +1090,10 @@ | |||
1090 | #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 | 1090 | #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 |
1091 | #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 | 1091 | #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 |
1092 | #define MC_CMD_MAC_RX_MATCH_FAULT 59 | 1092 | #define MC_CMD_MAC_RX_MATCH_FAULT 59 |
1093 | #define MC_CMD_GMAC_DMABUF_START 64 | ||
1094 | #define MC_CMD_GMAC_DMABUF_END 95 | ||
1093 | /* Insert new members here. */ | 1095 | /* Insert new members here. */ |
1094 | #define MC_CMD_MAC_GENERATION_END 60 | 1096 | #define MC_CMD_MAC_GENERATION_END 96 |
1095 | #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) | 1097 | #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) |
1096 | 1098 | ||
1097 | /* MC_CMD_MAC_STATS: | 1099 | /* MC_CMD_MAC_STATS: |
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c index 3a464529a46b..407bbaddfea6 100644 --- a/drivers/net/sfc/mtd.c +++ b/drivers/net/sfc/mtd.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include "mcdi_pcol.h" | 23 | #include "mcdi_pcol.h" |
24 | 24 | ||
25 | #define EFX_SPI_VERIFY_BUF_LEN 16 | 25 | #define EFX_SPI_VERIFY_BUF_LEN 16 |
26 | #define EFX_MCDI_CHUNK_LEN 128 | ||
27 | 26 | ||
28 | struct efx_mtd_partition { | 27 | struct efx_mtd_partition { |
29 | struct mtd_info mtd; | 28 | struct mtd_info mtd; |
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start, | |||
428 | int rc = 0; | 427 | int rc = 0; |
429 | 428 | ||
430 | while (offset < end) { | 429 | while (offset < end) { |
431 | chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); | 430 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); |
432 | rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, | 431 | rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, |
433 | buffer, chunk); | 432 | buffer, chunk); |
434 | if (rc) | 433 | if (rc) |
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start, | |||
491 | } | 490 | } |
492 | 491 | ||
493 | while (offset < end) { | 492 | while (offset < end) { |
494 | chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); | 493 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); |
495 | rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, | 494 | rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, |
496 | buffer, chunk); | 495 | buffer, chunk); |
497 | if (rc) | 496 | if (rc) |
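The mcdi.c/mcdi.h/mtd.c hunks replace variable-length stack buffers with fixed EFX_MCDI_NVRAM_LEN_MAX-sized ones and make the MTD paths transfer in chunks of at most that size. A self-contained sketch of the chunking loop; the "firmware" helper here is a stand-in, not the MCDI API:

    #include <stddef.h>
    #include <string.h>

    #define NVRAM_LEN_MAX 128        /* fixed upper bound per call */

    /* Stand-in for a firmware call that moves at most NVRAM_LEN_MAX bytes. */
    static int nvram_read_chunk(size_t offset, unsigned char *buf, size_t len)
    {
        unsigned char chunk[NVRAM_LEN_MAX];   /* fixed size, no VLA */

        if (len > NVRAM_LEN_MAX)
            return -1;
        memset(chunk, 0xA5, len);             /* pretend we fetched data */
        memcpy(buf, chunk, len);
        (void)offset;
        return 0;
    }

    static int nvram_read(size_t start, unsigned char *buf, size_t length)
    {
        size_t offset = start, end = start + length;

        while (offset < end) {
            size_t chunk = end - offset;

            if (chunk > NVRAM_LEN_MAX)        /* min(remaining, max chunk) */
                chunk = NVRAM_LEN_MAX;
            if (nvram_read_chunk(offset, buf, chunk))
                return -1;
            buf += chunk;
            offset += chunk;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char buf[300];
        return nvram_read(0, buf, sizeof(buf)) ? 1 : 0;
    }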
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index ff8f0a417fa3..67eec7a6e487 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c | |||
@@ -318,15 +318,9 @@ static int qt202x_reset_phy(struct efx_nic *efx) | |||
318 | /* Wait 250ms for the PHY to complete bootup */ | 318 | /* Wait 250ms for the PHY to complete bootup */ |
319 | msleep(250); | 319 | msleep(250); |
320 | 320 | ||
321 | /* Check that all the MMDs we expect are present and responding. We | ||
322 | * expect faults on some if the link is down, but not on the PHY XS */ | ||
323 | rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS); | ||
324 | if (rc < 0) | ||
325 | goto fail; | ||
326 | |||
327 | falcon_board(efx)->type->init_phy(efx); | 321 | falcon_board(efx)->type->init_phy(efx); |
328 | 322 | ||
329 | return rc; | 323 | return 0; |
330 | 324 | ||
331 | fail: | 325 | fail: |
332 | EFX_ERR(efx, "PHY reset timed out\n"); | 326 | EFX_ERR(efx, "PHY reset timed out\n"); |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 37f486b65f63..67249c3c9f50 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) | |||
644 | { | 644 | { |
645 | u32 reg1; | 645 | u32 reg1; |
646 | 646 | ||
647 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
647 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 648 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
648 | reg1 &= ~phy_power[port]; | 649 | reg1 &= ~phy_power[port]; |
649 | 650 | ||
@@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) | |||
651 | reg1 |= coma_mode[port]; | 652 | reg1 |= coma_mode[port]; |
652 | 653 | ||
653 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 654 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
655 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
654 | sky2_pci_read32(hw, PCI_DEV_REG1); | 656 | sky2_pci_read32(hw, PCI_DEV_REG1); |
655 | 657 | ||
656 | if (hw->chip_id == CHIP_ID_YUKON_FE) | 658 | if (hw->chip_id == CHIP_ID_YUKON_FE) |
@@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) | |||
707 | gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); | 709 | gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); |
708 | } | 710 | } |
709 | 711 | ||
712 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
710 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 713 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
711 | reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ | 714 | reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ |
712 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 715 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
716 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
713 | } | 717 | } |
714 | 718 | ||
715 | /* Force a renegotiation */ | 719 | /* Force a renegotiation */ |
@@ -1021,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, | |||
1021 | static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) | 1025 | static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) |
1022 | { | 1026 | { |
1023 | struct sky2_tx_le *le = sky2->tx_le + *slot; | 1027 | struct sky2_tx_le *le = sky2->tx_le + *slot; |
1024 | struct tx_ring_info *re = sky2->tx_ring + *slot; | ||
1025 | 1028 | ||
1026 | *slot = RING_NEXT(*slot, sky2->tx_ring_size); | 1029 | *slot = RING_NEXT(*slot, sky2->tx_ring_size); |
1027 | re->flags = 0; | ||
1028 | re->skb = NULL; | ||
1029 | le->ctrl = 0; | 1030 | le->ctrl = 0; |
1030 | return le; | 1031 | return le; |
1031 | } | 1032 | } |
@@ -1618,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) | |||
1618 | return count; | 1619 | return count; |
1619 | } | 1620 | } |
1620 | 1621 | ||
1621 | static void sky2_tx_unmap(struct pci_dev *pdev, | 1622 | static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) |
1622 | const struct tx_ring_info *re) | ||
1623 | { | 1623 | { |
1624 | if (re->flags & TX_MAP_SINGLE) | 1624 | if (re->flags & TX_MAP_SINGLE) |
1625 | pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), | 1625 | pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), |
@@ -1629,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev, | |||
1629 | pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), | 1629 | pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), |
1630 | pci_unmap_len(re, maplen), | 1630 | pci_unmap_len(re, maplen), |
1631 | PCI_DMA_TODEVICE); | 1631 | PCI_DMA_TODEVICE); |
1632 | re->flags = 0; | ||
1632 | } | 1633 | } |
1633 | 1634 | ||
1634 | /* | 1635 | /* |
@@ -1835,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1835 | dev->stats.tx_packets++; | 1836 | dev->stats.tx_packets++; |
1836 | dev->stats.tx_bytes += skb->len; | 1837 | dev->stats.tx_bytes += skb->len; |
1837 | 1838 | ||
1839 | re->skb = NULL; | ||
1838 | dev_kfree_skb_any(skb); | 1840 | dev_kfree_skb_any(skb); |
1839 | 1841 | ||
1840 | sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); | 1842 | sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); |
@@ -2149,7 +2151,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw) | |||
2149 | 2151 | ||
2150 | /* reset PHY Link Detect */ | 2152 | /* reset PHY Link Detect */ |
2151 | phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); | 2153 | phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); |
2154 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2152 | sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); | 2155 | sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); |
2156 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2153 | 2157 | ||
2154 | sky2_link_up(sky2); | 2158 | sky2_link_up(sky2); |
2155 | } | 2159 | } |
@@ -2640,6 +2644,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2640 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { | 2644 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { |
2641 | u16 pci_err; | 2645 | u16 pci_err; |
2642 | 2646 | ||
2647 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2643 | pci_err = sky2_pci_read16(hw, PCI_STATUS); | 2648 | pci_err = sky2_pci_read16(hw, PCI_STATUS); |
2644 | if (net_ratelimit()) | 2649 | if (net_ratelimit()) |
2645 | dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", | 2650 | dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", |
@@ -2647,12 +2652,14 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2647 | 2652 | ||
2648 | sky2_pci_write16(hw, PCI_STATUS, | 2653 | sky2_pci_write16(hw, PCI_STATUS, |
2649 | pci_err | PCI_STATUS_ERROR_BITS); | 2654 | pci_err | PCI_STATUS_ERROR_BITS); |
2655 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2650 | } | 2656 | } |
2651 | 2657 | ||
2652 | if (status & Y2_IS_PCI_EXP) { | 2658 | if (status & Y2_IS_PCI_EXP) { |
2653 | /* PCI-Express uncorrectable Error occurred */ | 2659 | /* PCI-Express uncorrectable Error occurred */ |
2654 | u32 err; | 2660 | u32 err; |
2655 | 2661 | ||
2662 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2656 | err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); | 2663 | err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); |
2657 | sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, | 2664 | sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, |
2658 | 0xfffffffful); | 2665 | 0xfffffffful); |
@@ -2660,6 +2667,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2660 | dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); | 2667 | dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); |
2661 | 2668 | ||
2662 | sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); | 2669 | sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); |
2670 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2663 | } | 2671 | } |
2664 | 2672 | ||
2665 | if (status & Y2_HWE_L1_MASK) | 2673 | if (status & Y2_HWE_L1_MASK) |
@@ -3038,6 +3046,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
3038 | } | 3046 | } |
3039 | 3047 | ||
3040 | sky2_power_on(hw); | 3048 | sky2_power_on(hw); |
3049 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
3041 | 3050 | ||
3042 | for (i = 0; i < hw->ports; i++) { | 3051 | for (i = 0; i < hw->ports; i++) { |
3043 | sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); | 3052 | sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); |
@@ -3074,6 +3083,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
3074 | reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; | 3083 | reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; |
3075 | 3084 | ||
3076 | /* reset PHY Link Detect */ | 3085 | /* reset PHY Link Detect */ |
3086 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
3077 | sky2_pci_write16(hw, PSM_CONFIG_REG4, | 3087 | sky2_pci_write16(hw, PSM_CONFIG_REG4, |
3078 | reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); | 3088 | reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); |
3079 | sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); | 3089 | sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); |
@@ -3091,6 +3101,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
3091 | /* restore the PCIe Link Control register */ | 3101 | /* restore the PCIe Link Control register */ |
3092 | sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); | 3102 | sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); |
3093 | } | 3103 | } |
3104 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
3094 | 3105 | ||
3095 | /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ | 3106 | /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ |
3096 | sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); | 3107 | sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); |
@@ -3228,6 +3239,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw) | |||
3228 | return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; | 3239 | return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; |
3229 | } | 3240 | } |
3230 | 3241 | ||
3242 | static void sky2_hw_set_wol(struct sky2_hw *hw) | ||
3243 | { | ||
3244 | int wol = 0; | ||
3245 | int i; | ||
3246 | |||
3247 | for (i = 0; i < hw->ports; i++) { | ||
3248 | struct net_device *dev = hw->dev[i]; | ||
3249 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3250 | |||
3251 | if (sky2->wol) | ||
3252 | wol = 1; | ||
3253 | } | ||
3254 | |||
3255 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || | ||
3256 | hw->chip_id == CHIP_ID_YUKON_EX || | ||
3257 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
3258 | sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); | ||
3259 | |||
3260 | device_set_wakeup_enable(&hw->pdev->dev, wol); | ||
3261 | } | ||
3262 | |||
3231 | static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 3263 | static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
3232 | { | 3264 | { |
3233 | const struct sky2_port *sky2 = netdev_priv(dev); | 3265 | const struct sky2_port *sky2 = netdev_priv(dev); |
@@ -3247,13 +3279,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
3247 | 3279 | ||
3248 | sky2->wol = wol->wolopts; | 3280 | sky2->wol = wol->wolopts; |
3249 | 3281 | ||
3250 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || | 3282 | sky2_hw_set_wol(hw); |
3251 | hw->chip_id == CHIP_ID_YUKON_EX || | ||
3252 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
3253 | sky2_write32(hw, B0_CTST, sky2->wol | ||
3254 | ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); | ||
3255 | |||
3256 | device_set_wakeup_enable(&hw->pdev->dev, sky2->wol); | ||
3257 | 3283 | ||
3258 | if (!netif_running(dev)) | 3284 | if (!netif_running(dev)) |
3259 | sky2_wol_init(sky2); | 3285 | sky2_wol_init(sky2); |
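Besides bracketing the PCI config accesses with TST_CFG_WRITE_ON/OFF, the sky2 hunks fold the Wake-on-LAN decision into a single sky2_hw_set_wol() that enables hardware WoL if any port asked for it. A plain-C sketch of that aggregation; the structures and the commented-out register calls are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    struct example_port { bool wol; };
    struct example_hw   { int ports; struct example_port port[2]; };

    static void hw_set_wol(struct example_hw *hw)
    {
        bool wol = false;

        for (int i = 0; i < hw->ports; i++)
            if (hw->port[i].wol)
                wol = true;           /* any port wanting WoL enables it */

        /* chip-level toggle and wakeup flag would go here, e.g.
         *   write B0_CTST with Y2_HW_WOL_ON or Y2_HW_WOL_OFF;
         *   propagate the flag to the platform wakeup setting. */
        printf("hardware WoL %s\n", wol ? "on" : "off");
    }

    int main(void)
    {
        struct example_hw hw = {
            .ports = 2,
            .port  = { { .wol = false }, { .wol = true } },
        };
        hw_set_wol(&hw);
        return 0;
    }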
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 95db60adde41..f9521136a869 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev) | |||
1063 | if (retval) { | 1063 | if (retval) { |
1064 | printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", | 1064 | printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", |
1065 | FIRMWARE_RX); | 1065 | FIRMWARE_RX); |
1066 | return retval; | 1066 | goto out_init; |
1067 | } | 1067 | } |
1068 | if (fw_rx->size % 4) { | 1068 | if (fw_rx->size % 4) { |
1069 | printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", | 1069 | printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", |
@@ -1108,6 +1108,9 @@ out_tx: | |||
1108 | release_firmware(fw_tx); | 1108 | release_firmware(fw_tx); |
1109 | out_rx: | 1109 | out_rx: |
1110 | release_firmware(fw_rx); | 1110 | release_firmware(fw_rx); |
1111 | out_init: | ||
1112 | if (retval) | ||
1113 | netdev_close(dev); | ||
1111 | return retval; | 1114 | return retval; |
1112 | } | 1115 | } |
1113 | 1116 | ||
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 75a669d48e5e..d71c1976072e 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) | |||
1437 | /* Transmit complete. */ | 1437 | /* Transmit complete. */ |
1438 | lp->lstats.tx_ints++; | 1438 | lp->lstats.tx_ints++; |
1439 | tc35815_txdone(dev); | 1439 | tc35815_txdone(dev); |
1440 | netif_wake_queue(dev); | ||
1441 | if (ret < 0) | 1440 | if (ret < 0) |
1442 | ret = 0; | 1441 | ret = 0; |
1443 | } | 1442 | } |
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 595777dcadb1..20696b5d60a5 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -249,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = { | |||
249 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 249 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
250 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ | 250 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ |
251 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ | 251 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ |
252 | { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */ | ||
252 | { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 253 | { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
253 | { } /* terminate list */ | 254 | { } /* terminate list */ |
254 | }; | 255 | }; |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 96bdc0b43889..eb8fe7e16c6c 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3279,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3279 | /* Handle the transmitted buffer and release */ | 3279 | /* Handle the transmitted buffer and release */ |
3280 | /* the BD to be used with the current frame */ | 3280 | /* the BD to be used with the current frame */ |
3281 | 3281 | ||
3282 | if (bd == ugeth->txBd[txQ]) /* queue empty? */ | 3282 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; |
3283 | if (!skb) | ||
3283 | break; | 3284 | break; |
3284 | 3285 | ||
3285 | dev->stats.tx_packets++; | 3286 | dev->stats.tx_packets++; |
3286 | 3287 | ||
3287 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; | ||
3288 | |||
3289 | if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && | 3288 | if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && |
3290 | skb_recycle_check(skb, | 3289 | skb_recycle_check(skb, |
3291 | ugeth->ug_info->uf_info.max_rx_buf_length + | 3290 | ugeth->ug_info->uf_info.max_rx_buf_length + |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 21e183a83b99..5f3b9eaeb04f 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -419,7 +419,7 @@ static int cdc_manage_power(struct usbnet *dev, int on) | |||
419 | 419 | ||
420 | static const struct driver_info cdc_info = { | 420 | static const struct driver_info cdc_info = { |
421 | .description = "CDC Ethernet Device", | 421 | .description = "CDC Ethernet Device", |
422 | .flags = FLAG_ETHER | FLAG_LINK_INTR, | 422 | .flags = FLAG_ETHER, |
423 | // .check_connect = cdc_check_connect, | 423 | // .check_connect = cdc_check_connect, |
424 | .bind = cdc_bind, | 424 | .bind = cdc_bind, |
425 | .unbind = usbnet_cdc_unbind, | 425 | .unbind = usbnet_cdc_unbind, |
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = { | |||
584 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | 584 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), |
585 | .driver_info = (unsigned long) &mbm_info, | 585 | .driver_info = (unsigned long) &mbm_info, |
586 | }, { | 586 | }, { |
587 | /* Ericsson C3607w ver 2 */ | ||
588 | USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM, | ||
589 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | ||
590 | .driver_info = (unsigned long) &mbm_info, | ||
591 | }, { | ||
587 | /* Toshiba F3507g */ | 592 | /* Toshiba F3507g */ |
588 | USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, | 593 | USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, |
589 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), | 594 | USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index c93f58f5c6f2..317aa34b21cf 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status) | |||
1877 | /** | 1877 | /** |
1878 | * tx_srv - transmit interrupt service | 1878 | * tx_srv - transmit interrupt service |
1879 | * @vptr; Velocity | 1879 | * @vptr; Velocity |
1880 | * @status: | ||
1881 | * | 1880 | * |
1882 | * Scan the queues looking for transmitted packets that | 1881 | * Scan the queues looking for transmitted packets that |
1883 | * we can complete and clean up. Update any statistics as | 1882 | * we can complete and clean up. Update any statistics as |
1884 | * necessary/ | 1883 | * necessary/ |
1885 | */ | 1884 | */ |
1886 | static int velocity_tx_srv(struct velocity_info *vptr, u32 status) | 1885 | static int velocity_tx_srv(struct velocity_info *vptr) |
1887 | { | 1886 | { |
1888 | struct tx_desc *td; | 1887 | struct tx_desc *td; |
1889 | int qnum; | 1888 | int qnum; |
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
2090 | /** | 2089 | /** |
2091 | * velocity_rx_srv - service RX interrupt | 2090 | * velocity_rx_srv - service RX interrupt |
2092 | * @vptr: velocity | 2091 | * @vptr: velocity |
2093 | * @status: adapter status (unused) | ||
2094 | * | 2092 | * |
2095 | * Walk the receive ring of the velocity adapter and remove | 2093 | * Walk the receive ring of the velocity adapter and remove |
2096 | * any received packets from the receive queue. Hand the ring | 2094 | * any received packets from the receive queue. Hand the ring |
2097 | * slots back to the adapter for reuse. | 2095 | * slots back to the adapter for reuse. |
2098 | */ | 2096 | */ |
2099 | static int velocity_rx_srv(struct velocity_info *vptr, int status, | 2097 | static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) |
2100 | int budget_left) | ||
2101 | { | 2098 | { |
2102 | struct net_device_stats *stats = &vptr->dev->stats; | 2099 | struct net_device_stats *stats = &vptr->dev->stats; |
2103 | int rd_curr = vptr->rx.curr; | 2100 | int rd_curr = vptr->rx.curr; |
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget) | |||
2151 | struct velocity_info *vptr = container_of(napi, | 2148 | struct velocity_info *vptr = container_of(napi, |
2152 | struct velocity_info, napi); | 2149 | struct velocity_info, napi); |
2153 | unsigned int rx_done; | 2150 | unsigned int rx_done; |
2154 | u32 isr_status; | 2151 | unsigned long flags; |
2155 | |||
2156 | spin_lock(&vptr->lock); | ||
2157 | isr_status = mac_read_isr(vptr->mac_regs); | ||
2158 | |||
2159 | /* Ack the interrupt */ | ||
2160 | mac_write_isr(vptr->mac_regs, isr_status); | ||
2161 | if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) | ||
2162 | velocity_error(vptr, isr_status); | ||
2163 | 2152 | ||
2153 | spin_lock_irqsave(&vptr->lock, flags); | ||
2164 | /* | 2154 | /* |
2165 | * Do rx and tx twice for performance (taken from the VIA | 2155 | * Do rx and tx twice for performance (taken from the VIA |
2166 | * out-of-tree driver). | 2156 | * out-of-tree driver). |
2167 | */ | 2157 | */ |
2168 | rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); | 2158 | rx_done = velocity_rx_srv(vptr, budget / 2); |
2169 | velocity_tx_srv(vptr, isr_status); | 2159 | velocity_tx_srv(vptr); |
2170 | rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); | 2160 | rx_done += velocity_rx_srv(vptr, budget - rx_done); |
2171 | velocity_tx_srv(vptr, isr_status); | 2161 | velocity_tx_srv(vptr); |
2172 | |||
2173 | spin_unlock(&vptr->lock); | ||
2174 | 2162 | ||
2175 | /* If budget not fully consumed, exit the polling mode */ | 2163 | /* If budget not fully consumed, exit the polling mode */ |
2176 | if (rx_done < budget) { | 2164 | if (rx_done < budget) { |
2177 | napi_complete(napi); | 2165 | napi_complete(napi); |
2178 | mac_enable_int(vptr->mac_regs); | 2166 | mac_enable_int(vptr->mac_regs); |
2179 | } | 2167 | } |
2168 | spin_unlock_irqrestore(&vptr->lock, flags); | ||
2180 | 2169 | ||
2181 | return rx_done; | 2170 | return rx_done; |
2182 | } | 2171 | } |
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance) | |||
2206 | return IRQ_NONE; | 2195 | return IRQ_NONE; |
2207 | } | 2196 | } |
2208 | 2197 | ||
2198 | /* Ack the interrupt */ | ||
2199 | mac_write_isr(vptr->mac_regs, isr_status); | ||
2200 | |||
2209 | if (likely(napi_schedule_prep(&vptr->napi))) { | 2201 | if (likely(napi_schedule_prep(&vptr->napi))) { |
2210 | mac_disable_int(vptr->mac_regs); | 2202 | mac_disable_int(vptr->mac_regs); |
2211 | __napi_schedule(&vptr->napi); | 2203 | __napi_schedule(&vptr->napi); |
2212 | } | 2204 | } |
2205 | |||
2206 | if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) | ||
2207 | velocity_error(vptr, isr_status); | ||
2208 | |||
2213 | spin_unlock(&vptr->lock); | 2209 | spin_unlock(&vptr->lock); |
2214 | 2210 | ||
2215 | return IRQ_HANDLED; | 2211 | return IRQ_HANDLED; |
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev) | |||
3100 | velocity_init_registers(vptr, VELOCITY_INIT_WOL); | 3096 | velocity_init_registers(vptr, VELOCITY_INIT_WOL); |
3101 | mac_disable_int(vptr->mac_regs); | 3097 | mac_disable_int(vptr->mac_regs); |
3102 | 3098 | ||
3103 | velocity_tx_srv(vptr, 0); | 3099 | velocity_tx_srv(vptr); |
3104 | 3100 | ||
3105 | for (i = 0; i < vptr->tx.numq; i++) { | 3101 | for (i = 0; i < vptr->tx.numq; i++) { |
3106 | if (vptr->tx.used[i]) | 3102 | if (vptr->tx.used[i]) |
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev, | |||
3344 | { | 3340 | { |
3345 | struct velocity_info *vptr = netdev_priv(dev); | 3341 | struct velocity_info *vptr = netdev_priv(dev); |
3346 | int max_us = 0x3f * 64; | 3342 | int max_us = 0x3f * 64; |
3343 | unsigned long flags; | ||
3347 | 3344 | ||
3348 | /* 6 bits of */ | 3345 | /* 6 bits of */ |
3349 | if (ecmd->tx_coalesce_usecs > max_us) | 3346 | if (ecmd->tx_coalesce_usecs > max_us) |
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev, | |||
3365 | ecmd->tx_coalesce_usecs); | 3362 | ecmd->tx_coalesce_usecs); |
3366 | 3363 | ||
3367 | /* Setup the interrupt suppression and queue timers */ | 3364 | /* Setup the interrupt suppression and queue timers */ |
3365 | spin_lock_irqsave(&vptr->lock, flags); | ||
3368 | mac_disable_int(vptr->mac_regs); | 3366 | mac_disable_int(vptr->mac_regs); |
3369 | setup_adaptive_interrupts(vptr); | 3367 | setup_adaptive_interrupts(vptr); |
3370 | setup_queue_timers(vptr); | 3368 | setup_queue_timers(vptr); |
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev, | |||
3372 | mac_write_int_mask(vptr->int_mask, vptr->mac_regs); | 3370 | mac_write_int_mask(vptr->int_mask, vptr->mac_regs); |
3373 | mac_clear_isr(vptr->mac_regs); | 3371 | mac_clear_isr(vptr->mac_regs); |
3374 | mac_enable_int(vptr->mac_regs); | 3372 | mac_enable_int(vptr->mac_regs); |
3373 | spin_unlock_irqrestore(&vptr->lock, flags); | ||
3375 | 3374 | ||
3376 | return 0; | 3375 | return 0; |
3377 | } | 3376 | } |
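The via-velocity hunks move the interrupt-time work into the NAPI poll routine: the hard interrupt only acks the status and schedules NAPI, while the poll path runs rx/tx service under spin_lock_irqsave() and re-enables device interrupts once the budget is not exhausted. A kernel-style sketch of that split; the service helpers are hypothetical:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct example_priv {
        spinlock_t lock;
        struct napi_struct napi;
        /* ... register mappings ... */
    };

    static irqreturn_t example_intr(int irq, void *dev_instance)
    {
        struct example_priv *priv = dev_instance;

        /* ack_interrupt(priv);                    -- hypothetical */
        if (likely(napi_schedule_prep(&priv->napi))) {
            /* disable_device_interrupts(priv);    -- hypothetical */
            __napi_schedule(&priv->napi);
        }
        return IRQ_HANDLED;
    }

    static int example_poll(struct napi_struct *napi, int budget)
    {
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        unsigned long flags;
        int rx_done = 0;

        spin_lock_irqsave(&priv->lock, flags);
        /* rx_done = service_rx(priv, budget);     -- hypothetical */
        /* service_tx(priv);                       -- hypothetical */
        if (rx_done < budget) {
            napi_complete(napi);
            /* enable_device_interrupts(priv);     -- hypothetical */
        }
        spin_unlock_irqrestore(&priv->lock, flags);
        return rx_done;
    }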
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c708ecc3cb2e..9ead30bd00c4 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work) | |||
395 | 395 | ||
396 | vi = container_of(work, struct virtnet_info, refill.work); | 396 | vi = container_of(work, struct virtnet_info, refill.work); |
397 | napi_disable(&vi->napi); | 397 | napi_disable(&vi->napi); |
398 | try_fill_recv(vi, GFP_KERNEL); | 398 | still_empty = !try_fill_recv(vi, GFP_KERNEL); |
399 | still_empty = (vi->num == 0); | ||
400 | napi_enable(&vi->napi); | 399 | napi_enable(&vi->napi); |
401 | 400 | ||
402 | /* In theory, this can happen: if we don't get any buffers in | 401 | /* In theory, this can happen: if we don't get any buffers in |
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 5cc0f279417e..2d7c96d7e865 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h | |||
@@ -151,6 +151,7 @@ enum { | |||
151 | 151 | ||
152 | /* Device IDs */ | 152 | /* Device IDs */ |
153 | USB_DEVICE_ID_I6050 = 0x0186, | 153 | USB_DEVICE_ID_I6050 = 0x0186, |
154 | USB_DEVICE_ID_I6050_2 = 0x0188, | ||
154 | }; | 155 | }; |
155 | 156 | ||
156 | 157 | ||
@@ -234,6 +235,7 @@ struct i2400mu { | |||
234 | u8 rx_size_auto_shrink; | 235 | u8 rx_size_auto_shrink; |
235 | 236 | ||
236 | struct dentry *debugfs_dentry; | 237 | struct dentry *debugfs_dentry; |
238 | unsigned i6050:1; /* 1 if this is a 6050 based SKU */ | ||
237 | }; | 239 | }; |
238 | 240 | ||
239 | 241 | ||
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 3b48681f8a0d..98f4f8c5fb68 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c | |||
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface, | |||
478 | i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; | 478 | i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; |
479 | i2400m->bus_bm_mac_addr_impaired = 0; | 479 | i2400m->bus_bm_mac_addr_impaired = 0; |
480 | 480 | ||
481 | if (id->idProduct == USB_DEVICE_ID_I6050) { | 481 | switch (id->idProduct) { |
482 | case USB_DEVICE_ID_I6050: | ||
483 | case USB_DEVICE_ID_I6050_2: | ||
484 | i2400mu->i6050 = 1; | ||
485 | break; | ||
486 | default: | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | if (i2400mu->i6050) { | ||
482 | i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; | 491 | i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; |
483 | i2400mu->endpoint_cfg.bulk_out = 0; | 492 | i2400mu->endpoint_cfg.bulk_out = 0; |
484 | i2400mu->endpoint_cfg.notification = 3; | 493 | i2400mu->endpoint_cfg.notification = 3; |
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface) | |||
719 | static | 728 | static |
720 | struct usb_device_id i2400mu_id_table[] = { | 729 | struct usb_device_id i2400mu_id_table[] = { |
721 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, | 730 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, |
731 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, | ||
722 | { USB_DEVICE(0x8086, 0x0181) }, | 732 | { USB_DEVICE(0x8086, 0x0181) }, |
723 | { USB_DEVICE(0x8086, 0x1403) }, | 733 | { USB_DEVICE(0x8086, 0x1403) }, |
724 | { USB_DEVICE(0x8086, 0x1405) }, | 734 | { USB_DEVICE(0x8086, 0x1405) }, |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2ec61f08cfdb..ae371448b5a0 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -855,12 +855,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) | |||
855 | } | 855 | } |
856 | } | 856 | } |
857 | 857 | ||
858 | static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah) | 858 | static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah) |
859 | { | 859 | { |
860 | u32 i, j; | 860 | u32 i, j; |
861 | 861 | ||
862 | if ((ah->hw_version.devid == AR9280_DEVID_PCI) && | 862 | if (ah->hw_version.devid == AR9280_DEVID_PCI) { |
863 | test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) { | ||
864 | 863 | ||
865 | /* EEPROM Fixup */ | 864 | /* EEPROM Fixup */ |
866 | for (i = 0; i < ah->iniModes.ia_rows; i++) { | 865 | for (i = 0; i < ah->iniModes.ia_rows; i++) { |
@@ -980,7 +979,7 @@ int ath9k_hw_init(struct ath_hw *ah) | |||
980 | if (r) | 979 | if (r) |
981 | return r; | 980 | return r; |
982 | 981 | ||
983 | ath9k_hw_init_11a_eeprom_fix(ah); | 982 | ath9k_hw_init_eeprom_fix(ah); |
984 | 983 | ||
985 | r = ath9k_hw_init_macaddr(ah); | 984 | r = ath9k_hw_init_macaddr(ah); |
986 | if (r) { | 985 | if (r) { |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 996eb90263cc..643bea35686f 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2655,10 +2655,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, | |||
2655 | (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { | 2655 | (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { |
2656 | ath9k_ps_wakeup(sc); | 2656 | ath9k_ps_wakeup(sc); |
2657 | ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); | 2657 | ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); |
2658 | ath_beacon_return(sc, avp); | ||
2659 | ath9k_ps_restore(sc); | 2658 | ath9k_ps_restore(sc); |
2660 | } | 2659 | } |
2661 | 2660 | ||
2661 | ath_beacon_return(sc, avp); | ||
2662 | sc->sc_flags &= ~SC_OP_BEACONS; | 2662 | sc->sc_flags &= ~SC_OP_BEACONS; |
2663 | 2663 | ||
2664 | for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { | 2664 | for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index fa12b9060b0b..29bf33692f71 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, | |||
1615 | bf->bf_frmlen -= padsize; | 1615 | bf->bf_frmlen -= padsize; |
1616 | } | 1616 | } |
1617 | 1617 | ||
1618 | if (conf_is_ht(&hw->conf) && !is_pae(skb)) | 1618 | if (conf_is_ht(&hw->conf)) |
1619 | bf->bf_state.bf_type |= BUF_HT; | 1619 | bf->bf_state.bf_type |= BUF_HT; |
1620 | 1620 | ||
1621 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); | 1621 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); |
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1701 | goto tx_done; | 1701 | goto tx_done; |
1702 | } | 1702 | } |
1703 | 1703 | ||
1704 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { | 1704 | if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { |
1705 | /* | 1705 | /* |
1706 | * Try aggregation if it's a unicast data frame | 1706 | * Try aggregation if it's a unicast data frame |
1707 | * and the destination is HT capable. | 1707 | * and the destination is HT capable. |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index fe3bf9491997..c484cc253892 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -115,6 +115,7 @@ | |||
115 | #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ | 115 | #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ |
116 | #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ | 116 | #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ |
117 | #define B43_MMIO_RNG 0x65A | 117 | #define B43_MMIO_RNG 0x65A |
118 | #define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ | ||
118 | #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ | 119 | #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ |
119 | #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 | 120 | #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 |
120 | #define B43_MMIO_POWERUP_DELAY 0x6A8 | 121 | #define B43_MMIO_POWERUP_DELAY 0x6A8 |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 4c41cfe44f26..490fb45d1d05 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev) | |||
628 | static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) | 628 | static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) |
629 | { | 629 | { |
630 | /* slot_time is in usec. */ | 630 | /* slot_time is in usec. */ |
631 | if (dev->phy.type != B43_PHYTYPE_G) | 631 | /* This test used to exit for all but a G PHY. */ |
632 | if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) | ||
632 | return; | 633 | return; |
633 | b43_write16(dev, 0x684, 510 + slot_time); | 634 | b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); |
634 | b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); | 635 | /* Shared memory location 0x0010 is the slot time and should be |
636 | * set to slot_time; however, this register is initially 0 and changing | ||
637 | * the value adversely affects the transmit rate for BCM4311 | ||
638 | * devices. Until this behavior is understood, delete this step | ||
639 | * | ||
640 | * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); | ||
641 | */ | ||
635 | } | 642 | } |
636 | 643 | ||
637 | static void b43_short_slot_timing_enable(struct b43_wldev *dev) | 644 | static void b43_short_slot_timing_enable(struct b43_wldev *dev) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 9b4b8b5c7574..31462813bac0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2008 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2008 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
2009 | "%d index %d\n", scd_ssn , index); | 2009 | "%d index %d\n", scd_ssn , index); |
2010 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2010 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2011 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 2011 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
2012 | 2012 | ||
2013 | if (priv->mac80211_registered && | 2013 | if (priv->mac80211_registered && |
2014 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 2014 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 33a5866538e7..cffaae772d51 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv, | |||
1125 | scd_ssn , index, txq_id, txq->swq_id); | 1125 | scd_ssn , index, txq_id, txq->swq_id); |
1126 | 1126 | ||
1127 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 1127 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
1128 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 1128 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
1129 | 1129 | ||
1130 | if (priv->mac80211_registered && | 1130 | if (priv->mac80211_registered && |
1131 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 1131 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv, | |||
1153 | tx_resp->failure_frame); | 1153 | tx_resp->failure_frame); |
1154 | 1154 | ||
1155 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 1155 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
1156 | if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) | 1156 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
1157 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | ||
1158 | 1157 | ||
1159 | if (priv->mac80211_registered && | 1158 | if (priv->mac80211_registered && |
1160 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | 1159 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) |
1161 | iwl_wake_queue(priv, txq_id); | 1160 | iwl_wake_queue(priv, txq_id); |
1162 | } | 1161 | } |
1163 | 1162 | ||
1164 | if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) | 1163 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); |
1165 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | ||
1166 | 1164 | ||
1167 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | 1165 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) |
1168 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); | 1166 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); |
@@ -1598,6 +1596,7 @@ struct iwl_cfg iwl5300_agn_cfg = { | |||
1598 | .use_bsm = false, | 1596 | .use_bsm = false, |
1599 | .ht_greenfield_support = true, | 1597 | .ht_greenfield_support = true, |
1600 | .led_compensation = 51, | 1598 | .led_compensation = 51, |
1599 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1601 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1600 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1602 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1601 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1603 | }; | 1602 | }; |
@@ -1622,6 +1621,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { | |||
1622 | .use_bsm = false, | 1621 | .use_bsm = false, |
1623 | .ht_greenfield_support = true, | 1622 | .ht_greenfield_support = true, |
1624 | .led_compensation = 51, | 1623 | .led_compensation = 51, |
1624 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1625 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1625 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1626 | }; | 1626 | }; |
1627 | 1627 | ||
@@ -1667,6 +1667,7 @@ struct iwl_cfg iwl5100_agn_cfg = { | |||
1667 | .use_bsm = false, | 1667 | .use_bsm = false, |
1668 | .ht_greenfield_support = true, | 1668 | .ht_greenfield_support = true, |
1669 | .led_compensation = 51, | 1669 | .led_compensation = 51, |
1670 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1670 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1671 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1671 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1672 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1672 | }; | 1673 | }; |
@@ -1691,6 +1692,7 @@ struct iwl_cfg iwl5350_agn_cfg = { | |||
1691 | .use_bsm = false, | 1692 | .use_bsm = false, |
1692 | .ht_greenfield_support = true, | 1693 | .ht_greenfield_support = true, |
1693 | .led_compensation = 51, | 1694 | .led_compensation = 51, |
1695 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1694 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1696 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1695 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1697 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1696 | }; | 1698 | }; |
@@ -1715,6 +1717,7 @@ struct iwl_cfg iwl5150_agn_cfg = { | |||
1715 | .use_bsm = false, | 1717 | .use_bsm = false, |
1716 | .ht_greenfield_support = true, | 1718 | .ht_greenfield_support = true, |
1717 | .led_compensation = 51, | 1719 | .led_compensation = 51, |
1720 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1718 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1721 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1719 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1722 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1720 | }; | 1723 | }; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 5461f105bd2d..f36f804804fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -2745,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) | |||
2745 | priv->staging_rxon.flags = 0; | 2745 | priv->staging_rxon.flags = 0; |
2746 | 2746 | ||
2747 | iwl_set_rxon_channel(priv, conf->channel); | 2747 | iwl_set_rxon_channel(priv, conf->channel); |
2748 | iwl_set_rxon_ht(priv, ht_conf); | ||
2748 | 2749 | ||
2749 | iwl_set_flags_for_band(priv, conf->channel->band); | 2750 | iwl_set_flags_for_band(priv, conf->channel->band); |
2750 | spin_unlock_irqrestore(&priv->lock, flags); | 2751 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 27ca859e7453..b69e972671b2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv); | |||
446 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, | 446 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, |
447 | struct iwl_tx_queue *txq); | 447 | struct iwl_tx_queue *txq); |
448 | int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 448 | int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
449 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | ||
450 | int sta_id, int tid, int freed); | ||
449 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 451 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
450 | int slots_num, u32 txq_id); | 452 | int slots_num, u32 txq_id); |
451 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 453 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index e7d88d1da15d..83cc4e500a96 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c | |||
@@ -1,3 +1,29 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2 of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called LICENSE. | ||
20 | * | ||
21 | * Contact Information: | ||
22 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
24 | * | ||
25 | *****************************************************************************/ | ||
26 | |||
1 | #include <linux/module.h> | 27 | #include <linux/module.h> |
2 | 28 | ||
3 | /* sparse doesn't like tracepoint macros */ | 29 | /* sparse doesn't like tracepoint macros */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 21361968ab7e..d9c7363b1bbb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h | |||
@@ -1,3 +1,29 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2 of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called LICENSE. | ||
20 | * | ||
21 | * Contact Information: | ||
22 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
24 | * | ||
25 | *****************************************************************************/ | ||
26 | |||
1 | #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) | 27 | #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) |
2 | #define __IWLWIFI_DEVICE_TRACE | 28 | #define __IWLWIFI_DEVICE_TRACE |
3 | 29 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 6f36b6e79f5e..2dbce85404aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c | |||
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, | |||
928 | if (ieee80211_is_mgmt(fc) || | 928 | if (ieee80211_is_mgmt(fc) || |
929 | ieee80211_has_protected(fc) || | 929 | ieee80211_has_protected(fc) || |
930 | ieee80211_has_morefrags(fc) || | 930 | ieee80211_has_morefrags(fc) || |
931 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) | 931 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || |
932 | (ieee80211_is_data_qos(fc) && | ||
933 | *ieee80211_get_qos_ctl(hdr) & | ||
934 | IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) | ||
932 | ret = skb_linearize(skb); | 935 | ret = skb_linearize(skb); |
933 | else | 936 | else |
934 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? | 937 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? |
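The hunk above extends the set of frames that are linearized before being handed to mac80211: QoS data frames carrying an A-MSDU (the A-MSDU Present bit in the QoS Control field) must be contiguous so the aggregate can be deaggregated. A minimal stand-alone sketch of that decision; the constant is illustrative (bit 7 of the QoS Control field), not the mac80211 definition used in the driver:

#include <stdbool.h>
#include <stdint.h>

#define QOS_CTL_A_MSDU_PRESENT 0x80	/* bit 7 of the 802.11 QoS Control field */

/* Frames mac80211 must be able to parse in full (management, protected,
 * fragmented, or QoS A-MSDU) are linearized; others only get a header pull. */
static bool must_linearize(bool is_mgmt, bool is_protected, bool has_morefrags,
			   bool is_fragment, bool is_data_qos, uint8_t qos_ctl)
{
	return is_mgmt || is_protected || has_morefrags || is_fragment ||
	       (is_data_qos && (qos_ctl & QOS_CTL_A_MSDU_PRESENT));
}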
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index cde09a890b73..90fbdb25399e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -297,7 +297,7 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, | |||
297 | } | 297 | } |
298 | EXPORT_SYMBOL(iwl_add_station); | 298 | EXPORT_SYMBOL(iwl_add_station); |
299 | 299 | ||
300 | static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr) | 300 | static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr) |
301 | { | 301 | { |
302 | unsigned long flags; | 302 | unsigned long flags; |
303 | u8 sta_id = iwl_find_station(priv, addr); | 303 | u8 sta_id = iwl_find_station(priv, addr); |
@@ -324,7 +324,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv, | |||
324 | { | 324 | { |
325 | struct iwl_rem_sta_cmd *rm_sta = | 325 | struct iwl_rem_sta_cmd *rm_sta = |
326 | (struct iwl_rem_sta_cmd *)cmd->cmd.payload; | 326 | (struct iwl_rem_sta_cmd *)cmd->cmd.payload; |
327 | const char *addr = rm_sta->addr; | 327 | const u8 *addr = rm_sta->addr; |
328 | 328 | ||
329 | if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { | 329 | if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { |
330 | IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", | 330 | IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 87ce2bd292c7..8f4071562857 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |||
120 | EXPORT_SYMBOL(iwl_txq_update_write_ptr); | 120 | EXPORT_SYMBOL(iwl_txq_update_write_ptr); |
121 | 121 | ||
122 | 122 | ||
123 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | ||
124 | int sta_id, int tid, int freed) | ||
125 | { | ||
126 | if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) | ||
127 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | ||
128 | else { | ||
129 | IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n", | ||
130 | priv->stations[sta_id].tid[tid].tfds_in_queue, | ||
131 | freed); | ||
132 | priv->stations[sta_id].tid[tid].tfds_in_queue = 0; | ||
133 | } | ||
134 | } | ||
135 | EXPORT_SYMBOL(iwl_free_tfds_in_queue); | ||
136 | |||
123 | /** | 137 | /** |
124 | * iwl_tx_queue_free - Deallocate DMA queue. | 138 | * iwl_tx_queue_free - Deallocate DMA queue. |
125 | * @txq: Transmit queue to deallocate. | 139 | * @txq: Transmit queue to deallocate. |
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | |||
1131 | struct iwl_queue *q = &txq->q; | 1145 | struct iwl_queue *q = &txq->q; |
1132 | struct iwl_tx_info *tx_info; | 1146 | struct iwl_tx_info *tx_info; |
1133 | int nfreed = 0; | 1147 | int nfreed = 0; |
1148 | struct ieee80211_hdr *hdr; | ||
1134 | 1149 | ||
1135 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | 1150 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { |
1136 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " | 1151 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " |
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | |||
1145 | 1160 | ||
1146 | tx_info = &txq->txb[txq->q.read_ptr]; | 1161 | tx_info = &txq->txb[txq->q.read_ptr]; |
1147 | iwl_tx_status(priv, tx_info->skb[0]); | 1162 | iwl_tx_status(priv, tx_info->skb[0]); |
1163 | |||
1164 | hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; | ||
1165 | if (hdr && ieee80211_is_data_qos(hdr->frame_control)) | ||
1166 | nfreed++; | ||
1148 | tx_info->skb[0] = NULL; | 1167 | tx_info->skb[0] = NULL; |
1149 | 1168 | ||
1150 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) | 1169 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) |
1151 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | 1170 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); |
1152 | 1171 | ||
1153 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); | 1172 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); |
1154 | nfreed++; | ||
1155 | } | 1173 | } |
1156 | return nfreed; | 1174 | return nfreed; |
1157 | } | 1175 | } |
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | |||
1559 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | 1577 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { |
1560 | /* calculate mac80211 ampdu sw queue to wake */ | 1578 | /* calculate mac80211 ampdu sw queue to wake */ |
1561 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); | 1579 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); |
1562 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | 1580 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
1563 | 1581 | ||
1564 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | 1582 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && |
1565 | priv->mac80211_registered && | 1583 | priv->mac80211_registered && |
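The new iwl_free_tfds_in_queue() above replaces the open-coded "tfds_in_queue -= freed" updates with a saturating decrement: if the reclaim path reports more freed frames than are accounted for, the counter is reset to zero and an error is logged instead of letting the unsigned field wrap. A stand-alone model of that behaviour (the demo main() is illustrative only):

#include <stdio.h>

static void free_tfds_in_queue(unsigned int *tfds_in_queue, int freed)
{
	if (*tfds_in_queue >= (unsigned int)freed)
		*tfds_in_queue -= freed;
	else {
		fprintf(stderr, "free more than tfds_in_queue (%u:%d)\n",
			*tfds_in_queue, freed);
		*tfds_in_queue = 0;	/* clamp instead of wrapping */
	}
}

int main(void)
{
	unsigned int tfds = 3;

	free_tfds_in_queue(&tfds, 2);	/* 3 -> 1 */
	free_tfds_in_queue(&tfds, 2);	/* would underflow, clamps to 0 */
	printf("tfds_in_queue = %u\n", tfds);
	return 0;
}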
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c index 777584d76a88..1e41ad0fcad5 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.c +++ b/drivers/net/wireless/iwmc3200wifi/commands.c | |||
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm, | |||
973 | 973 | ||
974 | memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); | 974 | memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); |
975 | 975 | ||
976 | update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE; | ||
977 | update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) - | ||
978 | sizeof(struct iwm_umac_wifi_if)); | ||
979 | |||
976 | update.command = cpu_to_le32(command); | 980 | update.command = cpu_to_le32(command); |
977 | if (pmksa->bssid) | 981 | if (pmksa->bssid) |
978 | memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); | 982 | memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); |
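The hunk above fills in the UMAC command header before the PMKID update is sent: the OID identifies the command and buf_size carries the payload length excluding the header itself. A sketch of that size computation with a hypothetical layout; field names mirror the fix, but the types and packing here are illustrative, not the driver's __le16 definitions:

#include <stdint.h>
#include <string.h>

struct umac_wifi_if { uint8_t oid; uint8_t flags; uint16_t buf_size; };

struct umac_pmkid_update {
	struct umac_wifi_if hdr;
	uint32_t command;
	uint8_t bssid[6];
	uint16_t reserved;
	uint8_t pmkid[16];
};

static void init_pmkid_update(struct umac_pmkid_update *update, uint8_t oid)
{
	memset(update, 0, sizeof(*update));
	update->hdr.oid = oid;
	/* payload size excludes the command header itself */
	update->hdr.buf_size = (uint16_t)(sizeof(*update) -
					  sizeof(struct umac_wifi_if));
}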
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h index 06af0552cd75..3dfd9f0e9003 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.h +++ b/drivers/net/wireless/iwmc3200wifi/commands.h | |||
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx { | |||
463 | #define IWM_CMD_PMKID_FLUSH 3 | 463 | #define IWM_CMD_PMKID_FLUSH 3 |
464 | 464 | ||
465 | struct iwm_umac_pmkid_update { | 465 | struct iwm_umac_pmkid_update { |
466 | struct iwm_umac_wifi_if hdr; | ||
466 | __le32 command; | 467 | __le32 command; |
467 | u8 bssid[ETH_ALEN]; | 468 | u8 bssid[ETH_ALEN]; |
468 | __le16 reserved; | 469 | __le16 reserved; |
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 6d6ed7485175..f727b4a83196 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c | |||
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, | |||
794 | } | 794 | } |
795 | 795 | ||
796 | bss->bss = kzalloc(bss_len, GFP_KERNEL); | 796 | bss->bss = kzalloc(bss_len, GFP_KERNEL); |
797 | if (!bss) { | 797 | if (!bss->bss) { |
798 | kfree(bss); | 798 | kfree(bss); |
799 | IWM_ERR(iwm, "Couldn't allocate bss\n"); | 799 | IWM_ERR(iwm, "Couldn't allocate bss\n"); |
800 | return -ENOMEM; | 800 | return -ENOMEM; |
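The one-line fix above corrects a copy-paste error: after kzalloc() of the embedded buffer, the code must test bss->bss (the fresh allocation) rather than bss (the container already known to be valid), otherwise the out-of-memory path can never trigger. A minimal userspace model of the corrected pattern:

#include <stdlib.h>

struct bss_entry {
	void *bss;		/* separately allocated payload buffer */
};

static struct bss_entry *alloc_bss_entry(size_t bss_len)
{
	struct bss_entry *bss = calloc(1, sizeof(*bss));

	if (!bss)
		return NULL;

	bss->bss = calloc(1, bss_len);
	if (!bss->bss) {	/* test the new buffer, not the container */
		free(bss);
		return NULL;
	}
	return bss;
}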
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index a15962a19b2a..a72f7c2577de 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
197 | i %= ring_limit; | 197 | i %= ring_limit; |
198 | continue; | 198 | continue; |
199 | } | 199 | } |
200 | |||
201 | if (unlikely(len > priv->common.rx_mtu)) { | ||
202 | if (net_ratelimit()) | ||
203 | dev_err(&priv->pdev->dev, "rx'd frame size " | ||
204 | "exceeds length threshold.\n"); | ||
205 | |||
206 | len = priv->common.rx_mtu; | ||
207 | } | ||
200 | skb_put(skb, len); | 208 | skb_put(skb, len); |
201 | 209 | ||
202 | if (p54_rx(dev, skb)) { | 210 | if (p54_rx(dev, skb)) { |
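The added check above guards the p54 rx ring against oversized length fields reported by the device: the frame is truncated to the receive MTU, with a rate-limited warning, before skb_put() can overrun the preallocated buffer. A stand-alone sketch of the clamp, with the rate limiting omitted:

#include <stdio.h>

static unsigned int clamp_rx_len(unsigned int len, unsigned int rx_mtu)
{
	if (len > rx_mtu) {
		fprintf(stderr, "rx'd frame size exceeds length threshold\n");
		len = rx_mtu;	/* truncate rather than trust the device */
	}
	return len;
}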
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index bc5726dd5fe4..7ba3052b0708 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c | |||
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { | |||
65 | /* Sitecom */ | 65 | /* Sitecom */ |
66 | {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, | 66 | {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, |
67 | {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, | 67 | {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, |
68 | {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, | ||
68 | /* Sphairon Access Systems GmbH */ | 69 | /* Sphairon Access Systems GmbH */ |
69 | {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, | 70 | {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, |
70 | /* Dick Smith Electronics */ | 71 | /* Dick Smith Electronics */ |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index ac19ecd19cfe..72d3e437e190 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = { | |||
62 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, | 62 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, |
63 | /* ZD1211B */ | 63 | /* ZD1211B */ |
64 | { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, | 64 | { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, |
65 | { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B }, | ||
65 | { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, | 66 | { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, |
66 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, | 67 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, |
67 | { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, | 68 | { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 8e952fdab764..cb2fd01eddae 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func) | |||
720 | -ret_val); | 720 | -ret_val); |
721 | goto acpiphp_bus_add_out; | 721 | goto acpiphp_bus_add_out; |
722 | } | 722 | } |
723 | /* | ||
724 | * try to start anyway. We could have failed to add | ||
725 | * simply because this bus had previously been added | ||
726 | * on another add. Don't bother with the return value | ||
727 | * we just keep going. | ||
728 | */ | ||
729 | ret_val = acpi_bus_start(device); | 723 | ret_val = acpi_bus_start(device); |
730 | 724 | ||
731 | acpiphp_bus_add_out: | 725 | acpiphp_bus_add_out: |
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 8c30a9544d61..223052b73563 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
@@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj) | |||
321 | unsigned long flags; | 321 | unsigned long flags; |
322 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); | 322 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); |
323 | int pos_cap_err, rp_pos_cap_err; | 323 | int pos_cap_err, rp_pos_cap_err; |
324 | u32 sever, mask; | 324 | u32 sever, cor_mask, uncor_mask; |
325 | int ret = 0; | 325 | int ret = 0; |
326 | 326 | ||
327 | dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); | 327 | dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); |
@@ -339,6 +339,9 @@ static int aer_inject(struct aer_error_inj *einj) | |||
339 | goto out_put; | 339 | goto out_put; |
340 | } | 340 | } |
341 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); | 341 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); |
342 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask); | ||
343 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, | ||
344 | &uncor_mask); | ||
342 | 345 | ||
343 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); | 346 | rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); |
344 | if (!rp_pos_cap_err) { | 347 | if (!rp_pos_cap_err) { |
@@ -374,17 +377,14 @@ static int aer_inject(struct aer_error_inj *einj) | |||
374 | err->header_log2 = einj->header_log2; | 377 | err->header_log2 = einj->header_log2; |
375 | err->header_log3 = einj->header_log3; | 378 | err->header_log3 = einj->header_log3; |
376 | 379 | ||
377 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask); | 380 | if (einj->cor_status && !(einj->cor_status & ~cor_mask)) { |
378 | if (einj->cor_status && !(einj->cor_status & ~mask)) { | ||
379 | ret = -EINVAL; | 381 | ret = -EINVAL; |
380 | printk(KERN_WARNING "The correctable error(s) is masked " | 382 | printk(KERN_WARNING "The correctable error(s) is masked " |
381 | "by device\n"); | 383 | "by device\n"); |
382 | spin_unlock_irqrestore(&inject_lock, flags); | 384 | spin_unlock_irqrestore(&inject_lock, flags); |
383 | goto out_put; | 385 | goto out_put; |
384 | } | 386 | } |
385 | 387 | if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) { | |
386 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask); | ||
387 | if (einj->uncor_status && !(einj->uncor_status & ~mask)) { | ||
388 | ret = -EINVAL; | 388 | ret = -EINVAL; |
389 | printk(KERN_WARNING "The uncorrectable error(s) is masked " | 389 | printk(KERN_WARNING "The uncorrectable error(s) is masked " |
390 | "by device\n"); | 390 | "by device\n"); |
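The aer_inject change above reads both the correctable and uncorrectable mask registers up front and keeps them in separate variables, so each injected status is validated against the matching mask; an injection is rejected when every requested error bit is covered by the device's mask. The test itself reduces to:

#include <stdbool.h>
#include <stdint.h>

/* true when a non-zero request has no bit left outside the mask */
static bool injection_fully_masked(uint32_t status, uint32_t mask)
{
	return status && !(status & ~mask);
}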
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 98ffb2de22e9..446e4a94d7d3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -681,7 +681,7 @@ static void pci_read_irq(struct pci_dev *dev) | |||
681 | dev->irq = irq; | 681 | dev->irq = irq; |
682 | } | 682 | } |
683 | 683 | ||
684 | static void set_pcie_port_type(struct pci_dev *pdev) | 684 | void set_pcie_port_type(struct pci_dev *pdev) |
685 | { | 685 | { |
686 | int pos; | 686 | int pos; |
687 | u16 reg16; | 687 | u16 reg16; |
@@ -695,7 +695,7 @@ static void set_pcie_port_type(struct pci_dev *pdev) | |||
695 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; | 695 | pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; |
696 | } | 696 | } |
697 | 697 | ||
698 | static void set_pcie_hotplug_bridge(struct pci_dev *pdev) | 698 | void set_pcie_hotplug_bridge(struct pci_dev *pdev) |
699 | { | 699 | { |
700 | int pos; | 700 | int pos; |
701 | u16 reg16; | 701 | u16 reg16; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c74694345b6e..d58b94030ef3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) | |||
338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); | 338 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); |
339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); | 339 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); |
340 | 340 | ||
341 | /* | ||
342 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS | ||
343 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. | ||
344 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k | ||
345 | * (which conflicts w/ BAR1's memory range). | ||
346 | */ | ||
347 | static void __devinit quirk_cs5536_vsa(struct pci_dev *dev) | ||
348 | { | ||
349 | if (pci_resource_len(dev, 0) != 8) { | ||
350 | struct resource *res = &dev->resource[0]; | ||
351 | res->end = res->start + 8 - 1; | ||
352 | dev_info(&dev->dev, "CS5536 ISA bridge bug detected " | ||
353 | "(incorrect header); workaround applied.\n"); | ||
354 | } | ||
355 | } | ||
356 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); | ||
357 | |||
341 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | 358 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, |
342 | unsigned size, int nr, const char *name) | 359 | unsigned size, int nr, const char *name) |
343 | { | 360 | { |
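The CS5536 quirk above shrinks BAR0 to its correct 8-byte window by rewriting the resource end; since resource ranges are inclusive, an N-byte region ends at start + N - 1. A trivial sketch of that arithmetic (the struct here is illustrative, not the kernel's struct resource):

#include <stdint.h>

struct io_resource {
	uint64_t start;
	uint64_t end;	/* inclusive, so length == end - start + 1 */
};

static void force_resource_size(struct io_resource *res, uint64_t size)
{
	res->end = res->start + size - 1;
}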
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 07d14dfdf0b4..226b3e93498c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev) | |||
934 | acer_backlight_device = bd; | 934 | acer_backlight_device = bd; |
935 | 935 | ||
936 | bd->props.power = FB_BLANK_UNBLANK; | 936 | bd->props.power = FB_BLANK_UNBLANK; |
937 | bd->props.brightness = max_brightness; | 937 | bd->props.brightness = read_brightness(bd); |
938 | bd->props.max_brightness = max_brightness; | 938 | bd->props.max_brightness = max_brightness; |
939 | backlight_update_status(bd); | 939 | backlight_update_status(bd); |
940 | return 0; | 940 | return 0; |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e67e4feb35cb..eb603f1d55ca 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -5771,7 +5771,7 @@ static void thermal_exit(void) | |||
5771 | case TPACPI_THERMAL_ACPI_TMP07: | 5771 | case TPACPI_THERMAL_ACPI_TMP07: |
5772 | case TPACPI_THERMAL_ACPI_UPDT: | 5772 | case TPACPI_THERMAL_ACPI_UPDT: |
5773 | sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, | 5773 | sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, |
5774 | &thermal_temp_input16_group); | 5774 | &thermal_temp_input8_group); |
5775 | break; | 5775 | break; |
5776 | case TPACPI_THERMAL_NONE: | 5776 | case TPACPI_THERMAL_NONE: |
5777 | default: | 5777 | default: |
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index fa39e759a275..6ea3cb5837c7 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c | |||
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) | |||
175 | dev_err(&dev->dev, "Do not pass platform_data through " | 175 | dev_err(&dev->dev, "Do not pass platform_data through " |
176 | "wm97xx_bat_set_pdata!\n"); | 176 | "wm97xx_bat_set_pdata!\n"); |
177 | return -EINVAL; | 177 | return -EINVAL; |
178 | } else | 178 | } |
179 | pdata = wmdata->batt_pdata; | 179 | |
180 | if (!wmdata) { | ||
181 | dev_err(&dev->dev, "No platform data supplied\n"); | ||
182 | return -EINVAL; | ||
183 | } | ||
184 | |||
185 | pdata = wmdata->batt_pdata; | ||
180 | 186 | ||
181 | if (dev->id != -1) | 187 | if (dev->id != -1) |
182 | return -EINVAL; | 188 | return -EINVAL; |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 686ef270ecf7..b60a4c9f8f16 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) | |||
661 | static void print_constraints(struct regulator_dev *rdev) | 661 | static void print_constraints(struct regulator_dev *rdev) |
662 | { | 662 | { |
663 | struct regulation_constraints *constraints = rdev->constraints; | 663 | struct regulation_constraints *constraints = rdev->constraints; |
664 | char buf[80]; | 664 | char buf[80] = ""; |
665 | int count = 0; | 665 | int count = 0; |
666 | int ret; | 666 | int ret; |
667 | 667 | ||
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 76d08c282f9c..4f33a0f4a179 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c | |||
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev, | |||
183 | if (vol_map[val] >= min_vol) | 183 | if (vol_map[val] >= min_vol) |
184 | break; | 184 | break; |
185 | 185 | ||
186 | if (vol_map[val] > max_vol) | 186 | if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol) |
187 | return -EINVAL; | 187 | return -EINVAL; |
188 | 188 | ||
189 | return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), | 189 | return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), |
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev, | |||
272 | if (vol_map[val] >= min_vol) | 272 | if (vol_map[val] >= min_vol) |
273 | break; | 273 | break; |
274 | 274 | ||
275 | if (vol_map[val] > max_vol) | 275 | if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol) |
276 | return -EINVAL; | 276 | return -EINVAL; |
277 | 277 | ||
278 | ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), | 278 | ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), |
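Both lp3971 fixes above add an index bound to the voltage-table scan: when no entry satisfies min_vol the loop runs off the end of vol_map[], so the bound must be checked before the table is dereferenced again. A stand-alone sketch of the corrected lookup (table and limits are caller-supplied here; the driver uses its LDO/BUCK index macros):

#include <stddef.h>

static int find_vol_index(const int *vol_map, size_t max_idx,
			  int min_vol, int max_vol)
{
	size_t val;

	for (val = 0; val <= max_idx; val++)
		if (vol_map[val] >= min_vol)
			break;

	/* the index bound must short-circuit before vol_map[val] is read */
	if (val > max_idx || vol_map[val] > max_vol)
		return -1;	/* the driver returns -EINVAL here */

	return (int)val;
}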
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 1bbff099a546..e7b89e704af6 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c | |||
@@ -1504,7 +1504,8 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, | |||
1504 | led->isink_init.consumer_supplies = &led->isink_consumer; | 1504 | led->isink_init.consumer_supplies = &led->isink_consumer; |
1505 | led->isink_init.constraints.min_uA = 0; | 1505 | led->isink_init.constraints.min_uA = 0; |
1506 | led->isink_init.constraints.max_uA = pdata->max_uA; | 1506 | led->isink_init.constraints.max_uA = pdata->max_uA; |
1507 | led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT; | 1507 | led->isink_init.constraints.valid_ops_mask |
1508 | = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS; | ||
1508 | led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; | 1509 | led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; |
1509 | ret = wm8350_register_regulator(wm8350, isink, &led->isink_init); | 1510 | ret = wm8350_register_regulator(wm8350, isink, &led->isink_init); |
1510 | if (ret != 0) { | 1511 | if (ret != 0) { |
@@ -1517,6 +1518,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, | |||
1517 | led->dcdc_init.num_consumer_supplies = 1; | 1518 | led->dcdc_init.num_consumer_supplies = 1; |
1518 | led->dcdc_init.consumer_supplies = &led->dcdc_consumer; | 1519 | led->dcdc_init.consumer_supplies = &led->dcdc_consumer; |
1519 | led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; | 1520 | led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; |
1521 | led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; | ||
1520 | ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init); | 1522 | ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init); |
1521 | if (ret != 0) { | 1523 | if (ret != 0) { |
1522 | platform_device_put(pdev); | 1524 | platform_device_put(pdev); |
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c index 3a7be11cc6b9..812c66755083 100644 --- a/drivers/rtc/rtc-fm3130.c +++ b/drivers/rtc/rtc-fm3130.c | |||
@@ -376,20 +376,22 @@ static int __devinit fm3130_probe(struct i2c_client *client, | |||
376 | } | 376 | } |
377 | 377 | ||
378 | /* Disabling calibration mode */ | 378 | /* Disabling calibration mode */ |
379 | if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) | 379 | if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) { |
380 | i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, | 380 | i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, |
381 | fm3130->regs[FM3130_RTC_CONTROL] & | 381 | fm3130->regs[FM3130_RTC_CONTROL] & |
382 | ~(FM3130_RTC_CONTROL_BIT_CAL)); | 382 | ~(FM3130_RTC_CONTROL_BIT_CAL)); |
383 | dev_warn(&client->dev, "Disabling calibration mode!\n"); | 383 | dev_warn(&client->dev, "Disabling calibration mode!\n"); |
384 | } | ||
384 | 385 | ||
385 | /* Disabling read and write modes */ | 386 | /* Disabling read and write modes */ |
386 | if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE || | 387 | if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE || |
387 | fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) | 388 | fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) { |
388 | i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, | 389 | i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, |
389 | fm3130->regs[FM3130_RTC_CONTROL] & | 390 | fm3130->regs[FM3130_RTC_CONTROL] & |
390 | ~(FM3130_RTC_CONTROL_BIT_READ | | 391 | ~(FM3130_RTC_CONTROL_BIT_READ | |
391 | FM3130_RTC_CONTROL_BIT_WRITE)); | 392 | FM3130_RTC_CONTROL_BIT_WRITE)); |
392 | dev_warn(&client->dev, "Disabling READ or WRITE mode!\n"); | 393 | dev_warn(&client->dev, "Disabling READ or WRITE mode!\n"); |
394 | } | ||
393 | 395 | ||
394 | /* oscillator off? turn it on, so clock can tick. */ | 396 | /* oscillator off? turn it on, so clock can tick. */ |
395 | if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN) | 397 | if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN) |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index fdb2e7c14506..5905936c7c60 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1004,8 +1004,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, | |||
1004 | if (device == NULL || | 1004 | if (device == NULL || |
1005 | device != dasd_device_from_cdev_locked(cdev) || | 1005 | device != dasd_device_from_cdev_locked(cdev) || |
1006 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 1006 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
1007 | DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " | 1007 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
1008 | "bus_id %s", dev_name(&cdev->dev)); | 1008 | "invalid device in request"); |
1009 | return; | 1009 | return; |
1010 | } | 1010 | } |
1011 | 1011 | ||
@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1078 | device = (struct dasd_device *) cqr->startdev; | 1078 | device = (struct dasd_device *) cqr->startdev; |
1079 | if (!device || | 1079 | if (!device || |
1080 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 1080 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
1081 | DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " | 1081 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
1082 | "bus_id %s", dev_name(&cdev->dev)); | 1082 | "invalid device in request"); |
1083 | return; | 1083 | return; |
1084 | } | 1084 | } |
1085 | 1085 | ||
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 1c500c462225..1cca21aafaba 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -3033,7 +3033,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, | |||
3033 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3033 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3034 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", | 3034 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", |
3035 | req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), | 3035 | req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), |
3036 | scsw_cc(&irb->scsw), req->intrc); | 3036 | scsw_cc(&irb->scsw), req ? req->intrc : 0); |
3037 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3037 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3038 | " device %s: Failing CCW: %p\n", | 3038 | " device %s: Failing CCW: %p\n", |
3039 | dev_name(&device->cdev->dev), | 3039 | dev_name(&device->cdev->dev), |
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index fc7b30b4a255..7039d9cf0fb4 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block, | |||
260 | struct ccw_dev_id dev_id; | 260 | struct ccw_dev_id dev_id; |
261 | 261 | ||
262 | base = block->base; | 262 | base = block->base; |
263 | if (!base->discipline->fill_info) | 263 | if (!base->discipline || !base->discipline->fill_info) |
264 | return -EINVAL; | 264 | return -EINVAL; |
265 | 265 | ||
266 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); | 266 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); |
@@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block, | |||
303 | dasd_info->features |= | 303 | dasd_info->features |= |
304 | ((base->features & DASD_FEATURE_READONLY) != 0); | 304 | ((base->features & DASD_FEATURE_READONLY) != 0); |
305 | 305 | ||
306 | if (base->discipline) | 306 | memcpy(dasd_info->type, base->discipline->name, 4); |
307 | memcpy(dasd_info->type, base->discipline->name, 4); | ||
308 | else | ||
309 | memcpy(dasd_info->type, "none", 4); | ||
310 | 307 | ||
311 | if (block->request_queue->request_fn) { | 308 | if (block->request_queue->request_fn) { |
312 | struct list_head *l; | 309 | struct list_head *l; |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 6315fbd8e68b..71f95f54866f 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -72,7 +72,7 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
72 | /* Print device number. */ | 72 | /* Print device number. */ |
73 | seq_printf(m, "%s", dev_name(&device->cdev->dev)); | 73 | seq_printf(m, "%s", dev_name(&device->cdev->dev)); |
74 | /* Print discipline string. */ | 74 | /* Print discipline string. */ |
75 | if (device != NULL && device->discipline != NULL) | 75 | if (device->discipline != NULL) |
76 | seq_printf(m, "(%s)", device->discipline->name); | 76 | seq_printf(m, "(%s)", device->discipline->name); |
77 | else | 77 | else |
78 | seq_printf(m, "(none)"); | 78 | seq_printf(m, "(none)"); |
@@ -92,10 +92,7 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
92 | substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; | 92 | substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; |
93 | seq_printf(m, "%4s: ", substr); | 93 | seq_printf(m, "%4s: ", substr); |
94 | /* Print device status information. */ | 94 | /* Print device status information. */ |
95 | switch ((device != NULL) ? device->state : -1) { | 95 | switch (device->state) { |
96 | case -1: | ||
97 | seq_printf(m, "unknown"); | ||
98 | break; | ||
99 | case DASD_STATE_NEW: | 96 | case DASD_STATE_NEW: |
100 | seq_printf(m, "new"); | 97 | seq_printf(m, "new"); |
101 | break; | 98 | break; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index b9d2a007e93b..3796ffdb8479 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -495,6 +495,10 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp) | |||
495 | if (tty->driver_data == NULL) | 495 | if (tty->driver_data == NULL) |
496 | return -ENOMEM; | 496 | return -ENOMEM; |
497 | tty->low_latency = 0; | 497 | tty->low_latency = 0; |
498 | if (!tty->winsize.ws_row && !tty->winsize.ws_col) { | ||
499 | tty->winsize.ws_row = 24; | ||
500 | tty->winsize.ws_col = 80; | ||
501 | } | ||
498 | } | 502 | } |
499 | return 0; | 503 | return 0; |
500 | } | 504 | } |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 999fe80c4051..62b654af9237 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) | |||
531 | qdio_siga_sync_q(q); | 531 | qdio_siga_sync_q(q); |
532 | get_buf_state(q, q->first_to_check, &state, 0); | 532 | get_buf_state(q, q->first_to_check, &state, 0); |
533 | 533 | ||
534 | if (state == SLSB_P_INPUT_PRIMED) | 534 | if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) |
535 | /* more work coming */ | 535 | /* more work coming */ |
536 | return 0; | 536 | return 0; |
537 | 537 | ||
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
960 | qdio_handle_activate_check(cdev, intparm, cstat, | 960 | qdio_handle_activate_check(cdev, intparm, cstat, |
961 | dstat); | 961 | dstat); |
962 | break; | 962 | break; |
963 | case QDIO_IRQ_STATE_STOPPED: | ||
964 | break; | ||
963 | default: | 965 | default: |
964 | WARN_ON(1); | 966 | WARN_ON(1); |
965 | } | 967 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index a23726a0735c..142f72a2ca5a 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
@@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev, | |||
373 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | 373 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; |
374 | return -EAGAIN; | 374 | return -EAGAIN; |
375 | } | 375 | } |
376 | if (service_rc == 8 && service_rs == 72) | ||
377 | return -EINVAL; | ||
376 | zdev->online = 0; | 378 | zdev->online = 0; |
377 | return -EAGAIN; /* repeat the request on a different device. */ | 379 | return -EAGAIN; /* repeat the request on a different device. */ |
378 | } | 380 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 79c120578e61..68f3e6204db8 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -470,6 +470,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev, | |||
470 | } | 470 | } |
471 | if (service_rc == 12 && service_rs == 769) | 471 | if (service_rc == 12 && service_rs == 769) |
472 | return -EINVAL; | 472 | return -EINVAL; |
473 | if (service_rc == 8 && service_rs == 72) | ||
474 | return -EINVAL; | ||
473 | zdev->online = 0; | 475 | zdev->online = 0; |
474 | return -EAGAIN; /* repeat the request on a different device. */ | 476 | return -EAGAIN; /* repeat the request on a different device. */ |
475 | } | 477 | } |
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index f932400e980a..0eb6eefd2c1a 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/miscdevice.h> | 14 | #include <linux/miscdevice.h> |
15 | #include <asm/compat.h> | ||
15 | #include <asm/ccwdev.h> | 16 | #include <asm/ccwdev.h> |
16 | #include "zfcp_def.h" | 17 | #include "zfcp_def.h" |
17 | #include "zfcp_ext.h" | 18 | #include "zfcp_ext.h" |
@@ -163,7 +164,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data, | |||
163 | } | 164 | } |
164 | 165 | ||
165 | static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | 166 | static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, |
166 | unsigned long buffer) | 167 | unsigned long arg) |
167 | { | 168 | { |
168 | struct zfcp_cfdc_data *data; | 169 | struct zfcp_cfdc_data *data; |
169 | struct zfcp_cfdc_data __user *data_user; | 170 | struct zfcp_cfdc_data __user *data_user; |
@@ -175,7 +176,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | |||
175 | if (command != ZFCP_CFDC_IOC) | 176 | if (command != ZFCP_CFDC_IOC) |
176 | return -ENOTTY; | 177 | return -ENOTTY; |
177 | 178 | ||
178 | data_user = (void __user *) buffer; | 179 | if (is_compat_task()) |
180 | data_user = compat_ptr(arg); | ||
181 | else | ||
182 | data_user = (void __user *)arg; | ||
183 | |||
179 | if (!data_user) | 184 | if (!data_user) |
180 | return -EINVAL; | 185 | return -EINVAL; |
181 | 186 | ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 84450955ae11..7369c8911bcf 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -327,7 +327,7 @@ static void zfcp_dbf_hba_view_response(char **p, | |||
327 | break; | 327 | break; |
328 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); | 328 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); |
329 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); | 329 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); |
330 | p += sprintf(*p, "\n"); | 330 | *p += sprintf(*p, "\n"); |
331 | break; | 331 | break; |
332 | 332 | ||
333 | case FSF_QTCB_OPEN_PORT_WITH_DID: | 333 | case FSF_QTCB_OPEN_PORT_WITH_DID: |
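The zfcp_dbf fix above is a missing dereference: the helper receives char **p so that formatting advances the caller's write position, and "p += sprintf(*p, ...)" only moved the local copy, dropping the newline from the trace output. A small demonstration of the corrected idiom:

#include <stdio.h>

static void append_newline(char **p)
{
	*p += sprintf(*p, "\n");	/* advance the caller's cursor, not a copy */
}

int main(void)
{
	char buf[8] = "";
	char *pos = buf;

	append_newline(&pos);
	printf("advanced by %d byte(s)\n", (int)(pos - buf));	/* prints 1 */
	return 0;
}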
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 03dec832b465..66bdb34143cb 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -108,6 +108,7 @@ extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *); | |||
108 | extern int zfcp_fc_gs_setup(struct zfcp_adapter *); | 108 | extern int zfcp_fc_gs_setup(struct zfcp_adapter *); |
109 | extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); | 109 | extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); |
110 | extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); | 110 | extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); |
111 | extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); | ||
111 | 112 | ||
112 | /* zfcp_fsf.c */ | 113 | /* zfcp_fsf.c */ |
113 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); | 114 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); |
@@ -129,9 +130,9 @@ extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); | |||
129 | extern int zfcp_fsf_status_read(struct zfcp_qdio *); | 130 | extern int zfcp_fsf_status_read(struct zfcp_qdio *); |
130 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); | 131 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); |
131 | extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, | 132 | extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, |
132 | mempool_t *); | 133 | mempool_t *, unsigned int); |
133 | extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, | 134 | extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, |
134 | struct zfcp_fsf_ct_els *); | 135 | struct zfcp_fsf_ct_els *, unsigned int); |
135 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, | 136 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, |
136 | struct scsi_cmnd *); | 137 | struct scsi_cmnd *); |
137 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); | 138 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index ac5e3b7a3576..271399f62f1b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -258,7 +258,8 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, | |||
258 | gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; | 258 | gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; |
259 | 259 | ||
260 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, | 260 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, |
261 | adapter->pool.gid_pn_req); | 261 | adapter->pool.gid_pn_req, |
262 | ZFCP_FC_CTELS_TMO); | ||
262 | if (!ret) { | 263 | if (!ret) { |
263 | wait_for_completion(&completion); | 264 | wait_for_completion(&completion); |
264 | zfcp_fc_ns_gid_pn_eval(gid_pn); | 265 | zfcp_fc_ns_gid_pn_eval(gid_pn); |
@@ -421,7 +422,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port) | |||
421 | hton24(adisc->adisc_req.adisc_port_id, | 422 | hton24(adisc->adisc_req.adisc_port_id, |
422 | fc_host_port_id(adapter->scsi_host)); | 423 | fc_host_port_id(adapter->scsi_host)); |
423 | 424 | ||
424 | ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); | 425 | ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, |
426 | ZFCP_FC_CTELS_TMO); | ||
425 | if (ret) | 427 | if (ret) |
426 | kmem_cache_free(zfcp_data.adisc_cache, adisc); | 428 | kmem_cache_free(zfcp_data.adisc_cache, adisc); |
427 | 429 | ||
@@ -532,7 +534,8 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, | |||
532 | ct->req = &gpn_ft->sg_req; | 534 | ct->req = &gpn_ft->sg_req; |
533 | ct->resp = gpn_ft->sg_resp; | 535 | ct->resp = gpn_ft->sg_resp; |
534 | 536 | ||
535 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL); | 537 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, |
538 | ZFCP_FC_CTELS_TMO); | ||
536 | if (!ret) | 539 | if (!ret) |
537 | wait_for_completion(&completion); | 540 | wait_for_completion(&completion); |
538 | return ret; | 541 | return ret; |
@@ -668,15 +671,52 @@ static void zfcp_fc_ct_els_job_handler(void *data) | |||
668 | { | 671 | { |
669 | struct fc_bsg_job *job = data; | 672 | struct fc_bsg_job *job = data; |
670 | struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; | 673 | struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; |
671 | int status = zfcp_ct_els->status; | 674 | struct fc_bsg_reply *jr = job->reply; |
672 | int reply_status; | ||
673 | 675 | ||
674 | reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; | 676 | jr->reply_payload_rcv_len = job->reply_payload.payload_len; |
675 | job->reply->reply_data.ctels_reply.status = reply_status; | 677 | jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; |
676 | job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; | 678 | jr->result = zfcp_ct_els->status ? -EIO : 0; |
677 | job->job_done(job); | 679 | job->job_done(job); |
678 | } | 680 | } |
679 | 681 | ||
682 | static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job) | ||
683 | { | ||
684 | u32 preamble_word1; | ||
685 | u8 gs_type; | ||
686 | struct zfcp_adapter *adapter; | ||
687 | |||
688 | preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; | ||
689 | gs_type = (preamble_word1 & 0xff000000) >> 24; | ||
690 | |||
691 | adapter = (struct zfcp_adapter *) job->shost->hostdata[0]; | ||
692 | |||
693 | switch (gs_type) { | ||
694 | case FC_FST_ALIAS: | ||
695 | return &adapter->gs->as; | ||
696 | case FC_FST_MGMT: | ||
697 | return &adapter->gs->ms; | ||
698 | case FC_FST_TIME: | ||
699 | return &adapter->gs->ts; | ||
700 | break; | ||
701 | case FC_FST_DIR: | ||
702 | return &adapter->gs->ds; | ||
703 | break; | ||
704 | default: | ||
705 | return NULL; | ||
706 | } | ||
707 | } | ||
708 | |||
709 | static void zfcp_fc_ct_job_handler(void *data) | ||
710 | { | ||
711 | struct fc_bsg_job *job = data; | ||
712 | struct zfcp_fc_wka_port *wka_port; | ||
713 | |||
714 | wka_port = zfcp_fc_job_wka_port(job); | ||
715 | zfcp_fc_wka_port_put(wka_port); | ||
716 | |||
717 | zfcp_fc_ct_els_job_handler(data); | ||
718 | } | ||
719 | |||
680 | static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, | 720 | static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, |
681 | struct zfcp_adapter *adapter) | 721 | struct zfcp_adapter *adapter) |
682 | { | 722 | { |
@@ -695,43 +735,27 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, | |||
695 | } else | 735 | } else |
696 | d_id = ntoh24(job->request->rqst_data.h_els.port_id); | 736 | d_id = ntoh24(job->request->rqst_data.h_els.port_id); |
697 | 737 | ||
698 | return zfcp_fsf_send_els(adapter, d_id, els); | 738 | els->handler = zfcp_fc_ct_els_job_handler; |
739 | return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ); | ||
699 | } | 740 | } |
700 | 741 | ||
701 | static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, | 742 | static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, |
702 | struct zfcp_adapter *adapter) | 743 | struct zfcp_adapter *adapter) |
703 | { | 744 | { |
704 | int ret; | 745 | int ret; |
705 | u8 gs_type; | ||
706 | struct zfcp_fsf_ct_els *ct = job->dd_data; | 746 | struct zfcp_fsf_ct_els *ct = job->dd_data; |
707 | struct zfcp_fc_wka_port *wka_port; | 747 | struct zfcp_fc_wka_port *wka_port; |
708 | u32 preamble_word1; | ||
709 | 748 | ||
710 | preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; | 749 | wka_port = zfcp_fc_job_wka_port(job); |
711 | gs_type = (preamble_word1 & 0xff000000) >> 24; | 750 | if (!wka_port) |
712 | 751 | return -EINVAL; | |
713 | switch (gs_type) { | ||
714 | case FC_FST_ALIAS: | ||
715 | wka_port = &adapter->gs->as; | ||
716 | break; | ||
717 | case FC_FST_MGMT: | ||
718 | wka_port = &adapter->gs->ms; | ||
719 | break; | ||
720 | case FC_FST_TIME: | ||
721 | wka_port = &adapter->gs->ts; | ||
722 | break; | ||
723 | case FC_FST_DIR: | ||
724 | wka_port = &adapter->gs->ds; | ||
725 | break; | ||
726 | default: | ||
727 | return -EINVAL; /* no such service */ | ||
728 | } | ||
729 | 752 | ||
730 | ret = zfcp_fc_wka_port_get(wka_port); | 753 | ret = zfcp_fc_wka_port_get(wka_port); |
731 | if (ret) | 754 | if (ret) |
732 | return ret; | 755 | return ret; |
733 | 756 | ||
734 | ret = zfcp_fsf_send_ct(wka_port, ct, NULL); | 757 | ct->handler = zfcp_fc_ct_job_handler; |
758 | ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ); | ||
735 | if (ret) | 759 | if (ret) |
736 | zfcp_fc_wka_port_put(wka_port); | 760 | zfcp_fc_wka_port_put(wka_port); |
737 | 761 | ||
@@ -752,7 +776,6 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) | |||
752 | 776 | ||
753 | ct_els->req = job->request_payload.sg_list; | 777 | ct_els->req = job->request_payload.sg_list; |
754 | ct_els->resp = job->reply_payload.sg_list; | 778 | ct_els->resp = job->reply_payload.sg_list; |
755 | ct_els->handler = zfcp_fc_ct_els_job_handler; | ||
756 | ct_els->handler_data = job; | 779 | ct_els->handler_data = job; |
757 | 780 | ||
758 | switch (job->request->msgcode) { | 781 | switch (job->request->msgcode) { |
@@ -767,6 +790,12 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) | |||
767 | } | 790 | } |
768 | } | 791 | } |
769 | 792 | ||
793 | int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job) | ||
794 | { | ||
795 | /* hardware tracks timeout, reset bsg timeout to not interfere */ | ||
796 | return -EAGAIN; | ||
797 | } | ||
798 | |||
770 | int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) | 799 | int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) |
771 | { | 800 | { |
772 | struct zfcp_fc_wka_ports *wka_ports; | 801 | struct zfcp_fc_wka_ports *wka_ports; |
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index cb2a3669a384..0747b087390d 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ | 27 | #define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ |
28 | (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) | 28 | (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) |
29 | 29 | ||
30 | #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) | ||
31 | |||
30 | /** | 32 | /** |
31 | * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request | 33 | * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request |
32 | * @ct_hdr: FC GS common transport header | 34 | * @ct_hdr: FC GS common transport header |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 482dcd97aa5d..e8fb4d9baa8b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -1068,20 +1068,20 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1068 | static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, | 1068 | static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, |
1069 | struct scatterlist *sg_req, | 1069 | struct scatterlist *sg_req, |
1070 | struct scatterlist *sg_resp, | 1070 | struct scatterlist *sg_resp, |
1071 | int max_sbals) | 1071 | int max_sbals, unsigned int timeout) |
1072 | { | 1072 | { |
1073 | int ret; | 1073 | int ret; |
1074 | unsigned int fcp_chan_timeout; | ||
1075 | 1074 | ||
1076 | ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); | 1075 | ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); |
1077 | if (ret) | 1076 | if (ret) |
1078 | return ret; | 1077 | return ret; |
1079 | 1078 | ||
1080 | /* common settings for ct/gs and els requests */ | 1079 | /* common settings for ct/gs and els requests */ |
1081 | fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000; | 1080 | if (timeout > 255) |
1081 | timeout = 255; /* max value accepted by hardware */ | ||
1082 | req->qtcb->bottom.support.service_class = FSF_CLASS_3; | 1082 | req->qtcb->bottom.support.service_class = FSF_CLASS_3; |
1083 | req->qtcb->bottom.support.timeout = fcp_chan_timeout; | 1083 | req->qtcb->bottom.support.timeout = timeout; |
1084 | zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ); | 1084 | zfcp_fsf_start_timer(req, (timeout + 10) * HZ); |
1085 | 1085 | ||
1086 | return 0; | 1086 | return 0; |
1087 | } | 1087 | } |
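With the timeout now supplied by the caller (presumably ZFCP_FC_CTELS_TMO for internal CT/ELS traffic and the BSG job timeout otherwise), zfcp_fsf_setup_ct_els() clamps it to the 8-bit maximum the QTCB field can carry and keeps the driver-side request timer 10 seconds behind it, so the channel always fires first. Condensed from the hunk above, with comments added:

        if (timeout > 255)
                timeout = 255;                          /* max value accepted by hardware */
        req->qtcb->bottom.support.timeout = timeout;    /* seconds, enforced by the channel */
        zfcp_fsf_start_timer(req, (timeout + 10) * HZ); /* driver fallback, deliberately later */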
@@ -1092,7 +1092,8 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, | |||
1092 | * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req | 1092 | * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req |
1093 | */ | 1093 | */ |
1094 | int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | 1094 | int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, |
1095 | struct zfcp_fsf_ct_els *ct, mempool_t *pool) | 1095 | struct zfcp_fsf_ct_els *ct, mempool_t *pool, |
1096 | unsigned int timeout) | ||
1096 | { | 1097 | { |
1097 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; | 1098 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1098 | struct zfcp_fsf_req *req; | 1099 | struct zfcp_fsf_req *req; |
@@ -1111,7 +1112,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | |||
1111 | 1112 | ||
1112 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1113 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1113 | ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, | 1114 | ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, |
1114 | FSF_MAX_SBALS_PER_REQ); | 1115 | FSF_MAX_SBALS_PER_REQ, timeout); |
1115 | if (ret) | 1116 | if (ret) |
1116 | goto failed_send; | 1117 | goto failed_send; |
1117 | 1118 | ||
@@ -1188,7 +1189,7 @@ skip_fsfstatus: | |||
1188 | * @els: pointer to struct zfcp_send_els with data for the command | 1189 | * @els: pointer to struct zfcp_send_els with data for the command |
1189 | */ | 1190 | */ |
1190 | int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | 1191 | int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, |
1191 | struct zfcp_fsf_ct_els *els) | 1192 | struct zfcp_fsf_ct_els *els, unsigned int timeout) |
1192 | { | 1193 | { |
1193 | struct zfcp_fsf_req *req; | 1194 | struct zfcp_fsf_req *req; |
1194 | struct zfcp_qdio *qdio = adapter->qdio; | 1195 | struct zfcp_qdio *qdio = adapter->qdio; |
@@ -1206,7 +1207,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | |||
1206 | } | 1207 | } |
1207 | 1208 | ||
1208 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1209 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1209 | ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2); | 1210 | ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); |
1210 | 1211 | ||
1211 | if (ret) | 1212 | if (ret) |
1212 | goto failed_send; | 1213 | goto failed_send; |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 771cc536a989..8e6fc68d6bd4 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -652,6 +652,7 @@ struct fc_function_template zfcp_transport_functions = { | |||
652 | .show_host_port_state = 1, | 652 | .show_host_port_state = 1, |
653 | .show_host_active_fc4s = 1, | 653 | .show_host_active_fc4s = 1, |
654 | .bsg_request = zfcp_fc_exec_bsg_job, | 654 | .bsg_request = zfcp_fc_exec_bsg_job, |
655 | .bsg_timeout = zfcp_fc_timeout_bsg_job, | ||
655 | /* no functions registered for following dynamic attributes but | 656 | /* no functions registered for following dynamic attributes but |
656 | directly set by LLDD */ | 657 | directly set by LLDD */ |
657 | .show_host_port_type = 1, | 658 | .show_host_port_type = 1, |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 2a889853a106..7e26ebc26661 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag) | |||
293 | status = -EINVAL; | 293 | status = -EINVAL; |
294 | } | 294 | } |
295 | } | 295 | } |
296 | aac_fib_complete(fibptr); | 296 | /* Do not set XferState to zero unless it receives a response from F/W */ |
297 | if (status >= 0) | ||
298 | aac_fib_complete(fibptr); | ||
299 | |||
297 | /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ | 300 | /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ |
298 | if (status >= 0) { | 301 | if (status >= 0) { |
299 | if ((aac_commit == 1) || commit_flag) { | 302 | if ((aac_commit == 1) || commit_flag) { |
@@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag) | |||
310 | FsaNormal, | 313 | FsaNormal, |
311 | 1, 1, | 314 | 1, 1, |
312 | NULL, NULL); | 315 | NULL, NULL); |
313 | aac_fib_complete(fibptr); | 316 | /* Do not set XferState to zero unless |
317 | * it receives a response from F/W */ |
318 | if (status >= 0) | ||
319 | aac_fib_complete(fibptr); | ||
314 | } else if (aac_commit == 0) { | 320 | } else if (aac_commit == 0) { |
315 | printk(KERN_WARNING | 321 | printk(KERN_WARNING |
316 | "aac_get_config_status: Foreign device configurations are being ignored\n"); | 322 | "aac_get_config_status: Foreign device configurations are being ignored\n"); |
317 | } | 323 | } |
318 | } | 324 | } |
319 | aac_fib_free(fibptr); | 325 | /* FIB should be freed only after getting the response from the F/W */ |
326 | if (status != -ERESTARTSYS) | ||
327 | aac_fib_free(fibptr); | ||
320 | return status; | 328 | return status; |
321 | } | 329 | } |
322 | 330 | ||
@@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev) | |||
355 | maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); | 363 | maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); |
356 | aac_fib_complete(fibptr); | 364 | aac_fib_complete(fibptr); |
357 | } | 365 | } |
358 | aac_fib_free(fibptr); | 366 | /* FIB should be freed only after getting the response from the F/W */ |
367 | if (status != -ERESTARTSYS) | ||
368 | aac_fib_free(fibptr); | ||
359 | 369 | ||
360 | if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) | 370 | if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) |
361 | maximum_num_containers = MAXIMUM_NUM_CONTAINERS; | 371 | maximum_num_containers = MAXIMUM_NUM_CONTAINERS; |
@@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1245 | NULL); | 1255 | NULL); |
1246 | 1256 | ||
1247 | if (rcode < 0) { | 1257 | if (rcode < 0) { |
1248 | aac_fib_complete(fibptr); | 1258 | /* FIB should be freed only after |
1249 | aac_fib_free(fibptr); | 1259 | * getting the response from the F/W */ |
1260 | if (rcode != -ERESTARTSYS) { | ||
1261 | aac_fib_complete(fibptr); | ||
1262 | aac_fib_free(fibptr); | ||
1263 | } | ||
1250 | return rcode; | 1264 | return rcode; |
1251 | } | 1265 | } |
1252 | memcpy(&dev->adapter_info, info, sizeof(*info)); | 1266 | memcpy(&dev->adapter_info, info, sizeof(*info)); |
@@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1270 | 1284 | ||
1271 | if (rcode >= 0) | 1285 | if (rcode >= 0) |
1272 | memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); | 1286 | memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); |
1287 | if (rcode == -ERESTARTSYS) { | ||
1288 | fibptr = aac_fib_alloc(dev); | ||
1289 | if (!fibptr) | ||
1290 | return -ENOMEM; | ||
1291 | } | ||
1292 | |||
1273 | } | 1293 | } |
1274 | 1294 | ||
1275 | 1295 | ||
@@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1470 | (dev->scsi_host_ptr->sg_tablesize * 8) + 112; | 1490 | (dev->scsi_host_ptr->sg_tablesize * 8) + 112; |
1471 | } | 1491 | } |
1472 | } | 1492 | } |
1473 | 1493 | /* FIB should be freed only after getting the response from the F/W */ | |
1474 | aac_fib_complete(fibptr); | 1494 | if (rcode != -ERESTARTSYS) { |
1475 | aac_fib_free(fibptr); | 1495 | aac_fib_complete(fibptr); |
1496 | aac_fib_free(fibptr); | ||
1497 | } | ||
1476 | 1498 | ||
1477 | return rcode; | 1499 | return rcode; |
1478 | } | 1500 | } |
@@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd) | |||
1633 | * Allocate and initialize a Fib | 1655 | * Allocate and initialize a Fib |
1634 | */ | 1656 | */ |
1635 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { | 1657 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { |
1658 | printk(KERN_WARNING "aac_read: fib allocation failed\n"); | ||
1636 | return -1; | 1659 | return -1; |
1637 | } | 1660 | } |
1638 | 1661 | ||
@@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd) | |||
1712 | * Allocate and initialize a Fib then setup a BlockWrite command | 1735 | * Allocate and initialize a Fib then setup a BlockWrite command |
1713 | */ | 1736 | */ |
1714 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { | 1737 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { |
1715 | scsicmd->result = DID_ERROR << 16; | 1738 | /* FIB temporarily unavailable, not catastrophic failure */ |
1716 | scsicmd->scsi_done(scsicmd); | 1739 | |
1717 | return 0; | 1740 | /* scsicmd->result = DID_ERROR << 16; |
1741 | * scsicmd->scsi_done(scsicmd); | ||
1742 | * return 0; | ||
1743 | */ | ||
1744 | printk(KERN_WARNING "aac_write: fib allocation failed\n"); | ||
1745 | return -1; | ||
1718 | } | 1746 | } |
1719 | 1747 | ||
1720 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); | 1748 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); |
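The aachba.c hunks all apply one rule: a FIB may only be completed and freed once the firmware has answered it. aac_fib_send() now reports an interrupted wait as -ERESTARTSYS and leaves the FIB for the response path to reclaim, so callers skip aac_fib_complete()/aac_fib_free() in that case. The recurring pattern, condensed from the changes above (command and size arguments are illustrative):

        status = aac_fib_send(ContainerCommand, fibptr, fibsize,
                              FsaNormal, 1, 1, NULL, NULL);
        if (status >= 0)
                aac_fib_complete(fibptr);       /* firmware responded */
        /* FIB should be freed only after getting the response from the F/W */
        if (status != -ERESTARTSYS)
                aac_fib_free(fibptr);           /* interrupted sends are reclaimed later */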
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 83986ed86556..619c02d9c862 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -12,7 +12,7 @@ | |||
12 | *----------------------------------------------------------------------------*/ | 12 | *----------------------------------------------------------------------------*/ |
13 | 13 | ||
14 | #ifndef AAC_DRIVER_BUILD | 14 | #ifndef AAC_DRIVER_BUILD |
15 | # define AAC_DRIVER_BUILD 2461 | 15 | # define AAC_DRIVER_BUILD 24702 |
16 | # define AAC_DRIVER_BRANCH "-ms" | 16 | # define AAC_DRIVER_BRANCH "-ms" |
17 | #endif | 17 | #endif |
18 | #define MAXIMUM_NUM_CONTAINERS 32 | 18 | #define MAXIMUM_NUM_CONTAINERS 32 |
@@ -1036,6 +1036,9 @@ struct aac_dev | |||
1036 | u8 printf_enabled; | 1036 | u8 printf_enabled; |
1037 | u8 in_reset; | 1037 | u8 in_reset; |
1038 | u8 msi; | 1038 | u8 msi; |
1039 | int management_fib_count; | ||
1040 | spinlock_t manage_lock; | ||
1041 | |||
1039 | }; | 1042 | }; |
1040 | 1043 | ||
1041 | #define aac_adapter_interrupt(dev) \ | 1044 | #define aac_adapter_interrupt(dev) \ |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 0391d759dfdb..9c0c91178538 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -153,7 +153,7 @@ cleanup: | |||
153 | fibptr->hw_fib_pa = hw_fib_pa; | 153 | fibptr->hw_fib_pa = hw_fib_pa; |
154 | fibptr->hw_fib_va = hw_fib; | 154 | fibptr->hw_fib_va = hw_fib; |
155 | } | 155 | } |
156 | if (retval != -EINTR) | 156 | if (retval != -ERESTARTSYS) |
157 | aac_fib_free(fibptr); | 157 | aac_fib_free(fibptr); |
158 | return retval; | 158 | return retval; |
159 | } | 159 | } |
@@ -322,7 +322,7 @@ return_fib: | |||
322 | } | 322 | } |
323 | if (f.wait) { | 323 | if (f.wait) { |
324 | if(down_interruptible(&fibctx->wait_sem) < 0) { | 324 | if(down_interruptible(&fibctx->wait_sem) < 0) { |
325 | status = -EINTR; | 325 | status = -ERESTARTSYS; |
326 | } else { | 326 | } else { |
327 | /* Lock again and retry */ | 327 | /* Lock again and retry */ |
328 | spin_lock_irqsave(&dev->fib_lock, flags); | 328 | spin_lock_irqsave(&dev->fib_lock, flags); |
@@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
593 | u64 addr; | 593 | u64 addr; |
594 | void* p; | 594 | void* p; |
595 | if (upsg->sg[i].count > | 595 | if (upsg->sg[i].count > |
596 | (dev->adapter_info.options & | 596 | ((dev->adapter_info.options & |
597 | AAC_OPT_NEW_COMM) ? | 597 | AAC_OPT_NEW_COMM) ? |
598 | (dev->scsi_host_ptr->max_sectors << 9) : | 598 | (dev->scsi_host_ptr->max_sectors << 9) : |
599 | 65536) { | 599 | 65536)) { |
600 | rcode = -EINVAL; | 600 | rcode = -EINVAL; |
601 | goto cleanup; | 601 | goto cleanup; |
602 | } | 602 | } |
@@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
645 | u64 addr; | 645 | u64 addr; |
646 | void* p; | 646 | void* p; |
647 | if (usg->sg[i].count > | 647 | if (usg->sg[i].count > |
648 | (dev->adapter_info.options & | 648 | ((dev->adapter_info.options & |
649 | AAC_OPT_NEW_COMM) ? | 649 | AAC_OPT_NEW_COMM) ? |
650 | (dev->scsi_host_ptr->max_sectors << 9) : | 650 | (dev->scsi_host_ptr->max_sectors << 9) : |
651 | 65536) { | 651 | 65536)) { |
652 | rcode = -EINVAL; | 652 | rcode = -EINVAL; |
653 | goto cleanup; | 653 | goto cleanup; |
654 | } | 654 | } |
@@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
695 | uintptr_t addr; | 695 | uintptr_t addr; |
696 | void* p; | 696 | void* p; |
697 | if (usg->sg[i].count > | 697 | if (usg->sg[i].count > |
698 | (dev->adapter_info.options & | 698 | ((dev->adapter_info.options & |
699 | AAC_OPT_NEW_COMM) ? | 699 | AAC_OPT_NEW_COMM) ? |
700 | (dev->scsi_host_ptr->max_sectors << 9) : | 700 | (dev->scsi_host_ptr->max_sectors << 9) : |
701 | 65536) { | 701 | 65536)) { |
702 | rcode = -EINVAL; | 702 | rcode = -EINVAL; |
703 | goto cleanup; | 703 | goto cleanup; |
704 | } | 704 | } |
@@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
734 | dma_addr_t addr; | 734 | dma_addr_t addr; |
735 | void* p; | 735 | void* p; |
736 | if (upsg->sg[i].count > | 736 | if (upsg->sg[i].count > |
737 | (dev->adapter_info.options & | 737 | ((dev->adapter_info.options & |
738 | AAC_OPT_NEW_COMM) ? | 738 | AAC_OPT_NEW_COMM) ? |
739 | (dev->scsi_host_ptr->max_sectors << 9) : | 739 | (dev->scsi_host_ptr->max_sectors << 9) : |
740 | 65536) { | 740 | 65536)) { |
741 | rcode = -EINVAL; | 741 | rcode = -EINVAL; |
742 | goto cleanup; | 742 | goto cleanup; |
743 | } | 743 | } |
@@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
772 | psg->count = cpu_to_le32(sg_indx+1); | 772 | psg->count = cpu_to_le32(sg_indx+1); |
773 | status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); | 773 | status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); |
774 | } | 774 | } |
775 | if (status == -EINTR) { | 775 | if (status == -ERESTARTSYS) { |
776 | rcode = -EINTR; | 776 | rcode = -ERESTARTSYS; |
777 | goto cleanup; | 777 | goto cleanup; |
778 | } | 778 | } |
779 | 779 | ||
@@ -810,7 +810,7 @@ cleanup: | |||
810 | for(i=0; i <= sg_indx; i++){ | 810 | for(i=0; i <= sg_indx; i++){ |
811 | kfree(sg_list[i]); | 811 | kfree(sg_list[i]); |
812 | } | 812 | } |
813 | if (rcode != -EINTR) { | 813 | if (rcode != -ERESTARTSYS) { |
814 | aac_fib_complete(srbfib); | 814 | aac_fib_complete(srbfib); |
815 | aac_fib_free(srbfib); | 815 | aac_fib_free(srbfib); |
816 | } | 816 | } |
@@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) | |||
848 | */ | 848 | */ |
849 | 849 | ||
850 | status = aac_dev_ioctl(dev, cmd, arg); | 850 | status = aac_dev_ioctl(dev, cmd, arg); |
851 | if(status != -ENOTTY) | 851 | if (status != -ENOTTY) |
852 | return status; | 852 | return status; |
853 | 853 | ||
854 | switch (cmd) { | 854 | switch (cmd) { |
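The scatter-gather size checks in aac_send_raw_srb() were defeated by operator precedence: ">" binds tighter than "?:", so the old expression compared the count against (options & AAC_OPT_NEW_COMM) and then used that boolean as the ternary condition. The added parentheses restore the intended limit as the right-hand side of the comparison:

        /* old reading:  (count > (opts & AAC_OPT_NEW_COMM)) ? limit : 65536
         * new reading:   count > ((opts & AAC_OPT_NEW_COMM) ? limit : 65536) */
        if (upsg->sg[i].count >
            ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ?
             (dev->scsi_host_ptr->max_sectors << 9) : 65536)) {
                rcode = -EINVAL;
                goto cleanup;
        }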
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 666d5151d628..a7261486ccd4 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev) | |||
194 | 194 | ||
195 | if (status >= 0) | 195 | if (status >= 0) |
196 | aac_fib_complete(fibctx); | 196 | aac_fib_complete(fibctx); |
197 | aac_fib_free(fibctx); | 197 | /* FIB should be freed only after getting the response from the F/W */ |
198 | if (status != -ERESTARTSYS) | ||
199 | aac_fib_free(fibctx); | ||
198 | return status; | 200 | return status; |
199 | } | 201 | } |
200 | 202 | ||
@@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
304 | /* | 306 | /* |
305 | * Check the preferred comm settings, defaults from template. | 307 | * Check the preferred comm settings, defaults from template. |
306 | */ | 308 | */ |
309 | dev->management_fib_count = 0; | ||
310 | spin_lock_init(&dev->manage_lock); | ||
307 | dev->max_fib_size = sizeof(struct hw_fib); | 311 | dev->max_fib_size = sizeof(struct hw_fib); |
308 | dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size | 312 | dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size |
309 | - sizeof(struct aac_fibhdr) | 313 | - sizeof(struct aac_fibhdr) |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 956261f25181..94d2954d79ae 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev) | |||
189 | 189 | ||
190 | void aac_fib_free(struct fib *fibptr) | 190 | void aac_fib_free(struct fib *fibptr) |
191 | { | 191 | { |
192 | unsigned long flags; | 192 | unsigned long flags, flagsv; |
193 | |||
194 | spin_lock_irqsave(&fibptr->event_lock, flagsv); | ||
195 | if (fibptr->done == 2) { | ||
196 | spin_unlock_irqrestore(&fibptr->event_lock, flagsv); | ||
197 | return; | ||
198 | } | ||
199 | spin_unlock_irqrestore(&fibptr->event_lock, flagsv); | ||
193 | 200 | ||
194 | spin_lock_irqsave(&fibptr->dev->fib_lock, flags); | 201 | spin_lock_irqsave(&fibptr->dev->fib_lock, flags); |
195 | if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) | 202 | if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) |
@@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
390 | struct hw_fib * hw_fib = fibptr->hw_fib_va; | 397 | struct hw_fib * hw_fib = fibptr->hw_fib_va; |
391 | unsigned long flags = 0; | 398 | unsigned long flags = 0; |
392 | unsigned long qflags; | 399 | unsigned long qflags; |
400 | unsigned long mflags = 0; | ||
401 | |||
393 | 402 | ||
394 | if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) | 403 | if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) |
395 | return -EBUSY; | 404 | return -EBUSY; |
@@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
471 | if (!dev->queues) | 480 | if (!dev->queues) |
472 | return -EBUSY; | 481 | return -EBUSY; |
473 | 482 | ||
474 | if(wait) | 483 | if (wait) { |
484 | |||
485 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
486 | if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { | ||
487 | printk(KERN_INFO "No management Fibs Available:%d\n", | ||
488 | dev->management_fib_count); | ||
489 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
490 | return -EBUSY; | ||
491 | } | ||
492 | dev->management_fib_count++; | ||
493 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
475 | spin_lock_irqsave(&fibptr->event_lock, flags); | 494 | spin_lock_irqsave(&fibptr->event_lock, flags); |
476 | aac_adapter_deliver(fibptr); | 495 | } |
496 | |||
497 | if (aac_adapter_deliver(fibptr) != 0) { | ||
498 | printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); | ||
499 | if (wait) { | ||
500 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
501 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
502 | dev->management_fib_count--; | ||
503 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
504 | } | ||
505 | return -EBUSY; | ||
506 | } | ||
507 | |||
477 | 508 | ||
478 | /* | 509 | /* |
479 | * If the caller wanted us to wait for response wait now. | 510 | * If the caller wanted us to wait for response wait now. |
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
516 | udelay(5); | 547 | udelay(5); |
517 | } | 548 | } |
518 | } else if (down_interruptible(&fibptr->event_wait)) { | 549 | } else if (down_interruptible(&fibptr->event_wait)) { |
519 | fibptr->done = 2; | 550 | /* Do nothing ... satisfy |
520 | up(&fibptr->event_wait); | 551 | * down_interruptible must_check */ |
521 | } | 552 | } |
553 | |||
522 | spin_lock_irqsave(&fibptr->event_lock, flags); | 554 | spin_lock_irqsave(&fibptr->event_lock, flags); |
523 | if ((fibptr->done == 0) || (fibptr->done == 2)) { | 555 | if (fibptr->done == 0) { |
524 | fibptr->done = 2; /* Tell interrupt we aborted */ | 556 | fibptr->done = 2; /* Tell interrupt we aborted */ |
525 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | 557 | spin_unlock_irqrestore(&fibptr->event_lock, flags); |
526 | return -EINTR; | 558 | return -ERESTARTSYS; |
527 | } | 559 | } |
528 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | 560 | spin_unlock_irqrestore(&fibptr->event_lock, flags); |
529 | BUG_ON(fibptr->done == 0); | 561 | BUG_ON(fibptr->done == 0); |
@@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) | |||
689 | 721 | ||
690 | int aac_fib_complete(struct fib *fibptr) | 722 | int aac_fib_complete(struct fib *fibptr) |
691 | { | 723 | { |
724 | unsigned long flags; | ||
692 | struct hw_fib * hw_fib = fibptr->hw_fib_va; | 725 | struct hw_fib * hw_fib = fibptr->hw_fib_va; |
693 | 726 | ||
694 | /* | 727 | /* |
@@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr) | |||
709 | * command is complete that we had sent to the adapter and this | 742 | * command is complete that we had sent to the adapter and this |
710 | * cdb could be reused. | 743 | * cdb could be reused. |
711 | */ | 744 | */ |
745 | spin_lock_irqsave(&fibptr->event_lock, flags); | ||
746 | if (fibptr->done == 2) { | ||
747 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
748 | return 0; | ||
749 | } | ||
750 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
751 | |||
712 | if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && | 752 | if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && |
713 | (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) | 753 | (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) |
714 | { | 754 | { |
@@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced) | |||
1355 | 1395 | ||
1356 | if (status >= 0) | 1396 | if (status >= 0) |
1357 | aac_fib_complete(fibctx); | 1397 | aac_fib_complete(fibctx); |
1358 | aac_fib_free(fibctx); | 1398 | /* FIB should be freed only after getting |
1399 | * the response from the F/W */ | ||
1400 | if (status != -ERESTARTSYS) | ||
1401 | aac_fib_free(fibctx); | ||
1359 | } | 1402 | } |
1360 | } | 1403 | } |
1361 | 1404 | ||
@@ -1759,6 +1802,7 @@ int aac_command_thread(void *data) | |||
1759 | struct fib *fibptr; | 1802 | struct fib *fibptr; |
1760 | 1803 | ||
1761 | if ((fibptr = aac_fib_alloc(dev))) { | 1804 | if ((fibptr = aac_fib_alloc(dev))) { |
1805 | int status; | ||
1762 | __le32 *info; | 1806 | __le32 *info; |
1763 | 1807 | ||
1764 | aac_fib_init(fibptr); | 1808 | aac_fib_init(fibptr); |
@@ -1769,15 +1813,21 @@ int aac_command_thread(void *data) | |||
1769 | 1813 | ||
1770 | *info = cpu_to_le32(now.tv_sec); | 1814 | *info = cpu_to_le32(now.tv_sec); |
1771 | 1815 | ||
1772 | (void)aac_fib_send(SendHostTime, | 1816 | status = aac_fib_send(SendHostTime, |
1773 | fibptr, | 1817 | fibptr, |
1774 | sizeof(*info), | 1818 | sizeof(*info), |
1775 | FsaNormal, | 1819 | FsaNormal, |
1776 | 1, 1, | 1820 | 1, 1, |
1777 | NULL, | 1821 | NULL, |
1778 | NULL); | 1822 | NULL); |
1779 | aac_fib_complete(fibptr); | 1823 | /* Do not set XferState to zero unless |
1780 | aac_fib_free(fibptr); | 1824 | * it receives a response from F/W */ |
1825 | if (status >= 0) | ||
1826 | aac_fib_complete(fibptr); | ||
1827 | /* FIB should be freed only after | ||
1828 | * getting the response from the F/W */ | ||
1829 | if (status != -ERESTARTSYS) | ||
1830 | aac_fib_free(fibptr); | ||
1781 | } | 1831 | } |
1782 | difference = (long)(unsigned)update_interval*HZ; | 1832 | difference = (long)(unsigned)update_interval*HZ; |
1783 | } else { | 1833 | } else { |
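commsup.c introduces a per-adapter budget for waited-on ("management") FIBs: aac_fib_send() refuses a new waited request once AAC_NUM_MGT_FIB are outstanding, and the counter is dropped again either when delivery fails or when the response arrives in dpcsup.c. The accounting, condensed from the hunk above:

        spin_lock_irqsave(&dev->manage_lock, mflags);
        if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
                spin_unlock_irqrestore(&dev->manage_lock, mflags);
                return -EBUSY;                  /* budget exhausted */
        }
        dev->management_fib_count++;
        spin_unlock_irqrestore(&dev->manage_lock, mflags);
        /* ... deliver the FIB; on delivery failure or on completion the
         * counter is decremented under the same lock ... */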
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index abc9ef5d1b10..9c7408fe8c7d 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q) | |||
57 | struct hw_fib * hwfib; | 57 | struct hw_fib * hwfib; |
58 | struct fib * fib; | 58 | struct fib * fib; |
59 | int consumed = 0; | 59 | int consumed = 0; |
60 | unsigned long flags; | 60 | unsigned long flags, mflags; |
61 | 61 | ||
62 | spin_lock_irqsave(q->lock, flags); | 62 | spin_lock_irqsave(q->lock, flags); |
63 | /* | 63 | /* |
64 | * Keep pulling response QEs off the response queue and waking | 64 | * Keep pulling response QEs off the response queue and waking |
65 | * up the waiters until there are no more QEs. We then return | 65 | * up the waiters until there are no more QEs. We then return |
@@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q) | |||
125 | } else { | 125 | } else { |
126 | unsigned long flagv; | 126 | unsigned long flagv; |
127 | spin_lock_irqsave(&fib->event_lock, flagv); | 127 | spin_lock_irqsave(&fib->event_lock, flagv); |
128 | if (!fib->done) | 128 | if (!fib->done) { |
129 | fib->done = 1; | 129 | fib->done = 1; |
130 | up(&fib->event_wait); | 130 | up(&fib->event_wait); |
131 | } | ||
131 | spin_unlock_irqrestore(&fib->event_lock, flagv); | 132 | spin_unlock_irqrestore(&fib->event_lock, flagv); |
133 | |||
134 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
135 | dev->management_fib_count--; | ||
136 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
137 | |||
132 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); | 138 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); |
133 | if (fib->done == 2) { | 139 | if (fib->done == 2) { |
140 | spin_lock_irqsave(&fib->event_lock, flagv); | ||
141 | fib->done = 0; | ||
142 | spin_unlock_irqrestore(&fib->event_lock, flagv); | ||
134 | aac_fib_complete(fib); | 143 | aac_fib_complete(fib); |
135 | aac_fib_free(fib); | 144 | aac_fib_free(fib); |
136 | } | 145 | } |
@@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
232 | 241 | ||
233 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | 242 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) |
234 | { | 243 | { |
244 | unsigned long mflags; | ||
235 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); | 245 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); |
236 | if ((index & 0x00000002L)) { | 246 | if ((index & 0x00000002L)) { |
237 | struct hw_fib * hw_fib; | 247 | struct hw_fib * hw_fib; |
@@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
320 | unsigned long flagv; | 330 | unsigned long flagv; |
321 | dprintk((KERN_INFO "event_wait up\n")); | 331 | dprintk((KERN_INFO "event_wait up\n")); |
322 | spin_lock_irqsave(&fib->event_lock, flagv); | 332 | spin_lock_irqsave(&fib->event_lock, flagv); |
323 | if (!fib->done) | 333 | if (!fib->done) { |
324 | fib->done = 1; | 334 | fib->done = 1; |
325 | up(&fib->event_wait); | 335 | up(&fib->event_wait); |
336 | } | ||
326 | spin_unlock_irqrestore(&fib->event_lock, flagv); | 337 | spin_unlock_irqrestore(&fib->event_lock, flagv); |
338 | |||
339 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
340 | dev->management_fib_count--; | ||
341 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
342 | |||
327 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); | 343 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); |
344 | if (fib->done == 2) { | ||
345 | spin_lock_irqsave(&fib->event_lock, flagv); | ||
346 | fib->done = 0; | ||
347 | spin_unlock_irqrestore(&fib->event_lock, flagv); | ||
348 | aac_fib_complete(fib); | ||
349 | aac_fib_free(fib); | ||
350 | } | ||
351 | |||
328 | } | 352 | } |
329 | return 0; | 353 | return 0; |
330 | } | 354 | } |
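The dpcsup.c changes close the race with an interrupted waiter: once fib->done == 2 (the thread that sent the FIB gave up), the interrupt handler no longer up()s the semaphore and instead decrements the management count and completes and frees the orphaned FIB itself. Simplified from aac_intr_normal():

        spin_lock_irqsave(&fib->event_lock, flagv);
        if (!fib->done) {
                fib->done = 1;
                up(&fib->event_wait);           /* waiter is still there */
        }
        spin_unlock_irqrestore(&fib->event_lock, flagv);

        if (fib->done == 2) {                   /* waiter aborted: reclaim here */
                spin_lock_irqsave(&fib->event_lock, flagv);
                fib->done = 0;
                spin_unlock_irqrestore(&fib->event_lock, flagv);
                aac_fib_complete(fib);
                aac_fib_free(fib);
        }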
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 4d419c155ce9..78971db5b60e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3171 | tinfo->curr.transport_version = 2; | 3171 | tinfo->curr.transport_version = 2; |
3172 | tinfo->goal.transport_version = 2; | 3172 | tinfo->goal.transport_version = 2; |
3173 | tinfo->goal.ppr_options = 0; | 3173 | tinfo->goal.ppr_options = 0; |
3174 | /* | 3174 | if (scb != NULL) { |
3175 | * Remove any SCBs in the waiting for selection | 3175 | /* |
3176 | * queue that may also be for this target so | 3176 | * Remove any SCBs in the waiting |
3177 | * that command ordering is preserved. | 3177 | * for selection queue that may |
3178 | */ | 3178 | * also be for this target so that |
3179 | ahd_freeze_devq(ahd, scb); | 3179 | * command ordering is preserved. |
3180 | ahd_qinfifo_requeue_tail(ahd, scb); | 3180 | */ |
3181 | ahd_freeze_devq(ahd, scb); | ||
3182 | ahd_qinfifo_requeue_tail(ahd, scb); | ||
3183 | } | ||
3181 | printerror = 0; | 3184 | printerror = 0; |
3182 | } | 3185 | } |
3183 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) | 3186 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) |
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3194 | MSG_EXT_WDTR_BUS_8_BIT, | 3197 | MSG_EXT_WDTR_BUS_8_BIT, |
3195 | AHD_TRANS_CUR|AHD_TRANS_GOAL, | 3198 | AHD_TRANS_CUR|AHD_TRANS_GOAL, |
3196 | /*paused*/TRUE); | 3199 | /*paused*/TRUE); |
3197 | /* | 3200 | if (scb != NULL) { |
3198 | * Remove any SCBs in the waiting for selection | 3201 | /* |
3199 | * queue that may also be for this target so that | 3202 | * Remove any SCBs in the waiting for |
3200 | * command ordering is preserved. | 3203 | * selection queue that may also be for |
3201 | */ | 3204 | * this target so that command ordering |
3202 | ahd_freeze_devq(ahd, scb); | 3205 | * is preserved. |
3203 | ahd_qinfifo_requeue_tail(ahd, scb); | 3206 | */ |
3207 | ahd_freeze_devq(ahd, scb); | ||
3208 | ahd_qinfifo_requeue_tail(ahd, scb); | ||
3209 | } | ||
3204 | printerror = 0; | 3210 | printerror = 0; |
3205 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) | 3211 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) |
3206 | && ppr_busfree == 0) { | 3212 | && ppr_busfree == 0) { |
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3217 | /*ppr_options*/0, | 3223 | /*ppr_options*/0, |
3218 | AHD_TRANS_CUR|AHD_TRANS_GOAL, | 3224 | AHD_TRANS_CUR|AHD_TRANS_GOAL, |
3219 | /*paused*/TRUE); | 3225 | /*paused*/TRUE); |
3220 | /* | 3226 | if (scb != NULL) { |
3221 | * Remove any SCBs in the waiting for selection | 3227 | /* |
3222 | * queue that may also be for this target so that | 3228 | * Remove any SCBs in the waiting for |
3223 | * command ordering is preserved. | 3229 | * selection queue that may also be for |
3224 | */ | 3230 | * this target so that command ordering |
3225 | ahd_freeze_devq(ahd, scb); | 3231 | * is preserved. |
3226 | ahd_qinfifo_requeue_tail(ahd, scb); | 3232 | */ |
3233 | ahd_freeze_devq(ahd, scb); | ||
3234 | ahd_qinfifo_requeue_tail(ahd, scb); | ||
3235 | } | ||
3227 | printerror = 0; | 3236 | printerror = 0; |
3228 | } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 | 3237 | } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 |
3229 | && ahd_sent_msg(ahd, AHDMSG_1B, | 3238 | && ahd_sent_msg(ahd, AHDMSG_1B, |
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3251 | * the message phases. We check it last in case we | 3260 | * the message phases. We check it last in case we |
3252 | * had to send some other message that caused a busfree. | 3261 | * had to send some other message that caused a busfree. |
3253 | */ | 3262 | */ |
3254 | if (printerror != 0 | 3263 | if (scb != NULL && printerror != 0 |
3255 | && (lastphase == P_MESGIN || lastphase == P_MESGOUT) | 3264 | && (lastphase == P_MESGIN || lastphase == P_MESGOUT) |
3256 | && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { | 3265 | && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { |
3257 | 3266 | ||
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 477542602284..9e71ac611146 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt) | |||
2516 | if (info->scsi.phase == PHASE_IDLE) | 2516 | if (info->scsi.phase == PHASE_IDLE) |
2517 | fas216_kick(info); | 2517 | fas216_kick(info); |
2518 | 2518 | ||
2519 | mod_timer(&info->eh_timer, 30 * HZ); | 2519 | mod_timer(&info->eh_timer, jiffies + 30 * HZ); |
2520 | spin_unlock_irqrestore(&info->host_lock, flags); | 2520 | spin_unlock_irqrestore(&info->host_lock, flags); |
2521 | 2521 | ||
2522 | /* | 2522 | /* |
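The fas216 change is a classic timer API slip: mod_timer() takes an absolute expiry time in jiffies, not a relative delay, so passing 30 * HZ armed the timer for a point roughly 30 seconds after boot rather than 30 seconds from now. For comparison:

        mod_timer(&info->eh_timer, 30 * HZ);            /* wrong: absolute, ~30s after boot */
        mod_timer(&info->eh_timer, jiffies + 30 * HZ);  /* right: 30 seconds from now */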
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 10be9f36a4cc..2f47ae7cce91 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | |||
2009 | fcoe_interface_cleanup(fcoe); | 2009 | fcoe_interface_cleanup(fcoe); |
2010 | rtnl_unlock(); | 2010 | rtnl_unlock(); |
2011 | fcoe_if_destroy(fcoe->ctlr.lp); | 2011 | fcoe_if_destroy(fcoe->ctlr.lp); |
2012 | module_put(THIS_MODULE); | ||
2013 | |||
2012 | out_putdev: | 2014 | out_putdev: |
2013 | dev_put(netdev); | 2015 | dev_put(netdev); |
2014 | out_nodev: | 2016 | out_nodev: |
@@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
2059 | } | 2061 | } |
2060 | #endif | 2062 | #endif |
2061 | 2063 | ||
2064 | if (!try_module_get(THIS_MODULE)) { | ||
2065 | rc = -EINVAL; | ||
2066 | goto out_nomod; | ||
2067 | } | ||
2068 | |||
2062 | rtnl_lock(); | 2069 | rtnl_lock(); |
2063 | netdev = fcoe_if_to_netdev(buffer); | 2070 | netdev = fcoe_if_to_netdev(buffer); |
2064 | if (!netdev) { | 2071 | if (!netdev) { |
@@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) | |||
2099 | if (!fcoe_link_ok(lport)) | 2106 | if (!fcoe_link_ok(lport)) |
2100 | fcoe_ctlr_link_up(&fcoe->ctlr); | 2107 | fcoe_ctlr_link_up(&fcoe->ctlr); |
2101 | 2108 | ||
2102 | rc = 0; | ||
2103 | out_free: | ||
2104 | /* | 2109 | /* |
2105 | * Release from init in fcoe_interface_create(), on success lport | 2110 | * Release from init in fcoe_interface_create(), on success lport |
2106 | * should be holding a reference taken in fcoe_if_create(). | 2111 | * should be holding a reference taken in fcoe_if_create(). |
2107 | */ | 2112 | */ |
2108 | fcoe_interface_put(fcoe); | 2113 | fcoe_interface_put(fcoe); |
2114 | dev_put(netdev); | ||
2115 | rtnl_unlock(); | ||
2116 | mutex_unlock(&fcoe_config_mutex); | ||
2117 | |||
2118 | return 0; | ||
2119 | out_free: | ||
2120 | fcoe_interface_put(fcoe); | ||
2109 | out_putdev: | 2121 | out_putdev: |
2110 | dev_put(netdev); | 2122 | dev_put(netdev); |
2111 | out_nodev: | 2123 | out_nodev: |
2112 | rtnl_unlock(); | 2124 | rtnl_unlock(); |
2125 | module_put(THIS_MODULE); | ||
2126 | out_nomod: | ||
2113 | mutex_unlock(&fcoe_config_mutex); | 2127 | mutex_unlock(&fcoe_config_mutex); |
2114 | return rc; | 2128 | return rc; |
2115 | } | 2129 | } |
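fcoe_create() now pins the module for the lifetime of each created interface and fcoe_destroy() drops that reference, so the module cannot be unloaded while an instance still exists; every error path in fcoe_create() releases the reference it took. A sketch of the shape, with do_create()/do_teardown() standing in as hypothetical placeholders for the driver's setup and teardown steps:

        static int example_create(void)
        {
                int rc;

                if (!try_module_get(THIS_MODULE))       /* refuse while unloading */
                        return -EINVAL;

                rc = do_create();                       /* hypothetical setup */
                if (rc) {
                        module_put(THIS_MODULE);        /* error paths drop the pin */
                        return rc;
                }
                return 0;                               /* reference held until destroy */
        }

        static void example_destroy(void)
        {
                do_teardown();                          /* hypothetical teardown */
                module_put(THIS_MODULE);
        }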
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 9823291395ad..511cb6b371ee 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) | |||
1187 | next_timer = fip->ctlr_ka_time; | 1187 | next_timer = fip->ctlr_ka_time; |
1188 | 1188 | ||
1189 | if (time_after_eq(jiffies, fip->port_ka_time)) { | 1189 | if (time_after_eq(jiffies, fip->port_ka_time)) { |
1190 | fip->port_ka_time += jiffies + | 1190 | fip->port_ka_time = jiffies + |
1191 | msecs_to_jiffies(FIP_VN_KA_PERIOD); | 1191 | msecs_to_jiffies(FIP_VN_KA_PERIOD); |
1192 | fip->send_port_ka = 1; | 1192 | fip->send_port_ka = 1; |
1193 | } | 1193 | } |
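The libfcoe fix is one character with a large effect: "+=" added the current jiffies value on top of the previous deadline, pushing port_ka_time far into the future and effectively stopping port keep-alives after the first expiry; "=" rebases the next keep-alive from now.

        if (time_after_eq(jiffies, fip->port_ka_time)) {
                fip->port_ka_time = jiffies +
                        msecs_to_jiffies(FIP_VN_KA_PERIOD);     /* next KA one period from now */
                fip->send_port_ka = 1;
        }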
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 19d711cb938c..7f4364770e4a 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, | |||
1890 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); | 1890 | fc_exch_setup_hdr(ep, fp, ep->f_ctl); |
1891 | sp->cnt++; | 1891 | sp->cnt++; |
1892 | 1892 | ||
1893 | if (ep->xid <= lport->lro_xid) | 1893 | if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) |
1894 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); | 1894 | fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); |
1895 | 1895 | ||
1896 | if (unlikely(lport->tt.frame_send(lport, fp))) | 1896 | if (unlikely(lport->tt.frame_send(lport, fp))) |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 881d5dfe8c74..6fde2fabfd9b 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) | |||
298 | { | 298 | { |
299 | struct fc_lport *lport; | 299 | struct fc_lport *lport; |
300 | 300 | ||
301 | if (!fsp) | ||
302 | return; | ||
303 | |||
304 | lport = fsp->lp; | 301 | lport = fsp->lp; |
305 | if ((fsp->req_flags & FC_SRB_READ) && | 302 | if ((fsp->req_flags & FC_SRB_READ) && |
306 | (lport->lro_enabled) && (lport->tt.ddp_setup)) { | 303 | (lport->lro_enabled) && (lport->tt.ddp_setup)) { |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 0b165024a219..7ec8ce75007c 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) | |||
1800 | u32 did; | 1800 | u32 did; |
1801 | 1801 | ||
1802 | job->reply->reply_payload_rcv_len = 0; | 1802 | job->reply->reply_payload_rcv_len = 0; |
1803 | rsp->resid_len = job->reply_payload.payload_len; | 1803 | if (rsp) |
1804 | rsp->resid_len = job->reply_payload.payload_len; | ||
1804 | 1805 | ||
1805 | mutex_lock(&lport->lp_mutex); | 1806 | mutex_lock(&lport->lp_mutex); |
1806 | 1807 | ||
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 02300523b234..97923bb07765 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
623 | 623 | ||
624 | tov = ntohl(plp->fl_csp.sp_e_d_tov); | 624 | tov = ntohl(plp->fl_csp.sp_e_d_tov); |
625 | if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) | 625 | if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) |
626 | tov /= 1000; | 626 | tov /= 1000000; |
627 | if (tov > rdata->e_d_tov) | 627 | if (tov > rdata->e_d_tov) |
628 | rdata->e_d_tov = tov; | 628 | rdata->e_d_tov = tov; |
629 | csp_seq = ntohs(plp->fl_csp.sp_tot_seq); | 629 | csp_seq = ntohs(plp->fl_csp.sp_tot_seq); |
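The fc_rport change corrects a unit conversion: when the remote port sets the E_D_TOV resolution bit (FC_SP_FT_EDTR), the common service parameters express E_D_TOV in nanoseconds rather than milliseconds, so matching libfc's millisecond bookkeeping needs a divide by 1,000,000, not 1,000. As I read the hunk:

        tov = ntohl(plp->fl_csp.sp_e_d_tov);            /* ms, or ns if EDTR is set */
        if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
                tov /= 1000000;                         /* ns -> ms */
        if (tov > rdata->e_d_tov)
                rdata->e_d_tov = tov;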
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index db6856c138fc..4ad87fd74ddd 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) | |||
992 | if (r2t == NULL) { | 992 | if (r2t == NULL) { |
993 | if (kfifo_out(&tcp_task->r2tqueue, | 993 | if (kfifo_out(&tcp_task->r2tqueue, |
994 | (void *)&tcp_task->r2t, sizeof(void *)) != | 994 | (void *)&tcp_task->r2t, sizeof(void *)) != |
995 | sizeof(void *)) { | 995 | sizeof(void *)) |
996 | WARN_ONCE(1, "unexpected fifo state"); | ||
997 | r2t = NULL; | 996 | r2t = NULL; |
998 | } | 997 | else |
999 | 998 | r2t = tcp_task->r2t; | |
1000 | r2t = tcp_task->r2t; | ||
1001 | } | 999 | } |
1002 | spin_unlock_bh(&session->lock); | 1000 | spin_unlock_bh(&session->lock); |
1003 | } | 1001 | } |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2445e399fd60..2445e399fd60 100755..100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 8a2a1c5935c6..8a2a1c5935c6 100755..100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 708ea3157b60..d9b8ca5116bc 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
@@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) | |||
3781 | compat_alloc_user_space(sizeof(struct megasas_iocpacket)); | 3781 | compat_alloc_user_space(sizeof(struct megasas_iocpacket)); |
3782 | int i; | 3782 | int i; |
3783 | int error = 0; | 3783 | int error = 0; |
3784 | compat_uptr_t ptr; | ||
3784 | 3785 | ||
3785 | if (clear_user(ioc, sizeof(*ioc))) | 3786 | if (clear_user(ioc, sizeof(*ioc))) |
3786 | return -EFAULT; | 3787 | return -EFAULT; |
@@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) | |||
3793 | copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) | 3794 | copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) |
3794 | return -EFAULT; | 3795 | return -EFAULT; |
3795 | 3796 | ||
3796 | for (i = 0; i < MAX_IOCTL_SGE; i++) { | 3797 | /* |
3797 | compat_uptr_t ptr; | 3798 | * The sense_ptr is used in megasas_mgmt_fw_ioctl only when |
3799 | * sense_len is not null, so prepare the 64bit value under | ||
3800 | * the same condition. | ||
3801 | */ | ||
3802 | if (ioc->sense_len) { | ||
3803 | void __user **sense_ioc_ptr = | ||
3804 | (void __user **)(ioc->frame.raw + ioc->sense_off); | ||
3805 | compat_uptr_t *sense_cioc_ptr = | ||
3806 | (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); | ||
3807 | if (get_user(ptr, sense_cioc_ptr) || | ||
3808 | put_user(compat_ptr(ptr), sense_ioc_ptr)) | ||
3809 | return -EFAULT; | ||
3810 | } | ||
3798 | 3811 | ||
3812 | for (i = 0; i < MAX_IOCTL_SGE; i++) { | ||
3799 | if (get_user(ptr, &cioc->sgl[i].iov_base) || | 3813 | if (get_user(ptr, &cioc->sgl[i].iov_base) || |
3800 | put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || | 3814 | put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || |
3801 | copy_in_user(&ioc->sgl[i].iov_len, | 3815 | copy_in_user(&ioc->sgl[i].iov_len, |
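The megaraid_sas compat ioctl previously widened only the SGL iov_base pointers, leaving the 32-bit sense-buffer pointer embedded in the frame untouched, so a 64-bit kernel could chase a truncated address whenever sense_len was set. The added block converts it the same way as the SGL entries; condensed, with comments added:

        if (ioc->sense_len) {
                void __user **sense_ioc_ptr =
                        (void __user **)(ioc->frame.raw + ioc->sense_off);
                compat_uptr_t *sense_cioc_ptr =
                        (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
                if (get_user(ptr, sense_cioc_ptr) ||
                    put_user(compat_ptr(ptr), sense_ioc_ptr))   /* widen 32 -> 64 bit */
                        return -EFAULT;
        }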
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 608e675f68c8..1263d9796e89 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -1586,8 +1586,7 @@ typedef struct fc_port { | |||
1586 | */ | 1586 | */ |
1587 | #define FCF_FABRIC_DEVICE BIT_0 | 1587 | #define FCF_FABRIC_DEVICE BIT_0 |
1588 | #define FCF_LOGIN_NEEDED BIT_1 | 1588 | #define FCF_LOGIN_NEEDED BIT_1 |
1589 | #define FCF_TAPE_PRESENT BIT_2 | 1589 | #define FCF_FCP2_DEVICE BIT_2 |
1590 | #define FCF_FCP2_DEVICE BIT_3 | ||
1591 | 1590 | ||
1592 | /* No loop ID flag. */ | 1591 | /* No loop ID flag. */ |
1593 | #define FC_NO_LOOP_ID 0x1000 | 1592 | #define FC_NO_LOOP_ID 0x1000 |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f61fb8d01330..8bc6f53691e9 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); | |||
453 | extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); | 453 | extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); |
454 | extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); | 454 | extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); |
455 | extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); | 455 | extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); |
456 | extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); | ||
457 | 456 | ||
458 | #endif /* _QLA_GBL_H */ | 457 | #endif /* _QLA_GBL_H */ |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b4a0eac8f96d..3f8e8495b743 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -205,7 +205,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
205 | 205 | ||
206 | switch (data[0]) { | 206 | switch (data[0]) { |
207 | case MBS_COMMAND_COMPLETE: | 207 | case MBS_COMMAND_COMPLETE: |
208 | if (fcport->flags & FCF_TAPE_PRESENT) | 208 | if (fcport->flags & FCF_FCP2_DEVICE) |
209 | opts |= BIT_1; | 209 | opts |= BIT_1; |
210 | rval = qla2x00_get_port_database(vha, fcport, opts); | 210 | rval = qla2x00_get_port_database(vha, fcport, opts); |
211 | if (rval != QLA_SUCCESS) | 211 | if (rval != QLA_SUCCESS) |
@@ -2726,7 +2726,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
2726 | 2726 | ||
2727 | /* | 2727 | /* |
2728 | * Logout all previous fabric devices marked lost, except | 2728 | * Logout all previous fabric devices marked lost, except |
2729 | * tape devices. | 2729 | * FCP2 devices. |
2730 | */ | 2730 | */ |
2731 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 2731 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
2732 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | 2732 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
@@ -2739,7 +2739,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
2739 | qla2x00_mark_device_lost(vha, fcport, | 2739 | qla2x00_mark_device_lost(vha, fcport, |
2740 | ql2xplogiabsentdevice, 0); | 2740 | ql2xplogiabsentdevice, 0); |
2741 | if (fcport->loop_id != FC_NO_LOOP_ID && | 2741 | if (fcport->loop_id != FC_NO_LOOP_ID && |
2742 | (fcport->flags & FCF_TAPE_PRESENT) == 0 && | 2742 | (fcport->flags & FCF_FCP2_DEVICE) == 0 && |
2743 | fcport->port_type != FCT_INITIATOR && | 2743 | fcport->port_type != FCT_INITIATOR && |
2744 | fcport->port_type != FCT_BROADCAST) { | 2744 | fcport->port_type != FCT_BROADCAST) { |
2745 | ha->isp_ops->fabric_logout(vha, | 2745 | ha->isp_ops->fabric_logout(vha, |
@@ -3018,7 +3018,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3018 | fcport->d_id.b24 = new_fcport->d_id.b24; | 3018 | fcport->d_id.b24 = new_fcport->d_id.b24; |
3019 | fcport->flags |= FCF_LOGIN_NEEDED; | 3019 | fcport->flags |= FCF_LOGIN_NEEDED; |
3020 | if (fcport->loop_id != FC_NO_LOOP_ID && | 3020 | if (fcport->loop_id != FC_NO_LOOP_ID && |
3021 | (fcport->flags & FCF_TAPE_PRESENT) == 0 && | 3021 | (fcport->flags & FCF_FCP2_DEVICE) == 0 && |
3022 | fcport->port_type != FCT_INITIATOR && | 3022 | fcport->port_type != FCT_INITIATOR && |
3023 | fcport->port_type != FCT_BROADCAST) { | 3023 | fcport->port_type != FCT_BROADCAST) { |
3024 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, | 3024 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, |
@@ -3272,9 +3272,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3272 | 3272 | ||
3273 | rval = qla2x00_fabric_login(vha, fcport, next_loopid); | 3273 | rval = qla2x00_fabric_login(vha, fcport, next_loopid); |
3274 | if (rval == QLA_SUCCESS) { | 3274 | if (rval == QLA_SUCCESS) { |
3275 | /* Send an ADISC to tape devices.*/ | 3275 | /* Send an ADISC to FCP2 devices.*/ |
3276 | opts = 0; | 3276 | opts = 0; |
3277 | if (fcport->flags & FCF_TAPE_PRESENT) | 3277 | if (fcport->flags & FCF_FCP2_DEVICE) |
3278 | opts |= BIT_1; | 3278 | opts |= BIT_1; |
3279 | rval = qla2x00_get_port_database(vha, fcport, opts); | 3279 | rval = qla2x00_get_port_database(vha, fcport, opts); |
3280 | if (rval != QLA_SUCCESS) { | 3280 | if (rval != QLA_SUCCESS) { |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ffd0efdff40e..6fc63b98818c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
1917 | struct rsp_que *rsp; | 1917 | struct rsp_que *rsp; |
1918 | struct device_reg_24xx __iomem *reg; | 1918 | struct device_reg_24xx __iomem *reg; |
1919 | struct scsi_qla_host *vha; | 1919 | struct scsi_qla_host *vha; |
1920 | unsigned long flags; | ||
1920 | 1921 | ||
1921 | rsp = (struct rsp_que *) dev_id; | 1922 | rsp = (struct rsp_que *) dev_id; |
1922 | if (!rsp) { | 1923 | if (!rsp) { |
@@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
1927 | ha = rsp->hw; | 1928 | ha = rsp->hw; |
1928 | reg = &ha->iobase->isp24; | 1929 | reg = &ha->iobase->isp24; |
1929 | 1930 | ||
1930 | spin_lock_irq(&ha->hardware_lock); | 1931 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1931 | 1932 | ||
1932 | vha = qla25xx_get_host(rsp); | 1933 | vha = pci_get_drvdata(ha->pdev); |
1933 | qla24xx_process_response_queue(vha, rsp); | 1934 | qla24xx_process_response_queue(vha, rsp); |
1934 | if (!ha->flags.disable_msix_handshake) { | 1935 | if (!ha->flags.disable_msix_handshake) { |
1935 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1936 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1936 | RD_REG_DWORD_RELAXED(®->hccr); | 1937 | RD_REG_DWORD_RELAXED(®->hccr); |
1937 | } | 1938 | } |
1938 | spin_unlock_irq(&ha->hardware_lock); | 1939 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1939 | 1940 | ||
1940 | return IRQ_HANDLED; | 1941 | return IRQ_HANDLED; |
1941 | } | 1942 | } |
@@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
1946 | struct qla_hw_data *ha; | 1947 | struct qla_hw_data *ha; |
1947 | struct rsp_que *rsp; | 1948 | struct rsp_que *rsp; |
1948 | struct device_reg_24xx __iomem *reg; | 1949 | struct device_reg_24xx __iomem *reg; |
1950 | unsigned long flags; | ||
1949 | 1951 | ||
1950 | rsp = (struct rsp_que *) dev_id; | 1952 | rsp = (struct rsp_que *) dev_id; |
1951 | if (!rsp) { | 1953 | if (!rsp) { |
@@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
1958 | /* Clear the interrupt, if enabled, for this response queue */ | 1960 | /* Clear the interrupt, if enabled, for this response queue */ |
1959 | if (rsp->options & ~BIT_6) { | 1961 | if (rsp->options & ~BIT_6) { |
1960 | reg = &ha->iobase->isp24; | 1962 | reg = &ha->iobase->isp24; |
1961 | spin_lock_irq(&ha->hardware_lock); | 1963 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1962 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1964 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1963 | RD_REG_DWORD_RELAXED(®->hccr); | 1965 | RD_REG_DWORD_RELAXED(®->hccr); |
1964 | spin_unlock_irq(&ha->hardware_lock); | 1966 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1965 | } | 1967 | } |
1966 | queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); | 1968 | queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); |
1967 | 1969 | ||
@@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1979 | uint32_t stat; | 1981 | uint32_t stat; |
1980 | uint32_t hccr; | 1982 | uint32_t hccr; |
1981 | uint16_t mb[4]; | 1983 | uint16_t mb[4]; |
1984 | unsigned long flags; | ||
1982 | 1985 | ||
1983 | rsp = (struct rsp_que *) dev_id; | 1986 | rsp = (struct rsp_que *) dev_id; |
1984 | if (!rsp) { | 1987 | if (!rsp) { |
@@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1990 | reg = &ha->iobase->isp24; | 1993 | reg = &ha->iobase->isp24; |
1991 | status = 0; | 1994 | status = 0; |
1992 | 1995 | ||
1993 | spin_lock_irq(&ha->hardware_lock); | 1996 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1994 | vha = pci_get_drvdata(ha->pdev); | 1997 | vha = pci_get_drvdata(ha->pdev); |
1995 | do { | 1998 | do { |
1996 | stat = RD_REG_DWORD(®->host_status); | 1999 | stat = RD_REG_DWORD(®->host_status); |
@@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
2039 | } | 2042 | } |
2040 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 2043 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
2041 | } while (0); | 2044 | } while (0); |
2042 | spin_unlock_irq(&ha->hardware_lock); | 2045 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2043 | 2046 | ||
2044 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && | 2047 | if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && |
2045 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { | 2048 | (status & MBX_INTERRUPT) && ha->flags.mbox_int) { |
@@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp) | |||
2277 | msix->rsp = rsp; | 2280 | msix->rsp = rsp; |
2278 | return ret; | 2281 | return ret; |
2279 | } | 2282 | } |
2280 | |||
2281 | struct scsi_qla_host * | ||
2282 | qla25xx_get_host(struct rsp_que *rsp) | ||
2283 | { | ||
2284 | srb_t *sp; | ||
2285 | struct qla_hw_data *ha = rsp->hw; | ||
2286 | struct scsi_qla_host *vha = NULL; | ||
2287 | struct sts_entry_24xx *pkt; | ||
2288 | struct req_que *req; | ||
2289 | uint16_t que; | ||
2290 | uint32_t handle; | ||
2291 | |||
2292 | pkt = (struct sts_entry_24xx *) rsp->ring_ptr; | ||
2293 | que = MSW(pkt->handle); | ||
2294 | handle = (uint32_t) LSW(pkt->handle); | ||
2295 | req = ha->req_q_map[que]; | ||
2296 | if (handle < MAX_OUTSTANDING_COMMANDS) { | ||
2297 | sp = req->outstanding_cmds[handle]; | ||
2298 | if (sp) | ||
2299 | return sp->fcport->vha; | ||
2300 | else | ||
2301 | goto base_que; | ||
2302 | } | ||
2303 | base_que: | ||
2304 | vha = pci_get_drvdata(ha->pdev); | ||
2305 | return vha; | ||
2306 | } | ||
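Besides dropping the qla25xx_get_host() helper in favour of pci_get_drvdata(), the qla_isr.c hunks switch the MSI-X handlers to the irqsave locking variants. spin_unlock_irq() unconditionally re-enables interrupts, which is unsafe if the handler runs with interrupts already disabled; the save/restore pair preserves the caller's IRQ state:

        unsigned long flags;

        spin_lock_irqsave(&ha->hardware_lock, flags);   /* keeps the caller's IRQ state */
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);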
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b901aa267e7d..ff17dee28613 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -636,13 +636,15 @@ failed: | |||
636 | 636 | ||
637 | static void qla_do_work(struct work_struct *work) | 637 | static void qla_do_work(struct work_struct *work) |
638 | { | 638 | { |
639 | unsigned long flags; | ||
639 | struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); | 640 | struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); |
640 | struct scsi_qla_host *vha; | 641 | struct scsi_qla_host *vha; |
642 | struct qla_hw_data *ha = rsp->hw; | ||
641 | 643 | ||
642 | spin_lock_irq(&rsp->hw->hardware_lock); | 644 | spin_lock_irqsave(&rsp->hw->hardware_lock, flags); |
643 | vha = qla25xx_get_host(rsp); | 645 | vha = pci_get_drvdata(ha->pdev); |
644 | qla24xx_process_response_queue(vha, rsp); | 646 | qla24xx_process_response_queue(vha, rsp); |
645 | spin_unlock_irq(&rsp->hw->hardware_lock); | 647 | spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); |
646 | } | 648 | } |
647 | 649 | ||
648 | /* create response queue */ | 650 | /* create response queue */ |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 209f50e788a1..8529eb1f3cd4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1188,7 +1188,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1188 | scsi_qla_host_t *vha = shost_priv(sdev->host); | 1188 | scsi_qla_host_t *vha = shost_priv(sdev->host); |
1189 | struct qla_hw_data *ha = vha->hw; | 1189 | struct qla_hw_data *ha = vha->hw; |
1190 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); | 1190 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); |
1191 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | ||
1192 | struct req_que *req = vha->req; | 1191 | struct req_que *req = vha->req; |
1193 | 1192 | ||
1194 | if (sdev->tagged_supported) | 1193 | if (sdev->tagged_supported) |
@@ -1197,8 +1196,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1197 | scsi_deactivate_tcq(sdev, req->max_q_depth); | 1196 | scsi_deactivate_tcq(sdev, req->max_q_depth); |
1198 | 1197 | ||
1199 | rport->dev_loss_tmo = ha->port_down_retry_count; | 1198 | rport->dev_loss_tmo = ha->port_down_retry_count; |
1200 | if (sdev->type == TYPE_TAPE) | ||
1201 | fcport->flags |= FCF_TAPE_PRESENT; | ||
1202 | 1199 | ||
1203 | return 0; | 1200 | return 0; |
1204 | } | 1201 | } |
@@ -2805,7 +2802,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) | |||
2805 | 2802 | ||
2806 | fcport->login_retry--; | 2803 | fcport->login_retry--; |
2807 | if (fcport->flags & FCF_FABRIC_DEVICE) { | 2804 | if (fcport->flags & FCF_FABRIC_DEVICE) { |
2808 | if (fcport->flags & FCF_TAPE_PRESENT) | 2805 | if (fcport->flags & FCF_FCP2_DEVICE) |
2809 | ha->isp_ops->fabric_logout(vha, | 2806 | ha->isp_ops->fabric_logout(vha, |
2810 | fcport->loop_id, | 2807 | fcport->loop_id, |
2811 | fcport->d_id.b.domain, | 2808 | fcport->d_id.b.domain, |
@@ -3141,7 +3138,10 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
3141 | if (!IS_QLA2100(ha) && vha->link_down_timeout) | 3138 | if (!IS_QLA2100(ha) && vha->link_down_timeout) |
3142 | atomic_set(&vha->loop_state, LOOP_DEAD); | 3139 | atomic_set(&vha->loop_state, LOOP_DEAD); |
3143 | 3140 | ||
3144 | /* Schedule an ISP abort to return any tape commands. */ | 3141 | /* |
3142 | * Schedule an ISP abort to return any FCP2-device | ||
3143 | * commands. | ||
3144 | */ | ||
3145 | /* NPIV - scan physical port only */ | 3145 | /* NPIV - scan physical port only */ |
3146 | if (!vha->vp_idx) { | 3146 | if (!vha->vp_idx) { |
3147 | spin_lock_irqsave(&ha->hardware_lock, | 3147 | spin_lock_irqsave(&ha->hardware_lock, |
@@ -3158,7 +3158,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
3158 | if (sp->ctx) | 3158 | if (sp->ctx) |
3159 | continue; | 3159 | continue; |
3160 | sfcp = sp->fcport; | 3160 | sfcp = sp->fcport; |
3161 | if (!(sfcp->flags & FCF_TAPE_PRESENT)) | 3161 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) |
3162 | continue; | 3162 | continue; |
3163 | 3163 | ||
3164 | set_bit(ISP_ABORT_NEEDED, | 3164 | set_bit(ISP_ABORT_NEEDED, |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 010e69b29afe..371dc895972a 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -2292,11 +2292,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, | |||
2292 | uint32_t faddr, left, burst; | 2292 | uint32_t faddr, left, burst; |
2293 | struct qla_hw_data *ha = vha->hw; | 2293 | struct qla_hw_data *ha = vha->hw; |
2294 | 2294 | ||
2295 | if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) | ||
2296 | goto try_fast; | ||
2295 | if (offset & 0xfff) | 2297 | if (offset & 0xfff) |
2296 | goto slow_read; | 2298 | goto slow_read; |
2297 | if (length < OPTROM_BURST_SIZE) | 2299 | if (length < OPTROM_BURST_SIZE) |
2298 | goto slow_read; | 2300 | goto slow_read; |
2299 | 2301 | ||
2302 | try_fast: | ||
2300 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, | 2303 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, |
2301 | &optrom_dma, GFP_KERNEL); | 2304 | &optrom_dma, GFP_KERNEL); |
2302 | if (!optrom) { | 2305 | if (!optrom) { |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index a65dd95507c6..ed36279a33c1 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.01-k9" | 10 | #define QLA2XXX_VERSION "8.03.01-k10" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d8927681ec88..c6642423cc67 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
749 | */ | 749 | */ |
750 | req->next_rq->resid_len = scsi_in(cmd)->resid; | 750 | req->next_rq->resid_len = scsi_in(cmd)->resid; |
751 | 751 | ||
752 | scsi_release_buffers(cmd); | ||
752 | blk_end_request_all(req, 0); | 753 | blk_end_request_all(req, 0); |
753 | 754 | ||
754 | scsi_release_buffers(cmd); | ||
755 | scsi_next_command(cmd); | 755 | scsi_next_command(cmd); |
756 | return; | 756 | return; |
757 | } | 757 | } |
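For bidirectional commands the completion path now frees the command's scatter-gather resources before blk_end_request_all() completes the request pair rather than after, presumably so nothing is torn down once the block layer may have recycled the request. The resulting order, restated with the rationale as comments:

    scsi_release_buffers(cmd);    /* free the sg tables while the command still
                                     owns req and its bidi partner */
    blk_end_request_all(req, 0);  /* now complete both halves of the request */
    scsi_next_command(cmd);       /* put the command and restart the queue */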
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index ddfcecd5099f..653f22a8deb9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3527,7 +3527,10 @@ fc_bsg_job_timeout(struct request *req) | |||
3527 | if (!done && i->f->bsg_timeout) { | 3527 | if (!done && i->f->bsg_timeout) { |
3528 | /* call LLDD to abort the i/o as it has timed out */ | 3528 | /* call LLDD to abort the i/o as it has timed out */ |
3529 | err = i->f->bsg_timeout(job); | 3529 | err = i->f->bsg_timeout(job); |
3530 | if (err) | 3530 | if (err == -EAGAIN) { |
3531 | job->ref_cnt--; | ||
3532 | return BLK_EH_RESET_TIMER; | ||
3533 | } else if (err) | ||
3531 | printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " | 3534 | printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " |
3532 | "abort failed with status %d\n", err); | 3535 | "abort failed with status %d\n", err); |
3533 | } | 3536 | } |
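fc_bsg_job_timeout() now treats -EAGAIN from the LLD's bsg_timeout() as "cannot abort yet": it drops the reference it took on the job and asks the block layer to rearm the timer instead of printing an error. A sketch of an LLD callback that returns -EAGAIN; all names below are illustrative, not from any real driver:

    #include <linux/types.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    struct demo_hw {
        bool firmware_busy;             /* hypothetical per-host state */
    };

    static int demo_issue_abort(struct demo_hw *hw, struct fc_bsg_job *job)
    {
        return 0;                       /* placeholder: 0 on success, -Exxx on failure */
    }

    /* Sketch: an LLD ->bsg_timeout() that defers when it cannot abort yet. */
    static int demo_bsg_timeout(struct fc_bsg_job *job)
    {
        struct demo_hw *hw = shost_priv(job->shost);

        if (hw->firmware_busy)          /* cannot issue an abort right now */
            return -EAGAIN;             /* core drops its job ref and rearms the timer */

        return demo_issue_abort(hw, job);
    }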
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index c3e37c8e7e26..e9b15c3746fa 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ | |||
83 | 83 | ||
84 | #define PASS_LIMIT 256 | 84 | #define PASS_LIMIT 256 |
85 | 85 | ||
86 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
87 | |||
88 | |||
86 | /* | 89 | /* |
87 | * We default to IRQ0 for the "no irq" hack. Some | 90 | * We default to IRQ0 for the "no irq" hack. Some |
88 | * machine types want others as well - they're free | 91 | * machine types want others as well - they're free |
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) | |||
1792 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | 1795 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; |
1793 | spin_unlock_irqrestore(&up->port.lock, flags); | 1796 | spin_unlock_irqrestore(&up->port.lock, flags); |
1794 | 1797 | ||
1795 | return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; | 1798 | return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; |
1796 | } | 1799 | } |
1797 | 1800 | ||
1798 | static unsigned int serial8250_get_mctrl(struct uart_port *port) | 1801 | static unsigned int serial8250_get_mctrl(struct uart_port *port) |
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) | |||
1850 | spin_unlock_irqrestore(&up->port.lock, flags); | 1853 | spin_unlock_irqrestore(&up->port.lock, flags); |
1851 | } | 1854 | } |
1852 | 1855 | ||
1853 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
1854 | |||
1855 | /* | 1856 | /* |
1856 | * Wait for transmitter & holding register to empty | 1857 | * Wait for transmitter & holding register to empty |
1857 | */ | 1858 | */ |
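serial8250_tx_empty() used to report the transmitter idle on UART_LSR_TEMT alone; it now requires both TEMT and THRE, the same BOTH_EMPTY test wait_for_xmitr() already uses, which is why the macro moves to the top of the file. The check in isolation (the LSR bit values are the standard 16550 ones):

    #include <stdbool.h>
    #include <stdint.h>

    #define UART_LSR_THRE  0x20   /* bit 5: transmit-holding register empty */
    #define UART_LSR_TEMT  0x40   /* bit 6: transmitter empty */
    #define BOTH_EMPTY     (UART_LSR_TEMT | UART_LSR_THRE)

    static bool tx_fully_idle(uint8_t lsr)
    {
        /* report idle only when both bits are set */
        return (lsr & BOTH_EMPTY) == BOTH_EMPTY;
    }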
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index df854401af2d..95421fa3b304 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -758,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
758 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), | 758 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), |
759 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), | 759 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), |
760 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), | 760 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), |
761 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), | ||
761 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), | 762 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), |
762 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), | 763 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), |
763 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), | 764 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), |
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index 377f2712289e..ab2ab3c81834 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c | |||
@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s, | |||
394 | spin_unlock_irqrestore(&port->lock, flags); | 394 | spin_unlock_irqrestore(&port->lock, flags); |
395 | } | 395 | } |
396 | 396 | ||
397 | static int __init ulite_console_setup(struct console *co, char *options) | 397 | static int __devinit ulite_console_setup(struct console *co, char *options) |
398 | { | 398 | { |
399 | struct uart_port *port; | 399 | struct uart_port *port; |
400 | int baud = 9600; | 400 | int baud = 9600; |
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 51e5e1dfa6e5..30973ec16a93 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c | |||
@@ -173,15 +173,12 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | |||
173 | int edge; | 173 | int edge; |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!) | 176 | * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG |
177 | * 0 0 10 10 1 0 | 177 | * 0 0 10 10 1 1 |
178 | * 0 1 10 10 0 1 | 178 | * 0 1 10 10 0 0 |
179 | * 1 0 11 11 0 1 | 179 | * 1 0 11 11 0 0 |
180 | * 1 1 11 11 1 0 | 180 | * 1 1 11 11 1 1 |
181 | * | ||
182 | * (!) Note: REDG is inverted recommended data sheet setting | ||
183 | */ | 181 | */ |
184 | |||
185 | sh_msiof_write(p, FCTR, 0); | 182 | sh_msiof_write(p, FCTR, 0); |
186 | sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); | 183 | sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); |
187 | sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); | 184 | sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); |
@@ -193,7 +190,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | |||
193 | edge = cpol ? cpha : !cpha; | 190 | edge = cpol ? cpha : !cpha; |
194 | 191 | ||
195 | tmp |= edge << 27; /* TEDG */ | 192 | tmp |= edge << 27; /* TEDG */ |
196 | tmp |= !edge << 26; /* REDG */ | 193 | tmp |= edge << 26; /* REDG */ |
197 | tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */ | 194 | tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */ |
198 | sh_msiof_write(p, CTR, tmp); | 195 | sh_msiof_write(p, CTR, tmp); |
199 | } | 196 | } |
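The corrected comment table gives TEDG and REDG the same value for every CPOL/CPHA combination, and the code change makes REDG follow edge rather than its inverse. A throwaway program that reproduces the table from edge = cpol ? cpha : !cpha:

    /* Prints the CPOL/CPHA -> TEDG/REDG table from the corrected comment. */
    #include <stdio.h>

    int main(void)
    {
        for (int cpol = 0; cpol <= 1; cpol++)
            for (int cpha = 0; cpha <= 1; cpha++) {
                int edge = cpol ? cpha : !cpha;
                printf("CPOL=%d CPHA=%d -> TEDG=%d REDG=%d\n",
                       cpol, cpha, edge, edge);
            }
        return 0;
    }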
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 5681ebed9c65..03dfd27c4bfb 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c | |||
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus) | |||
494 | #endif | 494 | #endif |
495 | break; | 495 | break; |
496 | case SSB_BUSTYPE_SDIO: | 496 | case SSB_BUSTYPE_SDIO: |
497 | #ifdef CONFIG_SSB_SDIO | 497 | #ifdef CONFIG_SSB_SDIOHOST |
498 | sdev->irq = bus->host_sdio->dev.irq; | ||
499 | dev->parent = &bus->host_sdio->dev; | 498 | dev->parent = &bus->host_sdio->dev; |
500 | #endif | 499 | #endif |
501 | break; | 500 | break; |
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 94eb86319ff3..fc2e963e65e9 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
@@ -99,8 +99,6 @@ source "drivers/staging/line6/Kconfig" | |||
99 | 99 | ||
100 | source "drivers/gpu/drm/vmwgfx/Kconfig" | 100 | source "drivers/gpu/drm/vmwgfx/Kconfig" |
101 | 101 | ||
102 | source "drivers/gpu/drm/radeon/Kconfig" | ||
103 | |||
104 | source "drivers/gpu/drm/nouveau/Kconfig" | 102 | source "drivers/gpu/drm/nouveau/Kconfig" |
105 | 103 | ||
106 | source "drivers/staging/octeon/Kconfig" | 104 | source "drivers/staging/octeon/Kconfig" |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6e8bcdfd23b4..a678186f218f 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg) | |||
1312 | void __user *addr = as->userurb; | 1312 | void __user *addr = as->userurb; |
1313 | unsigned int i; | 1313 | unsigned int i; |
1314 | 1314 | ||
1315 | if (as->userbuffer) | 1315 | if (as->userbuffer && urb->actual_length) |
1316 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, | 1316 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, |
1317 | urb->transfer_buffer_length)) | 1317 | urb->actual_length)) |
1318 | goto err_out; | 1318 | goto err_out; |
1319 | if (put_user(as->status, &userurb->status)) | 1319 | if (put_user(as->status, &userurb->status)) |
1320 | goto err_out; | 1320 | goto err_out; |
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg) | |||
1334 | } | 1334 | } |
1335 | } | 1335 | } |
1336 | 1336 | ||
1337 | free_async(as); | ||
1338 | |||
1339 | if (put_user(addr, (void __user * __user *)arg)) | 1337 | if (put_user(addr, (void __user * __user *)arg)) |
1340 | return -EFAULT; | 1338 | return -EFAULT; |
1341 | return 0; | 1339 | return 0; |
1342 | 1340 | ||
1343 | err_out: | 1341 | err_out: |
1344 | free_async(as); | ||
1345 | return -EFAULT; | 1342 | return -EFAULT; |
1346 | } | 1343 | } |
1347 | 1344 | ||
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps) | |||
1371 | static int proc_reapurb(struct dev_state *ps, void __user *arg) | 1368 | static int proc_reapurb(struct dev_state *ps, void __user *arg) |
1372 | { | 1369 | { |
1373 | struct async *as = reap_as(ps); | 1370 | struct async *as = reap_as(ps); |
1374 | if (as) | 1371 | if (as) { |
1375 | return processcompl(as, (void __user * __user *)arg); | 1372 | int retval = processcompl(as, (void __user * __user *)arg); |
1373 | free_async(as); | ||
1374 | return retval; | ||
1375 | } | ||
1376 | if (signal_pending(current)) | 1376 | if (signal_pending(current)) |
1377 | return -EINTR; | 1377 | return -EINTR; |
1378 | return -EIO; | 1378 | return -EIO; |
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) | |||
1380 | 1380 | ||
1381 | static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) | 1381 | static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) |
1382 | { | 1382 | { |
1383 | int retval; | ||
1383 | struct async *as; | 1384 | struct async *as; |
1384 | 1385 | ||
1385 | if (!(as = async_getcompleted(ps))) | 1386 | as = async_getcompleted(ps); |
1386 | return -EAGAIN; | 1387 | retval = -EAGAIN; |
1387 | return processcompl(as, (void __user * __user *)arg); | 1388 | if (as) { |
1389 | retval = processcompl(as, (void __user * __user *)arg); | ||
1390 | free_async(as); | ||
1391 | } | ||
1392 | return retval; | ||
1388 | } | 1393 | } |
1389 | 1394 | ||
1390 | #ifdef CONFIG_COMPAT | 1395 | #ifdef CONFIG_COMPAT |
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | |||
1475 | void __user *addr = as->userurb; | 1480 | void __user *addr = as->userurb; |
1476 | unsigned int i; | 1481 | unsigned int i; |
1477 | 1482 | ||
1478 | if (as->userbuffer) | 1483 | if (as->userbuffer && urb->actual_length) |
1479 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, | 1484 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, |
1480 | urb->transfer_buffer_length)) | 1485 | urb->actual_length)) |
1481 | return -EFAULT; | 1486 | return -EFAULT; |
1482 | if (put_user(as->status, &userurb->status)) | 1487 | if (put_user(as->status, &userurb->status)) |
1483 | return -EFAULT; | 1488 | return -EFAULT; |
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | |||
1497 | } | 1502 | } |
1498 | } | 1503 | } |
1499 | 1504 | ||
1500 | free_async(as); | ||
1501 | if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) | 1505 | if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) |
1502 | return -EFAULT; | 1506 | return -EFAULT; |
1503 | return 0; | 1507 | return 0; |
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | |||
1506 | static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) | 1510 | static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) |
1507 | { | 1511 | { |
1508 | struct async *as = reap_as(ps); | 1512 | struct async *as = reap_as(ps); |
1509 | if (as) | 1513 | if (as) { |
1510 | return processcompl_compat(as, (void __user * __user *)arg); | 1514 | int retval = processcompl_compat(as, (void __user * __user *)arg); |
1515 | free_async(as); | ||
1516 | return retval; | ||
1517 | } | ||
1511 | if (signal_pending(current)) | 1518 | if (signal_pending(current)) |
1512 | return -EINTR; | 1519 | return -EINTR; |
1513 | return -EIO; | 1520 | return -EIO; |
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) | |||
1515 | 1522 | ||
1516 | static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) | 1523 | static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) |
1517 | { | 1524 | { |
1525 | int retval; | ||
1518 | struct async *as; | 1526 | struct async *as; |
1519 | 1527 | ||
1520 | if (!(as = async_getcompleted(ps))) | 1528 | retval = -EAGAIN; |
1521 | return -EAGAIN; | 1529 | as = async_getcompleted(ps); |
1522 | return processcompl_compat(as, (void __user * __user *)arg); | 1530 | if (as) { |
1531 | retval = processcompl_compat(as, (void __user * __user *)arg); | ||
1532 | free_async(as); | ||
1533 | } | ||
1534 | return retval; | ||
1523 | } | 1535 | } |
1524 | 1536 | ||
1525 | 1537 | ||
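Two related usbfs fixes above: freeing of the async structure moves out of processcompl()/processcompl_compat() into the reap callers, so every outcome, including copy-out failures, frees it exactly once; and only urb->actual_length bytes are copied back, so a short IN transfer no longer pushes buffer contents the device never filled in out to userspace. The copy-out guard again, as a recap with the rationale spelled out in comments:

    /* Recap of the guard in the hunk above, not new driver code. */
    if (as->userbuffer && urb->actual_length) {
        /* Copy urb->actual_length, not transfer_buffer_length: the tail of the
         * buffer was never written by the device and must not reach userspace. */
        if (copy_to_user(as->userbuffer, urb->transfer_buffer,
                         urb->actual_length))
            goto err_out;
    }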
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c index df77f6131c73..f1e3aad76c37 100644 --- a/drivers/usb/gadget/f_audio.c +++ b/drivers/usb/gadget/f_audio.c | |||
@@ -60,7 +60,7 @@ DECLARE_UAC_AC_HEADER_DESCRIPTOR(2); | |||
60 | #define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \ | 60 | #define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \ |
61 | + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0)) | 61 | + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0)) |
62 | /* B.3.2 Class-Specific AC Interface Descriptor */ | 62 | /* B.3.2 Class-Specific AC Interface Descriptor */ |
63 | static struct uac_ac_header_descriptor_2 ac_header_desc = { | 63 | static struct uac_ac_header_descriptor_v1_2 ac_header_desc = { |
64 | .bLength = UAC_DT_AC_HEADER_LENGTH, | 64 | .bLength = UAC_DT_AC_HEADER_LENGTH, |
65 | .bDescriptorType = USB_DT_CS_INTERFACE, | 65 | .bDescriptorType = USB_DT_CS_INTERFACE, |
66 | .bDescriptorSubtype = UAC_HEADER, | 66 | .bDescriptorSubtype = UAC_HEADER, |
@@ -124,7 +124,7 @@ static struct usb_audio_control_selector feature_unit = { | |||
124 | }; | 124 | }; |
125 | 125 | ||
126 | #define OUTPUT_TERMINAL_ID 3 | 126 | #define OUTPUT_TERMINAL_ID 3 |
127 | static struct uac_output_terminal_descriptor output_terminal_desc = { | 127 | static struct uac_output_terminal_descriptor_v1 output_terminal_desc = { |
128 | .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, | 128 | .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, |
129 | .bDescriptorType = USB_DT_CS_INTERFACE, | 129 | .bDescriptorType = USB_DT_CS_INTERFACE, |
130 | .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, | 130 | .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, |
@@ -154,7 +154,7 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = { | |||
154 | }; | 154 | }; |
155 | 155 | ||
156 | /* B.4.2 Class-Specific AS Interface Descriptor */ | 156 | /* B.4.2 Class-Specific AS Interface Descriptor */ |
157 | static struct uac_as_header_descriptor as_header_desc = { | 157 | static struct uac_as_header_descriptor_v1 as_header_desc = { |
158 | .bLength = UAC_DT_AS_HEADER_SIZE, | 158 | .bLength = UAC_DT_AS_HEADER_SIZE, |
159 | .bDescriptorType = USB_DT_CS_INTERFACE, | 159 | .bDescriptorType = USB_DT_CS_INTERFACE, |
160 | .bDescriptorSubtype = UAC_AS_GENERAL, | 160 | .bDescriptorSubtype = UAC_AS_GENERAL, |
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 0a577d5694fd..d4f0db58a8ad 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c | |||
@@ -358,7 +358,7 @@ done: | |||
358 | * b15: bmType (0 == data) | 358 | * b15: bmType (0 == data) |
359 | */ | 359 | */ |
360 | len = skb->len; | 360 | len = skb->len; |
361 | put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); | 361 | put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); |
362 | 362 | ||
363 | /* add a zero-length EEM packet, if needed */ | 363 | /* add a zero-length EEM packet, if needed */ |
364 | if (padlen) | 364 | if (padlen) |
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port, | |||
464 | } | 464 | } |
465 | 465 | ||
466 | /* validate CRC */ | 466 | /* validate CRC */ |
467 | crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); | ||
468 | if (header & BIT(14)) { | 467 | if (header & BIT(14)) { |
469 | crc = get_unaligned_le32(skb->data + len | 468 | crc = get_unaligned_le32(skb->data + len |
470 | - ETH_FCS_LEN); | 469 | - ETH_FCS_LEN); |
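In a CDC-EEM data-packet header, bits 0-13 hold the frame length, bit 14 is bmCRC and bit 15 is bmType (0 = data). eem_wrap() was setting bmCRC even though the gadget sends the 0xdeadbeef sentinel instead of a real FCS, so the bit is now left clear, and eem_unwrap() stops fetching a CRC before it has looked at bit 14. A sketch of composing such a header (the sentinel behaviour comes from the CDC-EEM spec, not from the hunk):

    #include <linux/skbuff.h>
    #include <asm/unaligned.h>

    /* Sketch: EEM data-packet header when the FCS is the 0xdeadbeef sentinel. */
    static void demo_eem_push_header(struct sk_buff *skb)
    {
        u16 hdr = skb->len & 0x3FFF;    /* bits 0..13: EEM frame length */
                                        /* bit 14 (bmCRC) clear: sentinel FCS, no real CRC */
                                        /* bit 15 (bmType) clear: data packet */
        put_unaligned_le16(hdr, skb_push(skb, 2));
    }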
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c index d0b1e836f0e0..5f6a2e0a9357 100644 --- a/drivers/usb/gadget/gmidi.c +++ b/drivers/usb/gadget/gmidi.c | |||
@@ -237,7 +237,7 @@ static const struct usb_interface_descriptor ac_interface_desc = { | |||
237 | }; | 237 | }; |
238 | 238 | ||
239 | /* B.3.2 Class-Specific AC Interface Descriptor */ | 239 | /* B.3.2 Class-Specific AC Interface Descriptor */ |
240 | static const struct uac_ac_header_descriptor_1 ac_header_desc = { | 240 | static const struct uac_ac_header_descriptor_v1_1 ac_header_desc = { |
241 | .bLength = UAC_DT_AC_HEADER_SIZE(1), | 241 | .bLength = UAC_DT_AC_HEADER_SIZE(1), |
242 | .bDescriptorType = USB_DT_CS_INTERFACE, | 242 | .bDescriptorType = USB_DT_CS_INTERFACE, |
243 | .bDescriptorSubtype = USB_MS_HEADER, | 243 | .bDescriptorSubtype = USB_MS_HEADER, |
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c index 429560100b10..76496f5d272c 100644 --- a/drivers/usb/gadget/multi.c +++ b/drivers/usb/gadget/multi.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #if defined USB_ETH_RNDIS | 29 | #if defined USB_ETH_RNDIS |
30 | # undef USB_ETH_RNDIS | 30 | # undef USB_ETH_RNDIS |
31 | #endif | 31 | #endif |
32 | #ifdef CONFIG_USB_ETH_RNDIS | 32 | #ifdef CONFIG_USB_G_MULTI_RNDIS |
33 | # define USB_ETH_RNDIS y | 33 | # define USB_ETH_RNDIS y |
34 | #endif | 34 | #endif |
35 | 35 | ||
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index e220fb8091a3..8b45145b9136 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/io.h> | 26 | #include <linux/io.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
29 | #include <linux/err.h> | ||
29 | 30 | ||
30 | #include <linux/usb/ch9.h> | 31 | #include <linux/usb/ch9.h> |
31 | #include <linux/usb/gadget.h> | 32 | #include <linux/usb/gadget.h> |
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 4b5dbd0127f5..5fc80a104150 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c | |||
@@ -2582,6 +2582,7 @@ err: | |||
2582 | hsotg->gadget.dev.driver = NULL; | 2582 | hsotg->gadget.dev.driver = NULL; |
2583 | return ret; | 2583 | return ret; |
2584 | } | 2584 | } |
2585 | EXPORT_SYMBOL(usb_gadget_register_driver); | ||
2585 | 2586 | ||
2586 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 2587 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
2587 | { | 2588 | { |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index c75d9270c752..19372673bf09 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
196 | if (hostpc_reg) { | 196 | if (hostpc_reg) { |
197 | u32 t3; | 197 | u32 t3; |
198 | 198 | ||
199 | spin_unlock_irq(&ehci->lock); | ||
199 | msleep(5);/* 5ms for HCD enter low pwr mode */ | 200 | msleep(5);/* 5ms for HCD enter low pwr mode */ |
201 | spin_lock_irq(&ehci->lock); | ||
200 | t3 = ehci_readl(ehci, hostpc_reg); | 202 | t3 = ehci_readl(ehci, hostpc_reg); |
201 | ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); | 203 | ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); |
202 | t3 = ehci_readl(ehci, hostpc_reg); | 204 | t3 = ehci_readl(ehci, hostpc_reg); |
@@ -904,17 +906,18 @@ static int ehci_hub_control ( | |||
904 | if ((temp & PORT_PE) == 0 | 906 | if ((temp & PORT_PE) == 0 |
905 | || (temp & PORT_RESET) != 0) | 907 | || (temp & PORT_RESET) != 0) |
906 | goto error; | 908 | goto error; |
907 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | 909 | |
908 | /* After above check the port must be connected. | 910 | /* After above check the port must be connected. |
909 | * Set appropriate bit thus could put phy into low power | 911 | * Set appropriate bit thus could put phy into low power |
910 | * mode if we have hostpc feature | 912 | * mode if we have hostpc feature |
911 | */ | 913 | */ |
914 | temp &= ~PORT_WKCONN_E; | ||
915 | temp |= PORT_WKDISC_E | PORT_WKOC_E; | ||
916 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | ||
912 | if (hostpc_reg) { | 917 | if (hostpc_reg) { |
913 | temp &= ~PORT_WKCONN_E; | 918 | spin_unlock_irqrestore(&ehci->lock, flags); |
914 | temp |= (PORT_WKDISC_E | PORT_WKOC_E); | ||
915 | ehci_writel(ehci, temp | PORT_SUSPEND, | ||
916 | status_reg); | ||
917 | msleep(5);/* 5ms for HCD enter low pwr mode */ | 919 | msleep(5);/* 5ms for HCD enter low pwr mode */ |
920 | spin_lock_irqsave(&ehci->lock, flags); | ||
918 | temp1 = ehci_readl(ehci, hostpc_reg); | 921 | temp1 = ehci_readl(ehci, hostpc_reg); |
919 | ehci_writel(ehci, temp1 | HOSTPC_PHCD, | 922 | ehci_writel(ehci, temp1 | HOSTPC_PHCD, |
920 | hostpc_reg); | 923 | hostpc_reg); |
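Both ehci-hub.c hunks fix the same class of problem: msleep() can schedule, so it must not run while ehci->lock is held with interrupts off. The 5 ms wait for the PHY to reach low-power mode is now bracketed by dropping and retaking the lock, and the port wake bits are written before that window opens. The general shape, as a generic sketch rather than the EHCI code:

    #include <linux/delay.h>
    #include <linux/spinlock.h>

    struct demo_hw {
        spinlock_t lock;
    };

    /* Sketch: drop a spinlock around a sleep, then revalidate state. */
    static void demo_enter_low_power(struct demo_hw *hw)
    {
        unsigned long flags;

        spin_lock_irqsave(&hw->lock, flags);
        /* program suspend/wake bits here, while still protected */
        spin_unlock_irqrestore(&hw->lock, flags);

        msleep(5);                  /* may schedule: never call this under a spinlock */

        spin_lock_irqsave(&hw->lock, flags);
        /* re-read hardware state: it may have changed while the lock was dropped */
        spin_unlock_irqrestore(&hw->lock, flags);
    }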
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index d224ab467a40..e1232890c78b 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c | |||
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
105 | if (ep->td_base) | 105 | if (ep->td_base) |
106 | cpm_muram_free(cpm_muram_offset(ep->td_base)); | 106 | cpm_muram_free(cpm_muram_offset(ep->td_base)); |
107 | 107 | ||
108 | if (ep->conf_frame_Q) { | 108 | if (kfifo_initialized(&ep->conf_frame_Q)) { |
109 | size = cq_howmany(&ep->conf_frame_Q); | 109 | size = cq_howmany(&ep->conf_frame_Q); |
110 | for (; size; size--) { | 110 | for (; size; size--) { |
111 | struct packet *pkt = cq_get(&ep->conf_frame_Q); | 111 | struct packet *pkt = cq_get(&ep->conf_frame_Q); |
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
115 | cq_delete(&ep->conf_frame_Q); | 115 | cq_delete(&ep->conf_frame_Q); |
116 | } | 116 | } |
117 | 117 | ||
118 | if (ep->empty_frame_Q) { | 118 | if (kfifo_initialized(&ep->empty_frame_Q)) { |
119 | size = cq_howmany(&ep->empty_frame_Q); | 119 | size = cq_howmany(&ep->empty_frame_Q); |
120 | for (; size; size--) { | 120 | for (; size; size--) { |
121 | struct packet *pkt = cq_get(&ep->empty_frame_Q); | 121 | struct packet *pkt = cq_get(&ep->empty_frame_Q); |
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
125 | cq_delete(&ep->empty_frame_Q); | 125 | cq_delete(&ep->empty_frame_Q); |
126 | } | 126 | } |
127 | 127 | ||
128 | if (ep->dummy_packets_Q) { | 128 | if (kfifo_initialized(&ep->dummy_packets_Q)) { |
129 | size = cq_howmany(&ep->dummy_packets_Q); | 129 | size = cq_howmany(&ep->dummy_packets_Q); |
130 | for (; size; size--) { | 130 | for (; size; size--) { |
131 | u8 *buff = cq_get(&ep->dummy_packets_Q); | 131 | u8 *buff = cq_get(&ep->dummy_packets_Q); |
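These queues appear to have become kfifos embedded in the endpoint structure rather than separately allocated pointers (following the kfifo API rework), so the old NULL test is replaced by kfifo_initialized(), which reports whether the fifo's buffer has been set up. A minimal illustration with made-up names:

    #include <linux/kfifo.h>

    struct demo_ep {
        struct kfifo frame_q;           /* embedded, no longer a pointer */
    };

    static void demo_drain(struct demo_ep *ep)
    {
        if (!kfifo_initialized(&ep->frame_q))   /* replaces the old "if (!ep->frame_q)" */
            return;
        /* ... drain and free queued entries ... */
        kfifo_free(&ep->frame_q);
    }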
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index b7a661c02bcd..bee558aed427 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -35,7 +35,9 @@ | |||
35 | #include <linux/usb.h> | 35 | #include <linux/usb.h> |
36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/mm.h> | ||
38 | #include <linux/irq.h> | 39 | #include <linux/irq.h> |
40 | #include <asm/cacheflush.h> | ||
39 | 41 | ||
40 | #include "../core/hcd.h" | 42 | #include "../core/hcd.h" |
41 | #include "r8a66597.h" | 43 | #include "r8a66597.h" |
@@ -216,8 +218,17 @@ static void disable_controller(struct r8a66597 *r8a66597) | |||
216 | { | 218 | { |
217 | int port; | 219 | int port; |
218 | 220 | ||
221 | /* disable interrupts */ | ||
219 | r8a66597_write(r8a66597, 0, INTENB0); | 222 | r8a66597_write(r8a66597, 0, INTENB0); |
220 | r8a66597_write(r8a66597, 0, INTSTS0); | 223 | r8a66597_write(r8a66597, 0, INTENB1); |
224 | r8a66597_write(r8a66597, 0, BRDYENB); | ||
225 | r8a66597_write(r8a66597, 0, BEMPENB); | ||
226 | r8a66597_write(r8a66597, 0, NRDYENB); | ||
227 | |||
228 | /* clear status */ | ||
229 | r8a66597_write(r8a66597, 0, BRDYSTS); | ||
230 | r8a66597_write(r8a66597, 0, NRDYSTS); | ||
231 | r8a66597_write(r8a66597, 0, BEMPSTS); | ||
221 | 232 | ||
222 | for (port = 0; port < r8a66597->max_root_hub; port++) | 233 | for (port = 0; port < r8a66597->max_root_hub; port++) |
223 | r8a66597_disable_port(r8a66597, port); | 234 | r8a66597_disable_port(r8a66597, port); |
@@ -811,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, | |||
811 | enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); | 822 | enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); |
812 | } | 823 | } |
813 | 824 | ||
825 | static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, | ||
826 | int status) | ||
827 | __releases(r8a66597->lock) | ||
828 | __acquires(r8a66597->lock) | ||
829 | { | ||
830 | if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { | ||
831 | void *ptr; | ||
832 | |||
833 | for (ptr = urb->transfer_buffer; | ||
834 | ptr < urb->transfer_buffer + urb->transfer_buffer_length; | ||
835 | ptr += PAGE_SIZE) | ||
836 | flush_dcache_page(virt_to_page(ptr)); | ||
837 | } | ||
838 | |||
839 | usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); | ||
840 | spin_unlock(&r8a66597->lock); | ||
841 | usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); | ||
842 | spin_lock(&r8a66597->lock); | ||
843 | } | ||
844 | |||
814 | /* this function must be called with interrupt disabled */ | 845 | /* this function must be called with interrupt disabled */ |
815 | static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) | 846 | static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) |
816 | { | 847 | { |
@@ -829,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) | |||
829 | list_del(&td->queue); | 860 | list_del(&td->queue); |
830 | kfree(td); | 861 | kfree(td); |
831 | 862 | ||
832 | if (urb) { | 863 | if (urb) |
833 | usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), | 864 | r8a66597_urb_done(r8a66597, urb, -ENODEV); |
834 | urb); | ||
835 | 865 | ||
836 | spin_unlock(&r8a66597->lock); | ||
837 | usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, | ||
838 | -ENODEV); | ||
839 | spin_lock(&r8a66597->lock); | ||
840 | } | ||
841 | break; | 866 | break; |
842 | } | 867 | } |
843 | } | 868 | } |
@@ -997,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port, | |||
997 | /* this function must be called with interrupt disabled */ | 1022 | /* this function must be called with interrupt disabled */ |
998 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | 1023 | static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, |
999 | u16 syssts) | 1024 | u16 syssts) |
1025 | __releases(r8a66597->lock) | ||
1026 | __acquires(r8a66597->lock) | ||
1000 | { | 1027 | { |
1001 | if (syssts == SE0) { | 1028 | if (syssts == SE0) { |
1002 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); | 1029 | r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); |
@@ -1014,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, | |||
1014 | usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); | 1041 | usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); |
1015 | } | 1042 | } |
1016 | 1043 | ||
1044 | spin_unlock(&r8a66597->lock); | ||
1017 | usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); | 1045 | usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); |
1046 | spin_lock(&r8a66597->lock); | ||
1018 | } | 1047 | } |
1019 | 1048 | ||
1020 | /* this function must be called with interrupt disabled */ | 1049 | /* this function must be called with interrupt disabled */ |
@@ -1274,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) | |||
1274 | if (usb_pipeisoc(urb->pipe)) | 1303 | if (usb_pipeisoc(urb->pipe)) |
1275 | urb->start_frame = r8a66597_get_frame(hcd); | 1304 | urb->start_frame = r8a66597_get_frame(hcd); |
1276 | 1305 | ||
1277 | usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); | 1306 | r8a66597_urb_done(r8a66597, urb, status); |
1278 | spin_unlock(&r8a66597->lock); | ||
1279 | usb_hcd_giveback_urb(hcd, urb, status); | ||
1280 | spin_lock(&r8a66597->lock); | ||
1281 | } | 1307 | } |
1282 | 1308 | ||
1283 | if (restart) { | 1309 | if (restart) { |
@@ -2466,6 +2492,12 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) | |||
2466 | r8a66597->rh_timer.data = (unsigned long)r8a66597; | 2492 | r8a66597->rh_timer.data = (unsigned long)r8a66597; |
2467 | r8a66597->reg = (unsigned long)reg; | 2493 | r8a66597->reg = (unsigned long)reg; |
2468 | 2494 | ||
2495 | /* make sure no interrupts are pending */ | ||
2496 | ret = r8a66597_clock_enable(r8a66597); | ||
2497 | if (ret < 0) | ||
2498 | goto clean_up3; | ||
2499 | disable_controller(r8a66597); | ||
2500 | |||
2469 | for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { | 2501 | for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { |
2470 | INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); | 2502 | INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); |
2471 | init_timer(&r8a66597->td_timer[i]); | 2503 | init_timer(&r8a66597->td_timer[i]); |
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 0025847743f3..8b37a4b9839e 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c | |||
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = { | |||
3245 | { USB_DEVICE(0x0711, 0x0902) }, | 3245 | { USB_DEVICE(0x0711, 0x0902) }, |
3246 | { USB_DEVICE(0x0711, 0x0903) }, | 3246 | { USB_DEVICE(0x0711, 0x0903) }, |
3247 | { USB_DEVICE(0x0711, 0x0918) }, | 3247 | { USB_DEVICE(0x0711, 0x0918) }, |
3248 | { USB_DEVICE(0x0711, 0x0920) }, | ||
3248 | { USB_DEVICE(0x182d, 0x021c) }, | 3249 | { USB_DEVICE(0x182d, 0x021c) }, |
3249 | { USB_DEVICE(0x182d, 0x0269) }, | 3250 | { USB_DEVICE(0x182d, 0x0269) }, |
3250 | { } | 3251 | { } |
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index de56b3d743d7..3d2d3e549bd1 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig | |||
@@ -44,6 +44,7 @@ config ISP1301_OMAP | |||
44 | config USB_ULPI | 44 | config USB_ULPI |
45 | bool "Generic ULPI Transceiver Driver" | 45 | bool "Generic ULPI Transceiver Driver" |
46 | depends on ARM | 46 | depends on ARM |
47 | select USB_OTG_UTILS | ||
47 | help | 48 | help |
48 | Enable this to support ULPI connected USB OTG transceivers which | 49 | Enable this to support ULPI connected USB OTG transceivers which |
49 | are likely found on embedded boards. | 50 | are likely found on embedded boards. |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 216f187582ab..7638828e7317 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -50,7 +50,7 @@ | |||
50 | * Version Information | 50 | * Version Information |
51 | */ | 51 | */ |
52 | #define DRIVER_VERSION "v1.5.0" | 52 | #define DRIVER_VERSION "v1.5.0" |
53 | #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" | 53 | #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr" |
54 | #define DRIVER_DESC "USB FTDI Serial Converters Driver" | 54 | #define DRIVER_DESC "USB FTDI Serial Converters Driver" |
55 | 55 | ||
56 | static int debug; | 56 | static int debug; |
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { | |||
145 | 145 | ||
146 | 146 | ||
147 | 147 | ||
148 | /* | ||
149 | * Device ID not listed? Test via module params product/vendor or | ||
150 | * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! | ||
151 | */ | ||
148 | static struct usb_device_id id_table_combined [] = { | 152 | static struct usb_device_id id_table_combined [] = { |
149 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, | 153 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, |
150 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, | 154 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
151 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, | 155 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, |
156 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, | ||
152 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, | 157 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, |
153 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, | 158 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, |
154 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, | 159 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, |
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = { | |||
552 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, | 557 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, |
553 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, | 558 | { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, |
554 | /* | 559 | /* |
555 | * Due to many user requests for multiple ELV devices we enable | 560 | * ELV devices: |
556 | * them by default. | ||
557 | */ | 561 | */ |
562 | { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, | ||
563 | { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, | ||
564 | { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, | ||
565 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, | ||
566 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, | ||
567 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, | ||
568 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, | ||
569 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, | ||
558 | { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, | 570 | { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, |
559 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, | 571 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, |
560 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, | 572 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, |
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = { | |||
571 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, | 583 | { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, |
572 | { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, | 584 | { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, |
573 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, | 585 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, |
586 | { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, | ||
574 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, | 587 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, |
588 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, | ||
575 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 589 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
576 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | 590 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, |
577 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 591 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
578 | { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, | 592 | { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, |
593 | { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, | ||
594 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, | ||
595 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, | ||
596 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, | ||
579 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 597 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
580 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 598 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
581 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 599 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = { | |||
697 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, | 715 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, |
698 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, | 716 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, |
699 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, | 717 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, |
718 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, | ||
700 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, | 719 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, |
701 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, | 720 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, |
702 | { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, | 721 | { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index da92b4952ffb..c8951aeed983 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -38,6 +38,8 @@ | |||
38 | /* www.candapter.com Ewert Energy Systems CANdapter device */ | 38 | /* www.candapter.com Ewert Energy Systems CANdapter device */ |
39 | #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ | 39 | #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ |
40 | 40 | ||
41 | #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ | ||
42 | |||
41 | /* OOCDlink by Joern Kaipf <joernk@web.de> | 43 | /* OOCDlink by Joern Kaipf <joernk@web.de> |
42 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ | 44 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ |
43 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ | 45 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ |
@@ -161,22 +163,37 @@ | |||
161 | /* | 163 | /* |
162 | * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). | 164 | * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). |
163 | * All of these devices use FTDI's vendor ID (0x0403). | 165 | * All of these devices use FTDI's vendor ID (0x0403). |
166 | * Further IDs taken from ELV Windows .inf file. | ||
164 | * | 167 | * |
165 | * The previously included PID for the UO 100 module was incorrect. | 168 | * The previously included PID for the UO 100 module was incorrect. |
166 | * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). | 169 | * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). |
167 | * | 170 | * |
168 | * Armin Laeuger originally sent the PID for the UM 100 module. | 171 | * Armin Laeuger originally sent the PID for the UM 100 module. |
169 | */ | 172 | */ |
173 | #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ | ||
174 | #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ | ||
175 | #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ | ||
176 | #define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */ | ||
177 | #define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */ | ||
178 | #define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */ | ||
179 | #define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */ | ||
180 | #define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */ | ||
170 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ | 181 | #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ |
171 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ | 182 | #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ |
172 | #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ | 183 | #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ |
184 | #define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */ | ||
185 | #define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ | ||
186 | #define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ | ||
187 | #define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ | ||
173 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ | 188 | #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ |
174 | #define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ | 189 | #define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ |
175 | #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ | 190 | #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ |
176 | #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ | 191 | #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ |
177 | #define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ | 192 | #define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ |
178 | #define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ | 193 | #define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ |
194 | #define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */ | ||
179 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ | 195 | #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ |
196 | #define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */ | ||
180 | #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ | 197 | #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ |
181 | #define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ | 198 | #define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ |
182 | #define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ | 199 | #define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ |
@@ -968,6 +985,7 @@ | |||
968 | #define PAPOUCH_VID 0x5050 /* Vendor ID */ | 985 | #define PAPOUCH_VID 0x5050 /* Vendor ID */ |
969 | #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ | 986 | #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ |
970 | #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ | 987 | #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ |
988 | #define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ | ||
971 | 989 | ||
972 | /* | 990 | /* |
973 | * Marvell SheevaPlug | 991 | * Marvell SheevaPlug |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index ac1b6449fb6a..3eb6143bb646 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = { | |||
298 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 298 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ |
299 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 299 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
300 | }, | 300 | }, |
301 | { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ | ||
301 | 302 | ||
302 | { } | 303 | { } |
303 | }; | 304 | }; |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index c932f9053188..49575fba3756 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999, | |||
941 | UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, | 941 | UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, |
942 | "Microtech", | 942 | "Microtech", |
943 | "USB-SCSI-DB25", | 943 | "USB-SCSI-DB25", |
944 | US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, | 944 | US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, |
945 | US_FL_SCM_MULT_TARG ), | 945 | US_FL_SCM_MULT_TARG ), |
946 | 946 | ||
947 | UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, | 947 | UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, |
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index eb12182b2059..d25df51bb0d2 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, | |||
161 | return 0; | 161 | return 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void efifb_destroy(struct fb_info *info) | ||
165 | { | ||
166 | if (info->screen_base) | ||
167 | iounmap(info->screen_base); | ||
168 | release_mem_region(info->aperture_base, info->aperture_size); | ||
169 | framebuffer_release(info); | ||
170 | } | ||
171 | |||
164 | static struct fb_ops efifb_ops = { | 172 | static struct fb_ops efifb_ops = { |
165 | .owner = THIS_MODULE, | 173 | .owner = THIS_MODULE, |
174 | .fb_destroy = efifb_destroy, | ||
166 | .fb_setcolreg = efifb_setcolreg, | 175 | .fb_setcolreg = efifb_setcolreg, |
167 | .fb_fillrect = cfb_fillrect, | 176 | .fb_fillrect = cfb_fillrect, |
168 | .fb_copyarea = cfb_copyarea, | 177 | .fb_copyarea = cfb_copyarea, |
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev) | |||
281 | info->par = NULL; | 290 | info->par = NULL; |
282 | 291 | ||
283 | info->aperture_base = efifb_fix.smem_start; | 292 | info->aperture_base = efifb_fix.smem_start; |
284 | info->aperture_size = size_total; | 293 | info->aperture_size = size_remap; |
285 | 294 | ||
286 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); | 295 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); |
287 | if (!info->screen_base) { | 296 | if (!info->screen_base) { |
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 66358fa825f3..b4b6deceed15 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c | |||
@@ -593,7 +593,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf | |||
593 | */ | 593 | */ |
594 | static int imxfb_suspend(struct platform_device *dev, pm_message_t state) | 594 | static int imxfb_suspend(struct platform_device *dev, pm_message_t state) |
595 | { | 595 | { |
596 | struct imxfb_info *fbi = platform_get_drvdata(dev); | 596 | struct fb_info *info = platform_get_drvdata(dev); |
597 | struct imxfb_info *fbi = info->par; | ||
597 | 598 | ||
598 | pr_debug("%s\n", __func__); | 599 | pr_debug("%s\n", __func__); |
599 | 600 | ||
@@ -603,7 +604,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state) | |||
603 | 604 | ||
604 | static int imxfb_resume(struct platform_device *dev) | 605 | static int imxfb_resume(struct platform_device *dev) |
605 | { | 606 | { |
606 | struct imxfb_info *fbi = platform_get_drvdata(dev); | 607 | struct fb_info *info = platform_get_drvdata(dev); |
608 | struct imxfb_info *fbi = info->par; | ||
607 | 609 | ||
608 | pr_debug("%s\n", __func__); | 610 | pr_debug("%s\n", __func__); |
609 | 611 | ||
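imxfb_suspend()/imxfb_resume() were treating the platform drvdata as the driver-private imxfb_info, but probe stores the struct fb_info there and keeps the private data in info->par; the fix reads it through that indirection. A sketch of the pairing this relies on (the probe side is assumed, it is not part of the hunk):

    #include <linux/fb.h>
    #include <linux/platform_device.h>

    /* Sketch: drvdata holds the fb_info, driver data hangs off ->par. */
    static void demo_probe_store(struct platform_device *pdev, struct fb_info *info)
    {
        /* info->par was pointed at the driver's private structure at allocation time */
        platform_set_drvdata(pdev, info);
    }

    static void *demo_pm_fetch(struct platform_device *pdev)
    {
        struct fb_info *info = platform_get_drvdata(pdev);

        return info->par;               /* what suspend/resume really want */
    }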
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 054ef29be479..772ba3f45e6f 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -324,8 +324,11 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) | |||
324 | unsigned long flags; | 324 | unsigned long flags; |
325 | dma_cookie_t cookie; | 325 | dma_cookie_t cookie; |
326 | 326 | ||
327 | dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, | 327 | if (mx3_fbi->txd) |
328 | to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); | 328 | dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, |
329 | to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); | ||
330 | else | ||
331 | dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi); | ||
329 | 332 | ||
330 | /* This enables the channel */ | 333 | /* This enables the channel */ |
331 | if (mx3_fbi->cookie < 0) { | 334 | if (mx3_fbi->cookie < 0) { |
@@ -646,6 +649,7 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a | |||
646 | 649 | ||
647 | static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) | 650 | static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) |
648 | { | 651 | { |
652 | dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value); | ||
649 | /* This might be board-specific */ | 653 | /* This might be board-specific */ |
650 | mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); | 654 | mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); |
651 | return; | 655 | return; |
@@ -1486,12 +1490,12 @@ static int mx3fb_probe(struct platform_device *pdev) | |||
1486 | goto ersdc0; | 1490 | goto ersdc0; |
1487 | } | 1491 | } |
1488 | 1492 | ||
1493 | mx3fb->backlight_level = 255; | ||
1494 | |||
1489 | ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); | 1495 | ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); |
1490 | if (ret < 0) | 1496 | if (ret < 0) |
1491 | goto eisdc0; | 1497 | goto eisdc0; |
1492 | 1498 | ||
1493 | mx3fb->backlight_level = 255; | ||
1494 | |||
1495 | return 0; | 1499 | return 0; |
1496 | 1500 | ||
1497 | eisdc0: | 1501 | eisdc0: |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 088f32f29a6e..050ee147592f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -396,8 +396,8 @@ config SBC_FITPC2_WATCHDOG | |||
396 | tristate "Compulab SBC-FITPC2 watchdog" | 396 | tristate "Compulab SBC-FITPC2 watchdog" |
397 | depends on X86 | 397 | depends on X86 |
398 | ---help--- | 398 | ---help--- |
399 | This is the driver for the built-in watchdog timer on the fit-PC2 | 399 | This is the driver for the built-in watchdog timer on the fit-PC2, |
400 | Single-board computer made by Compulab. | 400 | fit-PC2i, CM-iAM single-board computers made by Compulab. |
401 | 401 | ||
402 | It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux. | 402 | It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux. |
403 | When "Watchdog Timer Value" enabled one can set 31-255 s operational range. | 403 | When "Watchdog Timer Value" enabled one can set 31-255 s operational range. |
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index c7b3f9df2317..2159e668751c 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c | |||
@@ -1,9 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Blackfin On-Chip Watchdog Driver | 2 | * Blackfin On-Chip Watchdog Driver |
3 | * Supports BF53[123]/BF53[467]/BF54[2489]/BF561 | ||
4 | * | 3 | * |
5 | * Originally based on softdog.c | 4 | * Originally based on softdog.c |
6 | * Copyright 2006-2007 Analog Devices Inc. | 5 | * Copyright 2006-2010 Analog Devices Inc. |
7 | * Copyright 2006-2007 Michele d'Amico | 6 | * Copyright 2006-2007 Michele d'Amico |
8 | * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> | 7 | * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> |
9 | * | 8 | * |
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void) | |||
137 | */ | 136 | */ |
138 | static int bfin_wdt_set_timeout(unsigned long t) | 137 | static int bfin_wdt_set_timeout(unsigned long t) |
139 | { | 138 | { |
140 | u32 cnt; | 139 | u32 cnt, max_t, sclk; |
141 | unsigned long flags; | 140 | unsigned long flags; |
142 | 141 | ||
143 | stampit(); | 142 | sclk = get_sclk(); |
143 | max_t = -1 / sclk; | ||
144 | cnt = t * sclk; | ||
145 | stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt); | ||
144 | 146 | ||
145 | cnt = t * get_sclk(); | 147 | if (t > max_t) { |
146 | if (cnt < get_sclk()) { | ||
147 | printk(KERN_WARNING PFX "timeout value is too large\n"); | 148 | printk(KERN_WARNING PFX "timeout value is too large\n"); |
148 | return -EINVAL; | 149 | return -EINVAL; |
149 | } | 150 | } |
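The rewritten bound computes the largest timeout whose tick count still fits in the 32-bit watchdog counter: max_t = 0xFFFFFFFF / sclk, written as -1 / sclk in unsigned arithmetic. The old `cnt < get_sclk()` test only caught wrap-arounds that happened to land below sclk. A worked example outside the kernel (the clock value is only an example):

    /* Worked example of the overflow bound. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t sclk  = 133000000;              /* e.g. a 133 MHz system clock */
        uint32_t max_t = (uint32_t)-1 / sclk;    /* largest t with t * sclk <= 0xFFFFFFFF */

        printf("max timeout = %u s\n", max_t);   /* prints 32 for this clock */
        return 0;
    }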
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c index 4f4b35a20d84..3c79dc587958 100644 --- a/drivers/watchdog/ixp2000_wdt.c +++ b/drivers/watchdog/ixp2000_wdt.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/timer.h> | ||
22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
23 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
24 | #include <linux/miscdevice.h> | 25 | #include <linux/miscdevice.h> |
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 91430a89107c..e6763d2a567b 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c | |||
@@ -46,9 +46,9 @@ static DEFINE_SPINLOCK(wdt_lock); | |||
46 | static void wdt_send_data(unsigned char command, unsigned char data) | 46 | static void wdt_send_data(unsigned char command, unsigned char data) |
47 | { | 47 | { |
48 | outb(command, COMMAND_PORT); | 48 | outb(command, COMMAND_PORT); |
49 | mdelay(100); | 49 | msleep(100); |
50 | outb(data, DATA_PORT); | 50 | outb(data, DATA_PORT); |
51 | mdelay(200); | 51 | msleep(200); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void wdt_enable(void) | 54 | static void wdt_enable(void) |
@@ -202,11 +202,10 @@ static int __init fitpc2_wdt_init(void) | |||
202 | { | 202 | { |
203 | int err; | 203 | int err; |
204 | 204 | ||
205 | if (strcmp("SBC-FITPC2", dmi_get_system_info(DMI_BOARD_NAME))) { | 205 | if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) |
206 | pr_info("board name is: %s. Should be SBC-FITPC2\n", | ||
207 | dmi_get_system_info(DMI_BOARD_NAME)); | ||
208 | return -ENODEV; | 206 | return -ENODEV; |
209 | } | 207 | |
208 | pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); | ||
210 | 209 | ||
211 | if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { | 210 | if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { |
212 | pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); | 211 | pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); |