author     Jens Axboe <jens.axboe@oracle.com>   2010-04-29 03:36:24 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2010-04-29 03:36:24 -0400
commit     7407cf355fdf5500430be966dbbde84a27293bad (patch)
tree       922861288ff38558ed721a79653f52b17b13bb95 /drivers
parent     6a47dc1418682c83d603b491df1d048f73aa973e (diff)
parent     79dba2eaa771c3173957eccfd288e0e0d12e4d3f (diff)
Merge branch 'master' into for-2.6.35
Conflicts:
fs/block_dev.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers')
216 files changed, 5305 insertions, 1729 deletions
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index a610ebe18edd..2fbfe51fb141 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -471,13 +471,18 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
 /* allow full data read from EC address space */
 if (obj_desc->field.region_obj->region.space_id ==
 ACPI_ADR_SPACE_EC) {
-if (obj_desc->common_field.bit_length > 8)
-obj_desc->common_field.access_bit_width =
-ACPI_ROUND_UP(obj_desc->common_field.
-bit_length, 8);
+if (obj_desc->common_field.bit_length > 8) {
+unsigned width =
+ACPI_ROUND_BITS_UP_TO_BYTES(
+obj_desc->common_field.bit_length);
+// access_bit_width is u8, don't overflow it
+if (width > 8)
+width = 8;
 obj_desc->common_field.access_byte_width =
-ACPI_DIV_8(obj_desc->common_field.
-access_bit_width);
+width;
+obj_desc->common_field.access_bit_width =
+8 * width;
+}
 }

 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac0f2cc..228740f356c9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
 struct ata_port *ap = qc->ap;
+struct request_queue *q = qc->scsicmd->device->request_queue;
+unsigned long flags;

 WARN_ON(!ap->ops->error_handler);

@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 * this function completes.
 */
+spin_lock_irqsave(q->queue_lock, flags);
 blk_abort_request(qc->scsicmd->request);
+spin_unlock_irqrestore(q->queue_lock, flags);
 }

 /**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 }

 /* okay, this error is ours */
+memset(&tf, 0, sizeof(tf));
 rc = ata_eh_read_log_10h(dev, &tag, &tf);
 if (rc) {
 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d3c34e..4164dd244dd0 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 67e0fc542249..93d1f9b469d4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1695,6 +1695,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
 cf |= CF_DRY_RUN;
 else {
 dev_err(DEV, "--dry-run is not supported by peer");
+kfree(p);
 return 0;
 }
 }
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 54f56ea8a786..c786023001d2 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -899,7 +899,8 @@ retry:

 drbd_thread_start(&mdev->asender);

-drbd_send_protocol(mdev);
+if (!drbd_send_protocol(mdev))
+return -1;
 drbd_send_sync_param(mdev, &mdev->sync_conf);
 drbd_send_sizes(mdev, 0);
 drbd_send_uuids(mdev);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d41331bc2aa7..aa4248efc5d8 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1817,8 +1817,6 @@ static int intel_845_configure(void)
 pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
 /* clear any possible error conditions */
 pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
-
-intel_i830_setup_flush();
 return 0;
 }

@@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = {
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_destroy_pages = agp_generic_destroy_pages,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.chipset_flush = intel_i830_chipset_flush,
 };

 static const struct agp_bridge_driver intel_850_driver = {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c9bc896d68af..90b199f97bec 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,

 xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */
 /* last check before exit */
-if (!io_detect_cm4000(iobase, dev))
-count = -ENODEV;
+if (!io_detect_cm4000(iobase, dev)) {
+rc = -ENODEV;
+goto release_io;
+}

 if (test_bit(IS_INVREV, &dev->flags) && count > 0)
 str_invert_revert(dev->rbuf, count);

 if (copy_to_user(buf, dev->rbuf, count))
-return -EFAULT;
+rc = -EFAULT;

 release_io:
 clear_bit(LOCK_IO, &dev->flags);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d5d575e889d..75d293eeb3ee 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1113,6 +1113,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 unsigned int cpu = sys_dev->id;
 unsigned long flags;
 struct cpufreq_policy *data;
+struct kobject *kobj;
+struct completion *cmp;
 #ifdef CONFIG_SMP
 struct sys_device *cpu_sys_dev;
 unsigned int j;
@@ -1141,10 +1143,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 dprintk("removing link\n");
 cpumask_clear_cpu(cpu, data->cpus);
 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-sysfs_remove_link(&sys_dev->kobj, "cpufreq");
+kobj = &sys_dev->kobj;
 cpufreq_cpu_put(data);
 cpufreq_debug_enable_ratelimit();
 unlock_policy_rwsem_write(cpu);
+sysfs_remove_link(kobj, "cpufreq");
 return 0;
 }
 #endif
@@ -1181,7 +1184,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 cpu_sys_dev = get_cpu_sysdev(j);
-sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
+kobj = &cpu_sys_dev->kobj;
+unlock_policy_rwsem_write(cpu);
+sysfs_remove_link(kobj, "cpufreq");
+lock_policy_rwsem_write(cpu);
 cpufreq_cpu_put(data);
 }
 }
@@ -1192,19 +1198,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 if (cpufreq_driver->target)
 __cpufreq_governor(data, CPUFREQ_GOV_STOP);

-kobject_put(&data->kobj);
+kobj = &data->kobj;
+cmp = &data->kobj_unregister;
+unlock_policy_rwsem_write(cpu);
+kobject_put(kobj);

 /* we need to make sure that the underlying kobj is actually
 * not referenced anymore by anybody before we proceed with
 * unloading.
 */
 dprintk("waiting for dropping of refcount\n");
-wait_for_completion(&data->kobj_unregister);
+wait_for_completion(cmp);
 dprintk("wait complete\n");

+lock_policy_rwsem_write(cpu);
 if (cpufreq_driver->exit)
 cpufreq_driver->exit(data);
-
 unlock_policy_rwsem_write(cpu);

 free_cpumask_var(data->related_cpus);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 599a40b25cb0..3a147874a465 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -444,6 +444,7 @@ static struct attribute_group dbs_attr_group_old = {
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 unsigned int load = 0;
+unsigned int max_load = 0;
 unsigned int freq_target;

 struct cpufreq_policy *policy;
@@ -501,6 +502,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 continue;

 load = 100 * (wall_time - idle_time) / wall_time;
+
+if (load > max_load)
+max_load = load;
 }

 /*
@@ -511,7 +515,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 return;

 /* Check for frequency increase */
-if (load > dbs_tuners_ins.up_threshold) {
+if (max_load > dbs_tuners_ins.up_threshold) {
 this_dbs_info->down_skip = 0;

 /* if we are already at full speed then break out early */
@@ -538,7 +542,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 * can support the current CPU usage without triggering the up
 * policy. To be safe, we focus 10 points under the threshold.
 */
-if (load < (dbs_tuners_ins.down_threshold - 10)) {
+if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
 freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

 this_dbs_info->requested_freq -= freq_target;
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 702dcc98c074..14a34d99eea2 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 u.packet.header_length = GET_HEADER_LENGTH(control);

 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+if (u.packet.header_length % 4 != 0)
+return -EINVAL;
 header_length = u.packet.header_length;
 } else {
 /*
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 if (ctx->header_size == 0) {
 if (u.packet.header_length > 0)
 return -EINVAL;
-} else if (u.packet.header_length % ctx->header_size != 0) {
+} else if (u.packet.header_length == 0 ||
+u.packet.header_length % ctx->header_size != 0) {
 return -EINVAL;
 }
 header_length = 0;
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client,
 return -ENODEV;

 if (_IOC_TYPE(cmd) != '#' ||
-_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
+_IOC_SIZE(cmd) > sizeof(buffer))
 return -EINVAL;

-if (_IOC_DIR(cmd) & _IOC_WRITE) {
-if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-copy_from_user(&buffer, arg, _IOC_SIZE(cmd))
+if (_IOC_DIR(cmd) == _IOC_READ)
+memset(&buffer, 0, _IOC_SIZE(cmd));
+
+if (_IOC_DIR(cmd) & _IOC_WRITE)
+if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
 return -EFAULT;
-}

 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
 if (ret < 0)
 return ret;

-if (_IOC_DIR(cmd) & _IOC_READ) {
-if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-copy_to_user(arg, &buffer, _IOC_SIZE(cmd))
+if (_IOC_DIR(cmd) & _IOC_READ)
+if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
 return -EFAULT;
-}

 return ret;
 }
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 3784a47865b7..8f5aebfb29df 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 for (try = 0; try < 5; try++) {
 new = allocate ? old - bandwidth : old + bandwidth;
 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-break;
+return -EBUSY;

 data[0] = cpu_to_be32(old);
 data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
 __be32 c, all, old;
-int i, retry = 5;
+int i, ret = -EIO, retry = 5;

 old = all = allocate ? cpu_to_be32(~0) : 0;

@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 if (!(channels_mask & 1 << i))
 continue;

+ret = -EBUSY;
+
 c = cpu_to_be32(1 << (31 - i));
 if ((old & c) != (all & c))
 continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,

 /* 1394-1995 IRM, fall through to retry. */
 default:
-if (retry--)
+if (retry) {
+retry--;
 i--;
+} else {
+ret = -EIO;
+}
 }
 }

-return -EIO;
+return ret;
 }

 static void deallocate_channel(struct fw_card *card, int irm_id,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 0cf4d7f562c5..94b16e0340ae 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
 struct fw_packet *packet, u32 csr)
 {
 struct fw_packet response;
-int tcode, length, ext_tcode, sel;
+int tcode, length, ext_tcode, sel, try;
 __be32 *payload, lock_old;
 u32 lock_arg, lock_data;

@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
 reg_write(ohci, OHCI1394_CSRControl, sel);

-if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-else
-fw_notify("swap not done yet\n");
+for (try = 0; try < 20; try++)
+if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+lock_old = cpu_to_be32(reg_read(ohci,
+OHCI1394_CSRData));
+fw_fill_response(&response, packet->header,
+RCODE_COMPLETE,
+&lock_old, sizeof(lock_old));
+goto out;
+}
+
+fw_error("swap not done (CSR lock timeout)\n");
+fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

-fw_fill_response(&response, packet->header,
-RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
 fw_core_handle_response(&ohci->card, &response);
 }

 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
-u64 offset;
-u32 csr;
+u64 offset, csr;

 if (ctx == &ctx->ohci->at_request_ctx) {
 packet->ack = ACK_PENDING;
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 7d521e1d17e1..b827c976dc62 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -252,6 +252,18 @@ static void pca953x_irq_bus_lock(unsigned int irq)
 static void pca953x_irq_bus_sync_unlock(unsigned int irq)
 {
 struct pca953x_chip *chip = get_irq_chip_data(irq);
+uint16_t new_irqs;
+uint16_t level;
+
+/* Look for any newly setup interrupt */
+new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
+new_irqs &= ~chip->reg_direction;
+
+while (new_irqs) {
+level = __ffs(new_irqs);
+pca953x_gpio_direction_input(&chip->gpio_chip, level);
+new_irqs &= ~(1 << level);
+}

 mutex_unlock(&chip->irq_lock);
 }
@@ -278,7 +290,7 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
 else
 chip->irq_trig_raise &= ~mask;

-return pca953x_gpio_direction_input(&chip->gpio_chip, level);
+return 0;
 }

 static struct irq_chip pca953x_irq_chip = {
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3bd872761567..a263b7070fc6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -476,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 unsigned long irqflags;

 spin_lock_irqsave(&dev->vbl_lock, irqflags);
+dev->driver->disable_vblank(dev, crtc);
 DRM_WAKEUP(&dev->vbl_queue[crtc]);
 dev->vblank_enabled[crtc] = 0;
 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b743411d8144..a0c365f2e521 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
 }
 driver = dev->driver;

-drm_vblank_cleanup(dev);
-
 drm_lastclose(dev);

 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
 dev->agp = NULL;
 }

+drm_vblank_cleanup(dev);
+
 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
 drm_rmmap(dev, r_list->map);
 drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b574503dddd0..a0b8447b06e7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 } else {
 struct drm_i915_gem_object *obj_priv;

-obj_priv = obj->driver_private;
+obj_priv = to_intel_bo(obj);
 seq_printf(m, "Fenced object[%2d] = %p: %s "
 "%08x %08zx %08x %s %08x %08x %d",
 i, obj, get_pin_flag(obj_priv),
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dc93939507d..c3cfafcbfe7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)

 dev_priv->cfb_size = size;

+dev_priv->compressed_fb = compressed_fb;
+
 if (IS_GM45(dev)) {
 g4x_disable_fbc(dev);
 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 i8xx_disable_fbc(dev);
 I915_WRITE(FBC_CFB_BASE, cfb_base);
 I915_WRITE(FBC_LL_BASE, ll_base);
+dev_priv->compressed_llb = compressed_llb;
 }

 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
 ll_base, size >> 20);
 }

+static void i915_cleanup_compression(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+
+drm_mm_put_block(dev_priv->compressed_fb);
+if (!IS_GM45(dev))
+drm_mm_put_block(dev_priv->compressed_llb);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
 mutex_lock(&dev->struct_mutex);
 i915_gem_cleanup_ringbuffer(dev);
 mutex_unlock(&dev->struct_mutex);
+if (I915_HAS_FBC(dev) && i915_powersave)
+i915_cleanup_compression(dev);
 drm_mm_takedown(&dev_priv->vram);
 i915_gem_lastclose(dev);

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4b26919abdb2..cc03537bb883 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
 };

 const static struct intel_device_info intel_i85x_info = {
-.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+.cursor_needs_physical = 1,
 };

 const static struct intel_device_info intel_i865g_info = {
@@ -80,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = {
 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i915gm_info = {
-.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+.is_i9xx = 1, .is_mobile = 1,
 .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i945g_info = {
 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i945gm_info = {
-.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 .has_hotplug = 1, .cursor_needs_physical = 1,
 };

@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
-INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -361,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 !dev_priv->mm.suspended) {
 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
 struct drm_gem_object *obj = ring->ring_obj;
-struct drm_i915_gem_object *obj_priv = obj->driver_private;
+struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 dev_priv->mm.suspended = 0;

 /* Stop the ring if it's running. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260fbc5e..6e4790065d9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -195,6 +195,7 @@ struct intel_overlay;
 struct intel_device_info {
 u8 is_mobile : 1;
 u8 is_i8xx : 1;
+u8 is_i85x : 1;
 u8 is_i915g : 1;
 u8 is_i9xx : 1;
 u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {

 drm_dma_handle_t *status_page_dmah;
 void *hw_status_page;
+void *seqno_page;
 dma_addr_t dma_status_page;
 uint32_t counter;
 unsigned int status_gfx_addr;
+unsigned int seqno_gfx_addr;
 drm_local_map_t hws_map;
 struct drm_gem_object *hws_obj;
+struct drm_gem_object *seqno_obj;
 struct drm_gem_object *pwrctx;

 struct resource mch_res;
@@ -611,6 +615,8 @@ typedef struct drm_i915_private {
 /* Reclocking support */
 bool render_reclock_avail;
 bool lvds_downclock_avail;
+/* indicate whether the LVDS EDID is OK */
+bool lvds_edid_good;
 /* indicates the reduced downclock for LVDS*/
 int lvds_downclock;
 struct work_struct idle_work;
@@ -628,6 +634,9 @@ typedef struct drm_i915_private {
 u8 max_delay;

 enum no_fbc_reason no_fbc_reason;
+
+struct drm_mm_node *compressed_fb;
+struct drm_mm_node *compressed_llb;
 } drm_i915_private_t;

 /** driver private structure attached to each drm_gem_object */
@@ -731,6 +740,8 @@ struct drm_i915_gem_object {
 atomic_t pending_flip;
 };

+#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+
 /**
 * Request queue structure.
 *
@@ -1066,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
 #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1131,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
 IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))

 #define PRIMARY_RINGBUFFER_SIZE (128*1024)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 368d726853d1..ef3d91dda71a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages, | |||
163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 163 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) |
164 | { | 164 | { |
165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 165 | drm_i915_private_t *dev_priv = obj->dev->dev_private; |
166 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 166 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
167 | 167 | ||
168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 168 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
169 | obj_priv->tiling_mode != I915_TILING_NONE; | 169 | obj_priv->tiling_mode != I915_TILING_NONE; |
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
264 | struct drm_i915_gem_pread *args, | 264 | struct drm_i915_gem_pread *args, |
265 | struct drm_file *file_priv) | 265 | struct drm_file *file_priv) |
266 | { | 266 | { |
267 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 267 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
268 | ssize_t remain; | 268 | ssize_t remain; |
269 | loff_t offset, page_base; | 269 | loff_t offset, page_base; |
270 | char __user *user_data; | 270 | char __user *user_data; |
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
285 | if (ret != 0) | 285 | if (ret != 0) |
286 | goto fail_put_pages; | 286 | goto fail_put_pages; |
287 | 287 | ||
288 | obj_priv = obj->driver_private; | 288 | obj_priv = to_intel_bo(obj); |
289 | offset = args->offset; | 289 | offset = args->offset; |
290 | 290 | ||
291 | while (remain > 0) { | 291 | while (remain > 0) { |
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
354 | struct drm_i915_gem_pread *args, | 354 | struct drm_i915_gem_pread *args, |
355 | struct drm_file *file_priv) | 355 | struct drm_file *file_priv) |
356 | { | 356 | { |
357 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 357 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
358 | struct mm_struct *mm = current->mm; | 358 | struct mm_struct *mm = current->mm; |
359 | struct page **user_pages; | 359 | struct page **user_pages; |
360 | ssize_t remain; | 360 | ssize_t remain; |
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
403 | if (ret != 0) | 403 | if (ret != 0) |
404 | goto fail_put_pages; | 404 | goto fail_put_pages; |
405 | 405 | ||
406 | obj_priv = obj->driver_private; | 406 | obj_priv = to_intel_bo(obj); |
407 | offset = args->offset; | 407 | offset = args->offset; |
408 | 408 | ||
409 | while (remain > 0) { | 409 | while (remain > 0) { |
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 479 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
480 | if (obj == NULL) | 480 | if (obj == NULL) |
481 | return -EBADF; | 481 | return -EBADF; |
482 | obj_priv = obj->driver_private; | 482 | obj_priv = to_intel_bo(obj); |
483 | 483 | ||
484 | /* Bounds check source. | 484 | /* Bounds check source. |
485 | * | 485 | * |
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
581 | struct drm_i915_gem_pwrite *args, | 581 | struct drm_i915_gem_pwrite *args, |
582 | struct drm_file *file_priv) | 582 | struct drm_file *file_priv) |
583 | { | 583 | { |
584 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 584 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
585 | drm_i915_private_t *dev_priv = dev->dev_private; | 585 | drm_i915_private_t *dev_priv = dev->dev_private; |
586 | ssize_t remain; | 586 | ssize_t remain; |
587 | loff_t offset, page_base; | 587 | loff_t offset, page_base; |
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
605 | if (ret) | 605 | if (ret) |
606 | goto fail; | 606 | goto fail; |
607 | 607 | ||
608 | obj_priv = obj->driver_private; | 608 | obj_priv = to_intel_bo(obj); |
609 | offset = obj_priv->gtt_offset + args->offset; | 609 | offset = obj_priv->gtt_offset + args->offset; |
610 | 610 | ||
611 | while (remain > 0) { | 611 | while (remain > 0) { |
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
655 | struct drm_i915_gem_pwrite *args, | 655 | struct drm_i915_gem_pwrite *args, |
656 | struct drm_file *file_priv) | 656 | struct drm_file *file_priv) |
657 | { | 657 | { |
658 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 658 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
659 | drm_i915_private_t *dev_priv = dev->dev_private; | 659 | drm_i915_private_t *dev_priv = dev->dev_private; |
660 | ssize_t remain; | 660 | ssize_t remain; |
661 | loff_t gtt_page_base, offset; | 661 | loff_t gtt_page_base, offset; |
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
699 | if (ret) | 699 | if (ret) |
700 | goto out_unpin_object; | 700 | goto out_unpin_object; |
701 | 701 | ||
702 | obj_priv = obj->driver_private; | 702 | obj_priv = to_intel_bo(obj); |
703 | offset = obj_priv->gtt_offset + args->offset; | 703 | offset = obj_priv->gtt_offset + args->offset; |
704 | 704 | ||
705 | while (remain > 0) { | 705 | while (remain > 0) { |
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
761 | struct drm_i915_gem_pwrite *args, | 761 | struct drm_i915_gem_pwrite *args, |
762 | struct drm_file *file_priv) | 762 | struct drm_file *file_priv) |
763 | { | 763 | { |
764 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 764 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
765 | ssize_t remain; | 765 | ssize_t remain; |
766 | loff_t offset, page_base; | 766 | loff_t offset, page_base; |
767 | char __user *user_data; | 767 | char __user *user_data; |
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
781 | if (ret != 0) | 781 | if (ret != 0) |
782 | goto fail_put_pages; | 782 | goto fail_put_pages; |
783 | 783 | ||
784 | obj_priv = obj->driver_private; | 784 | obj_priv = to_intel_bo(obj); |
785 | offset = args->offset; | 785 | offset = args->offset; |
786 | obj_priv->dirty = 1; | 786 | obj_priv->dirty = 1; |
787 | 787 | ||
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
829 | struct drm_i915_gem_pwrite *args, | 829 | struct drm_i915_gem_pwrite *args, |
830 | struct drm_file *file_priv) | 830 | struct drm_file *file_priv) |
831 | { | 831 | { |
832 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 832 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
833 | struct mm_struct *mm = current->mm; | 833 | struct mm_struct *mm = current->mm; |
834 | struct page **user_pages; | 834 | struct page **user_pages; |
835 | ssize_t remain; | 835 | ssize_t remain; |
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
877 | if (ret != 0) | 877 | if (ret != 0) |
878 | goto fail_put_pages; | 878 | goto fail_put_pages; |
879 | 879 | ||
880 | obj_priv = obj->driver_private; | 880 | obj_priv = to_intel_bo(obj); |
881 | offset = args->offset; | 881 | offset = args->offset; |
882 | obj_priv->dirty = 1; | 882 | obj_priv->dirty = 1; |
883 | 883 | ||
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 952 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
953 | if (obj == NULL) | 953 | if (obj == NULL) |
954 | return -EBADF; | 954 | return -EBADF; |
955 | obj_priv = obj->driver_private; | 955 | obj_priv = to_intel_bo(obj); |
956 | 956 | ||
957 | /* Bounds check destination. | 957 | /* Bounds check destination. |
958 | * | 958 | * |
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1034 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
1035 | if (obj == NULL) | 1035 | if (obj == NULL) |
1036 | return -EBADF; | 1036 | return -EBADF; |
1037 | obj_priv = obj->driver_private; | 1037 | obj_priv = to_intel_bo(obj); |
1038 | 1038 | ||
1039 | mutex_lock(&dev->struct_mutex); | 1039 | mutex_lock(&dev->struct_mutex); |
1040 | 1040 | ||
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", | 1096 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", |
1097 | __func__, args->handle, obj, obj->size); | 1097 | __func__, args->handle, obj, obj->size); |
1098 | #endif | 1098 | #endif |
1099 | obj_priv = obj->driver_private; | 1099 | obj_priv = to_intel_bo(obj); |
1100 | 1100 | ||
1101 | /* Pinned buffers may be scanout, so flush the cache */ | 1101 | /* Pinned buffers may be scanout, so flush the cache */ |
1102 | if (obj_priv->pin_count) | 1102 | if (obj_priv->pin_count) |
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1167 | struct drm_gem_object *obj = vma->vm_private_data; | 1167 | struct drm_gem_object *obj = vma->vm_private_data; |
1168 | struct drm_device *dev = obj->dev; | 1168 | struct drm_device *dev = obj->dev; |
1169 | struct drm_i915_private *dev_priv = dev->dev_private; | 1169 | struct drm_i915_private *dev_priv = dev->dev_private; |
1170 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1170 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1171 | pgoff_t page_offset; | 1171 | pgoff_t page_offset; |
1172 | unsigned long pfn; | 1172 | unsigned long pfn; |
1173 | int ret = 0; | 1173 | int ret = 0; |
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | |||
1234 | { | 1234 | { |
1235 | struct drm_device *dev = obj->dev; | 1235 | struct drm_device *dev = obj->dev; |
1236 | struct drm_gem_mm *mm = dev->mm_private; | 1236 | struct drm_gem_mm *mm = dev->mm_private; |
1237 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1237 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1238 | struct drm_map_list *list; | 1238 | struct drm_map_list *list; |
1239 | struct drm_local_map *map; | 1239 | struct drm_local_map *map; |
1240 | int ret = 0; | 1240 | int ret = 0; |
@@ -1305,7 +1305,7 @@ void | |||
1305 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1305 | i915_gem_release_mmap(struct drm_gem_object *obj) |
1306 | { | 1306 | { |
1307 | struct drm_device *dev = obj->dev; | 1307 | struct drm_device *dev = obj->dev; |
1308 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1308 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1309 | 1309 | ||
1310 | if (dev->dev_mapping) | 1310 | if (dev->dev_mapping) |
1311 | unmap_mapping_range(dev->dev_mapping, | 1311 | unmap_mapping_range(dev->dev_mapping, |
@@ -1316,7 +1316,7 @@ static void | |||
1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1316 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) |
1317 | { | 1317 | { |
1318 | struct drm_device *dev = obj->dev; | 1318 | struct drm_device *dev = obj->dev; |
1319 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1319 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1320 | struct drm_gem_mm *mm = dev->mm_private; | 1320 | struct drm_gem_mm *mm = dev->mm_private; |
1321 | struct drm_map_list *list; | 1321 | struct drm_map_list *list; |
1322 | 1322 | ||
@@ -1347,7 +1347,7 @@ static uint32_t | |||
1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 1347 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) |
1348 | { | 1348 | { |
1349 | struct drm_device *dev = obj->dev; | 1349 | struct drm_device *dev = obj->dev; |
1350 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1350 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1351 | int start, i; | 1351 | int start, i; |
1352 | 1352 | ||
1353 | /* | 1353 | /* |
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1406 | 1406 | ||
1407 | mutex_lock(&dev->struct_mutex); | 1407 | mutex_lock(&dev->struct_mutex); |
1408 | 1408 | ||
1409 | obj_priv = obj->driver_private; | 1409 | obj_priv = to_intel_bo(obj); |
1410 | 1410 | ||
1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1411 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1412 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1450 | void | 1450 | void |
1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1451 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
1452 | { | 1452 | { |
1453 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1453 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1454 | int page_count = obj->size / PAGE_SIZE; | 1454 | int page_count = obj->size / PAGE_SIZE; |
1455 | int i; | 1455 | int i; |
1456 | 1456 | ||
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) | |||
1486 | { | 1486 | { |
1487 | struct drm_device *dev = obj->dev; | 1487 | struct drm_device *dev = obj->dev; |
1488 | drm_i915_private_t *dev_priv = dev->dev_private; | 1488 | drm_i915_private_t *dev_priv = dev->dev_private; |
1489 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1489 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1490 | 1490 | ||
1491 | /* Add a reference if we're newly entering the active list. */ | 1491 | /* Add a reference if we're newly entering the active list. */ |
1492 | if (!obj_priv->active) { | 1492 | if (!obj_priv->active) { |
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1506 | { | 1506 | { |
1507 | struct drm_device *dev = obj->dev; | 1507 | struct drm_device *dev = obj->dev; |
1508 | drm_i915_private_t *dev_priv = dev->dev_private; | 1508 | drm_i915_private_t *dev_priv = dev->dev_private; |
1509 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1510 | 1510 | ||
1511 | BUG_ON(!obj_priv->active); | 1511 | BUG_ON(!obj_priv->active); |
1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 1512 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); |
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | |||
1517 | static void | 1517 | static void |
1518 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1518 | i915_gem_object_truncate(struct drm_gem_object *obj) |
1519 | { | 1519 | { |
1520 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1520 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1521 | struct inode *inode; | 1521 | struct inode *inode; |
1522 | 1522 | ||
1523 | inode = obj->filp->f_path.dentry->d_inode; | 1523 | inode = obj->filp->f_path.dentry->d_inode; |
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1538 | { | 1538 | { |
1539 | struct drm_device *dev = obj->dev; | 1539 | struct drm_device *dev = obj->dev; |
1540 | drm_i915_private_t *dev_priv = dev->dev_private; | 1540 | drm_i915_private_t *dev_priv = dev->dev_private; |
1541 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1541 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1542 | 1542 | ||
1543 | i915_verify_inactive(dev, __FILE__, __LINE__); | 1543 | i915_verify_inactive(dev, __FILE__, __LINE__); |
1544 | if (obj_priv->pin_count != 0) | 1544 | if (obj_priv->pin_count != 0) |
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1588 | } | 1588 | } |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | #define PIPE_CONTROL_FLUSH(addr) \ | ||
1592 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | ||
1593 | PIPE_CONTROL_DEPTH_STALL); \ | ||
1594 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | ||
1595 | OUT_RING(0); \ | ||
1596 | OUT_RING(0); \ | ||
1597 | |||
1591 | /** | 1598 | /** |
1592 | * Creates a new sequence number, emitting a write of it to the status page | 1599 | * Creates a new sequence number, emitting a write of it to the status page |
1593 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | 1600 | * plus an interrupt, which will trigger i915_user_interrupt_handler. |
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1622 | if (dev_priv->mm.next_gem_seqno == 0) | 1629 | if (dev_priv->mm.next_gem_seqno == 0) |
1623 | dev_priv->mm.next_gem_seqno++; | 1630 | dev_priv->mm.next_gem_seqno++; |
1624 | 1631 | ||
1625 | BEGIN_LP_RING(4); | 1632 | if (HAS_PIPE_CONTROL(dev)) { |
1626 | OUT_RING(MI_STORE_DWORD_INDEX); | 1633 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; |
1627 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
1628 | OUT_RING(seqno); | ||
1629 | 1634 | ||
1630 | OUT_RING(MI_USER_INTERRUPT); | 1635 | /* |
1631 | ADVANCE_LP_RING(); | 1636 | * Workaround qword write incoherence by flushing the |
1637 | * PIPE_NOTIFY buffers out to memory before requesting | ||
1638 | * an interrupt. | ||
1639 | */ | ||
1640 | BEGIN_LP_RING(32); | ||
1641 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
1642 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
1643 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
1644 | OUT_RING(seqno); | ||
1645 | OUT_RING(0); | ||
1646 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1647 | scratch_addr += 128; /* write to separate cachelines */ | ||
1648 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1649 | scratch_addr += 128; | ||
1650 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1651 | scratch_addr += 128; | ||
1652 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1653 | scratch_addr += 128; | ||
1654 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1655 | scratch_addr += 128; | ||
1656 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
1657 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
1658 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
1659 | PIPE_CONTROL_NOTIFY); | ||
1660 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
1661 | OUT_RING(seqno); | ||
1662 | OUT_RING(0); | ||
1663 | ADVANCE_LP_RING(); | ||
1664 | } else { | ||
1665 | BEGIN_LP_RING(4); | ||
1666 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
1667 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
1668 | OUT_RING(seqno); | ||
1669 | |||
1670 | OUT_RING(MI_USER_INTERRUPT); | ||
1671 | ADVANCE_LP_RING(); | ||
1672 | } | ||
1632 | 1673 | ||
1633 | DRM_DEBUG_DRIVER("%d\n", seqno); | 1674 | DRM_DEBUG_DRIVER("%d\n", seqno); |
1634 | 1675 | ||
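For reference, the HAS_PIPE_CONTROL branch above fills exactly the space it reserves: a four-dword PIPE_CONTROL that qword-writes the seqno, six four-dword PIPE_CONTROL_FLUSH expansions aimed at scratch offsets stepped by 128 bytes, and a final four-dword PIPE_CONTROL that also sets PIPE_CONTROL_NOTIFY to raise the interrupt, i.e. 4 + 24 + 4 = 32 dwords to match BEGIN_LP_RING(32). The standalone sketch below replays that sequence into a plain array so the count can be checked by hand; the bit definitions are copied from the i915_reg.h hunk later in this diff, while the ring emulation, seqno value and scratch address are purely illustrative (and the macro is wrapped in do/while only for the sketch).

    /* Standalone sketch: count the dwords emitted by the HAS_PIPE_CONTROL
     * branch of i915_add_request(). OUT_RING is emulated with an array;
     * this is not driver code. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GFX_OP_PIPE_CONTROL      ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
    #define PIPE_CONTROL_QW_WRITE    (1<<14)
    #define PIPE_CONTROL_DEPTH_STALL (1<<13)
    #define PIPE_CONTROL_WC_FLUSH    (1<<12)
    #define PIPE_CONTROL_TC_FLUSH    (1<<10)
    #define PIPE_CONTROL_NOTIFY      (1<<8)
    #define PIPE_CONTROL_GLOBAL_GTT  (1<<2)

    static uint32_t ring[64];
    static int tail;

    static void OUT_RING(uint32_t dw)
    {
            ring[tail++] = dw;
    }

    #define PIPE_CONTROL_FLUSH(addr) do {                              \
            OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |     \
                     PIPE_CONTROL_DEPTH_STALL);                        \
            OUT_RING((addr) | PIPE_CONTROL_GLOBAL_GTT);                \
            OUT_RING(0);                                               \
            OUT_RING(0);                                               \
    } while (0)

    int main(void)
    {
            uint32_t seqno_gfx_addr = 0x1000;       /* hypothetical GTT offset */
            uint32_t scratch_addr = seqno_gfx_addr + 128;
            uint32_t seqno = 1;
            int i;

            OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                     PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
            OUT_RING(seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
            OUT_RING(seqno);
            OUT_RING(0);

            for (i = 0; i < 6; i++) {               /* six scratch flushes */
                    PIPE_CONTROL_FLUSH(scratch_addr);
                    scratch_addr += 128;            /* separate cachelines */
            }

            OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                     PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                     PIPE_CONTROL_NOTIFY);
            OUT_RING(seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
            OUT_RING(seqno);
            OUT_RING(0);

            assert(tail == 32);                     /* fits BEGIN_LP_RING(32) */
            printf("emitted %d dwords\n", tail);
            return 0;
    }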
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev) | |||
1752 | { | 1793 | { |
1753 | drm_i915_private_t *dev_priv = dev->dev_private; | 1794 | drm_i915_private_t *dev_priv = dev->dev_private; |
1754 | 1795 | ||
1755 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | 1796 | if (HAS_PIPE_CONTROL(dev)) |
1797 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | ||
1798 | else | ||
1799 | return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | ||
1756 | } | 1800 | } |
1757 | 1801 | ||
1758 | /** | 1802 | /** |
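With PIPE_CONTROL in use, the current sequence number is read from the CPU-kmapped seqno page that the GPU qword-writes, rather than from the hardware status page, and the access goes through a volatile pointer so the compiler reloads a value that hardware changes behind its back. A minimal standalone sketch of that pattern follows, with hypothetical names and a simplified wrap-safe comparison of the kind GEM uses for seqnos; the real driver sleeps on the notify interrupt rather than polling.

    /* Sketch only: reading a GPU-written completion page. The volatile
     * cast forces a fresh load each time. */
    #include <stdint.h>

    static uint32_t read_seqno(const void *seqno_page)
    {
            return ((volatile const uint32_t *)seqno_page)[0];
    }

    static int seqno_passed(const void *seqno_page, uint32_t wanted)
    {
            /* wrap-safe "a >= b" for a monotonically increasing counter */
            return (int32_t)(read_seqno(seqno_page) - wanted) >= 0;
    }

    int main(void)
    {
            uint32_t page[1024] = { 42 };   /* stands in for the kmapped page */

            return !seqno_passed(page, 40); /* 0 on success */
    }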
@@ -1965,7 +2009,7 @@ static int | |||
1965 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) | 2009 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) |
1966 | { | 2010 | { |
1967 | struct drm_device *dev = obj->dev; | 2011 | struct drm_device *dev = obj->dev; |
1968 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2012 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1969 | int ret; | 2013 | int ret; |
1970 | 2014 | ||
1971 | /* This function only exists to support waiting for existing rendering, | 2015 | /* This function only exists to support waiting for existing rendering, |
@@ -1997,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1997 | { | 2041 | { |
1998 | struct drm_device *dev = obj->dev; | 2042 | struct drm_device *dev = obj->dev; |
1999 | drm_i915_private_t *dev_priv = dev->dev_private; | 2043 | drm_i915_private_t *dev_priv = dev->dev_private; |
2000 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2044 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2001 | int ret = 0; | 2045 | int ret = 0; |
2002 | 2046 | ||
2003 | #if WATCH_BUF | 2047 | #if WATCH_BUF |
@@ -2173,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2173 | #if WATCH_LRU | 2217 | #if WATCH_LRU |
2174 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 2218 | DRM_INFO("%s: evicting %p\n", __func__, obj); |
2175 | #endif | 2219 | #endif |
2176 | obj_priv = obj->driver_private; | 2220 | obj_priv = to_intel_bo(obj); |
2177 | BUG_ON(obj_priv->pin_count != 0); | 2221 | BUG_ON(obj_priv->pin_count != 0); |
2178 | BUG_ON(obj_priv->active); | 2222 | BUG_ON(obj_priv->active); |
2179 | 2223 | ||
@@ -2244,7 +2288,7 @@ int | |||
2244 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 2288 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2245 | gfp_t gfpmask) | 2289 | gfp_t gfpmask) |
2246 | { | 2290 | { |
2247 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2291 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2248 | int page_count, i; | 2292 | int page_count, i; |
2249 | struct address_space *mapping; | 2293 | struct address_space *mapping; |
2250 | struct inode *inode; | 2294 | struct inode *inode; |
@@ -2297,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2297 | struct drm_gem_object *obj = reg->obj; | 2341 | struct drm_gem_object *obj = reg->obj; |
2298 | struct drm_device *dev = obj->dev; | 2342 | struct drm_device *dev = obj->dev; |
2299 | drm_i915_private_t *dev_priv = dev->dev_private; | 2343 | drm_i915_private_t *dev_priv = dev->dev_private; |
2300 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2344 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2301 | int regnum = obj_priv->fence_reg; | 2345 | int regnum = obj_priv->fence_reg; |
2302 | uint64_t val; | 2346 | uint64_t val; |
2303 | 2347 | ||
@@ -2319,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2319 | struct drm_gem_object *obj = reg->obj; | 2363 | struct drm_gem_object *obj = reg->obj; |
2320 | struct drm_device *dev = obj->dev; | 2364 | struct drm_device *dev = obj->dev; |
2321 | drm_i915_private_t *dev_priv = dev->dev_private; | 2365 | drm_i915_private_t *dev_priv = dev->dev_private; |
2322 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2366 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2323 | int regnum = obj_priv->fence_reg; | 2367 | int regnum = obj_priv->fence_reg; |
2324 | uint64_t val; | 2368 | uint64_t val; |
2325 | 2369 | ||
@@ -2339,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2339 | struct drm_gem_object *obj = reg->obj; | 2383 | struct drm_gem_object *obj = reg->obj; |
2340 | struct drm_device *dev = obj->dev; | 2384 | struct drm_device *dev = obj->dev; |
2341 | drm_i915_private_t *dev_priv = dev->dev_private; | 2385 | drm_i915_private_t *dev_priv = dev->dev_private; |
2342 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2386 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2343 | int regnum = obj_priv->fence_reg; | 2387 | int regnum = obj_priv->fence_reg; |
2344 | int tile_width; | 2388 | int tile_width; |
2345 | uint32_t fence_reg, val; | 2389 | uint32_t fence_reg, val; |
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2362 | pitch_val = obj_priv->stride / tile_width; | 2406 | pitch_val = obj_priv->stride / tile_width; |
2363 | pitch_val = ffs(pitch_val) - 1; | 2407 | pitch_val = ffs(pitch_val) - 1; |
2364 | 2408 | ||
2409 | if (obj_priv->tiling_mode == I915_TILING_Y && | ||
2410 | HAS_128_BYTE_Y_TILING(dev)) | ||
2411 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2412 | else | ||
2413 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | ||
2414 | |||
2365 | val = obj_priv->gtt_offset; | 2415 | val = obj_priv->gtt_offset; |
2366 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2416 | if (obj_priv->tiling_mode == I915_TILING_Y) |
2367 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2417 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
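The new WARN_ONs bound the pitch field this function encodes: pitch_val is ffs(stride / tile_width) - 1, i.e. log2 of the stride measured in tiles, and must not exceed I915_FENCE_MAX_PITCH_VAL (reduced to 4 in the i915_reg.h hunk below) or, for 128-byte Y tiles, I830_FENCE_MAX_PITCH_VAL (6). Assuming the usual 512-byte X-tile and 128-byte Y-tile widths, both limits work out to an 8192-byte stride, matching the stride check added in i915_gem_tiling.c further down. A small worked example, with illustrative stride values:

    /* Worked example for the new pitch_val bounds. Tile widths here are
     * assumptions (512-byte X tiles, 128-byte Y tiles); constants are the
     * values from this patch. */
    #include <stdio.h>
    #include <strings.h>                    /* ffs() */

    #define I915_FENCE_MAX_PITCH_VAL 4      /* new value in this patch */
    #define I830_FENCE_MAX_PITCH_VAL 6

    int main(void)
    {
            unsigned int strides[] = { 512, 2048, 8192, 16384 };
            unsigned int i;

            for (i = 0; i < sizeof(strides) / sizeof(strides[0]); i++) {
                    unsigned int x_pitch = ffs(strides[i] / 512) - 1;
                    unsigned int y_pitch = ffs(strides[i] / 128) - 1;

                    printf("stride %5u: X pitch_val %u (%s), 128B-Y pitch_val %u (%s)\n",
                           strides[i],
                           x_pitch, x_pitch <= I915_FENCE_MAX_PITCH_VAL ? "ok" : "too big",
                           y_pitch, y_pitch <= I830_FENCE_MAX_PITCH_VAL ? "ok" : "too big");
            }
            return 0;
    }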
@@ -2381,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | |||
2381 | struct drm_gem_object *obj = reg->obj; | 2431 | struct drm_gem_object *obj = reg->obj; |
2382 | struct drm_device *dev = obj->dev; | 2432 | struct drm_device *dev = obj->dev; |
2383 | drm_i915_private_t *dev_priv = dev->dev_private; | 2433 | drm_i915_private_t *dev_priv = dev->dev_private; |
2384 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2434 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2385 | int regnum = obj_priv->fence_reg; | 2435 | int regnum = obj_priv->fence_reg; |
2386 | uint32_t val; | 2436 | uint32_t val; |
2387 | uint32_t pitch_val; | 2437 | uint32_t pitch_val; |
@@ -2425,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev) | |||
2425 | if (!reg->obj) | 2475 | if (!reg->obj) |
2426 | return i; | 2476 | return i; |
2427 | 2477 | ||
2428 | obj_priv = reg->obj->driver_private; | 2478 | obj_priv = to_intel_bo(reg->obj); |
2429 | if (!obj_priv->pin_count) | 2479 | if (!obj_priv->pin_count) |
2430 | avail++; | 2480 | avail++; |
2431 | } | 2481 | } |
@@ -2480,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | |||
2480 | { | 2530 | { |
2481 | struct drm_device *dev = obj->dev; | 2531 | struct drm_device *dev = obj->dev; |
2482 | struct drm_i915_private *dev_priv = dev->dev_private; | 2532 | struct drm_i915_private *dev_priv = dev->dev_private; |
2483 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2533 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2484 | struct drm_i915_fence_reg *reg = NULL; | 2534 | struct drm_i915_fence_reg *reg = NULL; |
2485 | int ret; | 2535 | int ret; |
2486 | 2536 | ||
@@ -2547,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) | |||
2547 | { | 2597 | { |
2548 | struct drm_device *dev = obj->dev; | 2598 | struct drm_device *dev = obj->dev; |
2549 | drm_i915_private_t *dev_priv = dev->dev_private; | 2599 | drm_i915_private_t *dev_priv = dev->dev_private; |
2550 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2600 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2551 | 2601 | ||
2552 | if (IS_GEN6(dev)) { | 2602 | if (IS_GEN6(dev)) { |
2553 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2603 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + |
@@ -2583,7 +2633,7 @@ int | |||
2583 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | 2633 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) |
2584 | { | 2634 | { |
2585 | struct drm_device *dev = obj->dev; | 2635 | struct drm_device *dev = obj->dev; |
2586 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2636 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2587 | 2637 | ||
2588 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2638 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) |
2589 | return 0; | 2639 | return 0; |
@@ -2621,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2621 | { | 2671 | { |
2622 | struct drm_device *dev = obj->dev; | 2672 | struct drm_device *dev = obj->dev; |
2623 | drm_i915_private_t *dev_priv = dev->dev_private; | 2673 | drm_i915_private_t *dev_priv = dev->dev_private; |
2624 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2674 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2625 | struct drm_mm_node *free_space; | 2675 | struct drm_mm_node *free_space; |
2626 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2676 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2627 | int ret; | 2677 | int ret; |
@@ -2728,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2728 | void | 2778 | void |
2729 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2779 | i915_gem_clflush_object(struct drm_gem_object *obj) |
2730 | { | 2780 | { |
2731 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2781 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2732 | 2782 | ||
2733 | /* If we don't have a page list set up, then we're not pinned | 2783 | /* If we don't have a page list set up, then we're not pinned |
2734 | * to GPU, and we can ignore the cache flush because it'll happen | 2784 | * to GPU, and we can ignore the cache flush because it'll happen |
@@ -2829,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | |||
2829 | int | 2879 | int |
2830 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2880 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) |
2831 | { | 2881 | { |
2832 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2882 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2833 | uint32_t old_write_domain, old_read_domains; | 2883 | uint32_t old_write_domain, old_read_domains; |
2834 | int ret; | 2884 | int ret; |
2835 | 2885 | ||
@@ -2879,7 +2929,7 @@ int | |||
2879 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | 2929 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) |
2880 | { | 2930 | { |
2881 | struct drm_device *dev = obj->dev; | 2931 | struct drm_device *dev = obj->dev; |
2882 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2932 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2883 | uint32_t old_write_domain, old_read_domains; | 2933 | uint32_t old_write_domain, old_read_domains; |
2884 | int ret; | 2934 | int ret; |
2885 | 2935 | ||
@@ -3092,7 +3142,7 @@ static void | |||
3092 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 3142 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
3093 | { | 3143 | { |
3094 | struct drm_device *dev = obj->dev; | 3144 | struct drm_device *dev = obj->dev; |
3095 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3145 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3096 | uint32_t invalidate_domains = 0; | 3146 | uint32_t invalidate_domains = 0; |
3097 | uint32_t flush_domains = 0; | 3147 | uint32_t flush_domains = 0; |
3098 | uint32_t old_read_domains; | 3148 | uint32_t old_read_domains; |
@@ -3177,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3177 | static void | 3227 | static void |
3178 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3228 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) |
3179 | { | 3229 | { |
3180 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3181 | 3231 | ||
3182 | if (!obj_priv->page_cpu_valid) | 3232 | if (!obj_priv->page_cpu_valid) |
3183 | return; | 3233 | return; |
@@ -3217,7 +3267,7 @@ static int | |||
3217 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3267 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, |
3218 | uint64_t offset, uint64_t size) | 3268 | uint64_t offset, uint64_t size) |
3219 | { | 3269 | { |
3220 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3270 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3221 | uint32_t old_read_domains; | 3271 | uint32_t old_read_domains; |
3222 | int i, ret; | 3272 | int i, ret; |
3223 | 3273 | ||
@@ -3286,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3286 | { | 3336 | { |
3287 | struct drm_device *dev = obj->dev; | 3337 | struct drm_device *dev = obj->dev; |
3288 | drm_i915_private_t *dev_priv = dev->dev_private; | 3338 | drm_i915_private_t *dev_priv = dev->dev_private; |
3289 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3339 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3290 | int i, ret; | 3340 | int i, ret; |
3291 | void __iomem *reloc_page; | 3341 | void __iomem *reloc_page; |
3292 | bool need_fence; | 3342 | bool need_fence; |
@@ -3337,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3337 | i915_gem_object_unpin(obj); | 3387 | i915_gem_object_unpin(obj); |
3338 | return -EBADF; | 3388 | return -EBADF; |
3339 | } | 3389 | } |
3340 | target_obj_priv = target_obj->driver_private; | 3390 | target_obj_priv = to_intel_bo(target_obj); |
3341 | 3391 | ||
3342 | #if WATCH_RELOC | 3392 | #if WATCH_RELOC |
3343 | DRM_INFO("%s: obj %p offset %08x target %d " | 3393 | DRM_INFO("%s: obj %p offset %08x target %d " |
@@ -3689,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
3689 | prepare_to_wait(&dev_priv->pending_flip_queue, | 3739 | prepare_to_wait(&dev_priv->pending_flip_queue, |
3690 | &wait, TASK_INTERRUPTIBLE); | 3740 | &wait, TASK_INTERRUPTIBLE); |
3691 | for (i = 0; i < count; i++) { | 3741 | for (i = 0; i < count; i++) { |
3692 | obj_priv = object_list[i]->driver_private; | 3742 | obj_priv = to_intel_bo(object_list[i]); |
3693 | if (atomic_read(&obj_priv->pending_flip) > 0) | 3743 | if (atomic_read(&obj_priv->pending_flip) > 0) |
3694 | break; | 3744 | break; |
3695 | } | 3745 | } |
@@ -3798,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3798 | goto err; | 3848 | goto err; |
3799 | } | 3849 | } |
3800 | 3850 | ||
3801 | obj_priv = object_list[i]->driver_private; | 3851 | obj_priv = to_intel_bo(object_list[i]); |
3802 | if (obj_priv->in_execbuffer) { | 3852 | if (obj_priv->in_execbuffer) { |
3803 | DRM_ERROR("Object %p appears more than once in object list\n", | 3853 | DRM_ERROR("Object %p appears more than once in object list\n", |
3804 | object_list[i]); | 3854 | object_list[i]); |
@@ -3924,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3924 | 3974 | ||
3925 | for (i = 0; i < args->buffer_count; i++) { | 3975 | for (i = 0; i < args->buffer_count; i++) { |
3926 | struct drm_gem_object *obj = object_list[i]; | 3976 | struct drm_gem_object *obj = object_list[i]; |
3927 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3977 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3928 | uint32_t old_write_domain = obj->write_domain; | 3978 | uint32_t old_write_domain = obj->write_domain; |
3929 | 3979 | ||
3930 | obj->write_domain = obj->pending_write_domain; | 3980 | obj->write_domain = obj->pending_write_domain; |
@@ -3999,7 +4049,7 @@ err: | |||
3999 | 4049 | ||
4000 | for (i = 0; i < args->buffer_count; i++) { | 4050 | for (i = 0; i < args->buffer_count; i++) { |
4001 | if (object_list[i]) { | 4051 | if (object_list[i]) { |
4002 | obj_priv = object_list[i]->driver_private; | 4052 | obj_priv = to_intel_bo(object_list[i]); |
4003 | obj_priv->in_execbuffer = false; | 4053 | obj_priv->in_execbuffer = false; |
4004 | } | 4054 | } |
4005 | drm_gem_object_unreference(object_list[i]); | 4055 | drm_gem_object_unreference(object_list[i]); |
@@ -4177,7 +4227,7 @@ int | |||
4177 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 4227 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
4178 | { | 4228 | { |
4179 | struct drm_device *dev = obj->dev; | 4229 | struct drm_device *dev = obj->dev; |
4180 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4181 | int ret; | 4231 | int ret; |
4182 | 4232 | ||
4183 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4233 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -4210,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) | |||
4210 | { | 4260 | { |
4211 | struct drm_device *dev = obj->dev; | 4261 | struct drm_device *dev = obj->dev; |
4212 | drm_i915_private_t *dev_priv = dev->dev_private; | 4262 | drm_i915_private_t *dev_priv = dev->dev_private; |
4213 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4263 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4214 | 4264 | ||
4215 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4265 | i915_verify_inactive(dev, __FILE__, __LINE__); |
4216 | obj_priv->pin_count--; | 4266 | obj_priv->pin_count--; |
@@ -4250,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4250 | mutex_unlock(&dev->struct_mutex); | 4300 | mutex_unlock(&dev->struct_mutex); |
4251 | return -EBADF; | 4301 | return -EBADF; |
4252 | } | 4302 | } |
4253 | obj_priv = obj->driver_private; | 4303 | obj_priv = to_intel_bo(obj); |
4254 | 4304 | ||
4255 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 4305 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
4256 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 4306 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
@@ -4307,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
4307 | return -EBADF; | 4357 | return -EBADF; |
4308 | } | 4358 | } |
4309 | 4359 | ||
4310 | obj_priv = obj->driver_private; | 4360 | obj_priv = to_intel_bo(obj); |
4311 | if (obj_priv->pin_filp != file_priv) { | 4361 | if (obj_priv->pin_filp != file_priv) { |
4312 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 4362 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4313 | args->handle); | 4363 | args->handle); |
@@ -4349,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4349 | */ | 4399 | */ |
4350 | i915_gem_retire_requests(dev); | 4400 | i915_gem_retire_requests(dev); |
4351 | 4401 | ||
4352 | obj_priv = obj->driver_private; | 4402 | obj_priv = to_intel_bo(obj); |
4353 | /* Don't count being on the flushing list against the object being | 4403 | /* Don't count being on the flushing list against the object being |
4354 | * done. Otherwise, a buffer left on the flushing list but not getting | 4404 | * done. Otherwise, a buffer left on the flushing list but not getting |
4355 | * flushed (because nobody's flushing that domain) won't ever return | 4405 | * flushed (because nobody's flushing that domain) won't ever return |
@@ -4395,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4395 | } | 4445 | } |
4396 | 4446 | ||
4397 | mutex_lock(&dev->struct_mutex); | 4447 | mutex_lock(&dev->struct_mutex); |
4398 | obj_priv = obj->driver_private; | 4448 | obj_priv = to_intel_bo(obj); |
4399 | 4449 | ||
4400 | if (obj_priv->pin_count) { | 4450 | if (obj_priv->pin_count) { |
4401 | drm_gem_object_unreference(obj); | 4451 | drm_gem_object_unreference(obj); |
@@ -4456,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4456 | void i915_gem_free_object(struct drm_gem_object *obj) | 4506 | void i915_gem_free_object(struct drm_gem_object *obj) |
4457 | { | 4507 | { |
4458 | struct drm_device *dev = obj->dev; | 4508 | struct drm_device *dev = obj->dev; |
4459 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 4509 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4460 | 4510 | ||
4461 | trace_i915_gem_object_destroy(obj); | 4511 | trace_i915_gem_object_destroy(obj); |
4462 | 4512 | ||
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev) | |||
4546 | return 0; | 4596 | return 0; |
4547 | } | 4597 | } |
4548 | 4598 | ||
4599 | /* | ||
4600 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
4601 | * over cache flushing. | ||
4602 | */ | ||
4603 | static int | ||
4604 | i915_gem_init_pipe_control(struct drm_device *dev) | ||
4605 | { | ||
4606 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4607 | struct drm_gem_object *obj; | ||
4608 | struct drm_i915_gem_object *obj_priv; | ||
4609 | int ret; | ||
4610 | |||
4611 | obj = drm_gem_object_alloc(dev, 4096); | ||
4612 | if (obj == NULL) { | ||
4613 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
4614 | ret = -ENOMEM; | ||
4615 | goto err; | ||
4616 | } | ||
4617 | obj_priv = to_intel_bo(obj); | ||
4618 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4619 | |||
4620 | ret = i915_gem_object_pin(obj, 4096); | ||
4621 | if (ret) | ||
4622 | goto err_unref; | ||
4623 | |||
4624 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | ||
4625 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | ||
4626 | if (dev_priv->seqno_page == NULL) | ||
4627 | goto err_unpin; | ||
4628 | |||
4629 | dev_priv->seqno_obj = obj; | ||
4630 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); | ||
4631 | |||
4632 | return 0; | ||
4633 | |||
4634 | err_unpin: | ||
4635 | i915_gem_object_unpin(obj); | ||
4636 | err_unref: | ||
4637 | drm_gem_object_unreference(obj); | ||
4638 | err: | ||
4639 | return ret; | ||
4640 | } | ||
4641 | |||
4549 | static int | 4642 | static int |
4550 | i915_gem_init_hws(struct drm_device *dev) | 4643 | i915_gem_init_hws(struct drm_device *dev) |
4551 | { | 4644 | { |
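i915_gem_init_pipe_control() above acquires its resources in order (allocate the 4 KiB object, pin it, kmap the first page) and unwinds through stacked labels so each failure releases only what was already taken. A generic sketch of that unwind idiom, with placeholder resources rather than kernel APIs:

    /* Generic sketch of the stacked-label unwind used above; acquire()
     * and release() are placeholders, not kernel functions. */
    #include <stdlib.h>

    struct res { int dummy; };

    static struct res *acquire(void) { return malloc(sizeof(struct res)); }
    static void release(struct res *r) { free(r); }

    static int setup(struct res **out_a, struct res **out_b)
    {
            struct res *a, *b;
            int ret;

            a = acquire();
            if (!a) {
                    ret = -1;
                    goto err;
            }
            b = acquire();
            if (!b) {
                    ret = -1;
                    goto err_release_a;     /* undo only what was taken */
            }
            *out_a = a;
            *out_b = b;
            return 0;

    err_release_a:
            release(a);
    err:
            return ret;
    }

    int main(void)
    {
            struct res *a = NULL, *b = NULL;

            if (setup(&a, &b) == 0) {
                    release(b);
                    release(a);
            }
            return 0;
    }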
@@ -4563,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4563 | obj = drm_gem_object_alloc(dev, 4096); | 4656 | obj = drm_gem_object_alloc(dev, 4096); |
4564 | if (obj == NULL) { | 4657 | if (obj == NULL) { |
4565 | DRM_ERROR("Failed to allocate status page\n"); | 4658 | DRM_ERROR("Failed to allocate status page\n"); |
4566 | return -ENOMEM; | 4659 | ret = -ENOMEM; |
4660 | goto err; | ||
4567 | } | 4661 | } |
4568 | obj_priv = obj->driver_private; | 4662 | obj_priv = to_intel_bo(obj); |
4569 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | 4663 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; |
4570 | 4664 | ||
4571 | ret = i915_gem_object_pin(obj, 4096); | 4665 | ret = i915_gem_object_pin(obj, 4096); |
4572 | if (ret != 0) { | 4666 | if (ret != 0) { |
4573 | drm_gem_object_unreference(obj); | 4667 | drm_gem_object_unreference(obj); |
4574 | return ret; | 4668 | goto err_unref; |
4575 | } | 4669 | } |
4576 | 4670 | ||
4577 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | 4671 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; |
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4580 | if (dev_priv->hw_status_page == NULL) { | 4674 | if (dev_priv->hw_status_page == NULL) { |
4581 | DRM_ERROR("Failed to map status page.\n"); | 4675 | DRM_ERROR("Failed to map status page.\n"); |
4582 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4676 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
4583 | i915_gem_object_unpin(obj); | 4677 | ret = -EINVAL; |
4584 | drm_gem_object_unreference(obj); | 4678 | goto err_unpin; |
4585 | return -EINVAL; | ||
4586 | } | 4679 | } |
4680 | |||
4681 | if (HAS_PIPE_CONTROL(dev)) { | ||
4682 | ret = i915_gem_init_pipe_control(dev); | ||
4683 | if (ret) | ||
4684 | goto err_unpin; | ||
4685 | } | ||
4686 | |||
4587 | dev_priv->hws_obj = obj; | 4687 | dev_priv->hws_obj = obj; |
4588 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 4688 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
4589 | if (IS_GEN6(dev)) { | 4689 | if (IS_GEN6(dev)) { |
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev) | |||
4596 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | 4696 | DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); |
4597 | 4697 | ||
4598 | return 0; | 4698 | return 0; |
4699 | |||
4700 | err_unpin: | ||
4701 | i915_gem_object_unpin(obj); | ||
4702 | err_unref: | ||
4703 | drm_gem_object_unreference(obj); | ||
4704 | err: | ||
4705 | return 0; | ||
4706 | } | ||
4707 | |||
4708 | static void | ||
4709 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | ||
4710 | { | ||
4711 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4712 | struct drm_gem_object *obj; | ||
4713 | struct drm_i915_gem_object *obj_priv; | ||
4714 | |||
4715 | obj = dev_priv->seqno_obj; | ||
4716 | obj_priv = to_intel_bo(obj); | ||
4717 | kunmap(obj_priv->pages[0]); | ||
4718 | i915_gem_object_unpin(obj); | ||
4719 | drm_gem_object_unreference(obj); | ||
4720 | dev_priv->seqno_obj = NULL; | ||
4721 | |||
4722 | dev_priv->seqno_page = NULL; | ||
4599 | } | 4723 | } |
4600 | 4724 | ||
4601 | static void | 4725 | static void |
@@ -4609,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
4609 | return; | 4733 | return; |
4610 | 4734 | ||
4611 | obj = dev_priv->hws_obj; | 4735 | obj = dev_priv->hws_obj; |
4612 | obj_priv = obj->driver_private; | 4736 | obj_priv = to_intel_bo(obj); |
4613 | 4737 | ||
4614 | kunmap(obj_priv->pages[0]); | 4738 | kunmap(obj_priv->pages[0]); |
4615 | i915_gem_object_unpin(obj); | 4739 | i915_gem_object_unpin(obj); |
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
4619 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 4743 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
4620 | dev_priv->hw_status_page = NULL; | 4744 | dev_priv->hw_status_page = NULL; |
4621 | 4745 | ||
4746 | if (HAS_PIPE_CONTROL(dev)) | ||
4747 | i915_gem_cleanup_pipe_control(dev); | ||
4748 | |||
4622 | /* Write high address into HWS_PGA when disabling. */ | 4749 | /* Write high address into HWS_PGA when disabling. */ |
4623 | I915_WRITE(HWS_PGA, 0x1ffff000); | 4750 | I915_WRITE(HWS_PGA, 0x1ffff000); |
4624 | } | 4751 | } |
@@ -4643,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4643 | i915_gem_cleanup_hws(dev); | 4770 | i915_gem_cleanup_hws(dev); |
4644 | return -ENOMEM; | 4771 | return -ENOMEM; |
4645 | } | 4772 | } |
4646 | obj_priv = obj->driver_private; | 4773 | obj_priv = to_intel_bo(obj); |
4647 | 4774 | ||
4648 | ret = i915_gem_object_pin(obj, 4096); | 4775 | ret = i915_gem_object_pin(obj, 4096); |
4649 | if (ret != 0) { | 4776 | if (ret != 0) { |
@@ -4936,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4936 | int ret; | 5063 | int ret; |
4937 | int page_count; | 5064 | int page_count; |
4938 | 5065 | ||
4939 | obj_priv = obj->driver_private; | 5066 | obj_priv = to_intel_bo(obj); |
4940 | if (!obj_priv->phys_obj) | 5067 | if (!obj_priv->phys_obj) |
4941 | return; | 5068 | return; |
4942 | 5069 | ||
@@ -4975,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4975 | if (id > I915_MAX_PHYS_OBJECT) | 5102 | if (id > I915_MAX_PHYS_OBJECT) |
4976 | return -EINVAL; | 5103 | return -EINVAL; |
4977 | 5104 | ||
4978 | obj_priv = obj->driver_private; | 5105 | obj_priv = to_intel_bo(obj); |
4979 | 5106 | ||
4980 | if (obj_priv->phys_obj) { | 5107 | if (obj_priv->phys_obj) { |
4981 | if (obj_priv->phys_obj->id == id) | 5108 | if (obj_priv->phys_obj->id == id) |
@@ -5026,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
5026 | struct drm_i915_gem_pwrite *args, | 5153 | struct drm_i915_gem_pwrite *args, |
5027 | struct drm_file *file_priv) | 5154 | struct drm_file *file_priv) |
5028 | { | 5155 | { |
5029 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 5156 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
5030 | void *obj_addr; | 5157 | void *obj_addr; |
5031 | int ret; | 5158 | int ret; |
5032 | char __user *user_data; | 5159 | char __user *user_data; |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index e602614bd3f8..35507cf53fa3 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -72,7 +72,7 @@ void | |||
72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, | 72 | i915_gem_dump_object(struct drm_gem_object *obj, int len, |
73 | const char *where, uint32_t mark) | 73 | const char *where, uint32_t mark) |
74 | { | 74 | { |
75 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 75 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
76 | int page; | 76 | int page; |
77 | 77 | ||
78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | 78 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); |
@@ -137,7 +137,7 @@ void | |||
137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | 137 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) |
138 | { | 138 | { |
139 | struct drm_device *dev = obj->dev; | 139 | struct drm_device *dev = obj->dev; |
140 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 140 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
141 | int page; | 141 | int page; |
142 | uint32_t *gtt_mapping; | 142 | uint32_t *gtt_mapping; |
143 | uint32_t *backing_map = NULL; | 143 | uint32_t *backing_map = NULL; |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index c01c878e51ba..4bdccefcf2cf 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
202 | * reg, so dont bother to check the size */ | 202 | * reg, so dont bother to check the size */ |
203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) | 203 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
204 | return false; | 204 | return false; |
205 | } else if (IS_I9XX(dev)) { | 205 | } else if (IS_GEN3(dev) || IS_GEN2(dev)) { |
206 | uint32_t pitch_val = ffs(stride / tile_width) - 1; | 206 | if (stride > 8192) |
207 | |||
208 | /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) | ||
209 | * instead of 4 (2KB) on 945s. | ||
210 | */ | ||
211 | if (pitch_val > I915_FENCE_MAX_PITCH_VAL || | ||
212 | size > (I830_FENCE_MAX_SIZE_VAL << 20)) | ||
213 | return false; | 207 | return false; |
214 | } else { | ||
215 | uint32_t pitch_val = ffs(stride / tile_width) - 1; | ||
216 | 208 | ||
217 | if (pitch_val > I830_FENCE_MAX_PITCH_VAL || | 209 | if (IS_GEN3(dev)) { |
218 | size > (I830_FENCE_MAX_SIZE_VAL << 19)) | 210 | if (size > I830_FENCE_MAX_SIZE_VAL << 20) |
219 | return false; | 211 | return false; |
212 | } else { | ||
213 | if (size > I830_FENCE_MAX_SIZE_VAL << 19) | ||
214 | return false; | ||
215 | } | ||
220 | } | 216 | } |
221 | 217 | ||
222 | /* 965+ just needs multiples of tile width */ | 218 | /* 965+ just needs multiples of tile width */ |
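The rewritten pre-965 branch splits the old combined check into a common 8192-byte stride cap plus a size cap of I830_FENCE_MAX_SIZE_VAL shifted by 20 on gen3 and by 19 on gen2. For reference, those comparisons evaluate to the limits printed below; the constant is the one from i915_reg.h and the MiB conversion is plain arithmetic.

    /* Evaluate the limits encoded by the new gen2/gen3 checks. */
    #include <stdio.h>

    #define I830_FENCE_MAX_SIZE_VAL (1 << 8)

    int main(void)
    {
            unsigned long gen3_max = (unsigned long)I830_FENCE_MAX_SIZE_VAL << 20;
            unsigned long gen2_max = (unsigned long)I830_FENCE_MAX_SIZE_VAL << 19;

            printf("max tiled stride on gen2/gen3: 8192 bytes\n");
            printf("gen3 max fence size: %lu MiB\n", gen3_max >> 20);
            printf("gen2 max fence size: %lu MiB\n", gen2_max >> 20);
            return 0;
    }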
@@ -240,7 +236,7 @@ bool | |||
240 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | 236 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) |
241 | { | 237 | { |
242 | struct drm_device *dev = obj->dev; | 238 | struct drm_device *dev = obj->dev; |
243 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 239 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
244 | 240 | ||
245 | if (obj_priv->gtt_space == NULL) | 241 | if (obj_priv->gtt_space == NULL) |
246 | return true; | 242 | return true; |
@@ -280,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
280 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 276 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
281 | if (obj == NULL) | 277 | if (obj == NULL) |
282 | return -EINVAL; | 278 | return -EINVAL; |
283 | obj_priv = obj->driver_private; | 279 | obj_priv = to_intel_bo(obj); |
284 | 280 | ||
285 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 281 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { |
286 | drm_gem_object_unreference_unlocked(obj); | 282 | drm_gem_object_unreference_unlocked(obj); |
@@ -364,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
364 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 360 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
365 | if (obj == NULL) | 361 | if (obj == NULL) |
366 | return -EINVAL; | 362 | return -EINVAL; |
367 | obj_priv = obj->driver_private; | 363 | obj_priv = to_intel_bo(obj); |
368 | 364 | ||
369 | mutex_lock(&dev->struct_mutex); | 365 | mutex_lock(&dev->struct_mutex); |
370 | 366 | ||
@@ -427,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | |||
427 | { | 423 | { |
428 | struct drm_device *dev = obj->dev; | 424 | struct drm_device *dev = obj->dev; |
429 | drm_i915_private_t *dev_priv = dev->dev_private; | 425 | drm_i915_private_t *dev_priv = dev->dev_private; |
430 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 426 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
431 | int page_count = obj->size >> PAGE_SHIFT; | 427 | int page_count = obj->size >> PAGE_SHIFT; |
432 | int i; | 428 | int i; |
433 | 429 | ||
@@ -456,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |||
456 | { | 452 | { |
457 | struct drm_device *dev = obj->dev; | 453 | struct drm_device *dev = obj->dev; |
458 | drm_i915_private_t *dev_priv = dev->dev_private; | 454 | drm_i915_private_t *dev_priv = dev->dev_private; |
459 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 455 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
460 | int page_count = obj->size >> PAGE_SHIFT; | 456 | int page_count = obj->size >> PAGE_SHIFT; |
461 | int i; | 457 | int i; |
462 | 458 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 49c458bc6502..2b8b969d0c15 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
260 | 260 | ||
261 | if (mode_config->num_connector) { | 261 | if (mode_config->num_connector) { |
262 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 262 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
263 | struct intel_output *intel_output = to_intel_output(connector); | 263 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
264 | 264 | ||
265 | if (intel_output->hot_plug) | 265 | if (intel_encoder->hot_plug) |
266 | (*intel_output->hot_plug) (intel_output); | 266 | (*intel_encoder->hot_plug) (intel_encoder); |
267 | } | 267 | } |
268 | } | 268 | } |
269 | /* Just fire off a uevent and let userspace tell us what to do */ | 269 | /* Just fire off a uevent and let userspace tell us what to do */ |
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
349 | READ_BREADCRUMB(dev_priv); | 349 | READ_BREADCRUMB(dev_priv); |
350 | } | 350 | } |
351 | 351 | ||
352 | if (gt_iir & GT_USER_INTERRUPT) { | 352 | if (gt_iir & GT_PIPE_NOTIFY) { |
353 | u32 seqno = i915_get_gem_seqno(dev); | 353 | u32 seqno = i915_get_gem_seqno(dev); |
354 | dev_priv->mm.irq_gem_seqno = seqno; | 354 | dev_priv->mm.irq_gem_seqno = seqno; |
355 | trace_i915_gem_request_complete(dev, seqno); | 355 | trace_i915_gem_request_complete(dev, seqno); |
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev, | |||
444 | if (src == NULL) | 444 | if (src == NULL) |
445 | return NULL; | 445 | return NULL; |
446 | 446 | ||
447 | src_priv = src->driver_private; | 447 | src_priv = to_intel_bo(src); |
448 | if (src_priv->pages == NULL) | 448 | if (src_priv->pages == NULL) |
449 | return NULL; | 449 | return NULL; |
450 | 450 | ||
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev) | |||
1005 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1005 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
1006 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { | 1006 | if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { |
1007 | if (HAS_PCH_SPLIT(dev)) | 1007 | if (HAS_PCH_SPLIT(dev)) |
1008 | ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 1008 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); |
1009 | else | 1009 | else |
1010 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 1010 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
1011 | } | 1011 | } |
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev) | |||
1021 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); | 1021 | BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); |
1022 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { | 1022 | if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { |
1023 | if (HAS_PCH_SPLIT(dev)) | 1023 | if (HAS_PCH_SPLIT(dev)) |
1024 | ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); | 1024 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); |
1025 | else | 1025 | else |
1026 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 1026 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
1027 | } | 1027 | } |
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1305 | /* enable kind of interrupts always enabled */ | 1305 | /* enable kind of interrupts always enabled */ |
1306 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1306 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1307 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1307 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1308 | u32 render_mask = GT_USER_INTERRUPT; | 1308 | u32 render_mask = GT_PIPE_NOTIFY; |
1309 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1309 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1310 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1310 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1311 | 1311 | ||
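In the interrupt paths above, Ironlake render completion now keys off GT_PIPE_NOTIFY, which the final PIPE_CONTROL with PIPE_CONTROL_NOTIFY in i915_add_request() is expected to raise, instead of GT_USER_INTERRUPT from MI_USER_INTERRUPT. A tiny standalone illustration of the bit substitution, using the values added to i915_reg.h in this diff:

    /* Standalone illustration of the mask change; bit values are the
     * ones defined in the i915_reg.h hunk below. */
    #include <stdint.h>
    #include <stdio.h>

    #define GT_PIPE_NOTIFY          (1 << 4)
    #define GT_USER_INTERRUPT       (1 << 0)

    static int seqno_completion_pending(uint32_t gt_iir)
    {
            return (gt_iir & GT_PIPE_NOTIFY) != 0;  /* was GT_USER_INTERRUPT */
    }

    int main(void)
    {
            printf("notify: %d, legacy user irq: %d\n",
                   seqno_completion_pending(GT_PIPE_NOTIFY),
                   seqno_completion_pending(GT_USER_INTERRUPT));
            return 0;
    }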
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 7cc8410239cb..8fcc75c1aa28 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
382 | struct drm_i915_private *dev_priv = dev->dev_private; | 382 | struct drm_i915_private *dev_priv = dev->dev_private; |
383 | struct intel_opregion *opregion = &dev_priv->opregion; | 383 | struct intel_opregion *opregion = &dev_priv->opregion; |
384 | struct drm_connector *connector; | 384 | struct drm_connector *connector; |
385 | acpi_handle handle; | ||
386 | struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; | ||
387 | unsigned long long device_id; | ||
388 | acpi_status status; | ||
385 | int i = 0; | 389 | int i = 0; |
386 | 390 | ||
391 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | ||
392 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) | ||
393 | return; | ||
394 | |||
395 | if (acpi_is_video_device(acpi_dev)) | ||
396 | acpi_video_bus = acpi_dev; | ||
397 | else { | ||
398 | list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { | ||
399 | if (acpi_is_video_device(acpi_cdev)) { | ||
400 | acpi_video_bus = acpi_cdev; | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | } | ||
405 | |||
406 | if (!acpi_video_bus) { | ||
407 | printk(KERN_WARNING "No ACPI video bus found\n"); | ||
408 | return; | ||
409 | } | ||
410 | |||
411 | list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { | ||
412 | if (i >= 8) { | ||
413 | dev_printk (KERN_ERR, &dev->pdev->dev, | ||
414 | "More than 8 outputs detected\n"); | ||
415 | return; | ||
416 | } | ||
417 | status = | ||
418 | acpi_evaluate_integer(acpi_cdev->handle, "_ADR", | ||
419 | NULL, &device_id); | ||
420 | if (ACPI_SUCCESS(status)) { | ||
421 | if (!device_id) | ||
422 | goto blind_set; | ||
423 | opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); | ||
424 | i++; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | end: | ||
429 | /* If fewer than 8 outputs, the list must be null terminated */ | ||
430 | if (i < 8) | ||
431 | opregion->acpi->didl[i] = 0; | ||
432 | return; | ||
433 | |||
434 | blind_set: | ||
435 | i = 0; | ||
387 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 436 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
388 | int output_type = ACPI_OTHER_OUTPUT; | 437 | int output_type = ACPI_OTHER_OUTPUT; |
389 | if (i >= 8) { | 438 | if (i >= 8) { |
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
416 | opregion->acpi->didl[i] |= (1<<31) | output_type | i; | 465 | opregion->acpi->didl[i] |= (1<<31) | output_type | i; |
417 | i++; | 466 | i++; |
418 | } | 467 | } |
419 | 468 | goto end; | |
420 | /* If fewer than 8 outputs, the list must be null terminated */ | ||
421 | if (i < 8) | ||
422 | opregion->acpi->didl[i] = 0; | ||
423 | } | 469 | } |
424 | 470 | ||
425 | int intel_opregion_init(struct drm_device *dev, int resume) | 471 | int intel_opregion_init(struct drm_device *dev, int resume) |
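intel_didl_outputs() now prefers the ACPI video bus hierarchy: it walks the children of the video device, stores the low bits (_ADR & 0x0f0f) of up to eight device ids, falls back to the old connector walk (the "blind set") when an id reads back as zero, and null-terminates the list when fewer than eight entries are written. The standalone sketch below mirrors only the fill-and-terminate part with made-up ids; the ACPI enumeration and the blind-set fallback are left out and noted as such.

    /* Sketch of the DIDL fill logic with hypothetical device ids. The
     * real code jumps to a connector-based "blind set" when an id is
     * zero; here that case is simply skipped. Not driver code. */
    #include <stdint.h>
    #include <stdio.h>

    #define DIDL_LEN 8

    static void fill_didl(uint32_t *didl, const uint64_t *ids, int nids)
    {
            int i = 0, j;

            for (j = 0; j < nids && i < DIDL_LEN; j++) {
                    if (!ids[j])
                            continue;
                    didl[i++] = (uint32_t)(ids[j] & 0x0f0f);
            }
            if (i < DIDL_LEN)       /* fewer than 8 outputs: terminate */
                    didl[i] = 0;
    }

    int main(void)
    {
            uint32_t didl[DIDL_LEN] = { 0 };
            uint64_t ids[] = { 0x0100, 0x0400, 0x0301 };
            int i;

            fill_didl(didl, ids, 3);
            for (i = 0; i < DIDL_LEN; i++)
                    printf("didl[%d] = 0x%04x\n", i, (unsigned)didl[i]);
            return 0;
    }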
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cbbf59f56dfa..4cbc5210fd30 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -230,6 +230,16 @@ | |||
230 | #define ASYNC_FLIP (1<<22) | 230 | #define ASYNC_FLIP (1<<22) |
231 | #define DISPLAY_PLANE_A (0<<20) | 231 | #define DISPLAY_PLANE_A (0<<20) |
232 | #define DISPLAY_PLANE_B (1<<20) | 232 | #define DISPLAY_PLANE_B (1<<20) |
233 | #define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) | ||
234 | #define PIPE_CONTROL_QW_WRITE (1<<14) | ||
235 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) | ||
236 | #define PIPE_CONTROL_WC_FLUSH (1<<12) | ||
237 | #define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ | ||
238 | #define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ | ||
239 | #define PIPE_CONTROL_ISP_DIS (1<<9) | ||
240 | #define PIPE_CONTROL_NOTIFY (1<<8) | ||
241 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | ||
242 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ | ||
233 | 243 | ||
234 | /* | 244 | /* |
235 | * Fence registers | 245 | * Fence registers |
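On the new definitions: GFX_OP_PIPE_CONTROL evaluates to 0x7a000002, and reading the trailing 2 as the usual command length field (total dwords minus two) gives a four-dword packet, which lines up with the four OUT_RING()s emitted per PIPE_CONTROL in the i915_gem.c hunks above. That length-field reading is an assumption about the encoding, not something this patch documents.

    /* Evaluate the new opcode; the "+ 2" length convention and the 0xff
     * mask are assumptions noted above. */
    #include <stdio.h>

    #define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)

    int main(void)
    {
            unsigned int hdr = GFX_OP_PIPE_CONTROL;

            printf("header 0x%08x, length field %u -> %u dwords\n",
                   hdr, hdr & 0xff, (hdr & 0xff) + 2);
            return 0;
    }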
@@ -241,7 +251,7 @@ | |||
241 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) | 251 | #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) |
242 | #define I830_FENCE_PITCH_SHIFT 4 | 252 | #define I830_FENCE_PITCH_SHIFT 4 |
243 | #define I830_FENCE_REG_VALID (1<<0) | 253 | #define I830_FENCE_REG_VALID (1<<0) |
244 | #define I915_FENCE_MAX_PITCH_VAL 0x10 | 254 | #define I915_FENCE_MAX_PITCH_VAL 4 |
245 | #define I830_FENCE_MAX_PITCH_VAL 6 | 255 | #define I830_FENCE_MAX_PITCH_VAL 6 |
246 | #define I830_FENCE_MAX_SIZE_VAL (1<<8) | 256 | #define I830_FENCE_MAX_SIZE_VAL (1<<8) |
247 | 257 | ||
@@ -2285,6 +2295,7 @@ | |||
2285 | #define DEIER 0x4400c | 2295 | #define DEIER 0x4400c |
2286 | 2296 | ||
2287 | /* GT interrupt */ | 2297 | /* GT interrupt */ |
2298 | #define GT_PIPE_NOTIFY (1 << 4) | ||
2288 | #define GT_SYNC_STATUS (1 << 2) | 2299 | #define GT_SYNC_STATUS (1 << 2) |
2289 | #define GT_USER_INTERRUPT (1 << 0) | 2300 | #define GT_USER_INTERRUPT (1 << 0) |
2290 | 2301 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 38110ce742a5..759c2ef72eff 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
247 | 247 | ||
248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) | 248 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
249 | { | 249 | { |
250 | struct intel_output *intel_output = to_intel_output(connector); | 250 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
251 | 251 | ||
252 | /* CRT should always be at 0, but check anyway */ | 252 | /* CRT should always be at 0, but check anyway */ |
253 | if (intel_output->type != INTEL_OUTPUT_ANALOG) | 253 | if (intel_encoder->type != INTEL_OUTPUT_ANALOG) |
254 | return false; | 254 | return false; |
255 | 255 | ||
256 | return intel_ddc_probe(intel_output); | 256 | return intel_ddc_probe(intel_encoder); |
257 | } | 257 | } |
258 | 258 | ||
259 | static enum drm_connector_status | 259 | static enum drm_connector_status |
260 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) | 260 | intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) |
261 | { | 261 | { |
262 | struct drm_encoder *encoder = &intel_output->enc; | 262 | struct drm_encoder *encoder = &intel_encoder->enc; |
263 | struct drm_device *dev = encoder->dev; | 263 | struct drm_device *dev = encoder->dev; |
264 | struct drm_i915_private *dev_priv = dev->dev_private; | 264 | struct drm_i915_private *dev_priv = dev->dev_private; |
265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) | |||
387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) | 387 | static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) |
388 | { | 388 | { |
389 | struct drm_device *dev = connector->dev; | 389 | struct drm_device *dev = connector->dev; |
390 | struct intel_output *intel_output = to_intel_output(connector); | 390 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
391 | struct drm_encoder *encoder = &intel_output->enc; | 391 | struct drm_encoder *encoder = &intel_encoder->enc; |
392 | struct drm_crtc *crtc; | 392 | struct drm_crtc *crtc; |
393 | int dpms_mode; | 393 | int dpms_mode; |
394 | enum drm_connector_status status; | 394 | enum drm_connector_status status; |
@@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
405 | 405 | ||
406 | /* for pre-945g platforms use load detect */ | 406 | /* for pre-945g platforms use load detect */ |
407 | if (encoder->crtc && encoder->crtc->enabled) { | 407 | if (encoder->crtc && encoder->crtc->enabled) { |
408 | status = intel_crt_load_detect(encoder->crtc, intel_output); | 408 | status = intel_crt_load_detect(encoder->crtc, intel_encoder); |
409 | } else { | 409 | } else { |
410 | crtc = intel_get_load_detect_pipe(intel_output, | 410 | crtc = intel_get_load_detect_pipe(intel_encoder, |
411 | NULL, &dpms_mode); | 411 | NULL, &dpms_mode); |
412 | if (crtc) { | 412 | if (crtc) { |
413 | status = intel_crt_load_detect(crtc, intel_output); | 413 | status = intel_crt_load_detect(crtc, intel_encoder); |
414 | intel_release_load_detect_pipe(intel_output, dpms_mode); | 414 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); |
415 | } else | 415 | } else |
416 | status = connector_status_unknown; | 416 | status = connector_status_unknown; |
417 | } | 417 | } |
@@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto | |||
421 | 421 | ||
422 | static void intel_crt_destroy(struct drm_connector *connector) | 422 | static void intel_crt_destroy(struct drm_connector *connector) |
423 | { | 423 | { |
424 | struct intel_output *intel_output = to_intel_output(connector); | 424 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
425 | 425 | ||
426 | intel_i2c_destroy(intel_output->ddc_bus); | 426 | intel_i2c_destroy(intel_encoder->ddc_bus); |
427 | drm_sysfs_connector_remove(connector); | 427 | drm_sysfs_connector_remove(connector); |
428 | drm_connector_cleanup(connector); | 428 | drm_connector_cleanup(connector); |
429 | kfree(connector); | 429 | kfree(connector); |
@@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector) | |||
432 | static int intel_crt_get_modes(struct drm_connector *connector) | 432 | static int intel_crt_get_modes(struct drm_connector *connector) |
433 | { | 433 | { |
434 | int ret; | 434 | int ret; |
435 | struct intel_output *intel_output = to_intel_output(connector); | 435 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
436 | struct i2c_adapter *ddcbus; | 436 | struct i2c_adapter *ddcbus; |
437 | struct drm_device *dev = connector->dev; | 437 | struct drm_device *dev = connector->dev; |
438 | 438 | ||
439 | 439 | ||
440 | ret = intel_ddc_get_modes(intel_output); | 440 | ret = intel_ddc_get_modes(intel_encoder); |
441 | if (ret || !IS_G4X(dev)) | 441 | if (ret || !IS_G4X(dev)) |
442 | goto end; | 442 | goto end; |
443 | 443 | ||
444 | ddcbus = intel_output->ddc_bus; | 444 | ddcbus = intel_encoder->ddc_bus; |
445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ | 445 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ |
446 | intel_output->ddc_bus = | 446 | intel_encoder->ddc_bus = |
447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); | 447 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); |
448 | 448 | ||
449 | if (!intel_output->ddc_bus) { | 449 | if (!intel_encoder->ddc_bus) { |
450 | intel_output->ddc_bus = ddcbus; | 450 | intel_encoder->ddc_bus = ddcbus; |
451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, | 451 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, |
452 | "DDC bus registration failed for CRTDDC_D.\n"); | 452 | "DDC bus registration failed for CRTDDC_D.\n"); |
453 | goto end; | 453 | goto end; |
454 | } | 454 | } |
455 | /* Try to get modes by GPIOD port */ | 455 | /* Try to get modes by GPIOD port */ |
456 | ret = intel_ddc_get_modes(intel_output); | 456 | ret = intel_ddc_get_modes(intel_encoder); |
457 | intel_i2c_destroy(ddcbus); | 457 | intel_i2c_destroy(ddcbus); |
458 | 458 | ||
459 | end: | 459 | end: |
@@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
506 | void intel_crt_init(struct drm_device *dev) | 506 | void intel_crt_init(struct drm_device *dev) |
507 | { | 507 | { |
508 | struct drm_connector *connector; | 508 | struct drm_connector *connector; |
509 | struct intel_output *intel_output; | 509 | struct intel_encoder *intel_encoder; |
510 | struct drm_i915_private *dev_priv = dev->dev_private; | 510 | struct drm_i915_private *dev_priv = dev->dev_private; |
511 | u32 i2c_reg; | 511 | u32 i2c_reg; |
512 | 512 | ||
513 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 513 | intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); |
514 | if (!intel_output) | 514 | if (!intel_encoder) |
515 | return; | 515 | return; |
516 | 516 | ||
517 | connector = &intel_output->base; | 517 | connector = &intel_encoder->base; |
518 | drm_connector_init(dev, &intel_output->base, | 518 | drm_connector_init(dev, &intel_encoder->base, |
519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 519 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
520 | 520 | ||
521 | drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, | 521 | drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, |
522 | DRM_MODE_ENCODER_DAC); | 522 | DRM_MODE_ENCODER_DAC); |
523 | 523 | ||
524 | drm_mode_connector_attach_encoder(&intel_output->base, | 524 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
525 | &intel_output->enc); | 525 | &intel_encoder->enc); |
526 | 526 | ||
527 | /* Set up the DDC bus. */ | 527 | /* Set up the DDC bus. */ |
528 | if (HAS_PCH_SPLIT(dev)) | 528 | if (HAS_PCH_SPLIT(dev)) |
@@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev) | |||
533 | if (dev_priv->crt_ddc_bus != 0) | 533 | if (dev_priv->crt_ddc_bus != 0) |
534 | i2c_reg = dev_priv->crt_ddc_bus; | 534 | i2c_reg = dev_priv->crt_ddc_bus; |
535 | } | 535 | } |
536 | intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); | 536 | intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); |
537 | if (!intel_output->ddc_bus) { | 537 | if (!intel_encoder->ddc_bus) { |
538 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 538 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
539 | "failed.\n"); | 539 | "failed.\n"); |
540 | return; | 540 | return; |
541 | } | 541 | } |
542 | 542 | ||
543 | intel_output->type = INTEL_OUTPUT_ANALOG; | 543 | intel_encoder->type = INTEL_OUTPUT_ANALOG; |
544 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 544 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
545 | (1 << INTEL_ANALOG_CLONE_BIT) | | 545 | (1 << INTEL_ANALOG_CLONE_BIT) | |
546 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 546 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
547 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 547 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
548 | connector->interlace_allowed = 0; | 548 | connector->interlace_allowed = 0; |
549 | connector->doublescan_allowed = 0; | 549 | connector->doublescan_allowed = 0; |
550 | 550 | ||
551 | drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); | 551 | drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); |
552 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 552 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
553 | 553 | ||
554 | drm_sysfs_connector_add(connector); | 554 | drm_sysfs_connector_add(connector); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e7e753b2845f..c7502b6b1600 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | 747 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { |
748 | if (l_entry->encoder && | 748 | if (l_entry->encoder && |
749 | l_entry->encoder->crtc == crtc) { | 749 | l_entry->encoder->crtc == crtc) { |
750 | struct intel_output *intel_output = to_intel_output(l_entry); | 750 | struct intel_encoder *intel_encoder = to_intel_encoder(l_entry); |
751 | if (intel_output->type == type) | 751 | if (intel_encoder->type == type) |
752 | return true; | 752 | return true; |
753 | } | 753 | } |
754 | } | 754 | } |
755 | return false; | 755 | return false; |
756 | } | 756 | } |
757 | 757 | ||
758 | struct drm_connector * | 758 | static struct drm_connector * |
759 | intel_pipe_get_output (struct drm_crtc *crtc) | 759 | intel_pipe_get_connector (struct drm_crtc *crtc) |
760 | { | 760 | { |
761 | struct drm_device *dev = crtc->dev; | 761 | struct drm_device *dev = crtc->dev; |
762 | struct drm_mode_config *mode_config = &dev->mode_config; | 762 | struct drm_mode_config *mode_config = &dev->mode_config; |
@@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1003 | struct drm_i915_private *dev_priv = dev->dev_private; | 1003 | struct drm_i915_private *dev_priv = dev->dev_private; |
1004 | struct drm_framebuffer *fb = crtc->fb; | 1004 | struct drm_framebuffer *fb = crtc->fb; |
1005 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1005 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1006 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | 1006 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); |
1007 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1007 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1008 | int plane, i; | 1008 | int plane, i; |
1009 | u32 fbc_ctl, fbc_ctl2; | 1009 | u32 fbc_ctl, fbc_ctl2; |
@@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1080 | struct drm_i915_private *dev_priv = dev->dev_private; | 1080 | struct drm_i915_private *dev_priv = dev->dev_private; |
1081 | struct drm_framebuffer *fb = crtc->fb; | 1081 | struct drm_framebuffer *fb = crtc->fb; |
1082 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1082 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1083 | struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; | 1083 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); |
1084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1084 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1085 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : | 1085 | int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : |
1086 | DPFC_CTL_PLANEB); | 1086 | DPFC_CTL_PLANEB); |
@@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1176 | return; | 1176 | return; |
1177 | 1177 | ||
1178 | intel_fb = to_intel_framebuffer(fb); | 1178 | intel_fb = to_intel_framebuffer(fb); |
1179 | obj_priv = intel_fb->obj->driver_private; | 1179 | obj_priv = to_intel_bo(intel_fb->obj); |
1180 | 1180 | ||
1181 | /* | 1181 | /* |
1182 | * If FBC is already on, we just have to verify that we can | 1182 | * If FBC is already on, we just have to verify that we can |
@@ -1243,7 +1243,7 @@ out_disable: | |||
1243 | static int | 1243 | static int |
1244 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1244 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
1245 | { | 1245 | { |
1246 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1246 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1247 | u32 alignment; | 1247 | u32 alignment; |
1248 | int ret; | 1248 | int ret; |
1249 | 1249 | ||
@@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1323 | 1323 | ||
1324 | intel_fb = to_intel_framebuffer(crtc->fb); | 1324 | intel_fb = to_intel_framebuffer(crtc->fb); |
1325 | obj = intel_fb->obj; | 1325 | obj = intel_fb->obj; |
1326 | obj_priv = obj->driver_private; | 1326 | obj_priv = to_intel_bo(obj); |
1327 | 1327 | ||
1328 | mutex_lock(&dev->struct_mutex); | 1328 | mutex_lock(&dev->struct_mutex); |
1329 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 1329 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
@@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1401 | 1401 | ||
1402 | if (old_fb) { | 1402 | if (old_fb) { |
1403 | intel_fb = to_intel_framebuffer(old_fb); | 1403 | intel_fb = to_intel_framebuffer(old_fb); |
1404 | obj_priv = intel_fb->obj->driver_private; | 1404 | obj_priv = to_intel_bo(intel_fb->obj); |
1405 | i915_gem_object_unpin(intel_fb->obj); | 1405 | i915_gem_object_unpin(intel_fb->obj); |
1406 | } | 1406 | } |
1407 | intel_increase_pllclock(crtc, true); | 1407 | intel_increase_pllclock(crtc, true); |
@@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2917 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; | 2917 | int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; |
2918 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; | 2918 | int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; |
2919 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 2919 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
2920 | int refclk, num_outputs = 0; | 2920 | int refclk, num_connectors = 0; |
2921 | intel_clock_t clock, reduced_clock; | 2921 | intel_clock_t clock, reduced_clock; |
2922 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; | 2922 | u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; |
2923 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; | 2923 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
@@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2943 | drm_vblank_pre_modeset(dev, pipe); | 2943 | drm_vblank_pre_modeset(dev, pipe); |
2944 | 2944 | ||
2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 2945 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
2946 | struct intel_output *intel_output = to_intel_output(connector); | 2946 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
2947 | 2947 | ||
2948 | if (!connector->encoder || connector->encoder->crtc != crtc) | 2948 | if (!connector->encoder || connector->encoder->crtc != crtc) |
2949 | continue; | 2949 | continue; |
2950 | 2950 | ||
2951 | switch (intel_output->type) { | 2951 | switch (intel_encoder->type) { |
2952 | case INTEL_OUTPUT_LVDS: | 2952 | case INTEL_OUTPUT_LVDS: |
2953 | is_lvds = true; | 2953 | is_lvds = true; |
2954 | break; | 2954 | break; |
2955 | case INTEL_OUTPUT_SDVO: | 2955 | case INTEL_OUTPUT_SDVO: |
2956 | case INTEL_OUTPUT_HDMI: | 2956 | case INTEL_OUTPUT_HDMI: |
2957 | is_sdvo = true; | 2957 | is_sdvo = true; |
2958 | if (intel_output->needs_tv_clock) | 2958 | if (intel_encoder->needs_tv_clock) |
2959 | is_tv = true; | 2959 | is_tv = true; |
2960 | break; | 2960 | break; |
2961 | case INTEL_OUTPUT_DVO: | 2961 | case INTEL_OUTPUT_DVO: |
@@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2975 | break; | 2975 | break; |
2976 | } | 2976 | } |
2977 | 2977 | ||
2978 | num_outputs++; | 2978 | num_connectors++; |
2979 | } | 2979 | } |
2980 | 2980 | ||
2981 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { | 2981 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { |
2982 | refclk = dev_priv->lvds_ssc_freq * 1000; | 2982 | refclk = dev_priv->lvds_ssc_freq * 1000; |
2983 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 2983 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
2984 | refclk / 1000); | 2984 | refclk / 1000); |
@@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3049 | if (is_edp) { | 3049 | if (is_edp) { |
3050 | struct drm_connector *edp; | 3050 | struct drm_connector *edp; |
3051 | target_clock = mode->clock; | 3051 | target_clock = mode->clock; |
3052 | edp = intel_pipe_get_output(crtc); | 3052 | edp = intel_pipe_get_connector(crtc); |
3053 | intel_edp_link_config(to_intel_output(edp), | 3053 | intel_edp_link_config(to_intel_encoder(edp), |
3054 | &lane, &link_bw); | 3054 | &lane, &link_bw); |
3055 | } else { | 3055 | } else { |
3056 | /* DP over FDI requires target mode clock | 3056 | /* DP over FDI requires target mode clock |
@@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3231 | /* XXX: just matching BIOS for now */ | 3231 | /* XXX: just matching BIOS for now */ |
3232 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 3232 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
3233 | dpll |= 3; | 3233 | dpll |= 3; |
3234 | else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) | 3234 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) |
3235 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 3235 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
3236 | else | 3236 | else |
3237 | dpll |= PLL_REF_INPUT_DREFCLK; | 3237 | dpll |= PLL_REF_INPUT_DREFCLK; |
@@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3511 | if (!bo) | 3511 | if (!bo) |
3512 | return -ENOENT; | 3512 | return -ENOENT; |
3513 | 3513 | ||
3514 | obj_priv = bo->driver_private; | 3514 | obj_priv = to_intel_bo(bo); |
3515 | 3515 | ||
3516 | if (bo->size < width * height * 4) { | 3516 | if (bo->size < width * height * 4) { |
3517 | DRM_ERROR("buffer is to small\n"); | 3517 | DRM_ERROR("buffer is to small\n"); |
@@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
3655 | * detection. | 3655 | * detection. |
3656 | * | 3656 | * |
3657 | * It will be up to the load-detect code to adjust the pipe as appropriate for | 3657 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
3658 | * its requirements. The pipe will be connected to no other outputs. | 3658 | * its requirements. The pipe will be connected to no other encoders. |
3659 | * | 3659 | * |
3660 | * Currently this code will only succeed if there is a pipe with no outputs | 3660 | * Currently this code will only succeed if there is a pipe with no encoders |
3661 | * configured for it. In the future, it could choose to temporarily disable | 3661 | * configured for it. In the future, it could choose to temporarily disable |
3662 | * some outputs to free up a pipe for its use. | 3662 | * some outputs to free up a pipe for its use. |
3663 | * | 3663 | * |
@@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = { | |||
3670 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | 3670 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), |
3671 | }; | 3671 | }; |
3672 | 3672 | ||
3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | 3673 | struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
3674 | struct drm_display_mode *mode, | 3674 | struct drm_display_mode *mode, |
3675 | int *dpms_mode) | 3675 | int *dpms_mode) |
3676 | { | 3676 | { |
3677 | struct intel_crtc *intel_crtc; | 3677 | struct intel_crtc *intel_crtc; |
3678 | struct drm_crtc *possible_crtc; | 3678 | struct drm_crtc *possible_crtc; |
3679 | struct drm_crtc *supported_crtc =NULL; | 3679 | struct drm_crtc *supported_crtc =NULL; |
3680 | struct drm_encoder *encoder = &intel_output->enc; | 3680 | struct drm_encoder *encoder = &intel_encoder->enc; |
3681 | struct drm_crtc *crtc = NULL; | 3681 | struct drm_crtc *crtc = NULL; |
3682 | struct drm_device *dev = encoder->dev; | 3682 | struct drm_device *dev = encoder->dev; |
3683 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3683 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
@@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | |||
3729 | } | 3729 | } |
3730 | 3730 | ||
3731 | encoder->crtc = crtc; | 3731 | encoder->crtc = crtc; |
3732 | intel_output->base.encoder = encoder; | 3732 | intel_encoder->base.encoder = encoder; |
3733 | intel_output->load_detect_temp = true; | 3733 | intel_encoder->load_detect_temp = true; |
3734 | 3734 | ||
3735 | intel_crtc = to_intel_crtc(crtc); | 3735 | intel_crtc = to_intel_crtc(crtc); |
3736 | *dpms_mode = intel_crtc->dpms_mode; | 3736 | *dpms_mode = intel_crtc->dpms_mode; |
@@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | |||
3755 | return crtc; | 3755 | return crtc; |
3756 | } | 3756 | } |
3757 | 3757 | ||
3758 | void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) | 3758 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode) |
3759 | { | 3759 | { |
3760 | struct drm_encoder *encoder = &intel_output->enc; | 3760 | struct drm_encoder *encoder = &intel_encoder->enc; |
3761 | struct drm_device *dev = encoder->dev; | 3761 | struct drm_device *dev = encoder->dev; |
3762 | struct drm_crtc *crtc = encoder->crtc; | 3762 | struct drm_crtc *crtc = encoder->crtc; |
3763 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3763 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3764 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | 3764 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
3765 | 3765 | ||
3766 | if (intel_output->load_detect_temp) { | 3766 | if (intel_encoder->load_detect_temp) { |
3767 | encoder->crtc = NULL; | 3767 | encoder->crtc = NULL; |
3768 | intel_output->base.encoder = NULL; | 3768 | intel_encoder->base.encoder = NULL; |
3769 | intel_output->load_detect_temp = false; | 3769 | intel_encoder->load_detect_temp = false; |
3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); | 3770 | crtc->enabled = drm_helper_crtc_in_use(crtc); |
3771 | drm_helper_disable_unused_functions(dev); | 3771 | drm_helper_disable_unused_functions(dev); |
3772 | } | 3772 | } |
3773 | 3773 | ||
3774 | /* Switch crtc and output back off if necessary */ | 3774 | /* Switch crtc and encoder back off if necessary */ |
3775 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { | 3775 | if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { |
3776 | if (encoder->crtc == crtc) | 3776 | if (encoder->crtc == crtc) |
3777 | encoder_funcs->dpms(encoder, dpms_mode); | 3777 | encoder_funcs->dpms(encoder, dpms_mode); |
@@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4156 | work = intel_crtc->unpin_work; | 4156 | work = intel_crtc->unpin_work; |
4157 | if (work == NULL || !work->pending) { | 4157 | if (work == NULL || !work->pending) { |
4158 | if (work && !work->pending) { | 4158 | if (work && !work->pending) { |
4159 | obj_priv = work->pending_flip_obj->driver_private; | 4159 | obj_priv = to_intel_bo(work->pending_flip_obj); |
4160 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | 4160 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", |
4161 | obj_priv, | 4161 | obj_priv, |
4162 | atomic_read(&obj_priv->pending_flip)); | 4162 | atomic_read(&obj_priv->pending_flip)); |
@@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4181 | 4181 | ||
4182 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4182 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4183 | 4183 | ||
4184 | obj_priv = work->pending_flip_obj->driver_private; | 4184 | obj_priv = to_intel_bo(work->pending_flip_obj); |
4185 | 4185 | ||
4186 | /* Initial scanout buffer will have a 0 pending flip count */ | 4186 | /* Initial scanout buffer will have a 0 pending flip count */ |
4187 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | 4187 | if ((atomic_read(&obj_priv->pending_flip) == 0) || |
@@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4252 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4252 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4253 | if (ret != 0) { | 4253 | if (ret != 0) { |
4254 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | 4254 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", |
4255 | obj->driver_private); | 4255 | to_intel_bo(obj)); |
4256 | kfree(work); | 4256 | kfree(work); |
4257 | intel_crtc->unpin_work = NULL; | 4257 | intel_crtc->unpin_work = NULL; |
4258 | mutex_unlock(&dev->struct_mutex); | 4258 | mutex_unlock(&dev->struct_mutex); |
@@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4266 | crtc->fb = fb; | 4266 | crtc->fb = fb; |
4267 | i915_gem_object_flush_write_domain(obj); | 4267 | i915_gem_object_flush_write_domain(obj); |
4268 | drm_vblank_get(dev, intel_crtc->pipe); | 4268 | drm_vblank_get(dev, intel_crtc->pipe); |
4269 | obj_priv = obj->driver_private; | 4269 | obj_priv = to_intel_bo(obj); |
4270 | atomic_inc(&obj_priv->pending_flip); | 4270 | atomic_inc(&obj_priv->pending_flip); |
4271 | work->pending_flip_obj = obj; | 4271 | work->pending_flip_obj = obj; |
4272 | 4272 | ||
@@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask) | |||
4399 | int entry = 0; | 4399 | int entry = 0; |
4400 | 4400 | ||
4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
4402 | struct intel_output *intel_output = to_intel_output(connector); | 4402 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
4403 | if (type_mask & intel_output->clone_mask) | 4403 | if (type_mask & intel_encoder->clone_mask) |
4404 | index_mask |= (1 << entry); | 4404 | index_mask |= (1 << entry); |
4405 | entry++; | 4405 | entry++; |
4406 | } | 4406 | } |
@@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4495 | intel_tv_init(dev); | 4495 | intel_tv_init(dev); |
4496 | 4496 | ||
4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 4497 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
4498 | struct intel_output *intel_output = to_intel_output(connector); | 4498 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
4499 | struct drm_encoder *encoder = &intel_output->enc; | 4499 | struct drm_encoder *encoder = &intel_encoder->enc; |
4500 | 4500 | ||
4501 | encoder->possible_crtcs = intel_output->crtc_mask; | 4501 | encoder->possible_crtcs = intel_encoder->crtc_mask; |
4502 | encoder->possible_clones = intel_connector_clones(dev, | 4502 | encoder->possible_clones = intel_connector_clones(dev, |
4503 | intel_output->clone_mask); | 4503 | intel_encoder->clone_mask); |
4504 | } | 4504 | } |
4505 | } | 4505 | } |
4506 | 4506 | ||
@@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4779 | struct drm_i915_gem_object *obj_priv = NULL; | 4779 | struct drm_i915_gem_object *obj_priv = NULL; |
4780 | 4780 | ||
4781 | if (dev_priv->pwrctx) { | 4781 | if (dev_priv->pwrctx) { |
4782 | obj_priv = dev_priv->pwrctx->driver_private; | 4782 | obj_priv = to_intel_bo(dev_priv->pwrctx); |
4783 | } else { | 4783 | } else { |
4784 | struct drm_gem_object *pwrctx; | 4784 | struct drm_gem_object *pwrctx; |
4785 | 4785 | ||
4786 | pwrctx = intel_alloc_power_context(dev); | 4786 | pwrctx = intel_alloc_power_context(dev); |
4787 | if (pwrctx) { | 4787 | if (pwrctx) { |
4788 | dev_priv->pwrctx = pwrctx; | 4788 | dev_priv->pwrctx = pwrctx; |
4789 | obj_priv = pwrctx->driver_private; | 4789 | obj_priv = to_intel_bo(pwrctx); |
4790 | } | 4790 | } |
4791 | } | 4791 | } |
4792 | 4792 | ||
@@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev) | |||
4815 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 4815 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
4816 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 4816 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
4817 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 4817 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
4818 | } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { | 4818 | } else if (IS_I965GM(dev)) { |
4819 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | 4819 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
4820 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | 4820 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
4821 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | 4821 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev) | |||
4853 | dev_priv->display.update_wm = g4x_update_wm; | 4853 | dev_priv->display.update_wm = g4x_update_wm; |
4854 | else if (IS_I965G(dev)) | 4854 | else if (IS_I965G(dev)) |
4855 | dev_priv->display.update_wm = i965_update_wm; | 4855 | dev_priv->display.update_wm = i965_update_wm; |
4856 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) { | 4856 | else if (IS_I9XX(dev)) { |
4857 | dev_priv->display.update_wm = i9xx_update_wm; | 4857 | dev_priv->display.update_wm = i9xx_update_wm; |
4858 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | 4858 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
4859 | } else if (IS_I85X(dev)) { | ||
4860 | dev_priv->display.update_wm = i9xx_update_wm; | ||
4861 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | ||
4859 | } else { | 4862 | } else { |
4860 | if (IS_I85X(dev)) | 4863 | dev_priv->display.update_wm = i830_update_wm; |
4861 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | 4864 | if (IS_845G(dev)) |
4862 | else if (IS_845G(dev)) | ||
4863 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | 4865 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
4864 | else | 4866 | else |
4865 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | 4867 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
4866 | dev_priv->display.update_wm = i830_update_wm; | ||
4867 | } | 4868 | } |
4868 | } | 4869 | } |
4869 | 4870 | ||
@@ -4957,7 +4958,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4957 | if (dev_priv->pwrctx) { | 4958 | if (dev_priv->pwrctx) { |
4958 | struct drm_i915_gem_object *obj_priv; | 4959 | struct drm_i915_gem_object *obj_priv; |
4959 | 4960 | ||
4960 | obj_priv = dev_priv->pwrctx->driver_private; | 4961 | obj_priv = to_intel_bo(dev_priv->pwrctx); |
4961 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | 4962 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); |
4962 | I915_READ(PWRCTXA); | 4963 | I915_READ(PWRCTXA); |
4963 | i915_gem_object_unpin(dev_priv->pwrctx); | 4964 | i915_gem_object_unpin(dev_priv->pwrctx); |
@@ -4978,9 +4979,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4978 | */ | 4979 | */ |
4979 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) | 4980 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
4980 | { | 4981 | { |
4981 | struct intel_output *intel_output = to_intel_output(connector); | 4982 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
4982 | 4983 | ||
4983 | return &intel_output->enc; | 4984 | return &intel_encoder->enc; |
4984 | } | 4985 | } |
4985 | 4986 | ||
4986 | /* | 4987 | /* |
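Note on the intel_display.c hunks above: alongside the intel_output to intel_encoder rename, every open-coded obj->driver_private dereference is funneled through a to_intel_bo() helper, so the GEM-object-to-i915-object conversion has a single typed definition instead of casts scattered across the file. The body below is the obvious one-line wrapper implied by the before/after pairs in the diff; treat it as an assumption rather than the authoritative i915_drv.h definition.

    /* Assumed definition, matching the substitution visible above. */
    #define to_intel_bo(obj) \
            ((struct drm_i915_gem_object *)(obj)->driver_private)

    /* before:  obj_priv = intel_fb->obj->driver_private;  */
    /* after:   obj_priv = to_intel_bo(intel_fb->obj);      */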
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8e283f75941d..77e40cfcf216 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -55,23 +55,23 @@ struct intel_dp_priv { | |||
55 | uint8_t link_bw; | 55 | uint8_t link_bw; |
56 | uint8_t lane_count; | 56 | uint8_t lane_count; |
57 | uint8_t dpcd[4]; | 57 | uint8_t dpcd[4]; |
58 | struct intel_output *intel_output; | 58 | struct intel_encoder *intel_encoder; |
59 | struct i2c_adapter adapter; | 59 | struct i2c_adapter adapter; |
60 | struct i2c_algo_dp_aux_data algo; | 60 | struct i2c_algo_dp_aux_data algo; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static void | 63 | static void |
64 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | 64 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); | 65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); |
66 | 66 | ||
67 | static void | 67 | static void |
68 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); | 68 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); |
69 | 69 | ||
70 | void | 70 | void |
71 | intel_edp_link_config (struct intel_output *intel_output, | 71 | intel_edp_link_config (struct intel_encoder *intel_encoder, |
72 | int *lane_num, int *link_bw) | 72 | int *lane_num, int *link_bw) |
73 | { | 73 | { |
74 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 74 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
75 | 75 | ||
76 | *lane_num = dp_priv->lane_count; | 76 | *lane_num = dp_priv->lane_count; |
77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) | 77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) |
@@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output, | |||
81 | } | 81 | } |
82 | 82 | ||
83 | static int | 83 | static int |
84 | intel_dp_max_lane_count(struct intel_output *intel_output) | 84 | intel_dp_max_lane_count(struct intel_encoder *intel_encoder) |
85 | { | 85 | { |
86 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 86 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
87 | int max_lane_count = 4; | 87 | int max_lane_count = 4; |
88 | 88 | ||
89 | if (dp_priv->dpcd[0] >= 0x11) { | 89 | if (dp_priv->dpcd[0] >= 0x11) { |
@@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static int | 101 | static int |
102 | intel_dp_max_link_bw(struct intel_output *intel_output) | 102 | intel_dp_max_link_bw(struct intel_encoder *intel_encoder) |
103 | { | 103 | { |
104 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 104 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
105 | int max_link_bw = dp_priv->dpcd[1]; | 105 | int max_link_bw = dp_priv->dpcd[1]; |
106 | 106 | ||
107 | switch (max_link_bw) { | 107 | switch (max_link_bw) { |
@@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw) | |||
127 | /* I think this is a fiction */ | 127 | /* I think this is a fiction */ |
128 | static int | 128 | static int |
129 | intel_dp_link_required(struct drm_device *dev, | 129 | intel_dp_link_required(struct drm_device *dev, |
130 | struct intel_output *intel_output, int pixel_clock) | 130 | struct intel_encoder *intel_encoder, int pixel_clock) |
131 | { | 131 | { |
132 | struct drm_i915_private *dev_priv = dev->dev_private; | 132 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | 133 | ||
134 | if (IS_eDP(intel_output)) | 134 | if (IS_eDP(intel_encoder)) |
135 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 135 | return (pixel_clock * dev_priv->edp_bpp) / 8; |
136 | else | 136 | else |
137 | return pixel_clock * 3; | 137 | return pixel_clock * 3; |
@@ -141,11 +141,11 @@ static int | |||
141 | intel_dp_mode_valid(struct drm_connector *connector, | 141 | intel_dp_mode_valid(struct drm_connector *connector, |
142 | struct drm_display_mode *mode) | 142 | struct drm_display_mode *mode) |
143 | { | 143 | { |
144 | struct intel_output *intel_output = to_intel_output(connector); | 144 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | 145 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
146 | int max_lanes = intel_dp_max_lane_count(intel_output); | 146 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
147 | 147 | ||
148 | if (intel_dp_link_required(connector->dev, intel_output, mode->clock) | 148 | if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) |
149 | > max_link_clock * max_lanes) | 149 | > max_link_clock * max_lanes) |
150 | return MODE_CLOCK_HIGH; | 150 | return MODE_CLOCK_HIGH; |
151 | 151 | ||
@@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | static int | 211 | static int |
212 | intel_dp_aux_ch(struct intel_output *intel_output, | 212 | intel_dp_aux_ch(struct intel_encoder *intel_encoder, |
213 | uint8_t *send, int send_bytes, | 213 | uint8_t *send, int send_bytes, |
214 | uint8_t *recv, int recv_size) | 214 | uint8_t *recv, int recv_size) |
215 | { | 215 | { |
216 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 216 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
217 | uint32_t output_reg = dp_priv->output_reg; | 217 | uint32_t output_reg = dp_priv->output_reg; |
218 | struct drm_device *dev = intel_output->base.dev; | 218 | struct drm_device *dev = intel_encoder->base.dev; |
219 | struct drm_i915_private *dev_priv = dev->dev_private; | 219 | struct drm_i915_private *dev_priv = dev->dev_private; |
220 | uint32_t ch_ctl = output_reg + 0x10; | 220 | uint32_t ch_ctl = output_reg + 0x10; |
221 | uint32_t ch_data = ch_ctl + 4; | 221 | uint32_t ch_data = ch_ctl + 4; |
@@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
230 | * and would like to run at 2MHz. So, take the | 230 | * and would like to run at 2MHz. So, take the |
231 | * hrawclk value and divide by 2 and use that | 231 | * hrawclk value and divide by 2 and use that |
232 | */ | 232 | */ |
233 | if (IS_eDP(intel_output)) | 233 | if (IS_eDP(intel_encoder)) |
234 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 234 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
235 | else if (HAS_PCH_SPLIT(dev)) | 235 | else if (HAS_PCH_SPLIT(dev)) |
236 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ | 236 | aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ |
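Note on the AUX-channel hunk above: the comment and the constants both target an AUX bit clock of roughly 2 MHz, so the divider is simply the input clock in MHz halved. Checking the values visible in the hunk: 450 MHz (eDP) / 225 = 2.00 MHz and 125 MHz (PCH rawclk) / 62 ≈ 2.02 MHz. The helper below is a hypothetical restatement of that arithmetic, not a function in the driver.

    /* Hypothetical illustration of the divider rule used above. */
    static inline unsigned int aux_clock_divider(unsigned int input_clock_mhz)
    {
            /* input / (input / 2) == ~2 MHz AUX clock */
            return input_clock_mhz / 2;
    }
    /* aux_clock_divider(450) == 225,  aux_clock_divider(125) == 62 */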
@@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
313 | 313 | ||
314 | /* Write data to the aux channel in native mode */ | 314 | /* Write data to the aux channel in native mode */ |
315 | static int | 315 | static int |
316 | intel_dp_aux_native_write(struct intel_output *intel_output, | 316 | intel_dp_aux_native_write(struct intel_encoder *intel_encoder, |
317 | uint16_t address, uint8_t *send, int send_bytes) | 317 | uint16_t address, uint8_t *send, int send_bytes) |
318 | { | 318 | { |
319 | int ret; | 319 | int ret; |
@@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
330 | memcpy(&msg[4], send, send_bytes); | 330 | memcpy(&msg[4], send, send_bytes); |
331 | msg_bytes = send_bytes + 4; | 331 | msg_bytes = send_bytes + 4; |
332 | for (;;) { | 332 | for (;;) { |
333 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); | 333 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); |
334 | if (ret < 0) | 334 | if (ret < 0) |
335 | return ret; | 335 | return ret; |
336 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 336 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
@@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
345 | 345 | ||
346 | /* Write a single byte to the aux channel in native mode */ | 346 | /* Write a single byte to the aux channel in native mode */ |
347 | static int | 347 | static int |
348 | intel_dp_aux_native_write_1(struct intel_output *intel_output, | 348 | intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, |
349 | uint16_t address, uint8_t byte) | 349 | uint16_t address, uint8_t byte) |
350 | { | 350 | { |
351 | return intel_dp_aux_native_write(intel_output, address, &byte, 1); | 351 | return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); |
352 | } | 352 | } |
353 | 353 | ||
354 | /* read bytes from a native aux channel */ | 354 | /* read bytes from a native aux channel */ |
355 | static int | 355 | static int |
356 | intel_dp_aux_native_read(struct intel_output *intel_output, | 356 | intel_dp_aux_native_read(struct intel_encoder *intel_encoder, |
357 | uint16_t address, uint8_t *recv, int recv_bytes) | 357 | uint16_t address, uint8_t *recv, int recv_bytes) |
358 | { | 358 | { |
359 | uint8_t msg[4]; | 359 | uint8_t msg[4]; |
@@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output, | |||
372 | reply_bytes = recv_bytes + 1; | 372 | reply_bytes = recv_bytes + 1; |
373 | 373 | ||
374 | for (;;) { | 374 | for (;;) { |
375 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, | 375 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, |
376 | reply, reply_bytes); | 376 | reply, reply_bytes); |
377 | if (ret == 0) | 377 | if (ret == 0) |
378 | return -EPROTO; | 378 | return -EPROTO; |
@@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
398 | struct intel_dp_priv *dp_priv = container_of(adapter, | 398 | struct intel_dp_priv *dp_priv = container_of(adapter, |
399 | struct intel_dp_priv, | 399 | struct intel_dp_priv, |
400 | adapter); | 400 | adapter); |
401 | struct intel_output *intel_output = dp_priv->intel_output; | 401 | struct intel_encoder *intel_encoder = dp_priv->intel_encoder; |
402 | uint16_t address = algo_data->address; | 402 | uint16_t address = algo_data->address; |
403 | uint8_t msg[5]; | 403 | uint8_t msg[5]; |
404 | uint8_t reply[2]; | 404 | uint8_t reply[2]; |
@@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
437 | } | 437 | } |
438 | 438 | ||
439 | for (;;) { | 439 | for (;;) { |
440 | ret = intel_dp_aux_ch(intel_output, | 440 | ret = intel_dp_aux_ch(intel_encoder, |
441 | msg, msg_bytes, | 441 | msg, msg_bytes, |
442 | reply, reply_bytes); | 442 | reply, reply_bytes); |
443 | if (ret < 0) { | 443 | if (ret < 0) { |
@@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
465 | } | 465 | } |
466 | 466 | ||
467 | static int | 467 | static int |
468 | intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | 468 | intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name) |
469 | { | 469 | { |
470 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 470 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
471 | 471 | ||
472 | DRM_DEBUG_KMS("i2c_init %s\n", name); | 472 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
473 | dp_priv->algo.running = false; | 473 | dp_priv->algo.running = false; |
@@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | |||
480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); | 480 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); |
481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; | 481 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; |
482 | dp_priv->adapter.algo_data = &dp_priv->algo; | 482 | dp_priv->adapter.algo_data = &dp_priv->algo; |
483 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; | 483 | dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; |
484 | 484 | ||
485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | 485 | return i2c_dp_aux_add_bus(&dp_priv->adapter); |
486 | } | 486 | } |
@@ -489,18 +489,18 @@ static bool | |||
489 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | 489 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, |
490 | struct drm_display_mode *adjusted_mode) | 490 | struct drm_display_mode *adjusted_mode) |
491 | { | 491 | { |
492 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 492 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
493 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 493 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
494 | int lane_count, clock; | 494 | int lane_count, clock; |
495 | int max_lane_count = intel_dp_max_lane_count(intel_output); | 495 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); |
496 | int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; | 496 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; |
497 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 497 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
498 | 498 | ||
499 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 499 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
500 | for (clock = 0; clock <= max_clock; clock++) { | 500 | for (clock = 0; clock <= max_clock; clock++) { |
501 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 501 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; |
502 | 502 | ||
503 | if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) | 503 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) |
504 | <= link_avail) { | 504 | <= link_avail) { |
505 | dp_priv->link_bw = bws[clock]; | 505 | dp_priv->link_bw = bws[clock]; |
506 | dp_priv->lane_count = lane_count; | 506 | dp_priv->lane_count = lane_count; |
@@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
562 | struct intel_dp_m_n m_n; | 562 | struct intel_dp_m_n m_n; |
563 | 563 | ||
564 | /* | 564 | /* |
565 | * Find the lane count in the intel_output private | 565 | * Find the lane count in the intel_encoder private |
566 | */ | 566 | */ |
567 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 567 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
568 | struct intel_output *intel_output = to_intel_output(connector); | 568 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
569 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 569 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
570 | 570 | ||
571 | if (!connector->encoder || connector->encoder->crtc != crtc) | 571 | if (!connector->encoder || connector->encoder->crtc != crtc) |
572 | continue; | 572 | continue; |
573 | 573 | ||
574 | if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { | 574 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
575 | lane_count = dp_priv->lane_count; | 575 | lane_count = dp_priv->lane_count; |
576 | break; | 576 | break; |
577 | } | 577 | } |
@@ -626,9 +626,9 @@ static void | |||
626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 626 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
627 | struct drm_display_mode *adjusted_mode) | 627 | struct drm_display_mode *adjusted_mode) |
628 | { | 628 | { |
629 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 629 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
630 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 630 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
631 | struct drm_crtc *crtc = intel_output->enc.crtc; | 631 | struct drm_crtc *crtc = intel_encoder->enc.crtc; |
632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
633 | 633 | ||
634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | | 634 | dp_priv->DP = (DP_LINK_TRAIN_OFF | |
@@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
667 | if (intel_crtc->pipe == 1) | 667 | if (intel_crtc->pipe == 1) |
668 | dp_priv->DP |= DP_PIPEB_SELECT; | 668 | dp_priv->DP |= DP_PIPEB_SELECT; |
669 | 669 | ||
670 | if (IS_eDP(intel_output)) { | 670 | if (IS_eDP(intel_encoder)) { |
671 | /* don't miss out required setting for eDP */ | 671 | /* don't miss out required setting for eDP */ |
672 | dp_priv->DP |= DP_PLL_ENABLE; | 672 | dp_priv->DP |= DP_PLL_ENABLE; |
673 | if (adjusted_mode->clock < 200000) | 673 | if (adjusted_mode->clock < 200000) |
@@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) | |||
702 | static void | 702 | static void |
703 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | 703 | intel_dp_dpms(struct drm_encoder *encoder, int mode) |
704 | { | 704 | { |
705 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 705 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
706 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 706 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
707 | struct drm_device *dev = intel_output->base.dev; | 707 | struct drm_device *dev = intel_encoder->base.dev; |
708 | struct drm_i915_private *dev_priv = dev->dev_private; | 708 | struct drm_i915_private *dev_priv = dev->dev_private; |
709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | 709 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); |
710 | 710 | ||
711 | if (mode != DRM_MODE_DPMS_ON) { | 711 | if (mode != DRM_MODE_DPMS_ON) { |
712 | if (dp_reg & DP_PORT_EN) { | 712 | if (dp_reg & DP_PORT_EN) { |
713 | intel_dp_link_down(intel_output, dp_priv->DP); | 713 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
714 | if (IS_eDP(intel_output)) | 714 | if (IS_eDP(intel_encoder)) |
715 | ironlake_edp_backlight_off(dev); | 715 | ironlake_edp_backlight_off(dev); |
716 | } | 716 | } |
717 | } else { | 717 | } else { |
718 | if (!(dp_reg & DP_PORT_EN)) { | 718 | if (!(dp_reg & DP_PORT_EN)) { |
719 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 719 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
720 | if (IS_eDP(intel_output)) | 720 | if (IS_eDP(intel_encoder)) |
721 | ironlake_edp_backlight_on(dev); | 721 | ironlake_edp_backlight_on(dev); |
722 | } | 722 | } |
723 | } | 723 | } |
@@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
729 | * link status information | 729 | * link status information |
730 | */ | 730 | */ |
731 | static bool | 731 | static bool |
732 | intel_dp_get_link_status(struct intel_output *intel_output, | 732 | intel_dp_get_link_status(struct intel_encoder *intel_encoder, |
733 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | 733 | uint8_t link_status[DP_LINK_STATUS_SIZE]) |
734 | { | 734 | { |
735 | int ret; | 735 | int ret; |
736 | 736 | ||
737 | ret = intel_dp_aux_native_read(intel_output, | 737 | ret = intel_dp_aux_native_read(intel_encoder, |
738 | DP_LANE0_1_STATUS, | 738 | DP_LANE0_1_STATUS, |
739 | link_status, DP_LINK_STATUS_SIZE); | 739 | link_status, DP_LINK_STATUS_SIZE); |
740 | if (ret != DP_LINK_STATUS_SIZE) | 740 | if (ret != DP_LINK_STATUS_SIZE) |
@@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | |||
752 | static void | 752 | static void |
753 | intel_dp_save(struct drm_connector *connector) | 753 | intel_dp_save(struct drm_connector *connector) |
754 | { | 754 | { |
755 | struct intel_output *intel_output = to_intel_output(connector); | 755 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
756 | struct drm_device *dev = intel_output->base.dev; | 756 | struct drm_device *dev = intel_encoder->base.dev; |
757 | struct drm_i915_private *dev_priv = dev->dev_private; | 757 | struct drm_i915_private *dev_priv = dev->dev_private; |
758 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 758 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
759 | 759 | ||
760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); | 760 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); |
761 | intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, | 761 | intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET, |
762 | dp_priv->save_link_configuration, | 762 | dp_priv->save_link_configuration, |
763 | sizeof (dp_priv->save_link_configuration)); | 763 | sizeof (dp_priv->save_link_configuration)); |
764 | } | 764 | } |
@@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) | |||
825 | } | 825 | } |
826 | 826 | ||
827 | static void | 827 | static void |
828 | intel_get_adjust_train(struct intel_output *intel_output, | 828 | intel_get_adjust_train(struct intel_encoder *intel_encoder, |
829 | uint8_t link_status[DP_LINK_STATUS_SIZE], | 829 | uint8_t link_status[DP_LINK_STATUS_SIZE], |
830 | int lane_count, | 830 | int lane_count, |
831 | uint8_t train_set[4]) | 831 | uint8_t train_set[4]) |
@@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | |||
942 | } | 942 | } |
943 | 943 | ||
944 | static bool | 944 | static bool |
945 | intel_dp_set_link_train(struct intel_output *intel_output, | 945 | intel_dp_set_link_train(struct intel_encoder *intel_encoder, |
946 | uint32_t dp_reg_value, | 946 | uint32_t dp_reg_value, |
947 | uint8_t dp_train_pat, | 947 | uint8_t dp_train_pat, |
948 | uint8_t train_set[4], | 948 | uint8_t train_set[4], |
949 | bool first) | 949 | bool first) |
950 | { | 950 | { |
951 | struct drm_device *dev = intel_output->base.dev; | 951 | struct drm_device *dev = intel_encoder->base.dev; |
952 | struct drm_i915_private *dev_priv = dev->dev_private; | 952 | struct drm_i915_private *dev_priv = dev->dev_private; |
953 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 953 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
954 | int ret; | 954 | int ret; |
955 | 955 | ||
956 | I915_WRITE(dp_priv->output_reg, dp_reg_value); | 956 | I915_WRITE(dp_priv->output_reg, dp_reg_value); |
@@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output, | |||
958 | if (first) | 958 | if (first) |
959 | intel_wait_for_vblank(dev); | 959 | intel_wait_for_vblank(dev); |
960 | 960 | ||
961 | intel_dp_aux_native_write_1(intel_output, | 961 | intel_dp_aux_native_write_1(intel_encoder, |
962 | DP_TRAINING_PATTERN_SET, | 962 | DP_TRAINING_PATTERN_SET, |
963 | dp_train_pat); | 963 | dp_train_pat); |
964 | 964 | ||
965 | ret = intel_dp_aux_native_write(intel_output, | 965 | ret = intel_dp_aux_native_write(intel_encoder, |
966 | DP_TRAINING_LANE0_SET, train_set, 4); | 966 | DP_TRAINING_LANE0_SET, train_set, 4); |
967 | if (ret != 4) | 967 | if (ret != 4) |
968 | return false; | 968 | return false; |
@@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output, | |||
971 | } | 971 | } |
972 | 972 | ||
973 | static void | 973 | static void |
974 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | 974 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, |
975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | 975 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) |
976 | { | 976 | { |
977 | struct drm_device *dev = intel_output->base.dev; | 977 | struct drm_device *dev = intel_encoder->base.dev; |
978 | struct drm_i915_private *dev_priv = dev->dev_private; | 978 | struct drm_i915_private *dev_priv = dev->dev_private; |
979 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 979 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
980 | uint8_t train_set[4]; | 980 | uint8_t train_set[4]; |
981 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 981 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
982 | int i; | 982 | int i; |
@@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
987 | int tries; | 987 | int tries; |
988 | 988 | ||
989 | /* Write the link configuration data */ | 989 | /* Write the link configuration data */ |
990 | intel_dp_aux_native_write(intel_output, 0x100, | 990 | intel_dp_aux_native_write(intel_encoder, 0x100, |
991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | 991 | link_configuration, DP_LINK_CONFIGURATION_SIZE); |
992 | 992 | ||
993 | DP |= DP_PORT_EN; | 993 | DP |= DP_PORT_EN; |
@@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1001 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); |
1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1002 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1003 | 1003 | ||
1004 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, | 1004 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1, |
1005 | DP_TRAINING_PATTERN_1, train_set, first)) | 1005 | DP_TRAINING_PATTERN_1, train_set, first)) |
1006 | break; | 1006 | break; |
1007 | first = false; | 1007 | first = false; |
1008 | /* Set training pattern 1 */ | 1008 | /* Set training pattern 1 */ |
1009 | 1009 | ||
1010 | udelay(100); | 1010 | udelay(100); |
1011 | if (!intel_dp_get_link_status(intel_output, link_status)) | 1011 | if (!intel_dp_get_link_status(intel_encoder, link_status)) |
1012 | break; | 1012 | break; |
1013 | 1013 | ||
1014 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { | 1014 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { |
@@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1033 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1033 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1034 | 1034 | ||
1035 | /* Compute new train_set as requested by target */ | 1035 | /* Compute new train_set as requested by target */ |
1036 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | 1036 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | /* channel equalization */ | 1039 | /* channel equalization */ |
@@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1045 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1046 | 1046 | ||
1047 | /* channel eq pattern */ | 1047 | /* channel eq pattern */ |
1048 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, | 1048 | if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2, |
1049 | DP_TRAINING_PATTERN_2, train_set, | 1049 | DP_TRAINING_PATTERN_2, train_set, |
1050 | false)) | 1050 | false)) |
1051 | break; | 1051 | break; |
1052 | 1052 | ||
1053 | udelay(400); | 1053 | udelay(400); |
1054 | if (!intel_dp_get_link_status(intel_output, link_status)) | 1054 | if (!intel_dp_get_link_status(intel_encoder, link_status)) |
1055 | break; | 1055 | break; |
1056 | 1056 | ||
1057 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { | 1057 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { |
@@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
1064 | break; | 1064 | break; |
1065 | 1065 | ||
1066 | /* Compute new train_set as requested by target */ | 1066 | /* Compute new train_set as requested by target */ |
1067 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | 1067 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); |
1068 | ++tries; | 1068 | ++tries; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); | 1071 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); |
1072 | POSTING_READ(dp_priv->output_reg); | 1072 | POSTING_READ(dp_priv->output_reg); |
1073 | intel_dp_aux_native_write_1(intel_output, | 1073 | intel_dp_aux_native_write_1(intel_encoder, |
1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | 1074 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); |
1075 | } | 1075 | } |
1076 | 1076 | ||
1077 | static void | 1077 | static void |
1078 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | 1078 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) |
1079 | { | 1079 | { |
1080 | struct drm_device *dev = intel_output->base.dev; | 1080 | struct drm_device *dev = intel_encoder->base.dev; |
1081 | struct drm_i915_private *dev_priv = dev->dev_private; | 1081 | struct drm_i915_private *dev_priv = dev->dev_private; |
1082 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1082 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1083 | 1083 | ||
1084 | DRM_DEBUG_KMS("\n"); | 1084 | DRM_DEBUG_KMS("\n"); |
1085 | 1085 | ||
1086 | if (IS_eDP(intel_output)) { | 1086 | if (IS_eDP(intel_encoder)) { |
1087 | DP &= ~DP_PLL_ENABLE; | 1087 | DP &= ~DP_PLL_ENABLE; |
1088 | I915_WRITE(dp_priv->output_reg, DP); | 1088 | I915_WRITE(dp_priv->output_reg, DP); |
1089 | POSTING_READ(dp_priv->output_reg); | 1089 | POSTING_READ(dp_priv->output_reg); |
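Note on the intel_dp_link_train()/intel_dp_link_down() hunks above: because the hunks only show the renamed lines, the overall control flow is easy to lose. It is the standard two-phase DisplayPort training: clock recovery on training pattern 1, then channel equalization on training pattern 2, with drive-level adjustments requested by the sink between attempts and a pattern-disable write at the end. The sketch below is a condensed, self-contained restatement of that flow; the *_stub helpers are hypothetical stand-ins, not driver functions, and the retry limits are simplified.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the AUX-channel helpers used above. */
    bool set_pattern_stub(int pattern, const uint8_t train_set[4]);
    bool read_status_stub(uint8_t link_status[6]);
    bool clock_recovery_ok_stub(const uint8_t *status, int lanes);
    bool channel_eq_ok_stub(const uint8_t *status, int lanes);
    void adjust_train_stub(const uint8_t *status, int lanes, uint8_t train_set[4]);

    static bool dp_train_outline(int lane_count)
    {
            uint8_t train_set[4] = { 0 };
            uint8_t status[6];
            int tries;

            /* Phase 1: clock recovery on TRAINING_PATTERN_1. */
            for (tries = 0; tries < 5; tries++) {
                    if (!set_pattern_stub(1, train_set) || !read_status_stub(status))
                            return false;
                    if (clock_recovery_ok_stub(status, lane_count))
                            break;
                    /* sink requests new voltage swing / pre-emphasis */
                    adjust_train_stub(status, lane_count, train_set);
            }
            if (tries == 5)
                    return false;

            /* Phase 2: channel equalization on TRAINING_PATTERN_2. */
            for (tries = 0; tries < 5; tries++) {
                    if (!set_pattern_stub(2, train_set) || !read_status_stub(status))
                            return false;
                    if (channel_eq_ok_stub(status, lane_count))
                            return true;    /* caller then disables the training pattern */
                    adjust_train_stub(status, lane_count, train_set);
            }
            return false;
    }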
@@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
1096 | 1096 | ||
1097 | udelay(17000); | 1097 | udelay(17000); |
1098 | 1098 | ||
1099 | if (IS_eDP(intel_output)) | 1099 | if (IS_eDP(intel_encoder)) |
1100 | DP |= DP_LINK_TRAIN_OFF; | 1100 | DP |= DP_LINK_TRAIN_OFF; |
1101 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | 1101 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); |
1102 | POSTING_READ(dp_priv->output_reg); | 1102 | POSTING_READ(dp_priv->output_reg); |
@@ -1105,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
1105 | static void | 1105 | static void |
1106 | intel_dp_restore(struct drm_connector *connector) | 1106 | intel_dp_restore(struct drm_connector *connector) |
1107 | { | 1107 | { |
1108 | struct intel_output *intel_output = to_intel_output(connector); | 1108 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1109 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1109 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1110 | 1110 | ||
1111 | if (dp_priv->save_DP & DP_PORT_EN) | 1111 | if (dp_priv->save_DP & DP_PORT_EN) |
1112 | intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); | 1112 | intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); |
1113 | else | 1113 | else |
1114 | intel_dp_link_down(intel_output, dp_priv->save_DP); | 1114 | intel_dp_link_down(intel_encoder, dp_priv->save_DP); |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | /* | 1117 | /* |
@@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector) | |||
1124 | */ | 1124 | */ |
1125 | 1125 | ||
1126 | static void | 1126 | static void |
1127 | intel_dp_check_link_status(struct intel_output *intel_output) | 1127 | intel_dp_check_link_status(struct intel_encoder *intel_encoder) |
1128 | { | 1128 | { |
1129 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1129 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1130 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1130 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1131 | 1131 | ||
1132 | if (!intel_output->enc.crtc) | 1132 | if (!intel_encoder->enc.crtc) |
1133 | return; | 1133 | return; |
1134 | 1134 | ||
1135 | if (!intel_dp_get_link_status(intel_output, link_status)) { | 1135 | if (!intel_dp_get_link_status(intel_encoder, link_status)) { |
1136 | intel_dp_link_down(intel_output, dp_priv->DP); | 1136 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
1137 | return; | 1137 | return; |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) | 1140 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) |
1141 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 1141 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | static enum drm_connector_status | 1144 | static enum drm_connector_status |
1145 | ironlake_dp_detect(struct drm_connector *connector) | 1145 | ironlake_dp_detect(struct drm_connector *connector) |
1146 | { | 1146 | { |
1147 | struct intel_output *intel_output = to_intel_output(connector); | 1147 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1148 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1148 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1149 | enum drm_connector_status status; | 1149 | enum drm_connector_status status; |
1150 | 1150 | ||
1151 | status = connector_status_disconnected; | 1151 | status = connector_status_disconnected; |
1152 | if (intel_dp_aux_native_read(intel_output, | 1152 | if (intel_dp_aux_native_read(intel_encoder, |
1153 | 0x000, dp_priv->dpcd, | 1153 | 0x000, dp_priv->dpcd, |
1154 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1154 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) |
1155 | { | 1155 | { |
@@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1168 | static enum drm_connector_status | 1168 | static enum drm_connector_status |
1169 | intel_dp_detect(struct drm_connector *connector) | 1169 | intel_dp_detect(struct drm_connector *connector) |
1170 | { | 1170 | { |
1171 | struct intel_output *intel_output = to_intel_output(connector); | 1171 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1172 | struct drm_device *dev = intel_output->base.dev; | 1172 | struct drm_device *dev = intel_encoder->base.dev; |
1173 | struct drm_i915_private *dev_priv = dev->dev_private; | 1173 | struct drm_i915_private *dev_priv = dev->dev_private; |
1174 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1174 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1175 | uint32_t temp, bit; | 1175 | uint32_t temp, bit; |
1176 | enum drm_connector_status status; | 1176 | enum drm_connector_status status; |
1177 | 1177 | ||
@@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector) | |||
1210 | return connector_status_disconnected; | 1210 | return connector_status_disconnected; |
1211 | 1211 | ||
1212 | status = connector_status_disconnected; | 1212 | status = connector_status_disconnected; |
1213 | if (intel_dp_aux_native_read(intel_output, | 1213 | if (intel_dp_aux_native_read(intel_encoder, |
1214 | 0x000, dp_priv->dpcd, | 1214 | 0x000, dp_priv->dpcd, |
1215 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1215 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) |
1216 | { | 1216 | { |
@@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector) | |||
1222 | 1222 | ||
1223 | static int intel_dp_get_modes(struct drm_connector *connector) | 1223 | static int intel_dp_get_modes(struct drm_connector *connector) |
1224 | { | 1224 | { |
1225 | struct intel_output *intel_output = to_intel_output(connector); | 1225 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1226 | struct drm_device *dev = intel_output->base.dev; | 1226 | struct drm_device *dev = intel_encoder->base.dev; |
1227 | struct drm_i915_private *dev_priv = dev->dev_private; | 1227 | struct drm_i915_private *dev_priv = dev->dev_private; |
1228 | int ret; | 1228 | int ret; |
1229 | 1229 | ||
1230 | /* We should parse the EDID data and find out if it has an audio sink | 1230 | /* We should parse the EDID data and find out if it has an audio sink |
1231 | */ | 1231 | */ |
1232 | 1232 | ||
1233 | ret = intel_ddc_get_modes(intel_output); | 1233 | ret = intel_ddc_get_modes(intel_encoder); |
1234 | if (ret) | 1234 | if (ret) |
1235 | return ret; | 1235 | return ret; |
1236 | 1236 | ||
1237 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1237 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1238 | if (IS_eDP(intel_output)) { | 1238 | if (IS_eDP(intel_encoder)) { |
1239 | if (dev_priv->panel_fixed_mode != NULL) { | 1239 | if (dev_priv->panel_fixed_mode != NULL) { |
1240 | struct drm_display_mode *mode; | 1240 | struct drm_display_mode *mode; |
1241 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1241 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1249 | static void | 1249 | static void |
1250 | intel_dp_destroy (struct drm_connector *connector) | 1250 | intel_dp_destroy (struct drm_connector *connector) |
1251 | { | 1251 | { |
1252 | struct intel_output *intel_output = to_intel_output(connector); | 1252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1253 | 1253 | ||
1254 | if (intel_output->i2c_bus) | 1254 | if (intel_encoder->i2c_bus) |
1255 | intel_i2c_destroy(intel_output->i2c_bus); | 1255 | intel_i2c_destroy(intel_encoder->i2c_bus); |
1256 | drm_sysfs_connector_remove(connector); | 1256 | drm_sysfs_connector_remove(connector); |
1257 | drm_connector_cleanup(connector); | 1257 | drm_connector_cleanup(connector); |
1258 | kfree(intel_output); | 1258 | kfree(intel_encoder); |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 1261 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
@@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { | |||
1291 | }; | 1291 | }; |
1292 | 1292 | ||
1293 | void | 1293 | void |
1294 | intel_dp_hot_plug(struct intel_output *intel_output) | 1294 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
1295 | { | 1295 | { |
1296 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1296 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
1297 | 1297 | ||
1298 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1298 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) |
1299 | intel_dp_check_link_status(intel_output); | 1299 | intel_dp_check_link_status(intel_encoder); |
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | void | 1302 | void |
@@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1304 | { | 1304 | { |
1305 | struct drm_i915_private *dev_priv = dev->dev_private; | 1305 | struct drm_i915_private *dev_priv = dev->dev_private; |
1306 | struct drm_connector *connector; | 1306 | struct drm_connector *connector; |
1307 | struct intel_output *intel_output; | 1307 | struct intel_encoder *intel_encoder; |
1308 | struct intel_dp_priv *dp_priv; | 1308 | struct intel_dp_priv *dp_priv; |
1309 | const char *name = NULL; | 1309 | const char *name = NULL; |
1310 | 1310 | ||
1311 | intel_output = kcalloc(sizeof(struct intel_output) + | 1311 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
1312 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1312 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
1313 | if (!intel_output) | 1313 | if (!intel_encoder) |
1314 | return; | 1314 | return; |
1315 | 1315 | ||
1316 | dp_priv = (struct intel_dp_priv *)(intel_output + 1); | 1316 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); |
1317 | 1317 | ||
1318 | connector = &intel_output->base; | 1318 | connector = &intel_encoder->base; |
1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | 1319 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, |
1320 | DRM_MODE_CONNECTOR_DisplayPort); | 1320 | DRM_MODE_CONNECTOR_DisplayPort); |
1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1321 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
1322 | 1322 | ||
1323 | if (output_reg == DP_A) | 1323 | if (output_reg == DP_A) |
1324 | intel_output->type = INTEL_OUTPUT_EDP; | 1324 | intel_encoder->type = INTEL_OUTPUT_EDP; |
1325 | else | 1325 | else |
1326 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | 1326 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
1327 | 1327 | ||
1328 | if (output_reg == DP_B || output_reg == PCH_DP_B) | 1328 | if (output_reg == DP_B || output_reg == PCH_DP_B) |
1329 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | 1329 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); |
1330 | else if (output_reg == DP_C || output_reg == PCH_DP_C) | 1330 | else if (output_reg == DP_C || output_reg == PCH_DP_C) |
1331 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | 1331 | intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); |
1332 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1332 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1333 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1333 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1334 | 1334 | ||
1335 | if (IS_eDP(intel_output)) | 1335 | if (IS_eDP(intel_encoder)) |
1336 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1336 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1337 | 1337 | ||
1338 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1338 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1339 | connector->interlace_allowed = true; | 1339 | connector->interlace_allowed = true; |
1340 | connector->doublescan_allowed = 0; | 1340 | connector->doublescan_allowed = 0; |
1341 | 1341 | ||
1342 | dp_priv->intel_output = intel_output; | 1342 | dp_priv->intel_encoder = intel_encoder; |
1343 | dp_priv->output_reg = output_reg; | 1343 | dp_priv->output_reg = output_reg; |
1344 | dp_priv->has_audio = false; | 1344 | dp_priv->has_audio = false; |
1345 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; | 1345 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; |
1346 | intel_output->dev_priv = dp_priv; | 1346 | intel_encoder->dev_priv = dp_priv; |
1347 | 1347 | ||
1348 | drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, | 1348 | drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, |
1349 | DRM_MODE_ENCODER_TMDS); | 1349 | DRM_MODE_ENCODER_TMDS); |
1350 | drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); | 1350 | drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); |
1351 | 1351 | ||
1352 | drm_mode_connector_attach_encoder(&intel_output->base, | 1352 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
1353 | &intel_output->enc); | 1353 | &intel_encoder->enc); |
1354 | drm_sysfs_connector_add(connector); | 1354 | drm_sysfs_connector_add(connector); |
1355 | 1355 | ||
1356 | /* Set up the DDC bus. */ | 1356 | /* Set up the DDC bus. */ |
@@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1378 | break; | 1378 | break; |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | intel_dp_i2c_init(intel_output, name); | 1381 | intel_dp_i2c_init(intel_encoder, name); |
1382 | 1382 | ||
1383 | intel_output->ddc_bus = &dp_priv->adapter; | 1383 | intel_encoder->ddc_bus = &dp_priv->adapter; |
1384 | intel_output->hot_plug = intel_dp_hot_plug; | 1384 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1385 | 1385 | ||
1386 | if (output_reg == DP_A) { | 1386 | if (output_reg == DP_A) { |
1387 | /* initialize panel mode from VBT if available for eDP */ | 1387 | /* initialize panel mode from VBT if available for eDP */ |
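A side note on the allocation pattern visible in intel_dp_init() above: the encoder and its DP-specific private data are carved out of a single allocation, and the private pointer is then recovered as (intel_encoder + 1). A minimal sketch of the idiom, using stand-in structure names rather than the driver's own types:

        #include <linux/slab.h>

        struct demo_encoder {
                int type;
                void *dev_priv;         /* points at the trailing private area */
        };

        struct demo_dp_priv {
                int output_reg;
        };

        static struct demo_encoder *demo_encoder_alloc(void)
        {
                struct demo_encoder *enc;
                struct demo_dp_priv *priv;

                /* one allocation: encoder followed immediately by its private data */
                enc = kzalloc(sizeof(*enc) + sizeof(*priv), GFP_KERNEL);
                if (!enc)
                        return NULL;

                priv = (struct demo_dp_priv *)(enc + 1);
                enc->dev_priv = priv;   /* a single kfree(enc) later frees both */
                return enc;
        }

The driver itself uses kcalloc(size, 1, GFP_KERNEL) for the same effect; kzalloc() is used here only to keep the sketch short.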
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3a467ca57857..e30253755f12 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -95,7 +95,7 @@ struct intel_framebuffer { | |||
95 | }; | 95 | }; |
96 | 96 | ||
97 | 97 | ||
98 | struct intel_output { | 98 | struct intel_encoder { |
99 | struct drm_connector base; | 99 | struct drm_connector base; |
100 | 100 | ||
101 | struct drm_encoder enc; | 101 | struct drm_encoder enc; |
@@ -105,7 +105,7 @@ struct intel_output { | |||
105 | bool load_detect_temp; | 105 | bool load_detect_temp; |
106 | bool needs_tv_clock; | 106 | bool needs_tv_clock; |
107 | void *dev_priv; | 107 | void *dev_priv; |
108 | void (*hot_plug)(struct intel_output *); | 108 | void (*hot_plug)(struct intel_encoder *); |
109 | int crtc_mask; | 109 | int crtc_mask; |
110 | int clone_mask; | 110 | int clone_mask; |
111 | }; | 111 | }; |
@@ -152,15 +152,15 @@ struct intel_crtc { | |||
152 | }; | 152 | }; |
153 | 153 | ||
154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 154 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
155 | #define to_intel_output(x) container_of(x, struct intel_output, base) | 155 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
156 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) | 156 | #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) |
157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 157 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
158 | 158 | ||
159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | 159 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
160 | const char *name); | 160 | const char *name); |
161 | void intel_i2c_destroy(struct i2c_adapter *adapter); | 161 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
162 | int intel_ddc_get_modes(struct intel_output *intel_output); | 162 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder); |
163 | extern bool intel_ddc_probe(struct intel_output *intel_output); | 163 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); |
164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 164 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
165 | void intel_i2c_reset_gmbus(struct drm_device *dev); | 165 | void intel_i2c_reset_gmbus(struct drm_device *dev); |
166 | 166 | ||
@@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
175 | void | 175 | void |
176 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 176 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
177 | struct drm_display_mode *adjusted_mode); | 177 | struct drm_display_mode *adjusted_mode); |
178 | extern void intel_edp_link_config (struct intel_output *, int *, int *); | 178 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
179 | 179 | ||
180 | 180 | ||
181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); | 181 | extern int intel_panel_fitter_pipe (struct drm_device *dev); |
@@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | |||
191 | struct drm_file *file_priv); | 191 | struct drm_file *file_priv); |
192 | extern void intel_wait_for_vblank(struct drm_device *dev); | 192 | extern void intel_wait_for_vblank(struct drm_device *dev); |
193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 193 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, | 194 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
195 | struct drm_display_mode *mode, | 195 | struct drm_display_mode *mode, |
196 | int *dpms_mode); | 196 | int *dpms_mode); |
197 | extern void intel_release_load_detect_pipe(struct intel_output *intel_output, | 197 | extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
198 | int dpms_mode); | 198 | int dpms_mode); |
199 | 199 | ||
200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | 200 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); |
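The renamed cast helpers above are plain container_of() conversions: struct intel_encoder embeds both a drm_connector (base) and a drm_encoder (enc), so either embedded object can be mapped back to its wrapper. A reduced sketch of the idea with placeholder types, not the driver's definitions:

        #include <linux/kernel.h>       /* container_of() */

        struct demo_connector { int id; };
        struct demo_drm_encoder { int id; };

        struct demo_wrapper {
                struct demo_connector base;
                struct demo_drm_encoder enc;
        };

        #define to_demo_wrapper(x)      container_of(x, struct demo_wrapper, base)
        #define enc_to_demo_wrapper(x)  container_of(x, struct demo_wrapper, enc)

        /* both casts land on the same wrapper when c and e are its members */
        static int demo_same_wrapper(struct demo_connector *c,
                                     struct demo_drm_encoder *e)
        {
                return to_demo_wrapper(c) == enc_to_demo_wrapper(e);
        }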
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 0427ca5a2514..ebf213c96b9c 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = { | |||
80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | 80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) |
81 | { | 81 | { |
82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
83 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 83 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
84 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 84 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
85 | u32 dvo_reg = dvo->dvo_reg; | 85 | u32 dvo_reg = dvo->dvo_reg; |
86 | u32 temp = I915_READ(dvo_reg); | 86 | u32 temp = I915_READ(dvo_reg); |
87 | 87 | ||
@@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | |||
99 | static void intel_dvo_save(struct drm_connector *connector) | 99 | static void intel_dvo_save(struct drm_connector *connector) |
100 | { | 100 | { |
101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 101 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
102 | struct intel_output *intel_output = to_intel_output(connector); | 102 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
103 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 103 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
104 | 104 | ||
105 | /* Each output should probably just save the registers it touches, | 105 | /* Each output should probably just save the registers it touches, |
106 | * but for now, use more overkill. | 106 | * but for now, use more overkill. |
@@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector) | |||
115 | static void intel_dvo_restore(struct drm_connector *connector) | 115 | static void intel_dvo_restore(struct drm_connector *connector) |
116 | { | 116 | { |
117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 117 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
118 | struct intel_output *intel_output = to_intel_output(connector); | 118 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
119 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 119 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
120 | 120 | ||
121 | dvo->dev_ops->restore(dvo); | 121 | dvo->dev_ops->restore(dvo); |
122 | 122 | ||
@@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector) | |||
128 | static int intel_dvo_mode_valid(struct drm_connector *connector, | 128 | static int intel_dvo_mode_valid(struct drm_connector *connector, |
129 | struct drm_display_mode *mode) | 129 | struct drm_display_mode *mode) |
130 | { | 130 | { |
131 | struct intel_output *intel_output = to_intel_output(connector); | 131 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
132 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 132 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
133 | 133 | ||
134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 134 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
135 | return MODE_NO_DBLESCAN; | 135 | return MODE_NO_DBLESCAN; |
@@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | |||
150 | struct drm_display_mode *mode, | 150 | struct drm_display_mode *mode, |
151 | struct drm_display_mode *adjusted_mode) | 151 | struct drm_display_mode *adjusted_mode) |
152 | { | 152 | { |
153 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 153 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
154 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 154 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
155 | 155 | ||
156 | /* If we have timings from the BIOS for the panel, put them in | 156 | /* If we have timings from the BIOS for the panel, put them in |
157 | * to the adjusted mode. The CRTC will be set up for this mode, | 157 | * to the adjusted mode. The CRTC will be set up for this mode, |
@@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
186 | struct drm_device *dev = encoder->dev; | 186 | struct drm_device *dev = encoder->dev; |
187 | struct drm_i915_private *dev_priv = dev->dev_private; | 187 | struct drm_i915_private *dev_priv = dev->dev_private; |
188 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 188 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
189 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 189 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
190 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 190 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
191 | int pipe = intel_crtc->pipe; | 191 | int pipe = intel_crtc->pipe; |
192 | u32 dvo_val; | 192 | u32 dvo_val; |
193 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; | 193 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; |
@@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
241 | */ | 241 | */ |
242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 242 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) |
243 | { | 243 | { |
244 | struct intel_output *intel_output = to_intel_output(connector); | 244 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
245 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 245 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
246 | 246 | ||
247 | return dvo->dev_ops->detect(dvo); | 247 | return dvo->dev_ops->detect(dvo); |
248 | } | 248 | } |
249 | 249 | ||
250 | static int intel_dvo_get_modes(struct drm_connector *connector) | 250 | static int intel_dvo_get_modes(struct drm_connector *connector) |
251 | { | 251 | { |
252 | struct intel_output *intel_output = to_intel_output(connector); | 252 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
253 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 253 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
254 | 254 | ||
255 | /* We should probably have an i2c driver get_modes function for those | 255 | /* We should probably have an i2c driver get_modes function for those |
256 | * devices which will have a fixed set of modes determined by the chip | 256 | * devices which will have a fixed set of modes determined by the chip |
257 | * (TV-out, for example), but for now with just TMDS and LVDS, | 257 | * (TV-out, for example), but for now with just TMDS and LVDS, |
258 | * that's not the case. | 258 | * that's not the case. |
259 | */ | 259 | */ |
260 | intel_ddc_get_modes(intel_output); | 260 | intel_ddc_get_modes(intel_encoder); |
261 | if (!list_empty(&connector->probed_modes)) | 261 | if (!list_empty(&connector->probed_modes)) |
262 | return 1; | 262 | return 1; |
263 | 263 | ||
@@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector) | |||
275 | 275 | ||
276 | static void intel_dvo_destroy (struct drm_connector *connector) | 276 | static void intel_dvo_destroy (struct drm_connector *connector) |
277 | { | 277 | { |
278 | struct intel_output *intel_output = to_intel_output(connector); | 278 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
279 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 279 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
280 | 280 | ||
281 | if (dvo) { | 281 | if (dvo) { |
282 | if (dvo->dev_ops->destroy) | 282 | if (dvo->dev_ops->destroy) |
@@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector) | |||
286 | /* no need, in i830_dvoices[] now */ | 286 | /* no need, in i830_dvoices[] now */ |
287 | //kfree(dvo); | 287 | //kfree(dvo); |
288 | } | 288 | } |
289 | if (intel_output->i2c_bus) | 289 | if (intel_encoder->i2c_bus) |
290 | intel_i2c_destroy(intel_output->i2c_bus); | 290 | intel_i2c_destroy(intel_encoder->i2c_bus); |
291 | if (intel_output->ddc_bus) | 291 | if (intel_encoder->ddc_bus) |
292 | intel_i2c_destroy(intel_output->ddc_bus); | 292 | intel_i2c_destroy(intel_encoder->ddc_bus); |
293 | drm_sysfs_connector_remove(connector); | 293 | drm_sysfs_connector_remove(connector); |
294 | drm_connector_cleanup(connector); | 294 | drm_connector_cleanup(connector); |
295 | kfree(intel_output); | 295 | kfree(intel_encoder); |
296 | } | 296 | } |
297 | 297 | ||
298 | #ifdef RANDR_GET_CRTC_INTERFACE | 298 | #ifdef RANDR_GET_CRTC_INTERFACE |
@@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) | |||
300 | { | 300 | { |
301 | struct drm_device *dev = connector->dev; | 301 | struct drm_device *dev = connector->dev; |
302 | struct drm_i915_private *dev_priv = dev->dev_private; | 302 | struct drm_i915_private *dev_priv = dev->dev_private; |
303 | struct intel_output *intel_output = to_intel_output(connector); | 303 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
304 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 304 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); | 305 | int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); |
306 | 306 | ||
307 | return intel_pipe_to_crtc(pScrn, pipe); | 307 | return intel_pipe_to_crtc(pScrn, pipe); |
@@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
352 | { | 352 | { |
353 | struct drm_device *dev = connector->dev; | 353 | struct drm_device *dev = connector->dev; |
354 | struct drm_i915_private *dev_priv = dev->dev_private; | 354 | struct drm_i915_private *dev_priv = dev->dev_private; |
355 | struct intel_output *intel_output = to_intel_output(connector); | 355 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
356 | struct intel_dvo_device *dvo = intel_output->dev_priv; | 356 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; |
357 | uint32_t dvo_reg = dvo->dvo_reg; | 357 | uint32_t dvo_reg = dvo->dvo_reg; |
358 | uint32_t dvo_val = I915_READ(dvo_reg); | 358 | uint32_t dvo_val = I915_READ(dvo_reg); |
359 | struct drm_display_mode *mode = NULL; | 359 | struct drm_display_mode *mode = NULL; |
@@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
383 | 383 | ||
384 | void intel_dvo_init(struct drm_device *dev) | 384 | void intel_dvo_init(struct drm_device *dev) |
385 | { | 385 | { |
386 | struct intel_output *intel_output; | 386 | struct intel_encoder *intel_encoder; |
387 | struct intel_dvo_device *dvo; | 387 | struct intel_dvo_device *dvo; |
388 | struct i2c_adapter *i2cbus = NULL; | 388 | struct i2c_adapter *i2cbus = NULL; |
389 | int ret = 0; | 389 | int ret = 0; |
390 | int i; | 390 | int i; |
391 | int encoder_type = DRM_MODE_ENCODER_NONE; | 391 | int encoder_type = DRM_MODE_ENCODER_NONE; |
392 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); | 392 | intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); |
393 | if (!intel_output) | 393 | if (!intel_encoder) |
394 | return; | 394 | return; |
395 | 395 | ||
396 | /* Set up the DDC bus */ | 396 | /* Set up the DDC bus */ |
397 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | 397 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); |
398 | if (!intel_output->ddc_bus) | 398 | if (!intel_encoder->ddc_bus) |
399 | goto free_intel; | 399 | goto free_intel; |
400 | 400 | ||
401 | /* Now, try to find a controller */ | 401 | /* Now, try to find a controller */ |
402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | 402 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { |
403 | struct drm_connector *connector = &intel_output->base; | 403 | struct drm_connector *connector = &intel_encoder->base; |
404 | int gpio; | 404 | int gpio; |
405 | 405 | ||
406 | dvo = &intel_dvo_devices[i]; | 406 | dvo = &intel_dvo_devices[i]; |
@@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev) | |||
435 | if (!ret) | 435 | if (!ret) |
436 | continue; | 436 | continue; |
437 | 437 | ||
438 | intel_output->type = INTEL_OUTPUT_DVO; | 438 | intel_encoder->type = INTEL_OUTPUT_DVO; |
439 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 439 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
440 | switch (dvo->type) { | 440 | switch (dvo->type) { |
441 | case INTEL_DVO_CHIP_TMDS: | 441 | case INTEL_DVO_CHIP_TMDS: |
442 | intel_output->clone_mask = | 442 | intel_encoder->clone_mask = |
443 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | | 443 | (1 << INTEL_DVO_TMDS_CLONE_BIT) | |
444 | (1 << INTEL_ANALOG_CLONE_BIT); | 444 | (1 << INTEL_ANALOG_CLONE_BIT); |
445 | drm_connector_init(dev, connector, | 445 | drm_connector_init(dev, connector, |
@@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev) | |||
448 | encoder_type = DRM_MODE_ENCODER_TMDS; | 448 | encoder_type = DRM_MODE_ENCODER_TMDS; |
449 | break; | 449 | break; |
450 | case INTEL_DVO_CHIP_LVDS: | 450 | case INTEL_DVO_CHIP_LVDS: |
451 | intel_output->clone_mask = | 451 | intel_encoder->clone_mask = |
452 | (1 << INTEL_DVO_LVDS_CLONE_BIT); | 452 | (1 << INTEL_DVO_LVDS_CLONE_BIT); |
453 | drm_connector_init(dev, connector, | 453 | drm_connector_init(dev, connector, |
454 | &intel_dvo_connector_funcs, | 454 | &intel_dvo_connector_funcs, |
@@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev) | |||
463 | connector->interlace_allowed = false; | 463 | connector->interlace_allowed = false; |
464 | connector->doublescan_allowed = false; | 464 | connector->doublescan_allowed = false; |
465 | 465 | ||
466 | intel_output->dev_priv = dvo; | 466 | intel_encoder->dev_priv = dvo; |
467 | intel_output->i2c_bus = i2cbus; | 467 | intel_encoder->i2c_bus = i2cbus; |
468 | 468 | ||
469 | drm_encoder_init(dev, &intel_output->enc, | 469 | drm_encoder_init(dev, &intel_encoder->enc, |
470 | &intel_dvo_enc_funcs, encoder_type); | 470 | &intel_dvo_enc_funcs, encoder_type); |
471 | drm_encoder_helper_add(&intel_output->enc, | 471 | drm_encoder_helper_add(&intel_encoder->enc, |
472 | &intel_dvo_helper_funcs); | 472 | &intel_dvo_helper_funcs); |
473 | 473 | ||
474 | drm_mode_connector_attach_encoder(&intel_output->base, | 474 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
475 | &intel_output->enc); | 475 | &intel_encoder->enc); |
476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { | 476 | if (dvo->type == INTEL_DVO_CHIP_LVDS) { |
477 | /* For our LVDS chipsets, we should hopefully be able | 477 | /* For our LVDS chipsets, we should hopefully be able |
478 | * to dig the fixed panel mode out of the BIOS data. | 478 | * to dig the fixed panel mode out of the BIOS data. |
@@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev) | |||
490 | return; | 490 | return; |
491 | } | 491 | } |
492 | 492 | ||
493 | intel_i2c_destroy(intel_output->ddc_bus); | 493 | intel_i2c_destroy(intel_encoder->ddc_bus); |
494 | /* Didn't find a chip, so tear down. */ | 494 | /* Didn't find a chip, so tear down. */ |
495 | if (i2cbus != NULL) | 495 | if (i2cbus != NULL) |
496 | intel_i2c_destroy(i2cbus); | 496 | intel_i2c_destroy(i2cbus); |
497 | free_intel: | 497 | free_intel: |
498 | kfree(intel_output); | 498 | kfree(intel_encoder); |
499 | } | 499 | } |
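The tail of intel_dvo_init() above is the usual kernel cleanup ladder: each failure jumps to a label that undoes only what has already been set up, and resources are released in reverse order of acquisition. A generic sketch of the shape (all names illustrative):

        #include <linux/slab.h>

        struct demo_dev { void *bus; };

        static void *demo_bus_create(void)      { return kzalloc(16, GFP_KERNEL); }
        static void  demo_bus_destroy(void *b)  { kfree(b); }

        static struct demo_dev *demo_init(void)
        {
                struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

                if (!d)
                        return NULL;

                d->bus = demo_bus_create();
                if (!d->bus)
                        goto free_dev;          /* nothing else to undo yet */

                return d;                       /* success: caller owns everything */

        free_dev:
                kfree(d);
                return NULL;
        }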
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 69bbef92f130..8a0b3bcdc7b1 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
144 | ret = -ENOMEM; | 144 | ret = -ENOMEM; |
145 | goto out; | 145 | goto out; |
146 | } | 146 | } |
147 | obj_priv = fbo->driver_private; | 147 | obj_priv = to_intel_bo(fbo); |
148 | 148 | ||
149 | mutex_lock(&dev->struct_mutex); | 149 | mutex_lock(&dev->struct_mutex); |
150 | 150 | ||
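intel_fb.c here (and intel_overlay.c further down) stop reading the GEM object's ->driver_private directly and go through the to_intel_bo() accessor instead. The accessor itself is defined in i915_drv.h and is not part of this hunk; the sketch below only illustrates the general idea of hiding the raw pointer behind one helper, with made-up names:

        struct demo_gem_object { void *driver_private; };
        struct demo_i915_obj   { int pin_count; };

        /* the only place that knows how the i915 object hangs off the GEM object */
        static inline struct demo_i915_obj *to_demo_bo(struct demo_gem_object *obj)
        {
                return obj->driver_private;
        }

Callers then write to_demo_bo(obj) everywhere, so a later change to the underlying layout touches a single definition.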
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1ed02f641258..48cade0cf7b1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
51 | struct drm_i915_private *dev_priv = dev->dev_private; | 51 | struct drm_i915_private *dev_priv = dev->dev_private; |
52 | struct drm_crtc *crtc = encoder->crtc; | 52 | struct drm_crtc *crtc = encoder->crtc; |
53 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 53 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
54 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 54 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
55 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 55 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
56 | u32 sdvox; | 56 | u32 sdvox; |
57 | 57 | ||
58 | sdvox = SDVO_ENCODING_HDMI | | 58 | sdvox = SDVO_ENCODING_HDMI | |
@@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
74 | { | 74 | { |
75 | struct drm_device *dev = encoder->dev; | 75 | struct drm_device *dev = encoder->dev; |
76 | struct drm_i915_private *dev_priv = dev->dev_private; | 76 | struct drm_i915_private *dev_priv = dev->dev_private; |
77 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 77 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
78 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 78 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
79 | u32 temp; | 79 | u32 temp; |
80 | 80 | ||
81 | temp = I915_READ(hdmi_priv->sdvox_reg); | 81 | temp = I915_READ(hdmi_priv->sdvox_reg); |
@@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector) | |||
110 | { | 110 | { |
111 | struct drm_device *dev = connector->dev; | 111 | struct drm_device *dev = connector->dev; |
112 | struct drm_i915_private *dev_priv = dev->dev_private; | 112 | struct drm_i915_private *dev_priv = dev->dev_private; |
113 | struct intel_output *intel_output = to_intel_output(connector); | 113 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
114 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 114 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
115 | 115 | ||
116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); | 116 | hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); |
117 | } | 117 | } |
@@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector) | |||
120 | { | 120 | { |
121 | struct drm_device *dev = connector->dev; | 121 | struct drm_device *dev = connector->dev; |
122 | struct drm_i915_private *dev_priv = dev->dev_private; | 122 | struct drm_i915_private *dev_priv = dev->dev_private; |
123 | struct intel_output *intel_output = to_intel_output(connector); | 123 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
124 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 124 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
125 | 125 | ||
126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); | 126 | I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); |
127 | POSTING_READ(hdmi_priv->sdvox_reg); | 127 | POSTING_READ(hdmi_priv->sdvox_reg); |
@@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
151 | static enum drm_connector_status | 151 | static enum drm_connector_status |
152 | intel_hdmi_detect(struct drm_connector *connector) | 152 | intel_hdmi_detect(struct drm_connector *connector) |
153 | { | 153 | { |
154 | struct intel_output *intel_output = to_intel_output(connector); | 154 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
155 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 155 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
156 | struct edid *edid = NULL; | 156 | struct edid *edid = NULL; |
157 | enum drm_connector_status status = connector_status_disconnected; | 157 | enum drm_connector_status status = connector_status_disconnected; |
158 | 158 | ||
159 | hdmi_priv->has_hdmi_sink = false; | 159 | hdmi_priv->has_hdmi_sink = false; |
160 | edid = drm_get_edid(&intel_output->base, | 160 | edid = drm_get_edid(&intel_encoder->base, |
161 | intel_output->ddc_bus); | 161 | intel_encoder->ddc_bus); |
162 | 162 | ||
163 | if (edid) { | 163 | if (edid) { |
164 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 164 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
165 | status = connector_status_connected; | 165 | status = connector_status_connected; |
166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 166 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
167 | } | 167 | } |
168 | intel_output->base.display_info.raw_edid = NULL; | 168 | intel_encoder->base.display_info.raw_edid = NULL; |
169 | kfree(edid); | 169 | kfree(edid); |
170 | } | 170 | } |
171 | 171 | ||
@@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
174 | 174 | ||
175 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 175 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
176 | { | 176 | { |
177 | struct intel_output *intel_output = to_intel_output(connector); | 177 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
178 | 178 | ||
179 | /* We should parse the EDID data and find out if it's an HDMI sink so | 179 | /* We should parse the EDID data and find out if it's an HDMI sink so |
180 | * we can send audio to it. | 180 | * we can send audio to it. |
181 | */ | 181 | */ |
182 | 182 | ||
183 | return intel_ddc_get_modes(intel_output); | 183 | return intel_ddc_get_modes(intel_encoder); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void intel_hdmi_destroy(struct drm_connector *connector) | 186 | static void intel_hdmi_destroy(struct drm_connector *connector) |
187 | { | 187 | { |
188 | struct intel_output *intel_output = to_intel_output(connector); | 188 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
189 | 189 | ||
190 | if (intel_output->i2c_bus) | 190 | if (intel_encoder->i2c_bus) |
191 | intel_i2c_destroy(intel_output->i2c_bus); | 191 | intel_i2c_destroy(intel_encoder->i2c_bus); |
192 | drm_sysfs_connector_remove(connector); | 192 | drm_sysfs_connector_remove(connector); |
193 | drm_connector_cleanup(connector); | 193 | drm_connector_cleanup(connector); |
194 | kfree(intel_output); | 194 | kfree(intel_encoder); |
195 | } | 195 | } |
196 | 196 | ||
197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | 197 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
@@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
230 | { | 230 | { |
231 | struct drm_i915_private *dev_priv = dev->dev_private; | 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
232 | struct drm_connector *connector; | 232 | struct drm_connector *connector; |
233 | struct intel_output *intel_output; | 233 | struct intel_encoder *intel_encoder; |
234 | struct intel_hdmi_priv *hdmi_priv; | 234 | struct intel_hdmi_priv *hdmi_priv; |
235 | 235 | ||
236 | intel_output = kcalloc(sizeof(struct intel_output) + | 236 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 237 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
238 | if (!intel_output) | 238 | if (!intel_encoder) |
239 | return; | 239 | return; |
240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); | 240 | hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); |
241 | 241 | ||
242 | connector = &intel_output->base; | 242 | connector = &intel_encoder->base; |
243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 243 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
244 | DRM_MODE_CONNECTOR_HDMIA); | 244 | DRM_MODE_CONNECTOR_HDMIA); |
245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 245 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
246 | 246 | ||
247 | intel_output->type = INTEL_OUTPUT_HDMI; | 247 | intel_encoder->type = INTEL_OUTPUT_HDMI; |
248 | 248 | ||
249 | connector->interlace_allowed = 0; | 249 | connector->interlace_allowed = 0; |
250 | connector->doublescan_allowed = 0; | 250 | connector->doublescan_allowed = 0; |
251 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 251 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
252 | 252 | ||
253 | /* Set up the DDC bus. */ | 253 | /* Set up the DDC bus. */ |
254 | if (sdvox_reg == SDVOB) { | 254 | if (sdvox_reg == SDVOB) { |
255 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 255 | intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
256 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 256 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
257 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 257 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
258 | } else if (sdvox_reg == SDVOC) { | 258 | } else if (sdvox_reg == SDVOC) { |
259 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 259 | intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
260 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 260 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
261 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 261 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
262 | } else if (sdvox_reg == HDMIB) { | 262 | } else if (sdvox_reg == HDMIB) { |
263 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 263 | intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
264 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 264 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
265 | "HDMIB"); | 265 | "HDMIB"); |
266 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | 266 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
267 | } else if (sdvox_reg == HDMIC) { | 267 | } else if (sdvox_reg == HDMIC) { |
268 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 268 | intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
269 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 269 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
270 | "HDMIC"); | 270 | "HDMIC"); |
271 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | 271 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
272 | } else if (sdvox_reg == HDMID) { | 272 | } else if (sdvox_reg == HDMID) { |
273 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 273 | intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
274 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 274 | intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
275 | "HDMID"); | 275 | "HDMID"); |
276 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | 276 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; |
277 | } | 277 | } |
278 | if (!intel_output->ddc_bus) | 278 | if (!intel_encoder->ddc_bus) |
279 | goto err_connector; | 279 | goto err_connector; |
280 | 280 | ||
281 | hdmi_priv->sdvox_reg = sdvox_reg; | 281 | hdmi_priv->sdvox_reg = sdvox_reg; |
282 | intel_output->dev_priv = hdmi_priv; | 282 | intel_encoder->dev_priv = hdmi_priv; |
283 | 283 | ||
284 | drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, | 284 | drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, |
285 | DRM_MODE_ENCODER_TMDS); | 285 | DRM_MODE_ENCODER_TMDS); |
286 | drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); | 286 | drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); |
287 | 287 | ||
288 | drm_mode_connector_attach_encoder(&intel_output->base, | 288 | drm_mode_connector_attach_encoder(&intel_encoder->base, |
289 | &intel_output->enc); | 289 | &intel_encoder->enc); |
290 | drm_sysfs_connector_add(connector); | 290 | drm_sysfs_connector_add(connector); |
291 | 291 | ||
292 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 292 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
@@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
302 | 302 | ||
303 | err_connector: | 303 | err_connector: |
304 | drm_connector_cleanup(connector); | 304 | drm_connector_cleanup(connector); |
305 | kfree(intel_output); | 305 | kfree(intel_encoder); |
306 | 306 | ||
307 | return; | 307 | return; |
308 | } | 308 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 216e9f52b6e0..b66806a37d37 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
239 | struct drm_i915_private *dev_priv = dev->dev_private; | 239 | struct drm_i915_private *dev_priv = dev->dev_private; |
240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
241 | struct drm_encoder *tmp_encoder; | 241 | struct drm_encoder *tmp_encoder; |
242 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 242 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
243 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 243 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | 244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; |
245 | int left_border = 0, right_border = 0, top_border = 0; | 245 | int left_border = 0, right_border = 0, top_border = 0; |
246 | int bottom_border = 0; | 246 | int bottom_border = 0; |
@@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
587 | { | 587 | { |
588 | struct drm_device *dev = encoder->dev; | 588 | struct drm_device *dev = encoder->dev; |
589 | struct drm_i915_private *dev_priv = dev->dev_private; | 589 | struct drm_i915_private *dev_priv = dev->dev_private; |
590 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 590 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
591 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 591 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
592 | 592 | ||
593 | /* | 593 | /* |
594 | * The LVDS pin pair will already have been turned on in the | 594 | * The LVDS pin pair will already have been turned on in the |
@@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
635 | static int intel_lvds_get_modes(struct drm_connector *connector) | 635 | static int intel_lvds_get_modes(struct drm_connector *connector) |
636 | { | 636 | { |
637 | struct drm_device *dev = connector->dev; | 637 | struct drm_device *dev = connector->dev; |
638 | struct intel_output *intel_output = to_intel_output(connector); | 638 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
639 | struct drm_i915_private *dev_priv = dev->dev_private; | 639 | struct drm_i915_private *dev_priv = dev->dev_private; |
640 | int ret = 0; | 640 | int ret = 0; |
641 | 641 | ||
642 | ret = intel_ddc_get_modes(intel_output); | 642 | if (dev_priv->lvds_edid_good) { |
643 | ret = intel_ddc_get_modes(intel_encoder); | ||
643 | 644 | ||
644 | if (ret) | 645 | if (ret) |
645 | return ret; | 646 | return ret; |
647 | } | ||
646 | 648 | ||
647 | /* Didn't get an EDID, so | 649 | /* Didn't get an EDID, so |
648 | * Set wide sync ranges so we get all modes | 650 | * Set wide sync ranges so we get all modes |
@@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
715 | static void intel_lvds_destroy(struct drm_connector *connector) | 717 | static void intel_lvds_destroy(struct drm_connector *connector) |
716 | { | 718 | { |
717 | struct drm_device *dev = connector->dev; | 719 | struct drm_device *dev = connector->dev; |
718 | struct intel_output *intel_output = to_intel_output(connector); | 720 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
719 | struct drm_i915_private *dev_priv = dev->dev_private; | 721 | struct drm_i915_private *dev_priv = dev->dev_private; |
720 | 722 | ||
721 | if (intel_output->ddc_bus) | 723 | if (intel_encoder->ddc_bus) |
722 | intel_i2c_destroy(intel_output->ddc_bus); | 724 | intel_i2c_destroy(intel_encoder->ddc_bus); |
723 | if (dev_priv->lid_notifier.notifier_call) | 725 | if (dev_priv->lid_notifier.notifier_call) |
724 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | 726 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); |
725 | drm_sysfs_connector_remove(connector); | 727 | drm_sysfs_connector_remove(connector); |
@@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
732 | uint64_t value) | 734 | uint64_t value) |
733 | { | 735 | { |
734 | struct drm_device *dev = connector->dev; | 736 | struct drm_device *dev = connector->dev; |
735 | struct intel_output *intel_output = | 737 | struct intel_encoder *intel_encoder = |
736 | to_intel_output(connector); | 738 | to_intel_encoder(connector); |
737 | 739 | ||
738 | if (property == dev->mode_config.scaling_mode_property && | 740 | if (property == dev->mode_config.scaling_mode_property && |
739 | connector->encoder) { | 741 | connector->encoder) { |
740 | struct drm_crtc *crtc = connector->encoder->crtc; | 742 | struct drm_crtc *crtc = connector->encoder->crtc; |
741 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | 743 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
742 | if (value == DRM_MODE_SCALE_NONE) { | 744 | if (value == DRM_MODE_SCALE_NONE) { |
743 | DRM_DEBUG_KMS("no scaling not supported\n"); | 745 | DRM_DEBUG_KMS("no scaling not supported\n"); |
744 | return 0; | 746 | return 0; |
@@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
858 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 860 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
859 | }, | 861 | }, |
860 | }, | 862 | }, |
863 | { | ||
864 | .callback = intel_no_lvds_dmi_callback, | ||
865 | .ident = "Clientron U800", | ||
866 | .matches = { | ||
867 | DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), | ||
868 | DMI_MATCH(DMI_PRODUCT_NAME, "U800"), | ||
869 | }, | ||
870 | }, | ||
861 | 871 | ||
862 | { } /* terminating entry */ | 872 | { } /* terminating entry */ |
863 | }; | 873 | }; |
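The new Clientron U800 entry above follows the existing intel_no_lvds[] shape: every DMI_MATCH() in an entry must match before the entry counts and its callback runs, and the table is consumed with dmi_check_system(). A self-contained illustration of that mechanism, with placeholder names rather than the driver's table:

        #include <linux/dmi.h>
        #include <linux/types.h>

        static int demo_no_lvds_callback(const struct dmi_system_id *id)
        {
                /* returning non-zero stops the scan after this entry */
                return 1;
        }

        static const struct dmi_system_id demo_no_lvds[] = {
                {
                        .callback = demo_no_lvds_callback,
                        .ident = "Clientron U800",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
                        },
                },
                { }     /* terminating entry */
        };

        static bool demo_lvds_blacklisted(void)
        {
                /* dmi_check_system() returns the number of matching entries */
                return dmi_check_system(demo_no_lvds) != 0;
        }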
@@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) | |||
968 | void intel_lvds_init(struct drm_device *dev) | 978 | void intel_lvds_init(struct drm_device *dev) |
969 | { | 979 | { |
970 | struct drm_i915_private *dev_priv = dev->dev_private; | 980 | struct drm_i915_private *dev_priv = dev->dev_private; |
971 | struct intel_output *intel_output; | 981 | struct intel_encoder *intel_encoder; |
972 | struct drm_connector *connector; | 982 | struct drm_connector *connector; |
973 | struct drm_encoder *encoder; | 983 | struct drm_encoder *encoder; |
974 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 984 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
@@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev) | |||
996 | gpio = PCH_GPIOC; | 1006 | gpio = PCH_GPIOC; |
997 | } | 1007 | } |
998 | 1008 | ||
999 | intel_output = kzalloc(sizeof(struct intel_output) + | 1009 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + |
1000 | sizeof(struct intel_lvds_priv), GFP_KERNEL); | 1010 | sizeof(struct intel_lvds_priv), GFP_KERNEL); |
1001 | if (!intel_output) { | 1011 | if (!intel_encoder) { |
1002 | return; | 1012 | return; |
1003 | } | 1013 | } |
1004 | 1014 | ||
1005 | connector = &intel_output->base; | 1015 | connector = &intel_encoder->base; |
1006 | encoder = &intel_output->enc; | 1016 | encoder = &intel_encoder->enc; |
1007 | drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, | 1017 | drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, |
1008 | DRM_MODE_CONNECTOR_LVDS); | 1018 | DRM_MODE_CONNECTOR_LVDS); |
1009 | 1019 | ||
1010 | drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, | 1020 | drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, |
1011 | DRM_MODE_ENCODER_LVDS); | 1021 | DRM_MODE_ENCODER_LVDS); |
1012 | 1022 | ||
1013 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1023 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
1014 | intel_output->type = INTEL_OUTPUT_LVDS; | 1024 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
1015 | 1025 | ||
1016 | intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 1026 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
1017 | intel_output->crtc_mask = (1 << 1); | 1027 | intel_encoder->crtc_mask = (1 << 1); |
1018 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 1028 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
1019 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 1029 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
1020 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 1030 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1021 | connector->interlace_allowed = false; | 1031 | connector->interlace_allowed = false; |
1022 | connector->doublescan_allowed = false; | 1032 | connector->doublescan_allowed = false; |
1023 | 1033 | ||
1024 | lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); | 1034 | lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); |
1025 | intel_output->dev_priv = lvds_priv; | 1035 | intel_encoder->dev_priv = lvds_priv; |
1026 | /* create the scaling mode property */ | 1036 | /* create the scaling mode property */ |
1027 | drm_mode_create_scaling_mode_property(dev); | 1037 | drm_mode_create_scaling_mode_property(dev); |
1028 | /* | 1038 | /* |
1029 | * the initial panel fitting mode will be FULL_SCREEN. | 1039 | * the initial panel fitting mode will be FULL_SCREEN. |
1030 | */ | 1040 | */ |
1031 | 1041 | ||
1032 | drm_connector_attach_property(&intel_output->base, | 1042 | drm_connector_attach_property(&intel_encoder->base, |
1033 | dev->mode_config.scaling_mode_property, | 1043 | dev->mode_config.scaling_mode_property, |
1034 | DRM_MODE_SCALE_FULLSCREEN); | 1044 | DRM_MODE_SCALE_FULLSCREEN); |
1035 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 1045 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; |
@@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1044 | */ | 1054 | */ |
1045 | 1055 | ||
1046 | /* Set up the DDC bus. */ | 1056 | /* Set up the DDC bus. */ |
1047 | intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); | 1057 | intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); |
1048 | if (!intel_output->ddc_bus) { | 1058 | if (!intel_encoder->ddc_bus) { |
1049 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " | 1059 | dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " |
1050 | "failed.\n"); | 1060 | "failed.\n"); |
1051 | goto failed; | 1061 | goto failed; |
@@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev) | |||
1055 | * Attempt to get the fixed panel mode from DDC. Assume that the | 1065 | * Attempt to get the fixed panel mode from DDC. Assume that the |
1056 | * preferred mode is the right one. | 1066 | * preferred mode is the right one. |
1057 | */ | 1067 | */ |
1058 | intel_ddc_get_modes(intel_output); | 1068 | dev_priv->lvds_edid_good = true; |
1069 | |||
1070 | if (!intel_ddc_get_modes(intel_encoder)) | ||
1071 | dev_priv->lvds_edid_good = false; | ||
1059 | 1072 | ||
1060 | list_for_each_entry(scan, &connector->probed_modes, head) { | 1073 | list_for_each_entry(scan, &connector->probed_modes, head) { |
1061 | mutex_lock(&dev->mode_config.mutex); | 1074 | mutex_lock(&dev->mode_config.mutex); |
@@ -1133,9 +1146,9 @@ out: | |||
1133 | 1146 | ||
1134 | failed: | 1147 | failed: |
1135 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1148 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
1136 | if (intel_output->ddc_bus) | 1149 | if (intel_encoder->ddc_bus) |
1137 | intel_i2c_destroy(intel_output->ddc_bus); | 1150 | intel_i2c_destroy(intel_encoder->ddc_bus); |
1138 | drm_connector_cleanup(connector); | 1151 | drm_connector_cleanup(connector); |
1139 | drm_encoder_cleanup(encoder); | 1152 | drm_encoder_cleanup(encoder); |
1140 | kfree(intel_output); | 1153 | kfree(intel_encoder); |
1141 | } | 1154 | } |
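The lvds_edid_good changes in this file cache, at init time, whether the panel returned usable EDID, so intel_lvds_get_modes() can skip a DDC transaction that is already known to fail. A minimal sketch of that probe-once pattern (field and helper names are placeholders, not the driver's):

        #include <linux/types.h>

        struct demo_lvds {
                bool edid_good;                 /* decided once during init */
        };

        /* init path: try DDC once and remember whether it produced modes */
        static void demo_lvds_init(struct demo_lvds *lvds, int (*ddc_get_modes)(void))
        {
                lvds->edid_good = ddc_get_modes() > 0;
        }

        /* later get_modes calls consult the cached answer instead of re-probing */
        static int demo_lvds_get_modes(struct demo_lvds *lvds, int (*ddc_get_modes)(void))
        {
                if (lvds->edid_good)
                        return ddc_get_modes();
                return 0;                       /* fall back to fixed/VBT modes */
        }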
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 89d303d1d3fb..8e5c83b2d120 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * intel_ddc_probe | 34 | * intel_ddc_probe |
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | bool intel_ddc_probe(struct intel_output *intel_output) | 37 | bool intel_ddc_probe(struct intel_encoder *intel_encoder) |
38 | { | 38 | { |
39 | u8 out_buf[] = { 0x0, 0x0}; | 39 | u8 out_buf[] = { 0x0, 0x0}; |
40 | u8 buf[2]; | 40 | u8 buf[2]; |
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
54 | } | 54 | } |
55 | }; | 55 | }; |
56 | 56 | ||
57 | intel_i2c_quirk_set(intel_output->base.dev, true); | 57 | intel_i2c_quirk_set(intel_encoder->base.dev, true); |
58 | ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); | 58 | ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); |
59 | intel_i2c_quirk_set(intel_output->base.dev, false); | 59 | intel_i2c_quirk_set(intel_encoder->base.dev, false); |
60 | if (ret == 2) | 60 | if (ret == 2) |
61 | return true; | 61 | return true; |
62 | 62 | ||
@@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
69 | * | 69 | * |
70 | * Fetch the EDID information from @connector using the DDC bus. | 70 | * Fetch the EDID information from @connector using the DDC bus. |
71 | */ | 71 | */ |
72 | int intel_ddc_get_modes(struct intel_output *intel_output) | 72 | int intel_ddc_get_modes(struct intel_encoder *intel_encoder) |
73 | { | 73 | { |
74 | struct edid *edid; | 74 | struct edid *edid; |
75 | int ret = 0; | 75 | int ret = 0; |
76 | 76 | ||
77 | intel_i2c_quirk_set(intel_output->base.dev, true); | 77 | intel_i2c_quirk_set(intel_encoder->base.dev, true); |
78 | edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); | 78 | edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); |
79 | intel_i2c_quirk_set(intel_output->base.dev, false); | 79 | intel_i2c_quirk_set(intel_encoder->base.dev, false); |
80 | if (edid) { | 80 | if (edid) { |
81 | drm_mode_connector_update_edid_property(&intel_output->base, | 81 | drm_mode_connector_update_edid_property(&intel_encoder->base, |
82 | edid); | 82 | edid); |
83 | ret = drm_add_edid_modes(&intel_output->base, edid); | 83 | ret = drm_add_edid_modes(&intel_encoder->base, edid); |
84 | intel_output->base.display_info.raw_edid = NULL; | 84 | intel_encoder->base.display_info.raw_edid = NULL; |
85 | kfree(edid); | 85 | kfree(edid); |
86 | } | 86 | } |
87 | 87 | ||
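
Both helpers in intel_modes.c follow the same shape: bracket the transfer with intel_i2c_quirk_set() and run a combined I2C transaction on the encoder's ddc_bus. For intel_ddc_probe() that transaction is a one-byte write of offset 0 to the EDID slave followed by a one-byte read; if both messages complete, a sink is answering. A stripped-down sketch of that probe against a plain struct i2c_adapter, assuming the standard 0x50 EDID address and leaving out the i915-specific quirk handling:

    #include <linux/kernel.h>
    #include <linux/i2c.h>

    static bool ddc_slave_present(struct i2c_adapter *adapter)
    {
            u8 out = 0x0;           /* EDID offset to start reading at */
            u8 in  = 0x0;
            struct i2c_msg msgs[] = {
                    { .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
                    { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &in  },
            };

            /* i2c_transfer() returns the number of messages that completed */
            return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) == 2;
    }
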
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 60595fc26fdd..6d524a1fc271 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
724 | int ret, tmp_width; | 724 | int ret, tmp_width; |
725 | struct overlay_registers *regs; | 725 | struct overlay_registers *regs; |
726 | bool scale_changed = false; | 726 | bool scale_changed = false; |
727 | struct drm_i915_gem_object *bo_priv = new_bo->driver_private; | 727 | struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); |
728 | struct drm_device *dev = overlay->dev; | 728 | struct drm_device *dev = overlay->dev; |
729 | 729 | ||
730 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 730 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
809 | intel_overlay_continue(overlay, scale_changed); | 809 | intel_overlay_continue(overlay, scale_changed); |
810 | 810 | ||
811 | overlay->old_vid_bo = overlay->vid_bo; | 811 | overlay->old_vid_bo = overlay->vid_bo; |
812 | overlay->vid_bo = new_bo->driver_private; | 812 | overlay->vid_bo = to_intel_bo(new_bo); |
813 | 813 | ||
814 | return 0; | 814 | return 0; |
815 | 815 | ||
@@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1344 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); | 1344 | reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); |
1345 | if (!reg_bo) | 1345 | if (!reg_bo) |
1346 | goto out_free; | 1346 | goto out_free; |
1347 | overlay->reg_bo = reg_bo->driver_private; | 1347 | overlay->reg_bo = to_intel_bo(reg_bo); |
1348 | 1348 | ||
1349 | if (OVERLAY_NONPHYSICAL(dev)) { | 1349 | if (OVERLAY_NONPHYSICAL(dev)) { |
1350 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); | 1350 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); |
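
The intel_overlay.c changes stop dereferencing new_bo->driver_private directly and go through to_intel_bo() instead, so the GEM-object-to-i915-object conversion lives in exactly one place. At this point in the driver the helper amounts to a typed cast; a sketch of that accessor, noting the real definition sits in i915_drv.h and may differ in detail:

    /* Roughly what the helper does here: one central, typed wrapper
     * around the gem object's driver_private pointer. */
    #define to_intel_bo(obj) \
            ((struct drm_i915_gem_object *)(obj)->driver_private)

Funnelling every caller through one helper is what later lets the driver embed the GEM object inside struct drm_i915_gem_object and turn the helper into a container_of() without touching call sites.
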
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26e13a0bf30b..87d953664cb0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -54,7 +54,7 @@ struct intel_sdvo_priv { | |||
54 | u8 slave_addr; | 54 | u8 slave_addr; |
55 | 55 | ||
56 | /* Register for the SDVO device: SDVOB or SDVOC */ | 56 | /* Register for the SDVO device: SDVOB or SDVOC */ |
57 | int output_device; | 57 | int sdvo_reg; |
58 | 58 | ||
59 | /* Active outputs controlled by this SDVO output */ | 59 | /* Active outputs controlled by this SDVO output */ |
60 | uint16_t controlled_output; | 60 | uint16_t controlled_output; |
@@ -124,7 +124,7 @@ struct intel_sdvo_priv { | |||
124 | */ | 124 | */ |
125 | struct intel_sdvo_encode encode; | 125 | struct intel_sdvo_encode encode; |
126 | 126 | ||
127 | /* DDC bus used by this SDVO output */ | 127 | /* DDC bus used by this SDVO encoder */ |
128 | uint8_t ddc_bus; | 128 | uint8_t ddc_bus; |
129 | 129 | ||
130 | /* Mac mini hack -- use the same DDC as the analog connector */ | 130 | /* Mac mini hack -- use the same DDC as the analog connector */ |
@@ -162,22 +162,22 @@ struct intel_sdvo_priv { | |||
162 | }; | 162 | }; |
163 | 163 | ||
164 | static bool | 164 | static bool |
165 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); | 165 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); |
166 | 166 | ||
167 | /** | 167 | /** |
168 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 168 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
169 | * SDVOB and SDVOC to work around apparent hardware issues (according to | 169 | * SDVOB and SDVOC to work around apparent hardware issues (according to |
170 | * comments in the BIOS). | 170 | * comments in the BIOS). |
171 | */ | 171 | */ |
172 | static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | 172 | static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) |
173 | { | 173 | { |
174 | struct drm_device *dev = intel_output->base.dev; | 174 | struct drm_device *dev = intel_encoder->base.dev; |
175 | struct drm_i915_private *dev_priv = dev->dev_private; | 175 | struct drm_i915_private *dev_priv = dev->dev_private; |
176 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 176 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
177 | u32 bval = val, cval = val; | 177 | u32 bval = val, cval = val; |
178 | int i; | 178 | int i; |
179 | 179 | ||
180 | if (sdvo_priv->output_device == SDVOB) { | 180 | if (sdvo_priv->sdvo_reg == SDVOB) { |
181 | cval = I915_READ(SDVOC); | 181 | cval = I915_READ(SDVOC); |
182 | } else { | 182 | } else { |
183 | bval = I915_READ(SDVOB); | 183 | bval = I915_READ(SDVOB); |
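
As the comment above says, intel_sdvo_write_sdvox() never programs only one of SDVOB/SDVOC: it snapshots the current value of the register it is not targeting and then writes both, because programming a single register is apparently unreliable on some hardware. The rest of the function body falls outside this hunk; assuming it simply mirrors the write to both registers with posting reads, the tail looks roughly like:

    /* Assumed shape of the mirror-write tail (not shown in this hunk). */
    for (i = 0; i < 2; i++) {
            I915_WRITE(SDVOB, bval);
            I915_READ(SDVOB);       /* posting read to flush the write */
            I915_WRITE(SDVOC, cval);
            I915_READ(SDVOC);
    }
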
@@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | 199 | static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, |
200 | u8 *ch) | 200 | u8 *ch) |
201 | { | 201 | { |
202 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 202 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
203 | u8 out_buf[2]; | 203 | u8 out_buf[2]; |
204 | u8 buf[2]; | 204 | u8 buf[2]; |
205 | int ret; | 205 | int ret; |
@@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
222 | out_buf[0] = addr; | 222 | out_buf[0] = addr; |
223 | out_buf[1] = 0; | 223 | out_buf[1] = 0; |
224 | 224 | ||
225 | if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) | 225 | if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) |
226 | { | 226 | { |
227 | *ch = buf[0]; | 227 | *ch = buf[0]; |
228 | return true; | 228 | return true; |
@@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
232 | return false; | 232 | return false; |
233 | } | 233 | } |
234 | 234 | ||
235 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | 235 | static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, |
236 | u8 ch) | 236 | u8 ch) |
237 | { | 237 | { |
238 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 238 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
239 | u8 out_buf[2]; | 239 | u8 out_buf[2]; |
240 | struct i2c_msg msgs[] = { | 240 | struct i2c_msg msgs[] = { |
241 | { | 241 | { |
@@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
249 | out_buf[0] = addr; | 249 | out_buf[0] = addr; |
250 | out_buf[1] = ch; | 250 | out_buf[1] = ch; |
251 | 251 | ||
252 | if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) | 252 | if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) |
253 | { | 253 | { |
254 | return true; | 254 | return true; |
255 | } | 255 | } |
@@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name { | |||
353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), | 353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), |
354 | }; | 354 | }; |
355 | 355 | ||
356 | #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") | 356 | #define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC") |
357 | #define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) | 357 | #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) |
358 | 358 | ||
359 | static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | 359 | static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, |
360 | void *args, int args_len) | 360 | void *args, int args_len) |
361 | { | 361 | { |
362 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 362 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
363 | int i; | 363 | int i; |
364 | 364 | ||
365 | DRM_DEBUG_KMS("%s: W: %02X ", | 365 | DRM_DEBUG_KMS("%s: W: %02X ", |
@@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, | |||
379 | DRM_LOG_KMS("\n"); | 379 | DRM_LOG_KMS("\n"); |
380 | } | 380 | } |
381 | 381 | ||
382 | static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, | 382 | static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, |
383 | void *args, int args_len) | 383 | void *args, int args_len) |
384 | { | 384 | { |
385 | int i; | 385 | int i; |
386 | 386 | ||
387 | intel_sdvo_debug_write(intel_output, cmd, args, args_len); | 387 | intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); |
388 | 388 | ||
389 | for (i = 0; i < args_len; i++) { | 389 | for (i = 0; i < args_len; i++) { |
390 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, | 390 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, |
391 | ((u8*)args)[i]); | 391 | ((u8*)args)[i]); |
392 | } | 392 | } |
393 | 393 | ||
394 | intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); | 394 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); |
395 | } | 395 | } |
396 | 396 | ||
397 | static const char *cmd_status_names[] = { | 397 | static const char *cmd_status_names[] = { |
@@ -404,11 +404,11 @@ static const char *cmd_status_names[] = { | |||
404 | "Scaling not supported" | 404 | "Scaling not supported" |
405 | }; | 405 | }; |
406 | 406 | ||
407 | static void intel_sdvo_debug_response(struct intel_output *intel_output, | 407 | static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, |
408 | void *response, int response_len, | 408 | void *response, int response_len, |
409 | u8 status) | 409 | u8 status) |
410 | { | 410 | { |
411 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 411 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
412 | int i; | 412 | int i; |
413 | 413 | ||
414 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); | 414 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); |
@@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, | |||
423 | DRM_LOG_KMS("\n"); | 423 | DRM_LOG_KMS("\n"); |
424 | } | 424 | } |
425 | 425 | ||
426 | static u8 intel_sdvo_read_response(struct intel_output *intel_output, | 426 | static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, |
427 | void *response, int response_len) | 427 | void *response, int response_len) |
428 | { | 428 | { |
429 | int i; | 429 | int i; |
@@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output, | |||
433 | while (retry--) { | 433 | while (retry--) { |
434 | /* Read the command response */ | 434 | /* Read the command response */ |
435 | for (i = 0; i < response_len; i++) { | 435 | for (i = 0; i < response_len; i++) { |
436 | intel_sdvo_read_byte(intel_output, | 436 | intel_sdvo_read_byte(intel_encoder, |
437 | SDVO_I2C_RETURN_0 + i, | 437 | SDVO_I2C_RETURN_0 + i, |
438 | &((u8 *)response)[i]); | 438 | &((u8 *)response)[i]); |
439 | } | 439 | } |
440 | 440 | ||
441 | /* read the return status */ | 441 | /* read the return status */ |
442 | intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, | 442 | intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, |
443 | &status); | 443 | &status); |
444 | 444 | ||
445 | intel_sdvo_debug_response(intel_output, response, response_len, | 445 | intel_sdvo_debug_response(intel_encoder, response, response_len, |
446 | status); | 446 | status); |
447 | if (status != SDVO_CMD_STATUS_PENDING) | 447 | if (status != SDVO_CMD_STATUS_PENDING) |
448 | return status; | 448 | return status; |
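
Taken together, intel_sdvo_write_cmd() and intel_sdvo_read_response() implement the SDVO command protocol over the slave's register file: argument bytes are written downward from SDVO_I2C_ARG_0, the opcode is written last to kick off the command, and the driver then polls SDVO_I2C_CMD_STATUS while pulling return bytes from SDVO_I2C_RETURN_0 onward. A condensed sketch of one round trip built from the byte helpers above; the wrapper name is made up, and the real read_response() re-reads the return bytes on every retry while the status is PENDING:

    static u8 sdvo_command_sketch(struct intel_encoder *intel_encoder, u8 cmd,
                                  u8 *args, int args_len,
                                  u8 *resp, int resp_len)
    {
            u8 status;
            int i;

            /* 1) arguments, written downward from SDVO_I2C_ARG_0 */
            for (i = 0; i < args_len; i++)
                    intel_sdvo_write_byte(intel_encoder,
                                          SDVO_I2C_ARG_0 - i, args[i]);

            /* 2) opcode last: this is what starts the command */
            intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);

            /* 3) wait for the device, then collect the return bytes */
            do {
                    intel_sdvo_read_byte(intel_encoder,
                                         SDVO_I2C_CMD_STATUS, &status);
            } while (status == SDVO_CMD_STATUS_PENDING);

            for (i = 0; i < resp_len; i++)
                    intel_sdvo_read_byte(intel_encoder,
                                         SDVO_I2C_RETURN_0 + i, &resp[i]);

            return status;  /* SDVO_CMD_STATUS_SUCCESS on success */
    }
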
@@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
470 | * another I2C transaction after issuing the DDC bus switch, it will be | 470 | * another I2C transaction after issuing the DDC bus switch, it will be |
471 | * switched to the internal SDVO register. | 471 | * switched to the internal SDVO register. |
472 | */ | 472 | */ |
473 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | 473 | static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, |
474 | u8 target) | 474 | u8 target) |
475 | { | 475 | { |
476 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 476 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
477 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | 477 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; |
478 | struct i2c_msg msgs[] = { | 478 | struct i2c_msg msgs[] = { |
479 | { | 479 | { |
@@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
497 | }, | 497 | }, |
498 | }; | 498 | }; |
499 | 499 | ||
500 | intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | 500 | intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
501 | &target, 1); | 501 | &target, 1); |
502 | /* write the DDC switch command argument */ | 502 | /* write the DDC switch command argument */ |
503 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); | 503 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); |
504 | 504 | ||
505 | out_buf[0] = SDVO_I2C_OPCODE; | 505 | out_buf[0] = SDVO_I2C_OPCODE; |
506 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | 506 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; |
@@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
509 | ret_value[0] = 0; | 509 | ret_value[0] = 0; |
510 | ret_value[1] = 0; | 510 | ret_value[1] = 0; |
511 | 511 | ||
512 | ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); | 512 | ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); |
513 | if (ret != 3) { | 513 | if (ret != 3) { |
514 | /* failure in I2C transfer */ | 514 | /* failure in I2C transfer */ |
515 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | 515 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); |
@@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | |||
523 | return; | 523 | return; |
524 | } | 524 | } |
525 | 525 | ||
526 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | 526 | static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) |
527 | { | 527 | { |
528 | struct intel_sdvo_set_target_input_args targets = {0}; | 528 | struct intel_sdvo_set_target_input_args targets = {0}; |
529 | u8 status; | 529 | u8 status; |
@@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool | |||
534 | if (target_1) | 534 | if (target_1) |
535 | targets.target_1 = 1; | 535 | targets.target_1 = 1; |
536 | 536 | ||
537 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, | 537 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, |
538 | sizeof(targets)); | 538 | sizeof(targets)); |
539 | 539 | ||
540 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 540 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
541 | 541 | ||
542 | return (status == SDVO_CMD_STATUS_SUCCESS); | 542 | return (status == SDVO_CMD_STATUS_SUCCESS); |
543 | } | 543 | } |
@@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool | |||
548 | * This function is making an assumption about the layout of the response, | 548 | * This function is making an assumption about the layout of the response, |
549 | * which should be checked against the docs. | 549 | * which should be checked against the docs. |
550 | */ | 550 | */ |
551 | static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) | 551 | static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) |
552 | { | 552 | { |
553 | struct intel_sdvo_get_trained_inputs_response response; | 553 | struct intel_sdvo_get_trained_inputs_response response; |
554 | u8 status; | 554 | u8 status; |
555 | 555 | ||
556 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); | 556 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); |
557 | status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); | 557 | status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); |
558 | if (status != SDVO_CMD_STATUS_SUCCESS) | 558 | if (status != SDVO_CMD_STATUS_SUCCESS) |
559 | return false; | 559 | return false; |
560 | 560 | ||
@@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo | |||
563 | return true; | 563 | return true; |
564 | } | 564 | } |
565 | 565 | ||
566 | static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, | 566 | static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, |
567 | u16 *outputs) | 567 | u16 *outputs) |
568 | { | 568 | { |
569 | u8 status; | 569 | u8 status; |
570 | 570 | ||
571 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); | 571 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); |
572 | status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); | 572 | status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); |
573 | 573 | ||
574 | return (status == SDVO_CMD_STATUS_SUCCESS); | 574 | return (status == SDVO_CMD_STATUS_SUCCESS); |
575 | } | 575 | } |
576 | 576 | ||
577 | static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, | 577 | static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, |
578 | u16 outputs) | 578 | u16 outputs) |
579 | { | 579 | { |
580 | u8 status; | 580 | u8 status; |
581 | 581 | ||
582 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, | 582 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, |
583 | sizeof(outputs)); | 583 | sizeof(outputs)); |
584 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 584 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
585 | return (status == SDVO_CMD_STATUS_SUCCESS); | 585 | return (status == SDVO_CMD_STATUS_SUCCESS); |
586 | } | 586 | } |
587 | 587 | ||
588 | static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, | 588 | static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, |
589 | int mode) | 589 | int mode) |
590 | { | 590 | { |
591 | u8 status, state = SDVO_ENCODER_STATE_ON; | 591 | u8 status, state = SDVO_ENCODER_STATE_ON; |
@@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output | |||
605 | break; | 605 | break; |
606 | } | 606 | } |
607 | 607 | ||
608 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, | 608 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, |
609 | sizeof(state)); | 609 | sizeof(state)); |
610 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 610 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
611 | 611 | ||
612 | return (status == SDVO_CMD_STATUS_SUCCESS); | 612 | return (status == SDVO_CMD_STATUS_SUCCESS); |
613 | } | 613 | } |
614 | 614 | ||
615 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, | 615 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, |
616 | int *clock_min, | 616 | int *clock_min, |
617 | int *clock_max) | 617 | int *clock_max) |
618 | { | 618 | { |
619 | struct intel_sdvo_pixel_clock_range clocks; | 619 | struct intel_sdvo_pixel_clock_range clocks; |
620 | u8 status; | 620 | u8 status; |
621 | 621 | ||
622 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | 622 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, |
623 | NULL, 0); | 623 | NULL, 0); |
624 | 624 | ||
625 | status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); | 625 | status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); |
626 | 626 | ||
627 | if (status != SDVO_CMD_STATUS_SUCCESS) | 627 | if (status != SDVO_CMD_STATUS_SUCCESS) |
628 | return false; | 628 | return false; |
@@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou | |||
634 | return true; | 634 | return true; |
635 | } | 635 | } |
636 | 636 | ||
637 | static bool intel_sdvo_set_target_output(struct intel_output *intel_output, | 637 | static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, |
638 | u16 outputs) | 638 | u16 outputs) |
639 | { | 639 | { |
640 | u8 status; | 640 | u8 status; |
641 | 641 | ||
642 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, | 642 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, |
643 | sizeof(outputs)); | 643 | sizeof(outputs)); |
644 | 644 | ||
645 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 645 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
646 | return (status == SDVO_CMD_STATUS_SUCCESS); | 646 | return (status == SDVO_CMD_STATUS_SUCCESS); |
647 | } | 647 | } |
648 | 648 | ||
649 | static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | 649 | static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, |
650 | struct intel_sdvo_dtd *dtd) | 650 | struct intel_sdvo_dtd *dtd) |
651 | { | 651 | { |
652 | u8 status; | 652 | u8 status; |
653 | 653 | ||
654 | intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); | 654 | intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); |
655 | status = intel_sdvo_read_response(intel_output, &dtd->part1, | 655 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, |
656 | sizeof(dtd->part1)); | 656 | sizeof(dtd->part1)); |
657 | if (status != SDVO_CMD_STATUS_SUCCESS) | 657 | if (status != SDVO_CMD_STATUS_SUCCESS) |
658 | return false; | 658 | return false; |
659 | 659 | ||
660 | intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); | 660 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); |
661 | status = intel_sdvo_read_response(intel_output, &dtd->part2, | 661 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, |
662 | sizeof(dtd->part2)); | 662 | sizeof(dtd->part2)); |
663 | if (status != SDVO_CMD_STATUS_SUCCESS) | 663 | if (status != SDVO_CMD_STATUS_SUCCESS) |
664 | return false; | 664 | return false; |
@@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, | |||
666 | return true; | 666 | return true; |
667 | } | 667 | } |
668 | 668 | ||
669 | static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, | 669 | static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, |
670 | struct intel_sdvo_dtd *dtd) | 670 | struct intel_sdvo_dtd *dtd) |
671 | { | 671 | { |
672 | return intel_sdvo_get_timing(intel_output, | 672 | return intel_sdvo_get_timing(intel_encoder, |
673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); | 673 | SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); |
674 | } | 674 | } |
675 | 675 | ||
676 | static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, | 676 | static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, |
677 | struct intel_sdvo_dtd *dtd) | 677 | struct intel_sdvo_dtd *dtd) |
678 | { | 678 | { |
679 | return intel_sdvo_get_timing(intel_output, | 679 | return intel_sdvo_get_timing(intel_encoder, |
680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); | 680 | SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); |
681 | } | 681 | } |
682 | 682 | ||
683 | static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, | 683 | static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, |
684 | struct intel_sdvo_dtd *dtd) | 684 | struct intel_sdvo_dtd *dtd) |
685 | { | 685 | { |
686 | u8 status; | 686 | u8 status; |
687 | 687 | ||
688 | intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); | 688 | intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); |
689 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 689 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
690 | if (status != SDVO_CMD_STATUS_SUCCESS) | 690 | if (status != SDVO_CMD_STATUS_SUCCESS) |
691 | return false; | 691 | return false; |
692 | 692 | ||
693 | intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); | 693 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); |
694 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 694 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
695 | if (status != SDVO_CMD_STATUS_SUCCESS) | 695 | if (status != SDVO_CMD_STATUS_SUCCESS) |
696 | return false; | 696 | return false; |
697 | 697 | ||
698 | return true; | 698 | return true; |
699 | } | 699 | } |
700 | 700 | ||
701 | static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, | 701 | static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, |
702 | struct intel_sdvo_dtd *dtd) | 702 | struct intel_sdvo_dtd *dtd) |
703 | { | 703 | { |
704 | return intel_sdvo_set_timing(intel_output, | 704 | return intel_sdvo_set_timing(intel_encoder, |
705 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); | 705 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); |
706 | } | 706 | } |
707 | 707 | ||
708 | static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, | 708 | static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, |
709 | struct intel_sdvo_dtd *dtd) | 709 | struct intel_sdvo_dtd *dtd) |
710 | { | 710 | { |
711 | return intel_sdvo_set_timing(intel_output, | 711 | return intel_sdvo_set_timing(intel_encoder, |
712 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); | 712 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); |
713 | } | 713 | } |
714 | 714 | ||
715 | static bool | 715 | static bool |
716 | intel_sdvo_create_preferred_input_timing(struct intel_output *output, | 716 | intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, |
717 | uint16_t clock, | 717 | uint16_t clock, |
718 | uint16_t width, | 718 | uint16_t width, |
719 | uint16_t height) | 719 | uint16_t height) |
720 | { | 720 | { |
721 | struct intel_sdvo_preferred_input_timing_args args; | 721 | struct intel_sdvo_preferred_input_timing_args args; |
722 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 722 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
723 | uint8_t status; | 723 | uint8_t status; |
724 | 724 | ||
725 | memset(&args, 0, sizeof(args)); | 725 | memset(&args, 0, sizeof(args)); |
@@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, | |||
733 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) | 733 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) |
734 | args.scaled = 1; | 734 | args.scaled = 1; |
735 | 735 | ||
736 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | 736 | intel_sdvo_write_cmd(intel_encoder, |
737 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | ||
737 | &args, sizeof(args)); | 738 | &args, sizeof(args)); |
738 | status = intel_sdvo_read_response(output, NULL, 0); | 739 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
739 | if (status != SDVO_CMD_STATUS_SUCCESS) | 740 | if (status != SDVO_CMD_STATUS_SUCCESS) |
740 | return false; | 741 | return false; |
741 | 742 | ||
742 | return true; | 743 | return true; |
743 | } | 744 | } |
744 | 745 | ||
745 | static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | 746 | static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, |
746 | struct intel_sdvo_dtd *dtd) | 747 | struct intel_sdvo_dtd *dtd) |
747 | { | 748 | { |
748 | bool status; | 749 | bool status; |
749 | 750 | ||
750 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | 751 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, |
751 | NULL, 0); | 752 | NULL, 0); |
752 | 753 | ||
753 | status = intel_sdvo_read_response(output, &dtd->part1, | 754 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, |
754 | sizeof(dtd->part1)); | 755 | sizeof(dtd->part1)); |
755 | if (status != SDVO_CMD_STATUS_SUCCESS) | 756 | if (status != SDVO_CMD_STATUS_SUCCESS) |
756 | return false; | 757 | return false; |
757 | 758 | ||
758 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | 759 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, |
759 | NULL, 0); | 760 | NULL, 0); |
760 | 761 | ||
761 | status = intel_sdvo_read_response(output, &dtd->part2, | 762 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, |
762 | sizeof(dtd->part2)); | 763 | sizeof(dtd->part2)); |
763 | if (status != SDVO_CMD_STATUS_SUCCESS) | 764 | if (status != SDVO_CMD_STATUS_SUCCESS) |
764 | return false; | 765 | return false; |
@@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, | |||
766 | return false; | 767 | return false; |
767 | } | 768 | } |
768 | 769 | ||
769 | static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | 770 | static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) |
770 | { | 771 | { |
771 | u8 response, status; | 772 | u8 response, status; |
772 | 773 | ||
773 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); | 774 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); |
774 | status = intel_sdvo_read_response(intel_output, &response, 1); | 775 | status = intel_sdvo_read_response(intel_encoder, &response, 1); |
775 | 776 | ||
776 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 777 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
777 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); | 778 | DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); |
@@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) | |||
783 | return response; | 784 | return response; |
784 | } | 785 | } |
785 | 786 | ||
786 | static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) | 787 | static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) |
787 | { | 788 | { |
788 | u8 status; | 789 | u8 status; |
789 | 790 | ||
790 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); | 791 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); |
791 | status = intel_sdvo_read_response(intel_output, NULL, 0); | 792 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
792 | if (status != SDVO_CMD_STATUS_SUCCESS) | 793 | if (status != SDVO_CMD_STATUS_SUCCESS) |
793 | return false; | 794 | return false; |
794 | 795 | ||
@@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | |||
877 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | 878 | mode->flags |= DRM_MODE_FLAG_PVSYNC; |
878 | } | 879 | } |
879 | 880 | ||
880 | static bool intel_sdvo_get_supp_encode(struct intel_output *output, | 881 | static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, |
881 | struct intel_sdvo_encode *encode) | 882 | struct intel_sdvo_encode *encode) |
882 | { | 883 | { |
883 | uint8_t status; | 884 | uint8_t status; |
884 | 885 | ||
885 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); | 886 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); |
886 | status = intel_sdvo_read_response(output, encode, sizeof(*encode)); | 887 | status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); |
887 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ | 888 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ |
888 | memset(encode, 0, sizeof(*encode)); | 889 | memset(encode, 0, sizeof(*encode)); |
889 | return false; | 890 | return false; |
@@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output, | |||
892 | return true; | 893 | return true; |
893 | } | 894 | } |
894 | 895 | ||
895 | static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) | 896 | static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, |
897 | uint8_t mode) | ||
896 | { | 898 | { |
897 | uint8_t status; | 899 | uint8_t status; |
898 | 900 | ||
899 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); | 901 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); |
900 | status = intel_sdvo_read_response(output, NULL, 0); | 902 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
901 | 903 | ||
902 | return (status == SDVO_CMD_STATUS_SUCCESS); | 904 | return (status == SDVO_CMD_STATUS_SUCCESS); |
903 | } | 905 | } |
904 | 906 | ||
905 | static bool intel_sdvo_set_colorimetry(struct intel_output *output, | 907 | static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, |
906 | uint8_t mode) | 908 | uint8_t mode) |
907 | { | 909 | { |
908 | uint8_t status; | 910 | uint8_t status; |
909 | 911 | ||
910 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); | 912 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); |
911 | status = intel_sdvo_read_response(output, NULL, 0); | 913 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
912 | 914 | ||
913 | return (status == SDVO_CMD_STATUS_SUCCESS); | 915 | return (status == SDVO_CMD_STATUS_SUCCESS); |
914 | } | 916 | } |
915 | 917 | ||
916 | #if 0 | 918 | #if 0 |
917 | static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | 919 | static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) |
918 | { | 920 | { |
919 | int i, j; | 921 | int i, j; |
920 | uint8_t set_buf_index[2]; | 922 | uint8_t set_buf_index[2]; |
@@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) | |||
923 | uint8_t buf[48]; | 925 | uint8_t buf[48]; |
924 | uint8_t *pos; | 926 | uint8_t *pos; |
925 | 927 | ||
926 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); | 928 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); |
927 | intel_sdvo_read_response(output, &av_split, 1); | 929 | intel_sdvo_read_response(encoder, &av_split, 1); |
928 | 930 | ||
929 | for (i = 0; i <= av_split; i++) { | 931 | for (i = 0; i <= av_split; i++) { |
930 | set_buf_index[0] = i; set_buf_index[1] = 0; | 932 | set_buf_index[0] = i; set_buf_index[1] = 0; |
931 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, | 933 | intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, |
932 | set_buf_index, 2); | 934 | set_buf_index, 2); |
933 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); | 935 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); |
934 | intel_sdvo_read_response(output, &buf_size, 1); | 936 | intel_sdvo_read_response(encoder, &buf_size, 1); |
935 | 937 | ||
936 | pos = buf; | 938 | pos = buf; |
937 | for (j = 0; j <= buf_size; j += 8) { | 939 | for (j = 0; j <= buf_size; j += 8) { |
938 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, | 940 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, |
939 | NULL, 0); | 941 | NULL, 0); |
940 | intel_sdvo_read_response(output, pos, 8); | 942 | intel_sdvo_read_response(encoder, pos, 8); |
941 | pos += 8; | 943 | pos += 8; |
942 | } | 944 | } |
943 | } | 945 | } |
944 | } | 946 | } |
945 | #endif | 947 | #endif |
946 | 948 | ||
947 | static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, | 949 | static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, |
948 | uint8_t *data, int8_t size, uint8_t tx_rate) | 950 | int index, |
951 | uint8_t *data, int8_t size, uint8_t tx_rate) | ||
949 | { | 952 | { |
950 | uint8_t set_buf_index[2]; | 953 | uint8_t set_buf_index[2]; |
951 | 954 | ||
952 | set_buf_index[0] = index; | 955 | set_buf_index[0] = index; |
953 | set_buf_index[1] = 0; | 956 | set_buf_index[1] = 0; |
954 | 957 | ||
955 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); | 958 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, |
959 | set_buf_index, 2); | ||
956 | 960 | ||
957 | for (; size > 0; size -= 8) { | 961 | for (; size > 0; size -= 8) { |
958 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); | 962 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); |
959 | data += 8; | 963 | data += 8; |
960 | } | 964 | } |
961 | 965 | ||
962 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | 966 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); |
963 | } | 967 | } |
964 | 968 | ||
965 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | 969 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) |
@@ -1034,7 +1038,7 @@ struct dip_infoframe { | |||
1034 | } __attribute__ ((packed)) u; | 1038 | } __attribute__ ((packed)) u; |
1035 | } __attribute__((packed)); | 1039 | } __attribute__((packed)); |
1036 | 1040 | ||
1037 | static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | 1041 | static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, |
1038 | struct drm_display_mode * mode) | 1042 | struct drm_display_mode * mode) |
1039 | { | 1043 | { |
1040 | struct dip_infoframe avi_if = { | 1044 | struct dip_infoframe avi_if = { |
@@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, | |||
1045 | 1049 | ||
1046 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | 1050 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, |
1047 | 4 + avi_if.len); | 1051 | 4 + avi_if.len); |
1048 | intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, | 1052 | intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, |
1053 | 4 + avi_if.len, | ||
1049 | SDVO_HBUF_TX_VSYNC); | 1054 | SDVO_HBUF_TX_VSYNC); |
1050 | } | 1055 | } |
1051 | 1056 | ||
1052 | static void intel_sdvo_set_tv_format(struct intel_output *output) | 1057 | static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) |
1053 | { | 1058 | { |
1054 | 1059 | ||
1055 | struct intel_sdvo_tv_format format; | 1060 | struct intel_sdvo_tv_format format; |
1056 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1061 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1057 | uint32_t format_map, i; | 1062 | uint32_t format_map, i; |
1058 | uint8_t status; | 1063 | uint8_t status; |
1059 | 1064 | ||
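
In the intel_sdvo_set_avi_infoframe() hunk above, the CEA-861 AVI InfoFrame handed to intel_sdvo_set_hdmi_buf() carries a checksum computed by intel_sdvo_calc_hbuf_csum() over the header plus payload. The helper's body is not part of this diff; the standard InfoFrame rule is that all bytes, checksum included, must sum to zero modulo 256, so it plausibly looks like:

    /* Likely shape of the checksum helper, per the CEA-861 rule that
     * every InfoFrame byte, checksum included, sums to 0 mod 256. */
    static uint8_t calc_infoframe_csum(const uint8_t *data, uint8_t size)
    {
            uint8_t csum = 0;
            int i;

            for (i = 0; i < size; i++)
                    csum += data[i];

            return 0x100 - csum;    /* truncates to the correct byte value */
    }
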
@@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) | |||
1066 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? | 1071 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? |
1067 | sizeof(format) : sizeof(format_map)); | 1072 | sizeof(format) : sizeof(format_map)); |
1068 | 1073 | ||
1069 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, | 1074 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, |
1070 | sizeof(format)); | 1075 | sizeof(format)); |
1071 | 1076 | ||
1072 | status = intel_sdvo_read_response(output, NULL, 0); | 1077 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
1073 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1078 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1074 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", | 1079 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", |
1075 | SDVO_NAME(sdvo_priv)); | 1080 | SDVO_NAME(sdvo_priv)); |
@@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1079 | struct drm_display_mode *mode, | 1084 | struct drm_display_mode *mode, |
1080 | struct drm_display_mode *adjusted_mode) | 1085 | struct drm_display_mode *adjusted_mode) |
1081 | { | 1086 | { |
1082 | struct intel_output *output = enc_to_intel_output(encoder); | 1087 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1083 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | 1088 | struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; |
1084 | 1089 | ||
1085 | if (dev_priv->is_tv) { | 1090 | if (dev_priv->is_tv) { |
1086 | struct intel_sdvo_dtd output_dtd; | 1091 | struct intel_sdvo_dtd output_dtd; |
@@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1095 | 1100 | ||
1096 | /* Set output timings */ | 1101 | /* Set output timings */ |
1097 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | 1102 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
1098 | intel_sdvo_set_target_output(output, | 1103 | intel_sdvo_set_target_output(intel_encoder, |
1099 | dev_priv->controlled_output); | 1104 | dev_priv->controlled_output); |
1100 | intel_sdvo_set_output_timing(output, &output_dtd); | 1105 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
1101 | 1106 | ||
1102 | /* Set the input timing to the screen. Assume always input 0. */ | 1107 | /* Set the input timing to the screen. Assume always input 0. */ |
1103 | intel_sdvo_set_target_input(output, true, false); | 1108 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1104 | 1109 | ||
1105 | 1110 | ||
1106 | success = intel_sdvo_create_preferred_input_timing(output, | 1111 | success = intel_sdvo_create_preferred_input_timing(intel_encoder, |
1107 | mode->clock / 10, | 1112 | mode->clock / 10, |
1108 | mode->hdisplay, | 1113 | mode->hdisplay, |
1109 | mode->vdisplay); | 1114 | mode->vdisplay); |
1110 | if (success) { | 1115 | if (success) { |
1111 | struct intel_sdvo_dtd input_dtd; | 1116 | struct intel_sdvo_dtd input_dtd; |
1112 | 1117 | ||
1113 | intel_sdvo_get_preferred_input_timing(output, | 1118 | intel_sdvo_get_preferred_input_timing(intel_encoder, |
1114 | &input_dtd); | 1119 | &input_dtd); |
1115 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1120 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
1116 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | 1121 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; |
@@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1133 | intel_sdvo_get_dtd_from_mode(&output_dtd, | 1138 | intel_sdvo_get_dtd_from_mode(&output_dtd, |
1134 | dev_priv->sdvo_lvds_fixed_mode); | 1139 | dev_priv->sdvo_lvds_fixed_mode); |
1135 | 1140 | ||
1136 | intel_sdvo_set_target_output(output, | 1141 | intel_sdvo_set_target_output(intel_encoder, |
1137 | dev_priv->controlled_output); | 1142 | dev_priv->controlled_output); |
1138 | intel_sdvo_set_output_timing(output, &output_dtd); | 1143 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); |
1139 | 1144 | ||
1140 | /* Set the input timing to the screen. Assume always input 0. */ | 1145 | /* Set the input timing to the screen. Assume always input 0. */ |
1141 | intel_sdvo_set_target_input(output, true, false); | 1146 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1142 | 1147 | ||
1143 | 1148 | ||
1144 | success = intel_sdvo_create_preferred_input_timing( | 1149 | success = intel_sdvo_create_preferred_input_timing( |
1145 | output, | 1150 | intel_encoder, |
1146 | mode->clock / 10, | 1151 | mode->clock / 10, |
1147 | mode->hdisplay, | 1152 | mode->hdisplay, |
1148 | mode->vdisplay); | 1153 | mode->vdisplay); |
@@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1150 | if (success) { | 1155 | if (success) { |
1151 | struct intel_sdvo_dtd input_dtd; | 1156 | struct intel_sdvo_dtd input_dtd; |
1152 | 1157 | ||
1153 | intel_sdvo_get_preferred_input_timing(output, | 1158 | intel_sdvo_get_preferred_input_timing(intel_encoder, |
1154 | &input_dtd); | 1159 | &input_dtd); |
1155 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1160 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
1156 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | 1161 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; |
@@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1182 | struct drm_i915_private *dev_priv = dev->dev_private; | 1187 | struct drm_i915_private *dev_priv = dev->dev_private; |
1183 | struct drm_crtc *crtc = encoder->crtc; | 1188 | struct drm_crtc *crtc = encoder->crtc; |
1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1189 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1185 | struct intel_output *output = enc_to_intel_output(encoder); | 1190 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1186 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1191 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1187 | u32 sdvox = 0; | 1192 | u32 sdvox = 0; |
1188 | int sdvo_pixel_multiply; | 1193 | int sdvo_pixel_multiply; |
1189 | struct intel_sdvo_in_out_map in_out; | 1194 | struct intel_sdvo_in_out_map in_out; |
@@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1202 | in_out.in0 = sdvo_priv->controlled_output; | 1207 | in_out.in0 = sdvo_priv->controlled_output; |
1203 | in_out.in1 = 0; | 1208 | in_out.in1 = 0; |
1204 | 1209 | ||
1205 | intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, | 1210 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, |
1206 | &in_out, sizeof(in_out)); | 1211 | &in_out, sizeof(in_out)); |
1207 | status = intel_sdvo_read_response(output, NULL, 0); | 1212 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); |
1208 | 1213 | ||
1209 | if (sdvo_priv->is_hdmi) { | 1214 | if (sdvo_priv->is_hdmi) { |
1210 | intel_sdvo_set_avi_infoframe(output, mode); | 1215 | intel_sdvo_set_avi_infoframe(intel_encoder, mode); |
1211 | sdvox |= SDVO_AUDIO_ENABLE; | 1216 | sdvox |= SDVO_AUDIO_ENABLE; |
1212 | } | 1217 | } |
1213 | 1218 | ||
@@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1224 | */ | 1229 | */ |
1225 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { | 1230 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { |
1226 | /* Set the output timing to the screen */ | 1231 | /* Set the output timing to the screen */ |
1227 | intel_sdvo_set_target_output(output, | 1232 | intel_sdvo_set_target_output(intel_encoder, |
1228 | sdvo_priv->controlled_output); | 1233 | sdvo_priv->controlled_output); |
1229 | intel_sdvo_set_output_timing(output, &input_dtd); | 1234 | intel_sdvo_set_output_timing(intel_encoder, &input_dtd); |
1230 | } | 1235 | } |
1231 | 1236 | ||
1232 | /* Set the input timing to the screen. Assume always input 0. */ | 1237 | /* Set the input timing to the screen. Assume always input 0. */ |
1233 | intel_sdvo_set_target_input(output, true, false); | 1238 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1234 | 1239 | ||
1235 | if (sdvo_priv->is_tv) | 1240 | if (sdvo_priv->is_tv) |
1236 | intel_sdvo_set_tv_format(output); | 1241 | intel_sdvo_set_tv_format(intel_encoder); |
1237 | 1242 | ||
1238 | /* We would like to use intel_sdvo_create_preferred_input_timing() to | 1243 | /* We would like to use intel_sdvo_create_preferred_input_timing() to |
1239 | * provide the device with a timing it can support, if it supports that | 1244 | * provide the device with a timing it can support, if it supports that |
@@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1241 | * output the preferred timing, and we don't support that currently. | 1246 | * output the preferred timing, and we don't support that currently. |
1242 | */ | 1247 | */ |
1243 | #if 0 | 1248 | #if 0 |
1244 | success = intel_sdvo_create_preferred_input_timing(output, clock, | 1249 | success = intel_sdvo_create_preferred_input_timing(encoder, clock, |
1245 | width, height); | 1250 | width, height); |
1246 | if (success) { | 1251 | if (success) { |
1247 | struct intel_sdvo_dtd *input_dtd; | 1252 | struct intel_sdvo_dtd *input_dtd; |
1248 | 1253 | ||
1249 | intel_sdvo_get_preferred_input_timing(output, &input_dtd); | 1254 | intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); |
1250 | intel_sdvo_set_input_timing(output, &input_dtd); | 1255 | intel_sdvo_set_input_timing(encoder, &input_dtd); |
1251 | } | 1256 | } |
1252 | #else | 1257 | #else |
1253 | intel_sdvo_set_input_timing(output, &input_dtd); | 1258 | intel_sdvo_set_input_timing(intel_encoder, &input_dtd); |
1254 | #endif | 1259 | #endif |
1255 | 1260 | ||
1256 | switch (intel_sdvo_get_pixel_multiplier(mode)) { | 1261 | switch (intel_sdvo_get_pixel_multiplier(mode)) { |
1257 | case 1: | 1262 | case 1: |
1258 | intel_sdvo_set_clock_rate_mult(output, | 1263 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
1259 | SDVO_CLOCK_RATE_MULT_1X); | 1264 | SDVO_CLOCK_RATE_MULT_1X); |
1260 | break; | 1265 | break; |
1261 | case 2: | 1266 | case 2: |
1262 | intel_sdvo_set_clock_rate_mult(output, | 1267 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
1263 | SDVO_CLOCK_RATE_MULT_2X); | 1268 | SDVO_CLOCK_RATE_MULT_2X); |
1264 | break; | 1269 | break; |
1265 | case 4: | 1270 | case 4: |
1266 | intel_sdvo_set_clock_rate_mult(output, | 1271 | intel_sdvo_set_clock_rate_mult(intel_encoder, |
1267 | SDVO_CLOCK_RATE_MULT_4X); | 1272 | SDVO_CLOCK_RATE_MULT_4X); |
1268 | break; | 1273 | break; |
1269 | } | 1274 | } |
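
The switch above programs the encoder's clock-rate multiplier from intel_sdvo_get_pixel_multiplier(). The multiplier exists because the SDVO input clock has to sit in a fixed window (roughly 100 to 200 MHz), so low-dot-clock modes are driven at 2x or 4x and divided back down inside the encoder. The selection helper is defined earlier in this file and not shown here; a sketch of the rule under that assumption:

    /* Assumed rule: pick the multiplier that lifts the dot clock into
     * the ~100-200 MHz SDVO input range. */
    static int sdvo_pixel_multiplier_sketch(int dotclock_khz)
    {
            if (dotclock_khz >= 100000)
                    return 1;
            if (dotclock_khz >= 50000)
                    return 2;
            return 4;       /* modes between 25 and 50 MHz run at 4x */
    }
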
@@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1274 | SDVO_VSYNC_ACTIVE_HIGH | | 1279 | SDVO_VSYNC_ACTIVE_HIGH | |
1275 | SDVO_HSYNC_ACTIVE_HIGH; | 1280 | SDVO_HSYNC_ACTIVE_HIGH; |
1276 | } else { | 1281 | } else { |
1277 | sdvox |= I915_READ(sdvo_priv->output_device); | 1282 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); |
1278 | switch (sdvo_priv->output_device) { | 1283 | switch (sdvo_priv->sdvo_reg) { |
1279 | case SDVOB: | 1284 | case SDVOB: |
1280 | sdvox &= SDVOB_PRESERVE_MASK; | 1285 | sdvox &= SDVOB_PRESERVE_MASK; |
1281 | break; | 1286 | break; |
@@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1299 | 1304 | ||
1300 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) | 1305 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) |
1301 | sdvox |= SDVO_STALL_SELECT; | 1306 | sdvox |= SDVO_STALL_SELECT; |
1302 | intel_sdvo_write_sdvox(output, sdvox); | 1307 | intel_sdvo_write_sdvox(intel_encoder, sdvox); |
1303 | } | 1308 | } |
1304 | 1309 | ||
1305 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | 1310 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) |
1306 | { | 1311 | { |
1307 | struct drm_device *dev = encoder->dev; | 1312 | struct drm_device *dev = encoder->dev; |
1308 | struct drm_i915_private *dev_priv = dev->dev_private; | 1313 | struct drm_i915_private *dev_priv = dev->dev_private; |
1309 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1314 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1310 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1315 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1311 | u32 temp; | 1316 | u32 temp; |
1312 | 1317 | ||
1313 | if (mode != DRM_MODE_DPMS_ON) { | 1318 | if (mode != DRM_MODE_DPMS_ON) { |
1314 | intel_sdvo_set_active_outputs(intel_output, 0); | 1319 | intel_sdvo_set_active_outputs(intel_encoder, 0); |
1315 | if (0) | 1320 | if (0) |
1316 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1321 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
1317 | 1322 | ||
1318 | if (mode == DRM_MODE_DPMS_OFF) { | 1323 | if (mode == DRM_MODE_DPMS_OFF) { |
1319 | temp = I915_READ(sdvo_priv->output_device); | 1324 | temp = I915_READ(sdvo_priv->sdvo_reg); |
1320 | if ((temp & SDVO_ENABLE) != 0) { | 1325 | if ((temp & SDVO_ENABLE) != 0) { |
1321 | intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); | 1326 | intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); |
1322 | } | 1327 | } |
1323 | } | 1328 | } |
1324 | } else { | 1329 | } else { |
@@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1326 | int i; | 1331 | int i; |
1327 | u8 status; | 1332 | u8 status; |
1328 | 1333 | ||
1329 | temp = I915_READ(sdvo_priv->output_device); | 1334 | temp = I915_READ(sdvo_priv->sdvo_reg); |
1330 | if ((temp & SDVO_ENABLE) == 0) | 1335 | if ((temp & SDVO_ENABLE) == 0) |
1331 | intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); | 1336 | intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); |
1332 | for (i = 0; i < 2; i++) | 1337 | for (i = 0; i < 2; i++) |
1333 | intel_wait_for_vblank(dev); | 1338 | intel_wait_for_vblank(dev); |
1334 | 1339 | ||
1335 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, | 1340 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, |
1336 | &input2); | 1341 | &input2); |
1337 | 1342 | ||
1338 | 1343 | ||
@@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1346 | } | 1351 | } |
1347 | 1352 | ||
1348 | if (0) | 1353 | if (0) |
1349 | intel_sdvo_set_encoder_power_state(intel_output, mode); | 1354 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); |
1350 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); | 1355 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); |
1351 | } | 1356 | } |
1352 | return; | 1357 | return; |
1353 | } | 1358 | } |
@@ -1356,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
1356 | { | 1361 | { |
1357 | struct drm_device *dev = connector->dev; | 1362 | struct drm_device *dev = connector->dev; |
1358 | struct drm_i915_private *dev_priv = dev->dev_private; | 1363 | struct drm_i915_private *dev_priv = dev->dev_private; |
1359 | struct intel_output *intel_output = to_intel_output(connector); | 1364 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1360 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1365 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1361 | int o; | 1366 | int o; |
1362 | 1367 | ||
1363 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); | 1368 | sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); |
1364 | intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); | 1369 | intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); |
1365 | 1370 | ||
1366 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | 1371 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { |
1367 | intel_sdvo_set_target_input(intel_output, true, false); | 1372 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1368 | intel_sdvo_get_input_timing(intel_output, | 1373 | intel_sdvo_get_input_timing(intel_encoder, |
1369 | &sdvo_priv->save_input_dtd_1); | 1374 | &sdvo_priv->save_input_dtd_1); |
1370 | } | 1375 | } |
1371 | 1376 | ||
1372 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | 1377 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { |
1373 | intel_sdvo_set_target_input(intel_output, false, true); | 1378 | intel_sdvo_set_target_input(intel_encoder, false, true); |
1374 | intel_sdvo_get_input_timing(intel_output, | 1379 | intel_sdvo_get_input_timing(intel_encoder, |
1375 | &sdvo_priv->save_input_dtd_2); | 1380 | &sdvo_priv->save_input_dtd_2); |
1376 | } | 1381 | } |
1377 | 1382 | ||
@@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
1380 | u16 this_output = (1 << o); | 1385 | u16 this_output = (1 << o); |
1381 | if (sdvo_priv->caps.output_flags & this_output) | 1386 | if (sdvo_priv->caps.output_flags & this_output) |
1382 | { | 1387 | { |
1383 | intel_sdvo_set_target_output(intel_output, this_output); | 1388 | intel_sdvo_set_target_output(intel_encoder, this_output); |
1384 | intel_sdvo_get_output_timing(intel_output, | 1389 | intel_sdvo_get_output_timing(intel_encoder, |
1385 | &sdvo_priv->save_output_dtd[o]); | 1390 | &sdvo_priv->save_output_dtd[o]); |
1386 | } | 1391 | } |
1387 | } | 1392 | } |
@@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector) | |||
1389 | /* XXX: Save TV format/enhancements. */ | 1394 | /* XXX: Save TV format/enhancements. */ |
1390 | } | 1395 | } |
1391 | 1396 | ||
1392 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); | 1397 | sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); |
1393 | } | 1398 | } |
1394 | 1399 | ||
1395 | static void intel_sdvo_restore(struct drm_connector *connector) | 1400 | static void intel_sdvo_restore(struct drm_connector *connector) |
1396 | { | 1401 | { |
1397 | struct drm_device *dev = connector->dev; | 1402 | struct drm_device *dev = connector->dev; |
1398 | struct intel_output *intel_output = to_intel_output(connector); | 1403 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1399 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1404 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1400 | int o; | 1405 | int o; |
1401 | int i; | 1406 | int i; |
1402 | bool input1, input2; | 1407 | bool input1, input2; |
1403 | u8 status; | 1408 | u8 status; |
1404 | 1409 | ||
1405 | intel_sdvo_set_active_outputs(intel_output, 0); | 1410 | intel_sdvo_set_active_outputs(intel_encoder, 0); |
1406 | 1411 | ||
1407 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) | 1412 | for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) |
1408 | { | 1413 | { |
1409 | u16 this_output = (1 << o); | 1414 | u16 this_output = (1 << o); |
1410 | if (sdvo_priv->caps.output_flags & this_output) { | 1415 | if (sdvo_priv->caps.output_flags & this_output) { |
1411 | intel_sdvo_set_target_output(intel_output, this_output); | 1416 | intel_sdvo_set_target_output(intel_encoder, this_output); |
1412 | intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); | 1417 | intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); |
1413 | } | 1418 | } |
1414 | } | 1419 | } |
1415 | 1420 | ||
1416 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { | 1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { |
1417 | intel_sdvo_set_target_input(intel_output, true, false); | 1422 | intel_sdvo_set_target_input(intel_encoder, true, false); |
1418 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); | 1423 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); |
1419 | } | 1424 | } |
1420 | 1425 | ||
1421 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { | 1426 | if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { |
1422 | intel_sdvo_set_target_input(intel_output, false, true); | 1427 | intel_sdvo_set_target_input(intel_encoder, false, true); |
1423 | intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); | 1428 | intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); |
1424 | } | 1429 | } |
1425 | 1430 | ||
1426 | intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); | 1431 | intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); |
1427 | 1432 | ||
1428 | if (sdvo_priv->is_tv) { | 1433 | if (sdvo_priv->is_tv) { |
1429 | /* XXX: Restore TV format/enhancements. */ | 1434 | /* XXX: Restore TV format/enhancements. */ |
1430 | } | 1435 | } |
1431 | 1436 | ||
1432 | intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); | 1437 | intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); |
1433 | 1438 | ||
1434 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) | 1439 | if (sdvo_priv->save_SDVOX & SDVO_ENABLE) |
1435 | { | 1440 | { |
1436 | for (i = 0; i < 2; i++) | 1441 | for (i = 0; i < 2; i++) |
1437 | intel_wait_for_vblank(dev); | 1442 | intel_wait_for_vblank(dev); |
1438 | status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); | 1443 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); |
1439 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) | 1444 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) |
1440 | DRM_DEBUG_KMS("First %s output reported failure to " | 1445 | DRM_DEBUG_KMS("First %s output reported failure to " |
1441 | "sync\n", SDVO_NAME(sdvo_priv)); | 1446 | "sync\n", SDVO_NAME(sdvo_priv)); |
1442 | } | 1447 | } |
1443 | 1448 | ||
1444 | intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); | 1449 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); |
1445 | } | 1450 | } |
1446 | 1451 | ||
1447 | static int intel_sdvo_mode_valid(struct drm_connector *connector, | 1452 | static int intel_sdvo_mode_valid(struct drm_connector *connector, |
1448 | struct drm_display_mode *mode) | 1453 | struct drm_display_mode *mode) |
1449 | { | 1454 | { |
1450 | struct intel_output *intel_output = to_intel_output(connector); | 1455 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1451 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1456 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1452 | 1457 | ||
1453 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1458 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
1454 | return MODE_NO_DBLESCAN; | 1459 | return MODE_NO_DBLESCAN; |
@@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1473 | return MODE_OK; | 1478 | return MODE_OK; |
1474 | } | 1479 | } |
1475 | 1480 | ||
1476 | static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) | 1481 | static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) |
1477 | { | 1482 | { |
1478 | u8 status; | 1483 | u8 status; |
1479 | 1484 | ||
1480 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); | 1485 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); |
1481 | status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); | 1486 | status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); |
1482 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1487 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1483 | return false; | 1488 | return false; |
1484 | 1489 | ||
@@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc | |||
1488 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | 1493 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) |
1489 | { | 1494 | { |
1490 | struct drm_connector *connector = NULL; | 1495 | struct drm_connector *connector = NULL; |
1491 | struct intel_output *iout = NULL; | 1496 | struct intel_encoder *iout = NULL; |
1492 | struct intel_sdvo_priv *sdvo; | 1497 | struct intel_sdvo_priv *sdvo; |
1493 | 1498 | ||
1494 | /* find the sdvo connector */ | 1499 | /* find the sdvo connector */ |
1495 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1500 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1496 | iout = to_intel_output(connector); | 1501 | iout = to_intel_encoder(connector); |
1497 | 1502 | ||
1498 | if (iout->type != INTEL_OUTPUT_SDVO) | 1503 | if (iout->type != INTEL_OUTPUT_SDVO) |
1499 | continue; | 1504 | continue; |
1500 | 1505 | ||
1501 | sdvo = iout->dev_priv; | 1506 | sdvo = iout->dev_priv; |
1502 | 1507 | ||
1503 | if (sdvo->output_device == SDVOB && sdvoB) | 1508 | if (sdvo->sdvo_reg == SDVOB && sdvoB) |
1504 | return connector; | 1509 | return connector; |
1505 | 1510 | ||
1506 | if (sdvo->output_device == SDVOC && !sdvoB) | 1511 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) |
1507 | return connector; | 1512 | return connector; |
1508 | 1513 | ||
1509 | } | 1514 | } |
@@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) | |||
1515 | { | 1520 | { |
1516 | u8 response[2]; | 1521 | u8 response[2]; |
1517 | u8 status; | 1522 | u8 status; |
1518 | struct intel_output *intel_output; | 1523 | struct intel_encoder *intel_encoder; |
1519 | DRM_DEBUG_KMS("\n"); | 1524 | DRM_DEBUG_KMS("\n"); |
1520 | 1525 | ||
1521 | if (!connector) | 1526 | if (!connector) |
1522 | return 0; | 1527 | return 0; |
1523 | 1528 | ||
1524 | intel_output = to_intel_output(connector); | 1529 | intel_encoder = to_intel_encoder(connector); |
1525 | 1530 | ||
1526 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1531 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
1527 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1532 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
1528 | 1533 | ||
1529 | if (response[0] !=0) | 1534 | if (response[0] !=0) |
1530 | return 1; | 1535 | return 1; |
@@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1536 | { | 1541 | { |
1537 | u8 response[2]; | 1542 | u8 response[2]; |
1538 | u8 status; | 1543 | u8 status; |
1539 | struct intel_output *intel_output = to_intel_output(connector); | 1544 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1540 | 1545 | ||
1541 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1546 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1542 | intel_sdvo_read_response(intel_output, &response, 2); | 1547 | intel_sdvo_read_response(intel_encoder, &response, 2); |
1543 | 1548 | ||
1544 | if (on) { | 1549 | if (on) { |
1545 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1550 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
1546 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1551 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
1547 | 1552 | ||
1548 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1553 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
1549 | } else { | 1554 | } else { |
1550 | response[0] = 0; | 1555 | response[0] = 0; |
1551 | response[1] = 0; | 1556 | response[1] = 0; |
1552 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1557 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
1553 | } | 1558 | } |
1554 | 1559 | ||
1555 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1560 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1556 | intel_sdvo_read_response(intel_output, &response, 2); | 1561 | intel_sdvo_read_response(intel_encoder, &response, 2); |
1557 | } | 1562 | } |
1558 | 1563 | ||
1559 | static bool | 1564 | static bool |
1560 | intel_sdvo_multifunc_encoder(struct intel_output *intel_output) | 1565 | intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) |
1561 | { | 1566 | { |
1562 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1567 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1563 | int caps = 0; | 1568 | int caps = 0; |
1564 | 1569 | ||
1565 | if (sdvo_priv->caps.output_flags & | 1570 | if (sdvo_priv->caps.output_flags & |
@@ -1593,11 +1598,11 @@ static struct drm_connector * | |||
1593 | intel_find_analog_connector(struct drm_device *dev) | 1598 | intel_find_analog_connector(struct drm_device *dev) |
1594 | { | 1599 | { |
1595 | struct drm_connector *connector; | 1600 | struct drm_connector *connector; |
1596 | struct intel_output *intel_output; | 1601 | struct intel_encoder *intel_encoder; |
1597 | 1602 | ||
1598 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1603 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1599 | intel_output = to_intel_output(connector); | 1604 | intel_encoder = to_intel_encoder(connector); |
1600 | if (intel_output->type == INTEL_OUTPUT_ANALOG) | 1605 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) |
1601 | return connector; | 1606 | return connector; |
1602 | } | 1607 | } |
1603 | return NULL; | 1608 | return NULL; |
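The hunks above rename every to_intel_output() cast to to_intel_encoder(); the helper's own definition sits outside this diff. As a rough sketch of the container_of-style pattern such a cast helper usually relies on — the struct layout, field names, and values below are simplified assumptions for illustration, not the driver's actual definitions — a standalone C example:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the DRM structures; the real layouts differ. */
struct drm_connector { int id; };

struct intel_encoder {
    struct drm_connector base;   /* embedded base object handed to callbacks */
    int type;
    void *dev_priv;              /* points at driver-private data */
};

/* container_of-style cast: recover the wrapper from a pointer to its base. */
#define to_intel_encoder(c) \
    ((struct intel_encoder *)((char *)(c) - offsetof(struct intel_encoder, base)))

int main(void)
{
    struct intel_encoder enc = { .base = { .id = 42 }, .type = 3 };
    struct drm_connector *connector = &enc.base;   /* what a callback receives */

    struct intel_encoder *back = to_intel_encoder(connector);
    printf("type=%d id=%d\n", back->type, back->base.id);
    return 0;
}

The cast only recovers the wrapper because the connector is embedded inside the encoder structure; the renamed callbacks in these hunks rely on that same embedding.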
@@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev) | |||
1622 | enum drm_connector_status | 1627 | enum drm_connector_status |
1623 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | 1628 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) |
1624 | { | 1629 | { |
1625 | struct intel_output *intel_output = to_intel_output(connector); | 1630 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1626 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1631 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1627 | enum drm_connector_status status = connector_status_connected; | 1632 | enum drm_connector_status status = connector_status_connected; |
1628 | struct edid *edid = NULL; | 1633 | struct edid *edid = NULL; |
1629 | 1634 | ||
1630 | edid = drm_get_edid(&intel_output->base, | 1635 | edid = drm_get_edid(&intel_encoder->base, |
1631 | intel_output->ddc_bus); | 1636 | intel_encoder->ddc_bus); |
1632 | 1637 | ||
1633 | /* This is only applied to SDVO cards with multiple outputs */ | 1638 | /* This is only applied to SDVO cards with multiple outputs */ |
1634 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { | 1639 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { |
1635 | uint8_t saved_ddc, temp_ddc; | 1640 | uint8_t saved_ddc, temp_ddc; |
1636 | saved_ddc = sdvo_priv->ddc_bus; | 1641 | saved_ddc = sdvo_priv->ddc_bus; |
1637 | temp_ddc = sdvo_priv->ddc_bus >> 1; | 1642 | temp_ddc = sdvo_priv->ddc_bus >> 1; |
@@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1641 | */ | 1646 | */ |
1642 | while(temp_ddc > 1) { | 1647 | while(temp_ddc > 1) { |
1643 | sdvo_priv->ddc_bus = temp_ddc; | 1648 | sdvo_priv->ddc_bus = temp_ddc; |
1644 | edid = drm_get_edid(&intel_output->base, | 1649 | edid = drm_get_edid(&intel_encoder->base, |
1645 | intel_output->ddc_bus); | 1650 | intel_encoder->ddc_bus); |
1646 | if (edid) { | 1651 | if (edid) { |
1647 | /* | 1652 | /* |
1648 | * When we can get the EDID, maybe it is the | 1653 | * When we can get the EDID, maybe it is the |
@@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1661 | */ | 1666 | */ |
1662 | if (edid == NULL && | 1667 | if (edid == NULL && |
1663 | sdvo_priv->analog_ddc_bus && | 1668 | sdvo_priv->analog_ddc_bus && |
1664 | !intel_analog_is_connected(intel_output->base.dev)) | 1669 | !intel_analog_is_connected(intel_encoder->base.dev)) |
1665 | edid = drm_get_edid(&intel_output->base, | 1670 | edid = drm_get_edid(&intel_encoder->base, |
1666 | sdvo_priv->analog_ddc_bus); | 1671 | sdvo_priv->analog_ddc_bus); |
1667 | if (edid != NULL) { | 1672 | if (edid != NULL) { |
1668 | /* Don't report the output as connected if it's a DVI-I | 1673 | /* Don't report the output as connected if it's a DVI-I |
@@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1677 | } | 1682 | } |
1678 | 1683 | ||
1679 | kfree(edid); | 1684 | kfree(edid); |
1680 | intel_output->base.display_info.raw_edid = NULL; | 1685 | intel_encoder->base.display_info.raw_edid = NULL; |
1681 | 1686 | ||
1682 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | 1687 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) |
1683 | status = connector_status_disconnected; | 1688 | status = connector_status_disconnected; |
@@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1689 | { | 1694 | { |
1690 | uint16_t response; | 1695 | uint16_t response; |
1691 | u8 status; | 1696 | u8 status; |
1692 | struct intel_output *intel_output = to_intel_output(connector); | 1697 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1693 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1698 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1694 | 1699 | ||
1695 | intel_sdvo_write_cmd(intel_output, | 1700 | intel_sdvo_write_cmd(intel_encoder, |
1696 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1701 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
1697 | if (sdvo_priv->is_tv) { | 1702 | if (sdvo_priv->is_tv) { |
1698 | /* add 30ms delay when the output type is SDVO-TV */ | 1703 | /* add 30ms delay when the output type is SDVO-TV */ |
1699 | mdelay(30); | 1704 | mdelay(30); |
1700 | } | 1705 | } |
1701 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1706 | status = intel_sdvo_read_response(intel_encoder, &response, 2); |
1702 | 1707 | ||
1703 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); | 1708 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); |
1704 | 1709 | ||
@@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1708 | if (response == 0) | 1713 | if (response == 0) |
1709 | return connector_status_disconnected; | 1714 | return connector_status_disconnected; |
1710 | 1715 | ||
1711 | if (intel_sdvo_multifunc_encoder(intel_output) && | 1716 | if (intel_sdvo_multifunc_encoder(intel_encoder) && |
1712 | sdvo_priv->attached_output != response) { | 1717 | sdvo_priv->attached_output != response) { |
1713 | if (sdvo_priv->controlled_output != response && | 1718 | if (sdvo_priv->controlled_output != response && |
1714 | intel_sdvo_output_setup(intel_output, response) != true) | 1719 | intel_sdvo_output_setup(intel_encoder, response) != true) |
1715 | return connector_status_unknown; | 1720 | return connector_status_unknown; |
1716 | sdvo_priv->attached_output = response; | 1721 | sdvo_priv->attached_output = response; |
1717 | } | 1722 | } |
@@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1720 | 1725 | ||
1721 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1726 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
1722 | { | 1727 | { |
1723 | struct intel_output *intel_output = to_intel_output(connector); | 1728 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1724 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1729 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1725 | int num_modes; | 1730 | int num_modes; |
1726 | 1731 | ||
1727 | /* set the bus switch and get the modes */ | 1732 | /* set the bus switch and get the modes */ |
1728 | num_modes = intel_ddc_get_modes(intel_output); | 1733 | num_modes = intel_ddc_get_modes(intel_encoder); |
1729 | 1734 | ||
1730 | /* | 1735 | /* |
1731 | * Mac mini hack. On this device, the DVI-I connector shares one DDC | 1736 | * Mac mini hack. On this device, the DVI-I connector shares one DDC |
@@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1735 | */ | 1740 | */ |
1736 | if (num_modes == 0 && | 1741 | if (num_modes == 0 && |
1737 | sdvo_priv->analog_ddc_bus && | 1742 | sdvo_priv->analog_ddc_bus && |
1738 | !intel_analog_is_connected(intel_output->base.dev)) { | 1743 | !intel_analog_is_connected(intel_encoder->base.dev)) { |
1739 | struct i2c_adapter *digital_ddc_bus; | 1744 | struct i2c_adapter *digital_ddc_bus; |
1740 | 1745 | ||
1741 | /* Switch to the analog ddc bus and try that | 1746 | /* Switch to the analog ddc bus and try that |
1742 | */ | 1747 | */ |
1743 | digital_ddc_bus = intel_output->ddc_bus; | 1748 | digital_ddc_bus = intel_encoder->ddc_bus; |
1744 | intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; | 1749 | intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; |
1745 | 1750 | ||
1746 | (void) intel_ddc_get_modes(intel_output); | 1751 | (void) intel_ddc_get_modes(intel_encoder); |
1747 | 1752 | ||
1748 | intel_output->ddc_bus = digital_ddc_bus; | 1753 | intel_encoder->ddc_bus = digital_ddc_bus; |
1749 | } | 1754 | } |
1750 | } | 1755 | } |
1751 | 1756 | ||
@@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = { | |||
1816 | 1821 | ||
1817 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | 1822 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) |
1818 | { | 1823 | { |
1819 | struct intel_output *output = to_intel_output(connector); | 1824 | struct intel_encoder *output = to_intel_encoder(connector); |
1820 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1825 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
1821 | struct intel_sdvo_sdtv_resolution_request tv_res; | 1826 | struct intel_sdvo_sdtv_resolution_request tv_res; |
1822 | uint32_t reply = 0, format_map = 0; | 1827 | uint32_t reply = 0, format_map = 0; |
@@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1858 | 1863 | ||
1859 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1864 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1860 | { | 1865 | { |
1861 | struct intel_output *intel_output = to_intel_output(connector); | 1866 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1862 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1867 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1863 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1868 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1864 | struct drm_display_mode *newmode; | 1869 | struct drm_display_mode *newmode; |
1865 | 1870 | ||
1866 | /* | 1871 | /* |
@@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1868 | * Assume that the preferred modes are | 1873 | * Assume that the preferred modes are |
1869 | * arranged in priority order. | 1874 | * arranged in priority order. |
1870 | */ | 1875 | */ |
1871 | intel_ddc_get_modes(intel_output); | 1876 | intel_ddc_get_modes(intel_encoder); |
1872 | if (list_empty(&connector->probed_modes) == false) | 1877 | if (list_empty(&connector->probed_modes) == false) |
1873 | goto end; | 1878 | goto end; |
1874 | 1879 | ||
@@ -1897,7 +1902,7 @@ end: | |||
1897 | 1902 | ||
1898 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1903 | static int intel_sdvo_get_modes(struct drm_connector *connector) |
1899 | { | 1904 | { |
1900 | struct intel_output *output = to_intel_output(connector); | 1905 | struct intel_encoder *output = to_intel_encoder(connector); |
1901 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1906 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
1902 | 1907 | ||
1903 | if (sdvo_priv->is_tv) | 1908 | if (sdvo_priv->is_tv) |
@@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
1915 | static | 1920 | static |
1916 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | 1921 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) |
1917 | { | 1922 | { |
1918 | struct intel_output *intel_output = to_intel_output(connector); | 1923 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1919 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1924 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1920 | struct drm_device *dev = connector->dev; | 1925 | struct drm_device *dev = connector->dev; |
1921 | 1926 | ||
1922 | if (sdvo_priv->is_tv) { | 1927 | if (sdvo_priv->is_tv) { |
@@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | |||
1953 | 1958 | ||
1954 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1959 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1955 | { | 1960 | { |
1956 | struct intel_output *intel_output = to_intel_output(connector); | 1961 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1957 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1962 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1958 | 1963 | ||
1959 | if (intel_output->i2c_bus) | 1964 | if (intel_encoder->i2c_bus) |
1960 | intel_i2c_destroy(intel_output->i2c_bus); | 1965 | intel_i2c_destroy(intel_encoder->i2c_bus); |
1961 | if (intel_output->ddc_bus) | 1966 | if (intel_encoder->ddc_bus) |
1962 | intel_i2c_destroy(intel_output->ddc_bus); | 1967 | intel_i2c_destroy(intel_encoder->ddc_bus); |
1963 | if (sdvo_priv->analog_ddc_bus) | 1968 | if (sdvo_priv->analog_ddc_bus) |
1964 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 1969 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); |
1965 | 1970 | ||
@@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1977 | drm_sysfs_connector_remove(connector); | 1982 | drm_sysfs_connector_remove(connector); |
1978 | drm_connector_cleanup(connector); | 1983 | drm_connector_cleanup(connector); |
1979 | 1984 | ||
1980 | kfree(intel_output); | 1985 | kfree(intel_encoder); |
1981 | } | 1986 | } |
1982 | 1987 | ||
1983 | static int | 1988 | static int |
@@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1985 | struct drm_property *property, | 1990 | struct drm_property *property, |
1986 | uint64_t val) | 1991 | uint64_t val) |
1987 | { | 1992 | { |
1988 | struct intel_output *intel_output = to_intel_output(connector); | 1993 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1989 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1994 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
1990 | struct drm_encoder *encoder = &intel_output->enc; | 1995 | struct drm_encoder *encoder = &intel_encoder->enc; |
1991 | struct drm_crtc *crtc = encoder->crtc; | 1996 | struct drm_crtc *crtc = encoder->crtc; |
1992 | int ret = 0; | 1997 | int ret = 0; |
1993 | bool changed = false; | 1998 | bool changed = false; |
@@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
2095 | sdvo_priv->cur_brightness = temp_value; | 2100 | sdvo_priv->cur_brightness = temp_value; |
2096 | } | 2101 | } |
2097 | if (cmd) { | 2102 | if (cmd) { |
2098 | intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); | 2103 | intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); |
2099 | status = intel_sdvo_read_response(intel_output, | 2104 | status = intel_sdvo_read_response(intel_encoder, |
2100 | NULL, 0); | 2105 | NULL, 0); |
2101 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2106 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2102 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); | 2107 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); |
@@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) | |||
2191 | } | 2196 | } |
2192 | 2197 | ||
2193 | static bool | 2198 | static bool |
2194 | intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | 2199 | intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) |
2195 | { | 2200 | { |
2196 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 2201 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; |
2197 | uint8_t status; | 2202 | uint8_t status; |
@@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) | |||
2205 | return true; | 2210 | return true; |
2206 | } | 2211 | } |
2207 | 2212 | ||
2208 | static struct intel_output * | 2213 | static struct intel_encoder * |
2209 | intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) | 2214 | intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) |
2210 | { | 2215 | { |
2211 | struct drm_device *dev = chan->drm_dev; | 2216 | struct drm_device *dev = chan->drm_dev; |
2212 | struct drm_connector *connector; | 2217 | struct drm_connector *connector; |
2213 | struct intel_output *intel_output = NULL; | 2218 | struct intel_encoder *intel_encoder = NULL; |
2214 | 2219 | ||
2215 | list_for_each_entry(connector, | 2220 | list_for_each_entry(connector, |
2216 | &dev->mode_config.connector_list, head) { | 2221 | &dev->mode_config.connector_list, head) { |
2217 | if (to_intel_output(connector)->ddc_bus == &chan->adapter) { | 2222 | if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { |
2218 | intel_output = to_intel_output(connector); | 2223 | intel_encoder = to_intel_encoder(connector); |
2219 | break; | 2224 | break; |
2220 | } | 2225 | } |
2221 | } | 2226 | } |
2222 | return intel_output; | 2227 | return intel_encoder; |
2223 | } | 2228 | } |
2224 | 2229 | ||
2225 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | 2230 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, |
2226 | struct i2c_msg msgs[], int num) | 2231 | struct i2c_msg msgs[], int num) |
2227 | { | 2232 | { |
2228 | struct intel_output *intel_output; | 2233 | struct intel_encoder *intel_encoder; |
2229 | struct intel_sdvo_priv *sdvo_priv; | 2234 | struct intel_sdvo_priv *sdvo_priv; |
2230 | struct i2c_algo_bit_data *algo_data; | 2235 | struct i2c_algo_bit_data *algo_data; |
2231 | const struct i2c_algorithm *algo; | 2236 | const struct i2c_algorithm *algo; |
2232 | 2237 | ||
2233 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | 2238 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; |
2234 | intel_output = | 2239 | intel_encoder = |
2235 | intel_sdvo_chan_to_intel_output( | 2240 | intel_sdvo_chan_to_intel_encoder( |
2236 | (struct intel_i2c_chan *)(algo_data->data)); | 2241 | (struct intel_i2c_chan *)(algo_data->data)); |
2237 | if (intel_output == NULL) | 2242 | if (intel_encoder == NULL) |
2238 | return -EINVAL; | 2243 | return -EINVAL; |
2239 | 2244 | ||
2240 | sdvo_priv = intel_output->dev_priv; | 2245 | sdvo_priv = intel_encoder->dev_priv; |
2241 | algo = intel_output->i2c_bus->algo; | 2246 | algo = intel_encoder->i2c_bus->algo; |
2242 | 2247 | ||
2243 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | 2248 | intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); |
2244 | return algo->master_xfer(i2c_adap, msgs, num); | 2249 | return algo->master_xfer(i2c_adap, msgs, num); |
2245 | } | 2250 | } |
2246 | 2251 | ||
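intel_sdvo_master_xfer() in the hunk above wraps the saved bit-banging algorithm: it locates the encoder that owns the adapter, switches the SDVO control bus to the DDC channel, then delegates the transfer. A minimal user-space sketch of that wrap-and-delegate shape follows; msg, algo, adapter, and the printed messages are illustrative stand-ins, not kernel i2c types:

#include <stdio.h>

struct msg { const char *payload; };

struct algo {
    int (*master_xfer)(void *adapter, struct msg *msgs, int num);
};

struct adapter {
    struct algo *saved_bit_algo;  /* original bit-banging algorithm */
    int ddc_bus;                  /* bus the SDVO device should switch to */
};

static int bit_banging_xfer(void *adapter, struct msg *msgs, int num)
{
    for (int i = 0; i < num; i++)
        printf("xfer: %s\n", msgs[i].payload);
    return num;
}

/* Wrapper in the spirit of intel_sdvo_master_xfer: switch the device to the
 * requested DDC bus first, then hand the transfer to the saved algorithm. */
static int sdvo_ddc_xfer(struct adapter *adap, struct msg *msgs, int num)
{
    printf("switch control bus to DDC bus %d\n", adap->ddc_bus);
    return adap->saved_bit_algo->master_xfer(adap, msgs, num);
}

int main(void)
{
    struct algo bit = { .master_xfer = bit_banging_xfer };
    struct adapter adap = { .saved_bit_algo = &bit, .ddc_bus = 2 };
    struct msg m[] = { { "EDID block read" } };

    sdvo_ddc_xfer(&adap, m, 1);
    return 0;
}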
@@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { | |||
2249 | }; | 2254 | }; |
2250 | 2255 | ||
2251 | static u8 | 2256 | static u8 |
2252 | intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | 2257 | intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) |
2253 | { | 2258 | { |
2254 | struct drm_i915_private *dev_priv = dev->dev_private; | 2259 | struct drm_i915_private *dev_priv = dev->dev_private; |
2255 | struct sdvo_device_mapping *my_mapping, *other_mapping; | 2260 | struct sdvo_device_mapping *my_mapping, *other_mapping; |
2256 | 2261 | ||
2257 | if (output_device == SDVOB) { | 2262 | if (sdvo_reg == SDVOB) { |
2258 | my_mapping = &dev_priv->sdvo_mappings[0]; | 2263 | my_mapping = &dev_priv->sdvo_mappings[0]; |
2259 | other_mapping = &dev_priv->sdvo_mappings[1]; | 2264 | other_mapping = &dev_priv->sdvo_mappings[1]; |
2260 | } else { | 2265 | } else { |
@@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | |||
2279 | /* No SDVO device info is found for another DVO port, | 2284 | /* No SDVO device info is found for another DVO port, |
2280 | * so use mapping assumption we had before BIOS parsing. | 2285 | * so use mapping assumption we had before BIOS parsing. |
2281 | */ | 2286 | */ |
2282 | if (output_device == SDVOB) | 2287 | if (sdvo_reg == SDVOB) |
2283 | return 0x70; | 2288 | return 0x70; |
2284 | else | 2289 | else |
2285 | return 0x72; | 2290 | return 0x72; |
@@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = { | |||
2305 | }; | 2310 | }; |
2306 | 2311 | ||
2307 | static bool | 2312 | static bool |
2308 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | 2313 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) |
2309 | { | 2314 | { |
2310 | struct drm_connector *connector = &intel_output->base; | 2315 | struct drm_connector *connector = &intel_encoder->base; |
2311 | struct drm_encoder *encoder = &intel_output->enc; | 2316 | struct drm_encoder *encoder = &intel_encoder->enc; |
2312 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2317 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2313 | bool ret = true, registered = false; | 2318 | bool ret = true, registered = false; |
2314 | 2319 | ||
2315 | sdvo_priv->is_tv = false; | 2320 | sdvo_priv->is_tv = false; |
2316 | intel_output->needs_tv_clock = false; | 2321 | intel_encoder->needs_tv_clock = false; |
2317 | sdvo_priv->is_lvds = false; | 2322 | sdvo_priv->is_lvds = false; |
2318 | 2323 | ||
2319 | if (device_is_registered(&connector->kdev)) { | 2324 | if (device_is_registered(&connector->kdev)) { |
@@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2331 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2336 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2332 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2337 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2333 | 2338 | ||
2334 | if (intel_sdvo_get_supp_encode(intel_output, | 2339 | if (intel_sdvo_get_supp_encode(intel_encoder, |
2335 | &sdvo_priv->encode) && | 2340 | &sdvo_priv->encode) && |
2336 | intel_sdvo_get_digital_encoding_mode(intel_output) && | 2341 | intel_sdvo_get_digital_encoding_mode(intel_encoder) && |
2337 | sdvo_priv->is_hdmi) { | 2342 | sdvo_priv->is_hdmi) { |
2338 | /* enable hdmi encoding mode if supported */ | 2343 | /* enable hdmi encoding mode if supported */ |
2339 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | 2344 | intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); |
2340 | intel_sdvo_set_colorimetry(intel_output, | 2345 | intel_sdvo_set_colorimetry(intel_encoder, |
2341 | SDVO_COLORIMETRY_RGB256); | 2346 | SDVO_COLORIMETRY_RGB256); |
2342 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2347 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2343 | intel_output->clone_mask = | 2348 | intel_encoder->clone_mask = |
2344 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2349 | (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2345 | (1 << INTEL_ANALOG_CLONE_BIT); | 2350 | (1 << INTEL_ANALOG_CLONE_BIT); |
2346 | } | 2351 | } |
@@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2356 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2357 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
2353 | sdvo_priv->is_tv = true; | 2358 | sdvo_priv->is_tv = true; |
2354 | intel_output->needs_tv_clock = true; | 2359 | intel_encoder->needs_tv_clock = true; |
2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2360 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
2356 | } else if (flags & SDVO_OUTPUT_RGB0) { | 2361 | } else if (flags & SDVO_OUTPUT_RGB0) { |
2357 | 2362 | ||
2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | 2363 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; |
2359 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2364 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2360 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2365 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2361 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2366 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2362 | (1 << INTEL_ANALOG_CLONE_BIT); | 2367 | (1 << INTEL_ANALOG_CLONE_BIT); |
2363 | } else if (flags & SDVO_OUTPUT_RGB1) { | 2368 | } else if (flags & SDVO_OUTPUT_RGB1) { |
2364 | 2369 | ||
2365 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | 2370 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; |
2366 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2371 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2367 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2372 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2368 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2373 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2369 | (1 << INTEL_ANALOG_CLONE_BIT); | 2374 | (1 << INTEL_ANALOG_CLONE_BIT); |
2370 | } else if (flags & SDVO_OUTPUT_CVBS0) { | 2375 | } else if (flags & SDVO_OUTPUT_CVBS0) { |
2371 | 2376 | ||
@@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2373 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2378 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
2374 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2379 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
2375 | sdvo_priv->is_tv = true; | 2380 | sdvo_priv->is_tv = true; |
2376 | intel_output->needs_tv_clock = true; | 2381 | intel_encoder->needs_tv_clock = true; |
2377 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2382 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
2378 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2383 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2379 | 2384 | ||
2380 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2385 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
2381 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2386 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
2382 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2387 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
2383 | sdvo_priv->is_lvds = true; | 2388 | sdvo_priv->is_lvds = true; |
2384 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2389 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | |
2385 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2390 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
2386 | } else if (flags & SDVO_OUTPUT_LVDS1) { | 2391 | } else if (flags & SDVO_OUTPUT_LVDS1) { |
2387 | 2392 | ||
@@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2389 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2394 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
2390 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2395 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
2391 | sdvo_priv->is_lvds = true; | 2396 | sdvo_priv->is_lvds = true; |
2392 | intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2397 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | |
2393 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2398 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); |
2394 | } else { | 2399 | } else { |
2395 | 2400 | ||
@@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2402 | bytes[0], bytes[1]); | 2407 | bytes[0], bytes[1]); |
2403 | ret = false; | 2408 | ret = false; |
2404 | } | 2409 | } |
2405 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 2410 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
2406 | 2411 | ||
2407 | if (ret && registered) | 2412 | if (ret && registered) |
2408 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | 2413 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; |
@@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2414 | 2419 | ||
2415 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) | 2420 | static void intel_sdvo_tv_create_property(struct drm_connector *connector) |
2416 | { | 2421 | { |
2417 | struct intel_output *intel_output = to_intel_output(connector); | 2422 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
2418 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2423 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2419 | struct intel_sdvo_tv_format format; | 2424 | struct intel_sdvo_tv_format format; |
2420 | uint32_t format_map, i; | 2425 | uint32_t format_map, i; |
2421 | uint8_t status; | 2426 | uint8_t status; |
2422 | 2427 | ||
2423 | intel_sdvo_set_target_output(intel_output, | 2428 | intel_sdvo_set_target_output(intel_encoder, |
2424 | sdvo_priv->controlled_output); | 2429 | sdvo_priv->controlled_output); |
2425 | 2430 | ||
2426 | intel_sdvo_write_cmd(intel_output, | 2431 | intel_sdvo_write_cmd(intel_encoder, |
2427 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); | 2432 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); |
2428 | status = intel_sdvo_read_response(intel_output, | 2433 | status = intel_sdvo_read_response(intel_encoder, |
2429 | &format, sizeof(format)); | 2434 | &format, sizeof(format)); |
2430 | if (status != SDVO_CMD_STATUS_SUCCESS) | 2435 | if (status != SDVO_CMD_STATUS_SUCCESS) |
2431 | return; | 2436 | return; |
@@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) | |||
2463 | 2468 | ||
2464 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | 2469 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) |
2465 | { | 2470 | { |
2466 | struct intel_output *intel_output = to_intel_output(connector); | 2471 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
2467 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 2472 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; |
2468 | struct intel_sdvo_enhancements_reply sdvo_data; | 2473 | struct intel_sdvo_enhancements_reply sdvo_data; |
2469 | struct drm_device *dev = connector->dev; | 2474 | struct drm_device *dev = connector->dev; |
2470 | uint8_t status; | 2475 | uint8_t status; |
2471 | uint16_t response, data_value[2]; | 2476 | uint16_t response, data_value[2]; |
2472 | 2477 | ||
2473 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2478 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
2474 | NULL, 0); | 2479 | NULL, 0); |
2475 | status = intel_sdvo_read_response(intel_output, &sdvo_data, | 2480 | status = intel_sdvo_read_response(intel_encoder, &sdvo_data, |
2476 | sizeof(sdvo_data)); | 2481 | sizeof(sdvo_data)); |
2477 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2482 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2478 | DRM_DEBUG_KMS(" incorrect response is returned\n"); | 2483 | DRM_DEBUG_KMS(" incorrect response is returned\n"); |
@@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2488 | * property | 2493 | * property |
2489 | */ | 2494 | */ |
2490 | if (sdvo_data.overscan_h) { | 2495 | if (sdvo_data.overscan_h) { |
2491 | intel_sdvo_write_cmd(intel_output, | 2496 | intel_sdvo_write_cmd(intel_encoder, |
2492 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); | 2497 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); |
2493 | status = intel_sdvo_read_response(intel_output, | 2498 | status = intel_sdvo_read_response(intel_encoder, |
2494 | &data_value, 4); | 2499 | &data_value, 4); |
2495 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2500 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2496 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2501 | DRM_DEBUG_KMS("Incorrect SDVO max " |
2497 | "h_overscan\n"); | 2502 | "h_overscan\n"); |
2498 | return; | 2503 | return; |
2499 | } | 2504 | } |
2500 | intel_sdvo_write_cmd(intel_output, | 2505 | intel_sdvo_write_cmd(intel_encoder, |
2501 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); | 2506 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); |
2502 | status = intel_sdvo_read_response(intel_output, | 2507 | status = intel_sdvo_read_response(intel_encoder, |
2503 | &response, 2); | 2508 | &response, 2); |
2504 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2509 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2505 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); | 2510 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); |
@@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2529 | data_value[0], data_value[1], response); | 2534 | data_value[0], data_value[1], response); |
2530 | } | 2535 | } |
2531 | if (sdvo_data.overscan_v) { | 2536 | if (sdvo_data.overscan_v) { |
2532 | intel_sdvo_write_cmd(intel_output, | 2537 | intel_sdvo_write_cmd(intel_encoder, |
2533 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); | 2538 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); |
2534 | status = intel_sdvo_read_response(intel_output, | 2539 | status = intel_sdvo_read_response(intel_encoder, |
2535 | &data_value, 4); | 2540 | &data_value, 4); |
2536 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2541 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2537 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2542 | DRM_DEBUG_KMS("Incorrect SDVO max " |
2538 | "v_overscan\n"); | 2543 | "v_overscan\n"); |
2539 | return; | 2544 | return; |
2540 | } | 2545 | } |
2541 | intel_sdvo_write_cmd(intel_output, | 2546 | intel_sdvo_write_cmd(intel_encoder, |
2542 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); | 2547 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); |
2543 | status = intel_sdvo_read_response(intel_output, | 2548 | status = intel_sdvo_read_response(intel_encoder, |
2544 | &response, 2); | 2549 | &response, 2); |
2545 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2550 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2546 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); | 2551 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); |
@@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2570 | data_value[0], data_value[1], response); | 2575 | data_value[0], data_value[1], response); |
2571 | } | 2576 | } |
2572 | if (sdvo_data.position_h) { | 2577 | if (sdvo_data.position_h) { |
2573 | intel_sdvo_write_cmd(intel_output, | 2578 | intel_sdvo_write_cmd(intel_encoder, |
2574 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); | 2579 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); |
2575 | status = intel_sdvo_read_response(intel_output, | 2580 | status = intel_sdvo_read_response(intel_encoder, |
2576 | &data_value, 4); | 2581 | &data_value, 4); |
2577 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2582 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2578 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); | 2583 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); |
2579 | return; | 2584 | return; |
2580 | } | 2585 | } |
2581 | intel_sdvo_write_cmd(intel_output, | 2586 | intel_sdvo_write_cmd(intel_encoder, |
2582 | SDVO_CMD_GET_POSITION_H, NULL, 0); | 2587 | SDVO_CMD_GET_POSITION_H, NULL, 0); |
2583 | status = intel_sdvo_read_response(intel_output, | 2588 | status = intel_sdvo_read_response(intel_encoder, |
2584 | &response, 2); | 2589 | &response, 2); |
2585 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2590 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2586 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); | 2591 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); |
@@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2601 | data_value[0], data_value[1], response); | 2606 | data_value[0], data_value[1], response); |
2602 | } | 2607 | } |
2603 | if (sdvo_data.position_v) { | 2608 | if (sdvo_data.position_v) { |
2604 | intel_sdvo_write_cmd(intel_output, | 2609 | intel_sdvo_write_cmd(intel_encoder, |
2605 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); | 2610 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); |
2606 | status = intel_sdvo_read_response(intel_output, | 2611 | status = intel_sdvo_read_response(intel_encoder, |
2607 | &data_value, 4); | 2612 | &data_value, 4); |
2608 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2613 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2609 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); | 2614 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); |
2610 | return; | 2615 | return; |
2611 | } | 2616 | } |
2612 | intel_sdvo_write_cmd(intel_output, | 2617 | intel_sdvo_write_cmd(intel_encoder, |
2613 | SDVO_CMD_GET_POSITION_V, NULL, 0); | 2618 | SDVO_CMD_GET_POSITION_V, NULL, 0); |
2614 | status = intel_sdvo_read_response(intel_output, | 2619 | status = intel_sdvo_read_response(intel_encoder, |
2615 | &response, 2); | 2620 | &response, 2); |
2616 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2621 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2617 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); | 2622 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); |
@@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2634 | } | 2639 | } |
2635 | if (sdvo_priv->is_tv) { | 2640 | if (sdvo_priv->is_tv) { |
2636 | if (sdvo_data.saturation) { | 2641 | if (sdvo_data.saturation) { |
2637 | intel_sdvo_write_cmd(intel_output, | 2642 | intel_sdvo_write_cmd(intel_encoder, |
2638 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | 2643 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); |
2639 | status = intel_sdvo_read_response(intel_output, | 2644 | status = intel_sdvo_read_response(intel_encoder, |
2640 | &data_value, 4); | 2645 | &data_value, 4); |
2641 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2646 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2642 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); | 2647 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); |
2643 | return; | 2648 | return; |
2644 | } | 2649 | } |
2645 | intel_sdvo_write_cmd(intel_output, | 2650 | intel_sdvo_write_cmd(intel_encoder, |
2646 | SDVO_CMD_GET_SATURATION, NULL, 0); | 2651 | SDVO_CMD_GET_SATURATION, NULL, 0); |
2647 | status = intel_sdvo_read_response(intel_output, | 2652 | status = intel_sdvo_read_response(intel_encoder, |
2648 | &response, 2); | 2653 | &response, 2); |
2649 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2654 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2650 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); | 2655 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); |
@@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2666 | data_value[0], data_value[1], response); | 2671 | data_value[0], data_value[1], response); |
2667 | } | 2672 | } |
2668 | if (sdvo_data.contrast) { | 2673 | if (sdvo_data.contrast) { |
2669 | intel_sdvo_write_cmd(intel_output, | 2674 | intel_sdvo_write_cmd(intel_encoder, |
2670 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); | 2675 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); |
2671 | status = intel_sdvo_read_response(intel_output, | 2676 | status = intel_sdvo_read_response(intel_encoder, |
2672 | &data_value, 4); | 2677 | &data_value, 4); |
2673 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2678 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2674 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); | 2679 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); |
2675 | return; | 2680 | return; |
2676 | } | 2681 | } |
2677 | intel_sdvo_write_cmd(intel_output, | 2682 | intel_sdvo_write_cmd(intel_encoder, |
2678 | SDVO_CMD_GET_CONTRAST, NULL, 0); | 2683 | SDVO_CMD_GET_CONTRAST, NULL, 0); |
2679 | status = intel_sdvo_read_response(intel_output, | 2684 | status = intel_sdvo_read_response(intel_encoder, |
2680 | &response, 2); | 2685 | &response, 2); |
2681 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2686 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2682 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); | 2687 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); |
@@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2697 | data_value[0], data_value[1], response); | 2702 | data_value[0], data_value[1], response); |
2698 | } | 2703 | } |
2699 | if (sdvo_data.hue) { | 2704 | if (sdvo_data.hue) { |
2700 | intel_sdvo_write_cmd(intel_output, | 2705 | intel_sdvo_write_cmd(intel_encoder, |
2701 | SDVO_CMD_GET_MAX_HUE, NULL, 0); | 2706 | SDVO_CMD_GET_MAX_HUE, NULL, 0); |
2702 | status = intel_sdvo_read_response(intel_output, | 2707 | status = intel_sdvo_read_response(intel_encoder, |
2703 | &data_value, 4); | 2708 | &data_value, 4); |
2704 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2709 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2705 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); | 2710 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); |
2706 | return; | 2711 | return; |
2707 | } | 2712 | } |
2708 | intel_sdvo_write_cmd(intel_output, | 2713 | intel_sdvo_write_cmd(intel_encoder, |
2709 | SDVO_CMD_GET_HUE, NULL, 0); | 2714 | SDVO_CMD_GET_HUE, NULL, 0); |
2710 | status = intel_sdvo_read_response(intel_output, | 2715 | status = intel_sdvo_read_response(intel_encoder, |
2711 | &response, 2); | 2716 | &response, 2); |
2712 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2717 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2713 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); | 2718 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); |
@@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2730 | } | 2735 | } |
2731 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 2736 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { |
2732 | if (sdvo_data.brightness) { | 2737 | if (sdvo_data.brightness) { |
2733 | intel_sdvo_write_cmd(intel_output, | 2738 | intel_sdvo_write_cmd(intel_encoder, |
2734 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | 2739 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); |
2735 | status = intel_sdvo_read_response(intel_output, | 2740 | status = intel_sdvo_read_response(intel_encoder, |
2736 | &data_value, 4); | 2741 | &data_value, 4); |
2737 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2742 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2738 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); | 2743 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); |
2739 | return; | 2744 | return; |
2740 | } | 2745 | } |
2741 | intel_sdvo_write_cmd(intel_output, | 2746 | intel_sdvo_write_cmd(intel_encoder, |
2742 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); | 2747 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); |
2743 | status = intel_sdvo_read_response(intel_output, | 2748 | status = intel_sdvo_read_response(intel_encoder, |
2744 | &response, 2); | 2749 | &response, 2); |
2745 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2750 | if (status != SDVO_CMD_STATUS_SUCCESS) { |
2746 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); | 2751 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); |
@@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2765 | return; | 2770 | return; |
2766 | } | 2771 | } |
2767 | 2772 | ||
2768 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2773 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) |
2769 | { | 2774 | { |
2770 | struct drm_i915_private *dev_priv = dev->dev_private; | 2775 | struct drm_i915_private *dev_priv = dev->dev_private; |
2771 | struct drm_connector *connector; | 2776 | struct drm_connector *connector; |
2772 | struct intel_output *intel_output; | 2777 | struct intel_encoder *intel_encoder; |
2773 | struct intel_sdvo_priv *sdvo_priv; | 2778 | struct intel_sdvo_priv *sdvo_priv; |
2774 | 2779 | ||
2775 | u8 ch[0x40]; | 2780 | u8 ch[0x40]; |
2776 | int i; | 2781 | int i; |
2777 | 2782 | ||
2778 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2783 | intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
2779 | if (!intel_output) { | 2784 | if (!intel_encoder) { |
2780 | return false; | 2785 | return false; |
2781 | } | 2786 | } |
2782 | 2787 | ||
2783 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); | 2788 | sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); |
2784 | sdvo_priv->output_device = output_device; | 2789 | sdvo_priv->sdvo_reg = sdvo_reg; |
2785 | 2790 | ||
2786 | intel_output->dev_priv = sdvo_priv; | 2791 | intel_encoder->dev_priv = sdvo_priv; |
2787 | intel_output->type = INTEL_OUTPUT_SDVO; | 2792 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
2788 | 2793 | ||
2789 | /* setup the DDC bus. */ | 2794 | /* setup the DDC bus. */ |
2790 | if (output_device == SDVOB) | 2795 | if (sdvo_reg == SDVOB) |
2791 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | 2796 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); |
2792 | else | 2797 | else |
2793 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | 2798 | intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); |
2794 | 2799 | ||
2795 | if (!intel_output->i2c_bus) | 2800 | if (!intel_encoder->i2c_bus) |
2796 | goto err_inteloutput; | 2801 | goto err_inteloutput; |
2797 | 2802 | ||
2798 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); | 2803 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); |
2799 | 2804 | ||
2800 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ | 2805 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ |
2801 | intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; | 2806 | intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; |
2802 | 2807 | ||
2803 | /* Read the regs to test if we can talk to the device */ | 2808 | /* Read the regs to test if we can talk to the device */ |
2804 | for (i = 0; i < 0x40; i++) { | 2809 | for (i = 0; i < 0x40; i++) { |
2805 | if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { | 2810 | if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { |
2806 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", | 2811 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", |
2807 | output_device == SDVOB ? 'B' : 'C'); | 2812 | sdvo_reg == SDVOB ? 'B' : 'C'); |
2808 | goto err_i2c; | 2813 | goto err_i2c; |
2809 | } | 2814 | } |
2810 | } | 2815 | } |
2811 | 2816 | ||
2812 | /* setup the DDC bus. */ | 2817 | /* setup the DDC bus. */ |
2813 | if (output_device == SDVOB) { | 2818 | if (sdvo_reg == SDVOB) { |
2814 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2819 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
2815 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2816 | "SDVOB/VGA DDC BUS"); | 2821 | "SDVOB/VGA DDC BUS"); |
2817 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | 2822 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; |
2818 | } else { | 2823 | } else { |
2819 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2824 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2825 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2821 | "SDVOC/VGA DDC BUS"); | 2826 | "SDVOC/VGA DDC BUS"); |
2822 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | 2827 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; |
2823 | } | 2828 | } |
2824 | 2829 | ||
2825 | if (intel_output->ddc_bus == NULL) | 2830 | if (intel_encoder->ddc_bus == NULL) |
2826 | goto err_i2c; | 2831 | goto err_i2c; |
2827 | 2832 | ||
2828 | /* Wrap with our custom algo which switches to DDC mode */ | 2833 | /* Wrap with our custom algo which switches to DDC mode */ |
2829 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | 2834 | intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
2830 | 2835 | ||
2831 | /* In default case sdvo lvds is false */ | 2836 | /* In default case sdvo lvds is false */ |
2832 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 2837 | intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); |
2833 | 2838 | ||
2834 | if (intel_sdvo_output_setup(intel_output, | 2839 | if (intel_sdvo_output_setup(intel_encoder, |
2835 | sdvo_priv->caps.output_flags) != true) { | 2840 | sdvo_priv->caps.output_flags) != true) { |
2836 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2841 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
2837 | output_device == SDVOB ? 'B' : 'C'); | 2842 | sdvo_reg == SDVOB ? 'B' : 'C'); |
2838 | goto err_i2c; | 2843 | goto err_i2c; |
2839 | } | 2844 | } |
2840 | 2845 | ||
2841 | 2846 | ||
2842 | connector = &intel_output->base; | 2847 | connector = &intel_encoder->base; |
2843 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | 2848 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, |
2844 | connector->connector_type); | 2849 | connector->connector_type); |
2845 | 2850 | ||
@@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2848 | connector->doublescan_allowed = 0; | 2853 | connector->doublescan_allowed = 0; |
2849 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 2854 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
2850 | 2855 | ||
2851 | drm_encoder_init(dev, &intel_output->enc, | 2856 | drm_encoder_init(dev, &intel_encoder->enc, |
2852 | &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); | 2857 | &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); |
2853 | 2858 | ||
2854 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); | 2859 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); |
2855 | 2860 | ||
2856 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2861 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
2857 | if (sdvo_priv->is_tv) | 2862 | if (sdvo_priv->is_tv) |
2858 | intel_sdvo_tv_create_property(connector); | 2863 | intel_sdvo_tv_create_property(connector); |
2859 | 2864 | ||
@@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2865 | intel_sdvo_select_ddc_bus(sdvo_priv); | 2870 | intel_sdvo_select_ddc_bus(sdvo_priv); |
2866 | 2871 | ||
2867 | /* Set the input timing to the screen. Assume always input 0. */ | 2872 | /* Set the input timing to the screen. Assume always input 0. */ |
2868 | intel_sdvo_set_target_input(intel_output, true, false); | 2873 | intel_sdvo_set_target_input(intel_encoder, true, false); |
2869 | 2874 | ||
2870 | intel_sdvo_get_input_pixel_clock_range(intel_output, | 2875 | intel_sdvo_get_input_pixel_clock_range(intel_encoder, |
2871 | &sdvo_priv->pixel_clock_min, | 2876 | &sdvo_priv->pixel_clock_min, |
2872 | &sdvo_priv->pixel_clock_max); | 2877 | &sdvo_priv->pixel_clock_max); |
2873 | 2878 | ||
@@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2894 | err_i2c: | 2899 | err_i2c: |
2895 | if (sdvo_priv->analog_ddc_bus != NULL) | 2900 | if (sdvo_priv->analog_ddc_bus != NULL) |
2896 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 2901 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); |
2897 | if (intel_output->ddc_bus != NULL) | 2902 | if (intel_encoder->ddc_bus != NULL) |
2898 | intel_i2c_destroy(intel_output->ddc_bus); | 2903 | intel_i2c_destroy(intel_encoder->ddc_bus); |
2899 | if (intel_output->i2c_bus != NULL) | 2904 | if (intel_encoder->i2c_bus != NULL) |
2900 | intel_i2c_destroy(intel_output->i2c_bus); | 2905 | intel_i2c_destroy(intel_encoder->i2c_bus); |
2901 | err_inteloutput: | 2906 | err_inteloutput: |
2902 | kfree(intel_output); | 2907 | kfree(intel_encoder); |
2903 | 2908 | ||
2904 | return false; | 2909 | return false; |
2905 | } | 2910 | } |
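The intel_sdvo_init() hunk above keeps the driver's single-allocation idiom: one kcalloc() sized for the encoder structure plus its private data, with the private pointer taken as the first byte past the encoder (intel_encoder + 1) and released by the single kfree() in the err_inteloutput path. The following is a minimal userspace sketch of that trailing-private-data pattern; the structure names and the register value are placeholders, not the real i915 types.

#include <stdio.h>
#include <stdlib.h>

struct encoder {                /* stand-in for struct intel_encoder */
	int type;
	void *dev_priv;         /* points at the trailing private block */
};

struct sdvo_priv {              /* stand-in for struct intel_sdvo_priv */
	int sdvo_reg;
};

static struct encoder *encoder_alloc(int sdvo_reg)
{
	/* one allocation: the encoder followed immediately by its private data */
	struct encoder *enc = calloc(1, sizeof(struct encoder) +
					sizeof(struct sdvo_priv));
	struct sdvo_priv *priv;

	if (!enc)
		return NULL;

	priv = (struct sdvo_priv *)(enc + 1);   /* first byte past the encoder */
	priv->sdvo_reg = sdvo_reg;
	enc->dev_priv = priv;
	return enc;
}

int main(void)
{
	struct encoder *enc = encoder_alloc(0x40);      /* example register offset */

	if (!enc)
		return 1;
	printf("priv reg = 0x%x\n",
	       (unsigned)((struct sdvo_priv *)enc->dev_priv)->sdvo_reg);
	free(enc);              /* one free releases both structures */
	return 0;
}

Because the encoder and its private data share one block, the error path only needs the single kfree(intel_encoder) seen above.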
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b741..d7d39b2327df 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector) | |||
921 | { | 921 | { |
922 | struct drm_device *dev = connector->dev; | 922 | struct drm_device *dev = connector->dev; |
923 | struct drm_i915_private *dev_priv = dev->dev_private; | 923 | struct drm_i915_private *dev_priv = dev->dev_private; |
924 | struct intel_output *intel_output = to_intel_output(connector); | 924 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
925 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 925 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
926 | int i; | 926 | int i; |
927 | 927 | ||
928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); | 928 | tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); |
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector) | |||
971 | { | 971 | { |
972 | struct drm_device *dev = connector->dev; | 972 | struct drm_device *dev = connector->dev; |
973 | struct drm_i915_private *dev_priv = dev->dev_private; | 973 | struct drm_i915_private *dev_priv = dev->dev_private; |
974 | struct intel_output *intel_output = to_intel_output(connector); | 974 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
975 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 975 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
976 | struct drm_crtc *crtc = connector->encoder->crtc; | 976 | struct drm_crtc *crtc = connector->encoder->crtc; |
977 | struct intel_crtc *intel_crtc; | 977 | struct intel_crtc *intel_crtc; |
978 | int i; | 978 | int i; |
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format) | |||
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | static const struct tv_mode * | 1070 | static const struct tv_mode * |
1071 | intel_tv_mode_find (struct intel_output *intel_output) | 1071 | intel_tv_mode_find (struct intel_encoder *intel_encoder) |
1072 | { | 1072 | { |
1073 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1073 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1074 | 1074 | ||
1075 | return intel_tv_mode_lookup(tv_priv->tv_format); | 1075 | return intel_tv_mode_lookup(tv_priv->tv_format); |
1076 | } | 1076 | } |
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output) | |||
1078 | static enum drm_mode_status | 1078 | static enum drm_mode_status |
1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | 1079 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) |
1080 | { | 1080 | { |
1081 | struct intel_output *intel_output = to_intel_output(connector); | 1081 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1083 | 1083 | ||
1084 | /* Ensure TV refresh is close to desired refresh */ | 1084 | /* Ensure TV refresh is close to desired refresh */ |
1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) | 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) |
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1095 | { | 1095 | { |
1096 | struct drm_device *dev = encoder->dev; | 1096 | struct drm_device *dev = encoder->dev; |
1097 | struct drm_mode_config *drm_config = &dev->mode_config; | 1097 | struct drm_mode_config *drm_config = &dev->mode_config; |
1098 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1098 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1099 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); | 1099 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); |
1100 | struct drm_encoder *other_encoder; | 1100 | struct drm_encoder *other_encoder; |
1101 | 1101 | ||
1102 | if (!tv_mode) | 1102 | if (!tv_mode) |
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1121 | struct drm_i915_private *dev_priv = dev->dev_private; | 1121 | struct drm_i915_private *dev_priv = dev->dev_private; |
1122 | struct drm_crtc *crtc = encoder->crtc; | 1122 | struct drm_crtc *crtc = encoder->crtc; |
1123 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1123 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1124 | struct intel_output *intel_output = enc_to_intel_output(encoder); | 1124 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1125 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1125 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1126 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1126 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1127 | u32 tv_ctl; | 1127 | u32 tv_ctl; |
1128 | u32 hctl1, hctl2, hctl3; | 1128 | u32 hctl1, hctl2, hctl3; |
1129 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; | 1129 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; |
@@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = { | |||
1360 | * \return false if TV is disconnected. | 1360 | * \return false if TV is disconnected. |
1361 | */ | 1361 | */ |
1362 | static int | 1362 | static int |
1363 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | 1363 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) |
1364 | { | 1364 | { |
1365 | struct drm_encoder *encoder = &intel_output->enc; | 1365 | struct drm_encoder *encoder = &intel_encoder->enc; |
1366 | struct drm_device *dev = encoder->dev; | 1366 | struct drm_device *dev = encoder->dev; |
1367 | struct drm_i915_private *dev_priv = dev->dev_private; | 1367 | struct drm_i915_private *dev_priv = dev->dev_private; |
1368 | unsigned long irqflags; | 1368 | unsigned long irqflags; |
@@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1441 | */ | 1441 | */ |
1442 | static void intel_tv_find_better_format(struct drm_connector *connector) | 1442 | static void intel_tv_find_better_format(struct drm_connector *connector) |
1443 | { | 1443 | { |
1444 | struct intel_output *intel_output = to_intel_output(connector); | 1444 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1445 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1445 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1446 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1447 | int i; | 1447 | int i; |
1448 | 1448 | ||
1449 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == | 1449 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == |
@@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector) | |||
1475 | { | 1475 | { |
1476 | struct drm_crtc *crtc; | 1476 | struct drm_crtc *crtc; |
1477 | struct drm_display_mode mode; | 1477 | struct drm_display_mode mode; |
1478 | struct intel_output *intel_output = to_intel_output(connector); | 1478 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1479 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1479 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1480 | struct drm_encoder *encoder = &intel_output->enc; | 1480 | struct drm_encoder *encoder = &intel_encoder->enc; |
1481 | int dpms_mode; | 1481 | int dpms_mode; |
1482 | int type = tv_priv->type; | 1482 | int type = tv_priv->type; |
1483 | 1483 | ||
@@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector) | |||
1485 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1485 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1486 | 1486 | ||
1487 | if (encoder->crtc && encoder->crtc->enabled) { | 1487 | if (encoder->crtc && encoder->crtc->enabled) { |
1488 | type = intel_tv_detect_type(encoder->crtc, intel_output); | 1488 | type = intel_tv_detect_type(encoder->crtc, intel_encoder); |
1489 | } else { | 1489 | } else { |
1490 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); | 1490 | crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); |
1491 | if (crtc) { | 1491 | if (crtc) { |
1492 | type = intel_tv_detect_type(crtc, intel_output); | 1492 | type = intel_tv_detect_type(crtc, intel_encoder); |
1493 | intel_release_load_detect_pipe(intel_output, dpms_mode); | 1493 | intel_release_load_detect_pipe(intel_encoder, dpms_mode); |
1494 | } else | 1494 | } else |
1495 | type = -1; | 1495 | type = -1; |
1496 | } | 1496 | } |
@@ -1525,8 +1525,8 @@ static void | |||
1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | 1525 | intel_tv_chose_preferred_modes(struct drm_connector *connector, |
1526 | struct drm_display_mode *mode_ptr) | 1526 | struct drm_display_mode *mode_ptr) |
1527 | { | 1527 | { |
1528 | struct intel_output *intel_output = to_intel_output(connector); | 1528 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1529 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1530 | 1530 | ||
1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | 1531 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) |
1532 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | 1532 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; |
@@ -1550,8 +1550,8 @@ static int | |||
1550 | intel_tv_get_modes(struct drm_connector *connector) | 1550 | intel_tv_get_modes(struct drm_connector *connector) |
1551 | { | 1551 | { |
1552 | struct drm_display_mode *mode_ptr; | 1552 | struct drm_display_mode *mode_ptr; |
1553 | struct intel_output *intel_output = to_intel_output(connector); | 1553 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1554 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); |
1555 | int j, count = 0; | 1555 | int j, count = 0; |
1556 | u64 tmp; | 1556 | u64 tmp; |
1557 | 1557 | ||
@@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1604 | static void | 1604 | static void |
1605 | intel_tv_destroy (struct drm_connector *connector) | 1605 | intel_tv_destroy (struct drm_connector *connector) |
1606 | { | 1606 | { |
1607 | struct intel_output *intel_output = to_intel_output(connector); | 1607 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1608 | 1608 | ||
1609 | drm_sysfs_connector_remove(connector); | 1609 | drm_sysfs_connector_remove(connector); |
1610 | drm_connector_cleanup(connector); | 1610 | drm_connector_cleanup(connector); |
1611 | kfree(intel_output); | 1611 | kfree(intel_encoder); |
1612 | } | 1612 | } |
1613 | 1613 | ||
1614 | 1614 | ||
@@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1617 | uint64_t val) | 1617 | uint64_t val) |
1618 | { | 1618 | { |
1619 | struct drm_device *dev = connector->dev; | 1619 | struct drm_device *dev = connector->dev; |
1620 | struct intel_output *intel_output = to_intel_output(connector); | 1620 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); |
1621 | struct intel_tv_priv *tv_priv = intel_output->dev_priv; | 1621 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; |
1622 | struct drm_encoder *encoder = &intel_output->enc; | 1622 | struct drm_encoder *encoder = &intel_encoder->enc; |
1623 | struct drm_crtc *crtc = encoder->crtc; | 1623 | struct drm_crtc *crtc = encoder->crtc; |
1624 | int ret = 0; | 1624 | int ret = 0; |
1625 | bool changed = false; | 1625 | bool changed = false; |
@@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev) | |||
1740 | { | 1740 | { |
1741 | struct drm_i915_private *dev_priv = dev->dev_private; | 1741 | struct drm_i915_private *dev_priv = dev->dev_private; |
1742 | struct drm_connector *connector; | 1742 | struct drm_connector *connector; |
1743 | struct intel_output *intel_output; | 1743 | struct intel_encoder *intel_encoder; |
1744 | struct intel_tv_priv *tv_priv; | 1744 | struct intel_tv_priv *tv_priv; |
1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | 1745 | u32 tv_dac_on, tv_dac_off, save_tv_dac; |
1746 | char **tv_format_names; | 1746 | char **tv_format_names; |
@@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev) | |||
1780 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) | 1780 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) |
1781 | return; | 1781 | return; |
1782 | 1782 | ||
1783 | intel_output = kzalloc(sizeof(struct intel_output) + | 1783 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + |
1784 | sizeof(struct intel_tv_priv), GFP_KERNEL); | 1784 | sizeof(struct intel_tv_priv), GFP_KERNEL); |
1785 | if (!intel_output) { | 1785 | if (!intel_encoder) { |
1786 | return; | 1786 | return; |
1787 | } | 1787 | } |
1788 | 1788 | ||
1789 | connector = &intel_output->base; | 1789 | connector = &intel_encoder->base; |
1790 | 1790 | ||
1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1791 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1792 | DRM_MODE_CONNECTOR_SVIDEO); | 1792 | DRM_MODE_CONNECTOR_SVIDEO); |
1793 | 1793 | ||
1794 | drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, | 1794 | drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, |
1795 | DRM_MODE_ENCODER_TVDAC); | 1795 | DRM_MODE_ENCODER_TVDAC); |
1796 | 1796 | ||
1797 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 1797 | drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); |
1798 | tv_priv = (struct intel_tv_priv *)(intel_output + 1); | 1798 | tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); |
1799 | intel_output->type = INTEL_OUTPUT_TVOUT; | 1799 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1800 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1800 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1801 | intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); | 1801 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); |
1802 | intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | 1802 | intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); |
1803 | intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1803 | intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
1804 | intel_output->dev_priv = tv_priv; | 1804 | intel_encoder->dev_priv = tv_priv; |
1805 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; | 1805 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; |
1806 | 1806 | ||
1807 | /* BIOS margin values */ | 1807 | /* BIOS margin values */ |
@@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev) | |||
1812 | 1812 | ||
1813 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); | 1813 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); |
1814 | 1814 | ||
1815 | drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); | 1815 | drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); |
1816 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); | 1816 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); |
1817 | connector->interlace_allowed = false; | 1817 | connector->interlace_allowed = false; |
1818 | connector->doublescan_allowed = false; | 1818 | connector->doublescan_allowed = false; |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index bcec2d79636e..1d569830ed99 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
908 | uint8_t attr = U8((*ptr)++), shift; | 908 | uint8_t attr = U8((*ptr)++), shift; |
909 | uint32_t saved, dst; | 909 | uint32_t saved, dst; |
910 | int dptr = *ptr; | 910 | int dptr = *ptr; |
911 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
911 | SDEBUG(" dst: "); | 912 | SDEBUG(" dst: "); |
912 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 913 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
914 | /* op needs the full dst value */ | ||
915 | dst = saved; | ||
913 | shift = atom_get_src(ctx, attr, ptr); | 916 | shift = atom_get_src(ctx, attr, ptr); |
914 | SDEBUG(" shift: %d\n", shift); | 917 | SDEBUG(" shift: %d\n", shift); |
915 | dst <<= shift; | 918 | dst <<= shift; |
919 | dst &= atom_arg_mask[dst_align]; | ||
920 | dst >>= atom_arg_shift[dst_align]; | ||
916 | SDEBUG(" dst: "); | 921 | SDEBUG(" dst: "); |
917 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 922 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
918 | } | 923 | } |
@@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
922 | uint8_t attr = U8((*ptr)++), shift; | 927 | uint8_t attr = U8((*ptr)++), shift; |
923 | uint32_t saved, dst; | 928 | uint32_t saved, dst; |
924 | int dptr = *ptr; | 929 | int dptr = *ptr; |
930 | uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; | ||
925 | SDEBUG(" dst: "); | 931 | SDEBUG(" dst: "); |
926 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 932 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
933 | /* op needs to full dst value */ | ||
934 | dst = saved; | ||
927 | shift = atom_get_src(ctx, attr, ptr); | 935 | shift = atom_get_src(ctx, attr, ptr); |
928 | SDEBUG(" shift: %d\n", shift); | 936 | SDEBUG(" shift: %d\n", shift); |
929 | dst >>= shift; | 937 | dst >>= shift; |
938 | dst &= atom_arg_mask[dst_align]; | ||
939 | dst >>= atom_arg_shift[dst_align]; | ||
930 | SDEBUG(" dst: "); | 940 | SDEBUG(" dst: "); |
931 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 941 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
932 | } | 942 | } |
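In the atom_op_shl()/atom_op_shr() hunks above, the added lines indicate that atom_get_dst() hands back only the field selected by the attribute byte while saved carries the full 32-bit destination, so the fix shifts saved and then masks and re-aligns the result through atom_arg_mask[]/atom_arg_shift[] before atom_put_dst() merges it back. The sketch below illustrates that mask-and-realign step in isolation; the alignment tables here are invented stand-ins, not the driver's real tables.

#include <stdint.h>
#include <stdio.h>

/* illustrative per-alignment tables: full dword, the four bytes, the two words */
static const uint32_t arg_mask[]  = { 0xFFFFFFFF, 0x000000FF, 0x0000FF00,
				      0x00FF0000, 0xFF000000, 0x0000FFFF,
				      0xFFFF0000 };
static const int      arg_shift[] = { 0, 0, 8, 16, 24, 0, 16 };

/* shift the full saved register, then reduce the result to the target field */
static uint32_t shl_field(uint32_t saved, int align, int shift)
{
	uint32_t dst = saved << shift;  /* operate on the full value */

	dst &= arg_mask[align];         /* keep only the destination field */
	dst >>= arg_shift[align];       /* re-align it to bit 0 for the writeback */
	return dst;
}

int main(void)
{
	/* alignment 2 selects the second byte in this made-up table */
	printf("0x%08X\n", (unsigned)shl_field(0x00001234, 2, 4));  /* prints 0x00000023 */
	return 0;
}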
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index fd4ef6d18849..a87990b3ae84 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 521 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 522 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
523 | adjusted_clock = mode->clock * 2; | 523 | adjusted_clock = mode->clock * 2; |
524 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | ||
525 | pll->algo = PLL_ALGO_LEGACY; | ||
526 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | ||
527 | } | ||
524 | } else { | 528 | } else { |
525 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 529 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
526 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 530 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9580497ede4..d7388fdb6d0b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
2891 | { | 2891 | { |
2892 | struct radeon_bo *robj; | 2892 | struct radeon_bo *robj; |
2893 | unsigned long size; | 2893 | unsigned long size; |
2894 | unsigned u, i, w, h; | 2894 | unsigned u, i, w, h, d; |
2895 | int ret; | 2895 | int ret; |
2896 | 2896 | ||
2897 | for (u = 0; u < track->num_texture; u++) { | 2897 | for (u = 0; u < track->num_texture; u++) { |
@@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
2923 | h = h / (1 << i); | 2923 | h = h / (1 << i); |
2924 | if (track->textures[u].roundup_h) | 2924 | if (track->textures[u].roundup_h) |
2925 | h = roundup_pow_of_two(h); | 2925 | h = roundup_pow_of_two(h); |
2926 | if (track->textures[u].tex_coord_type == 1) { | ||
2927 | d = (1 << track->textures[u].txdepth) / (1 << i); | ||
2928 | if (!d) | ||
2929 | d = 1; | ||
2930 | } else { | ||
2931 | d = 1; | ||
2932 | } | ||
2926 | if (track->textures[u].compress_format) { | 2933 | if (track->textures[u].compress_format) { |
2927 | 2934 | ||
2928 | size += r100_track_compress_size(track->textures[u].compress_format, w, h); | 2935 | size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; |
2929 | /* compressed textures are block based */ | 2936 | /* compressed textures are block based */ |
2930 | } else | 2937 | } else |
2931 | size += w * h; | 2938 | size += w * h * d; |
2932 | } | 2939 | } |
2933 | size *= track->textures[u].cpp; | 2940 | size *= track->textures[u].cpp; |
2934 | 2941 | ||
2935 | switch (track->textures[u].tex_coord_type) { | 2942 | switch (track->textures[u].tex_coord_type) { |
2936 | case 0: | 2943 | case 0: |
2937 | break; | ||
2938 | case 1: | 2944 | case 1: |
2939 | size *= (1 << track->textures[u].txdepth); | ||
2940 | break; | 2945 | break; |
2941 | case 2: | 2946 | case 2: |
2942 | if (track->separate_cube) { | 2947 | if (track->separate_cube) { |
@@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3007 | } | 3012 | } |
3008 | } | 3013 | } |
3009 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; | 3014 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
3010 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; | 3015 | if (track->vap_vf_cntl & (1 << 14)) { |
3016 | nverts = track->vap_alt_nverts; | ||
3017 | } else { | ||
3018 | nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; | ||
3019 | } | ||
3011 | switch (prim_walk) { | 3020 | switch (prim_walk) { |
3012 | case 1: | 3021 | case 1: |
3013 | for (i = 0; i < track->num_arrays; i++) { | 3022 | for (i = 0; i < track->num_arrays; i++) { |
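The r100_cs_track_texture_check() change above moves the depth term inside the mip loop: for 3D textures (tex_coord_type == 1) each level uses d = (1 << txdepth) / (1 << i), clamped to at least 1, and w * h * d is accumulated per level, instead of multiplying the final size by the full depth once as the removed case 1 did. A rough standalone sketch of that accumulation follows; the field names, rounding and sample numbers are simplifications for illustration, not the driver's exact bookkeeping.

#include <stdio.h>

/* minimal helper for the sketch: next power of two >= x */
static unsigned roundup_pow_of_two(unsigned x)
{
	unsigned p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

/* sum w * h * d over the mip chain; depth only shrinks for 3D textures */
static unsigned long texture_size(unsigned width, unsigned height,
				  unsigned txdepth_log2, int is_3d,
				  unsigned num_levels, unsigned cpp)
{
	unsigned long size = 0;
	unsigned i;

	for (i = 0; i < num_levels; i++) {
		unsigned w = roundup_pow_of_two((width >> i) ? (width >> i) : 1);
		unsigned h = roundup_pow_of_two((height >> i) ? (height >> i) : 1);
		unsigned d = is_3d ? ((1u << txdepth_log2) >> i) : 1;

		if (!d)
			d = 1;          /* depth never drops below one slice */
		size += (unsigned long)w * h * d;
	}
	return size * cpp;              /* texels times bytes per texel */
}

int main(void)
{
	/* 16x16x8 3D texture, 4 mip levels, 4 bytes per texel */
	printf("%lu bytes\n", texture_size(16, 16, 3, 1, 4, 4));
	return 0;
}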
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index b27a6999d219..fadfe68de9cc 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -64,6 +64,7 @@ struct r100_cs_track { | |||
64 | unsigned maxy; | 64 | unsigned maxy; |
65 | unsigned vtx_size; | 65 | unsigned vtx_size; |
66 | unsigned vap_vf_cntl; | 66 | unsigned vap_vf_cntl; |
67 | unsigned vap_alt_nverts; | ||
67 | unsigned immd_dwords; | 68 | unsigned immd_dwords; |
68 | unsigned num_arrays; | 69 | unsigned num_arrays; |
69 | unsigned max_indx; | 70 | unsigned max_indx; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 2b9affe754ce..eaf1f6bc44f1 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -324,13 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
324 | uint32_t gb_tile_config, tmp; | 324 | uint32_t gb_tile_config, tmp; |
325 | 325 | ||
326 | r100_hdp_reset(rdev); | 326 | r100_hdp_reset(rdev); |
327 | /* FIXME: rv380 one pipes ? */ | ||
328 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || | 327 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || |
329 | (rdev->family == CHIP_R350)) { | 328 | (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { |
330 | /* r300,r350 */ | 329 | /* r300,r350 */ |
331 | rdev->num_gb_pipes = 2; | 330 | rdev->num_gb_pipes = 2; |
332 | } else { | 331 | } else { |
333 | /* rv350,rv370,rv380,r300 AD */ | 332 | /* rv350,rv370,rv380,r300 AD, r350 AH */ |
334 | rdev->num_gb_pipes = 1; | 333 | rdev->num_gb_pipes = 1; |
335 | } | 334 | } |
336 | rdev->num_z_pipes = 1; | 335 | rdev->num_z_pipes = 1; |
@@ -730,6 +729,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
730 | /* VAP_VF_MAX_VTX_INDX */ | 729 | /* VAP_VF_MAX_VTX_INDX */ |
731 | track->max_indx = idx_value & 0x00FFFFFFUL; | 730 | track->max_indx = idx_value & 0x00FFFFFFUL; |
732 | break; | 731 | break; |
732 | case 0x2088: | ||
733 | /* VAP_ALT_NUM_VERTICES - only valid on r500 */ | ||
734 | if (p->rdev->family < CHIP_RV515) | ||
735 | goto fail; | ||
736 | track->vap_alt_nverts = idx_value & 0xFFFFFF; | ||
737 | break; | ||
733 | case 0x43E4: | 738 | case 0x43E4: |
734 | /* SC_SCISSOR1 */ | 739 | /* SC_SCISSOR1 */ |
735 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; | 740 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
@@ -767,7 +772,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
767 | tmp = idx_value & ~(0x7 << 16); | 772 | tmp = idx_value & ~(0x7 << 16); |
768 | tmp |= tile_flags; | 773 | tmp |= tile_flags; |
769 | ib[idx] = tmp; | 774 | ib[idx] = tmp; |
770 | |||
771 | i = (reg - 0x4E38) >> 2; | 775 | i = (reg - 0x4E38) >> 2; |
772 | track->cb[i].pitch = idx_value & 0x3FFE; | 776 | track->cb[i].pitch = idx_value & 0x3FFE; |
773 | switch (((idx_value >> 21) & 0xF)) { | 777 | switch (((idx_value >> 21) & 0xF)) { |
@@ -1052,11 +1056,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1052 | break; | 1056 | break; |
1053 | /* fallthrough do not move */ | 1057 | /* fallthrough do not move */ |
1054 | default: | 1058 | default: |
1055 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1059 | goto fail; |
1056 | reg, idx); | ||
1057 | return -EINVAL; | ||
1058 | } | 1060 | } |
1059 | return 0; | 1061 | return 0; |
1062 | fail: | ||
1063 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | ||
1064 | reg, idx); | ||
1065 | return -EINVAL; | ||
1060 | } | 1066 | } |
1061 | 1067 | ||
1062 | static int r300_packet3_check(struct radeon_cs_parser *p, | 1068 | static int r300_packet3_check(struct radeon_cs_parser *p, |
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index ea46d558e8f3..c5c2742e4140 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
921 | 921 | ||
922 | ptr_addr = drm_buffer_read_object(cmdbuf->buffer, | 922 | ptr_addr = drm_buffer_read_object(cmdbuf->buffer, |
923 | sizeof(stack_ptr_addr), &stack_ptr_addr); | 923 | sizeof(stack_ptr_addr), &stack_ptr_addr); |
924 | ref_age_base = (u32 *)(unsigned long)*ptr_addr; | 924 | ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr); |
925 | 925 | ||
926 | for (i=0; i < header.scratch.n_bufs; i++) { | 926 | for (i=0; i < header.scratch.n_bufs; i++) { |
927 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); | 927 | buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); |
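The r300_scratch() one-liner above replaces a direct dereference of ptr_addr with get_unaligned(), since the pointer lands in the middle of a packed command buffer and the value may not sit on a natural boundary for the load on strict-alignment machines. A small portable illustration of the same idea follows, using memcpy() in place of the kernel's get_unaligned() helper.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* read a 64-bit value from a byte stream without assuming alignment */
static uint64_t read_unaligned_u64(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));       /* compilers lower this to a safe load */
	return v;
}

int main(void)
{
	unsigned char buf[16] = { 0 };
	uint64_t val = 0x1122334455667788ULL;

	memcpy(buf + 3, &val, sizeof(val));     /* deliberately misaligned copy */
	printf("0x%016llx\n",
	       (unsigned long long)read_unaligned_u64(buf + 3));
	return 0;
}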
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3dc968c9f5a4..c2bda4ad62e7 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -59,6 +59,12 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
59 | /* get max number of pipes */ | 59 | /* get max number of pipes */ |
60 | gb_pipe_select = RREG32(0x402C); | 60 | gb_pipe_select = RREG32(0x402C); |
61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; | 61 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
62 | |||
63 | /* SE chips have 1 pipe */ | ||
64 | if ((rdev->pdev->device == 0x5e4c) || | ||
65 | (rdev->pdev->device == 0x5e4f)) | ||
66 | num_pipes = 1; | ||
67 | |||
62 | rdev->num_gb_pipes = num_pipes; | 68 | rdev->num_gb_pipes = num_pipes; |
63 | tmp = 0; | 69 | tmp = 0; |
64 | switch (num_pipes) { | 70 | switch (num_pipes) { |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index dac7042b797e..1d898051c631 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 36 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
37 | { | 37 | { |
38 | return rdev->family >= CHIP_R600 | 38 | return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR) |
39 | || rdev->family == CHIP_RS600 | 39 | || rdev->family == CHIP_RS600 |
40 | || rdev->family == CHIP_RS690 | 40 | || rdev->family == CHIP_RS690 |
41 | || rdev->family == CHIP_RS740; | 41 | || rdev->family == CHIP_RS740; |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 029fa1406d1d..2616b822ba68 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
314 | struct radeon_device *rdev = dev->dev_private; | 314 | struct radeon_device *rdev = dev->dev_private; |
315 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; | 315 | uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; |
316 | 316 | ||
317 | if (ASIC_IS_DCE4(rdev)) | ||
318 | return; | ||
319 | |||
317 | if (!offset) | 320 | if (!offset) |
318 | return; | 321 | return; |
319 | 322 | ||
@@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder) | |||
484 | struct radeon_device *rdev = dev->dev_private; | 487 | struct radeon_device *rdev = dev->dev_private; |
485 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 488 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
486 | 489 | ||
490 | if (ASIC_IS_DCE4(rdev)) | ||
491 | return; | ||
492 | |||
487 | if (!radeon_encoder->hdmi_offset) { | 493 | if (!radeon_encoder->hdmi_offset) { |
488 | r600_hdmi_assign_block(encoder); | 494 | r600_hdmi_assign_block(encoder); |
489 | if (!radeon_encoder->hdmi_offset) { | 495 | if (!radeon_encoder->hdmi_offset) { |
@@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder) | |||
525 | struct radeon_device *rdev = dev->dev_private; | 531 | struct radeon_device *rdev = dev->dev_private; |
526 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 532 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
527 | 533 | ||
534 | if (ASIC_IS_DCE4(rdev)) | ||
535 | return; | ||
536 | |||
528 | if (!radeon_encoder->hdmi_offset) { | 537 | if (!radeon_encoder->hdmi_offset) { |
529 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); | 538 | dev_err(rdev->dev, "Disabling not enabled HDMI\n"); |
530 | return; | 539 | return; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 3fba50540f72..1331351c5178 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
162 | { | 162 | { |
163 | struct drm_device *dev = connector->dev; | 163 | struct drm_device *dev = connector->dev; |
164 | struct drm_connector *conflict; | 164 | struct drm_connector *conflict; |
165 | struct radeon_connector *radeon_conflict; | ||
165 | int i; | 166 | int i; |
166 | 167 | ||
167 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { | 168 | list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { |
168 | if (conflict == connector) | 169 | if (conflict == connector) |
169 | continue; | 170 | continue; |
170 | 171 | ||
172 | radeon_conflict = to_radeon_connector(conflict); | ||
171 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 173 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
172 | if (conflict->encoder_ids[i] == 0) | 174 | if (conflict->encoder_ids[i] == 0) |
173 | break; | 175 | break; |
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
177 | if (conflict->status != connector_status_connected) | 179 | if (conflict->status != connector_status_connected) |
178 | continue; | 180 | continue; |
179 | 181 | ||
182 | if (radeon_conflict->use_digital) | ||
183 | continue; | ||
184 | |||
180 | if (priority == true) { | 185 | if (priority == true) { |
181 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
182 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); |
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
287 | 292 | ||
288 | if (property == rdev->mode_info.coherent_mode_property) { | 293 | if (property == rdev->mode_info.coherent_mode_property) { |
289 | struct radeon_encoder_atom_dig *dig; | 294 | struct radeon_encoder_atom_dig *dig; |
295 | bool new_coherent_mode; | ||
290 | 296 | ||
291 | /* need to find digital encoder on connector */ | 297 | /* need to find digital encoder on connector */ |
292 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); | 298 | encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); |
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
299 | return 0; | 305 | return 0; |
300 | 306 | ||
301 | dig = radeon_encoder->enc_priv; | 307 | dig = radeon_encoder->enc_priv; |
302 | dig->coherent_mode = val ? true : false; | 308 | new_coherent_mode = val ? true : false; |
303 | radeon_property_change_mode(&radeon_encoder->base); | 309 | if (dig->coherent_mode != new_coherent_mode) { |
310 | dig->coherent_mode = new_coherent_mode; | ||
311 | radeon_property_change_mode(&radeon_encoder->base); | ||
312 | } | ||
304 | } | 313 | } |
305 | 314 | ||
306 | if (property == rdev->mode_info.tv_std_property) { | 315 | if (property == rdev->mode_info.tv_std_property) { |
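The radeon_connector_set_property() hunk above makes the coherent-mode write conditional: the requested value is compared with dig->coherent_mode, and radeon_property_change_mode(), which forces a mode-set, only runs when the value really changed; the conflict-resolution loop likewise now skips connectors whose radeon_connector reports use_digital. Below is a small hedged sketch of the write-only-on-change pattern, with placeholder names standing in for the driver structures.

#include <stdbool.h>
#include <stdio.h>

struct output {
	bool coherent_mode;
};

/* stand-in for the expensive part (a full mode-set in the driver) */
static void apply_mode(struct output *out)
{
	printf("mode-set issued (coherent=%d)\n", out->coherent_mode);
}

static void set_coherent(struct output *out, bool requested)
{
	if (out->coherent_mode == requested)
		return;                 /* nothing changed: skip the mode-set */
	out->coherent_mode = requested;
	apply_mode(out);
}

int main(void)
{
	struct output out = { .coherent_mode = true };

	set_coherent(&out, true);       /* no-op, value already true */
	set_coherent(&out, false);      /* triggers exactly one mode-set */
	return 0;
}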
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 419630dd2075..2f042a3c0e62 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -435,14 +435,19 @@ static void radeon_init_pipes(struct drm_device *dev) | |||
435 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { | 435 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { |
436 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); | 436 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); |
437 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; | 437 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; |
438 | /* SE cards have 1 pipe */ | ||
439 | if ((dev->pdev->device == 0x5e4c) || | ||
440 | (dev->pdev->device == 0x5e4f)) | ||
441 | dev_priv->num_gb_pipes = 1; | ||
438 | } else { | 442 | } else { |
439 | /* R3xx */ | 443 | /* R3xx */ |
440 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && | 444 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && |
441 | dev->pdev->device != 0x4144) || | 445 | dev->pdev->device != 0x4144) || |
442 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { | 446 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 && |
447 | dev->pdev->device != 0x4148)) { | ||
443 | dev_priv->num_gb_pipes = 2; | 448 | dev_priv->num_gb_pipes = 2; |
444 | } else { | 449 | } else { |
445 | /* RV3xx/R300 AD */ | 450 | /* RV3xx/R300 AD/R350 AH */ |
446 | dev_priv->num_gb_pipes = 1; | 451 | dev_priv->num_gb_pipes = 1; |
447 | } | 452 | } |
448 | } | 453 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bddf17f97da8..7b629e305560 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -36,6 +36,54 @@ | |||
36 | #include "radeon.h" | 36 | #include "radeon.h" |
37 | #include "atom.h" | 37 | #include "atom.h" |
38 | 38 | ||
39 | static const char radeon_family_name[][16] = { | ||
40 | "R100", | ||
41 | "RV100", | ||
42 | "RS100", | ||
43 | "RV200", | ||
44 | "RS200", | ||
45 | "R200", | ||
46 | "RV250", | ||
47 | "RS300", | ||
48 | "RV280", | ||
49 | "R300", | ||
50 | "R350", | ||
51 | "RV350", | ||
52 | "RV380", | ||
53 | "R420", | ||
54 | "R423", | ||
55 | "RV410", | ||
56 | "RS400", | ||
57 | "RS480", | ||
58 | "RS600", | ||
59 | "RS690", | ||
60 | "RS740", | ||
61 | "RV515", | ||
62 | "R520", | ||
63 | "RV530", | ||
64 | "RV560", | ||
65 | "RV570", | ||
66 | "R580", | ||
67 | "R600", | ||
68 | "RV610", | ||
69 | "RV630", | ||
70 | "RV670", | ||
71 | "RV620", | ||
72 | "RV635", | ||
73 | "RS780", | ||
74 | "RS880", | ||
75 | "RV770", | ||
76 | "RV730", | ||
77 | "RV710", | ||
78 | "RV740", | ||
79 | "CEDAR", | ||
80 | "REDWOOD", | ||
81 | "JUNIPER", | ||
82 | "CYPRESS", | ||
83 | "HEMLOCK", | ||
84 | "LAST", | ||
85 | }; | ||
86 | |||
39 | /* | 87 | /* |
40 | * Clear GPU surface registers. | 88 | * Clear GPU surface registers. |
41 | */ | 89 | */ |
@@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
526 | int r; | 574 | int r; |
527 | int dma_bits; | 575 | int dma_bits; |
528 | 576 | ||
529 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); | ||
530 | rdev->shutdown = false; | 577 | rdev->shutdown = false; |
531 | rdev->dev = &pdev->dev; | 578 | rdev->dev = &pdev->dev; |
532 | rdev->ddev = ddev; | 579 | rdev->ddev = ddev; |
@@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev, | |||
538 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 585 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
539 | rdev->gpu_lockup = false; | 586 | rdev->gpu_lockup = false; |
540 | rdev->accel_working = false; | 587 | rdev->accel_working = false; |
588 | |||
589 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", | ||
590 | radeon_family_name[rdev->family], pdev->vendor, pdev->device); | ||
591 | |||
541 | /* mutex initialization are all done here so we | 592 | /* mutex initialization are all done here so we |
542 | * can recall function without having locking issues */ | 593 | * can recall function without having locking issues */ |
543 | mutex_init(&rdev->cs_mutex); | 594 | mutex_init(&rdev->cs_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b8d672828246..bb1c122cad21 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) | |||
86 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); | 86 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); |
87 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); | 87 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); |
88 | 88 | ||
89 | WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id); | 89 | WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); |
90 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); | 90 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); |
91 | 91 | ||
92 | WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); | 92 | WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); |
93 | for (i = 0; i < 256; i++) { | 93 | for (i = 0; i < 256; i++) { |
94 | WREG32(EVERGREEN_DC_LUT_30_COLOR, | 94 | WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, |
95 | (radeon_crtc->lut_r[i] << 20) | | 95 | (radeon_crtc->lut_r[i] << 20) | |
96 | (radeon_crtc->lut_g[i] << 10) | | 96 | (radeon_crtc->lut_g[i] << 10) | |
97 | (radeon_crtc->lut_b[i] << 0)); | 97 | (radeon_crtc->lut_b[i] << 0)); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 055a51732dcb..4b05563d99e1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -43,9 +43,10 @@ | |||
43 | * - 2.0.0 - initial interface | 43 | * - 2.0.0 - initial interface |
44 | * - 2.1.0 - add square tiling interface | 44 | * - 2.1.0 - add square tiling interface |
45 | * - 2.2.0 - add r6xx/r7xx const buffer support | 45 | * - 2.2.0 - add r6xx/r7xx const buffer support |
46 | * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs | ||
46 | */ | 47 | */ |
47 | #define KMS_DRIVER_MAJOR 2 | 48 | #define KMS_DRIVER_MAJOR 2 |
48 | #define KMS_DRIVER_MINOR 2 | 49 | #define KMS_DRIVER_MINOR 3 |
49 | #define KMS_DRIVER_PATCHLEVEL 0 | 50 | #define KMS_DRIVER_PATCHLEVEL 0 |
50 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 51 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
51 | int radeon_driver_unload_kms(struct drm_device *dev); | 52 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c52fc3080b67..fed7b8084779 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -865,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
865 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 865 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
866 | if (dig->coherent_mode) | 866 | if (dig->coherent_mode) |
867 | args.v3.acConfig.fCoherentMode = 1; | 867 | args.v3.acConfig.fCoherentMode = 1; |
868 | if (radeon_encoder->pixel_clock > 165000) | ||
869 | args.v3.acConfig.fDualLinkConnector = 1; | ||
868 | } | 870 | } |
869 | } else if (ASIC_IS_DCE32(rdev)) { | 871 | } else if (ASIC_IS_DCE32(rdev)) { |
870 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; | 872 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
@@ -888,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
888 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 890 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
889 | if (dig->coherent_mode) | 891 | if (dig->coherent_mode) |
890 | args.v2.acConfig.fCoherentMode = 1; | 892 | args.v2.acConfig.fCoherentMode = 1; |
893 | if (radeon_encoder->pixel_clock > 165000) | ||
894 | args.v2.acConfig.fDualLinkConnector = 1; | ||
891 | } | 895 | } |
892 | } else { | 896 | } else { |
893 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; | 897 | args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; |
@@ -1322,7 +1326,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1322 | 1326 | ||
1323 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1327 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1324 | 1328 | ||
1325 | if (ASIC_IS_AVIVO(rdev)) { | 1329 | if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { |
1326 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) | 1330 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
1327 | atombios_yuv_setup(encoder, true); | 1331 | atombios_yuv_setup(encoder, true); |
1328 | else | 1332 | else |
@@ -1373,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1373 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1377 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
1374 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 1378 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
1375 | atombios_dac_setup(encoder, ATOM_ENABLE); | 1379 | atombios_dac_setup(encoder, ATOM_ENABLE); |
1376 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 1380 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { |
1377 | atombios_tv_setup(encoder, ATOM_ENABLE); | 1381 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
1382 | atombios_tv_setup(encoder, ATOM_ENABLE); | ||
1383 | else | ||
1384 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
1385 | } | ||
1378 | break; | 1386 | break; |
1379 | } | 1387 | } |
1380 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1388 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 93c7d5d41914..e329066dcabd 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -36,7 +36,7 @@ | |||
36 | * Radeon chip families | 36 | * Radeon chip families |
37 | */ | 37 | */ |
38 | enum radeon_family { | 38 | enum radeon_family { |
39 | CHIP_R100, | 39 | CHIP_R100 = 0, |
40 | CHIP_RV100, | 40 | CHIP_RV100, |
41 | CHIP_RS100, | 41 | CHIP_RS100, |
42 | CHIP_RV200, | 42 | CHIP_RV200, |
@@ -99,4 +99,5 @@ enum radeon_chip_flags { | |||
99 | RADEON_IS_PCI = 0x00800000UL, | 99 | RADEON_IS_PCI = 0x00800000UL, |
100 | RADEON_IS_IGPGART = 0x01000000UL, | 100 | RADEON_IS_IGPGART = 0x01000000UL, |
101 | }; | 101 | }; |
102 | |||
102 | #endif | 103 | #endif |
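radeon_device.c above introduces radeon_family_name[], indexed directly by enum radeon_family, and the header hunk pins CHIP_R100 = 0 so the table and the enum stay in step. The sketch below shows one common way to guard that kind of pairing with a compile-time size check; this is an illustrative extra, not something the patch itself adds (in the kernel a BUILD_BUG_ON() would play the same role).

#include <stdio.h>

enum chip_family {                      /* abbreviated stand-in for the real enum */
	CHIP_R100 = 0,
	CHIP_RV100,
	CHIP_R600,
	CHIP_LAST,
};

static const char *const family_name[] = {
	[CHIP_R100]  = "R100",
	[CHIP_RV100] = "RV100",
	[CHIP_R600]  = "R600",
};

/* refuses to compile if an enum value is added without a matching name */
_Static_assert(sizeof(family_name) / sizeof(family_name[0]) == CHIP_LAST,
	       "family_name[] out of sync with enum chip_family");

int main(void)
{
	printf("%s\n", family_name[CHIP_R600]);
	return 0;
}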
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index d3657dcfdd26..c633319f98ed 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -165,7 +165,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | |||
165 | { | 165 | { |
166 | struct radeon_device *rdev = dev->dev_private; | 166 | struct radeon_device *rdev = dev->dev_private; |
167 | 167 | ||
168 | if (crtc < 0 || crtc > 1) { | 168 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
169 | DRM_ERROR("Invalid crtc %d\n", crtc); | 169 | DRM_ERROR("Invalid crtc %d\n", crtc); |
170 | return -EINVAL; | 170 | return -EINVAL; |
171 | } | 171 | } |
@@ -177,7 +177,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) | |||
177 | { | 177 | { |
178 | struct radeon_device *rdev = dev->dev_private; | 178 | struct radeon_device *rdev = dev->dev_private; |
179 | 179 | ||
180 | if (crtc < 0 || crtc > 1) { | 180 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
181 | DRM_ERROR("Invalid crtc %d\n", crtc); | 181 | DRM_ERROR("Invalid crtc %d\n", crtc); |
182 | return -EINVAL; | 182 | return -EINVAL; |
183 | } | 183 | } |
@@ -191,7 +191,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) | |||
191 | { | 191 | { |
192 | struct radeon_device *rdev = dev->dev_private; | 192 | struct radeon_device *rdev = dev->dev_private; |
193 | 193 | ||
194 | if (crtc < 0 || crtc > 1) { | 194 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
195 | DRM_ERROR("Invalid crtc %d\n", crtc); | 195 | DRM_ERROR("Invalid crtc %d\n", crtc); |
196 | return; | 196 | return; |
197 | } | 197 | } |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index 19c4663fa9c6..1e97b2d129fd 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 | |||
@@ -125,6 +125,8 @@ r300 0x4f60 | |||
125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
128 | 0x4010 GB_MSPOS0 | ||
129 | 0x4014 GB_MSPOS1 | ||
128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 989f7a020832..e958980d00f1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
@@ -125,6 +125,8 @@ r420 0x4f60 | |||
125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
128 | 0x4010 GB_MSPOS0 | ||
129 | 0x4014 GB_MSPOS1 | ||
128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 6801b865d1c4..83e8bc0c2bb2 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
@@ -125,6 +125,8 @@ rs600 0x6d40 | |||
125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
127 | 0x4008 GB_ENABLE | 127 | 0x4008 GB_ENABLE |
128 | 0x4010 GB_MSPOS0 | ||
129 | 0x4014 GB_MSPOS1 | ||
128 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
129 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
130 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 38abf63bf2cd..1e46233985eb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -35,6 +35,7 @@ rv515 0x6d40 | |||
35 | 0x1DA8 VAP_VPORT_ZSCALE | 35 | 0x1DA8 VAP_VPORT_ZSCALE |
36 | 0x1DAC VAP_VPORT_ZOFFSET | 36 | 0x1DAC VAP_VPORT_ZOFFSET |
37 | 0x2080 VAP_CNTL | 37 | 0x2080 VAP_CNTL |
38 | 0x208C VAP_INDEX_OFFSET | ||
38 | 0x2090 VAP_OUT_VTX_FMT_0 | 39 | 0x2090 VAP_OUT_VTX_FMT_0 |
39 | 0x2094 VAP_OUT_VTX_FMT_1 | 40 | 0x2094 VAP_OUT_VTX_FMT_1 |
40 | 0x20B0 VAP_VTE_CNTL | 41 | 0x20B0 VAP_VTE_CNTL |
@@ -158,6 +159,8 @@ rv515 0x6d40 | |||
158 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | 159 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 |
159 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | 160 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 |
160 | 0x4008 GB_ENABLE | 161 | 0x4008 GB_ENABLE |
162 | 0x4010 GB_MSPOS0 | ||
163 | 0x4014 GB_MSPOS1 | ||
161 | 0x401C GB_SELECT | 164 | 0x401C GB_SELECT |
162 | 0x4020 GB_AA_CONFIG | 165 | 0x4020 GB_AA_CONFIG |
163 | 0x4024 GB_FIFO_SIZE | 166 | 0x4024 GB_FIFO_SIZE |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index abf824c2123d..a81bc7a21e14 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) | |||
159 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 159 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
160 | 160 | ||
161 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 161 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
162 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); | 162 | tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); |
163 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); | 163 | WREG32_MC(R_000100_MC_PT0_CNTL, tmp); |
164 | 164 | ||
165 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 165 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
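The rs600_gart_tlb_flush() fix above changes & to | when building the MC_PT0_CNTL value: judging by the fix, the two S_000100_*(1) values are distinct single-bit masks, so ANDing them gives zero and neither invalidate bit was actually being set before this change. The snippet below reproduces the pitfall with invented bit positions.

#include <stdint.h>
#include <stdio.h>

/* invented bit positions, purely for illustration */
#define INVALIDATE_ALL_L1_TLBS(x)	((uint32_t)(x) << 0)
#define INVALIDATE_L2_CACHE(x)		((uint32_t)(x) << 1)

int main(void)
{
	uint32_t reg = 0x100;           /* pretend current register contents */
	uint32_t wrong, right;

	wrong = reg | (INVALIDATE_ALL_L1_TLBS(1) &
		       INVALIDATE_L2_CACHE(1));         /* AND of two bits: adds nothing */
	right = reg | INVALIDATE_ALL_L1_TLBS(1) |
		      INVALIDATE_L2_CACHE(1);           /* sets both flag bits */

	printf("wrong = 0x%03X, right = 0x%03X\n",
	       (unsigned)wrong, (unsigned)right);       /* 0x100 vs 0x103 */
	return 0;
}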
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index c1605b528e8f..0f28d91f29d8 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
@@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = { | |||
142 | "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", | 142 | "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", |
143 | "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", | 143 | "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", |
144 | NULL }, | 144 | NULL }, |
145 | /* Set 17: iMac 9,1 */ | ||
146 | { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P", | ||
147 | "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL }, | ||
148 | /* Set 18: MacBook Pro 2,2 */ | ||
149 | { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", | ||
150 | "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, | ||
145 | }; | 151 | }; |
146 | 152 | ||
147 | /* List of keys used to read/write fan speeds */ | 153 | /* List of keys used to read/write fan speeds */ |
@@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { | |||
1350 | { .accelerometer = 1, .light = 1, .temperature_set = 15 }, | 1356 | { .accelerometer = 1, .light = 1, .temperature_set = 15 }, |
1351 | /* MacPro3,1: temperature set 16 */ | 1357 | /* MacPro3,1: temperature set 16 */ |
1352 | { .accelerometer = 0, .light = 0, .temperature_set = 16 }, | 1358 | { .accelerometer = 0, .light = 0, .temperature_set = 16 }, |
1359 | /* iMac 9,1: light sensor only, temperature set 17 */ | ||
1360 | { .accelerometer = 0, .light = 0, .temperature_set = 17 }, | ||
1361 | /* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ | ||
1362 | { .accelerometer = 1, .light = 1, .temperature_set = 18 }, | ||
1353 | }; | 1363 | }; |
1354 | 1364 | ||
1355 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". | 1365 | /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". |
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
1375 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1385 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
1376 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, | 1386 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, |
1377 | &applesmc_dmi_data[9]}, | 1387 | &applesmc_dmi_data[9]}, |
1388 | { applesmc_dmi_match, "Apple MacBook Pro 2,2", { | ||
1389 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."), | ||
1390 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") }, | ||
1391 | &applesmc_dmi_data[18]}, | ||
1378 | { applesmc_dmi_match, "Apple MacBook Pro", { | 1392 | { applesmc_dmi_match, "Apple MacBook Pro", { |
1379 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), | 1393 | DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), |
1380 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, | 1394 | DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, |
@@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { | |||
1415 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1429 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
1416 | DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, | 1430 | DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, |
1417 | &applesmc_dmi_data[4]}, | 1431 | &applesmc_dmi_data[4]}, |
1432 | { applesmc_dmi_match, "Apple iMac 9,1", { | ||
1433 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), | ||
1434 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") }, | ||
1435 | &applesmc_dmi_data[17]}, | ||
1418 | { applesmc_dmi_match, "Apple iMac 8", { | 1436 | { applesmc_dmi_match, "Apple iMac 8", { |
1419 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | 1437 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), |
1420 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, | 1438 | DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 75f3fa55663d..16c420240724 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data) | |||
1169 | int err; | 1169 | int err; |
1170 | 1170 | ||
1171 | list_for_each_entry(s, &data->sensor_list, list) { | 1171 | list_for_each_entry(s, &data->sensor_list, list) { |
1172 | sysfs_attr_init(&s->input_attr.attr); | ||
1172 | err = device_create_file(data->hwmon_dev, &s->input_attr); | 1173 | err = device_create_file(data->hwmon_dev, &s->input_attr); |
1173 | if (err) | 1174 | if (err) |
1174 | return err; | 1175 | return err; |
1176 | sysfs_attr_init(&s->label_attr.attr); | ||
1175 | err = device_create_file(data->hwmon_dev, &s->label_attr); | 1177 | err = device_create_file(data->hwmon_dev, &s->label_attr); |
1176 | if (err) | 1178 | if (err) |
1177 | return err; | 1179 | return err; |
1180 | sysfs_attr_init(&s->limit1_attr.attr); | ||
1178 | err = device_create_file(data->hwmon_dev, &s->limit1_attr); | 1181 | err = device_create_file(data->hwmon_dev, &s->limit1_attr); |
1179 | if (err) | 1182 | if (err) |
1180 | return err; | 1183 | return err; |
1184 | sysfs_attr_init(&s->limit2_attr.attr); | ||
1181 | err = device_create_file(data->hwmon_dev, &s->limit2_attr); | 1185 | err = device_create_file(data->hwmon_dev, &s->limit2_attr); |
1182 | if (err) | 1186 | if (err) |
1183 | return err; | 1187 | return err; |
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index be475e844c2a..c8ab50516672 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c | |||
@@ -217,6 +217,10 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { | |||
217 | AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), | 217 | AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), |
218 | AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), | 218 | AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), |
219 | AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), | 219 | AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), |
220 | AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), | ||
221 | AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), | ||
222 | AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), | ||
223 | AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), | ||
220 | { NULL, } | 224 | { NULL, } |
221 | /* Laptop models without axis info (yet): | 225 | /* Laptop models without axis info (yet): |
222 | * "NC6910" "HP Compaq 6910" | 226 | * "NC6910" "HP Compaq 6910" |
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 1002befd87d5..5be09c048c5f 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
@@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, | |||
539 | 539 | ||
540 | struct it87_data *data = dev_get_drvdata(dev); | 540 | struct it87_data *data = dev_get_drvdata(dev); |
541 | long val; | 541 | long val; |
542 | u8 reg; | ||
542 | 543 | ||
543 | if (strict_strtol(buf, 10, &val) < 0) | 544 | if (strict_strtol(buf, 10, &val) < 0) |
544 | return -EINVAL; | 545 | return -EINVAL; |
545 | 546 | ||
546 | mutex_lock(&data->update_lock); | 547 | reg = it87_read_value(data, IT87_REG_TEMP_ENABLE); |
547 | 548 | reg &= ~(1 << nr); | |
548 | data->sensor &= ~(1 << nr); | 549 | reg &= ~(8 << nr); |
549 | data->sensor &= ~(8 << nr); | ||
550 | if (val == 2) { /* backwards compatibility */ | 550 | if (val == 2) { /* backwards compatibility */ |
551 | dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " | 551 | dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " |
552 | "instead\n"); | 552 | "instead\n"); |
@@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, | |||
554 | } | 554 | } |
555 | /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ | 555 | /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ |
556 | if (val == 3) | 556 | if (val == 3) |
557 | data->sensor |= 1 << nr; | 557 | reg |= 1 << nr; |
558 | else if (val == 4) | 558 | else if (val == 4) |
559 | data->sensor |= 8 << nr; | 559 | reg |= 8 << nr; |
560 | else if (val != 0) { | 560 | else if (val != 0) |
561 | mutex_unlock(&data->update_lock); | ||
562 | return -EINVAL; | 561 | return -EINVAL; |
563 | } | 562 | |
563 | mutex_lock(&data->update_lock); | ||
564 | data->sensor = reg; | ||
564 | it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); | 565 | it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); |
566 | data->valid = 0; /* Force cache refresh */ | ||
565 | mutex_unlock(&data->update_lock); | 567 | mutex_unlock(&data->update_lock); |
566 | return count; | 568 | return count; |
567 | } | 569 | } |
@@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev) | |||
1841 | it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); | 1843 | it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); |
1842 | } | 1844 | } |
1843 | 1845 | ||
1844 | /* Check if temperature channels are reset manually or by some reason */ | 1846 | /* Temperature channels are not forcibly enabled, as they can be |
1845 | tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE); | 1847 | * set to two different sensor types and we can't guess which one |
1846 | if ((tmp & 0x3f) == 0) { | 1848 | * is correct for a given system. These channels can be enabled at |
1847 | /* Temp1,Temp3=thermistor; Temp2=thermal diode */ | 1849 | * run-time through the temp{1-3}_type sysfs accessors if needed. */ |
1848 | tmp = (tmp & 0xc0) | 0x2a; | ||
1849 | it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp); | ||
1850 | } | ||
1851 | data->sensor = tmp; | ||
1852 | 1850 | ||
1853 | /* Check if voltage monitors are reset manually or by some reason */ | 1851 | /* Check if voltage monitors are reset manually or by some reason */ |
1854 | tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); | 1852 | tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); |
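A compilable sketch (not the it87 driver) of the restructuring in set_sensor() above: the new register value is built and validated before the lock is taken, so the invalid-input path no longer needs an unlock. A pthread mutex stands in for the driver's update_lock, and the register read/write are stubbed with a plain variable; the backwards-compatibility handling of type 2 is omitted.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char sensor_reg;        /* stand-in for IT87_REG_TEMP_ENABLE */

static int set_sensor_type(int nr, long val)
{
        unsigned char reg = sensor_reg;  /* stand-in for it87_read_value() */

        reg &= ~(1 << nr);               /* clear thermal-diode bit */
        reg &= ~(8 << nr);               /* clear thermistor bit */

        if (val == 3)
                reg |= 1 << nr;          /* thermal diode */
        else if (val == 4)
                reg |= 8 << nr;          /* thermistor */
        else if (val != 0)
                return -EINVAL;          /* no lock held: nothing to undo */

        pthread_mutex_lock(&update_lock);
        sensor_reg = reg;                /* stand-in for it87_write_value() */
        pthread_mutex_unlock(&update_lock);
        return 0;
}

int main(void)
{
        printf("set temp2 to thermistor: %d\n", set_sensor_type(1, 4));
        printf("reject bogus type 7:     %d\n", set_sensor_type(1, 7));
        printf("sensor_reg = 0x%02x\n", sensor_reg);
        return 0;
}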
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 6b2d8ae64fe1..a610e7880fb3 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c | |||
@@ -303,13 +303,13 @@ error_ret: | |||
303 | **/ | 303 | **/ |
304 | static inline int sht15_calc_temp(struct sht15_data *data) | 304 | static inline int sht15_calc_temp(struct sht15_data *data) |
305 | { | 305 | { |
306 | int d1 = 0; | 306 | int d1 = temppoints[0].d1; |
307 | int i; | 307 | int i; |
308 | 308 | ||
309 | for (i = 1; i < ARRAY_SIZE(temppoints); i++) | 309 | for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) |
310 | /* Find pointer to interpolate */ | 310 | /* Find pointer to interpolate */ |
311 | if (data->supply_uV > temppoints[i - 1].vdd) { | 311 | if (data->supply_uV > temppoints[i - 1].vdd) { |
312 | d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) | 312 | d1 = (data->supply_uV - temppoints[i - 1].vdd) |
313 | * (temppoints[i].d1 - temppoints[i - 1].d1) | 313 | * (temppoints[i].d1 - temppoints[i - 1].d1) |
314 | / (temppoints[i].vdd - temppoints[i - 1].vdd) | 314 | / (temppoints[i].vdd - temppoints[i - 1].vdd) |
315 | + temppoints[i - 1].d1; | 315 | + temppoints[i - 1].d1; |
@@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev) | |||
542 | /* If a regulator is available, query what the supply voltage actually is!*/ | 542 | /* If a regulator is available, query what the supply voltage actually is!*/ |
543 | data->reg = regulator_get(data->dev, "vcc"); | 543 | data->reg = regulator_get(data->dev, "vcc"); |
544 | if (!IS_ERR(data->reg)) { | 544 | if (!IS_ERR(data->reg)) { |
545 | data->supply_uV = regulator_get_voltage(data->reg); | 545 | int voltage; |
546 | |||
547 | voltage = regulator_get_voltage(data->reg); | ||
548 | if (voltage) | ||
549 | data->supply_uV = voltage; | ||
550 | |||
546 | regulator_enable(data->reg); | 551 | regulator_enable(data->reg); |
547 | /* setup a notifier block to update this if another device | 552 | /* setup a notifier block to update this if another device |
548 | * causes the voltage to change */ | 553 | * causes the voltage to change */ |
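A compilable sketch (not the sht15 driver) of the interpolation after the fix: the table is scanned from the top, the default d1 is the first table entry rather than 0, and the supply voltage is kept in the same unit as the table's vdd column. The numbers below are invented for illustration only and are not the real SHT15 coefficients.

#include <stdio.h>

struct vdd_point { int vdd; int d1; };          /* same unit as supply */

static const struct vdd_point temppoints[] = {
        { 2500, -3940 }, { 3000, -3960 }, { 3500, -3970 },
        { 4000, -3980 }, { 5000, -4010 },
};
#define NPOINTS (sizeof(temppoints) / sizeof(temppoints[0]))

static int calc_d1(int supply)
{
        int d1 = temppoints[0].d1;              /* sane default, not 0 */
        int i;

        for (i = NPOINTS - 1; i > 0; i--)
                if (supply > temppoints[i - 1].vdd) {
                        /* linear interpolation between points i-1 and i */
                        d1 = (supply - temppoints[i - 1].vdd)
                             * (temppoints[i].d1 - temppoints[i - 1].d1)
                             / (temppoints[i].vdd - temppoints[i - 1].vdd)
                             + temppoints[i - 1].d1;
                        break;
                }
        return d1;
}

int main(void)
{
        printf("d1 at 3300: %d\n", calc_d1(3300));
        printf("d1 at 2400: %d (falls back to first entry)\n", calc_d1(2400));
        return 0;
}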
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index f7e27b702375..d1ff9408dc1f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -146,10 +146,10 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) | |||
146 | "<%s> I2C Interrupted\n", __func__); | 146 | "<%s> I2C Interrupted\n", __func__); |
147 | return -EINTR; | 147 | return -EINTR; |
148 | } | 148 | } |
149 | if (time_after(jiffies, orig_jiffies + HZ / 1000)) { | 149 | if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { |
150 | dev_dbg(&i2c_imx->adapter.dev, | 150 | dev_dbg(&i2c_imx->adapter.dev, |
151 | "<%s> I2C bus is busy\n", __func__); | 151 | "<%s> I2C bus is busy\n", __func__); |
152 | return -EIO; | 152 | return -ETIMEDOUT; |
153 | } | 153 | } |
154 | schedule(); | 154 | schedule(); |
155 | } | 155 | } |
@@ -444,6 +444,8 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, | |||
444 | result = i2c_imx_read(i2c_imx, &msgs[i]); | 444 | result = i2c_imx_read(i2c_imx, &msgs[i]); |
445 | else | 445 | else |
446 | result = i2c_imx_write(i2c_imx, &msgs[i]); | 446 | result = i2c_imx_write(i2c_imx, &msgs[i]); |
447 | if (result) | ||
448 | goto fail0; | ||
447 | } | 449 | } |
448 | 450 | ||
449 | fail0: | 451 | fail0: |
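A compilable sketch (not the i2c-imx driver) of why "orig_jiffies + HZ / 1000" is fragile: with any HZ below 1000 the integer division yields 0 jiffies, so the busy check times out almost immediately, whereas a millisecond-based conversion gives a real bound. The helper below only approximates what the kernel's msecs_to_jiffies() does, and the HZ values are just common configurations used for illustration.

#include <stdio.h>

static unsigned long msecs_to_jiffies_approx(unsigned int ms, unsigned int hz)
{
        /* round up so a non-zero timeout never becomes 0 jiffies */
        return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
        unsigned int hz_values[] = { 100, 250, 1000 };
        unsigned int i;

        for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                unsigned int hz = hz_values[i];
                printf("HZ=%4u: HZ/1000 = %u jiffies (old timeout), "
                       "500 ms = %lu jiffies (new timeout)\n",
                       hz, hz / 1000, msecs_to_jiffies_approx(500, hz));
        }
        return 0;
}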
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 6bd0f19cd451..389ac6032a7b 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -903,6 +903,11 @@ omap_i2c_probe(struct platform_device *pdev) | |||
903 | 903 | ||
904 | platform_set_drvdata(pdev, dev); | 904 | platform_set_drvdata(pdev, dev); |
905 | 905 | ||
906 | if (cpu_is_omap7xx()) | ||
907 | dev->reg_shift = 1; | ||
908 | else | ||
909 | dev->reg_shift = 2; | ||
910 | |||
906 | if ((r = omap_i2c_get_clocks(dev)) != 0) | 911 | if ((r = omap_i2c_get_clocks(dev)) != 0) |
907 | goto err_iounmap; | 912 | goto err_iounmap; |
908 | 913 | ||
@@ -926,11 +931,6 @@ omap_i2c_probe(struct platform_device *pdev) | |||
926 | dev->b_hw = 1; /* Enable hardware fixes */ | 931 | dev->b_hw = 1; /* Enable hardware fixes */ |
927 | } | 932 | } |
928 | 933 | ||
929 | if (cpu_is_omap7xx()) | ||
930 | dev->reg_shift = 1; | ||
931 | else | ||
932 | dev->reg_shift = 2; | ||
933 | |||
934 | /* reset ASAP, clearing any IRQs */ | 934 | /* reset ASAP, clearing any IRQs */ |
935 | omap_i2c_init(dev); | 935 | omap_i2c_init(dev); |
936 | 936 | ||
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 247103372a06..a97e3fec8148 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c | |||
@@ -173,6 +173,9 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) | |||
173 | /* We still have something to talk about... */ | 173 | /* We still have something to talk about... */ |
174 | val = *alg_data->mif.buf++; | 174 | val = *alg_data->mif.buf++; |
175 | 175 | ||
176 | if (alg_data->mif.len == 1) | ||
177 | val |= stop_bit; | ||
178 | |||
176 | alg_data->mif.len--; | 179 | alg_data->mif.len--; |
177 | iowrite32(val, I2C_REG_TX(alg_data)); | 180 | iowrite32(val, I2C_REG_TX(alg_data)); |
178 | 181 | ||
@@ -246,6 +249,9 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) | |||
246 | __func__); | 249 | __func__); |
247 | 250 | ||
248 | if (alg_data->mif.len == 1) { | 251 | if (alg_data->mif.len == 1) { |
252 | /* Last byte, do not acknowledge next rcv. */ | ||
253 | val |= stop_bit; | ||
254 | |||
249 | /* | 255 | /* |
250 | * Enable interrupt RFDAIE (data in Rx fifo), | 256 | * Enable interrupt RFDAIE (data in Rx fifo), |
251 | * and disable DRMIE (need data for Tx) | 257 | * and disable DRMIE (need data for Tx) |
@@ -633,6 +639,8 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev) | |||
633 | */ | 639 | */ |
634 | 640 | ||
635 | tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; | 641 | tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; |
642 | if (tmp > 0x3FF) | ||
643 | tmp = 0x3FF; | ||
636 | iowrite32(tmp, I2C_REG_CKH(alg_data)); | 644 | iowrite32(tmp, I2C_REG_CKH(alg_data)); |
637 | iowrite32(tmp, I2C_REG_CKL(alg_data)); | 645 | iowrite32(tmp, I2C_REG_CKL(alg_data)); |
638 | 646 | ||
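A compilable sketch (not the i2c-pnx driver) of the divider computation above with the new clamp: the CKH/CKL registers are assumed to be 10 bits wide (0x3FF), per the added bound, and the I2C_PNX_SPEED_KHZ value and sample clock rates are assumptions for illustration.

#include <stdio.h>

#define I2C_PNX_SPEED_KHZ  100          /* target bus speed, assumed */

static unsigned int clock_divider(unsigned long freq_hz)
{
        unsigned long tmp = ((freq_hz / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;

        if (tmp > 0x3FF)                /* clamp to the 10-bit register */
                tmp = 0x3FF;
        return (unsigned int)tmp;
}

int main(void)
{
        printf("13 MHz  -> CKH/CKL = %u\n", clock_divider(13000000UL));
        printf("208 MHz -> CKH/CKL = %u (clamped)\n", clock_divider(208000000UL));
        return 0;
}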
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 1f5b38be73bc..495be451d326 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -498,7 +498,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) | |||
498 | int i = 0; | 498 | int i = 0; |
499 | 499 | ||
500 | /* Locate the apropriate clock setting */ | 500 | /* Locate the apropriate clock setting */ |
501 | while (i < ARRAY_SIZE(stu300_clktable) && | 501 | while (i < ARRAY_SIZE(stu300_clktable) - 1 && |
502 | stu300_clktable[i].rate < clkrate) | 502 | stu300_clktable[i].rate < clkrate) |
503 | i++; | 503 | i++; |
504 | 504 | ||
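A compilable sketch (not the stu300 driver) of the loop-bound fix: stopping at ARRAY_SIZE - 1 keeps the index valid even when the requested rate exceeds every table entry, so the caller gets the last setting instead of a read past the end of the table. Table values are invented for illustration.

#include <stdio.h>

struct clk_setting { unsigned long rate; unsigned int cr; };

static const struct clk_setting clktable[] = {
        {  2500000, 0x0 }, {  4000000, 0x1 }, {  8000000, 0x2 },
        { 13000000, 0x3 },
};
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int pick_setting(unsigned long clkrate)
{
        unsigned int i = 0;

        while (i < ARRAY_SIZE(clktable) - 1 && clktable[i].rate < clkrate)
                i++;
        return clktable[i].cr;
}

int main(void)
{
        printf("6 MHz  -> cr 0x%x\n", pick_setting(6000000UL));
        printf("52 MHz -> cr 0x%x (last entry, no overrun)\n",
               pick_setting(52000000UL));
        return 0;
}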
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index ab87e4f7cec9..defce2877eef 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
@@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
409 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), | 409 | PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), |
410 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), | 410 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), |
411 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 411 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
412 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17), | ||
413 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), | ||
412 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), | 414 | PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), |
413 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 415 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
414 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 416 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
@@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
429 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), | 431 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), |
430 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), | 432 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), |
431 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), | 433 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), |
434 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d), | ||
435 | PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), | ||
432 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), | 436 | PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), |
433 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), | 437 | PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), |
434 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), | 438 | PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), |
diff --git a/drivers/input/input.c b/drivers/input/input.c index afd4e2b7658c..9c79bd56b51a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev, | |||
660 | int input_get_keycode(struct input_dev *dev, | 660 | int input_get_keycode(struct input_dev *dev, |
661 | unsigned int scancode, unsigned int *keycode) | 661 | unsigned int scancode, unsigned int *keycode) |
662 | { | 662 | { |
663 | return dev->getkeycode(dev, scancode, keycode); | 663 | unsigned long flags; |
664 | int retval; | ||
665 | |||
666 | spin_lock_irqsave(&dev->event_lock, flags); | ||
667 | retval = dev->getkeycode(dev, scancode, keycode); | ||
668 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
669 | |||
670 | return retval; | ||
664 | } | 671 | } |
665 | EXPORT_SYMBOL(input_get_keycode); | 672 | EXPORT_SYMBOL(input_get_keycode); |
666 | 673 | ||
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index ffc25cfcef7a..b443e088fd3c 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
374 | input_dev->name = pdev->name; | 374 | input_dev->name = pdev->name; |
375 | input_dev->id.bustype = BUS_HOST; | 375 | input_dev->id.bustype = BUS_HOST; |
376 | input_dev->dev.parent = &pdev->dev; | 376 | input_dev->dev.parent = &pdev->dev; |
377 | input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); | 377 | input_dev->evbit[0] = BIT_MASK(EV_KEY); |
378 | if (!pdata->no_autorepeat) | ||
379 | input_dev->evbit[0] |= BIT_MASK(EV_REP); | ||
378 | input_dev->open = matrix_keypad_start; | 380 | input_dev->open = matrix_keypad_start; |
379 | input_dev->close = matrix_keypad_stop; | 381 | input_dev->close = matrix_keypad_stop; |
380 | 382 | ||
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 99d58764ef03..0d22cb9ce42e 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = { | |||
64 | { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, | 64 | { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, |
65 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, | 65 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, |
66 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ | 66 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ |
67 | { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */ | ||
67 | { { 0x52, 0x01, 0x14 }, 0xff, 0xff, | 68 | { { 0x52, 0x01, 0x14 }, 0xff, 0xff, |
68 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ | 69 | ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ |
69 | }; | 70 | }; |
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 4f8fe0886b2a..b89879bd860f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = { | |||
803 | .disconnect = bcm5974_disconnect, | 803 | .disconnect = bcm5974_disconnect, |
804 | .suspend = bcm5974_suspend, | 804 | .suspend = bcm5974_suspend, |
805 | .resume = bcm5974_resume, | 805 | .resume = bcm5974_resume, |
806 | .reset_resume = bcm5974_resume, | ||
807 | .id_table = bcm5974_table, | 806 | .id_table = bcm5974_table, |
808 | .supports_autosuspend = 1, | 807 | .supports_autosuspend = 1, |
809 | }; | 808 | }; |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 577688b5b951..6440a8f55686 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); | |||
39 | 39 | ||
40 | static bool i8042_nomux; | 40 | static bool i8042_nomux; |
41 | module_param_named(nomux, i8042_nomux, bool, 0); | 41 | module_param_named(nomux, i8042_nomux, bool, 0); |
42 | MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present."); | 42 | MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); |
43 | 43 | ||
44 | static bool i8042_unlock; | 44 | static bool i8042_unlock; |
45 | module_param_named(unlock, i8042_unlock, bool, 0); | 45 | module_param_named(unlock, i8042_unlock, bool, 0); |
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index 82ae18d29685..014248344763 100644 --- a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c | |||
@@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev, | |||
68 | unsigned int scancode, | 68 | unsigned int scancode, |
69 | unsigned int *keycode) | 69 | unsigned int *keycode) |
70 | { | 70 | { |
71 | const struct key_entry *key = | 71 | const struct key_entry *key; |
72 | sparse_keymap_entry_from_scancode(dev, scancode); | ||
73 | 72 | ||
74 | if (key && key->type == KE_KEY) { | 73 | if (dev->keycode) { |
75 | *keycode = key->keycode; | 74 | key = sparse_keymap_entry_from_scancode(dev, scancode); |
76 | return 0; | 75 | if (key && key->type == KE_KEY) { |
76 | *keycode = key->keycode; | ||
77 | return 0; | ||
78 | } | ||
77 | } | 79 | } |
78 | 80 | ||
79 | return -EINVAL; | 81 | return -EINVAL; |
@@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev, | |||
86 | struct key_entry *key; | 88 | struct key_entry *key; |
87 | int old_keycode; | 89 | int old_keycode; |
88 | 90 | ||
89 | if (keycode < 0 || keycode > KEY_MAX) | 91 | if (dev->keycode) { |
90 | return -EINVAL; | 92 | key = sparse_keymap_entry_from_scancode(dev, scancode); |
91 | 93 | if (key && key->type == KE_KEY) { | |
92 | key = sparse_keymap_entry_from_scancode(dev, scancode); | 94 | old_keycode = key->keycode; |
93 | if (key && key->type == KE_KEY) { | 95 | key->keycode = keycode; |
94 | old_keycode = key->keycode; | 96 | set_bit(keycode, dev->keybit); |
95 | key->keycode = keycode; | 97 | if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) |
96 | set_bit(keycode, dev->keybit); | 98 | clear_bit(old_keycode, dev->keybit); |
97 | if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) | 99 | return 0; |
98 | clear_bit(old_keycode, dev->keybit); | 100 | } |
99 | return 0; | ||
100 | } | 101 | } |
101 | 102 | ||
102 | return -EINVAL; | 103 | return -EINVAL; |
@@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev, | |||
164 | return 0; | 165 | return 0; |
165 | 166 | ||
166 | err_out: | 167 | err_out: |
167 | kfree(keymap); | 168 | kfree(map); |
168 | return error; | 169 | return error; |
169 | 170 | ||
170 | } | 171 | } |
@@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup); | |||
176 | * | 177 | * |
177 | * This function is used to free memory allocated by sparse keymap | 178 | * This function is used to free memory allocated by sparse keymap |
178 | * in an input device that was set up by sparse_keymap_setup(). | 179 | * in an input device that was set up by sparse_keymap_setup(). |
180 | * NOTE: It is safe to call this function while input device is | ||
181 | * still registered (however the drivers should care not to try to | ||
182 | * use freed keymap and thus have to shut off interrupts/polling | ||
183 | * before freeing the keymap). | ||
179 | */ | 184 | */ |
180 | void sparse_keymap_free(struct input_dev *dev) | 185 | void sparse_keymap_free(struct input_dev *dev) |
181 | { | 186 | { |
187 | unsigned long flags; | ||
188 | |||
189 | /* | ||
190 | * Take event lock to prevent racing with input_get_keycode() | ||
191 | * and input_set_keycode() if we are called while input device | ||
192 | * is still registered. | ||
193 | */ | ||
194 | spin_lock_irqsave(&dev->event_lock, flags); | ||
195 | |||
182 | kfree(dev->keycode); | 196 | kfree(dev->keycode); |
183 | dev->keycode = NULL; | 197 | dev->keycode = NULL; |
184 | dev->keycodemax = 0; | 198 | dev->keycodemax = 0; |
185 | dev->getkeycode = NULL; | 199 | |
186 | dev->setkeycode = NULL; | 200 | spin_unlock_irqrestore(&dev->event_lock, flags); |
187 | } | 201 | } |
188 | EXPORT_SYMBOL(sparse_keymap_free); | 202 | EXPORT_SYMBOL(sparse_keymap_free); |
189 | 203 | ||
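A compilable userspace analogue (not the kernel code) of the race the locking above closes, for both input_get_keycode() and sparse_keymap_free(): freeing the keymap must happen under the same lock the lookup path holds, otherwise a concurrent keycode lookup can dereference freed memory. A pthread mutex stands in for the input device's event_lock, and the keymap layout here is invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct key_entry { unsigned int scancode; unsigned int keycode; };

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct key_entry *keymap;
static unsigned int keymapsize;

static int get_keycode(unsigned int scancode, unsigned int *keycode)
{
        int ret = -1;
        unsigned int i;

        pthread_mutex_lock(&event_lock);
        if (keymap) {                           /* may be freed concurrently */
                for (i = 0; i < keymapsize; i++)
                        if (keymap[i].scancode == scancode) {
                                *keycode = keymap[i].keycode;
                                ret = 0;
                                break;
                        }
        }
        pthread_mutex_unlock(&event_lock);
        return ret;
}

static void keymap_free(void)
{
        pthread_mutex_lock(&event_lock);        /* same lock as the lookup */
        free(keymap);
        keymap = NULL;
        keymapsize = 0;
        pthread_mutex_unlock(&event_lock);
}

int main(void)
{
        unsigned int code;

        keymap = calloc(2, sizeof(*keymap));
        if (!keymap)
                return 1;
        keymap[0].scancode = 0x41;
        keymap[0].keycode = 30;
        keymapsize = 2;

        printf("lookup 0x41: %d (code %u)\n", get_keycode(0x41, &code), code);
        keymap_free();
        printf("lookup after free: %d\n", get_keycode(0x41, &code));
        return 0;
}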
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 8b5d2873f0c4..f46502589e4e 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf) | |||
673 | int rv; | 673 | int rv; |
674 | 674 | ||
675 | mutex_lock(&wacom->lock); | 675 | mutex_lock(&wacom->lock); |
676 | if (wacom->open) { | 676 | |
677 | /* switch to wacom mode first */ | ||
678 | wacom_query_tablet_data(intf, features); | ||
679 | |||
680 | if (wacom->open) | ||
677 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); | 681 | rv = usb_submit_urb(wacom->irq, GFP_NOIO); |
678 | /* switch to wacom mode if needed */ | 682 | else |
679 | if (!wacom_retrieve_hid_descriptor(intf, features)) | ||
680 | wacom_query_tablet_data(intf, features); | ||
681 | } else | ||
682 | rv = 0; | 683 | rv = 0; |
684 | |||
683 | mutex_unlock(&wacom->lock); | 685 | mutex_unlock(&wacom->lock); |
684 | 686 | ||
685 | return rv; | 687 | return rv; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b3ba3437a2eb..4a852d815c68 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
155 | { | 155 | { |
156 | struct wacom_features *features = &wacom->features; | 156 | struct wacom_features *features = &wacom->features; |
157 | unsigned char *data = wacom->data; | 157 | unsigned char *data = wacom->data; |
158 | int x, y, prox; | 158 | int x, y, rw; |
159 | int rw = 0; | 159 | static int penData = 0; |
160 | int retval = 0; | ||
161 | 160 | ||
162 | if (data[0] != WACOM_REPORT_PENABLED) { | 161 | if (data[0] != WACOM_REPORT_PENABLED) { |
163 | dbg("wacom_graphire_irq: received unknown report #%d", data[0]); | 162 | dbg("wacom_graphire_irq: received unknown report #%d", data[0]); |
164 | goto exit; | 163 | return 0; |
165 | } | 164 | } |
166 | 165 | ||
167 | prox = data[1] & 0x80; | 166 | if (data[1] & 0x80) { |
168 | if (prox || wacom->id[0]) { | 167 | /* in prox and not a pad data */ |
169 | if (prox) { | 168 | penData = 1; |
170 | switch ((data[1] >> 5) & 3) { | 169 | |
170 | switch ((data[1] >> 5) & 3) { | ||
171 | 171 | ||
172 | case 0: /* Pen */ | 172 | case 0: /* Pen */ |
173 | wacom->tool[0] = BTN_TOOL_PEN; | 173 | wacom->tool[0] = BTN_TOOL_PEN; |
@@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
181 | 181 | ||
182 | case 2: /* Mouse with wheel */ | 182 | case 2: /* Mouse with wheel */ |
183 | wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); | 183 | wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); |
184 | if (features->type == WACOM_G4 || features->type == WACOM_MO) { | ||
185 | rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03); | ||
186 | wacom_report_rel(wcombo, REL_WHEEL, -rw); | ||
187 | } else | ||
188 | wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]); | ||
184 | /* fall through */ | 189 | /* fall through */ |
185 | 190 | ||
186 | case 3: /* Mouse without wheel */ | 191 | case 3: /* Mouse without wheel */ |
187 | wacom->tool[0] = BTN_TOOL_MOUSE; | 192 | wacom->tool[0] = BTN_TOOL_MOUSE; |
188 | wacom->id[0] = CURSOR_DEVICE_ID; | 193 | wacom->id[0] = CURSOR_DEVICE_ID; |
194 | wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); | ||
195 | wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); | ||
196 | if (features->type == WACOM_G4 || features->type == WACOM_MO) | ||
197 | wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); | ||
198 | else | ||
199 | wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); | ||
189 | break; | 200 | break; |
190 | } | ||
191 | } | 201 | } |
192 | x = wacom_le16_to_cpu(&data[2]); | 202 | x = wacom_le16_to_cpu(&data[2]); |
193 | y = wacom_le16_to_cpu(&data[4]); | 203 | y = wacom_le16_to_cpu(&data[4]); |
@@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
198 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); | 208 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); |
199 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); | 209 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); |
200 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); | 210 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); |
201 | } else { | ||
202 | wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); | ||
203 | wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); | ||
204 | if (features->type == WACOM_G4 || | ||
205 | features->type == WACOM_MO) { | ||
206 | wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); | ||
207 | rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); | ||
208 | } else { | ||
209 | wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); | ||
210 | rw = -(signed)data[6]; | ||
211 | } | ||
212 | wacom_report_rel(wcombo, REL_WHEEL, rw); | ||
213 | } | 211 | } |
214 | |||
215 | if (!prox) | ||
216 | wacom->id[0] = 0; | ||
217 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ | 212 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ |
218 | wacom_report_key(wcombo, wacom->tool[0], prox); | 213 | wacom_report_key(wcombo, wacom->tool[0], 1); |
219 | wacom_input_sync(wcombo); /* sync last event */ | 214 | } else if (wacom->id[0]) { |
215 | wacom_report_abs(wcombo, ABS_X, 0); | ||
216 | wacom_report_abs(wcombo, ABS_Y, 0); | ||
217 | if (wacom->tool[0] == BTN_TOOL_MOUSE) { | ||
218 | wacom_report_key(wcombo, BTN_LEFT, 0); | ||
219 | wacom_report_key(wcombo, BTN_RIGHT, 0); | ||
220 | wacom_report_abs(wcombo, ABS_DISTANCE, 0); | ||
221 | } else { | ||
222 | wacom_report_abs(wcombo, ABS_PRESSURE, 0); | ||
223 | wacom_report_key(wcombo, BTN_TOUCH, 0); | ||
224 | wacom_report_key(wcombo, BTN_STYLUS, 0); | ||
225 | wacom_report_key(wcombo, BTN_STYLUS2, 0); | ||
226 | } | ||
227 | wacom->id[0] = 0; | ||
228 | wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */ | ||
229 | wacom_report_key(wcombo, wacom->tool[0], 0); | ||
220 | } | 230 | } |
221 | 231 | ||
222 | /* send pad data */ | 232 | /* send pad data */ |
223 | switch (features->type) { | 233 | switch (features->type) { |
224 | case WACOM_G4: | 234 | case WACOM_G4: |
225 | prox = data[7] & 0xf8; | 235 | if (data[7] & 0xf8) { |
226 | if (prox || wacom->id[1]) { | 236 | if (penData) { |
237 | wacom_input_sync(wcombo); /* sync last event */ | ||
238 | if (!wacom->id[0]) | ||
239 | penData = 0; | ||
240 | } | ||
227 | wacom->id[1] = PAD_DEVICE_ID; | 241 | wacom->id[1] = PAD_DEVICE_ID; |
228 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); | 242 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); |
229 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); | 243 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); |
@@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
231 | wacom_report_rel(wcombo, REL_WHEEL, rw); | 245 | wacom_report_rel(wcombo, REL_WHEEL, rw); |
232 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); | 246 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); |
233 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 247 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); |
234 | if (!prox) | 248 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
235 | wacom->id[1] = 0; | 249 | } else if (wacom->id[1]) { |
236 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 250 | if (penData) { |
251 | wacom_input_sync(wcombo); /* sync last event */ | ||
252 | if (!wacom->id[0]) | ||
253 | penData = 0; | ||
254 | } | ||
255 | wacom->id[1] = 0; | ||
256 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); | ||
257 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); | ||
258 | wacom_report_rel(wcombo, REL_WHEEL, 0); | ||
259 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); | ||
260 | wacom_report_abs(wcombo, ABS_MISC, 0); | ||
237 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | 261 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
238 | } | 262 | } |
239 | retval = 1; | ||
240 | break; | 263 | break; |
241 | case WACOM_MO: | 264 | case WACOM_MO: |
242 | prox = (data[7] & 0xf8) || data[8]; | 265 | if ((data[7] & 0xf8) || (data[8] & 0xff)) { |
243 | if (prox || wacom->id[1]) { | 266 | if (penData) { |
267 | wacom_input_sync(wcombo); /* sync last event */ | ||
268 | if (!wacom->id[0]) | ||
269 | penData = 0; | ||
270 | } | ||
244 | wacom->id[1] = PAD_DEVICE_ID; | 271 | wacom->id[1] = PAD_DEVICE_ID; |
245 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); | 272 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); |
246 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); | 273 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); |
@@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) | |||
248 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); | 275 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); |
249 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); | 276 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); |
250 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); | 277 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); |
251 | if (!prox) | ||
252 | wacom->id[1] = 0; | ||
253 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); | 278 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); |
254 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | 279 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); |
280 | } else if (wacom->id[1]) { | ||
281 | if (penData) { | ||
282 | wacom_input_sync(wcombo); /* sync last event */ | ||
283 | if (!wacom->id[0]) | ||
284 | penData = 0; | ||
285 | } | ||
286 | wacom->id[1] = 0; | ||
287 | wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); | ||
288 | wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); | ||
289 | wacom_report_key(wcombo, BTN_4, (data[7] & 0x10)); | ||
290 | wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); | ||
291 | wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); | ||
292 | wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); | ||
293 | wacom_report_abs(wcombo, ABS_MISC, 0); | ||
294 | wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); | ||
255 | } | 295 | } |
256 | retval = 1; | ||
257 | break; | 296 | break; |
258 | } | 297 | } |
259 | exit: | 298 | return 1; |
260 | return retval; | ||
261 | } | 299 | } |
262 | 300 | ||
263 | static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) | 301 | static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) |
@@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo) | |||
598 | static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) | 636 | static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) |
599 | { | 637 | { |
600 | wacom_report_abs(wcombo, ABS_X, | 638 | wacom_report_abs(wcombo, ABS_X, |
601 | data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8)); | 639 | (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8)); |
602 | wacom_report_abs(wcombo, ABS_Y, | 640 | wacom_report_abs(wcombo, ABS_Y, |
603 | data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8)); | 641 | (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8)); |
604 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); | 642 | wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); |
605 | wacom_report_key(wcombo, wacom->tool[idx], 1); | 643 | wacom_report_key(wcombo, wacom->tool[idx], 1); |
606 | if (idx) | 644 | if (idx) |
@@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo) | |||
744 | 782 | ||
745 | touchInProx = 0; | 783 | touchInProx = 0; |
746 | 784 | ||
747 | if (!wacom->id[0]) { /* first in prox */ | 785 | if (prox) { /* in prox */ |
748 | /* Going into proximity select tool */ | 786 | if (!wacom->id[0]) { |
749 | wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; | 787 | /* Going into proximity select tool */ |
750 | if (wacom->tool[0] == BTN_TOOL_PEN) | 788 | wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; |
751 | wacom->id[0] = STYLUS_DEVICE_ID; | 789 | if (wacom->tool[0] == BTN_TOOL_PEN) |
752 | else | 790 | wacom->id[0] = STYLUS_DEVICE_ID; |
753 | wacom->id[0] = ERASER_DEVICE_ID; | 791 | else |
754 | } | 792 | wacom->id[0] = ERASER_DEVICE_ID; |
755 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); | 793 | } |
756 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); | 794 | wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); |
757 | wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); | 795 | wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); |
758 | wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); | 796 | wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); |
759 | pressure = ((data[7] & 0x01) << 8) | data[6]; | 797 | wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); |
760 | if (pressure < 0) | 798 | pressure = ((data[7] & 0x01) << 8) | data[6]; |
761 | pressure = features->pressure_max + pressure + 1; | 799 | if (pressure < 0) |
762 | wacom_report_abs(wcombo, ABS_PRESSURE, pressure); | 800 | pressure = features->pressure_max + pressure + 1; |
763 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); | 801 | wacom_report_abs(wcombo, ABS_PRESSURE, pressure); |
764 | if (!prox) { /* out-prox */ | 802 | wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); |
803 | } else { | ||
804 | wacom_report_abs(wcombo, ABS_X, 0); | ||
805 | wacom_report_abs(wcombo, ABS_Y, 0); | ||
806 | wacom_report_abs(wcombo, ABS_PRESSURE, 0); | ||
807 | wacom_report_key(wcombo, BTN_STYLUS, 0); | ||
808 | wacom_report_key(wcombo, BTN_STYLUS2, 0); | ||
809 | wacom_report_key(wcombo, BTN_TOUCH, 0); | ||
765 | wacom->id[0] = 0; | 810 | wacom->id[0] = 0; |
766 | /* pen is out so touch can be enabled now */ | 811 | /* pen is out so touch can be enabled now */ |
767 | touchInProx = 1; | 812 | touchInProx = 1; |
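A compilable sketch (not the wacom_wac driver) of the pattern the rework above introduces: when the pen or mouse leaves proximity, the driver now explicitly reports zeroed axes and buttons for whichever tool was active and then clears the cached tool id, instead of relying on the old prox/id bookkeeping. This is only a loose model of that state handling, with printf standing in for input reports.

#include <stdio.h>

enum tool { TOOL_NONE, TOOL_PEN, TOOL_MOUSE };

static enum tool current_tool;

static void report(const char *what, int value)
{
        printf("  %-12s = %d\n", what, value);
}

static void handle_packet(int in_prox, enum tool tool, int x, int y)
{
        if (in_prox) {
                current_tool = tool;
                report("ABS_X", x);
                report("ABS_Y", y);
                report("TOOL", tool);
        } else if (current_tool != TOOL_NONE) {
                /* out of proximity: reset everything we reported */
                report("ABS_X", 0);
                report("ABS_Y", 0);
                if (current_tool == TOOL_MOUSE) {
                        report("BTN_LEFT", 0);
                        report("BTN_RIGHT", 0);
                } else {
                        report("ABS_PRESSURE", 0);
                        report("BTN_TOUCH", 0);
                }
                report("TOOL", 0);
                current_tool = TOOL_NONE;
        }
}

int main(void)
{
        puts("pen in prox:");
        handle_packet(1, TOOL_PEN, 120, 340);
        puts("pen leaves prox:");
        handle_packet(0, TOOL_NONE, 0, 0);
        puts("nothing in prox (no events):");
        handle_packet(0, TOOL_NONE, 0, 0);
        return 0;
}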
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 0be15c70c16d..47a5ffec55a3 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c | |||
@@ -14,11 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | |||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/timer.h> | ||
22 | #include <linux/usb.h> | 17 | #include <linux/usb.h> |
23 | #include <linux/module.h> | 18 | #include <linux/module.h> |
24 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index eb7e27105a82..964a55fb1486 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c | |||
@@ -12,8 +12,6 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include "gigaset.h" | 14 | #include "gigaset.h" |
15 | #include <linux/slab.h> | ||
16 | #include <linux/ctype.h> | ||
17 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
18 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
19 | #include <linux/isdn/capilli.h> | 17 | #include <linux/isdn/capilli.h> |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 0b39b387c125..f6f45f221920 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
@@ -14,10 +14,8 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/ctype.h> | ||
18 | #include <linux/module.h> | 17 | #include <linux/module.h> |
19 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
20 | #include <linux/slab.h> | ||
21 | 19 | ||
22 | /* Version Information */ | 20 | /* Version Information */ |
23 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" | 21 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" |
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 9ef5b0463fd5..05947f9c1849 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h | |||
@@ -20,11 +20,12 @@ | |||
20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
21 | 21 | ||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/sched.h> | ||
23 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
24 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/ctype.h> | ||
25 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
27 | #include <linux/usb.h> | ||
28 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
29 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
30 | #include <linux/ppp_defs.h> | 31 | #include <linux/ppp_defs.h> |
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index c99fb9790a13..c22e5ace8276 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/isdnif.h> | 17 | #include <linux/isdnif.h> |
18 | #include <linux/slab.h> | ||
19 | 18 | ||
20 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ | 19 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ |
21 | 20 | ||
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f0dc6c9cc283..c9f28dd40d5c 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include "gigaset.h" | 14 | #include "gigaset.h" |
15 | #include <linux/gigaset_dev.h> | 15 | #include <linux/gigaset_dev.h> |
16 | #include <linux/tty.h> | ||
17 | #include <linux/tty_flip.h> | 16 | #include <linux/tty_flip.h> |
18 | 17 | ||
19 | /*** our ioctls ***/ | 18 | /*** our ioctls ***/ |
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c index b69f73a0668f..b943efbff44d 100644 --- a/drivers/isdn/gigaset/proc.c +++ b/drivers/isdn/gigaset/proc.c | |||
@@ -14,7 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include "gigaset.h" | 16 | #include "gigaset.h" |
17 | #include <linux/ctype.h> | ||
18 | 17 | ||
19 | static ssize_t show_cidmode(struct device *dev, | 18 | static ssize_t show_cidmode(struct device *dev, |
20 | struct device_attribute *attr, char *buf) | 19 | struct device_attribute *attr, char *buf) |
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8b0afd203a07..e96c0586886c 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c | |||
@@ -11,13 +11,10 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include "gigaset.h" | 13 | #include "gigaset.h" |
14 | |||
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
17 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
18 | #include <linux/tty.h> | ||
19 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
20 | #include <linux/slab.h> | ||
21 | 18 | ||
22 | /* Version Information */ | 19 | /* Version Information */ |
23 | #define DRIVER_AUTHOR "Tilman Schmidt" | 20 | #define DRIVER_AUTHOR "Tilman Schmidt" |
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 9430a2bbb523..76dbb20f3065 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
@@ -16,10 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "gigaset.h" | 18 | #include "gigaset.h" |
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/usb.h> | 19 | #include <linux/usb.h> |
24 | #include <linux/module.h> | 20 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 07090f379c63..69c84a1d88ea 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status) | |||
178 | 178 | ||
179 | /* We set the status. */ | 179 | /* We set the status. */ |
180 | to_lgdev(vdev)->desc->status = status; | 180 | to_lgdev(vdev)->desc->status = status; |
181 | kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset); | 181 | hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); |
182 | } | 182 | } |
183 | 183 | ||
184 | static void lg_set_status(struct virtio_device *vdev, u8 status) | 184 | static void lg_set_status(struct virtio_device *vdev, u8 status) |
@@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq) | |||
229 | */ | 229 | */ |
230 | struct lguest_vq_info *lvq = vq->priv; | 230 | struct lguest_vq_info *lvq = vq->priv; |
231 | 231 | ||
232 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); | 232 | hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); |
233 | } | 233 | } |
234 | 234 | ||
235 | /* An extern declaration inside a C file is bad form. Don't do it. */ | 235 | /* An extern declaration inside a C file is bad form. Don't do it. */ |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index fb2b7ef7868e..b4eb675a807e 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
288 | insn = lgread(cpu, physaddr, u8); | 288 | insn = lgread(cpu, physaddr, u8); |
289 | 289 | ||
290 | /* | 290 | /* |
291 | * Around 2.6.33, the kernel started using an emulation for the | ||
292 | * cmpxchg8b instruction in early boot on many configurations. This | ||
293 | * code isn't paravirtualized, and it tries to disable interrupts. | ||
294 | * Ignore it, which will Mostly Work. | ||
295 | */ | ||
296 | if (insn == 0xfa) { | ||
297 | /* "cli", or Clear Interrupt Enable instruction. Skip it. */ | ||
298 | cpu->regs->eip++; | ||
299 | return 1; | ||
300 | } | ||
301 | |||
302 | /* | ||
291 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits | 303 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits |
292 | * of the eax register. | 304 | * of the eax register. |
293 | */ | 305 | */ |
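A compilable sketch (not the lguest hypervisor code) of the shape of the fix described in the comment above: a one-byte "cli" (opcode 0xfa) is recognized and skipped simply by bumping the saved instruction pointer, while everything else falls through to the normal emulation path.

#include <stdio.h>
#include <stdint.h>

struct fake_regs { uint32_t eip; };

/* return 1 if the instruction was handled, 0 to fall through */
static int emulate_insn(struct fake_regs *regs, const uint8_t *mem)
{
        uint8_t insn = mem[regs->eip];

        if (insn == 0xfa) {             /* cli: ignore it, just skip */
                regs->eip++;
                return 1;
        }
        return 0;
}

int main(void)
{
        const uint8_t guest_mem[] = { 0xfa, 0x90 };   /* cli; nop */
        struct fake_regs regs = { .eip = 0 };

        printf("handled=%d, eip now %u\n",
               emulate_insn(&regs, guest_mem), regs.eip);
        printf("handled=%d (nop left to the real emulator)\n",
               emulate_insn(&regs, guest_mem));
        return 0;
}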
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e3e9a36ea3b7..58ea0ecae7c3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1650 | int previous, int *dd_idx, | 1650 | int previous, int *dd_idx, |
1651 | struct stripe_head *sh) | 1651 | struct stripe_head *sh) |
1652 | { | 1652 | { |
1653 | long stripe; | 1653 | sector_t stripe, stripe2; |
1654 | unsigned long chunk_number; | 1654 | sector_t chunk_number; |
1655 | unsigned int chunk_offset; | 1655 | unsigned int chunk_offset; |
1656 | int pd_idx, qd_idx; | 1656 | int pd_idx, qd_idx; |
1657 | int ddf_layout = 0; | 1657 | int ddf_layout = 0; |
@@ -1671,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1671 | */ | 1671 | */ |
1672 | chunk_offset = sector_div(r_sector, sectors_per_chunk); | 1672 | chunk_offset = sector_div(r_sector, sectors_per_chunk); |
1673 | chunk_number = r_sector; | 1673 | chunk_number = r_sector; |
1674 | BUG_ON(r_sector != chunk_number); | ||
1675 | 1674 | ||
1676 | /* | 1675 | /* |
1677 | * Compute the stripe number | 1676 | * Compute the stripe number |
1678 | */ | 1677 | */ |
1679 | stripe = chunk_number / data_disks; | 1678 | stripe = chunk_number; |
1680 | 1679 | *dd_idx = sector_div(stripe, data_disks); | |
1681 | /* | 1680 | stripe2 = stripe; |
1682 | * Compute the data disk and parity disk indexes inside the stripe | ||
1683 | */ | ||
1684 | *dd_idx = chunk_number % data_disks; | ||
1685 | |||
1686 | /* | 1681 | /* |
1687 | * Select the parity disk based on the user selected algorithm. | 1682 | * Select the parity disk based on the user selected algorithm. |
1688 | */ | 1683 | */ |
@@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1694 | case 5: | 1689 | case 5: |
1695 | switch (algorithm) { | 1690 | switch (algorithm) { |
1696 | case ALGORITHM_LEFT_ASYMMETRIC: | 1691 | case ALGORITHM_LEFT_ASYMMETRIC: |
1697 | pd_idx = data_disks - stripe % raid_disks; | 1692 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
1698 | if (*dd_idx >= pd_idx) | 1693 | if (*dd_idx >= pd_idx) |
1699 | (*dd_idx)++; | 1694 | (*dd_idx)++; |
1700 | break; | 1695 | break; |
1701 | case ALGORITHM_RIGHT_ASYMMETRIC: | 1696 | case ALGORITHM_RIGHT_ASYMMETRIC: |
1702 | pd_idx = stripe % raid_disks; | 1697 | pd_idx = sector_div(stripe2, raid_disks); |
1703 | if (*dd_idx >= pd_idx) | 1698 | if (*dd_idx >= pd_idx) |
1704 | (*dd_idx)++; | 1699 | (*dd_idx)++; |
1705 | break; | 1700 | break; |
1706 | case ALGORITHM_LEFT_SYMMETRIC: | 1701 | case ALGORITHM_LEFT_SYMMETRIC: |
1707 | pd_idx = data_disks - stripe % raid_disks; | 1702 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
1708 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1703 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
1709 | break; | 1704 | break; |
1710 | case ALGORITHM_RIGHT_SYMMETRIC: | 1705 | case ALGORITHM_RIGHT_SYMMETRIC: |
1711 | pd_idx = stripe % raid_disks; | 1706 | pd_idx = sector_div(stripe2, raid_disks); |
1712 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1707 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
1713 | break; | 1708 | break; |
1714 | case ALGORITHM_PARITY_0: | 1709 | case ALGORITHM_PARITY_0: |
@@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1728 | 1723 | ||
1729 | switch (algorithm) { | 1724 | switch (algorithm) { |
1730 | case ALGORITHM_LEFT_ASYMMETRIC: | 1725 | case ALGORITHM_LEFT_ASYMMETRIC: |
1731 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1726 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
1732 | qd_idx = pd_idx + 1; | 1727 | qd_idx = pd_idx + 1; |
1733 | if (pd_idx == raid_disks-1) { | 1728 | if (pd_idx == raid_disks-1) { |
1734 | (*dd_idx)++; /* Q D D D P */ | 1729 | (*dd_idx)++; /* Q D D D P */ |
@@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1737 | (*dd_idx) += 2; /* D D P Q D */ | 1732 | (*dd_idx) += 2; /* D D P Q D */ |
1738 | break; | 1733 | break; |
1739 | case ALGORITHM_RIGHT_ASYMMETRIC: | 1734 | case ALGORITHM_RIGHT_ASYMMETRIC: |
1740 | pd_idx = stripe % raid_disks; | 1735 | pd_idx = sector_div(stripe2, raid_disks); |
1741 | qd_idx = pd_idx + 1; | 1736 | qd_idx = pd_idx + 1; |
1742 | if (pd_idx == raid_disks-1) { | 1737 | if (pd_idx == raid_disks-1) { |
1743 | (*dd_idx)++; /* Q D D D P */ | 1738 | (*dd_idx)++; /* Q D D D P */ |
@@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1746 | (*dd_idx) += 2; /* D D P Q D */ | 1741 | (*dd_idx) += 2; /* D D P Q D */ |
1747 | break; | 1742 | break; |
1748 | case ALGORITHM_LEFT_SYMMETRIC: | 1743 | case ALGORITHM_LEFT_SYMMETRIC: |
1749 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1744 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
1750 | qd_idx = (pd_idx + 1) % raid_disks; | 1745 | qd_idx = (pd_idx + 1) % raid_disks; |
1751 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; | 1746 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
1752 | break; | 1747 | break; |
1753 | case ALGORITHM_RIGHT_SYMMETRIC: | 1748 | case ALGORITHM_RIGHT_SYMMETRIC: |
1754 | pd_idx = stripe % raid_disks; | 1749 | pd_idx = sector_div(stripe2, raid_disks); |
1755 | qd_idx = (pd_idx + 1) % raid_disks; | 1750 | qd_idx = (pd_idx + 1) % raid_disks; |
1756 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; | 1751 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
1757 | break; | 1752 | break; |
@@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1770 | /* Exactly the same as RIGHT_ASYMMETRIC, but or | 1765 | /* Exactly the same as RIGHT_ASYMMETRIC, but or |
1771 | * of blocks for computing Q is different. | 1766 | * of blocks for computing Q is different. |
1772 | */ | 1767 | */ |
1773 | pd_idx = stripe % raid_disks; | 1768 | pd_idx = sector_div(stripe2, raid_disks); |
1774 | qd_idx = pd_idx + 1; | 1769 | qd_idx = pd_idx + 1; |
1775 | if (pd_idx == raid_disks-1) { | 1770 | if (pd_idx == raid_disks-1) { |
1776 | (*dd_idx)++; /* Q D D D P */ | 1771 | (*dd_idx)++; /* Q D D D P */ |
@@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1785 | * D D D P Q rather than | 1780 | * D D D P Q rather than |
1786 | * Q D D D P | 1781 | * Q D D D P |
1787 | */ | 1782 | */ |
1788 | pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); | 1783 | stripe2 += 1; |
1784 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); | ||
1789 | qd_idx = pd_idx + 1; | 1785 | qd_idx = pd_idx + 1; |
1790 | if (pd_idx == raid_disks-1) { | 1786 | if (pd_idx == raid_disks-1) { |
1791 | (*dd_idx)++; /* Q D D D P */ | 1787 | (*dd_idx)++; /* Q D D D P */ |
@@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1797 | 1793 | ||
1798 | case ALGORITHM_ROTATING_N_CONTINUE: | 1794 | case ALGORITHM_ROTATING_N_CONTINUE: |
1799 | /* Same as left_symmetric but Q is before P */ | 1795 | /* Same as left_symmetric but Q is before P */ |
1800 | pd_idx = raid_disks - 1 - (stripe % raid_disks); | 1796 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
1801 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; | 1797 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; |
1802 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; | 1798 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
1803 | ddf_layout = 1; | 1799 | ddf_layout = 1; |
@@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, | |||
1805 | 1801 | ||
1806 | case ALGORITHM_LEFT_ASYMMETRIC_6: | 1802 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
1807 | /* RAID5 left_asymmetric, with Q on last device */ | 1803 | /* RAID5 left_asymmetric, with Q on last device */ |
1808 | pd_idx = data_disks - stripe % (raid_disks-1); | 1804 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
1809 | if (*dd_idx >= pd_idx) | 1805 | if (*dd_idx >= pd_idx) |
1810 | (*dd_idx)++; | 1806 | (*dd_idx)++; |
1811 | qd_idx = raid_disks - 1; | 1807 | qd_idx = raid_disks - 1; |
1812 | break; | 1808 | break; |
1813 | 1809 | ||
1814 | case ALGORITHM_RIGHT_ASYMMETRIC_6: | 1810 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
1815 | pd_idx = stripe % (raid_disks-1); | 1811 | pd_idx = sector_div(stripe2, raid_disks-1); |
1816 | if (*dd_idx >= pd_idx) | 1812 | if (*dd_idx >= pd_idx) |
1817 | (*dd_idx)++; | 1813 | (*dd_idx)++; |
1818 | qd_idx = raid_disks - 1; | 1814 | qd_idx = raid_disks - 1; |
1819 | break; | 1815 | break; |
1820 | 1816 | ||
1821 | case ALGORITHM_LEFT_SYMMETRIC_6: | 1817 | case ALGORITHM_LEFT_SYMMETRIC_6: |
1822 | pd_idx = data_disks - stripe % (raid_disks-1); | 1818 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
1823 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); | 1819 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
1824 | qd_idx = raid_disks - 1; | 1820 | qd_idx = raid_disks - 1; |
1825 | break; | 1821 | break; |
1826 | 1822 | ||
1827 | case ALGORITHM_RIGHT_SYMMETRIC_6: | 1823 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
1828 | pd_idx = stripe % (raid_disks-1); | 1824 | pd_idx = sector_div(stripe2, raid_disks-1); |
1829 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); | 1825 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
1830 | qd_idx = raid_disks - 1; | 1826 | qd_idx = raid_disks - 1; |
1831 | break; | 1827 | break; |
@@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
1870 | : conf->algorithm; | 1866 | : conf->algorithm; |
1871 | sector_t stripe; | 1867 | sector_t stripe; |
1872 | int chunk_offset; | 1868 | int chunk_offset; |
1873 | int chunk_number, dummy1, dd_idx = i; | 1869 | sector_t chunk_number; |
1870 | int dummy1, dd_idx = i; | ||
1874 | sector_t r_sector; | 1871 | sector_t r_sector; |
1875 | struct stripe_head sh2; | 1872 | struct stripe_head sh2; |
1876 | 1873 | ||
1877 | 1874 | ||
1878 | chunk_offset = sector_div(new_sector, sectors_per_chunk); | 1875 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
1879 | stripe = new_sector; | 1876 | stripe = new_sector; |
1880 | BUG_ON(new_sector != stripe); | ||
1881 | 1877 | ||
1882 | if (i == sh->pd_idx) | 1878 | if (i == sh->pd_idx) |
1883 | return 0; | 1879 | return 0; |
@@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
1970 | } | 1966 | } |
1971 | 1967 | ||
1972 | chunk_number = stripe * data_disks + i; | 1968 | chunk_number = stripe * data_disks + i; |
1973 | r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; | 1969 | r_sector = chunk_number * sectors_per_chunk + chunk_offset; |
1974 | 1970 | ||
1975 | check = raid5_compute_sector(conf, r_sector, | 1971 | check = raid5_compute_sector(conf, r_sector, |
1976 | previous, &dummy1, &sh2); | 1972 | previous, &dummy1, &sh2); |
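The raid5 hunks above swap the open-coded "stripe % raid_disks" for sector_div() on a scratch copy (stripe2), since a 64-bit modulo on a sector_t typically pulls in libgcc helpers that 32-bit kernel builds do not provide. A minimal sketch of the idiom, assuming the usual sector_div() helper reachable via linux/kernel.h (the function name below is illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/types.h>

/*
 * sector_div(n, base) divides the sector_t 'n' in place and returns the
 * remainder, which is why the code above keeps the original 'stripe'
 * and feeds a fresh copy to every division.
 */
static int example_parity_disk(sector_t stripe, int raid_disks)
{
	sector_t stripe2 = stripe;	/* sector_div() modifies its argument */
	int pd_idx = sector_div(stripe2, raid_disks);	/* remainder */

	/* stripe2 now holds stripe / raid_disks (the quotient) */
	return pd_idx;
}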
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 2191c8d896a0..0d0d625fece2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -311,6 +311,22 @@ config TI_DAC7512 | |||
311 | This driver can also be built as a module. If so, the module | 311 | This driver can also be built as a module. If so, the module |
312 | will be called ti_dac7512. | 312 | will be called ti_dac7512. |
313 | 313 | ||
314 | config VMWARE_BALLOON | ||
315 | tristate "VMware Balloon Driver" | ||
316 | depends on X86 | ||
317 | help | ||
318 | This is VMware physical memory management driver which acts | ||
319 | like a "balloon" that can be inflated to reclaim physical pages | ||
320 | by reserving them in the guest and invalidating them in the | ||
321 | monitor, freeing up the underlying machine pages so they can | ||
322 | be allocated to other guests. The balloon can also be deflated | ||
323 | to allow the guest to use more physical memory. | ||
324 | |||
325 | If unsure, say N. | ||
326 | |||
327 | To compile this driver as a module, choose M here: the | ||
328 | module will be called vmware_balloon. | ||
329 | |||
314 | source "drivers/misc/c2port/Kconfig" | 330 | source "drivers/misc/c2port/Kconfig" |
315 | source "drivers/misc/eeprom/Kconfig" | 331 | source "drivers/misc/eeprom/Kconfig" |
316 | source "drivers/misc/cb710/Kconfig" | 332 | source "drivers/misc/cb710/Kconfig" |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 27c484355414..7b6f7eefdf8d 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -29,3 +29,4 @@ obj-$(CONFIG_C2PORT) += c2port/ | |||
29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ | 29 | obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ |
30 | obj-y += eeprom/ | 30 | obj-y += eeprom/ |
31 | obj-y += cb710/ | 31 | obj-y += cb710/ |
32 | obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o | ||
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c new file mode 100644 index 000000000000..e7161c4e3798 --- /dev/null +++ b/drivers/misc/vmware_balloon.c | |||
@@ -0,0 +1,832 @@ | |||
1 | /* | ||
2 | * VMware Balloon driver. | ||
3 | * | ||
4 | * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * Maintained by: Dmitry Torokhov <dtor@vmware.com> | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * This is VMware physical memory management driver for Linux. The driver | ||
25 | * acts like a "balloon" that can be inflated to reclaim physical pages by | ||
26 | * reserving them in the guest and invalidating them in the monitor, | ||
27 | * freeing up the underlying machine pages so they can be allocated to | ||
28 | * other guests. The balloon can also be deflated to allow the guest to | ||
29 | * use more physical memory. Higher level policies can control the sizes | ||
30 | * of balloons in VMs in order to manage physical memory resources. | ||
31 | */ | ||
32 | |||
33 | //#define DEBUG | ||
34 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/sched.h> | ||
40 | #include <linux/module.h> | ||
41 | #include <linux/workqueue.h> | ||
42 | #include <linux/debugfs.h> | ||
43 | #include <linux/seq_file.h> | ||
44 | #include <asm/vmware.h> | ||
45 | |||
46 | MODULE_AUTHOR("VMware, Inc."); | ||
47 | MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); | ||
48 | MODULE_VERSION("1.2.1.0-K"); | ||
49 | MODULE_ALIAS("dmi:*:svnVMware*:*"); | ||
50 | MODULE_ALIAS("vmware_vmmemctl"); | ||
51 | MODULE_LICENSE("GPL"); | ||
52 | |||
53 | /* | ||
54 | * Various constants controlling the rate of inflating/deflating the balloon, | ||
55 | * measured in pages. | ||
56 | */ | ||
57 | |||
58 | /* | ||
59 | * Rate of allocating memory when there is no memory pressure | ||
60 | * (driver performs non-sleeping allocations). | ||
61 | */ | ||
62 | #define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U | ||
63 | |||
64 | /* | ||
65 | * Rates of memory allocation when the guest experiences memory pressure | ||
66 | * (driver performs sleeping allocations). | ||
67 | */ | ||
68 | #define VMW_BALLOON_RATE_ALLOC_MIN 512U | ||
69 | #define VMW_BALLOON_RATE_ALLOC_MAX 2048U | ||
70 | #define VMW_BALLOON_RATE_ALLOC_INC 16U | ||
71 | |||
72 | /* | ||
73 | * Rates for releasing pages while deflating balloon. | ||
74 | */ | ||
75 | #define VMW_BALLOON_RATE_FREE_MIN 512U | ||
76 | #define VMW_BALLOON_RATE_FREE_MAX 16384U | ||
77 | #define VMW_BALLOON_RATE_FREE_INC 16U | ||
78 | |||
79 | /* | ||
80 | * When guest is under memory pressure, use a reduced page allocation | ||
81 | * rate for next several cycles. | ||
82 | */ | ||
83 | #define VMW_BALLOON_SLOW_CYCLES 4 | ||
84 | |||
85 | /* | ||
86 | * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't | ||
87 | * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use | ||
88 | * __GFP_NOWARN, to suppress page allocation failure warnings. | ||
89 | */ | ||
90 | #define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN) | ||
91 | |||
92 | /* | ||
93 | * Use GFP_HIGHUSER when executing in a separate kernel thread | ||
94 | * context and allocation can sleep. This is less stressful to | ||
95 | * the guest memory system, since it allows the thread to block | ||
96 | * while memory is reclaimed, and won't take pages from emergency | ||
97 | * low-memory pools. | ||
98 | */ | ||
99 | #define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER) | ||
100 | |||
101 | /* Maximum number of page allocations without yielding processor */ | ||
102 | #define VMW_BALLOON_YIELD_THRESHOLD 1024 | ||
103 | |||
104 | |||
105 | /* | ||
106 | * Hypervisor communication port definitions. | ||
107 | */ | ||
108 | #define VMW_BALLOON_HV_PORT 0x5670 | ||
109 | #define VMW_BALLOON_HV_MAGIC 0x456c6d6f | ||
110 | #define VMW_BALLOON_PROTOCOL_VERSION 2 | ||
111 | #define VMW_BALLOON_GUEST_ID 1 /* Linux */ | ||
112 | |||
113 | #define VMW_BALLOON_CMD_START 0 | ||
114 | #define VMW_BALLOON_CMD_GET_TARGET 1 | ||
115 | #define VMW_BALLOON_CMD_LOCK 2 | ||
116 | #define VMW_BALLOON_CMD_UNLOCK 3 | ||
117 | #define VMW_BALLOON_CMD_GUEST_ID 4 | ||
118 | |||
119 | /* error codes */ | ||
120 | #define VMW_BALLOON_SUCCESS 0 | ||
121 | #define VMW_BALLOON_FAILURE -1 | ||
122 | #define VMW_BALLOON_ERROR_CMD_INVALID 1 | ||
123 | #define VMW_BALLOON_ERROR_PPN_INVALID 2 | ||
124 | #define VMW_BALLOON_ERROR_PPN_LOCKED 3 | ||
125 | #define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 | ||
126 | #define VMW_BALLOON_ERROR_PPN_PINNED 5 | ||
127 | #define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 | ||
128 | #define VMW_BALLOON_ERROR_RESET 7 | ||
129 | #define VMW_BALLOON_ERROR_BUSY 8 | ||
130 | |||
131 | #define VMWARE_BALLOON_CMD(cmd, data, result) \ | ||
132 | ({ \ | ||
133 | unsigned long __stat, __dummy1, __dummy2; \ | ||
134 | __asm__ __volatile__ ("inl (%%dx)" : \ | ||
135 | "=a"(__stat), \ | ||
136 | "=c"(__dummy1), \ | ||
137 | "=d"(__dummy2), \ | ||
138 | "=b"(result) : \ | ||
139 | "0"(VMW_BALLOON_HV_MAGIC), \ | ||
140 | "1"(VMW_BALLOON_CMD_##cmd), \ | ||
141 | "2"(VMW_BALLOON_HV_PORT), \ | ||
142 | "3"(data) : \ | ||
143 | "memory"); \ | ||
144 | result &= -1UL; \ | ||
145 | __stat & -1UL; \ | ||
146 | }) | ||
147 | |||
148 | #ifdef CONFIG_DEBUG_FS | ||
149 | struct vmballoon_stats { | ||
150 | unsigned int timer; | ||
151 | |||
152 | /* allocation statistics */ | ||
153 | unsigned int alloc; | ||
154 | unsigned int alloc_fail; | ||
155 | unsigned int sleep_alloc; | ||
156 | unsigned int sleep_alloc_fail; | ||
157 | unsigned int refused_alloc; | ||
158 | unsigned int refused_free; | ||
159 | unsigned int free; | ||
160 | |||
161 | /* monitor operations */ | ||
162 | unsigned int lock; | ||
163 | unsigned int lock_fail; | ||
164 | unsigned int unlock; | ||
165 | unsigned int unlock_fail; | ||
166 | unsigned int target; | ||
167 | unsigned int target_fail; | ||
168 | unsigned int start; | ||
169 | unsigned int start_fail; | ||
170 | unsigned int guest_type; | ||
171 | unsigned int guest_type_fail; | ||
172 | }; | ||
173 | |||
174 | #define STATS_INC(stat) (stat)++ | ||
175 | #else | ||
176 | #define STATS_INC(stat) | ||
177 | #endif | ||
178 | |||
179 | struct vmballoon { | ||
180 | |||
181 | /* list of reserved physical pages */ | ||
182 | struct list_head pages; | ||
183 | |||
184 | /* transient list of non-balloonable pages */ | ||
185 | struct list_head refused_pages; | ||
186 | |||
187 | /* balloon size in pages */ | ||
188 | unsigned int size; | ||
189 | unsigned int target; | ||
190 | |||
191 | /* reset flag */ | ||
192 | bool reset_required; | ||
193 | |||
194 | /* adjustment rates (pages per second) */ | ||
195 | unsigned int rate_alloc; | ||
196 | unsigned int rate_free; | ||
197 | |||
198 | /* slow down page allocations for the next few cycles */ | ||
199 | unsigned int slow_allocation_cycles; | ||
200 | |||
201 | #ifdef CONFIG_DEBUG_FS | ||
202 | /* statistics */ | ||
203 | struct vmballoon_stats stats; | ||
204 | |||
205 | /* debugfs file exporting statistics */ | ||
206 | struct dentry *dbg_entry; | ||
207 | #endif | ||
208 | |||
209 | struct sysinfo sysinfo; | ||
210 | |||
211 | struct delayed_work dwork; | ||
212 | }; | ||
213 | |||
214 | static struct vmballoon balloon; | ||
215 | static struct workqueue_struct *vmballoon_wq; | ||
216 | |||
217 | /* | ||
218 | * Send "start" command to the host, communicating supported version | ||
219 | * of the protocol. | ||
220 | */ | ||
221 | static bool vmballoon_send_start(struct vmballoon *b) | ||
222 | { | ||
223 | unsigned long status, dummy; | ||
224 | |||
225 | STATS_INC(b->stats.start); | ||
226 | |||
227 | status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy); | ||
228 | if (status == VMW_BALLOON_SUCCESS) | ||
229 | return true; | ||
230 | |||
231 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
232 | STATS_INC(b->stats.start_fail); | ||
233 | return false; | ||
234 | } | ||
235 | |||
236 | static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) | ||
237 | { | ||
238 | switch (status) { | ||
239 | case VMW_BALLOON_SUCCESS: | ||
240 | return true; | ||
241 | |||
242 | case VMW_BALLOON_ERROR_RESET: | ||
243 | b->reset_required = true; | ||
244 | /* fall through */ | ||
245 | |||
246 | default: | ||
247 | return false; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * Communicate guest type to the host so that it can adjust ballooning | ||
253 | * algorithm to the one most appropriate for the guest. This command | ||
254 | * is normally issued after sending "start" command and is part of | ||
255 | * standard reset sequence. | ||
256 | */ | ||
257 | static bool vmballoon_send_guest_id(struct vmballoon *b) | ||
258 | { | ||
259 | unsigned long status, dummy; | ||
260 | |||
261 | status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy); | ||
262 | |||
263 | STATS_INC(b->stats.guest_type); | ||
264 | |||
265 | if (vmballoon_check_status(b, status)) | ||
266 | return true; | ||
267 | |||
268 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
269 | STATS_INC(b->stats.guest_type_fail); | ||
270 | return false; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Retrieve desired balloon size from the host. | ||
275 | */ | ||
276 | static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) | ||
277 | { | ||
278 | unsigned long status; | ||
279 | unsigned long target; | ||
280 | unsigned long limit; | ||
281 | u32 limit32; | ||
282 | |||
283 | /* | ||
284 | * si_meminfo() is cheap. Moreover, we want to provide dynamic | ||
285 | * max balloon size later. So let us call si_meminfo() every | ||
286 | * iteration. | ||
287 | */ | ||
288 | si_meminfo(&b->sysinfo); | ||
289 | limit = b->sysinfo.totalram; | ||
290 | |||
291 | /* Ensure limit fits in 32-bits */ | ||
292 | limit32 = (u32)limit; | ||
293 | if (limit != limit32) | ||
294 | return false; | ||
295 | |||
296 | /* update stats */ | ||
297 | STATS_INC(b->stats.target); | ||
298 | |||
299 | status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target); | ||
300 | if (vmballoon_check_status(b, status)) { | ||
301 | *new_target = target; | ||
302 | return true; | ||
303 | } | ||
304 | |||
305 | pr_debug("%s - failed, hv returns %ld\n", __func__, status); | ||
306 | STATS_INC(b->stats.target_fail); | ||
307 | return false; | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Notify the host about allocated page so that host can use it without | ||
312 | * fear that guest will need it. Host may reject some pages, we need to | ||
313 | * check the return value and maybe submit a different page. | ||
314 | */ | ||
315 | static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn) | ||
316 | { | ||
317 | unsigned long status, dummy; | ||
318 | u32 pfn32; | ||
319 | |||
320 | pfn32 = (u32)pfn; | ||
321 | if (pfn32 != pfn) | ||
322 | return false; | ||
323 | |||
324 | STATS_INC(b->stats.lock); | ||
325 | |||
326 | status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); | ||
327 | if (vmballoon_check_status(b, status)) | ||
328 | return true; | ||
329 | |||
330 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
331 | STATS_INC(b->stats.lock_fail); | ||
332 | return false; | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Notify the host that guest intends to release given page back into | ||
337 | * the pool of available (to the guest) pages. | ||
338 | */ | ||
339 | static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) | ||
340 | { | ||
341 | unsigned long status, dummy; | ||
342 | u32 pfn32; | ||
343 | |||
344 | pfn32 = (u32)pfn; | ||
345 | if (pfn32 != pfn) | ||
346 | return false; | ||
347 | |||
348 | STATS_INC(b->stats.unlock); | ||
349 | |||
350 | status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy); | ||
351 | if (vmballoon_check_status(b, status)) | ||
352 | return true; | ||
353 | |||
354 | pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); | ||
355 | STATS_INC(b->stats.unlock_fail); | ||
356 | return false; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Quickly release all pages allocated for the balloon. This function is | ||
361 | * called when host decides to "reset" balloon for one reason or another. | ||
362 | * Unlike normal "deflate" we do not (shall not) notify host of the pages | ||
363 | * being released. | ||
364 | */ | ||
365 | static void vmballoon_pop(struct vmballoon *b) | ||
366 | { | ||
367 | struct page *page, *next; | ||
368 | unsigned int count = 0; | ||
369 | |||
370 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
371 | list_del(&page->lru); | ||
372 | __free_page(page); | ||
373 | STATS_INC(b->stats.free); | ||
374 | b->size--; | ||
375 | |||
376 | if (++count >= b->rate_free) { | ||
377 | count = 0; | ||
378 | cond_resched(); | ||
379 | } | ||
380 | } | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * Perform standard reset sequence by popping the balloon (in case it | ||
385 | * is not empty) and then restarting protocol. This operation normally | ||
386 | * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. | ||
387 | */ | ||
388 | static void vmballoon_reset(struct vmballoon *b) | ||
389 | { | ||
390 | /* free all pages, skipping monitor unlock */ | ||
391 | vmballoon_pop(b); | ||
392 | |||
393 | if (vmballoon_send_start(b)) { | ||
394 | b->reset_required = false; | ||
395 | if (!vmballoon_send_guest_id(b)) | ||
396 | pr_err("failed to send guest ID to the host\n"); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * Allocate (or reserve) a page for the balloon and notify the host. If host | ||
402 | * refuses the page put it on "refuse" list and allocate another one until host | ||
403 | * is satisfied. "Refused" pages are released at the end of inflation cycle | ||
404 | * (when we allocate b->rate_alloc pages). | ||
405 | */ | ||
406 | static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) | ||
407 | { | ||
408 | struct page *page; | ||
409 | gfp_t flags; | ||
410 | bool locked = false; | ||
411 | |||
412 | do { | ||
413 | if (!can_sleep) | ||
414 | STATS_INC(b->stats.alloc); | ||
415 | else | ||
416 | STATS_INC(b->stats.sleep_alloc); | ||
417 | |||
418 | flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; | ||
419 | page = alloc_page(flags); | ||
420 | if (!page) { | ||
421 | if (!can_sleep) | ||
422 | STATS_INC(b->stats.alloc_fail); | ||
423 | else | ||
424 | STATS_INC(b->stats.sleep_alloc_fail); | ||
425 | return -ENOMEM; | ||
426 | } | ||
427 | |||
428 | /* inform monitor */ | ||
429 | locked = vmballoon_send_lock_page(b, page_to_pfn(page)); | ||
430 | if (!locked) { | ||
431 | if (b->reset_required) { | ||
432 | __free_page(page); | ||
433 | return -EIO; | ||
434 | } | ||
435 | |||
436 | /* place on list of non-balloonable pages, retry allocation */ | ||
437 | list_add(&page->lru, &b->refused_pages); | ||
438 | STATS_INC(b->stats.refused_alloc); | ||
439 | } | ||
440 | } while (!locked); | ||
441 | |||
442 | /* track allocated page */ | ||
443 | list_add(&page->lru, &b->pages); | ||
444 | |||
445 | /* update balloon size */ | ||
446 | b->size++; | ||
447 | |||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Release the page allocated for the balloon. Note that we first notify | ||
453 | * the host so it can make sure the page will be available for the guest | ||
454 | * to use, if needed. | ||
455 | */ | ||
456 | static int vmballoon_release_page(struct vmballoon *b, struct page *page) | ||
457 | { | ||
458 | if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) | ||
459 | return -EIO; | ||
460 | |||
461 | list_del(&page->lru); | ||
462 | |||
463 | /* deallocate page */ | ||
464 | __free_page(page); | ||
465 | STATS_INC(b->stats.free); | ||
466 | |||
467 | /* update balloon size */ | ||
468 | b->size--; | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * Release pages that were allocated while attempting to inflate the | ||
475 | * balloon but were refused by the host for one reason or another. | ||
476 | */ | ||
477 | static void vmballoon_release_refused_pages(struct vmballoon *b) | ||
478 | { | ||
479 | struct page *page, *next; | ||
480 | |||
481 | list_for_each_entry_safe(page, next, &b->refused_pages, lru) { | ||
482 | list_del(&page->lru); | ||
483 | __free_page(page); | ||
484 | STATS_INC(b->stats.refused_free); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | * Inflate the balloon towards its target size. Note that we try to limit | ||
490 | * the rate of allocation to make sure we are not choking the rest of the | ||
491 | * system. | ||
492 | */ | ||
493 | static void vmballoon_inflate(struct vmballoon *b) | ||
494 | { | ||
495 | unsigned int goal; | ||
496 | unsigned int rate; | ||
497 | unsigned int i; | ||
498 | unsigned int allocations = 0; | ||
499 | int error = 0; | ||
500 | bool alloc_can_sleep = false; | ||
501 | |||
502 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
503 | |||
504 | /* | ||
505 | * First try NOSLEEP page allocations to inflate balloon. | ||
506 | * | ||
507 | * If we do not throttle nosleep allocations, we can drain all | ||
508 | * free pages in the guest quickly (if the balloon target is high). | ||
509 | * As a side-effect, draining free pages helps to inform (force) | ||
510 | * the guest to start swapping if balloon target is not met yet, | ||
511 | * which is a desired behavior. However, balloon driver can consume | ||
512 | * all available CPU cycles if too many pages are allocated in a | ||
513 | * second. Therefore, we throttle nosleep allocations even when | ||
514 | * the guest is not under memory pressure. OTOH, if we have already | ||
515 | * predicted that the guest is under memory pressure, then we | ||
516 | * slow down page allocations considerably. | ||
517 | */ | ||
518 | |||
519 | goal = b->target - b->size; | ||
520 | /* | ||
521 | * Start with the no-sleep allocation rate, which may be | ||
522 | * higher than the sleeping allocation rate. | ||
523 | */ | ||
524 | rate = b->slow_allocation_cycles ? | ||
525 | b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX; | ||
526 | |||
527 | pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n", | ||
528 | __func__, goal, rate, b->rate_alloc); | ||
529 | |||
530 | for (i = 0; i < goal; i++) { | ||
531 | |||
532 | error = vmballoon_reserve_page(b, alloc_can_sleep); | ||
533 | if (error) { | ||
534 | if (error != -ENOMEM) { | ||
535 | /* | ||
536 | * Not a page allocation failure, stop this | ||
537 | * cycle. Maybe we'll get new target from | ||
538 | * the host soon. | ||
539 | */ | ||
540 | break; | ||
541 | } | ||
542 | |||
543 | if (alloc_can_sleep) { | ||
544 | /* | ||
545 | * CANSLEEP page allocation failed, so guest | ||
546 | * is under severe memory pressure. Quickly | ||
547 | * decrease allocation rate. | ||
548 | */ | ||
549 | b->rate_alloc = max(b->rate_alloc / 2, | ||
550 | VMW_BALLOON_RATE_ALLOC_MIN); | ||
551 | break; | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * NOSLEEP page allocation failed, so the guest is | ||
556 | * under memory pressure. Let us slow down page | ||
557 | * allocations for next few cycles so that the guest | ||
558 | * gets out of memory pressure. Also, if we already | ||
559 | * allocated b->rate_alloc pages, let's pause, | ||
560 | * otherwise switch to sleeping allocations. | ||
561 | */ | ||
562 | b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; | ||
563 | |||
564 | if (i >= b->rate_alloc) | ||
565 | break; | ||
566 | |||
567 | alloc_can_sleep = true; | ||
568 | /* Lower rate for sleeping allocations. */ | ||
569 | rate = b->rate_alloc; | ||
570 | } | ||
571 | |||
572 | if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) { | ||
573 | cond_resched(); | ||
574 | allocations = 0; | ||
575 | } | ||
576 | |||
577 | if (i >= rate) { | ||
578 | /* We allocated enough pages, let's take a break. */ | ||
579 | break; | ||
580 | } | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * We reached our goal without failures so try increasing | ||
585 | * allocation rate. | ||
586 | */ | ||
587 | if (error == 0 && i >= b->rate_alloc) { | ||
588 | unsigned int mult = i / b->rate_alloc; | ||
589 | |||
590 | b->rate_alloc = | ||
591 | min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, | ||
592 | VMW_BALLOON_RATE_ALLOC_MAX); | ||
593 | } | ||
594 | |||
595 | vmballoon_release_refused_pages(b); | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Decrease the size of the balloon allowing guest to use more memory. | ||
600 | */ | ||
601 | static void vmballoon_deflate(struct vmballoon *b) | ||
602 | { | ||
603 | struct page *page, *next; | ||
604 | unsigned int i = 0; | ||
605 | unsigned int goal; | ||
606 | int error; | ||
607 | |||
608 | pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); | ||
609 | |||
610 | /* limit deallocation rate */ | ||
611 | goal = min(b->size - b->target, b->rate_free); | ||
612 | |||
613 | pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); | ||
614 | |||
615 | /* free pages to reach target */ | ||
616 | list_for_each_entry_safe(page, next, &b->pages, lru) { | ||
617 | error = vmballoon_release_page(b, page); | ||
618 | if (error) { | ||
619 | /* quickly decrease rate in case of error */ | ||
620 | b->rate_free = max(b->rate_free / 2, | ||
621 | VMW_BALLOON_RATE_FREE_MIN); | ||
622 | return; | ||
623 | } | ||
624 | |||
625 | if (++i >= goal) | ||
626 | break; | ||
627 | } | ||
628 | |||
629 | /* slowly increase rate if there were no errors */ | ||
630 | b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, | ||
631 | VMW_BALLOON_RATE_FREE_MAX); | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Balloon work function: reset protocol, if needed, get the new size and | ||
636 | * adjust balloon as needed. Repeat in 1 sec. | ||
637 | */ | ||
638 | static void vmballoon_work(struct work_struct *work) | ||
639 | { | ||
640 | struct delayed_work *dwork = to_delayed_work(work); | ||
641 | struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); | ||
642 | unsigned int target; | ||
643 | |||
644 | STATS_INC(b->stats.timer); | ||
645 | |||
646 | if (b->reset_required) | ||
647 | vmballoon_reset(b); | ||
648 | |||
649 | if (b->slow_allocation_cycles > 0) | ||
650 | b->slow_allocation_cycles--; | ||
651 | |||
652 | if (vmballoon_send_get_target(b, &target)) { | ||
653 | /* update target, adjust size */ | ||
654 | b->target = target; | ||
655 | |||
656 | if (b->size < target) | ||
657 | vmballoon_inflate(b); | ||
658 | else if (b->size > target) | ||
659 | vmballoon_deflate(b); | ||
660 | } | ||
661 | |||
662 | queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ)); | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * DEBUGFS Interface | ||
667 | */ | ||
668 | #ifdef CONFIG_DEBUG_FS | ||
669 | |||
670 | static int vmballoon_debug_show(struct seq_file *f, void *offset) | ||
671 | { | ||
672 | struct vmballoon *b = f->private; | ||
673 | struct vmballoon_stats *stats = &b->stats; | ||
674 | |||
675 | /* format size info */ | ||
676 | seq_printf(f, | ||
677 | "target: %8d pages\n" | ||
678 | "current: %8d pages\n", | ||
679 | b->target, b->size); | ||
680 | |||
681 | /* format rate info */ | ||
682 | seq_printf(f, | ||
683 | "rateNoSleepAlloc: %8d pages/sec\n" | ||
684 | "rateSleepAlloc: %8d pages/sec\n" | ||
685 | "rateFree: %8d pages/sec\n", | ||
686 | VMW_BALLOON_NOSLEEP_ALLOC_MAX, | ||
687 | b->rate_alloc, b->rate_free); | ||
688 | |||
689 | seq_printf(f, | ||
690 | "\n" | ||
691 | "timer: %8u\n" | ||
692 | "start: %8u (%4u failed)\n" | ||
693 | "guestType: %8u (%4u failed)\n" | ||
694 | "lock: %8u (%4u failed)\n" | ||
695 | "unlock: %8u (%4u failed)\n" | ||
696 | "target: %8u (%4u failed)\n" | ||
697 | "primNoSleepAlloc: %8u (%4u failed)\n" | ||
698 | "primCanSleepAlloc: %8u (%4u failed)\n" | ||
699 | "primFree: %8u\n" | ||
700 | "errAlloc: %8u\n" | ||
701 | "errFree: %8u\n", | ||
702 | stats->timer, | ||
703 | stats->start, stats->start_fail, | ||
704 | stats->guest_type, stats->guest_type_fail, | ||
705 | stats->lock, stats->lock_fail, | ||
706 | stats->unlock, stats->unlock_fail, | ||
707 | stats->target, stats->target_fail, | ||
708 | stats->alloc, stats->alloc_fail, | ||
709 | stats->sleep_alloc, stats->sleep_alloc_fail, | ||
710 | stats->free, | ||
711 | stats->refused_alloc, stats->refused_free); | ||
712 | |||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static int vmballoon_debug_open(struct inode *inode, struct file *file) | ||
717 | { | ||
718 | return single_open(file, vmballoon_debug_show, inode->i_private); | ||
719 | } | ||
720 | |||
721 | static const struct file_operations vmballoon_debug_fops = { | ||
722 | .owner = THIS_MODULE, | ||
723 | .open = vmballoon_debug_open, | ||
724 | .read = seq_read, | ||
725 | .llseek = seq_lseek, | ||
726 | .release = single_release, | ||
727 | }; | ||
728 | |||
729 | static int __init vmballoon_debugfs_init(struct vmballoon *b) | ||
730 | { | ||
731 | int error; | ||
732 | |||
733 | b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, | ||
734 | &vmballoon_debug_fops); | ||
735 | if (IS_ERR(b->dbg_entry)) { | ||
736 | error = PTR_ERR(b->dbg_entry); | ||
737 | pr_err("failed to create debugfs entry, error: %d\n", error); | ||
738 | return error; | ||
739 | } | ||
740 | |||
741 | return 0; | ||
742 | } | ||
743 | |||
744 | static void __exit vmballoon_debugfs_exit(struct vmballoon *b) | ||
745 | { | ||
746 | debugfs_remove(b->dbg_entry); | ||
747 | } | ||
748 | |||
749 | #else | ||
750 | |||
751 | static inline int vmballoon_debugfs_init(struct vmballoon *b) | ||
752 | { | ||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | static inline void vmballoon_debugfs_exit(struct vmballoon *b) | ||
757 | { | ||
758 | } | ||
759 | |||
760 | #endif /* CONFIG_DEBUG_FS */ | ||
761 | |||
762 | static int __init vmballoon_init(void) | ||
763 | { | ||
764 | int error; | ||
765 | |||
766 | /* | ||
767 | * Check if we are running on VMware's hypervisor and bail out | ||
768 | * if we are not. | ||
769 | */ | ||
770 | if (!vmware_platform()) | ||
771 | return -ENODEV; | ||
772 | |||
773 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | ||
774 | if (!vmballoon_wq) { | ||
775 | pr_err("failed to create workqueue\n"); | ||
776 | return -ENOMEM; | ||
777 | } | ||
778 | |||
779 | INIT_LIST_HEAD(&balloon.pages); | ||
780 | INIT_LIST_HEAD(&balloon.refused_pages); | ||
781 | |||
782 | /* initialize rates */ | ||
783 | balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; | ||
784 | balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX; | ||
785 | |||
786 | INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); | ||
787 | |||
788 | /* | ||
789 | * Start balloon. | ||
790 | */ | ||
791 | if (!vmballoon_send_start(&balloon)) { | ||
792 | pr_err("failed to send start command to the host\n"); | ||
793 | error = -EIO; | ||
794 | goto fail; | ||
795 | } | ||
796 | |||
797 | if (!vmballoon_send_guest_id(&balloon)) { | ||
798 | pr_err("failed to send guest ID to the host\n"); | ||
799 | error = -EIO; | ||
800 | goto fail; | ||
801 | } | ||
802 | |||
803 | error = vmballoon_debugfs_init(&balloon); | ||
804 | if (error) | ||
805 | goto fail; | ||
806 | |||
807 | queue_delayed_work(vmballoon_wq, &balloon.dwork, 0); | ||
808 | |||
809 | return 0; | ||
810 | |||
811 | fail: | ||
812 | destroy_workqueue(vmballoon_wq); | ||
813 | return error; | ||
814 | } | ||
815 | module_init(vmballoon_init); | ||
816 | |||
817 | static void __exit vmballoon_exit(void) | ||
818 | { | ||
819 | cancel_delayed_work_sync(&balloon.dwork); | ||
820 | destroy_workqueue(vmballoon_wq); | ||
821 | |||
822 | vmballoon_debugfs_exit(&balloon); | ||
823 | |||
824 | /* | ||
825 | * Deallocate all reserved memory, and reset connection with monitor. | ||
826 | * Reset connection before deallocating memory to avoid potential for | ||
827 | * additional spurious resets from guest touching deallocated pages. | ||
828 | */ | ||
829 | vmballoon_send_start(&balloon); | ||
830 | vmballoon_pop(&balloon); | ||
831 | } | ||
832 | module_exit(vmballoon_exit); | ||
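The inflate path above tunes rate_alloc at the end of each cycle: a fully successful run raises it by VMW_BALLOON_RATE_ALLOC_INC per rate_alloc-sized batch (capped at VMW_BALLOON_RATE_ALLOC_MAX), while a failed CANSLEEP allocation halves it down to VMW_BALLOON_RATE_ALLOC_MIN. A hedged restatement of the growth step with worked numbers (the helper name is hypothetical; constants are the driver's own):

/*
 * Example: pages_done = 2048 with rate_alloc = 512 gives mult = 4, so
 * the next rate is min(512 + 4 * 16, 2048) = 576 pages per second.
 */
static unsigned int example_next_rate_alloc(unsigned int rate_alloc,
					    unsigned int pages_done)
{
	unsigned int mult = pages_done / rate_alloc;

	return min(rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
		   VMW_BALLOON_RATE_ALLOC_MAX);
}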
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 82d1e4de475b..4521b1ecce45 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o mtdbdi.o | 7 | mtd-y := mtdcore.o mtdsuper.o |
8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o | 8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o |
9 | 9 | ||
10 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o | 10 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o |
diff --git a/drivers/mtd/internal.h b/drivers/mtd/internal.h index c658fe7216b5..e69de29bb2d1 100644 --- a/drivers/mtd/internal.h +++ b/drivers/mtd/internal.h | |||
@@ -1,17 +0,0 @@ | |||
1 | /* Internal MTD definitions | ||
2 | * | ||
3 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * mtdbdi.c | ||
14 | */ | ||
15 | extern struct backing_dev_info mtd_bdi_unmappable; | ||
16 | extern struct backing_dev_info mtd_bdi_ro_mappable; | ||
17 | extern struct backing_dev_info mtd_bdi_rw_mappable; | ||
diff --git a/drivers/mtd/mtdbdi.c b/drivers/mtd/mtdbdi.c index 5ca5aed0b225..e69de29bb2d1 100644 --- a/drivers/mtd/mtdbdi.c +++ b/drivers/mtd/mtdbdi.c | |||
@@ -1,43 +0,0 @@ | |||
1 | /* MTD backing device capabilities | ||
2 | * | ||
3 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/backing-dev.h> | ||
13 | #include <linux/mtd/mtd.h> | ||
14 | #include "internal.h" | ||
15 | |||
16 | /* | ||
17 | * backing device capabilities for non-mappable devices (such as NAND flash) | ||
18 | * - permits private mappings, copies are taken of the data | ||
19 | */ | ||
20 | struct backing_dev_info mtd_bdi_unmappable = { | ||
21 | .capabilities = BDI_CAP_MAP_COPY, | ||
22 | }; | ||
23 | |||
24 | /* | ||
25 | * backing device capabilities for R/O mappable devices (such as ROM) | ||
26 | * - permits private mappings, copies are taken of the data | ||
27 | * - permits non-writable shared mappings | ||
28 | */ | ||
29 | struct backing_dev_info mtd_bdi_ro_mappable = { | ||
30 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
31 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * backing device capabilities for writable mappable devices (such as RAM) | ||
36 | * - permits private mappings, copies are taken of the data | ||
37 | * - permits non-writable shared mappings | ||
38 | */ | ||
39 | struct backing_dev_info mtd_bdi_rw_mappable = { | ||
40 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
41 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | ||
42 | BDI_CAP_WRITE_MAP), | ||
43 | }; | ||
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5b38b17d2229..b177e750efc3 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -2,6 +2,9 @@ | |||
2 | * Core registration and callback routines for MTD | 2 | * Core registration and callback routines for MTD |
3 | * drivers and users. | 3 | * drivers and users. |
4 | * | 4 | * |
5 | * bdi bits are: | ||
6 | * Copyright © 2006 Red Hat, Inc. All Rights Reserved. | ||
7 | * Written by David Howells (dhowells@redhat.com) | ||
5 | */ | 8 | */ |
6 | 9 | ||
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
@@ -16,11 +19,39 @@ | |||
16 | #include <linux/init.h> | 19 | #include <linux/init.h> |
17 | #include <linux/mtd/compatmac.h> | 20 | #include <linux/mtd/compatmac.h> |
18 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/backing-dev.h> | ||
19 | 23 | ||
20 | #include <linux/mtd/mtd.h> | 24 | #include <linux/mtd/mtd.h> |
21 | #include "internal.h" | ||
22 | 25 | ||
23 | #include "mtdcore.h" | 26 | #include "mtdcore.h" |
27 | /* | ||
28 | * backing device capabilities for non-mappable devices (such as NAND flash) | ||
29 | * - permits private mappings, copies are taken of the data | ||
30 | */ | ||
31 | struct backing_dev_info mtd_bdi_unmappable = { | ||
32 | .capabilities = BDI_CAP_MAP_COPY, | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * backing device capabilities for R/O mappable devices (such as ROM) | ||
37 | * - permits private mappings, copies are taken of the data | ||
38 | * - permits non-writable shared mappings | ||
39 | */ | ||
40 | struct backing_dev_info mtd_bdi_ro_mappable = { | ||
41 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
42 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | ||
43 | }; | ||
44 | |||
45 | /* | ||
46 | * backing device capabilities for writable mappable devices (such as RAM) | ||
47 | * - permits private mappings, copies are taken of the data | ||
48 | * - permits non-writable shared mappings | ||
49 | */ | ||
50 | struct backing_dev_info mtd_bdi_rw_mappable = { | ||
51 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | ||
52 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | ||
53 | BDI_CAP_WRITE_MAP), | ||
54 | }; | ||
24 | 55 | ||
25 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); | 56 | static int mtd_cls_suspend(struct device *dev, pm_message_t state); |
26 | static int mtd_cls_resume(struct device *dev); | 57 | static int mtd_cls_resume(struct device *dev); |
@@ -628,20 +659,55 @@ done: | |||
628 | /*====================================================================*/ | 659 | /*====================================================================*/ |
629 | /* Init code */ | 660 | /* Init code */ |
630 | 661 | ||
662 | static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) | ||
663 | { | ||
664 | int ret; | ||
665 | |||
666 | ret = bdi_init(bdi); | ||
667 | if (!ret) | ||
668 | ret = bdi_register(bdi, NULL, name); | ||
669 | |||
670 | if (ret) | ||
671 | bdi_destroy(bdi); | ||
672 | |||
673 | return ret; | ||
674 | } | ||
675 | |||
631 | static int __init init_mtd(void) | 676 | static int __init init_mtd(void) |
632 | { | 677 | { |
633 | int ret; | 678 | int ret; |
679 | |||
634 | ret = class_register(&mtd_class); | 680 | ret = class_register(&mtd_class); |
681 | if (ret) | ||
682 | goto err_reg; | ||
683 | |||
684 | ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); | ||
685 | if (ret) | ||
686 | goto err_bdi1; | ||
687 | |||
688 | ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap"); | ||
689 | if (ret) | ||
690 | goto err_bdi2; | ||
691 | |||
692 | ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap"); | ||
693 | if (ret) | ||
694 | goto err_bdi3; | ||
635 | 695 | ||
636 | if (ret) { | ||
637 | pr_err("Error registering mtd class: %d\n", ret); | ||
638 | return ret; | ||
639 | } | ||
640 | #ifdef CONFIG_PROC_FS | 696 | #ifdef CONFIG_PROC_FS |
641 | if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) | 697 | if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) |
642 | proc_mtd->read_proc = mtd_read_proc; | 698 | proc_mtd->read_proc = mtd_read_proc; |
643 | #endif /* CONFIG_PROC_FS */ | 699 | #endif /* CONFIG_PROC_FS */ |
644 | return 0; | 700 | return 0; |
701 | |||
702 | err_bdi3: | ||
703 | bdi_destroy(&mtd_bdi_ro_mappable); | ||
704 | err_bdi2: | ||
705 | bdi_destroy(&mtd_bdi_unmappable); | ||
706 | err_bdi1: | ||
707 | class_unregister(&mtd_class); | ||
708 | err_reg: | ||
709 | pr_err("Error registering mtd class or bdi: %d\n", ret); | ||
710 | return ret; | ||
645 | } | 711 | } |
646 | 712 | ||
647 | static void __exit cleanup_mtd(void) | 713 | static void __exit cleanup_mtd(void) |
@@ -651,6 +717,9 @@ static void __exit cleanup_mtd(void) | |||
651 | remove_proc_entry( "mtd", NULL); | 717 | remove_proc_entry( "mtd", NULL); |
652 | #endif /* CONFIG_PROC_FS */ | 718 | #endif /* CONFIG_PROC_FS */ |
653 | class_unregister(&mtd_class); | 719 | class_unregister(&mtd_class); |
720 | bdi_destroy(&mtd_bdi_unmappable); | ||
721 | bdi_destroy(&mtd_bdi_ro_mappable); | ||
722 | bdi_destroy(&mtd_bdi_rw_mappable); | ||
654 | } | 723 | } |
655 | 724 | ||
656 | module_init(init_mtd); | 725 | module_init(init_mtd); |
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index af8b42e0a55b..7c003191fca4 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mtd/super.h> | 13 | #include <linux/mtd/super.h> |
14 | #include <linux/namei.h> | 14 | #include <linux/namei.h> |
15 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
16 | #include <linux/slab.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * compare superblocks to see if they're equivalent | 19 | * compare superblocks to see if they're equivalent |
@@ -44,6 +45,7 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd) | |||
44 | 45 | ||
45 | sb->s_mtd = mtd; | 46 | sb->s_mtd = mtd; |
46 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); | 47 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); |
48 | sb->s_bdi = mtd->backing_dev_info; | ||
47 | return 0; | 49 | return 0; |
48 | } | 50 | } |
49 | 51 | ||
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index f59c07427af3..d60fc5719fef 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
60 | } | 60 | } |
61 | buf64 = (uint64_t *)buf; | 61 | buf64 = (uint64_t *)buf; |
62 | while (i < len/8) { | 62 | while (i < len/8) { |
63 | uint64_t x; | 63 | /* |
64 | * Since GCC has no proper constraint (PR 43518) | ||
65 | * force x variable to r2/r3 registers as ldrd instruction | ||
66 | * requires first register to be even. | ||
67 | */ | ||
68 | register uint64_t x asm ("r2"); | ||
69 | |||
64 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); | 70 | asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base)); |
65 | buf64[i++] = x; | 71 | buf64[i++] = x; |
66 | } | 72 | } |
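The orion_nand hunk above works around the lack of an "even register" constraint in GCC by binding the 64-bit temporary to r2, so that ldrd gets the even/odd r2/r3 pair it requires. A hedged, self-contained restatement of the idiom (the function name is illustrative; assumes the usual kernel headers):

#include <linux/types.h>
#include <linux/compiler.h>

static inline u64 example_readq_ldrd(const void __iomem *io_base)
{
	/* ldrd needs an even first register; pin x to r2 (pairs with r3) */
	register u64 x asm("r2");

	asm volatile("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
	return x;
}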
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index a03d291de854..f0d23de32967 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, | |||
1944 | netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", | 1944 | netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", |
1945 | __func__, rx_status, rx_size, cur_rx); | 1945 | __func__, rx_status, rx_size, cur_rx); |
1946 | #if RTL8139_DEBUG > 2 | 1946 | #if RTL8139_DEBUG > 2 |
1947 | print_dump_hex(KERN_DEBUG, "Frame contents: ", | 1947 | print_hex_dump(KERN_DEBUG, "Frame contents: ", |
1948 | DUMP_PREFIX_OFFSET, 16, 1, | 1948 | DUMP_PREFIX_OFFSET, 16, 1, |
1949 | &rx_ring[ring_offset], 70, true); | 1949 | &rx_ring[ring_offset], 70, true); |
1950 | #endif | 1950 | #endif |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index a583b50d9de8..12b280afdd51 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/ | |||
273 | obj-$(CONFIG_USB_HSO) += usb/ | 273 | obj-$(CONFIG_USB_HSO) += usb/ |
274 | obj-$(CONFIG_USB_USBNET) += usb/ | 274 | obj-$(CONFIG_USB_USBNET) += usb/ |
275 | obj-$(CONFIG_USB_ZD1201) += usb/ | 275 | obj-$(CONFIG_USB_ZD1201) += usb/ |
276 | obj-$(CONFIG_USB_IPHETH) += usb/ | ||
276 | 277 | ||
277 | obj-y += wireless/ | 278 | obj-y += wireless/ |
278 | obj-$(CONFIG_NET_TULIP) += tulip/ | 279 | obj-$(CONFIG_NET_TULIP) += tulip/ |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index a257babd1bb4..ac90a3828f69 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -58,8 +58,8 @@ | |||
58 | #include "bnx2_fw.h" | 58 | #include "bnx2_fw.h" |
59 | 59 | ||
60 | #define DRV_MODULE_NAME "bnx2" | 60 | #define DRV_MODULE_NAME "bnx2" |
61 | #define DRV_MODULE_VERSION "2.0.8" | 61 | #define DRV_MODULE_VERSION "2.0.9" |
62 | #define DRV_MODULE_RELDATE "Feb 15, 2010" | 62 | #define DRV_MODULE_RELDATE "April 27, 2010" |
63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" | 63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" |
64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" | 64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" |
65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" | 65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" |
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp) | |||
651 | } | 651 | } |
652 | 652 | ||
653 | static void | 653 | static void |
654 | bnx2_netif_stop(struct bnx2 *bp) | 654 | bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) |
655 | { | 655 | { |
656 | bnx2_cnic_stop(bp); | 656 | if (stop_cnic) |
657 | bnx2_cnic_stop(bp); | ||
657 | if (netif_running(bp->dev)) { | 658 | if (netif_running(bp->dev)) { |
658 | int i; | 659 | int i; |
659 | 660 | ||
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp) | |||
671 | } | 672 | } |
672 | 673 | ||
673 | static void | 674 | static void |
674 | bnx2_netif_start(struct bnx2 *bp) | 675 | bnx2_netif_start(struct bnx2 *bp, bool start_cnic) |
675 | { | 676 | { |
676 | if (atomic_dec_and_test(&bp->intr_sem)) { | 677 | if (atomic_dec_and_test(&bp->intr_sem)) { |
677 | if (netif_running(bp->dev)) { | 678 | if (netif_running(bp->dev)) { |
678 | netif_tx_wake_all_queues(bp->dev); | 679 | netif_tx_wake_all_queues(bp->dev); |
679 | bnx2_napi_enable(bp); | 680 | bnx2_napi_enable(bp); |
680 | bnx2_enable_int(bp); | 681 | bnx2_enable_int(bp); |
681 | bnx2_cnic_start(bp); | 682 | if (start_cnic) |
683 | bnx2_cnic_start(bp); | ||
682 | } | 684 | } |
683 | } | 685 | } |
684 | } | 686 | } |
@@ -4759,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
4759 | rc = bnx2_alloc_bad_rbuf(bp); | 4761 | rc = bnx2_alloc_bad_rbuf(bp); |
4760 | } | 4762 | } |
4761 | 4763 | ||
4762 | if (bp->flags & BNX2_FLAG_USING_MSIX) | 4764 | if (bp->flags & BNX2_FLAG_USING_MSIX) { |
4763 | bnx2_setup_msix_tbl(bp); | 4765 | bnx2_setup_msix_tbl(bp); |
4766 | /* Prevent MSIX table reads and write from timing out */ | ||
4767 | REG_WR(bp, BNX2_MISC_ECO_HW_CTL, | ||
4768 | BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); | ||
4769 | } | ||
4764 | 4770 | ||
4765 | return rc; | 4771 | return rc; |
4766 | } | 4772 | } |
@@ -6273,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work) | |||
6273 | return; | 6279 | return; |
6274 | } | 6280 | } |
6275 | 6281 | ||
6276 | bnx2_netif_stop(bp); | 6282 | bnx2_netif_stop(bp, true); |
6277 | 6283 | ||
6278 | bnx2_init_nic(bp, 1); | 6284 | bnx2_init_nic(bp, 1); |
6279 | 6285 | ||
6280 | atomic_set(&bp->intr_sem, 1); | 6286 | atomic_set(&bp->intr_sem, 1); |
6281 | bnx2_netif_start(bp); | 6287 | bnx2_netif_start(bp, true); |
6282 | rtnl_unlock(); | 6288 | rtnl_unlock(); |
6283 | } | 6289 | } |
6284 | 6290 | ||
@@ -6320,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | |||
6320 | struct bnx2 *bp = netdev_priv(dev); | 6326 | struct bnx2 *bp = netdev_priv(dev); |
6321 | 6327 | ||
6322 | if (netif_running(dev)) | 6328 | if (netif_running(dev)) |
6323 | bnx2_netif_stop(bp); | 6329 | bnx2_netif_stop(bp, false); |
6324 | 6330 | ||
6325 | bp->vlgrp = vlgrp; | 6331 | bp->vlgrp = vlgrp; |
6326 | 6332 | ||
@@ -6331,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp) | |||
6331 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) | 6337 | if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) |
6332 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); | 6338 | bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); |
6333 | 6339 | ||
6334 | bnx2_netif_start(bp); | 6340 | bnx2_netif_start(bp, false); |
6335 | } | 6341 | } |
6336 | #endif | 6342 | #endif |
6337 | 6343 | ||
@@ -7051,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) | |||
7051 | bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; | 7057 | bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; |
7052 | 7058 | ||
7053 | if (netif_running(bp->dev)) { | 7059 | if (netif_running(bp->dev)) { |
7054 | bnx2_netif_stop(bp); | 7060 | bnx2_netif_stop(bp, true); |
7055 | bnx2_init_nic(bp, 0); | 7061 | bnx2_init_nic(bp, 0); |
7056 | bnx2_netif_start(bp); | 7062 | bnx2_netif_start(bp, true); |
7057 | } | 7063 | } |
7058 | 7064 | ||
7059 | return 0; | 7065 | return 0; |
@@ -7083,7 +7089,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) | |||
7083 | /* Reset will erase chipset stats; save them */ | 7089 | /* Reset will erase chipset stats; save them */ |
7084 | bnx2_save_stats(bp); | 7090 | bnx2_save_stats(bp); |
7085 | 7091 | ||
7086 | bnx2_netif_stop(bp); | 7092 | bnx2_netif_stop(bp, true); |
7087 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | 7093 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); |
7088 | bnx2_free_skbs(bp); | 7094 | bnx2_free_skbs(bp); |
7089 | bnx2_free_mem(bp); | 7095 | bnx2_free_mem(bp); |
@@ -7111,7 +7117,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) | |||
7111 | bnx2_setup_cnic_irq_info(bp); | 7117 | bnx2_setup_cnic_irq_info(bp); |
7112 | mutex_unlock(&bp->cnic_lock); | 7118 | mutex_unlock(&bp->cnic_lock); |
7113 | #endif | 7119 | #endif |
7114 | bnx2_netif_start(bp); | 7120 | bnx2_netif_start(bp, true); |
7115 | } | 7121 | } |
7116 | return 0; | 7122 | return 0; |
7117 | } | 7123 | } |
@@ -7364,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | |||
7364 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | 7370 | if (etest->flags & ETH_TEST_FL_OFFLINE) { |
7365 | int i; | 7371 | int i; |
7366 | 7372 | ||
7367 | bnx2_netif_stop(bp); | 7373 | bnx2_netif_stop(bp, true); |
7368 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); | 7374 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); |
7369 | bnx2_free_skbs(bp); | 7375 | bnx2_free_skbs(bp); |
7370 | 7376 | ||
@@ -7383,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) | |||
7383 | bnx2_shutdown_chip(bp); | 7389 | bnx2_shutdown_chip(bp); |
7384 | else { | 7390 | else { |
7385 | bnx2_init_nic(bp, 1); | 7391 | bnx2_init_nic(bp, 1); |
7386 | bnx2_netif_start(bp); | 7392 | bnx2_netif_start(bp, true); |
7387 | } | 7393 | } |
7388 | 7394 | ||
7389 | /* wait for link up */ | 7395 | /* wait for link up */ |
@@ -8377,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
8377 | return 0; | 8383 | return 0; |
8378 | 8384 | ||
8379 | flush_scheduled_work(); | 8385 | flush_scheduled_work(); |
8380 | bnx2_netif_stop(bp); | 8386 | bnx2_netif_stop(bp, true); |
8381 | netif_device_detach(dev); | 8387 | netif_device_detach(dev); |
8382 | del_timer_sync(&bp->timer); | 8388 | del_timer_sync(&bp->timer); |
8383 | bnx2_shutdown_chip(bp); | 8389 | bnx2_shutdown_chip(bp); |
@@ -8399,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev) | |||
8399 | bnx2_set_power_state(bp, PCI_D0); | 8405 | bnx2_set_power_state(bp, PCI_D0); |
8400 | netif_device_attach(dev); | 8406 | netif_device_attach(dev); |
8401 | bnx2_init_nic(bp, 1); | 8407 | bnx2_init_nic(bp, 1); |
8402 | bnx2_netif_start(bp); | 8408 | bnx2_netif_start(bp, true); |
8403 | return 0; | 8409 | return 0; |
8404 | } | 8410 | } |
8405 | 8411 | ||
@@ -8426,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev, | |||
8426 | } | 8432 | } |
8427 | 8433 | ||
8428 | if (netif_running(dev)) { | 8434 | if (netif_running(dev)) { |
8429 | bnx2_netif_stop(bp); | 8435 | bnx2_netif_stop(bp, true); |
8430 | del_timer_sync(&bp->timer); | 8436 | del_timer_sync(&bp->timer); |
8431 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); | 8437 | bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); |
8432 | } | 8438 | } |
@@ -8483,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev) | |||
8483 | 8489 | ||
8484 | rtnl_lock(); | 8490 | rtnl_lock(); |
8485 | if (netif_running(dev)) | 8491 | if (netif_running(dev)) |
8486 | bnx2_netif_start(bp); | 8492 | bnx2_netif_start(bp, true); |
8487 | 8493 | ||
8488 | netif_device_attach(dev); | 8494 | netif_device_attach(dev); |
8489 | rtnl_unlock(); | 8495 | rtnl_unlock(); |
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 33451092b8e8..d800b598ae3d 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
@@ -1006,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf, | |||
1006 | 1006 | ||
1007 | netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); | 1007 | netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); |
1008 | if (!netdev) { | 1008 | if (!netdev) { |
1009 | dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); | 1009 | dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n"); |
1010 | return -ENOMEM; | 1010 | return -ENOMEM; |
1011 | } | 1011 | } |
1012 | 1012 | ||
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf, | |||
1036 | 1036 | ||
1037 | dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); | 1037 | dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); |
1038 | if (!dev->intr_urb) { | 1038 | if (!dev->intr_urb) { |
1039 | dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n"); | 1039 | dev_err(&intf->dev, "Couldn't alloc intr URB\n"); |
1040 | goto cleanup_candev; | 1040 | goto cleanup_candev; |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); | 1043 | dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); |
1044 | if (!dev->intr_in_buffer) { | 1044 | if (!dev->intr_in_buffer) { |
1045 | dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n"); | 1045 | dev_err(&intf->dev, "Couldn't alloc Intr buffer\n"); |
1046 | goto cleanup_intr_urb; | 1046 | goto cleanup_intr_urb; |
1047 | } | 1047 | } |
1048 | 1048 | ||
1049 | dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + | 1049 | dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + |
1050 | sizeof(struct ems_cpc_msg), GFP_KERNEL); | 1050 | sizeof(struct ems_cpc_msg), GFP_KERNEL); |
1051 | if (!dev->tx_msg_buffer) { | 1051 | if (!dev->tx_msg_buffer) { |
1052 | dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n"); | 1052 | dev_err(&intf->dev, "Couldn't alloc Tx buffer\n"); |
1053 | goto cleanup_intr_in_buffer; | 1053 | goto cleanup_intr_in_buffer; |
1054 | } | 1054 | } |
1055 | 1055 | ||
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 9781942992e9..4b451a7c03e9 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk) | |||
2334 | struct cnic_local *cp = dev->cnic_priv; | 2334 | struct cnic_local *cp = dev->cnic_priv; |
2335 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; | 2335 | u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; |
2336 | 2336 | ||
2337 | prefetch(cp->status_blk.bnx2x); | 2337 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { |
2338 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | 2338 | prefetch(cp->status_blk.bnx2x); |
2339 | prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); | ||
2339 | 2340 | ||
2340 | if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) | ||
2341 | tasklet_schedule(&cp->cnic_irq_task); | 2341 | tasklet_schedule(&cp->cnic_irq_task); |
2342 | 2342 | cnic_chk_pkt_rings(cp); | |
2343 | cnic_chk_pkt_rings(cp); | 2343 | } |
2344 | 2344 | ||
2345 | return 0; | 2345 | return 0; |
2346 | } | 2346 | } |
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c index 5248f9e0b2f4..35cd36729155 100644 --- a/drivers/net/cxgb3/ael1002.c +++ b/drivers/net/cxgb3/ael1002.c | |||
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = { | |||
934 | int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, | 934 | int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, |
935 | int phy_addr, const struct mdio_ops *mdio_ops) | 935 | int phy_addr, const struct mdio_ops *mdio_ops) |
936 | { | 936 | { |
937 | cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops, | 937 | cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, |
938 | SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, | 938 | SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, |
939 | "10GBASE-CX4"); | 939 | "10GBASE-CX4"); |
940 | return 0; | 940 | return 0; |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index aced6c5e635c..e3f1b8566495 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter) | |||
439 | static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, | 439 | static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, |
440 | unsigned long n) | 440 | unsigned long n) |
441 | { | 441 | { |
442 | int attempts = 5; | 442 | int attempts = 10; |
443 | 443 | ||
444 | while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { | 444 | while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { |
445 | if (!--attempts) | 445 | if (!--attempts) |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index b997e578e58f..791080303db1 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -166,6 +166,7 @@ | |||
166 | #include <linux/ethtool.h> | 166 | #include <linux/ethtool.h> |
167 | #include <linux/string.h> | 167 | #include <linux/string.h> |
168 | #include <linux/firmware.h> | 168 | #include <linux/firmware.h> |
169 | #include <linux/rtnetlink.h> | ||
169 | #include <asm/unaligned.h> | 170 | #include <asm/unaligned.h> |
170 | 171 | ||
171 | 172 | ||
@@ -2265,8 +2266,13 @@ static void e100_tx_timeout_task(struct work_struct *work) | |||
2265 | 2266 | ||
2266 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", | 2267 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", |
2267 | ioread8(&nic->csr->scb.status)); | 2268 | ioread8(&nic->csr->scb.status)); |
2268 | e100_down(netdev_priv(netdev)); | 2269 | |
2269 | e100_up(netdev_priv(netdev)); | 2270 | rtnl_lock(); |
2271 | if (netif_running(netdev)) { | ||
2272 | e100_down(netdev_priv(netdev)); | ||
2273 | e100_up(netdev_priv(netdev)); | ||
2274 | } | ||
2275 | rtnl_unlock(); | ||
2270 | } | 2276 | } |
2271 | 2277 | ||
2272 | static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) | 2278 | static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) |
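The e100 hunk above moves the reset into the deferred tx-timeout worker under rtnl_lock() and only performs it while the interface is actually running. A minimal sketch of that pattern for a hypothetical driver (the mydrv_* names are placeholders, not e100 symbols):

    #include <linux/kernel.h>
    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    struct mydrv_priv {
            struct net_device *netdev;
            struct work_struct tx_timeout_task;
    };

    static void mydrv_down(struct mydrv_priv *priv) { /* stop queues, free rings */ }
    static void mydrv_up(struct mydrv_priv *priv)   { /* re-init rings, restart queues */ }

    static void mydrv_tx_timeout_task(struct work_struct *work)
    {
            struct mydrv_priv *priv =
                    container_of(work, struct mydrv_priv, tx_timeout_task);

            /* rtnl_lock() serializes against ndo_open/ndo_stop; checking
             * netif_running() avoids resetting an interface that was brought
             * down after the work item was queued. */
            rtnl_lock();
            if (netif_running(priv->netdev)) {
                    mydrv_down(priv);
                    mydrv_up(priv);
            }
            rtnl_unlock();
    }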
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 712ccc66ba25..90155552ea09 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) | |||
336 | struct e1000_hw *hw = &adapter->hw; | 336 | struct e1000_hw *hw = &adapter->hw; |
337 | static int global_quad_port_a; /* global port a indication */ | 337 | static int global_quad_port_a; /* global port a indication */ |
338 | struct pci_dev *pdev = adapter->pdev; | 338 | struct pci_dev *pdev = adapter->pdev; |
339 | u16 eeprom_data = 0; | ||
340 | int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; | 339 | int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; |
341 | s32 rc; | 340 | s32 rc; |
342 | 341 | ||
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) | |||
387 | if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) | 386 | if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) |
388 | adapter->flags &= ~FLAG_HAS_WOL; | 387 | adapter->flags &= ~FLAG_HAS_WOL; |
389 | break; | 388 | break; |
390 | |||
391 | case e1000_82573: | 389 | case e1000_82573: |
390 | case e1000_82574: | ||
391 | case e1000_82583: | ||
392 | /* Disable ASPM L0s due to hardware errata */ | ||
393 | e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S); | ||
394 | |||
392 | if (pdev->device == E1000_DEV_ID_82573L) { | 395 | if (pdev->device == E1000_DEV_ID_82573L) { |
393 | if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, | 396 | adapter->flags |= FLAG_HAS_JUMBO_FRAMES; |
394 | &eeprom_data) < 0) | 397 | adapter->max_hw_frame_size = DEFAULT_JUMBO; |
395 | break; | ||
396 | if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) { | ||
397 | adapter->flags |= FLAG_HAS_JUMBO_FRAMES; | ||
398 | adapter->max_hw_frame_size = DEFAULT_JUMBO; | ||
399 | } | ||
400 | } | 398 | } |
401 | break; | 399 | break; |
402 | default: | 400 | default: |
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = { | |||
1792 | | FLAG_RESET_OVERWRITES_LAA /* errata */ | 1790 | | FLAG_RESET_OVERWRITES_LAA /* errata */ |
1793 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ | 1791 | | FLAG_TARC_SPEED_MODE_BIT /* errata */ |
1794 | | FLAG_APME_CHECK_PORT_B, | 1792 | | FLAG_APME_CHECK_PORT_B, |
1793 | .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */ | ||
1795 | .pba = 38, | 1794 | .pba = 38, |
1796 | .max_hw_frame_size = DEFAULT_JUMBO, | 1795 | .max_hw_frame_size = DEFAULT_JUMBO, |
1797 | .get_variants = e1000_get_variants_82571, | 1796 | .get_variants = e1000_get_variants_82571, |
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = { | |||
1809 | | FLAG_RX_CSUM_ENABLED | 1808 | | FLAG_RX_CSUM_ENABLED |
1810 | | FLAG_HAS_CTRLEXT_ON_LOAD | 1809 | | FLAG_HAS_CTRLEXT_ON_LOAD |
1811 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ | 1810 | | FLAG_TARC_SPEED_MODE_BIT, /* errata */ |
1811 | .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */ | ||
1812 | .pba = 38, | 1812 | .pba = 38, |
1813 | .max_hw_frame_size = DEFAULT_JUMBO, | 1813 | .max_hw_frame_size = DEFAULT_JUMBO, |
1814 | .get_variants = e1000_get_variants_82571, | 1814 | .get_variants = e1000_get_variants_82571, |
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = { | |||
1820 | struct e1000_info e1000_82573_info = { | 1820 | struct e1000_info e1000_82573_info = { |
1821 | .mac = e1000_82573, | 1821 | .mac = e1000_82573, |
1822 | .flags = FLAG_HAS_HW_VLAN_FILTER | 1822 | .flags = FLAG_HAS_HW_VLAN_FILTER |
1823 | | FLAG_HAS_JUMBO_FRAMES | ||
1824 | | FLAG_HAS_WOL | 1823 | | FLAG_HAS_WOL |
1825 | | FLAG_APME_IN_CTRL3 | 1824 | | FLAG_APME_IN_CTRL3 |
1826 | | FLAG_RX_CSUM_ENABLED | 1825 | | FLAG_RX_CSUM_ENABLED |
1827 | | FLAG_HAS_SMART_POWER_DOWN | 1826 | | FLAG_HAS_SMART_POWER_DOWN |
1828 | | FLAG_HAS_AMT | 1827 | | FLAG_HAS_AMT |
1829 | | FLAG_HAS_ERT | ||
1830 | | FLAG_HAS_SWSM_ON_LOAD, | 1828 | | FLAG_HAS_SWSM_ON_LOAD, |
1831 | .pba = 20, | 1829 | .pba = 20, |
1832 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | 1830 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 118bdf483593..ee32b9b27a9f 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/netdevice.h> | 38 | #include <linux/netdevice.h> |
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <linux/pci-aspm.h> | ||
40 | 41 | ||
41 | #include "hw.h" | 42 | #include "hw.h" |
42 | 43 | ||
@@ -374,7 +375,7 @@ struct e1000_adapter { | |||
374 | struct e1000_info { | 375 | struct e1000_info { |
375 | enum e1000_mac_type mac; | 376 | enum e1000_mac_type mac; |
376 | unsigned int flags; | 377 | unsigned int flags; |
377 | unsigned int flags2; | 378 | unsigned int flags2; |
378 | u32 pba; | 379 | u32 pba; |
379 | u32 max_hw_frame_size; | 380 | u32 max_hw_frame_size; |
380 | s32 (*get_variants)(struct e1000_adapter *); | 381 | s32 (*get_variants)(struct e1000_adapter *); |
@@ -421,6 +422,7 @@ struct e1000_info { | |||
421 | #define FLAG2_CRC_STRIPPING (1 << 0) | 422 | #define FLAG2_CRC_STRIPPING (1 << 0) |
422 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) | 423 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) |
423 | #define FLAG2_IS_DISCARDING (1 << 2) | 424 | #define FLAG2_IS_DISCARDING (1 << 2) |
425 | #define FLAG2_DISABLE_ASPM_L1 (1 << 3) | ||
424 | 426 | ||
425 | #define E1000_RX_DESC_PS(R, i) \ | 427 | #define E1000_RX_DESC_PS(R, i) \ |
426 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 428 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
@@ -461,6 +463,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter); | |||
461 | extern bool e1000e_has_link(struct e1000_adapter *adapter); | 463 | extern bool e1000e_has_link(struct e1000_adapter *adapter); |
462 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); | 464 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); |
463 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); | 465 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); |
466 | extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); | ||
464 | 467 | ||
465 | extern unsigned int copybreak; | 468 | extern unsigned int copybreak; |
466 | 469 | ||
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index cfd09cea7214..fb8fc7d1b50d 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
661 | i = 0; | 661 | i = 0; |
662 | } | 662 | } |
663 | 663 | ||
664 | if (i == tx_ring->next_to_use) | ||
665 | break; | ||
664 | eop = tx_ring->buffer_info[i].next_to_watch; | 666 | eop = tx_ring->buffer_info[i].next_to_watch; |
665 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 667 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
666 | } | 668 | } |
@@ -4281,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4281 | return -EINVAL; | 4283 | return -EINVAL; |
4282 | } | 4284 | } |
4283 | 4285 | ||
4286 | /* 82573 Errata 17 */ | ||
4287 | if (((adapter->hw.mac.type == e1000_82573) || | ||
4288 | (adapter->hw.mac.type == e1000_82574)) && | ||
4289 | (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { | ||
4290 | adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; | ||
4291 | e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); | ||
4292 | } | ||
4293 | |||
4284 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 4294 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
4285 | msleep(1); | 4295 | msleep(1); |
4286 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ | 4296 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ |
@@ -4603,29 +4613,39 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | |||
4603 | } | 4613 | } |
4604 | } | 4614 | } |
4605 | 4615 | ||
4606 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) | 4616 | #ifdef CONFIG_PCIEASPM |
4617 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4618 | { | ||
4619 | pci_disable_link_state(pdev, state); | ||
4620 | } | ||
4621 | #else | ||
4622 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4607 | { | 4623 | { |
4608 | int pos; | 4624 | int pos; |
4609 | u16 val; | 4625 | u16 reg16; |
4610 | 4626 | ||
4611 | /* | 4627 | /* |
4612 | * 82573 workaround - disable L1 ASPM on mobile chipsets | 4628 | * Both device and parent should have the same ASPM setting. |
4613 | * | 4629 | * Disable ASPM in downstream component first and then upstream. |
4614 | * L1 ASPM on various mobile (ich7) chipsets do not behave properly | ||
4615 | * resulting in lost data or garbage information on the pci-e link | ||
4616 | * level. This could result in (false) bad EEPROM checksum errors, | ||
4617 | * long ping times (up to 2s) or even a system freeze/hang. | ||
4618 | * | ||
4619 | * Unfortunately this feature saves about 1W power consumption when | ||
4620 | * active. | ||
4621 | */ | 4630 | */ |
4622 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 4631 | pos = pci_pcie_cap(pdev); |
4623 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); | 4632 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); |
4624 | if (val & 0x2) { | 4633 | reg16 &= ~state; |
4625 | dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); | 4634 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); |
4626 | val &= ~0x2; | 4635 | |
4627 | pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); | 4636 | pos = pci_pcie_cap(pdev->bus->self); |
4628 | } | 4637 | pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); |
4638 | reg16 &= ~state; | ||
4639 | pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); | ||
4640 | } | ||
4641 | #endif | ||
4642 | void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | ||
4643 | { | ||
4644 | dev_info(&pdev->dev, "Disabling ASPM %s %s\n", | ||
4645 | (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", | ||
4646 | (state & PCIE_LINK_STATE_L1) ? "L1" : ""); | ||
4647 | |||
4648 | __e1000e_disable_aspm(pdev, state); | ||
4629 | } | 4649 | } |
4630 | 4650 | ||
4631 | #ifdef CONFIG_PM | 4651 | #ifdef CONFIG_PM |
@@ -4651,7 +4671,8 @@ static int e1000_resume(struct pci_dev *pdev) | |||
4651 | pci_set_power_state(pdev, PCI_D0); | 4671 | pci_set_power_state(pdev, PCI_D0); |
4652 | pci_restore_state(pdev); | 4672 | pci_restore_state(pdev); |
4653 | pci_save_state(pdev); | 4673 | pci_save_state(pdev); |
4654 | e1000e_disable_l1aspm(pdev); | 4674 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) |
4675 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4655 | 4676 | ||
4656 | err = pci_enable_device_mem(pdev); | 4677 | err = pci_enable_device_mem(pdev); |
4657 | if (err) { | 4678 | if (err) { |
@@ -4793,7 +4814,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
4793 | int err; | 4814 | int err; |
4794 | pci_ers_result_t result; | 4815 | pci_ers_result_t result; |
4795 | 4816 | ||
4796 | e1000e_disable_l1aspm(pdev); | 4817 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) |
4818 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4797 | err = pci_enable_device_mem(pdev); | 4819 | err = pci_enable_device_mem(pdev); |
4798 | if (err) { | 4820 | if (err) { |
4799 | dev_err(&pdev->dev, | 4821 | dev_err(&pdev->dev, |
@@ -4887,13 +4909,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) | |||
4887 | dev_warn(&adapter->pdev->dev, | 4909 | dev_warn(&adapter->pdev->dev, |
4888 | "Warning: detected DSPD enabled in EEPROM\n"); | 4910 | "Warning: detected DSPD enabled in EEPROM\n"); |
4889 | } | 4911 | } |
4890 | |||
4891 | ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); | ||
4892 | if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) { | ||
4893 | /* ASPM enable */ | ||
4894 | dev_warn(&adapter->pdev->dev, | ||
4895 | "Warning: detected ASPM enabled in EEPROM\n"); | ||
4896 | } | ||
4897 | } | 4912 | } |
4898 | 4913 | ||
4899 | static const struct net_device_ops e1000e_netdev_ops = { | 4914 | static const struct net_device_ops e1000e_netdev_ops = { |
@@ -4942,7 +4957,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4942 | u16 eeprom_data = 0; | 4957 | u16 eeprom_data = 0; |
4943 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | 4958 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
4944 | 4959 | ||
4945 | e1000e_disable_l1aspm(pdev); | 4960 | if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) |
4961 | e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); | ||
4946 | 4962 | ||
4947 | err = pci_enable_device_mem(pdev); | 4963 | err = pci_enable_device_mem(pdev); |
4948 | if (err) | 4964 | if (err) |
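The #else fallback of __e1000e_disable_aspm() above works because PCIE_LINK_STATE_L0S and PCIE_LINK_STATE_L1 happen to line up with the ASPM Control field (bits 1:0) of the PCIe Link Control register, so masking them out of LNKCTL turns those states off; the driver then repeats the write on the upstream bridge (pdev->bus->self) so both ends of the link agree. A sketch of the per-device step, assuming the same helpers the driver itself uses (pci_pcie_cap() and friends):

    #include <linux/pci.h>
    #include <linux/pci-aspm.h>

    /* Sketch only: clear the requested ASPM states in LNKCTL[1:0]. */
    static void sketch_clear_aspm(struct pci_dev *dev, u16 state)
    {
            int pos = pci_pcie_cap(dev);    /* offset of the PCIe capability */
            u16 lnkctl;

            if (!pos)
                    return;
            pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &lnkctl);
            lnkctl &= ~(state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1));
            pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, lnkctl);
    }

When CONFIG_PCIEASPM is set, the driver instead calls pci_disable_link_state(), letting the PCIe core apply and track the change itself rather than poking the registers directly.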
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 73b260c3c654..5c98f7c22425 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5899,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5899 | /* Limit the number of tx's outstanding for hw bug */ | 5899 | /* Limit the number of tx's outstanding for hw bug */ |
5900 | if (id->driver_data & DEV_NEED_TX_LIMIT) { | 5900 | if (id->driver_data & DEV_NEED_TX_LIMIT) { |
5901 | np->tx_limit = 1; | 5901 | np->tx_limit = 1; |
5902 | if ((id->driver_data & DEV_NEED_TX_LIMIT2) && | 5902 | if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && |
5903 | pci_dev->revision >= 0xA2) | 5903 | pci_dev->revision >= 0xA2) |
5904 | np->tx_limit = 0; | 5904 | np->tx_limit = 0; |
5905 | } | 5905 | } |
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index d5160edf2fcf..3acac5f930c8 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c | |||
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) | |||
205 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) | 205 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) |
206 | { | 206 | { |
207 | struct gfar __iomem *enet_regs; | 207 | struct gfar __iomem *enet_regs; |
208 | u32 __iomem *ioremap_tbipa; | ||
209 | u64 addr, size; | ||
210 | 208 | ||
211 | /* | 209 | /* |
212 | * This is mildly evil, but so is our hardware for doing this. | 210 | * This is mildly evil, but so is our hardware for doing this. |
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi | |||
220 | return &enet_regs->tbipa; | 218 | return &enet_regs->tbipa; |
221 | } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || | 219 | } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || |
222 | of_device_is_compatible(np, "fsl,etsec2-tbi")) { | 220 | of_device_is_compatible(np, "fsl,etsec2-tbi")) { |
223 | addr = of_translate_address(np, of_get_address(np, 1, &size, NULL)); | 221 | return of_iomap(np, 1); |
224 | ioremap_tbipa = ioremap(addr, size); | ||
225 | return ioremap_tbipa; | ||
226 | } else | 222 | } else |
227 | return NULL; | 223 | return NULL; |
228 | } | 224 | } |
@@ -279,6 +275,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
279 | u32 __iomem *tbipa; | 275 | u32 __iomem *tbipa; |
280 | struct mii_bus *new_bus; | 276 | struct mii_bus *new_bus; |
281 | int tbiaddr = -1; | 277 | int tbiaddr = -1; |
278 | const u32 *addrp; | ||
282 | u64 addr = 0, size = 0; | 279 | u64 addr = 0, size = 0; |
283 | int err = 0; | 280 | int err = 0; |
284 | 281 | ||
@@ -297,8 +294,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
297 | new_bus->priv = priv; | 294 | new_bus->priv = priv; |
298 | fsl_pq_mdio_bus_name(new_bus->id, np); | 295 | fsl_pq_mdio_bus_name(new_bus->id, np); |
299 | 296 | ||
297 | addrp = of_get_address(np, 0, &size, NULL); | ||
298 | if (!addrp) { | ||
299 | err = -EINVAL; | ||
300 | goto err_free_bus; | ||
301 | } | ||
302 | |||
300 | /* Set the PHY base address */ | 303 | /* Set the PHY base address */ |
301 | addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); | 304 | addr = of_translate_address(np, addrp); |
305 | if (addr == OF_BAD_ADDR) { | ||
306 | err = -EINVAL; | ||
307 | goto err_free_bus; | ||
308 | } | ||
309 | |||
302 | map = ioremap(addr, size); | 310 | map = ioremap(addr, size); |
303 | if (!map) { | 311 | if (!map) { |
304 | err = -ENOMEM; | 312 | err = -ENOMEM; |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 080d1cea5b26..4e97ca182997 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -549,12 +549,8 @@ static int gfar_parse_group(struct device_node *np, | |||
549 | struct gfar_private *priv, const char *model) | 549 | struct gfar_private *priv, const char *model) |
550 | { | 550 | { |
551 | u32 *queue_mask; | 551 | u32 *queue_mask; |
552 | u64 addr, size; | ||
553 | |||
554 | addr = of_translate_address(np, | ||
555 | of_get_address(np, 0, &size, NULL)); | ||
556 | priv->gfargrp[priv->num_grps].regs = ioremap(addr, size); | ||
557 | 552 | ||
553 | priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); | ||
558 | if (!priv->gfargrp[priv->num_grps].regs) | 554 | if (!priv->gfargrp[priv->num_grps].regs) |
559 | return -ENOMEM; | 555 | return -ENOMEM; |
560 | 556 | ||
@@ -1515,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev) | |||
1515 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | 1511 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1516 | gfar_write(®s->dmactrl, tempval); | 1512 | gfar_write(®s->dmactrl, tempval); |
1517 | 1513 | ||
1518 | while (!(gfar_read(®s->ievent) & | 1514 | spin_event_timeout(((gfar_read(®s->ievent) & |
1519 | (IEVENT_GRSC | IEVENT_GTSC))) | 1515 | (IEVENT_GRSC | IEVENT_GTSC)) == |
1520 | cpu_relax(); | 1516 | (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); |
1521 | } | 1517 | } |
1522 | } | 1518 | } |
1523 | 1519 | ||
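The gianfar hunk above replaces an unbounded cpu_relax() loop with spin_event_timeout(), a helper provided on powerpc that polls a condition with an optional per-iteration delay and a timeout (the -1 here effectively means wait indefinitely, but the wait now goes through the common helper). On a tree without that macro, the equivalent poll inside gfar_halt_nodisable() would look roughly like this fragment (the iteration cap is purely illustrative, not taken from the driver):

    u32 ev;
    unsigned int spins = 10000;     /* illustrative bound */

    do {
            ev = gfar_read(&regs->ievent);
            if ((ev & (IEVENT_GRSC | IEVENT_GTSC)) ==
                (IEVENT_GRSC | IEVENT_GTSC))
                    break;          /* both graceful-stop events seen */
            cpu_relax();
    } while (--spins);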
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index d313fae992da..743038490104 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
1814 | retval = 0; | 1814 | retval = 0; |
1815 | break; | 1815 | break; |
1816 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1816 | case E1000_DEV_ID_82576_QUAD_COPPER: |
1817 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
1817 | /* quad port adapters only support WoL on port A */ | 1818 | /* quad port adapters only support WoL on port A */ |
1818 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { | 1819 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { |
1819 | wol->supported = 0; | 1820 | wol->supported = 0; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 9b3c51ab1758..c9baa2aa98cd 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1612 | adapter->eeprom_wol = 0; | 1612 | adapter->eeprom_wol = 0; |
1613 | break; | 1613 | break; |
1614 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1614 | case E1000_DEV_ID_82576_QUAD_COPPER: |
1615 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
1615 | /* if quad port adapter, disable WoL on all but port A */ | 1616 | /* if quad port adapter, disable WoL on all but port A */ |
1616 | if (global_quad_port_a != 0) | 1617 | if (global_quad_port_a != 0) |
1617 | adapter->eeprom_wol = 0; | 1618 | adapter->eeprom_wol = 0; |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index b405a00817c6..12fc0e7ba2ca 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -39,6 +39,8 @@ | |||
39 | #define IXGBE_82599_MC_TBL_SIZE 128 | 39 | #define IXGBE_82599_MC_TBL_SIZE 128 |
40 | #define IXGBE_82599_VFT_TBL_SIZE 128 | 40 | #define IXGBE_82599_VFT_TBL_SIZE 128 |
41 | 41 | ||
42 | void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | ||
43 | void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | ||
42 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 44 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
43 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | 45 | s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, |
44 | ixgbe_link_speed speed, | 46 | ixgbe_link_speed speed, |
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) | |||
69 | if (hw->phy.multispeed_fiber) { | 71 | if (hw->phy.multispeed_fiber) { |
70 | /* Set up dual speed SFP+ support */ | 72 | /* Set up dual speed SFP+ support */ |
71 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; | 73 | mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; |
74 | mac->ops.disable_tx_laser = | ||
75 | &ixgbe_disable_tx_laser_multispeed_fiber; | ||
76 | mac->ops.enable_tx_laser = | ||
77 | &ixgbe_enable_tx_laser_multispeed_fiber; | ||
72 | mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; | 78 | mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; |
73 | } else { | 79 | } else { |
80 | mac->ops.disable_tx_laser = NULL; | ||
81 | mac->ops.enable_tx_laser = NULL; | ||
74 | mac->ops.flap_tx_laser = NULL; | 82 | mac->ops.flap_tx_laser = NULL; |
75 | if ((mac->ops.get_media_type(hw) == | 83 | if ((mac->ops.get_media_type(hw) == |
76 | ixgbe_media_type_backplane) && | 84 | ixgbe_media_type_backplane) && |
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | |||
415 | return status; | 423 | return status; |
416 | } | 424 | } |
417 | 425 | ||
426 | /** | ||
427 | * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser | ||
428 | * @hw: pointer to hardware structure | ||
429 | * | ||
430 | * The base drivers may require better control over SFP+ module | ||
431 | * PHY states. This includes selectively shutting down the Tx | ||
432 | * laser on the PHY, effectively halting physical link. | ||
433 | **/ | ||
434 | void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | ||
435 | { | ||
436 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
437 | |||
438 | /* Disable tx laser; allow 100us to go dark per spec */ | ||
439 | esdp_reg |= IXGBE_ESDP_SDP3; | ||
440 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
441 | IXGBE_WRITE_FLUSH(hw); | ||
442 | udelay(100); | ||
443 | } | ||
444 | |||
445 | /** | ||
446 | * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser | ||
447 | * @hw: pointer to hardware structure | ||
448 | * | ||
449 | * The base drivers may require better control over SFP+ module | ||
450 | * PHY states. This includes selectively turning on the Tx | ||
451 | * laser on the PHY, effectively starting physical link. | ||
452 | **/ | ||
453 | void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | ||
454 | { | ||
455 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
456 | |||
457 | /* Enable tx laser; allow 100ms to light up */ | ||
458 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
459 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
460 | IXGBE_WRITE_FLUSH(hw); | ||
461 | msleep(100); | ||
462 | } | ||
463 | |||
418 | /** | 464 | /** |
419 | * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser | 465 | * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser |
420 | * @hw: pointer to hardware structure | 466 | * @hw: pointer to hardware structure |
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | |||
429 | **/ | 475 | **/ |
430 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | 476 | void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
431 | { | 477 | { |
432 | u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
433 | |||
434 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); | 478 | hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); |
435 | 479 | ||
436 | if (hw->mac.autotry_restart) { | 480 | if (hw->mac.autotry_restart) { |
437 | /* Disable tx laser; allow 100us to go dark per spec */ | 481 | ixgbe_disable_tx_laser_multispeed_fiber(hw); |
438 | esdp_reg |= IXGBE_ESDP_SDP3; | 482 | ixgbe_enable_tx_laser_multispeed_fiber(hw); |
439 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
440 | IXGBE_WRITE_FLUSH(hw); | ||
441 | udelay(100); | ||
442 | |||
443 | /* Enable tx laser; allow 100ms to light up */ | ||
444 | esdp_reg &= ~IXGBE_ESDP_SDP3; | ||
445 | IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | ||
446 | IXGBE_WRITE_FLUSH(hw); | ||
447 | msleep(100); | ||
448 | |||
449 | hw->mac.autotry_restart = false; | 483 | hw->mac.autotry_restart = false; |
450 | } | 484 | } |
451 | } | 485 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 8f677cb86290..6c00ee493a3b 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -2982,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2982 | else | 2982 | else |
2983 | ixgbe_configure_msi_and_legacy(adapter); | 2983 | ixgbe_configure_msi_and_legacy(adapter); |
2984 | 2984 | ||
2985 | /* enable the optics */ | ||
2986 | if (hw->phy.multispeed_fiber) | ||
2987 | hw->mac.ops.enable_tx_laser(hw); | ||
2988 | |||
2985 | clear_bit(__IXGBE_DOWN, &adapter->state); | 2989 | clear_bit(__IXGBE_DOWN, &adapter->state); |
2986 | ixgbe_napi_enable_all(adapter); | 2990 | ixgbe_napi_enable_all(adapter); |
2987 | 2991 | ||
@@ -3243,6 +3247,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3243 | /* signal that we are down to the interrupt handler */ | 3247 | /* signal that we are down to the interrupt handler */ |
3244 | set_bit(__IXGBE_DOWN, &adapter->state); | 3248 | set_bit(__IXGBE_DOWN, &adapter->state); |
3245 | 3249 | ||
3250 | /* power down the optics */ | ||
3251 | if (hw->phy.multispeed_fiber) | ||
3252 | hw->mac.ops.disable_tx_laser(hw); | ||
3253 | |||
3246 | /* disable receive for all VFs and wait one second */ | 3254 | /* disable receive for all VFs and wait one second */ |
3247 | if (adapter->num_vfs) { | 3255 | if (adapter->num_vfs) { |
3248 | /* ping all the active vfs to let them know we are going down */ | 3256 | /* ping all the active vfs to let them know we are going down */ |
@@ -6253,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6253 | goto err_eeprom; | 6261 | goto err_eeprom; |
6254 | } | 6262 | } |
6255 | 6263 | ||
6264 | /* power down the optics */ | ||
6265 | if (hw->phy.multispeed_fiber) | ||
6266 | hw->mac.ops.disable_tx_laser(hw); | ||
6267 | |||
6256 | init_timer(&adapter->watchdog_timer); | 6268 | init_timer(&adapter->watchdog_timer); |
6257 | adapter->watchdog_timer.function = &ixgbe_watchdog; | 6269 | adapter->watchdog_timer.function = &ixgbe_watchdog; |
6258 | adapter->watchdog_timer.data = (unsigned long)adapter; | 6270 | adapter->watchdog_timer.data = (unsigned long)adapter; |
@@ -6400,16 +6412,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6400 | del_timer_sync(&adapter->sfp_timer); | 6412 | del_timer_sync(&adapter->sfp_timer); |
6401 | cancel_work_sync(&adapter->watchdog_task); | 6413 | cancel_work_sync(&adapter->watchdog_task); |
6402 | cancel_work_sync(&adapter->sfp_task); | 6414 | cancel_work_sync(&adapter->sfp_task); |
6403 | if (adapter->hw.phy.multispeed_fiber) { | ||
6404 | struct ixgbe_hw *hw = &adapter->hw; | ||
6405 | /* | ||
6406 | * Restart clause 37 autoneg, disable and re-enable | ||
6407 | * the tx laser, to clear & alert the link partner | ||
6408 | * that it needs to restart autotry | ||
6409 | */ | ||
6410 | hw->mac.autotry_restart = true; | ||
6411 | hw->mac.ops.flap_tx_laser(hw); | ||
6412 | } | ||
6413 | cancel_work_sync(&adapter->multispeed_fiber_task); | 6415 | cancel_work_sync(&adapter->multispeed_fiber_task); |
6414 | cancel_work_sync(&adapter->sfp_config_module_task); | 6416 | cancel_work_sync(&adapter->sfp_config_module_task); |
6415 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 6417 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 4ec6dc1a5b75..534affcc38ca 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -2398,6 +2398,8 @@ struct ixgbe_mac_operations { | |||
2398 | s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); | 2398 | s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); |
2399 | 2399 | ||
2400 | /* Link */ | 2400 | /* Link */ |
2401 | void (*disable_tx_laser)(struct ixgbe_hw *); | ||
2402 | void (*enable_tx_laser)(struct ixgbe_hw *); | ||
2401 | void (*flap_tx_laser)(struct ixgbe_hw *); | 2403 | void (*flap_tx_laser)(struct ixgbe_hw *); |
2402 | s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); | 2404 | s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); |
2403 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); | 2405 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); |
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 13cc1ca261d9..9e9f9b349766 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c | |||
@@ -722,12 +722,14 @@ static void ks8851_tx_work(struct work_struct *work) | |||
722 | txb = skb_dequeue(&ks->txq); | 722 | txb = skb_dequeue(&ks->txq); |
723 | last = skb_queue_empty(&ks->txq); | 723 | last = skb_queue_empty(&ks->txq); |
724 | 724 | ||
725 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); | 725 | if (txb != NULL) { |
726 | ks8851_wrpkt(ks, txb, last); | 726 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); |
727 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); | 727 | ks8851_wrpkt(ks, txb, last); |
728 | ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); | 728 | ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); |
729 | ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); | ||
729 | 730 | ||
730 | ks8851_done_tx(ks, txb); | 731 | ks8851_done_tx(ks, txb); |
732 | } | ||
731 | } | 733 | } |
732 | 734 | ||
733 | mutex_unlock(&ks->lock); | 735 | mutex_unlock(&ks->lock); |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 471887742b02..ecde0876a785 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev, | |||
1690 | if (pause->tx_pause != mgp->pause) | 1690 | if (pause->tx_pause != mgp->pause) |
1691 | return myri10ge_change_pause(mgp, pause->tx_pause); | 1691 | return myri10ge_change_pause(mgp, pause->tx_pause); |
1692 | if (pause->rx_pause != mgp->pause) | 1692 | if (pause->rx_pause != mgp->pause) |
1693 | return myri10ge_change_pause(mgp, pause->tx_pause); | 1693 | return myri10ge_change_pause(mgp, pause->rx_pause); |
1694 | if (pause->autoneg != 0) | 1694 | if (pause->autoneg != 0) |
1695 | return -EINVAL; | 1695 | return -EINVAL; |
1696 | return 0; | 1696 | return 0; |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 3d1d3a7b7ed3..757f87bb1db3 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -781,8 +781,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb, | |||
781 | inw(ioaddr + EL3_STATUS)); | 781 | inw(ioaddr + EL3_STATUS)); |
782 | 782 | ||
783 | spin_lock_irqsave(&lp->window_lock, flags); | 783 | spin_lock_irqsave(&lp->window_lock, flags); |
784 | |||
785 | dev->stats.tx_bytes += skb->len; | ||
786 | |||
787 | /* Put out the doubleword header... */ | ||
784 | outw(skb->len, ioaddr + TX_FIFO); | 788 | outw(skb->len, ioaddr + TX_FIFO); |
785 | outw(0, ioaddr + TX_FIFO); | 789 | outw(0, ioaddr + TX_FIFO); |
790 | /* ... and the packet rounded to a doubleword. */ | ||
786 | outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); | 791 | outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); |
787 | 792 | ||
788 | dev->trans_start = jiffies; | 793 | dev->trans_start = jiffies; |
@@ -1021,8 +1026,6 @@ static void update_stats(struct net_device *dev) | |||
1021 | /* BadSSD */ inb(ioaddr + 12); | 1026 | /* BadSSD */ inb(ioaddr + 12); |
1022 | up = inb(ioaddr + 13); | 1027 | up = inb(ioaddr + 13); |
1023 | 1028 | ||
1024 | dev->stats.tx_bytes += tx + ((up & 0xf0) << 12); | ||
1025 | |||
1026 | EL3WINDOW(1); | 1029 | EL3WINDOW(1); |
1027 | } | 1030 | } |
1028 | 1031 | ||
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index ff7eb9116b6a..ccc553782a0d 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -1608,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev) | |||
1608 | { | 1608 | { |
1609 | unsigned int ioaddr = dev->base_addr; | 1609 | unsigned int ioaddr = dev->base_addr; |
1610 | struct smc_private *smc = netdev_priv(dev); | 1610 | struct smc_private *smc = netdev_priv(dev); |
1611 | u_int multicast_table[ 2 ] = { 0, }; | 1611 | unsigned char multicast_table[8]; |
1612 | unsigned long flags; | 1612 | unsigned long flags; |
1613 | u_short rx_cfg_setting; | 1613 | u_short rx_cfg_setting; |
1614 | int i; | ||
1615 | |||
1616 | memset(multicast_table, 0, sizeof(multicast_table)); | ||
1614 | 1617 | ||
1615 | if (dev->flags & IFF_PROMISC) { | 1618 | if (dev->flags & IFF_PROMISC) { |
1616 | rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; | 1619 | rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; |
@@ -1622,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev) | |||
1622 | 1625 | ||
1623 | netdev_for_each_mc_addr(mc_addr, dev) { | 1626 | netdev_for_each_mc_addr(mc_addr, dev) { |
1624 | u_int position = ether_crc(6, mc_addr->dmi_addr); | 1627 | u_int position = ether_crc(6, mc_addr->dmi_addr); |
1625 | #ifndef final_version /* Verify multicast address. */ | ||
1626 | if ((mc_addr->dmi_addr[0] & 1) == 0) | ||
1627 | continue; | ||
1628 | #endif | ||
1629 | multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); | 1628 | multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); |
1630 | } | 1629 | } |
1631 | } | 1630 | } |
@@ -1635,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev) | |||
1635 | /* Load MC table and Rx setting into the chip without interrupts. */ | 1634 | /* Load MC table and Rx setting into the chip without interrupts. */ |
1636 | spin_lock_irqsave(&smc->lock, flags); | 1635 | spin_lock_irqsave(&smc->lock, flags); |
1637 | SMC_SELECT_BANK(3); | 1636 | SMC_SELECT_BANK(3); |
1638 | outl(multicast_table[0], ioaddr + MULTICAST0); | 1637 | for (i = 0; i < 8; i++) |
1639 | outl(multicast_table[1], ioaddr + MULTICAST4); | 1638 | outb(multicast_table[i], ioaddr + MULTICAST0 + i); |
1640 | SMC_SELECT_BANK(0); | 1639 | SMC_SELECT_BANK(0); |
1641 | outw(rx_cfg_setting, ioaddr + RCR); | 1640 | outw(rx_cfg_setting, ioaddr + RCR); |
1642 | SMC_SELECT_BANK(2); | 1641 | SMC_SELECT_BANK(2); |
@@ -1805,23 +1804,30 @@ static void media_check(u_long arg) | |||
1805 | SMC_SELECT_BANK(1); | 1804 | SMC_SELECT_BANK(1); |
1806 | media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; | 1805 | media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; |
1807 | 1806 | ||
1807 | SMC_SELECT_BANK(saved_bank); | ||
1808 | spin_unlock_irqrestore(&smc->lock, flags); | ||
1809 | |||
1808 | /* Check for pending interrupt with watchdog flag set: with | 1810 | /* Check for pending interrupt with watchdog flag set: with |
1809 | this, we can limp along even if the interrupt is blocked */ | 1811 | this, we can limp along even if the interrupt is blocked */ |
1810 | if (smc->watchdog++ && ((i>>8) & i)) { | 1812 | if (smc->watchdog++ && ((i>>8) & i)) { |
1811 | if (!smc->fast_poll) | 1813 | if (!smc->fast_poll) |
1812 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); | 1814 | printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); |
1815 | local_irq_save(flags); | ||
1813 | smc_interrupt(dev->irq, dev); | 1816 | smc_interrupt(dev->irq, dev); |
1817 | local_irq_restore(flags); | ||
1814 | smc->fast_poll = HZ; | 1818 | smc->fast_poll = HZ; |
1815 | } | 1819 | } |
1816 | if (smc->fast_poll) { | 1820 | if (smc->fast_poll) { |
1817 | smc->fast_poll--; | 1821 | smc->fast_poll--; |
1818 | smc->media.expires = jiffies + HZ/100; | 1822 | smc->media.expires = jiffies + HZ/100; |
1819 | add_timer(&smc->media); | 1823 | add_timer(&smc->media); |
1820 | SMC_SELECT_BANK(saved_bank); | ||
1821 | spin_unlock_irqrestore(&smc->lock, flags); | ||
1822 | return; | 1824 | return; |
1823 | } | 1825 | } |
1824 | 1826 | ||
1827 | spin_lock_irqsave(&smc->lock, flags); | ||
1828 | |||
1829 | saved_bank = inw(ioaddr + BANK_SELECT); | ||
1830 | |||
1825 | if (smc->cfg & CFG_MII_SELECT) { | 1831 | if (smc->cfg & CFG_MII_SELECT) { |
1826 | if (smc->mii_if.phy_id < 0) | 1832 | if (smc->mii_if.phy_id < 0) |
1827 | goto reschedule; | 1833 | goto reschedule; |
@@ -1979,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
1979 | unsigned int ioaddr = dev->base_addr; | 1985 | unsigned int ioaddr = dev->base_addr; |
1980 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 1986 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
1981 | int ret; | 1987 | int ret; |
1988 | unsigned long flags; | ||
1982 | 1989 | ||
1983 | spin_lock_irq(&smc->lock); | 1990 | spin_lock_irqsave(&smc->lock, flags); |
1984 | SMC_SELECT_BANK(3); | 1991 | SMC_SELECT_BANK(3); |
1985 | if (smc->cfg & CFG_MII_SELECT) | 1992 | if (smc->cfg & CFG_MII_SELECT) |
1986 | ret = mii_ethtool_gset(&smc->mii_if, ecmd); | 1993 | ret = mii_ethtool_gset(&smc->mii_if, ecmd); |
1987 | else | 1994 | else |
1988 | ret = smc_netdev_get_ecmd(dev, ecmd); | 1995 | ret = smc_netdev_get_ecmd(dev, ecmd); |
1989 | SMC_SELECT_BANK(saved_bank); | 1996 | SMC_SELECT_BANK(saved_bank); |
1990 | spin_unlock_irq(&smc->lock); | 1997 | spin_unlock_irqrestore(&smc->lock, flags); |
1991 | return ret; | 1998 | return ret; |
1992 | } | 1999 | } |
1993 | 2000 | ||
@@ -1997,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
1997 | unsigned int ioaddr = dev->base_addr; | 2004 | unsigned int ioaddr = dev->base_addr; |
1998 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2005 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
1999 | int ret; | 2006 | int ret; |
2007 | unsigned long flags; | ||
2000 | 2008 | ||
2001 | spin_lock_irq(&smc->lock); | 2009 | spin_lock_irqsave(&smc->lock, flags); |
2002 | SMC_SELECT_BANK(3); | 2010 | SMC_SELECT_BANK(3); |
2003 | if (smc->cfg & CFG_MII_SELECT) | 2011 | if (smc->cfg & CFG_MII_SELECT) |
2004 | ret = mii_ethtool_sset(&smc->mii_if, ecmd); | 2012 | ret = mii_ethtool_sset(&smc->mii_if, ecmd); |
2005 | else | 2013 | else |
2006 | ret = smc_netdev_set_ecmd(dev, ecmd); | 2014 | ret = smc_netdev_set_ecmd(dev, ecmd); |
2007 | SMC_SELECT_BANK(saved_bank); | 2015 | SMC_SELECT_BANK(saved_bank); |
2008 | spin_unlock_irq(&smc->lock); | 2016 | spin_unlock_irqrestore(&smc->lock, flags); |
2009 | return ret; | 2017 | return ret; |
2010 | } | 2018 | } |
2011 | 2019 | ||
@@ -2015,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev) | |||
2015 | unsigned int ioaddr = dev->base_addr; | 2023 | unsigned int ioaddr = dev->base_addr; |
2016 | u16 saved_bank = inw(ioaddr + BANK_SELECT); | 2024 | u16 saved_bank = inw(ioaddr + BANK_SELECT); |
2017 | u32 ret; | 2025 | u32 ret; |
2026 | unsigned long flags; | ||
2018 | 2027 | ||
2019 | spin_lock_irq(&smc->lock); | 2028 | spin_lock_irqsave(&smc->lock, flags); |
2020 | SMC_SELECT_BANK(3); | 2029 | SMC_SELECT_BANK(3); |
2021 | ret = smc_link_ok(dev); | 2030 | ret = smc_link_ok(dev); |
2022 | SMC_SELECT_BANK(saved_bank); | 2031 | SMC_SELECT_BANK(saved_bank); |
2023 | spin_unlock_irq(&smc->lock); | 2032 | spin_unlock_irqrestore(&smc->lock, flags); |
2024 | return ret; | 2033 | return ret; |
2025 | } | 2034 | } |
2026 | 2035 | ||
@@ -2057,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | |||
2057 | int rc = 0; | 2066 | int rc = 0; |
2058 | u16 saved_bank; | 2067 | u16 saved_bank; |
2059 | unsigned int ioaddr = dev->base_addr; | 2068 | unsigned int ioaddr = dev->base_addr; |
2069 | unsigned long flags; | ||
2060 | 2070 | ||
2061 | if (!netif_running(dev)) | 2071 | if (!netif_running(dev)) |
2062 | return -EINVAL; | 2072 | return -EINVAL; |
2063 | 2073 | ||
2064 | spin_lock_irq(&smc->lock); | 2074 | spin_lock_irqsave(&smc->lock, flags); |
2065 | saved_bank = inw(ioaddr + BANK_SELECT); | 2075 | saved_bank = inw(ioaddr + BANK_SELECT); |
2066 | SMC_SELECT_BANK(3); | 2076 | SMC_SELECT_BANK(3); |
2067 | rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); | 2077 | rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); |
2068 | SMC_SELECT_BANK(saved_bank); | 2078 | SMC_SELECT_BANK(saved_bank); |
2069 | spin_unlock_irq(&smc->lock); | 2079 | spin_unlock_irqrestore(&smc->lock, flags); |
2070 | return rc; | 2080 | return rc; |
2071 | } | 2081 | } |
2072 | 2082 | ||
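In the set_rx_mode() hunks above, the hashing itself is unchanged: ether_crc() hashes each multicast address and the top six bits of the CRC select one of 64 filter bits. What changes is the table's storage, from two u_ints to eight bytes, so the byte index derived from the CRC (0-7) stays inside the array, and the hardware table is now loaded with byte-wide outb() writes. A small sketch of the bit placement:

    #include <linux/crc32.h>        /* ether_crc() */
    #include <linux/if_ether.h>

    u8 table[8] = { 0 };
    u32 crc = ether_crc(ETH_ALEN, mc_addr);  /* mc_addr: 6-byte MAC */

    /* crc >> 29: top 3 bits -> byte index 0-7;
     * (crc >> 26) & 7: next 3 bits -> bit within that byte */
    table[crc >> 29] |= 1 << ((crc >> 26) & 7);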
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index a6ef266a2fe2..e73ba455aa20 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c | |||
@@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev) | |||
431 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | 431 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; |
432 | u32 mode = VPORT_MISS_MODE_DROP; | 432 | u32 mode = VPORT_MISS_MODE_DROP; |
433 | 433 | ||
434 | if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) | ||
435 | return; | ||
436 | |||
434 | qlcnic_nic_add_mac(adapter, adapter->mac_addr); | 437 | qlcnic_nic_add_mac(adapter, adapter->mac_addr); |
435 | qlcnic_nic_add_mac(adapter, bcast_addr); | 438 | qlcnic_nic_add_mac(adapter, bcast_addr); |
436 | 439 | ||
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 43afdb6b25e6..0298d8c1dcb6 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -134,7 +134,7 @@ | |||
134 | #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) | 134 | #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) |
135 | #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) | 135 | #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) |
136 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ | 136 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ |
137 | #define MCAST_MAX 4 /* Max number multicast addresses to filter */ | 137 | #define MCAST_MAX 3 /* Max number multicast addresses to filter */ |
138 | 138 | ||
139 | /* Descriptor status */ | 139 | /* Descriptor status */ |
140 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ | 140 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ |
@@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev) | |||
982 | crc >>= 26; | 982 | crc >>= 26; |
983 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | 983 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); |
984 | } | 984 | } |
985 | /* Write the index of the hash table */ | ||
986 | for (i = 0; i < 4; i++) | ||
987 | iowrite16(hash_table[i] << 14, ioaddr + MCR1); | ||
988 | /* Fill the MAC hash tables with their values */ | 985 | /* Fill the MAC hash tables with their values */ |
989 | iowrite16(hash_table[0], ioaddr + MAR0); | 986 | iowrite16(hash_table[0], ioaddr + MAR0); |
990 | iowrite16(hash_table[1], ioaddr + MAR1); | 987 | iowrite16(hash_table[1], ioaddr + MAR1); |
@@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev) | |||
1000 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | 997 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); |
1001 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | 998 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); |
1002 | } else { | 999 | } else { |
1003 | iowrite16(0xffff, ioaddr + MID_0L + 8 * i); | 1000 | iowrite16(0xffff, ioaddr + MID_1L + 8 * i); |
1004 | iowrite16(0xffff, ioaddr + MID_0M + 8 * i); | 1001 | iowrite16(0xffff, ioaddr + MID_1M + 8 * i); |
1005 | iowrite16(0xffff, ioaddr + MID_0H + 8 * i); | 1002 | iowrite16(0xffff, ioaddr + MID_1H + 8 * i); |
1006 | } | 1003 | } |
1007 | i++; | 1004 | i++; |
1008 | } | 1005 | } |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index dbb1f5a1824c..4748c21eb72e 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2759,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, | |||
2759 | { | 2759 | { |
2760 | iounmap(ioaddr); | 2760 | iounmap(ioaddr); |
2761 | pci_release_regions(pdev); | 2761 | pci_release_regions(pdev); |
2762 | pci_clear_mwi(pdev); | ||
2762 | pci_disable_device(pdev); | 2763 | pci_disable_device(pdev); |
2763 | free_netdev(dev); | 2764 | free_netdev(dev); |
2764 | } | 2765 | } |
@@ -2825,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | |||
2825 | spin_lock_irq(&tp->lock); | 2826 | spin_lock_irq(&tp->lock); |
2826 | 2827 | ||
2827 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 2828 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
2829 | |||
2828 | RTL_W32(MAC4, high); | 2830 | RTL_W32(MAC4, high); |
2831 | RTL_R32(MAC4); | ||
2832 | |||
2829 | RTL_W32(MAC0, low); | 2833 | RTL_W32(MAC0, low); |
2834 | RTL_R32(MAC0); | ||
2835 | |||
2830 | RTL_W8(Cfg9346, Cfg9346_Lock); | 2836 | RTL_W8(Cfg9346, Cfg9346_Lock); |
2831 | 2837 | ||
2832 | spin_unlock_irq(&tp->lock); | 2838 | spin_unlock_irq(&tp->lock); |
@@ -3014,9 +3020,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3014 | goto err_out_free_dev_1; | 3020 | goto err_out_free_dev_1; |
3015 | } | 3021 | } |
3016 | 3022 | ||
3017 | rc = pci_set_mwi(pdev); | 3023 | if (pci_set_mwi(pdev) < 0) |
3018 | if (rc < 0) | 3024 | netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); |
3019 | goto err_out_disable_2; | ||
3020 | 3025 | ||
3021 | /* make sure PCI base addr 1 is MMIO */ | 3026 | /* make sure PCI base addr 1 is MMIO */ |
3022 | if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { | 3027 | if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { |
@@ -3024,7 +3029,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3024 | "region #%d not an MMIO resource, aborting\n", | 3029 | "region #%d not an MMIO resource, aborting\n", |
3025 | region); | 3030 | region); |
3026 | rc = -ENODEV; | 3031 | rc = -ENODEV; |
3027 | goto err_out_mwi_3; | 3032 | goto err_out_mwi_2; |
3028 | } | 3033 | } |
3029 | 3034 | ||
3030 | /* check for weird/broken PCI region reporting */ | 3035 | /* check for weird/broken PCI region reporting */ |
@@ -3032,13 +3037,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3032 | netif_err(tp, probe, dev, | 3037 | netif_err(tp, probe, dev, |
3033 | "Invalid PCI region size(s), aborting\n"); | 3038 | "Invalid PCI region size(s), aborting\n"); |
3034 | rc = -ENODEV; | 3039 | rc = -ENODEV; |
3035 | goto err_out_mwi_3; | 3040 | goto err_out_mwi_2; |
3036 | } | 3041 | } |
3037 | 3042 | ||
3038 | rc = pci_request_regions(pdev, MODULENAME); | 3043 | rc = pci_request_regions(pdev, MODULENAME); |
3039 | if (rc < 0) { | 3044 | if (rc < 0) { |
3040 | netif_err(tp, probe, dev, "could not request regions\n"); | 3045 | netif_err(tp, probe, dev, "could not request regions\n"); |
3041 | goto err_out_mwi_3; | 3046 | goto err_out_mwi_2; |
3042 | } | 3047 | } |
3043 | 3048 | ||
3044 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3049 | tp->cp_cmd = PCIMulRW | RxChkSum; |
@@ -3051,7 +3056,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3051 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 3056 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
3052 | if (rc < 0) { | 3057 | if (rc < 0) { |
3053 | netif_err(tp, probe, dev, "DMA configuration failed\n"); | 3058 | netif_err(tp, probe, dev, "DMA configuration failed\n"); |
3054 | goto err_out_free_res_4; | 3059 | goto err_out_free_res_3; |
3055 | } | 3060 | } |
3056 | } | 3061 | } |
3057 | 3062 | ||
@@ -3060,7 +3065,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3060 | if (!ioaddr) { | 3065 | if (!ioaddr) { |
3061 | netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); | 3066 | netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); |
3062 | rc = -EIO; | 3067 | rc = -EIO; |
3063 | goto err_out_free_res_4; | 3068 | goto err_out_free_res_3; |
3064 | } | 3069 | } |
3065 | 3070 | ||
3066 | tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 3071 | tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
@@ -3102,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3102 | if (i == ARRAY_SIZE(rtl_chip_info)) { | 3107 | if (i == ARRAY_SIZE(rtl_chip_info)) { |
3103 | dev_err(&pdev->dev, | 3108 | dev_err(&pdev->dev, |
3104 | "driver bug, MAC version not found in rtl_chip_info\n"); | 3109 | "driver bug, MAC version not found in rtl_chip_info\n"); |
3105 | goto err_out_msi_5; | 3110 | goto err_out_msi_4; |
3106 | } | 3111 | } |
3107 | tp->chipset = i; | 3112 | tp->chipset = i; |
3108 | 3113 | ||
@@ -3167,7 +3172,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3167 | 3172 | ||
3168 | rc = register_netdev(dev); | 3173 | rc = register_netdev(dev); |
3169 | if (rc < 0) | 3174 | if (rc < 0) |
3170 | goto err_out_msi_5; | 3175 | goto err_out_msi_4; |
3171 | 3176 | ||
3172 | pci_set_drvdata(pdev, dev); | 3177 | pci_set_drvdata(pdev, dev); |
3173 | 3178 | ||
@@ -3190,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3190 | out: | 3195 | out: |
3191 | return rc; | 3196 | return rc; |
3192 | 3197 | ||
3193 | err_out_msi_5: | 3198 | err_out_msi_4: |
3194 | rtl_disable_msi(pdev, tp); | 3199 | rtl_disable_msi(pdev, tp); |
3195 | iounmap(ioaddr); | 3200 | iounmap(ioaddr); |
3196 | err_out_free_res_4: | 3201 | err_out_free_res_3: |
3197 | pci_release_regions(pdev); | 3202 | pci_release_regions(pdev); |
3198 | err_out_mwi_3: | 3203 | err_out_mwi_2: |
3199 | pci_clear_mwi(pdev); | 3204 | pci_clear_mwi(pdev); |
3200 | err_out_disable_2: | ||
3201 | pci_disable_device(pdev); | 3205 | pci_disable_device(pdev); |
3202 | err_out_free_dev_1: | 3206 | err_out_free_dev_1: |
3203 | free_netdev(dev); | 3207 | free_netdev(dev); |
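In the rtl_rar_set() hunk above, each MAC-register write is now followed by a read of the same register; on a PCI/PCIe device this is the usual way to flush a posted MMIO write so it reaches the chip before the next step (here the reads sit between the Cfg9346 unlock and re-lock). The generic form of the idiom, with illustrative names:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Write a register and force the posted write out by reading it back. */
    static inline void write_and_flush(void __iomem *ioaddr, int reg, u32 val)
    {
            writel(val, ioaddr + reg);
            (void)readl(ioaddr + reg);      /* read-back flushes the posted write */
    }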
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 6486657c47b8..649a264d6a81 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1861,6 +1861,7 @@ out: | |||
1861 | } | 1861 | } |
1862 | 1862 | ||
1863 | if (disabled) { | 1863 | if (disabled) { |
1864 | dev_close(efx->net_dev); | ||
1864 | EFX_ERR(efx, "has been disabled\n"); | 1865 | EFX_ERR(efx, "has been disabled\n"); |
1865 | efx->state = STATE_DISABLED; | 1866 | efx->state = STATE_DISABLED; |
1866 | } else { | 1867 | } else { |
@@ -1884,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data) | |||
1884 | } | 1885 | } |
1885 | 1886 | ||
1886 | rtnl_lock(); | 1887 | rtnl_lock(); |
1887 | if (efx_reset(efx, efx->reset_pending)) | 1888 | (void)efx_reset(efx, efx->reset_pending); |
1888 | dev_close(efx->net_dev); | ||
1889 | rtnl_unlock(); | 1889 | rtnl_unlock(); |
1890 | } | 1890 | } |
1891 | 1891 | ||
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index d294d66fd600..08278e7302b3 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -1320,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) | |||
1320 | 1320 | ||
1321 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); | 1321 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); |
1322 | 1322 | ||
1323 | falcon_probe_board(efx, board_rev); | 1323 | rc = falcon_probe_board(efx, board_rev); |
1324 | if (rc) | ||
1325 | goto fail2; | ||
1324 | 1326 | ||
1325 | kfree(nvconfig); | 1327 | kfree(nvconfig); |
1326 | return 0; | 1328 | return 0; |
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index 5712fddd72f2..c7a933a3292e 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c | |||
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = { | |||
728 | }, | 728 | }, |
729 | }; | 729 | }; |
730 | 730 | ||
731 | static const struct falcon_board_type falcon_dummy_board = { | 731 | int falcon_probe_board(struct efx_nic *efx, u16 revision_info) |
732 | .init = efx_port_dummy_op_int, | ||
733 | .init_phy = efx_port_dummy_op_void, | ||
734 | .fini = efx_port_dummy_op_void, | ||
735 | .set_id_led = efx_port_dummy_op_set_id_led, | ||
736 | .monitor = efx_port_dummy_op_int, | ||
737 | }; | ||
738 | |||
739 | void falcon_probe_board(struct efx_nic *efx, u16 revision_info) | ||
740 | { | 732 | { |
741 | struct falcon_board *board = falcon_board(efx); | 733 | struct falcon_board *board = falcon_board(efx); |
742 | u8 type_id = FALCON_BOARD_TYPE(revision_info); | 734 | u8 type_id = FALCON_BOARD_TYPE(revision_info); |
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info) | |||
754 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | 746 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) |
755 | ? board->type->ref_model : board->type->gen_type, | 747 | ? board->type->ref_model : board->type->gen_type, |
756 | 'A' + board->major, board->minor); | 748 | 'A' + board->major, board->minor); |
749 | return 0; | ||
757 | } else { | 750 | } else { |
758 | EFX_ERR(efx, "unknown board type %d\n", type_id); | 751 | EFX_ERR(efx, "unknown board type %d\n", type_id); |
759 | board->type = &falcon_dummy_board; | 752 | return -ENODEV; |
760 | } | 753 | } |
761 | } | 754 | } |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 9351c0331a47..3166bafdfbef 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type; | |||
156 | ************************************************************************** | 156 | ************************************************************************** |
157 | */ | 157 | */ |
158 | 158 | ||
159 | extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); | 159 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); |
160 | 160 | ||
161 | /* TX data path */ | 161 | /* TX data path */ |
162 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | 162 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); |
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 38dcc42c4f79..e0c46f59d1f8 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -456,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) | |||
456 | 456 | ||
457 | static void siena_update_nic_stats(struct efx_nic *efx) | 457 | static void siena_update_nic_stats(struct efx_nic *efx) |
458 | { | 458 | { |
459 | while (siena_try_update_nic_stats(efx) == -EAGAIN) | 459 | int retry; |
460 | cpu_relax(); | 460 | |
461 | /* If we're unlucky enough to read statistics during the DMA, wait | ||
462 | * up to 10ms for it to finish (typically takes <500us) */ | ||
463 | for (retry = 0; retry < 100; ++retry) { | ||
464 | if (siena_try_update_nic_stats(efx) == 0) | ||
465 | return; | ||
466 | udelay(100); | ||
467 | } | ||
468 | |||
469 | /* Use the old values instead */ | ||
461 | } | 470 | } |
462 | 471 | ||
463 | static void siena_start_nic_stats(struct efx_nic *efx) | 472 | static void siena_start_nic_stats(struct efx_nic *efx) |
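The siena change above replaces an unbounded cpu_relax() spin with a bounded poll that gives up after roughly 10 ms. A generic sketch of that bounded-poll idiom, under the assumption that the operation reports -EAGAIN while the DMA is still in flight (try_update and ctx are hypothetical names):

#include <linux/delay.h>
#include <linux/errno.h>

/* Retry a -EAGAIN operation for at most ~10 ms instead of spinning forever. */
static int poll_until_ready(int (*try_update)(void *ctx), void *ctx)
{
	int retry;

	for (retry = 0; retry < 100; retry++) {
		int rc = try_update(ctx);

		if (rc != -EAGAIN)
			return rc;		/* done, or a hard error */
		udelay(100);			/* 100 tries x 100 us = 10 ms cap */
	}
	return -ETIMEDOUT;			/* caller keeps the old values */
}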
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index a214a1627e8b..4111a85ec80e 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
1686 | } | 1686 | } |
1687 | pr_info("done!\n"); | 1687 | pr_info("done!\n"); |
1688 | 1688 | ||
1689 | if (!request_mem_region(res->start, (res->end - res->start), | 1689 | if (!request_mem_region(res->start, resource_size(res), |
1690 | pdev->name)) { | 1690 | pdev->name)) { |
1691 | pr_err("%s: ERROR: memory allocation failed" | 1691 | pr_err("%s: ERROR: memory allocation failed" |
1692 | "cannot get the I/O addr 0x%x\n", | 1692 | "cannot get the I/O addr 0x%x\n", |
@@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
1695 | goto out; | 1695 | goto out; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | addr = ioremap(res->start, (res->end - res->start)); | 1698 | addr = ioremap(res->start, resource_size(res)); |
1699 | if (!addr) { | 1699 | if (!addr) { |
1700 | pr_err("%s: ERROR: memory mapping failed \n", __func__); | 1700 | pr_err("%s: ERROR: memory mapping failed\n", __func__); |
1701 | ret = -ENOMEM; | 1701 | ret = -ENOMEM; |
1702 | goto out; | 1702 | goto out; |
1703 | } | 1703 | } |
@@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) | |||
1775 | out: | 1775 | out: |
1776 | if (ret < 0) { | 1776 | if (ret < 0) { |
1777 | platform_set_drvdata(pdev, NULL); | 1777 | platform_set_drvdata(pdev, NULL); |
1778 | release_mem_region(res->start, (res->end - res->start)); | 1778 | release_mem_region(res->start, resource_size(res)); |
1779 | if (addr != NULL) | 1779 | if (addr != NULL) |
1780 | iounmap(addr); | 1780 | iounmap(addr); |
1781 | } | 1781 | } |
@@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev) | |||
1813 | 1813 | ||
1814 | iounmap((void *)ndev->base_addr); | 1814 | iounmap((void *)ndev->base_addr); |
1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1816 | release_mem_region(res->start, (res->end - res->start)); | 1816 | release_mem_region(res->start, resource_size(res)); |
1817 | 1817 | ||
1818 | free_netdev(ndev); | 1818 | free_netdev(ndev); |
1819 | 1819 | ||
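The stmmac hunks above replace the open-coded (res->end - res->start) with resource_size(). The helper returns end - start + 1, which matters because struct resource ranges are inclusive, so the subtraction alone is one byte short. A small illustration with made-up values:

#include <linux/ioport.h>

/* resource_size() is end - start + 1 for an inclusive [start, end] range. */
static resource_size_t example_region_len(void)
{
	struct resource res = {
		.start = 0x1000,
		.end   = 0x1fff,		/* inclusive end of a 4 KiB window */
	};

	/* resource_size(&res) == 0x1000, while res.end - res.start == 0xfff */
	return resource_size(&res);
}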
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 22cf1c446de3..ecc41cffb470 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -8633,6 +8633,7 @@ static int tg3_test_msi(struct tg3 *tp) | |||
8633 | pci_disable_msi(tp->pdev); | 8633 | pci_disable_msi(tp->pdev); |
8634 | 8634 | ||
8635 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 8635 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
8636 | tp->napi[0].irq_vec = tp->pdev->irq; | ||
8636 | 8637 | ||
8637 | err = tg3_request_irq(tp, 0); | 8638 | err = tg3_request_irq(tp, 0); |
8638 | if (err) | 8639 | if (err) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 96c39bddc78c..43265207d463 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
387 | } | 387 | } |
388 | } | 388 | } |
389 | 389 | ||
390 | /* Orphan the skb - required as we might hang on to it | ||
391 | * for an indefinite time. */ | ||
392 | skb_orphan(skb); | ||
393 | |||
390 | /* Enqueue packet */ | 394 | /* Enqueue packet */ |
391 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); | 395 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); |
392 | dev->trans_start = jiffies; | 396 | dev->trans_start = jiffies; |
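The tun change above orphans the skb before queueing it: the packet may sit on the receive queue indefinitely, and without skb_orphan() it would keep charging the sender's socket send-buffer accounting for that whole time. A hedged sketch of the same pattern for a hypothetical driver-owned queue:

#include <linux/skbuff.h>

/* Hypothetical enqueue helper: orphan the skb before parking it on a queue we
 * may hold for an unbounded time, so it stops charging the sender's socket. */
static void park_skb(struct sk_buff_head *queue, struct sk_buff *skb)
{
	skb_orphan(skb);		/* runs skb->destructor and clears skb->sk */
	skb_queue_tail(queue, skb);
}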
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index ba56ce4382d9..5d58abc224f4 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -385,4 +385,26 @@ config USB_CDC_PHONET | |||
385 | cellular modem, as found on most Nokia handsets with the | 385 | cellular modem, as found on most Nokia handsets with the |
386 | "PC suite" USB profile. | 386 | "PC suite" USB profile. |
387 | 387 | ||
388 | config USB_IPHETH | ||
389 | tristate "Apple iPhone USB Ethernet driver" | ||
390 | default n | ||
391 | ---help--- | ||
392 | Module used to share Internet connection (tethering) from your | ||
393 | iPhone (Original, 3G and 3GS) to your system. | ||
394 | Note that you need the userspace libraries and programs required | ||
395 | to pair your device with your system and that understand the iPhone | ||
396 | protocol. | ||
397 | |||
398 | For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver | ||
399 | |||
400 | config USB_SIERRA_NET | ||
401 | tristate "USB-to-WWAN Driver for Sierra Wireless modems" | ||
402 | depends on USB_USBNET | ||
403 | default y | ||
404 | help | ||
405 | Choose this option if you have a Sierra Wireless USB-to-WWAN device. | ||
406 | |||
407 | To compile this driver as a module, choose M here: the | ||
408 | module will be called sierra_net. | ||
409 | |||
388 | endmenu | 410 | endmenu |
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 82ea62955b56..b13a279663ba 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o | |||
23 | obj-$(CONFIG_USB_USBNET) += usbnet.o | 23 | obj-$(CONFIG_USB_USBNET) += usbnet.o |
24 | obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o | 24 | obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o |
25 | obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o | 25 | obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o |
26 | obj-$(CONFIG_USB_IPHETH) += ipheth.o | ||
27 | obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o | ||
26 | 28 | ||
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c8cdb7f30adc..3547cf13d219 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -431,6 +431,7 @@ static const struct driver_info mbm_info = { | |||
431 | .bind = cdc_bind, | 431 | .bind = cdc_bind, |
432 | .unbind = usbnet_cdc_unbind, | 432 | .unbind = usbnet_cdc_unbind, |
433 | .status = cdc_status, | 433 | .status = cdc_status, |
434 | .manage_power = cdc_manage_power, | ||
434 | }; | 435 | }; |
435 | 436 | ||
436 | /*-------------------------------------------------------------------------*/ | 437 | /*-------------------------------------------------------------------------*/ |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c new file mode 100644 index 000000000000..418825d26f90 --- /dev/null +++ b/drivers/net/usb/ipheth.c | |||
@@ -0,0 +1,569 @@ | |||
1 | /* | ||
2 | * ipheth.c - Apple iPhone USB Ethernet driver | ||
3 | * | ||
4 | * Copyright (c) 2009 Diego Giagio <diego@giagio.com> | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Redistribution and use in source and binary forms, with or without | ||
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * 1. Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. Neither the name of GIAGIO.COM nor the names of its contributors | ||
16 | * may be used to endorse or promote products derived from this software | ||
17 | * without specific prior written permission. | ||
18 | * | ||
19 | * Alternatively, provided that this notice is retained in full, this | ||
20 | * software may be distributed under the terms of the GNU General | ||
21 | * Public License ("GPL") version 2, in which case the provisions of the | ||
22 | * GPL apply INSTEAD OF those given above. | ||
23 | * | ||
24 | * The provided data structures and external interfaces from this code | ||
25 | * are not restricted to be used by modules with a GPL compatible license. | ||
26 | * | ||
27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
31 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
32 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
33 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
34 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
35 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
37 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | ||
38 | * DAMAGE. | ||
39 | * | ||
40 | * | ||
41 | * Attention: iPhone device must be paired, otherwise it won't respond to our | ||
42 | * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver | ||
43 | * | ||
44 | */ | ||
45 | |||
46 | #include <linux/kernel.h> | ||
47 | #include <linux/errno.h> | ||
48 | #include <linux/init.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/module.h> | ||
51 | #include <linux/netdevice.h> | ||
52 | #include <linux/etherdevice.h> | ||
53 | #include <linux/ethtool.h> | ||
54 | #include <linux/usb.h> | ||
55 | #include <linux/workqueue.h> | ||
56 | |||
57 | #define USB_VENDOR_APPLE 0x05ac | ||
58 | #define USB_PRODUCT_IPHONE 0x1290 | ||
59 | #define USB_PRODUCT_IPHONE_3G 0x1292 | ||
60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 | ||
61 | |||
62 | #define IPHETH_USBINTF_CLASS 255 | ||
63 | #define IPHETH_USBINTF_SUBCLASS 253 | ||
64 | #define IPHETH_USBINTF_PROTO 1 | ||
65 | |||
66 | #define IPHETH_BUF_SIZE 1516 | ||
67 | #define IPHETH_TX_TIMEOUT (5 * HZ) | ||
68 | |||
69 | #define IPHETH_INTFNUM 2 | ||
70 | #define IPHETH_ALT_INTFNUM 1 | ||
71 | |||
72 | #define IPHETH_CTRL_ENDP 0x00 | ||
73 | #define IPHETH_CTRL_BUF_SIZE 0x40 | ||
74 | #define IPHETH_CTRL_TIMEOUT (5 * HZ) | ||
75 | |||
76 | #define IPHETH_CMD_GET_MACADDR 0x00 | ||
77 | #define IPHETH_CMD_CARRIER_CHECK 0x45 | ||
78 | |||
79 | #define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ) | ||
80 | #define IPHETH_CARRIER_ON 0x04 | ||
81 | |||
82 | static struct usb_device_id ipheth_table[] = { | ||
83 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
84 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, | ||
85 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
86 | IPHETH_USBINTF_PROTO) }, | ||
87 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
88 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G, | ||
89 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
90 | IPHETH_USBINTF_PROTO) }, | ||
91 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
92 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, | ||
93 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
94 | IPHETH_USBINTF_PROTO) }, | ||
95 | { } | ||
96 | }; | ||
97 | MODULE_DEVICE_TABLE(usb, ipheth_table); | ||
98 | |||
99 | struct ipheth_device { | ||
100 | struct usb_device *udev; | ||
101 | struct usb_interface *intf; | ||
102 | struct net_device *net; | ||
103 | struct sk_buff *tx_skb; | ||
104 | struct urb *tx_urb; | ||
105 | struct urb *rx_urb; | ||
106 | unsigned char *tx_buf; | ||
107 | unsigned char *rx_buf; | ||
108 | unsigned char *ctrl_buf; | ||
109 | u8 bulk_in; | ||
110 | u8 bulk_out; | ||
111 | struct delayed_work carrier_work; | ||
112 | }; | ||
113 | |||
114 | static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags); | ||
115 | |||
116 | static int ipheth_alloc_urbs(struct ipheth_device *iphone) | ||
117 | { | ||
118 | struct urb *tx_urb = NULL; | ||
119 | struct urb *rx_urb = NULL; | ||
120 | u8 *tx_buf = NULL; | ||
121 | u8 *rx_buf = NULL; | ||
122 | |||
123 | tx_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
124 | if (tx_urb == NULL) | ||
125 | goto error_nomem; | ||
126 | |||
127 | rx_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
128 | if (rx_urb == NULL) | ||
129 | goto free_tx_urb; | ||
130 | |||
131 | tx_buf = usb_buffer_alloc(iphone->udev, | ||
132 | IPHETH_BUF_SIZE, | ||
133 | GFP_KERNEL, | ||
134 | &tx_urb->transfer_dma); | ||
135 | if (tx_buf == NULL) | ||
136 | goto free_rx_urb; | ||
137 | |||
138 | rx_buf = usb_buffer_alloc(iphone->udev, | ||
139 | IPHETH_BUF_SIZE, | ||
140 | GFP_KERNEL, | ||
141 | &rx_urb->transfer_dma); | ||
142 | if (rx_buf == NULL) | ||
143 | goto free_tx_buf; | ||
144 | |||
145 | |||
146 | iphone->tx_urb = tx_urb; | ||
147 | iphone->rx_urb = rx_urb; | ||
148 | iphone->tx_buf = tx_buf; | ||
149 | iphone->rx_buf = rx_buf; | ||
150 | return 0; | ||
151 | |||
152 | free_tx_buf: | ||
153 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf, | ||
154 | tx_urb->transfer_dma); | ||
155 | free_rx_urb: | ||
156 | usb_free_urb(rx_urb); | ||
157 | free_tx_urb: | ||
158 | usb_free_urb(tx_urb); | ||
159 | error_nomem: | ||
160 | return -ENOMEM; | ||
161 | } | ||
162 | |||
163 | static void ipheth_free_urbs(struct ipheth_device *iphone) | ||
164 | { | ||
165 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, | ||
166 | iphone->rx_urb->transfer_dma); | ||
167 | usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, | ||
168 | iphone->tx_urb->transfer_dma); | ||
169 | usb_free_urb(iphone->rx_urb); | ||
170 | usb_free_urb(iphone->tx_urb); | ||
171 | } | ||
172 | |||
173 | static void ipheth_kill_urbs(struct ipheth_device *dev) | ||
174 | { | ||
175 | usb_kill_urb(dev->tx_urb); | ||
176 | usb_kill_urb(dev->rx_urb); | ||
177 | } | ||
178 | |||
179 | static void ipheth_rcvbulk_callback(struct urb *urb) | ||
180 | { | ||
181 | struct ipheth_device *dev; | ||
182 | struct sk_buff *skb; | ||
183 | int status; | ||
184 | char *buf; | ||
185 | int len; | ||
186 | |||
187 | dev = urb->context; | ||
188 | if (dev == NULL) | ||
189 | return; | ||
190 | |||
191 | status = urb->status; | ||
192 | switch (status) { | ||
193 | case -ENOENT: | ||
194 | case -ECONNRESET: | ||
195 | case -ESHUTDOWN: | ||
196 | return; | ||
197 | case 0: | ||
198 | break; | ||
199 | default: | ||
200 | err("%s: urb status: %d", __func__, urb->status); | ||
201 | return; | ||
202 | } | ||
203 | |||
204 | len = urb->actual_length; | ||
205 | buf = urb->transfer_buffer; | ||
206 | |||
207 | skb = dev_alloc_skb(NET_IP_ALIGN + len); | ||
208 | if (!skb) { | ||
209 | err("%s: dev_alloc_skb: -ENOMEM", __func__); | ||
210 | dev->net->stats.rx_dropped++; | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | skb_reserve(skb, NET_IP_ALIGN); | ||
215 | memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); | ||
216 | skb->dev = dev->net; | ||
217 | skb->protocol = eth_type_trans(skb, dev->net); | ||
218 | |||
219 | dev->net->stats.rx_packets++; | ||
220 | dev->net->stats.rx_bytes += len; | ||
221 | |||
222 | netif_rx(skb); | ||
223 | ipheth_rx_submit(dev, GFP_ATOMIC); | ||
224 | } | ||
225 | |||
226 | static void ipheth_sndbulk_callback(struct urb *urb) | ||
227 | { | ||
228 | struct ipheth_device *dev; | ||
229 | |||
230 | dev = urb->context; | ||
231 | if (dev == NULL) | ||
232 | return; | ||
233 | |||
234 | if (urb->status != 0 && | ||
235 | urb->status != -ENOENT && | ||
236 | urb->status != -ECONNRESET && | ||
237 | urb->status != -ESHUTDOWN) | ||
238 | err("%s: urb status: %d", __func__, urb->status); | ||
239 | |||
240 | dev_kfree_skb_irq(dev->tx_skb); | ||
241 | netif_wake_queue(dev->net); | ||
242 | } | ||
243 | |||
244 | static int ipheth_carrier_set(struct ipheth_device *dev) | ||
245 | { | ||
246 | struct usb_device *udev = dev->udev; | ||
247 | int retval; | ||
248 | |||
249 | retval = usb_control_msg(udev, | ||
250 | usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP), | ||
251 | IPHETH_CMD_CARRIER_CHECK, /* request */ | ||
252 | 0xc0, /* request type */ | ||
253 | 0x00, /* value */ | ||
254 | 0x02, /* index */ | ||
255 | dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE, | ||
256 | IPHETH_CTRL_TIMEOUT); | ||
257 | if (retval < 0) { | ||
258 | err("%s: usb_control_msg: %d", __func__, retval); | ||
259 | return retval; | ||
260 | } | ||
261 | |||
262 | if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) | ||
263 | netif_carrier_on(dev->net); | ||
264 | else | ||
265 | netif_carrier_off(dev->net); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static void ipheth_carrier_check_work(struct work_struct *work) | ||
271 | { | ||
272 | struct ipheth_device *dev = container_of(work, struct ipheth_device, | ||
273 | carrier_work.work); | ||
274 | |||
275 | ipheth_carrier_set(dev); | ||
276 | schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT); | ||
277 | } | ||
278 | |||
279 | static int ipheth_get_macaddr(struct ipheth_device *dev) | ||
280 | { | ||
281 | struct usb_device *udev = dev->udev; | ||
282 | struct net_device *net = dev->net; | ||
283 | int retval; | ||
284 | |||
285 | retval = usb_control_msg(udev, | ||
286 | usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP), | ||
287 | IPHETH_CMD_GET_MACADDR, /* request */ | ||
288 | 0xc0, /* request type */ | ||
289 | 0x00, /* value */ | ||
290 | 0x02, /* index */ | ||
291 | dev->ctrl_buf, | ||
292 | IPHETH_CTRL_BUF_SIZE, | ||
293 | IPHETH_CTRL_TIMEOUT); | ||
294 | if (retval < 0) { | ||
295 | err("%s: usb_control_msg: %d", __func__, retval); | ||
296 | } else if (retval < ETH_ALEN) { | ||
297 | err("%s: usb_control_msg: short packet: %d bytes", | ||
298 | __func__, retval); | ||
299 | retval = -EINVAL; | ||
300 | } else { | ||
301 | memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN); | ||
302 | retval = 0; | ||
303 | } | ||
304 | |||
305 | return retval; | ||
306 | } | ||
307 | |||
308 | static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) | ||
309 | { | ||
310 | struct usb_device *udev = dev->udev; | ||
311 | int retval; | ||
312 | |||
313 | usb_fill_bulk_urb(dev->rx_urb, udev, | ||
314 | usb_rcvbulkpipe(udev, dev->bulk_in), | ||
315 | dev->rx_buf, IPHETH_BUF_SIZE, | ||
316 | ipheth_rcvbulk_callback, | ||
317 | dev); | ||
318 | dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
319 | |||
320 | retval = usb_submit_urb(dev->rx_urb, mem_flags); | ||
321 | if (retval) | ||
322 | err("%s: usb_submit_urb: %d", __func__, retval); | ||
323 | return retval; | ||
324 | } | ||
325 | |||
326 | static int ipheth_open(struct net_device *net) | ||
327 | { | ||
328 | struct ipheth_device *dev = netdev_priv(net); | ||
329 | struct usb_device *udev = dev->udev; | ||
330 | int retval = 0; | ||
331 | |||
332 | usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM); | ||
333 | |||
334 | retval = ipheth_carrier_set(dev); | ||
335 | if (retval) | ||
336 | return retval; | ||
337 | |||
338 | retval = ipheth_rx_submit(dev, GFP_KERNEL); | ||
339 | if (retval) | ||
340 | return retval; | ||
341 | |||
342 | schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT); | ||
343 | netif_start_queue(net); | ||
344 | return retval; | ||
345 | } | ||
346 | |||
347 | static int ipheth_close(struct net_device *net) | ||
348 | { | ||
349 | struct ipheth_device *dev = netdev_priv(net); | ||
350 | |||
351 | cancel_delayed_work_sync(&dev->carrier_work); | ||
352 | netif_stop_queue(net); | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int ipheth_tx(struct sk_buff *skb, struct net_device *net) | ||
357 | { | ||
358 | struct ipheth_device *dev = netdev_priv(net); | ||
359 | struct usb_device *udev = dev->udev; | ||
360 | int retval; | ||
361 | |||
362 | /* Paranoid */ | ||
363 | if (skb->len > IPHETH_BUF_SIZE) { | ||
364 | WARN(1, "%s: skb too large: %d bytes", __func__, skb->len); | ||
365 | dev->net->stats.tx_dropped++; | ||
366 | dev_kfree_skb_irq(skb); | ||
367 | return NETDEV_TX_OK; | ||
368 | } | ||
369 | |||
370 | memcpy(dev->tx_buf, skb->data, skb->len); | ||
371 | if (skb->len < IPHETH_BUF_SIZE) | ||
372 | memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len); | ||
373 | |||
374 | usb_fill_bulk_urb(dev->tx_urb, udev, | ||
375 | usb_sndbulkpipe(udev, dev->bulk_out), | ||
376 | dev->tx_buf, IPHETH_BUF_SIZE, | ||
377 | ipheth_sndbulk_callback, | ||
378 | dev); | ||
379 | dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
380 | |||
381 | retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC); | ||
382 | if (retval) { | ||
383 | err("%s: usb_submit_urb: %d", __func__, retval); | ||
384 | dev->net->stats.tx_errors++; | ||
385 | dev_kfree_skb_irq(skb); | ||
386 | } else { | ||
387 | dev->tx_skb = skb; | ||
388 | |||
389 | dev->net->stats.tx_packets++; | ||
390 | dev->net->stats.tx_bytes += skb->len; | ||
391 | netif_stop_queue(net); | ||
392 | } | ||
393 | |||
394 | return NETDEV_TX_OK; | ||
395 | } | ||
396 | |||
397 | static void ipheth_tx_timeout(struct net_device *net) | ||
398 | { | ||
399 | struct ipheth_device *dev = netdev_priv(net); | ||
400 | |||
401 | err("%s: TX timeout", __func__); | ||
402 | dev->net->stats.tx_errors++; | ||
403 | usb_unlink_urb(dev->tx_urb); | ||
404 | } | ||
405 | |||
406 | static struct net_device_stats *ipheth_stats(struct net_device *net) | ||
407 | { | ||
408 | struct ipheth_device *dev = netdev_priv(net); | ||
409 | return &dev->net->stats; | ||
410 | } | ||
411 | |||
412 | static u32 ipheth_ethtool_op_get_link(struct net_device *net) | ||
413 | { | ||
414 | struct ipheth_device *dev = netdev_priv(net); | ||
415 | return netif_carrier_ok(dev->net); | ||
416 | } | ||
417 | |||
418 | static struct ethtool_ops ops = { | ||
419 | .get_link = ipheth_ethtool_op_get_link | ||
420 | }; | ||
421 | |||
422 | static const struct net_device_ops ipheth_netdev_ops = { | ||
423 | .ndo_open = &ipheth_open, | ||
424 | .ndo_stop = &ipheth_close, | ||
425 | .ndo_start_xmit = &ipheth_tx, | ||
426 | .ndo_tx_timeout = &ipheth_tx_timeout, | ||
427 | .ndo_get_stats = &ipheth_stats, | ||
428 | }; | ||
429 | |||
430 | static struct device_type ipheth_type = { | ||
431 | .name = "wwan", | ||
432 | }; | ||
433 | |||
434 | static int ipheth_probe(struct usb_interface *intf, | ||
435 | const struct usb_device_id *id) | ||
436 | { | ||
437 | struct usb_device *udev = interface_to_usbdev(intf); | ||
438 | struct usb_host_interface *hintf; | ||
439 | struct usb_endpoint_descriptor *endp; | ||
440 | struct ipheth_device *dev; | ||
441 | struct net_device *netdev; | ||
442 | int i; | ||
443 | int retval; | ||
444 | |||
445 | netdev = alloc_etherdev(sizeof(struct ipheth_device)); | ||
446 | if (!netdev) | ||
447 | return -ENOMEM; | ||
448 | |||
449 | netdev->netdev_ops = &ipheth_netdev_ops; | ||
450 | netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; | ||
451 | strcpy(netdev->name, "wwan%d"); | ||
452 | |||
453 | dev = netdev_priv(netdev); | ||
454 | dev->udev = udev; | ||
455 | dev->net = netdev; | ||
456 | dev->intf = intf; | ||
457 | |||
458 | /* Set up endpoints */ | ||
459 | hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM); | ||
460 | if (hintf == NULL) { | ||
461 | retval = -ENODEV; | ||
462 | err("Unable to find alternate settings interface"); | ||
463 | goto err_endpoints; | ||
464 | } | ||
465 | |||
466 | for (i = 0; i < hintf->desc.bNumEndpoints; i++) { | ||
467 | endp = &hintf->endpoint[i].desc; | ||
468 | if (usb_endpoint_is_bulk_in(endp)) | ||
469 | dev->bulk_in = endp->bEndpointAddress; | ||
470 | else if (usb_endpoint_is_bulk_out(endp)) | ||
471 | dev->bulk_out = endp->bEndpointAddress; | ||
472 | } | ||
473 | if (!(dev->bulk_in && dev->bulk_out)) { | ||
474 | retval = -ENODEV; | ||
475 | err("Unable to find endpoints"); | ||
476 | goto err_endpoints; | ||
477 | } | ||
478 | |||
479 | dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL); | ||
480 | if (dev->ctrl_buf == NULL) { | ||
481 | retval = -ENOMEM; | ||
482 | goto err_alloc_ctrl_buf; | ||
483 | } | ||
484 | |||
485 | retval = ipheth_get_macaddr(dev); | ||
486 | if (retval) | ||
487 | goto err_get_macaddr; | ||
488 | |||
489 | INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work); | ||
490 | |||
491 | retval = ipheth_alloc_urbs(dev); | ||
492 | if (retval) { | ||
493 | err("error allocating urbs: %d", retval); | ||
494 | goto err_alloc_urbs; | ||
495 | } | ||
496 | |||
497 | usb_set_intfdata(intf, dev); | ||
498 | |||
499 | SET_NETDEV_DEV(netdev, &intf->dev); | ||
500 | SET_ETHTOOL_OPS(netdev, &ops); | ||
501 | SET_NETDEV_DEVTYPE(netdev, &ipheth_type); | ||
502 | |||
503 | retval = register_netdev(netdev); | ||
504 | if (retval) { | ||
505 | err("error registering netdev: %d", retval); | ||
506 | retval = -EIO; | ||
507 | goto err_register_netdev; | ||
508 | } | ||
509 | |||
510 | dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n"); | ||
511 | return 0; | ||
512 | |||
513 | err_register_netdev: | ||
514 | ipheth_free_urbs(dev); | ||
515 | err_alloc_urbs: | ||
516 | err_get_macaddr: | ||
517 | err_alloc_ctrl_buf: | ||
518 | kfree(dev->ctrl_buf); | ||
519 | err_endpoints: | ||
520 | free_netdev(netdev); | ||
521 | return retval; | ||
522 | } | ||
523 | |||
524 | static void ipheth_disconnect(struct usb_interface *intf) | ||
525 | { | ||
526 | struct ipheth_device *dev; | ||
527 | |||
528 | dev = usb_get_intfdata(intf); | ||
529 | if (dev != NULL) { | ||
530 | unregister_netdev(dev->net); | ||
531 | ipheth_kill_urbs(dev); | ||
532 | ipheth_free_urbs(dev); | ||
533 | kfree(dev->ctrl_buf); | ||
534 | free_netdev(dev->net); | ||
535 | } | ||
536 | usb_set_intfdata(intf, NULL); | ||
537 | dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n"); | ||
538 | } | ||
539 | |||
540 | static struct usb_driver ipheth_driver = { | ||
541 | .name = "ipheth", | ||
542 | .probe = ipheth_probe, | ||
543 | .disconnect = ipheth_disconnect, | ||
544 | .id_table = ipheth_table, | ||
545 | }; | ||
546 | |||
547 | static int __init ipheth_init(void) | ||
548 | { | ||
549 | int retval; | ||
550 | |||
551 | retval = usb_register(&ipheth_driver); | ||
552 | if (retval) { | ||
553 | err("usb_register failed: %d", retval); | ||
554 | return retval; | ||
555 | } | ||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | static void __exit ipheth_exit(void) | ||
560 | { | ||
561 | usb_deregister(&ipheth_driver); | ||
562 | } | ||
563 | |||
564 | module_init(ipheth_init); | ||
565 | module_exit(ipheth_exit); | ||
566 | |||
567 | MODULE_AUTHOR("Diego Giagio <diego@giagio.com>"); | ||
568 | MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver"); | ||
569 | MODULE_LICENSE("Dual BSD/GPL"); | ||
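The new ipheth driver keeps exactly one TX URB in flight: ipheth_tx() copies the frame into the preallocated DMA buffer, submits the bulk URB and stops the queue, and ipheth_sndbulk_callback() frees the skb and wakes the queue again. A minimal sketch of that flow-control handshake under assumed names (my_dev and its fields are hypothetical, not the driver's exact code; tx_urb is assumed to be already prepared with usb_fill_bulk_urb()):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

struct my_dev {				/* hypothetical private data */
	struct net_device *net;
	struct urb *tx_urb;
	struct sk_buff *tx_skb;
	u8 *tx_buf;
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct my_dev *dev = netdev_priv(net);

	memcpy(dev->tx_buf, skb->data, skb->len);	/* one preallocated DMA buffer */
	if (usb_submit_urb(dev->tx_urb, GFP_ATOMIC)) {
		net->stats.tx_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->tx_skb = skb;			/* freed by the completion */
		netif_stop_queue(net);			/* only one URB in flight */
	}
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct urb *urb)		/* completion, interrupt context */
{
	struct my_dev *dev = urb->context;

	dev_kfree_skb_irq(dev->tx_skb);
	netif_wake_queue(dev->net);			/* ready for the next frame */
}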
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 52671ea043a7..c4c334d9770f 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = { | |||
145 | { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ | 145 | { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ |
146 | { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ | 146 | { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ |
147 | { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ | 147 | { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ |
148 | { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ | ||
148 | { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ | 149 | { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ |
149 | { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ | 150 | { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ |
150 | { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ | 151 | { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c new file mode 100644 index 000000000000..a44f9e0ea098 --- /dev/null +++ b/drivers/net/usb/sierra_net.c | |||
@@ -0,0 +1,1001 @@ | |||
1 | /* | ||
2 | * USB-to-WWAN Driver for Sierra Wireless modems | ||
3 | * | ||
4 | * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer | ||
5 | * <linux@sierrawireless.com> | ||
6 | * | ||
7 | * Portions of this based on the cdc_ether driver by David Brownell (2003-2005) | ||
8 | * and Ole Andre Vadla Ravnas (ActiveSync) (2006). | ||
9 | * | ||
10 | * IMPORTANT DISCLAIMER: This driver is not commercially supported by | ||
11 | * Sierra Wireless. Use at your own risk. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
26 | */ | ||
27 | |||
28 | #define DRIVER_VERSION "v.2.0" | ||
29 | #define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer" | ||
30 | #define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems" | ||
31 | static const char driver_name[] = "sierra_net"; | ||
32 | |||
33 | /* if defined, debug messages are enabled */ | ||
34 | /*#define DEBUG*/ | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/ethtool.h> | ||
39 | #include <linux/mii.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/timer.h> | ||
42 | #include <linux/usb.h> | ||
43 | #include <linux/usb/cdc.h> | ||
44 | #include <net/ip.h> | ||
45 | #include <net/udp.h> | ||
46 | #include <asm/unaligned.h> | ||
47 | #include <linux/usb/usbnet.h> | ||
48 | |||
49 | #define SWI_USB_REQUEST_GET_FW_ATTR 0x06 | ||
50 | #define SWI_GET_FW_ATTR_MASK 0x08 | ||
51 | |||
52 | /* atomic counter partially included in MAC address to make sure 2 devices | ||
53 | * do not end up with the same MAC - concept breaks in case of > 255 ifaces | ||
54 | */ | ||
55 | static atomic_t iface_counter = ATOMIC_INIT(0); | ||
56 | |||
57 | /* | ||
58 | * SYNC Timer Delay definition used to set the expiry time | ||
59 | */ | ||
60 | #define SIERRA_NET_SYNCDELAY (2*HZ) | ||
61 | |||
62 | /* Max. MTU supported. The modem buffers are limited to 1500 */ | ||
63 | #define SIERRA_NET_MAX_SUPPORTED_MTU 1500 | ||
64 | |||
65 | /* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control | ||
66 | * message reception ... and thus the max. received packet. | ||
67 | * (May be the cause for parse_hip returning -EINVAL) | ||
68 | */ | ||
69 | #define SIERRA_NET_USBCTL_BUF_LEN 1024 | ||
70 | |||
71 | /* list of interface numbers - used for constructing interface lists */ | ||
72 | struct sierra_net_iface_info { | ||
73 | const u32 infolen; /* number of interface numbers on list */ | ||
74 | const u8 *ifaceinfo; /* pointer to the array holding the numbers */ | ||
75 | }; | ||
76 | |||
77 | struct sierra_net_info_data { | ||
78 | u16 rx_urb_size; | ||
79 | struct sierra_net_iface_info whitelist; | ||
80 | }; | ||
81 | |||
82 | /* Private data structure */ | ||
83 | struct sierra_net_data { | ||
84 | |||
85 | u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ | ||
86 | |||
87 | u16 link_up; /* air link up or down */ | ||
88 | u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ | ||
89 | |||
90 | u8 sync_msg[4]; /* SYNC message */ | ||
91 | u8 shdwn_msg[4]; /* Shutdown message */ | ||
92 | |||
93 | /* Backpointer to the container */ | ||
94 | struct usbnet *usbnet; | ||
95 | |||
96 | u8 ifnum; /* interface number */ | ||
97 | |||
98 | /* Bit masks, must be a power of 2 */ | ||
99 | #define SIERRA_NET_EVENT_RESP_AVAIL 0x01 | ||
100 | #define SIERRA_NET_TIMER_EXPIRY 0x02 | ||
101 | unsigned long kevent_flags; | ||
102 | struct work_struct sierra_net_kevent; | ||
103 | struct timer_list sync_timer; /* For retrying SYNC sequence */ | ||
104 | }; | ||
105 | |||
106 | struct param { | ||
107 | int is_present; | ||
108 | union { | ||
109 | void *ptr; | ||
110 | u32 dword; | ||
111 | u16 word; | ||
112 | u8 byte; | ||
113 | }; | ||
114 | }; | ||
115 | |||
116 | /* HIP message type */ | ||
117 | #define SIERRA_NET_HIP_EXTENDEDID 0x7F | ||
118 | #define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */ | ||
119 | #define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */ | ||
120 | #define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */ | ||
121 | #define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */ | ||
122 | |||
123 | #define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202 | ||
124 | #define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002 | ||
125 | |||
126 | /* 3G UMTS Link Sense Indication definitions */ | ||
127 | #define SIERRA_NET_HIP_LSI_UMTSID 0x78 | ||
128 | |||
129 | /* Reverse Channel Grant Indication HIP message */ | ||
130 | #define SIERRA_NET_HIP_RCGI 0x64 | ||
131 | |||
132 | /* LSI Protocol types */ | ||
133 | #define SIERRA_NET_PROTOCOL_UMTS 0x01 | ||
134 | /* LSI Coverage */ | ||
135 | #define SIERRA_NET_COVERAGE_NONE 0x00 | ||
136 | #define SIERRA_NET_COVERAGE_NOPACKET 0x01 | ||
137 | |||
138 | /* LSI Session */ | ||
139 | #define SIERRA_NET_SESSION_IDLE 0x00 | ||
140 | /* LSI Link types */ | ||
141 | #define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 | ||
142 | |||
143 | struct lsi_umts { | ||
144 | u8 protocol; | ||
145 | u8 unused1; | ||
146 | __be16 length; | ||
147 | /* eventually use a union for the rest - assume umts for now */ | ||
148 | u8 coverage; | ||
149 | u8 unused2[41]; | ||
150 | u8 session_state; | ||
151 | u8 unused3[33]; | ||
152 | u8 link_type; | ||
153 | u8 pdp_addr_len; /* NW-supplied PDP address len */ | ||
154 | u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */ | ||
155 | u8 unused4[23]; | ||
156 | u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */ | ||
157 | u8 dns1_addr[16]; /* NW-supplied 1st DNS address */ | ||
158 | u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */ | ||
159 | u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/ | ||
160 | u8 wins1_addr_len; /* NW-supplied 1st Wins address len */ | ||
161 | u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/ | ||
162 | u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */ | ||
163 | u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */ | ||
164 | u8 unused5[4]; | ||
165 | u8 gw_addr_len; /* NW-supplied GW address len */ | ||
166 | u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ | ||
167 | u8 reserved[8]; | ||
168 | } __attribute__ ((packed)); | ||
169 | |||
170 | #define SIERRA_NET_LSI_COMMON_LEN 4 | ||
171 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) | ||
172 | #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ | ||
173 | (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) | ||
174 | |||
175 | /* Forward definitions */ | ||
176 | static void sierra_sync_timer(unsigned long syncdata); | ||
177 | static int sierra_net_change_mtu(struct net_device *net, int new_mtu); | ||
178 | |||
179 | /* Our own net device operations structure */ | ||
180 | static const struct net_device_ops sierra_net_device_ops = { | ||
181 | .ndo_open = usbnet_open, | ||
182 | .ndo_stop = usbnet_stop, | ||
183 | .ndo_start_xmit = usbnet_start_xmit, | ||
184 | .ndo_tx_timeout = usbnet_tx_timeout, | ||
185 | .ndo_change_mtu = sierra_net_change_mtu, | ||
186 | .ndo_set_mac_address = eth_mac_addr, | ||
187 | .ndo_validate_addr = eth_validate_addr, | ||
188 | }; | ||
189 | |||
190 | /* get private data associated with passed in usbnet device */ | ||
191 | static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev) | ||
192 | { | ||
193 | return (struct sierra_net_data *)dev->data[0]; | ||
194 | } | ||
195 | |||
196 | /* set private data associated with passed in usbnet device */ | ||
197 | static inline void sierra_net_set_private(struct usbnet *dev, | ||
198 | struct sierra_net_data *priv) | ||
199 | { | ||
200 | dev->data[0] = (unsigned long)priv; | ||
201 | } | ||
202 | |||
203 | /* is packet IPv4 */ | ||
204 | static inline int is_ip(struct sk_buff *skb) | ||
205 | { | ||
206 | return (skb->protocol == cpu_to_be16(ETH_P_IP)); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * check passed in packet and make sure that: | ||
211 | * - it is linear (no scatter/gather) | ||
212 | * - it is ethernet (mac_header properly set) | ||
213 | */ | ||
214 | static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev) | ||
215 | { | ||
216 | skb_reset_mac_header(skb); /* ethernet header */ | ||
217 | |||
218 | if (skb_is_nonlinear(skb)) { | ||
219 | netdev_err(dev->net, "Non-linear buffer - dropping\n"); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | if (!pskb_may_pull(skb, ETH_HLEN)) | ||
224 | return 0; | ||
225 | skb->protocol = eth_hdr(skb)->h_proto; | ||
226 | |||
227 | return 1; | ||
228 | } | ||
229 | |||
230 | static const u8 *save16bit(struct param *p, const u8 *datap) | ||
231 | { | ||
232 | p->is_present = 1; | ||
233 | p->word = get_unaligned_be16(datap); | ||
234 | return datap + sizeof(p->word); | ||
235 | } | ||
236 | |||
237 | static const u8 *save8bit(struct param *p, const u8 *datap) | ||
238 | { | ||
239 | p->is_present = 1; | ||
240 | p->byte = *datap; | ||
241 | return datap + sizeof(p->byte); | ||
242 | } | ||
243 | |||
244 | /*----------------------------------------------------------------------------* | ||
245 | * BEGIN HIP * | ||
246 | *----------------------------------------------------------------------------*/ | ||
247 | /* HIP header */ | ||
248 | #define SIERRA_NET_HIP_HDR_LEN 4 | ||
249 | /* Extended HIP header */ | ||
250 | #define SIERRA_NET_HIP_EXT_HDR_LEN 6 | ||
251 | |||
252 | struct hip_hdr { | ||
253 | int hdrlen; | ||
254 | struct param payload_len; | ||
255 | struct param msgid; | ||
256 | struct param msgspecific; | ||
257 | struct param extmsgid; | ||
258 | }; | ||
259 | |||
260 | static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh) | ||
261 | { | ||
262 | const u8 *curp = buf; | ||
263 | int padded; | ||
264 | |||
265 | if (buflen < SIERRA_NET_HIP_HDR_LEN) | ||
266 | return -EPROTO; | ||
267 | |||
268 | curp = save16bit(&hh->payload_len, curp); | ||
269 | curp = save8bit(&hh->msgid, curp); | ||
270 | curp = save8bit(&hh->msgspecific, curp); | ||
271 | |||
272 | padded = hh->msgid.byte & 0x80; | ||
273 | hh->msgid.byte &= 0x7F; /* 7 bits */ | ||
274 | |||
275 | hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID); | ||
276 | if (hh->extmsgid.is_present) { | ||
277 | if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN) | ||
278 | return -EPROTO; | ||
279 | |||
280 | hh->payload_len.word &= 0x3FFF; /* 14 bits */ | ||
281 | |||
282 | curp = save16bit(&hh->extmsgid, curp); | ||
283 | hh->extmsgid.word &= 0x03FF; /* 10 bits */ | ||
284 | |||
285 | hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN; | ||
286 | } else { | ||
287 | hh->payload_len.word &= 0x07FF; /* 11 bits */ | ||
288 | hh->hdrlen = SIERRA_NET_HIP_HDR_LEN; | ||
289 | } | ||
290 | |||
291 | if (padded) { | ||
292 | hh->hdrlen++; | ||
293 | hh->payload_len.word--; | ||
294 | } | ||
295 | |||
296 | /* if real packet shorter than the claimed length */ | ||
297 | if (buflen < (hh->hdrlen + hh->payload_len.word)) | ||
298 | return -EINVAL; | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void build_hip(u8 *buf, const u16 payloadlen, | ||
304 | struct sierra_net_data *priv) | ||
305 | { | ||
306 | /* the following doesn't have the full functionality. We | ||
307 | * currently build only one kind of header, so it is faster this way | ||
308 | */ | ||
309 | put_unaligned_be16(payloadlen, buf); | ||
310 | memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template)); | ||
311 | } | ||
312 | /*----------------------------------------------------------------------------* | ||
313 | * END HIP * | ||
314 | *----------------------------------------------------------------------------*/ | ||
315 | |||
316 | static int sierra_net_send_cmd(struct usbnet *dev, | ||
317 | u8 *cmd, int cmdlen, const char * cmd_name) | ||
318 | { | ||
319 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
320 | int status; | ||
321 | |||
322 | status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | ||
323 | USB_CDC_SEND_ENCAPSULATED_COMMAND, | ||
324 | USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, | ||
325 | priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT); | ||
326 | |||
327 | if (status != cmdlen && status != -ENODEV) | ||
328 | netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status); | ||
329 | |||
330 | return status; | ||
331 | } | ||
332 | |||
333 | static int sierra_net_send_sync(struct usbnet *dev) | ||
334 | { | ||
335 | int status; | ||
336 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
337 | |||
338 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
339 | |||
340 | status = sierra_net_send_cmd(dev, priv->sync_msg, | ||
341 | sizeof(priv->sync_msg), "SYNC"); | ||
342 | |||
343 | return status; | ||
344 | } | ||
345 | |||
346 | static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix) | ||
347 | { | ||
348 | dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix); | ||
349 | priv->tx_hdr_template[0] = 0x3F; | ||
350 | priv->tx_hdr_template[1] = ctx_ix; | ||
351 | *((u16 *)&priv->tx_hdr_template[2]) = | ||
352 | cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID); | ||
353 | } | ||
354 | |||
355 | static inline int sierra_net_is_valid_addrlen(u8 len) | ||
356 | { | ||
357 | return (len == sizeof(struct in_addr)); | ||
358 | } | ||
359 | |||
360 | static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) | ||
361 | { | ||
362 | struct lsi_umts *lsi = (struct lsi_umts *)data; | ||
363 | |||
364 | if (datalen < sizeof(struct lsi_umts)) { | ||
365 | netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", | ||
366 | __func__, datalen, | ||
367 | sizeof(struct lsi_umts)); | ||
368 | return -1; | ||
369 | } | ||
370 | |||
371 | if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { | ||
372 | netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", | ||
373 | __func__, be16_to_cpu(lsi->length), | ||
374 | (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); | ||
375 | return -1; | ||
376 | } | ||
377 | |||
378 | /* Validate the protocol - only support UMTS for now */ | ||
379 | if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { | ||
380 | netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", | ||
381 | lsi->protocol); | ||
382 | return -1; | ||
383 | } | ||
384 | |||
385 | /* Validate the link type */ | ||
386 | if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { | ||
387 | netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | ||
388 | lsi->link_type); | ||
389 | return -1; | ||
390 | } | ||
391 | |||
392 | /* Validate the coverage */ | ||
393 | if (lsi->coverage == SIERRA_NET_COVERAGE_NONE | ||
394 | || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { | ||
395 | netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | /* Validate the session state */ | ||
400 | if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { | ||
401 | netdev_err(dev->net, "Session idle, 0x%02x\n", | ||
402 | lsi->session_state); | ||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | /* Set link_sense true */ | ||
407 | return 1; | ||
408 | } | ||
409 | |||
410 | static void sierra_net_handle_lsi(struct usbnet *dev, char *data, | ||
411 | struct hip_hdr *hh) | ||
412 | { | ||
413 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
414 | int link_up; | ||
415 | |||
416 | link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen, | ||
417 | hh->payload_len.word); | ||
418 | if (link_up < 0) { | ||
419 | netdev_err(dev->net, "Invalid LSI\n"); | ||
420 | return; | ||
421 | } | ||
422 | if (link_up) { | ||
423 | sierra_net_set_ctx_index(priv, hh->msgspecific.byte); | ||
424 | priv->link_up = 1; | ||
425 | netif_carrier_on(dev->net); | ||
426 | } else { | ||
427 | priv->link_up = 0; | ||
428 | netif_carrier_off(dev->net); | ||
429 | } | ||
430 | } | ||
431 | |||
432 | static void sierra_net_dosync(struct usbnet *dev) | ||
433 | { | ||
434 | int status; | ||
435 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
436 | |||
437 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
438 | |||
439 | /* tell modem we are ready */ | ||
440 | status = sierra_net_send_sync(dev); | ||
441 | if (status < 0) | ||
442 | netdev_err(dev->net, | ||
443 | "Send SYNC failed, status %d\n", status); | ||
444 | status = sierra_net_send_sync(dev); | ||
445 | if (status < 0) | ||
446 | netdev_err(dev->net, | ||
447 | "Send SYNC failed, status %d\n", status); | ||
448 | |||
449 | /* Now, start a timer and make sure we get the Restart Indication */ | ||
450 | priv->sync_timer.function = sierra_sync_timer; | ||
451 | priv->sync_timer.data = (unsigned long) dev; | ||
452 | priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY; | ||
453 | add_timer(&priv->sync_timer); | ||
454 | } | ||
455 | |||
456 | static void sierra_net_kevent(struct work_struct *work) | ||
457 | { | ||
458 | struct sierra_net_data *priv = | ||
459 | container_of(work, struct sierra_net_data, sierra_net_kevent); | ||
460 | struct usbnet *dev = priv->usbnet; | ||
461 | int len; | ||
462 | int err; | ||
463 | u8 *buf; | ||
464 | u8 ifnum; | ||
465 | |||
466 | if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) { | ||
467 | clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags); | ||
468 | |||
469 | /* Query the modem for the LSI message */ | ||
470 | buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); | ||
471 | if (!buf) { | ||
472 | netdev_err(dev->net, | ||
473 | "failed to allocate buf for LS msg\n"); | ||
474 | return; | ||
475 | } | ||
476 | ifnum = priv->ifnum; | ||
477 | len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | ||
478 | USB_CDC_GET_ENCAPSULATED_RESPONSE, | ||
479 | USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE, | ||
480 | 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN, | ||
481 | USB_CTRL_SET_TIMEOUT); | ||
482 | |||
483 | if (len < 0) { | ||
484 | netdev_err(dev->net, | ||
485 | "usb_control_msg failed, status %d\n", len); | ||
486 | } else { | ||
487 | struct hip_hdr hh; | ||
488 | |||
489 | dev_dbg(&dev->udev->dev, "%s: Received status message," | ||
490 | " %04x bytes", __func__, len); | ||
491 | |||
492 | err = parse_hip(buf, len, &hh); | ||
493 | if (err) { | ||
494 | netdev_err(dev->net, "%s: Bad packet," | ||
495 | " parse result %d\n", __func__, err); | ||
496 | kfree(buf); | ||
497 | return; | ||
498 | } | ||
499 | |||
500 | /* Validate packet length */ | ||
501 | if (len != hh.hdrlen + hh.payload_len.word) { | ||
502 | netdev_err(dev->net, "%s: Bad packet, received" | ||
503 | " %d, expected %d\n", __func__, len, | ||
504 | hh.hdrlen + hh.payload_len.word); | ||
505 | kfree(buf); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | /* Switch on received message types */ | ||
510 | switch (hh.msgid.byte) { | ||
511 | case SIERRA_NET_HIP_LSI_UMTSID: | ||
512 | dev_dbg(&dev->udev->dev, "LSI for ctx:%d", | ||
513 | hh.msgspecific.byte); | ||
514 | sierra_net_handle_lsi(dev, buf, &hh); | ||
515 | break; | ||
516 | case SIERRA_NET_HIP_RESTART_ID: | ||
517 | dev_dbg(&dev->udev->dev, "Restart reported: %d," | ||
518 | " stopping sync timer", | ||
519 | hh.msgspecific.byte); | ||
520 | /* Got sync resp - stop timer & clear mask */ | ||
521 | del_timer_sync(&priv->sync_timer); | ||
522 | clear_bit(SIERRA_NET_TIMER_EXPIRY, | ||
523 | &priv->kevent_flags); | ||
524 | break; | ||
525 | case SIERRA_NET_HIP_HSYNC_ID: | ||
526 | dev_dbg(&dev->udev->dev, "SYNC received"); | ||
527 | err = sierra_net_send_sync(dev); | ||
528 | if (err < 0) | ||
529 | netdev_err(dev->net, | ||
530 | "Send SYNC failed %d\n", err); | ||
531 | break; | ||
532 | case SIERRA_NET_HIP_EXTENDEDID: | ||
533 | netdev_err(dev->net, "Unrecognized HIP msg, " | ||
534 | "extmsgid 0x%04x\n", hh.extmsgid.word); | ||
535 | break; | ||
536 | case SIERRA_NET_HIP_RCGI: | ||
537 | /* Ignored */ | ||
538 | break; | ||
539 | default: | ||
540 | netdev_err(dev->net, "Unrecognized HIP msg, " | ||
541 | "msgid 0x%02x\n", hh.msgid.byte); | ||
542 | break; | ||
543 | } | ||
544 | } | ||
545 | kfree(buf); | ||
546 | } | ||
547 | /* The sync timer bit might be set */ | ||
548 | if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) { | ||
549 | clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); | ||
550 | dev_dbg(&dev->udev->dev, "Deferred sync timer expiry"); | ||
551 | sierra_net_dosync(priv->usbnet); | ||
552 | } | ||
553 | |||
554 | if (priv->kevent_flags) | ||
555 | dev_dbg(&dev->udev->dev, "sierra_net_kevent done, " | ||
556 | "kevent_flags = 0x%lx", priv->kevent_flags); | ||
557 | } | ||
558 | |||
559 | static void sierra_net_defer_kevent(struct usbnet *dev, int work) | ||
560 | { | ||
561 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
562 | |||
563 | set_bit(work, &priv->kevent_flags); | ||
564 | schedule_work(&priv->sierra_net_kevent); | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Sync Retransmit Timer Handler. On expiry, kick the work queue | ||
569 | */ | ||
570 | void sierra_sync_timer(unsigned long syncdata) | ||
571 | { | ||
572 | struct usbnet *dev = (struct usbnet *)syncdata; | ||
573 | |||
574 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
575 | /* Kick the tasklet */ | ||
576 | sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY); | ||
577 | } | ||
578 | |||
579 | static void sierra_net_status(struct usbnet *dev, struct urb *urb) | ||
580 | { | ||
581 | struct usb_cdc_notification *event; | ||
582 | |||
583 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
584 | |||
585 | if (urb->actual_length < sizeof *event) | ||
586 | return; | ||
587 | |||
588 | /* Add cases to handle other standard notifications. */ | ||
589 | event = urb->transfer_buffer; | ||
590 | switch (event->bNotificationType) { | ||
591 | case USB_CDC_NOTIFY_NETWORK_CONNECTION: | ||
592 | case USB_CDC_NOTIFY_SPEED_CHANGE: | ||
593 | /* USB 305 sends those */ | ||
594 | break; | ||
595 | case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: | ||
596 | sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL); | ||
597 | break; | ||
598 | default: | ||
599 | netdev_err(dev->net, ": unexpected notification %02x!\n", | ||
600 | event->bNotificationType); | ||
601 | break; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | static void sierra_net_get_drvinfo(struct net_device *net, | ||
606 | struct ethtool_drvinfo *info) | ||
607 | { | ||
608 | /* Inherit standard device info */ | ||
609 | usbnet_get_drvinfo(net, info); | ||
610 | strncpy(info->driver, driver_name, sizeof info->driver); | ||
611 | strncpy(info->version, DRIVER_VERSION, sizeof info->version); | ||
612 | } | ||
613 | |||
614 | static u32 sierra_net_get_link(struct net_device *net) | ||
615 | { | ||
616 | struct usbnet *dev = netdev_priv(net); | ||
617 | /* Report link is down whenever the interface is down */ | ||
618 | return sierra_net_get_private(dev)->link_up && netif_running(net); | ||
619 | } | ||
620 | |||
621 | static struct ethtool_ops sierra_net_ethtool_ops = { | ||
622 | .get_drvinfo = sierra_net_get_drvinfo, | ||
623 | .get_link = sierra_net_get_link, | ||
624 | .get_msglevel = usbnet_get_msglevel, | ||
625 | .set_msglevel = usbnet_set_msglevel, | ||
626 | .get_settings = usbnet_get_settings, | ||
627 | .set_settings = usbnet_set_settings, | ||
628 | .nway_reset = usbnet_nway_reset, | ||
629 | }; | ||
630 | |||
631 | /* MTU can not be more than 1500 bytes, enforce it. */ | ||
632 | static int sierra_net_change_mtu(struct net_device *net, int new_mtu) | ||
633 | { | ||
634 | if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU) | ||
635 | return -EINVAL; | ||
636 | |||
637 | return usbnet_change_mtu(net, new_mtu); | ||
638 | } | ||
639 | |||
640 | static int is_whitelisted(const u8 ifnum, | ||
641 | const struct sierra_net_iface_info *whitelist) | ||
642 | { | ||
643 | if (whitelist) { | ||
644 | const u8 *list = whitelist->ifaceinfo; | ||
645 | int i; | ||
646 | |||
647 | for (i = 0; i < whitelist->infolen; i++) { | ||
648 | if (list[i] == ifnum) | ||
649 | return 1; | ||
650 | } | ||
651 | } | ||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) | ||
656 | { | ||
657 | int result = 0; | ||
658 | u16 *attrdata; | ||
659 | |||
660 | attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL); | ||
661 | if (!attrdata) | ||
662 | return -ENOMEM; | ||
663 | |||
664 | result = usb_control_msg( | ||
665 | dev->udev, | ||
666 | usb_rcvctrlpipe(dev->udev, 0), | ||
667 | /* _u8 vendor specific request */ | ||
668 | SWI_USB_REQUEST_GET_FW_ATTR, | ||
669 | USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ | ||
670 | 0x0000, /* __u16 value not used */ | ||
671 | 0x0000, /* __u16 index not used */ | ||
672 | attrdata, /* char *data */ | ||
673 | sizeof(*attrdata), /* __u16 size */ | ||
674 | USB_CTRL_SET_TIMEOUT); /* int timeout */ | ||
675 | |||
676 | if (result < 0) { | ||
677 | kfree(attrdata); | ||
678 | return -EIO; | ||
679 | } | ||
680 | |||
681 | *datap = *attrdata; | ||
682 | |||
683 | kfree(attrdata); | ||
684 | return result; | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * collects the bulk endpoints, the status endpoint. | ||
689 | */ | ||
690 | static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | ||
691 | { | ||
692 | u8 ifacenum; | ||
693 | u8 numendpoints; | ||
694 | u16 fwattr = 0; | ||
695 | int status; | ||
696 | struct ethhdr *eth; | ||
697 | struct sierra_net_data *priv; | ||
698 | static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { | ||
699 | 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; | ||
700 | static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { | ||
701 | 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; | ||
702 | |||
703 | struct sierra_net_info_data *data = | ||
704 | (struct sierra_net_info_data *)dev->driver_info->data; | ||
705 | |||
706 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
707 | |||
708 | ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; | ||
709 | /* We only accept certain interfaces */ | ||
710 | if (!is_whitelisted(ifacenum, &data->whitelist)) { | ||
711 | dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); | ||
712 | return -ENODEV; | ||
713 | } | ||
714 | numendpoints = intf->cur_altsetting->desc.bNumEndpoints; | ||
715 | /* We have three endpoints, bulk in and out, and a status */ | ||
716 | if (numendpoints != 3) { | ||
717 | dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d", | ||
718 | numendpoints); | ||
719 | return -ENODEV; | ||
720 | } | ||
721 | /* Status endpoint set in usbnet_get_endpoints() */ | ||
722 | dev->status = NULL; | ||
723 | status = usbnet_get_endpoints(dev, intf); | ||
724 | if (status < 0) { | ||
725 | dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)", | ||
726 | status); | ||
727 | return -ENODEV; | ||
728 | } | ||
729 | /* Initialize sierra private data */ | ||
730 | priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
731 | if (!priv) { | ||
732 | dev_err(&dev->udev->dev, "No memory"); | ||
733 | return -ENOMEM; | ||
734 | } | ||
735 | |||
736 | priv->usbnet = dev; | ||
737 | priv->ifnum = ifacenum; | ||
738 | dev->net->netdev_ops = &sierra_net_device_ops; | ||
739 | |||
740 | /* change MAC addr to include ifacenum, and to be unique */ | ||
741 | dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); | ||
742 | dev->net->dev_addr[ETH_ALEN-1] = ifacenum; | ||
743 | |||
744 | /* we will have to manufacture ethernet headers, prepare template */ | ||
745 | eth = (struct ethhdr *)priv->ethr_hdr_tmpl; | ||
746 | memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
747 | eth->h_proto = cpu_to_be16(ETH_P_IP); | ||
748 | |||
749 | /* prepare shutdown message template */ | ||
750 | memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); | ||
751 | /* set context index initially to 0 - prepares tx hdr template */ | ||
752 | sierra_net_set_ctx_index(priv, 0); | ||
753 | |||
754 | /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ | ||
755 | dev->rx_urb_size = data->rx_urb_size; | ||
756 | if (dev->udev->speed != USB_SPEED_HIGH) | ||
757 | dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); | ||
758 | |||
759 | dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; | ||
760 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
761 | |||
762 | /* Set up the netdev */ | ||
763 | dev->net->flags |= IFF_NOARP; | ||
764 | dev->net->ethtool_ops = &sierra_net_ethtool_ops; | ||
765 | netif_carrier_off(dev->net); | ||
766 | |||
767 | sierra_net_set_private(dev, priv); | ||
768 | |||
769 | priv->kevent_flags = 0; | ||
770 | |||
771 | /* Use the shared workqueue */ | ||
772 | INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent); | ||
773 | |||
774 | /* Only need to do this once */ | ||
775 | init_timer(&priv->sync_timer); | ||
776 | |||
777 | /* verify fw attributes */ | ||
778 | status = sierra_net_get_fw_attr(dev, &fwattr); | ||
779 | dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr); | ||
780 | |||
781 | /* test whether firmware supports DHCP */ | ||
782 | if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) { | ||
783 | /* found incompatible firmware version */ | ||
784 | dev_err(&dev->udev->dev, "Incompatible driver and firmware" | ||
785 | " versions\n"); | ||
786 | kfree(priv); | ||
787 | return -ENODEV; | ||
788 | } | ||
789 | /* prepare sync message from template */ | ||
790 | memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) | ||
796 | { | ||
797 | int status; | ||
798 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
799 | |||
800 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
801 | |||
802 | /* Kill the timer then flush the work queue */ | ||
803 | del_timer_sync(&priv->sync_timer); | ||
804 | |||
805 | flush_scheduled_work(); | ||
806 | |||
807 | /* tell modem we are going away */ | ||
808 | status = sierra_net_send_cmd(dev, priv->shdwn_msg, | ||
809 | sizeof(priv->shdwn_msg), "Shutdown"); | ||
810 | if (status < 0) | ||
811 | netdev_err(dev->net, | ||
812 | "usb_control_msg failed, status %d\n", status); | ||
813 | |||
814 | sierra_net_set_private(dev, NULL); | ||
815 | |||
816 | kfree(priv); | ||
817 | } | ||
818 | |||
819 | static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev, | ||
820 | struct sk_buff *skb, int len) | ||
821 | { | ||
822 | struct sk_buff *new_skb; | ||
823 | |||
824 | /* clone skb */ | ||
825 | new_skb = skb_clone(skb, GFP_ATOMIC); | ||
826 | |||
827 | /* remove len bytes from original */ | ||
828 | skb_pull(skb, len); | ||
829 | |||
830 | /* trim next packet to its length */ | ||
831 | if (new_skb) { | ||
832 | skb_trim(new_skb, len); | ||
833 | } else { | ||
834 | if (netif_msg_rx_err(dev)) | ||
835 | netdev_err(dev->net, "failed to get skb\n"); | ||
836 | dev->net->stats.rx_dropped++; | ||
837 | } | ||
838 | |||
839 | return new_skb; | ||
840 | } | ||
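The helper above splits an aggregated URB without copying payload data: the clone shares the underlying buffer, skb_trim() limits the clone to the first packet, and skb_pull() advances the original past it, so only skb metadata is allocated per packet. A hypothetical caller, shown only to illustrate the contract (split_example() is not part of the driver):

/* Sketch: hand the first 'first_len' bytes of 'skb' up the stack and leave
 * the remainder in 'skb' for the caller (e.g. the rx_fixup() loop below). */
static void split_example(struct usbnet *dev, struct sk_buff *skb,
			  unsigned int first_len)
{
	struct sk_buff *first = sierra_net_skb_clone(dev, skb, first_len);

	if (first)
		usbnet_skb_return(dev, first);
	/* on failure the helper has already counted rx_dropped */
}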
841 | |||
842 | /* ---------------------------- Receive data path ----------------------*/ | ||
843 | static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
844 | { | ||
845 | int err; | ||
846 | struct hip_hdr hh; | ||
847 | struct sk_buff *new_skb; | ||
848 | |||
849 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
850 | |||
851 | /* could contain multiple packets */ | ||
852 | while (likely(skb->len)) { | ||
853 | err = parse_hip(skb->data, skb->len, &hh); | ||
854 | if (err) { | ||
855 | if (netif_msg_rx_err(dev)) | ||
856 | netdev_err(dev->net, "Invalid HIP header %d\n", | ||
857 | err); | ||
858 | /* dev->net->stats.rx_errors incremented by caller */ | ||
859 | dev->net->stats.rx_length_errors++; | ||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | /* Validate Extended HIP header */ | ||
864 | if (!hh.extmsgid.is_present | ||
865 | || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) { | ||
866 | if (netif_msg_rx_err(dev)) | ||
867 | netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); | ||
868 | |||
869 | dev->net->stats.rx_frame_errors++; | ||
870 | /* dev->net->stats.rx_errors incremented by caller */ | ||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | skb_pull(skb, hh.hdrlen); | ||
875 | |||
876 | /* We are going to accept this packet, prepare it */ | ||
877 | memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, | ||
878 | ETH_HLEN); | ||
879 | |||
880 | /* Last packet in batch handled by usbnet */ | ||
881 | if (hh.payload_len.word == skb->len) | ||
882 | return 1; | ||
883 | |||
884 | new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word); | ||
885 | if (new_skb) | ||
886 | usbnet_skb_return(dev, new_skb); | ||
887 | |||
888 | } /* while */ | ||
889 | |||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | /* ---------------------------- Transmit data path ----------------------*/ | ||
894 | struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | ||
895 | gfp_t flags) | ||
896 | { | ||
897 | struct sierra_net_data *priv = sierra_net_get_private(dev); | ||
898 | u16 len; | ||
899 | bool need_tail; | ||
900 | |||
901 | dev_dbg(&dev->udev->dev, "%s", __func__); | ||
902 | if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) { | ||
903 | /* enough head room as is? */ | ||
904 | if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) { | ||
905 | /* Save the Eth/IP length and set up HIP hdr */ | ||
906 | len = skb->len; | ||
907 | skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN); | ||
908 | /* Handle ZLP issue */ | ||
909 | need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) | ||
910 | % dev->maxpacket == 0); | ||
911 | if (need_tail) { | ||
912 | if (unlikely(skb_tailroom(skb) == 0)) { | ||
913 | netdev_err(dev->net, "tx_fixup: " | ||
914 | "no room for packet\n"); | ||
915 | dev_kfree_skb_any(skb); | ||
916 | return NULL; | ||
917 | } else { | ||
918 | skb->data[skb->len] = 0; | ||
919 | __skb_put(skb, 1); | ||
920 | len = len + 1; | ||
921 | } | ||
922 | } | ||
923 | build_hip(skb->data, len, priv); | ||
924 | return skb; | ||
925 | } else { | ||
926 | /* | ||
927 | * not enough headroom; compensate in the future if necessary | ||
928 | */ | ||
929 | netdev_err(dev->net, "tx_fixup: no room for HIP\n"); | ||
930 | } /* headroom */ | ||
931 | } | ||
932 | |||
933 | if (!priv->link_up) | ||
934 | dev->net->stats.tx_carrier_errors++; | ||
935 | |||
936 | /* tx_dropped incremented by usbnet */ | ||
937 | |||
938 | /* filter the packet out, release it */ | ||
939 | dev_kfree_skb_any(skb); | ||
940 | return NULL; | ||
941 | } | ||
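The need_tail logic above avoids the zero-length-packet (ZLP) corner case: when the HIP-wrapped frame length is an exact multiple of the bulk endpoint's max packet size, the transfer would otherwise have to be terminated by a zero-length packet, so one pad byte is appended to break the alignment before the HIP header is built. A worked sketch of the arithmetic with assumed values (the 512 and 8 below stand in for dev->maxpacket and SIERRA_NET_HIP_EXT_HDR_LEN and are assumptions for the example):

#include <linux/types.h>

/* Illustration only: decide whether one pad byte is needed. */
static bool zlp_pad_needed(unsigned int eth_len)
{
	const unsigned int maxpacket = 512;	/* assumed high-speed bulk max packet */
	const unsigned int hip_hdr = 8;		/* assumed HIP extended header length */

	/* e.g. eth_len == 504: 504 + 8 == 512, an exact multiple -> pad */
	return (eth_len + hip_hdr) % maxpacket == 0;
}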
942 | |||
943 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; | ||
944 | static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | ||
945 | .rx_urb_size = 8 * 1024, | ||
946 | .whitelist = { | ||
947 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), | ||
948 | .ifaceinfo = sierra_net_ifnum_list | ||
949 | } | ||
950 | }; | ||
951 | |||
952 | static const struct driver_info sierra_net_info_68A3 = { | ||
953 | .description = "Sierra Wireless USB-to-WWAN Modem", | ||
954 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, | ||
955 | .bind = sierra_net_bind, | ||
956 | .unbind = sierra_net_unbind, | ||
957 | .status = sierra_net_status, | ||
958 | .rx_fixup = sierra_net_rx_fixup, | ||
959 | .tx_fixup = sierra_net_tx_fixup, | ||
960 | .data = (unsigned long)&sierra_net_info_data_68A3, | ||
961 | }; | ||
962 | |||
963 | static const struct usb_device_id products[] = { | ||
964 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ | ||
965 | .driver_info = (unsigned long) &sierra_net_info_68A3}, | ||
966 | |||
967 | {}, /* last item */ | ||
968 | }; | ||
969 | MODULE_DEVICE_TABLE(usb, products); | ||
970 | |||
971 | /* We are based on usbnet, so let it handle the USB driver specifics */ | ||
972 | static struct usb_driver sierra_net_driver = { | ||
973 | .name = "sierra_net", | ||
974 | .id_table = products, | ||
975 | .probe = usbnet_probe, | ||
976 | .disconnect = usbnet_disconnect, | ||
977 | .suspend = usbnet_suspend, | ||
978 | .resume = usbnet_resume, | ||
979 | .no_dynamic_id = 1, | ||
980 | }; | ||
981 | |||
982 | static int __init sierra_net_init(void) | ||
983 | { | ||
984 | BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data) | ||
985 | < sizeof(struct cdc_state)); | ||
986 | |||
987 | return usb_register(&sierra_net_driver); | ||
988 | } | ||
989 | |||
990 | static void __exit sierra_net_exit(void) | ||
991 | { | ||
992 | usb_deregister(&sierra_net_driver); | ||
993 | } | ||
994 | |||
995 | module_exit(sierra_net_exit); | ||
996 | module_init(sierra_net_init); | ||
997 | |||
998 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
999 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
1000 | MODULE_VERSION(DRIVER_VERSION); | ||
1001 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6fb783ce20b9..b0577dd1a42d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) | |||
327 | struct scatterlist sg[2]; | 327 | struct scatterlist sg[2]; |
328 | int err; | 328 | int err; |
329 | 329 | ||
330 | sg_init_table(sg, 2); | ||
330 | skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); | 331 | skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); |
331 | if (unlikely(!skb)) | 332 | if (unlikely(!skb)) |
332 | return -ENOMEM; | 333 | return -ENOMEM; |
@@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) | |||
352 | char *p; | 353 | char *p; |
353 | int i, err, offset; | 354 | int i, err, offset; |
354 | 355 | ||
356 | sg_init_table(sg, MAX_SKB_FRAGS + 2); | ||
355 | /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ | 357 | /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ |
356 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { | 358 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { |
357 | first = get_a_page(vi, gfp); | 359 | first = get_a_page(vi, gfp); |
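The two hunks above matter because a struct scatterlist array must be initialized before it is filled: sg_init_table() zeroes the entries and marks the last one as the end of the list, which sg_set_buf() alone does not do and which the chaining code and CONFIG_DEBUG_SG checks rely on. A minimal sketch of the required pattern (the buffer names are placeholders):

#include <linux/scatterlist.h>

/* Sketch: build a two-entry scatterlist, e.g. a header plus a payload. */
static void fill_two_entry_sg(struct scatterlist sg[2],
			      void *hdr, unsigned int hdr_len,
			      void *buf, unsigned int buf_len)
{
	sg_init_table(sg, 2);		/* zero entries, mark sg[1] as the end */
	sg_set_buf(&sg[0], hdr, hdr_len);
	sg_set_buf(&sg[1], buf, buf_len);
}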
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b9b9d6b01c0b..941f053e650e 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev) | |||
628 | ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); | 628 | ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); |
629 | } | 629 | } |
630 | 630 | ||
631 | static void ppp_close(struct net_device *dev) | ||
632 | { | ||
633 | ppp_tx_flush(); | ||
634 | } | ||
635 | |||
631 | static struct hdlc_proto proto = { | 636 | static struct hdlc_proto proto = { |
632 | .start = ppp_start, | 637 | .start = ppp_start, |
633 | .stop = ppp_stop, | 638 | .stop = ppp_stop, |
639 | .close = ppp_close, | ||
634 | .type_trans = ppp_type_trans, | 640 | .type_trans = ppp_type_trans, |
635 | .ioctl = ppp_ioctl, | 641 | .ioctl = ppp_ioctl, |
636 | .netif_rx = ppp_rx, | 642 | .netif_rx = ppp_rx, |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 67ca4e5a6017..115e1aeedb59 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1532 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); | 1532 | all_wiphys_idle = ath9k_all_wiphys_idle(sc); |
1533 | ath9k_set_wiphy_idle(aphy, idle); | 1533 | ath9k_set_wiphy_idle(aphy, idle); |
1534 | 1534 | ||
1535 | if (!idle && all_wiphys_idle) | 1535 | enable_radio = (!idle && all_wiphys_idle); |
1536 | enable_radio = true; | ||
1537 | 1536 | ||
1538 | /* | 1537 | /* |
1539 | * After we unlock here it's possible another wiphy | 1538 | * After we unlock here it's possible another wiphy |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 83c52a682622..8972166386cb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2015 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " | 2015 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " |
2016 | "%d index %d\n", scd_ssn , index); | 2016 | "%d index %d\n", scd_ssn , index); |
2017 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2017 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2018 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2018 | if (qc) |
2019 | iwl_free_tfds_in_queue(priv, sta_id, | ||
2020 | tid, freed); | ||
2019 | 2021 | ||
2020 | if (priv->mac80211_registered && | 2022 | if (priv->mac80211_registered && |
2021 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | 2023 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && |
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2041 | tx_resp->failure_frame); | 2043 | tx_resp->failure_frame); |
2042 | 2044 | ||
2043 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); | 2045 | freed = iwl_tx_queue_reclaim(priv, txq_id, index); |
2044 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | 2046 | if (qc && likely(sta_id != IWL_INVALID_STATION)) |
2047 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
2048 | else if (sta_id == IWL_INVALID_STATION) | ||
2049 | IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); | ||
2045 | 2050 | ||
2046 | if (priv->mac80211_registered && | 2051 | if (priv->mac80211_registered && |
2047 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | 2052 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) |
2048 | iwl_wake_queue(priv, txq_id); | 2053 | iwl_wake_queue(priv, txq_id); |
2049 | } | 2054 | } |
2050 | 2055 | if (qc && likely(sta_id != IWL_INVALID_STATION)) | |
2051 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); | 2056 | iwl_txq_check_empty(priv, sta_id, tid, txq_id); |
2052 | 2057 | ||
2053 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | 2058 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) |
2054 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); | 2059 | IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index c4844adff92a..92b3e64fc14d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -259,7 +259,7 @@ static struct iwl_lib_ops iwl6000_lib = { | |||
259 | EEPROM_5000_REG_BAND_3_CHANNELS, | 259 | EEPROM_5000_REG_BAND_3_CHANNELS, |
260 | EEPROM_5000_REG_BAND_4_CHANNELS, | 260 | EEPROM_5000_REG_BAND_4_CHANNELS, |
261 | EEPROM_5000_REG_BAND_5_CHANNELS, | 261 | EEPROM_5000_REG_BAND_5_CHANNELS, |
262 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 262 | EEPROM_6000_REG_BAND_24_HT40_CHANNELS, |
263 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 263 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS |
264 | }, | 264 | }, |
265 | .verify_signature = iwlcore_eeprom_verify_signature, | 265 | .verify_signature = iwlcore_eeprom_verify_signature, |
@@ -323,7 +323,7 @@ static struct iwl_lib_ops iwl6050_lib = { | |||
323 | EEPROM_5000_REG_BAND_3_CHANNELS, | 323 | EEPROM_5000_REG_BAND_3_CHANNELS, |
324 | EEPROM_5000_REG_BAND_4_CHANNELS, | 324 | EEPROM_5000_REG_BAND_4_CHANNELS, |
325 | EEPROM_5000_REG_BAND_5_CHANNELS, | 325 | EEPROM_5000_REG_BAND_5_CHANNELS, |
326 | EEPROM_5000_REG_BAND_24_HT40_CHANNELS, | 326 | EEPROM_6000_REG_BAND_24_HT40_CHANNELS, |
327 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS | 327 | EEPROM_5000_REG_BAND_52_HT40_CHANNELS |
328 | }, | 328 | }, |
329 | .verify_signature = iwlcore_eeprom_verify_signature, | 329 | .verify_signature = iwlcore_eeprom_verify_signature, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 35f819ac87a3..1460116d329f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | |||
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) | |||
346 | !!(rate_n_flags & RATE_MCS_ANT_C_MSK); | 346 | !!(rate_n_flags & RATE_MCS_ANT_C_MSK); |
347 | } | 347 | } |
348 | 348 | ||
349 | /* | ||
350 | * Static function to get the expected throughput from an iwl_scale_tbl_info | ||
351 | * that wraps a NULL pointer check | ||
352 | */ | ||
353 | static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) | ||
354 | { | ||
355 | if (tbl->expected_tpt) | ||
356 | return tbl->expected_tpt[rs_index]; | ||
357 | return 0; | ||
358 | } | ||
359 | |||
349 | /** | 360 | /** |
350 | * rs_collect_tx_data - Update the success/failure sliding window | 361 | * rs_collect_tx_data - Update the success/failure sliding window |
351 | * | 362 | * |
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) | |||
353 | * at this rate. window->data contains the bitmask of successful | 364 | * at this rate. window->data contains the bitmask of successful |
354 | * packets. | 365 | * packets. |
355 | */ | 366 | */ |
356 | static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, | 367 | static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, |
357 | int scale_index, s32 tpt, int attempts, | 368 | int scale_index, int attempts, int successes) |
358 | int successes) | ||
359 | { | 369 | { |
360 | struct iwl_rate_scale_data *window = NULL; | 370 | struct iwl_rate_scale_data *window = NULL; |
361 | static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); | 371 | static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); |
362 | s32 fail_count; | 372 | s32 fail_count, tpt; |
363 | 373 | ||
364 | if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) | 374 | if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) |
365 | return -EINVAL; | 375 | return -EINVAL; |
366 | 376 | ||
367 | /* Select window for current tx bit rate */ | 377 | /* Select window for current tx bit rate */ |
368 | window = &(windows[scale_index]); | 378 | window = &(tbl->win[scale_index]); |
379 | |||
380 | /* Get expected throughput */ | ||
381 | tpt = get_expected_tpt(tbl, scale_index); | ||
369 | 382 | ||
370 | /* | 383 | /* |
371 | * Keep track of only the latest 62 tx frame attempts in this rate's | 384 | * Keep track of only the latest 62 tx frame attempts in this rate's |
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a, | |||
739 | return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && | 752 | return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && |
740 | (a->is_SGI == b->is_SGI); | 753 | (a->is_SGI == b->is_SGI); |
741 | } | 754 | } |
742 | /* | ||
743 | * Static function to get the expected throughput from an iwl_scale_tbl_info | ||
744 | * that wraps a NULL pointer check | ||
745 | */ | ||
746 | static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) | ||
747 | { | ||
748 | if (tbl->expected_tpt) | ||
749 | return tbl->expected_tpt[rs_index]; | ||
750 | return 0; | ||
751 | } | ||
752 | 755 | ||
753 | /* | 756 | /* |
754 | * mac80211 sends us Tx status | 757 | * mac80211 sends us Tx status |
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
765 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 768 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
766 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; | 769 | struct iwl_priv *priv = (struct iwl_priv *)priv_r; |
767 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 770 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
768 | struct iwl_rate_scale_data *window = NULL; | ||
769 | enum mac80211_rate_control_flags mac_flags; | 771 | enum mac80211_rate_control_flags mac_flags; |
770 | u32 tx_rate; | 772 | u32 tx_rate; |
771 | struct iwl_scale_tbl_info tbl_type; | 773 | struct iwl_scale_tbl_info tbl_type; |
772 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl; | 774 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; |
773 | s32 tpt = 0; | ||
774 | 775 | ||
775 | IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); | 776 | IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); |
776 | 777 | ||
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
853 | IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); | 854 | IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); |
854 | return; | 855 | return; |
855 | } | 856 | } |
856 | window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]); | ||
857 | 857 | ||
858 | /* | 858 | /* |
859 | * Updating the frame history depends on whether packets were | 859 | * Updating the frame history depends on whether packets were |
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
866 | tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); | 866 | tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); |
867 | rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, | 867 | rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, |
868 | &rs_index); | 868 | &rs_index); |
869 | tpt = get_expected_tpt(curr_tbl, rs_index); | 869 | rs_collect_tx_data(curr_tbl, rs_index, |
870 | rs_collect_tx_data(window, rs_index, tpt, | ||
871 | info->status.ampdu_ack_len, | 870 | info->status.ampdu_ack_len, |
872 | info->status.ampdu_ack_map); | 871 | info->status.ampdu_ack_map); |
873 | 872 | ||
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, | |||
897 | * table as active/search. | 896 | * table as active/search. |
898 | */ | 897 | */ |
899 | if (table_type_matches(&tbl_type, curr_tbl)) | 898 | if (table_type_matches(&tbl_type, curr_tbl)) |
900 | tpt = get_expected_tpt(curr_tbl, rs_index); | 899 | tmp_tbl = curr_tbl; |
901 | else if (table_type_matches(&tbl_type, other_tbl)) | 900 | else if (table_type_matches(&tbl_type, other_tbl)) |
902 | tpt = get_expected_tpt(other_tbl, rs_index); | 901 | tmp_tbl = other_tbl; |
903 | else | 902 | else |
904 | continue; | 903 | continue; |
905 | 904 | rs_collect_tx_data(tmp_tbl, rs_index, 1, | |
906 | /* Constants mean 1 transmission, 0 successes */ | 905 | i < retries ? 0 : legacy_success); |
907 | if (i < retries) | ||
908 | rs_collect_tx_data(window, rs_index, tpt, 1, | ||
909 | 0); | ||
910 | else | ||
911 | rs_collect_tx_data(window, rs_index, tpt, 1, | ||
912 | legacy_success); | ||
913 | } | 906 | } |
914 | 907 | ||
915 | /* Update success/fail counts if not searching for new mode */ | 908 | /* Update success/fail counts if not searching for new mode */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 8b8e3e1cbb44..bdff56583e11 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -3331,6 +3331,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv) | |||
3331 | 3331 | ||
3332 | cancel_delayed_work_sync(&priv->init_alive_start); | 3332 | cancel_delayed_work_sync(&priv->init_alive_start); |
3333 | cancel_delayed_work(&priv->scan_check); | 3333 | cancel_delayed_work(&priv->scan_check); |
3334 | cancel_work_sync(&priv->start_internal_scan); | ||
3334 | cancel_delayed_work(&priv->alive_start); | 3335 | cancel_delayed_work(&priv->alive_start); |
3335 | cancel_work_sync(&priv->beacon_update); | 3336 | cancel_work_sync(&priv->beacon_update); |
3336 | del_timer_sync(&priv->statistics_periodic); | 3337 | del_timer_sync(&priv->statistics_periodic); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c index de3b3f403d1f..8b516c5ff0bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c | |||
@@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, | |||
808 | } | 808 | } |
809 | } | 809 | } |
810 | 810 | ||
811 | /* | ||
812 | * The above algorithm sometimes fails when the ucode | ||
813 | * reports 0 for all chains. It's not clear why that | ||
814 | * happens to start with, but it is then causing trouble | ||
815 | * because this can make us enable more chains than the | ||
816 | * hardware really has. | ||
817 | * | ||
818 | * To be safe, simply mask out any chains that we know | ||
819 | * are not on the device. | ||
820 | */ | ||
821 | active_chains &= priv->hw_params.valid_rx_ant; | ||
822 | |||
811 | num_tx_chains = 0; | 823 | num_tx_chains = 0; |
812 | for (i = 0; i < NUM_RX_CHAINS; i++) { | 824 | for (i = 0; i < NUM_RX_CHAINS; i++) { |
813 | /* loops on all the bits of | 825 | /* loops on all the bits of |
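The added mask is a plain bitwise AND of the chains the calibration wants to enable against the antennas the hardware configuration says exist, so a chain that is not physically present can never survive. A worked example with assumed bitmap values (bit 0 = chain A, bit 1 = B, bit 2 = C):

/* Illustration only; the concrete values are assumptions for the example. */
static u8 mask_active_chains(u8 active_chains, u8 valid_rx_ant)
{
	/* active_chains = 0x07 (A|B|C), valid_rx_ant = 0x03 (A|B)
	 * -> returns 0x03: chain C is never enabled on a two-antenna device */
	return active_chains & valid_rx_ant;
}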
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index db050b811232..049b652bcb5e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv) | |||
308 | 308 | ||
309 | spin_unlock_irqrestore(&priv->lock, flags); | 309 | spin_unlock_irqrestore(&priv->lock, flags); |
310 | 310 | ||
311 | /* Allocate and init all Tx and Command queues */ | 311 | /* Allocate or reset and init all Tx and Command queues */ |
312 | ret = iwl_txq_ctx_reset(priv); | 312 | if (!priv->txq) { |
313 | if (ret) | 313 | ret = iwl_txq_ctx_alloc(priv); |
314 | return ret; | 314 | if (ret) |
315 | return ret; | ||
316 | } else | ||
317 | iwl_txq_ctx_reset(priv); | ||
315 | 318 | ||
316 | set_bit(STATUS_INIT, &priv->status); | 319 | set_bit(STATUS_INIT, &priv->status); |
317 | 320 | ||
@@ -3355,7 +3358,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv) | |||
3355 | */ | 3358 | */ |
3356 | IWL_DEBUG_INFO(priv, "perform radio reset.\n"); | 3359 | IWL_DEBUG_INFO(priv, "perform radio reset.\n"); |
3357 | iwl_internal_short_hw_scan(priv); | 3360 | iwl_internal_short_hw_scan(priv); |
3358 | return; | ||
3359 | } | 3361 | } |
3360 | 3362 | ||
3361 | 3363 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 4ef7739f9e8e..36940a9ec6b9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | |||
442 | /***************************************************** | 442 | /***************************************************** |
443 | * TX | 443 | * TX |
444 | ******************************************************/ | 444 | ******************************************************/ |
445 | int iwl_txq_ctx_reset(struct iwl_priv *priv); | 445 | int iwl_txq_ctx_alloc(struct iwl_priv *priv); |
446 | void iwl_txq_ctx_reset(struct iwl_priv *priv); | ||
446 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 447 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
447 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, | 448 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, |
448 | struct iwl_tx_queue *txq, | 449 | struct iwl_tx_queue *txq, |
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv, | |||
456 | void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 457 | void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
457 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 458 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
458 | int slots_num, u32 txq_id); | 459 | int slots_num, u32 txq_id); |
460 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
461 | int slots_num, u32 txq_id); | ||
459 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 462 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
460 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); | 463 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); |
461 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); | 464 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); |
@@ -503,7 +506,7 @@ void iwl_init_scan_params(struct iwl_priv *priv); | |||
503 | int iwl_scan_cancel(struct iwl_priv *priv); | 506 | int iwl_scan_cancel(struct iwl_priv *priv); |
504 | int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); | 507 | int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); |
505 | int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); | 508 | int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); |
506 | int iwl_internal_short_hw_scan(struct iwl_priv *priv); | 509 | void iwl_internal_short_hw_scan(struct iwl_priv *priv); |
507 | int iwl_force_reset(struct iwl_priv *priv, int mode); | 510 | int iwl_force_reset(struct iwl_priv *priv, int mode); |
508 | u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, | 511 | u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, |
509 | const u8 *ie, int ie_len, int left); | 512 | const u8 *ie, int ie_len, int left); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 6054c5fba0c1..ef1720a852e9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
@@ -1296,6 +1296,7 @@ struct iwl_priv { | |||
1296 | struct work_struct tt_work; | 1296 | struct work_struct tt_work; |
1297 | struct work_struct ct_enter; | 1297 | struct work_struct ct_enter; |
1298 | struct work_struct ct_exit; | 1298 | struct work_struct ct_exit; |
1299 | struct work_struct start_internal_scan; | ||
1299 | 1300 | ||
1300 | struct tasklet_struct irq_tasklet; | 1301 | struct tasklet_struct irq_tasklet; |
1301 | 1302 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 4e1ba824dc50..8171c701e4e1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
@@ -203,6 +203,10 @@ struct iwl_eeprom_enhanced_txpwr { | |||
203 | #define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ | 203 | #define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ |
204 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ | 204 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ |
205 | 205 | ||
206 | /* 6000 regulatory - indirect access */ | ||
207 | #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ | ||
208 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ | ||
209 | |||
206 | /* 6000 and up regulatory tx power - indirect access */ | 210 | /* 6000 and up regulatory tx power - indirect access */ |
207 | /* max. elements per section */ | 211 | /* max. elements per section */ |
208 | #define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) | 212 | #define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 9ab0e412bf10..12e455a4b90e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -470,6 +470,8 @@ EXPORT_SYMBOL(iwl_init_scan_params); | |||
470 | 470 | ||
471 | static int iwl_scan_initiate(struct iwl_priv *priv) | 471 | static int iwl_scan_initiate(struct iwl_priv *priv) |
472 | { | 472 | { |
473 | WARN_ON(!mutex_is_locked(&priv->mutex)); | ||
474 | |||
473 | IWL_DEBUG_INFO(priv, "Starting scan...\n"); | 475 | IWL_DEBUG_INFO(priv, "Starting scan...\n"); |
474 | set_bit(STATUS_SCANNING, &priv->status); | 476 | set_bit(STATUS_SCANNING, &priv->status); |
475 | priv->is_internal_short_scan = false; | 477 | priv->is_internal_short_scan = false; |
@@ -547,24 +549,31 @@ EXPORT_SYMBOL(iwl_mac_hw_scan); | |||
547 | * internal short scan, this function should only be called while associated. | 549 | * internal short scan, this function should only be called while associated. |
548 | * It will reset and tune the radio to prevent possible RF-related problems | 550 | * It will reset and tune the radio to prevent possible RF-related problems |
549 | */ | 551 | */ |
550 | int iwl_internal_short_hw_scan(struct iwl_priv *priv) | 552 | void iwl_internal_short_hw_scan(struct iwl_priv *priv) |
551 | { | 553 | { |
552 | int ret = 0; | 554 | queue_work(priv->workqueue, &priv->start_internal_scan); |
555 | } | ||
556 | |||
557 | static void iwl_bg_start_internal_scan(struct work_struct *work) | ||
558 | { | ||
559 | struct iwl_priv *priv = | ||
560 | container_of(work, struct iwl_priv, start_internal_scan); | ||
561 | |||
562 | mutex_lock(&priv->mutex); | ||
553 | 563 | ||
554 | if (!iwl_is_ready_rf(priv)) { | 564 | if (!iwl_is_ready_rf(priv)) { |
555 | ret = -EIO; | ||
556 | IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); | 565 | IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); |
557 | goto out; | 566 | goto unlock; |
558 | } | 567 | } |
568 | |||
559 | if (test_bit(STATUS_SCANNING, &priv->status)) { | 569 | if (test_bit(STATUS_SCANNING, &priv->status)) { |
560 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | 570 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); |
561 | ret = -EAGAIN; | 571 | goto unlock; |
562 | goto out; | ||
563 | } | 572 | } |
573 | |||
564 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | 574 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { |
565 | IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); | 575 | IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); |
566 | ret = -EAGAIN; | 576 | goto unlock; |
567 | goto out; | ||
568 | } | 577 | } |
569 | 578 | ||
570 | priv->scan_bands = 0; | 579 | priv->scan_bands = 0; |
@@ -577,9 +586,8 @@ int iwl_internal_short_hw_scan(struct iwl_priv *priv) | |||
577 | set_bit(STATUS_SCANNING, &priv->status); | 586 | set_bit(STATUS_SCANNING, &priv->status); |
578 | priv->is_internal_short_scan = true; | 587 | priv->is_internal_short_scan = true; |
579 | queue_work(priv->workqueue, &priv->request_scan); | 588 | queue_work(priv->workqueue, &priv->request_scan); |
580 | 589 | unlock: | |
581 | out: | 590 | mutex_unlock(&priv->mutex); |
582 | return ret; | ||
583 | } | 591 | } |
584 | EXPORT_SYMBOL(iwl_internal_short_hw_scan); | 592 | EXPORT_SYMBOL(iwl_internal_short_hw_scan); |
585 | 593 | ||
@@ -965,6 +973,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv) | |||
965 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); | 973 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); |
966 | INIT_WORK(&priv->request_scan, iwl_bg_request_scan); | 974 | INIT_WORK(&priv->request_scan, iwl_bg_request_scan); |
967 | INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); | 975 | INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); |
976 | INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); | ||
968 | INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); | 977 | INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); |
969 | } | 978 | } |
970 | EXPORT_SYMBOL(iwl_setup_scan_deferred_work); | 979 | EXPORT_SYMBOL(iwl_setup_scan_deferred_work); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index f0b7e6cfbe4f..8dd0c036d547 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) | |||
194 | struct iwl_queue *q = &txq->q; | 194 | struct iwl_queue *q = &txq->q; |
195 | struct device *dev = &priv->pci_dev->dev; | 195 | struct device *dev = &priv->pci_dev->dev; |
196 | int i; | 196 | int i; |
197 | bool huge = false; | ||
197 | 198 | ||
198 | if (q->n_bd == 0) | 199 | if (q->n_bd == 0) |
199 | return; | 200 | return; |
200 | 201 | ||
202 | for (; q->read_ptr != q->write_ptr; | ||
203 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
204 | /* we have no way to tell if it is a huge cmd ATM */ | ||
205 | i = get_cmd_index(q, q->read_ptr, 0); | ||
206 | |||
207 | if (txq->meta[i].flags & CMD_SIZE_HUGE) { | ||
208 | huge = true; | ||
209 | continue; | ||
210 | } | ||
211 | |||
212 | pci_unmap_single(priv->pci_dev, | ||
213 | pci_unmap_addr(&txq->meta[i], mapping), | ||
214 | pci_unmap_len(&txq->meta[i], len), | ||
215 | PCI_DMA_BIDIRECTIONAL); | ||
216 | } | ||
217 | if (huge) { | ||
218 | i = q->n_window; | ||
219 | pci_unmap_single(priv->pci_dev, | ||
220 | pci_unmap_addr(&txq->meta[i], mapping), | ||
221 | pci_unmap_len(&txq->meta[i], len), | ||
222 | PCI_DMA_BIDIRECTIONAL); | ||
223 | } | ||
224 | |||
201 | /* De-alloc array of command/tx buffers */ | 225 | /* De-alloc array of command/tx buffers */ |
202 | for (i = 0; i <= TFD_CMD_SLOTS; i++) | 226 | for (i = 0; i <= TFD_CMD_SLOTS; i++) |
203 | kfree(txq->cmd[i]); | 227 | kfree(txq->cmd[i]); |
@@ -410,6 +434,26 @@ out_free_arrays: | |||
410 | } | 434 | } |
411 | EXPORT_SYMBOL(iwl_tx_queue_init); | 435 | EXPORT_SYMBOL(iwl_tx_queue_init); |
412 | 436 | ||
437 | void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
438 | int slots_num, u32 txq_id) | ||
439 | { | ||
440 | int actual_slots = slots_num; | ||
441 | |||
442 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
443 | actual_slots++; | ||
444 | |||
445 | memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); | ||
446 | |||
447 | txq->need_update = 0; | ||
448 | |||
449 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
450 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | ||
451 | |||
452 | /* Tell device where to find queue */ | ||
453 | priv->cfg->ops->lib->txq_init(priv, txq); | ||
454 | } | ||
455 | EXPORT_SYMBOL(iwl_tx_queue_reset); | ||
456 | |||
413 | /** | 457 | /** |
414 | * iwl_hw_txq_ctx_free - Free TXQ Context | 458 | * iwl_hw_txq_ctx_free - Free TXQ Context |
415 | * | 459 | * |
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
421 | 465 | ||
422 | /* Tx queues */ | 466 | /* Tx queues */ |
423 | if (priv->txq) { | 467 | if (priv->txq) { |
424 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; | 468 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) |
425 | txq_id++) | ||
426 | if (txq_id == IWL_CMD_QUEUE_NUM) | 469 | if (txq_id == IWL_CMD_QUEUE_NUM) |
427 | iwl_cmd_queue_free(priv); | 470 | iwl_cmd_queue_free(priv); |
428 | else | 471 | else |
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
438 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | 481 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); |
439 | 482 | ||
440 | /** | 483 | /** |
441 | * iwl_txq_ctx_reset - Reset TX queue context | 484 | * iwl_txq_ctx_alloc - allocate TX queue context |
442 | * Destroys all DMA structures and initialize them again | 485 | * Allocate all Tx DMA structures and initialize them |
443 | * | 486 | * |
444 | * @param priv | 487 | * @param priv |
445 | * @return error code | 488 | * @return error code |
446 | */ | 489 | */ |
447 | int iwl_txq_ctx_reset(struct iwl_priv *priv) | 490 | int iwl_txq_ctx_alloc(struct iwl_priv *priv) |
448 | { | 491 | { |
449 | int ret = 0; | 492 | int ret; |
450 | int txq_id, slots_num; | 493 | int txq_id, slots_num; |
451 | unsigned long flags; | 494 | unsigned long flags; |
452 | 495 | ||
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) | |||
504 | return ret; | 547 | return ret; |
505 | } | 548 | } |
506 | 549 | ||
550 | void iwl_txq_ctx_reset(struct iwl_priv *priv) | ||
551 | { | ||
552 | int txq_id, slots_num; | ||
553 | unsigned long flags; | ||
554 | |||
555 | spin_lock_irqsave(&priv->lock, flags); | ||
556 | |||
557 | /* Turn off all Tx DMA fifos */ | ||
558 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
559 | |||
560 | /* Tell NIC where to find the "keep warm" buffer */ | ||
561 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
562 | |||
563 | spin_unlock_irqrestore(&priv->lock, flags); | ||
564 | |||
565 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
566 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
567 | slots_num = txq_id == IWL_CMD_QUEUE_NUM ? | ||
568 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
569 | iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); | ||
570 | } | ||
571 | } | ||
572 | |||
507 | /** | 573 | /** |
508 | * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory | 574 | * iwl_txq_ctx_stop - Stop all Tx DMA channels |
509 | */ | 575 | */ |
510 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | 576 | void iwl_txq_ctx_stop(struct iwl_priv *priv) |
511 | { | 577 | { |
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv) | |||
525 | 1000); | 591 | 1000); |
526 | } | 592 | } |
527 | spin_unlock_irqrestore(&priv->lock, flags); | 593 | spin_unlock_irqrestore(&priv->lock, flags); |
528 | |||
529 | /* Deallocate memory for all Tx queues */ | ||
530 | iwl_hw_txq_ctx_free(priv); | ||
531 | } | 594 | } |
532 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | 595 | EXPORT_SYMBOL(iwl_txq_ctx_stop); |
533 | 596 | ||
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1050 | 1113 | ||
1051 | spin_lock_irqsave(&priv->hcmd_lock, flags); | 1114 | spin_lock_irqsave(&priv->hcmd_lock, flags); |
1052 | 1115 | ||
1116 | /* If this is a huge cmd, mark the huge flag also on the meta.flags | ||
1117 | * of the _original_ cmd. This is used for DMA mapping clean up. | ||
1118 | */ | ||
1119 | if (cmd->flags & CMD_SIZE_HUGE) { | ||
1120 | idx = get_cmd_index(q, q->write_ptr, 0); | ||
1121 | txq->meta[idx].flags = CMD_SIZE_HUGE; | ||
1122 | } | ||
1123 | |||
1053 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); | 1124 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); |
1054 | out_cmd = txq->cmd[idx]; | 1125 | out_cmd = txq->cmd[idx]; |
1055 | out_meta = &txq->meta[idx]; | 1126 | out_meta = &txq->meta[idx]; |
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1227 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); | 1298 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); |
1228 | struct iwl_device_cmd *cmd; | 1299 | struct iwl_device_cmd *cmd; |
1229 | struct iwl_cmd_meta *meta; | 1300 | struct iwl_cmd_meta *meta; |
1301 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | ||
1230 | 1302 | ||
1231 | /* If a Tx command is being handled and it isn't in the actual | 1303 | /* If a Tx command is being handled and it isn't in the actual |
1232 | * command queue then there a command routing bug has been introduced | 1304 | * command queue then there a command routing bug has been introduced |
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1240 | return; | 1312 | return; |
1241 | } | 1313 | } |
1242 | 1314 | ||
1243 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | 1315 | /* If this is a huge cmd, clear the huge flag on the meta.flags |
1244 | cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; | 1316 | * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap |
1245 | meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; | 1317 | * the DMA buffer for the scan (huge) command. |
1318 | */ | ||
1319 | if (huge) { | ||
1320 | cmd_index = get_cmd_index(&txq->q, index, 0); | ||
1321 | txq->meta[cmd_index].flags = 0; | ||
1322 | } | ||
1323 | cmd_index = get_cmd_index(&txq->q, index, huge); | ||
1324 | cmd = txq->cmd[cmd_index]; | ||
1325 | meta = &txq->meta[cmd_index]; | ||
1246 | 1326 | ||
1247 | pci_unmap_single(priv->pci_dev, | 1327 | pci_unmap_single(priv->pci_dev, |
1248 | pci_unmap_addr(meta, mapping), | 1328 | pci_unmap_addr(meta, mapping), |
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1264 | get_cmd_string(cmd->hdr.cmd)); | 1344 | get_cmd_string(cmd->hdr.cmd)); |
1265 | wake_up_interruptible(&priv->wait_command_queue); | 1345 | wake_up_interruptible(&priv->wait_command_queue); |
1266 | } | 1346 | } |
1347 | meta->flags = 0; | ||
1267 | } | 1348 | } |
1268 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | 1349 | EXPORT_SYMBOL(iwl_tx_cmd_complete); |
1269 | 1350 | ||
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 5ea587e59e48..37499127c801 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -679,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) | |||
679 | */ | 679 | */ |
680 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) | 680 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) |
681 | { | 681 | { |
682 | return state > PCI_D0 ? | 682 | return state >= PCI_D0 ? |
683 | pci_platform_power_transition(dev, state) : -EINVAL; | 683 | pci_platform_power_transition(dev, state) : -EINVAL; |
684 | } | 684 | } |
685 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); | 685 | EXPORT_SYMBOL_GPL(__pci_complete_power_transition); |
@@ -716,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
716 | */ | 716 | */ |
717 | return 0; | 717 | return 0; |
718 | 718 | ||
719 | /* Check if we're already there */ | ||
720 | if (dev->current_state == state) | ||
721 | return 0; | ||
722 | |||
723 | __pci_start_power_transition(dev, state); | 719 | __pci_start_power_transition(dev, state); |
724 | 720 | ||
725 | /* This device is quirked not to be put into D3, so | 721 | /* This device is quirked not to be put into D3, so |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index aa495ad9bbd4..7a711ee314b7 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -244,11 +244,17 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | |||
244 | 244 | ||
245 | /* Assert Secondary Bus Reset */ | 245 | /* Assert Secondary Bus Reset */ |
246 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); | 246 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl); |
247 | p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET; | 247 | p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET; |
248 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | 248 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); |
249 | 249 | ||
250 | /* | ||
251 | * we should send the hot reset message for 2ms to allow it time to | ||
252 | * propagate to all downstream ports | ||
253 | */ | ||
254 | msleep(2); | ||
255 | |||
250 | /* De-assert Secondary Bus Reset */ | 256 | /* De-assert Secondary Bus Reset */ |
251 | p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET; | 257 | p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; |
252 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); | 258 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl); |
253 | 259 | ||
254 | /* | 260 | /* |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 882bd8d29fe3..c82548afcd5c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -174,19 +174,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
174 | pci_read_config_dword(dev, pos, &sz); | 174 | pci_read_config_dword(dev, pos, &sz); |
175 | pci_write_config_dword(dev, pos, l); | 175 | pci_write_config_dword(dev, pos, l); |
176 | 176 | ||
177 | if (!sz) | ||
178 | goto fail; /* BAR not implemented */ | ||
179 | |||
180 | /* | 177 | /* |
181 | * All bits set in sz means the device isn't working properly. | 178 | * All bits set in sz means the device isn't working properly. |
182 | * If it's a memory BAR or a ROM, bit 0 must be clear; if it's | 179 | * If the BAR isn't implemented, all bits must be 0. If it's a |
183 | * an io BAR, bit 1 must be clear. | 180 | * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit |
181 | * 1 must be clear. | ||
184 | */ | 182 | */ |
185 | if (sz == 0xffffffff) { | 183 | if (!sz || sz == 0xffffffff) |
186 | dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n", | ||
187 | pos, sz); | ||
188 | goto fail; | 184 | goto fail; |
189 | } | ||
190 | 185 | ||
191 | /* | 186 | /* |
192 | * I don't know how l can have all bits set. Copied from old code. | 187 | * I don't know how l can have all bits set. Copied from old code. |
@@ -249,17 +244,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
249 | pos, res); | 244 | pos, res); |
250 | } | 245 | } |
251 | } else { | 246 | } else { |
252 | u32 size = pci_size(l, sz, mask); | 247 | sz = pci_size(l, sz, mask); |
253 | 248 | ||
254 | if (!size) { | 249 | if (!sz) |
255 | dev_err(&dev->dev, "reg %x: invalid size " | ||
256 | "(l %#x sz %#x mask %#x); broken device?", | ||
257 | pos, l, sz, mask); | ||
258 | goto fail; | 250 | goto fail; |
259 | } | ||
260 | 251 | ||
261 | res->start = l; | 252 | res->start = l; |
262 | res->end = l + size; | 253 | res->end = l + sz; |
263 | 254 | ||
264 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); | 255 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); |
265 | } | 256 | } |
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index f230f6543bff..854959cada3a 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c | |||
@@ -1484,6 +1484,11 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) | |||
1484 | if (!s) | 1484 | if (!s) |
1485 | return -EINVAL; | 1485 | return -EINVAL; |
1486 | 1486 | ||
1487 | if (s->functions) { | ||
1488 | WARN_ON(1); | ||
1489 | return -EINVAL; | ||
1490 | } | ||
1491 | |||
1487 | /* We do not want to validate the CIS cache... */ | 1492 | /* We do not want to validate the CIS cache... */ |
1488 | mutex_lock(&s->ops_mutex); | 1493 | mutex_lock(&s->ops_mutex); |
1489 | destroy_cis_cache(s); | 1494 | destroy_cis_cache(s); |
@@ -1639,7 +1644,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, | |||
1639 | count = 0; | 1644 | count = 0; |
1640 | else { | 1645 | else { |
1641 | struct pcmcia_socket *s; | 1646 | struct pcmcia_socket *s; |
1642 | unsigned int chains; | 1647 | unsigned int chains = 1; |
1643 | 1648 | ||
1644 | if (off + count > size) | 1649 | if (off + count > size) |
1645 | count = size - off; | 1650 | count = size - off; |
@@ -1648,7 +1653,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, | |||
1648 | 1653 | ||
1649 | if (!(s->state & SOCKET_PRESENT)) | 1654 | if (!(s->state & SOCKET_PRESENT)) |
1650 | return -ENODEV; | 1655 | return -ENODEV; |
1651 | if (pccard_validate_cis(s, &chains)) | 1656 | if (!s->functions && pccard_validate_cis(s, &chains)) |
1652 | return -EIO; | 1657 | return -EIO; |
1653 | if (!chains) | 1658 | if (!chains) |
1654 | return -ENODATA; | 1659 | return -ENODATA; |
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 6206408e196c..2d48196a48cd 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c | |||
@@ -166,8 +166,10 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) | |||
166 | 166 | ||
167 | ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, | 167 | ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, |
168 | IRQF_DISABLED, "pcmcia_insert", sock); | 168 | IRQF_DISABLED, "pcmcia_insert", sock); |
169 | if (ret) | 169 | if (ret) { |
170 | local_irq_restore(flags); | ||
170 | goto out1; | 171 | goto out1; |
172 | } | ||
171 | 173 | ||
172 | ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, | 174 | ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, |
173 | IRQF_DISABLED, "pcmcia_eject", sock); | 175 | IRQF_DISABLED, "pcmcia_eject", sock); |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index cb6036d89e59..508f94a2a78d 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -335,7 +335,6 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le | |||
335 | 335 | ||
336 | mutex_lock(&s->ops_mutex); | 336 | mutex_lock(&s->ops_mutex); |
337 | list_del(&p_dev->socket_device_list); | 337 | list_del(&p_dev->socket_device_list); |
338 | p_dev->_removed = 1; | ||
339 | mutex_unlock(&s->ops_mutex); | 338 | mutex_unlock(&s->ops_mutex); |
340 | 339 | ||
341 | dev_dbg(&p_dev->dev, "unregistering device\n"); | 340 | dev_dbg(&p_dev->dev, "unregistering device\n"); |
@@ -654,14 +653,7 @@ static int pcmcia_requery_callback(struct device *dev, void * _data) | |||
654 | 653 | ||
655 | static void pcmcia_requery(struct pcmcia_socket *s) | 654 | static void pcmcia_requery(struct pcmcia_socket *s) |
656 | { | 655 | { |
657 | int present, has_pfc; | 656 | int has_pfc; |
658 | |||
659 | mutex_lock(&s->ops_mutex); | ||
660 | present = s->pcmcia_state.present; | ||
661 | mutex_unlock(&s->ops_mutex); | ||
662 | |||
663 | if (!present) | ||
664 | return; | ||
665 | 657 | ||
666 | if (s->functions == 0) { | 658 | if (s->functions == 0) { |
667 | pcmcia_card_add(s); | 659 | pcmcia_card_add(s); |
@@ -687,12 +679,10 @@ static void pcmcia_requery(struct pcmcia_socket *s) | |||
687 | new_funcs = mfc.nfn; | 679 | new_funcs = mfc.nfn; |
688 | else | 680 | else |
689 | new_funcs = 1; | 681 | new_funcs = 1; |
690 | if (old_funcs > new_funcs) { | 682 | if (old_funcs != new_funcs) { |
683 | /* we need to re-start */ | ||
691 | pcmcia_card_remove(s, NULL); | 684 | pcmcia_card_remove(s, NULL); |
692 | pcmcia_card_add(s); | 685 | pcmcia_card_add(s); |
693 | } else if (new_funcs > old_funcs) { | ||
694 | s->functions = new_funcs; | ||
695 | pcmcia_device_add(s, 1); | ||
696 | } | 686 | } |
697 | } | 687 | } |
698 | 688 | ||
@@ -728,6 +718,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) | |||
728 | struct pcmcia_socket *s = dev->socket; | 718 | struct pcmcia_socket *s = dev->socket; |
729 | const struct firmware *fw; | 719 | const struct firmware *fw; |
730 | int ret = -ENOMEM; | 720 | int ret = -ENOMEM; |
721 | cistpl_longlink_mfc_t mfc; | ||
722 | int old_funcs, new_funcs = 1; | ||
731 | 723 | ||
732 | if (!filename) | 724 | if (!filename) |
733 | return -EINVAL; | 725 | return -EINVAL; |
@@ -750,6 +742,14 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) | |||
750 | goto release; | 742 | goto release; |
751 | } | 743 | } |
752 | 744 | ||
745 | /* we need to re-start if the number of functions changed */ | ||
746 | old_funcs = s->functions; | ||
747 | if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, | ||
748 | &mfc)) | ||
749 | new_funcs = mfc.nfn; | ||
750 | |||
751 | if (old_funcs != new_funcs) | ||
752 | ret = -EBUSY; | ||
753 | 753 | ||
754 | /* update information */ | 754 | /* update information */ |
755 | pcmcia_device_query(dev); | 755 | pcmcia_device_query(dev); |
@@ -820,11 +820,12 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
820 | } | 820 | } |
821 | 821 | ||
822 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { | 822 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) { |
823 | if (dev->device_no != did->device_no) | 823 | dev_dbg(&dev->dev, "this is a pseudo-multi-function device\n"); |
824 | return 0; | ||
825 | mutex_lock(&dev->socket->ops_mutex); | 824 | mutex_lock(&dev->socket->ops_mutex); |
826 | dev->socket->pcmcia_state.has_pfc = 1; | 825 | dev->socket->pcmcia_state.has_pfc = 1; |
827 | mutex_unlock(&dev->socket->ops_mutex); | 826 | mutex_unlock(&dev->socket->ops_mutex); |
827 | if (dev->device_no != did->device_no) | ||
828 | return 0; | ||
828 | } | 829 | } |
829 | 830 | ||
830 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { | 831 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID) { |
@@ -835,7 +836,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
835 | 836 | ||
836 | /* if this is a pseudo-multi-function device, | 837 | /* if this is a pseudo-multi-function device, |
837 | * we need explicit matches */ | 838 | * we need explicit matches */ |
838 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) | 839 | if (dev->socket->pcmcia_state.has_pfc) |
839 | return 0; | 840 | return 0; |
840 | if (dev->device_no) | 841 | if (dev->device_no) |
841 | return 0; | 842 | return 0; |
@@ -858,10 +859,8 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev, | |||
858 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { | 859 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { |
859 | dev_dbg(&dev->dev, "device needs a fake CIS\n"); | 860 | dev_dbg(&dev->dev, "device needs a fake CIS\n"); |
860 | if (!dev->socket->fake_cis) | 861 | if (!dev->socket->fake_cis) |
861 | pcmcia_load_firmware(dev, did->cisfile); | 862 | if (pcmcia_load_firmware(dev, did->cisfile)) |
862 | 863 | return 0; | |
863 | if (!dev->socket->fake_cis) | ||
864 | return 0; | ||
865 | } | 864 | } |
866 | 865 | ||
867 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { | 866 | if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { |
@@ -1254,9 +1253,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1254 | 1253 | ||
1255 | switch (event) { | 1254 | switch (event) { |
1256 | case CS_EVENT_CARD_REMOVAL: | 1255 | case CS_EVENT_CARD_REMOVAL: |
1257 | mutex_lock(&s->ops_mutex); | 1256 | atomic_set(&skt->present, 0); |
1258 | s->pcmcia_state.present = 0; | ||
1259 | mutex_unlock(&s->ops_mutex); | ||
1260 | pcmcia_card_remove(skt, NULL); | 1257 | pcmcia_card_remove(skt, NULL); |
1261 | handle_event(skt, event); | 1258 | handle_event(skt, event); |
1262 | mutex_lock(&s->ops_mutex); | 1259 | mutex_lock(&s->ops_mutex); |
@@ -1265,9 +1262,9 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1265 | break; | 1262 | break; |
1266 | 1263 | ||
1267 | case CS_EVENT_CARD_INSERTION: | 1264 | case CS_EVENT_CARD_INSERTION: |
1265 | atomic_set(&skt->present, 1); | ||
1268 | mutex_lock(&s->ops_mutex); | 1266 | mutex_lock(&s->ops_mutex); |
1269 | s->pcmcia_state.has_pfc = 0; | 1267 | s->pcmcia_state.has_pfc = 0; |
1270 | s->pcmcia_state.present = 1; | ||
1271 | destroy_cis_cache(s); /* to be on the safe side... */ | 1268 | destroy_cis_cache(s); /* to be on the safe side... */ |
1272 | mutex_unlock(&s->ops_mutex); | 1269 | mutex_unlock(&s->ops_mutex); |
1273 | pcmcia_card_add(skt); | 1270 | pcmcia_card_add(skt); |
@@ -1307,7 +1304,13 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority) | |||
1307 | return 0; | 1304 | return 0; |
1308 | } /* ds_event */ | 1305 | } /* ds_event */ |
1309 | 1306 | ||
1310 | 1307 | /* | |
1308 | * NOTE: This is racy. There's no guarantee the card will still be | ||
1309 | * physically present, even if the call to this function returns | ||
1310 | * non-NULL. Furthermore, the device driver most likely is unbound | ||
1311 | * almost immediately, so the timeframe where pcmcia_dev_present | ||
1312 | * returns NULL is probably very small. | ||
1313 | */ | ||
1311 | struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) | 1314 | struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) |
1312 | { | 1315 | { |
1313 | struct pcmcia_device *p_dev; | 1316 | struct pcmcia_device *p_dev; |
@@ -1317,22 +1320,9 @@ struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev) | |||
1317 | if (!p_dev) | 1320 | if (!p_dev) |
1318 | return NULL; | 1321 | return NULL; |
1319 | 1322 | ||
1320 | mutex_lock(&p_dev->socket->ops_mutex); | 1323 | if (atomic_read(&p_dev->socket->present) != 0) |
1321 | if (!p_dev->socket->pcmcia_state.present) | 1324 | ret = p_dev; |
1322 | goto out; | ||
1323 | |||
1324 | if (p_dev->socket->pcmcia_state.dead) | ||
1325 | goto out; | ||
1326 | |||
1327 | if (p_dev->_removed) | ||
1328 | goto out; | ||
1329 | |||
1330 | if (p_dev->suspended) | ||
1331 | goto out; | ||
1332 | 1325 | ||
1333 | ret = p_dev; | ||
1334 | out: | ||
1335 | mutex_unlock(&p_dev->socket->ops_mutex); | ||
1336 | pcmcia_put_dev(p_dev); | 1326 | pcmcia_put_dev(p_dev); |
1337 | return ret; | 1327 | return ret; |
1338 | } | 1328 | } |
@@ -1382,6 +1372,8 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev, | |||
1382 | return ret; | 1372 | return ret; |
1383 | } | 1373 | } |
1384 | 1374 | ||
1375 | atomic_set(&socket->present, 0); | ||
1376 | |||
1385 | return 0; | 1377 | return 0; |
1386 | } | 1378 | } |
1387 | 1379 | ||
@@ -1393,10 +1385,6 @@ static void pcmcia_bus_remove_socket(struct device *dev, | |||
1393 | if (!socket) | 1385 | if (!socket) |
1394 | return; | 1386 | return; |
1395 | 1387 | ||
1396 | mutex_lock(&socket->ops_mutex); | ||
1397 | socket->pcmcia_state.dead = 1; | ||
1398 | mutex_unlock(&socket->ops_mutex); | ||
1399 | |||
1400 | pccard_register_pcmcia(socket, NULL); | 1388 | pccard_register_pcmcia(socket, NULL); |
1401 | 1389 | ||
1402 | /* unregister any unbound devices */ | 1390 | /* unregister any unbound devices */ |
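The ds.c hunks above replace the mutex-guarded pcmcia_state.present/dead bookkeeping with a single atomic flag that readers may poll without taking a lock, accepting that the answer is inherently racy. A minimal sketch of that pattern in portable C11; the struct and function names below are illustrative stand-ins, not the kernel API.

    /* Presence flag that readers poll lock-free, mirroring the
     * atomic_set()/atomic_read() usage above. Illustrative only. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct socket_state {
            atomic_int present;             /* 1 = card inserted, 0 = removed */
    };

    static void card_insert(struct socket_state *s) { atomic_store(&s->present, 1); }
    static void card_remove(struct socket_state *s) { atomic_store(&s->present, 0); }

    /* Racy by design: the card may vanish right after this returns true. */
    static bool dev_present(struct socket_state *s)
    {
            return atomic_load(&s->present) != 0;
    }

    int main(void)
    {
            struct socket_state s = { 0 };

            card_insert(&s);
            printf("present: %d\n", dev_present(&s));
            card_remove(&s);
            printf("present: %d\n", dev_present(&s));
            return 0;
    }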
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index caec1dee2a4b..7c3d03bb4f30 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -755,12 +755,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) | |||
755 | else | 755 | else |
756 | printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); | 756 | printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); |
757 | 757 | ||
758 | #ifdef CONFIG_PCMCIA_PROBE | 758 | /* If the interrupt is already assigned, it must be the same */ |
759 | 759 | if (s->irq.AssignedIRQ != 0) | |
760 | if (s->irq.AssignedIRQ != 0) { | ||
761 | /* If the interrupt is already assigned, it must be the same */ | ||
762 | irq = s->irq.AssignedIRQ; | 760 | irq = s->irq.AssignedIRQ; |
763 | } else { | 761 | |
762 | #ifdef CONFIG_PCMCIA_PROBE | ||
763 | if (!irq) { | ||
764 | int try; | 764 | int try; |
765 | u32 mask = s->irq_mask; | 765 | u32 mask = s->irq_mask; |
766 | void *data = p_dev; /* something unique to this device */ | 766 | void *data = p_dev; /* something unique to this device */ |
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 559069a80a3b..a6eb7b59ba9f 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c | |||
@@ -214,7 +214,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, | |||
214 | return; | 214 | return; |
215 | } | 215 | } |
216 | for (i = base, most = 0; i < base+num; i += 8) { | 216 | for (i = base, most = 0; i < base+num; i += 8) { |
217 | res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); | 217 | res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); |
218 | if (!res) | 218 | if (!res) |
219 | continue; | 219 | continue; |
220 | hole = inb(i); | 220 | hole = inb(i); |
@@ -231,9 +231,14 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, | |||
231 | 231 | ||
232 | bad = any = 0; | 232 | bad = any = 0; |
233 | for (i = base; i < base+num; i += 8) { | 233 | for (i = base; i < base+num; i += 8) { |
234 | res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); | 234 | res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); |
235 | if (!res) | 235 | if (!res) { |
236 | if (!any) | ||
237 | printk(" excluding"); | ||
238 | if (!bad) | ||
239 | bad = any = i; | ||
236 | continue; | 240 | continue; |
241 | } | ||
237 | for (j = 0; j < 8; j++) | 242 | for (j = 0; j < 8; j++) |
238 | if (inb(i+j) != most) | 243 | if (inb(i+j) != most) |
239 | break; | 244 | break; |
@@ -253,6 +258,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, | |||
253 | } | 258 | } |
254 | if (bad) { | 259 | if (bad) { |
255 | if ((num > 16) && (bad == base) && (i == base+num)) { | 260 | if ((num > 16) && (bad == base) && (i == base+num)) { |
261 | sub_interval(&s_data->io_db, bad, i-bad); | ||
256 | printk(" nothing: probe failed.\n"); | 262 | printk(" nothing: probe failed.\n"); |
257 | return; | 263 | return; |
258 | } else { | 264 | } else { |
@@ -804,7 +810,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned | |||
804 | static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) | 810 | static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) |
805 | { | 811 | { |
806 | struct socket_data *data = s->resource_data; | 812 | struct socket_data *data = s->resource_data; |
807 | unsigned long size = end - start + 1; | 813 | unsigned long size; |
808 | int ret = 0; | 814 | int ret = 0; |
809 | 815 | ||
810 | #if defined(CONFIG_X86) | 816 | #if defined(CONFIG_X86) |
@@ -814,6 +820,8 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long | |||
814 | start = 0x100; | 820 | start = 0x100; |
815 | #endif | 821 | #endif |
816 | 822 | ||
823 | size = end - start + 1; | ||
824 | |||
817 | if (end < start) | 825 | if (end < start) |
818 | return -EINVAL; | 826 | return -EINVAL; |
819 | 827 | ||
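The adjust_io() change above moves the size computation so it happens only after the architecture-specific clamp of start; computing it first left size describing the pre-clamp window. A small runnable illustration of deriving a value only after every adjustment to its inputs (generic C, not the kernel routine):

    #include <stdio.h>

    static int adjust_window(unsigned long start, unsigned long end)
    {
            unsigned long size;

            if (start < 0x100)              /* clamp away the low legacy I/O ports */
                    start = 0x100;

            if (end < start)
                    return -1;

            size = end - start + 1;         /* now reflects the clamped start */
            printf("window: %#lx-%#lx (%lu ports)\n", start, end, size);
            return 0;
    }

    int main(void)
    {
            /* sizing before the clamp would have reported 1024 ports here */
            adjust_window(0x0, 0x3ff);
            return 0;
    }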
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 7bec4588c268..6c3320d75055 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -390,6 +390,7 @@ config EEEPC_WMI | |||
390 | depends on ACPI_WMI | 390 | depends on ACPI_WMI |
391 | depends on INPUT | 391 | depends on INPUT |
392 | depends on EXPERIMENTAL | 392 | depends on EXPERIMENTAL |
393 | select INPUT_SPARSEKMAP | ||
393 | ---help--- | 394 | ---help--- |
394 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. | 395 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. |
395 | 396 | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 52262b012abb..efe8f6388906 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -79,15 +79,15 @@ static uint wapf = 1; | |||
79 | module_param(wapf, uint, 0644); | 79 | module_param(wapf, uint, 0644); |
80 | MODULE_PARM_DESC(wapf, "WAPF value"); | 80 | MODULE_PARM_DESC(wapf, "WAPF value"); |
81 | 81 | ||
82 | static uint wlan_status = 1; | 82 | static int wlan_status = 1; |
83 | static uint bluetooth_status = 1; | 83 | static int bluetooth_status = 1; |
84 | 84 | ||
85 | module_param(wlan_status, uint, 0644); | 85 | module_param(wlan_status, int, 0644); |
86 | MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " | 86 | MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " |
87 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " | 87 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " |
88 | "default is 1"); | 88 | "default is 1"); |
89 | 89 | ||
90 | module_param(bluetooth_status, uint, 0644); | 90 | module_param(bluetooth_status, int, 0644); |
91 | MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " | 91 | MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " |
92 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " | 92 | "(0 = disabled, 1 = enabled, -1 = don't do anything). " |
93 | "default is 1"); | 93 | "default is 1"); |
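The asus-laptop change above turns wlan_status and bluetooth_status from uint into int because the documented "-1 = don't do anything" value only keeps its meaning in a signed type. A tiny runnable demonstration of why (plain C; the variable names are just for illustration):

    #include <stdio.h>

    int main(void)
    {
            int wlan_status = -1;                   /* signed: -1 keeps its meaning */
            unsigned int as_uint = (unsigned int)-1;

            if (wlan_status < 0)
                    printf("signed:   leave the radio alone\n");
            /* compilers warn that the next test is always false */
            if (as_uint < 0)
                    printf("unsigned: leave the radio alone\n");
            else
                    printf("unsigned: -1 became %u, radio would be forced on\n",
                           as_uint);
            return 0;
    }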
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 6ba6c30e5bb6..66f53c3c35e8 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -217,6 +217,7 @@ static void dell_wmi_notify(u32 value, void *context) | |||
217 | if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { | 217 | if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { |
218 | printk(KERN_INFO "dell-wmi: Received unknown WMI event" | 218 | printk(KERN_INFO "dell-wmi: Received unknown WMI event" |
219 | " (0x%x)\n", buffer_entry[1]); | 219 | " (0x%x)\n", buffer_entry[1]); |
220 | kfree(obj); | ||
220 | return; | 221 | return; |
221 | } | 222 | } |
222 | 223 | ||
@@ -234,7 +235,7 @@ static void dell_wmi_notify(u32 value, void *context) | |||
234 | key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { | 235 | key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) { |
235 | /* Don't report brightness notifications that will also | 236 | /* Don't report brightness notifications that will also |
236 | * come via ACPI */ | 237 | * come via ACPI */ |
237 | return; | 238 | ; |
238 | } else { | 239 | } else { |
239 | input_report_key(dell_wmi_input_dev, key->keycode, 1); | 240 | input_report_key(dell_wmi_input_dev, key->keycode, 1); |
240 | input_sync(dell_wmi_input_dev); | 241 | input_sync(dell_wmi_input_dev); |
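The dell-wmi hunks above make sure the ACPI object obtained from wmi_get_event_data() is freed on every path, including the unknown-event early return. A generic, runnable sketch of the single-cleanup-path style that avoids this kind of leak (all names are placeholders):

    #include <stdlib.h>
    #include <stdio.h>

    static int handle_event(int code)
    {
            int ret = 0;
            char *obj = malloc(64);         /* stand-in for the decoded event data */

            if (!obj)
                    return -1;

            if (code != 0x10) {             /* unknown event: still must free obj */
                    fprintf(stderr, "unknown event 0x%x\n", code);
                    ret = -1;
                    goto out;
            }

            printf("handling event 0x%x\n", code);
    out:
            free(obj);
            return ret;
    }

    int main(void)
    {
            handle_event(0x11);
            handle_event(0x10);
            return 0;
    }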
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 54a015785ca8..0306174ba875 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -169,7 +169,6 @@ struct eeepc_laptop { | |||
169 | struct backlight_device *backlight_device; | 169 | struct backlight_device *backlight_device; |
170 | 170 | ||
171 | struct input_dev *inputdev; | 171 | struct input_dev *inputdev; |
172 | struct key_entry *keymap; | ||
173 | 172 | ||
174 | struct rfkill *wlan_rfkill; | 173 | struct rfkill *wlan_rfkill; |
175 | struct rfkill *bluetooth_rfkill; | 174 | struct rfkill *bluetooth_rfkill; |
@@ -1204,8 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc) | |||
1204 | static void eeepc_input_exit(struct eeepc_laptop *eeepc) | 1203 | static void eeepc_input_exit(struct eeepc_laptop *eeepc) |
1205 | { | 1204 | { |
1206 | if (eeepc->inputdev) { | 1205 | if (eeepc->inputdev) { |
1206 | sparse_keymap_free(eeepc->inputdev); | ||
1207 | input_unregister_device(eeepc->inputdev); | 1207 | input_unregister_device(eeepc->inputdev); |
1208 | kfree(eeepc->keymap); | ||
1209 | } | 1208 | } |
1210 | } | 1209 | } |
1211 | 1210 | ||
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 9f8822658fd7..b227eb469f49 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
@@ -23,6 +23,8 @@ | |||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
27 | |||
26 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 29 | #include <linux/module.h> |
28 | #include <linux/init.h> | 30 | #include <linux/init.h> |
@@ -30,22 +32,34 @@ | |||
30 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
31 | #include <linux/input.h> | 33 | #include <linux/input.h> |
32 | #include <linux/input/sparse-keymap.h> | 34 | #include <linux/input/sparse-keymap.h> |
35 | #include <linux/fb.h> | ||
36 | #include <linux/backlight.h> | ||
37 | #include <linux/platform_device.h> | ||
33 | #include <acpi/acpi_bus.h> | 38 | #include <acpi/acpi_bus.h> |
34 | #include <acpi/acpi_drivers.h> | 39 | #include <acpi/acpi_drivers.h> |
35 | 40 | ||
41 | #define EEEPC_WMI_FILE "eeepc-wmi" | ||
42 | |||
36 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 43 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
37 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); | 44 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); |
38 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
39 | 46 | ||
40 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" | 47 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" |
48 | #define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
41 | 49 | ||
42 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); | 50 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); |
51 | MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID); | ||
43 | 52 | ||
44 | #define NOTIFY_BRNUP_MIN 0x11 | 53 | #define NOTIFY_BRNUP_MIN 0x11 |
45 | #define NOTIFY_BRNUP_MAX 0x1f | 54 | #define NOTIFY_BRNUP_MAX 0x1f |
46 | #define NOTIFY_BRNDOWN_MIN 0x20 | 55 | #define NOTIFY_BRNDOWN_MIN 0x20 |
47 | #define NOTIFY_BRNDOWN_MAX 0x2e | 56 | #define NOTIFY_BRNDOWN_MAX 0x2e |
48 | 57 | ||
58 | #define EEEPC_WMI_METHODID_DEVS 0x53564544 | ||
59 | #define EEEPC_WMI_METHODID_DSTS 0x53544344 | ||
60 | |||
61 | #define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012 | ||
62 | |||
49 | static const struct key_entry eeepc_wmi_keymap[] = { | 63 | static const struct key_entry eeepc_wmi_keymap[] = { |
50 | /* Sleep already handled via generic ACPI code */ | 64 | /* Sleep already handled via generic ACPI code */ |
51 | { KE_KEY, 0x5d, { KEY_WLAN } }, | 65 | { KE_KEY, 0x5d, { KEY_WLAN } }, |
@@ -58,18 +72,198 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
58 | { KE_END, 0}, | 72 | { KE_END, 0}, |
59 | }; | 73 | }; |
60 | 74 | ||
61 | static struct input_dev *eeepc_wmi_input_dev; | 75 | struct bios_args { |
76 | u32 dev_id; | ||
77 | u32 ctrl_param; | ||
78 | }; | ||
79 | |||
80 | struct eeepc_wmi { | ||
81 | struct input_dev *inputdev; | ||
82 | struct backlight_device *backlight_device; | ||
83 | }; | ||
84 | |||
85 | static struct platform_device *platform_device; | ||
86 | |||
87 | static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc) | ||
88 | { | ||
89 | int err; | ||
90 | |||
91 | eeepc->inputdev = input_allocate_device(); | ||
92 | if (!eeepc->inputdev) | ||
93 | return -ENOMEM; | ||
94 | |||
95 | eeepc->inputdev->name = "Eee PC WMI hotkeys"; | ||
96 | eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0"; | ||
97 | eeepc->inputdev->id.bustype = BUS_HOST; | ||
98 | eeepc->inputdev->dev.parent = &platform_device->dev; | ||
99 | |||
100 | err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL); | ||
101 | if (err) | ||
102 | goto err_free_dev; | ||
103 | |||
104 | err = input_register_device(eeepc->inputdev); | ||
105 | if (err) | ||
106 | goto err_free_keymap; | ||
107 | |||
108 | return 0; | ||
109 | |||
110 | err_free_keymap: | ||
111 | sparse_keymap_free(eeepc->inputdev); | ||
112 | err_free_dev: | ||
113 | input_free_device(eeepc->inputdev); | ||
114 | return err; | ||
115 | } | ||
116 | |||
117 | static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc) | ||
118 | { | ||
119 | if (eeepc->inputdev) { | ||
120 | sparse_keymap_free(eeepc->inputdev); | ||
121 | input_unregister_device(eeepc->inputdev); | ||
122 | } | ||
123 | |||
124 | eeepc->inputdev = NULL; | ||
125 | } | ||
126 | |||
127 | static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param) | ||
128 | { | ||
129 | struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id }; | ||
130 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
131 | union acpi_object *obj; | ||
132 | acpi_status status; | ||
133 | u32 tmp; | ||
134 | |||
135 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
136 | 1, EEEPC_WMI_METHODID_DSTS, &input, &output); | ||
137 | |||
138 | if (ACPI_FAILURE(status)) | ||
139 | return status; | ||
140 | |||
141 | obj = (union acpi_object *)output.pointer; | ||
142 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
143 | tmp = (u32)obj->integer.value; | ||
144 | else | ||
145 | tmp = 0; | ||
146 | |||
147 | if (ctrl_param) | ||
148 | *ctrl_param = tmp; | ||
149 | |||
150 | kfree(obj); | ||
151 | |||
152 | return status; | ||
153 | |||
154 | } | ||
155 | |||
156 | static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param) | ||
157 | { | ||
158 | struct bios_args args = { | ||
159 | .dev_id = dev_id, | ||
160 | .ctrl_param = ctrl_param, | ||
161 | }; | ||
162 | struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; | ||
163 | acpi_status status; | ||
164 | |||
165 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
166 | 1, EEEPC_WMI_METHODID_DEVS, &input, NULL); | ||
167 | |||
168 | return status; | ||
169 | } | ||
170 | |||
171 | static int read_brightness(struct backlight_device *bd) | ||
172 | { | ||
173 | static u32 ctrl_param; | ||
174 | acpi_status status; | ||
175 | |||
176 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param); | ||
177 | |||
178 | if (ACPI_FAILURE(status)) | ||
179 | return -1; | ||
180 | else | ||
181 | return ctrl_param & 0xFF; | ||
182 | } | ||
183 | |||
184 | static int update_bl_status(struct backlight_device *bd) | ||
185 | { | ||
186 | |||
187 | static u32 ctrl_param; | ||
188 | acpi_status status; | ||
189 | |||
190 | ctrl_param = bd->props.brightness; | ||
191 | |||
192 | status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param); | ||
193 | |||
194 | if (ACPI_FAILURE(status)) | ||
195 | return -1; | ||
196 | else | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static const struct backlight_ops eeepc_wmi_bl_ops = { | ||
201 | .get_brightness = read_brightness, | ||
202 | .update_status = update_bl_status, | ||
203 | }; | ||
204 | |||
205 | static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code) | ||
206 | { | ||
207 | struct backlight_device *bd = eeepc->backlight_device; | ||
208 | int old = bd->props.brightness; | ||
209 | int new; | ||
210 | |||
211 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
212 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
213 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
214 | new = code - NOTIFY_BRNDOWN_MIN; | ||
215 | |||
216 | bd->props.brightness = new; | ||
217 | backlight_update_status(bd); | ||
218 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
219 | |||
220 | return old; | ||
221 | } | ||
222 | |||
223 | static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc) | ||
224 | { | ||
225 | struct backlight_device *bd; | ||
226 | struct backlight_properties props; | ||
227 | |||
228 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
229 | props.max_brightness = 15; | ||
230 | bd = backlight_device_register(EEEPC_WMI_FILE, | ||
231 | &platform_device->dev, eeepc, | ||
232 | &eeepc_wmi_bl_ops, &props); | ||
233 | if (IS_ERR(bd)) { | ||
234 | pr_err("Could not register backlight device\n"); | ||
235 | return PTR_ERR(bd); | ||
236 | } | ||
237 | |||
238 | eeepc->backlight_device = bd; | ||
239 | |||
240 | bd->props.brightness = read_brightness(bd); | ||
241 | bd->props.power = FB_BLANK_UNBLANK; | ||
242 | backlight_update_status(bd); | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc) | ||
248 | { | ||
249 | if (eeepc->backlight_device) | ||
250 | backlight_device_unregister(eeepc->backlight_device); | ||
251 | |||
252 | eeepc->backlight_device = NULL; | ||
253 | } | ||
62 | 254 | ||
63 | static void eeepc_wmi_notify(u32 value, void *context) | 255 | static void eeepc_wmi_notify(u32 value, void *context) |
64 | { | 256 | { |
257 | struct eeepc_wmi *eeepc = context; | ||
65 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | 258 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; |
66 | union acpi_object *obj; | 259 | union acpi_object *obj; |
67 | acpi_status status; | 260 | acpi_status status; |
68 | int code; | 261 | int code; |
262 | int orig_code; | ||
69 | 263 | ||
70 | status = wmi_get_event_data(value, &response); | 264 | status = wmi_get_event_data(value, &response); |
71 | if (status != AE_OK) { | 265 | if (status != AE_OK) { |
72 | pr_err("EEEPC WMI: bad event status 0x%x\n", status); | 266 | pr_err("bad event status 0x%x\n", status); |
73 | return; | 267 | return; |
74 | } | 268 | } |
75 | 269 | ||
@@ -77,81 +271,142 @@ static void eeepc_wmi_notify(u32 value, void *context) | |||
77 | 271 | ||
78 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | 272 | if (obj && obj->type == ACPI_TYPE_INTEGER) { |
79 | code = obj->integer.value; | 273 | code = obj->integer.value; |
274 | orig_code = code; | ||
80 | 275 | ||
81 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | 276 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) |
82 | code = NOTIFY_BRNUP_MIN; | 277 | code = NOTIFY_BRNUP_MIN; |
83 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | 278 | else if (code >= NOTIFY_BRNDOWN_MIN && |
279 | code <= NOTIFY_BRNDOWN_MAX) | ||
84 | code = NOTIFY_BRNDOWN_MIN; | 280 | code = NOTIFY_BRNDOWN_MIN; |
85 | 281 | ||
86 | if (!sparse_keymap_report_event(eeepc_wmi_input_dev, | 282 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { |
283 | if (!acpi_video_backlight_support()) | ||
284 | eeepc_wmi_backlight_notify(eeepc, orig_code); | ||
285 | } | ||
286 | |||
287 | if (!sparse_keymap_report_event(eeepc->inputdev, | ||
87 | code, 1, true)) | 288 | code, 1, true)) |
88 | pr_info("EEEPC WMI: Unknown key %x pressed\n", code); | 289 | pr_info("Unknown key %x pressed\n", code); |
89 | } | 290 | } |
90 | 291 | ||
91 | kfree(obj); | 292 | kfree(obj); |
92 | } | 293 | } |
93 | 294 | ||
94 | static int eeepc_wmi_input_setup(void) | 295 | static int __devinit eeepc_wmi_platform_probe(struct platform_device *device) |
95 | { | 296 | { |
297 | struct eeepc_wmi *eeepc; | ||
96 | int err; | 298 | int err; |
299 | acpi_status status; | ||
97 | 300 | ||
98 | eeepc_wmi_input_dev = input_allocate_device(); | 301 | eeepc = platform_get_drvdata(device); |
99 | if (!eeepc_wmi_input_dev) | ||
100 | return -ENOMEM; | ||
101 | |||
102 | eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys"; | ||
103 | eeepc_wmi_input_dev->phys = "wmi/input0"; | ||
104 | eeepc_wmi_input_dev->id.bustype = BUS_HOST; | ||
105 | 302 | ||
106 | err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL); | 303 | err = eeepc_wmi_input_init(eeepc); |
107 | if (err) | 304 | if (err) |
108 | goto err_free_dev; | 305 | goto error_input; |
109 | 306 | ||
110 | err = input_register_device(eeepc_wmi_input_dev); | 307 | if (!acpi_video_backlight_support()) { |
111 | if (err) | 308 | err = eeepc_wmi_backlight_init(eeepc); |
112 | goto err_free_keymap; | 309 | if (err) |
310 | goto error_backlight; | ||
311 | } else | ||
312 | pr_info("Backlight controlled by ACPI video driver\n"); | ||
313 | |||
314 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | ||
315 | eeepc_wmi_notify, eeepc); | ||
316 | if (ACPI_FAILURE(status)) { | ||
317 | pr_err("Unable to register notify handler - %d\n", | ||
318 | status); | ||
319 | err = -ENODEV; | ||
320 | goto error_wmi; | ||
321 | } | ||
113 | 322 | ||
114 | return 0; | 323 | return 0; |
115 | 324 | ||
116 | err_free_keymap: | 325 | error_wmi: |
117 | sparse_keymap_free(eeepc_wmi_input_dev); | 326 | eeepc_wmi_backlight_exit(eeepc); |
118 | err_free_dev: | 327 | error_backlight: |
119 | input_free_device(eeepc_wmi_input_dev); | 328 | eeepc_wmi_input_exit(eeepc); |
329 | error_input: | ||
120 | return err; | 330 | return err; |
121 | } | 331 | } |
122 | 332 | ||
333 | static int __devexit eeepc_wmi_platform_remove(struct platform_device *device) | ||
334 | { | ||
335 | struct eeepc_wmi *eeepc; | ||
336 | |||
337 | eeepc = platform_get_drvdata(device); | ||
338 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
339 | eeepc_wmi_backlight_exit(eeepc); | ||
340 | eeepc_wmi_input_exit(eeepc); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static struct platform_driver platform_driver = { | ||
346 | .driver = { | ||
347 | .name = EEEPC_WMI_FILE, | ||
348 | .owner = THIS_MODULE, | ||
349 | }, | ||
350 | .probe = eeepc_wmi_platform_probe, | ||
351 | .remove = __devexit_p(eeepc_wmi_platform_remove), | ||
352 | }; | ||
353 | |||
123 | static int __init eeepc_wmi_init(void) | 354 | static int __init eeepc_wmi_init(void) |
124 | { | 355 | { |
356 | struct eeepc_wmi *eeepc; | ||
125 | int err; | 357 | int err; |
126 | acpi_status status; | ||
127 | 358 | ||
128 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) { | 359 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) || |
129 | pr_warning("EEEPC WMI: No known WMI GUID found\n"); | 360 | !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) { |
361 | pr_warning("No known WMI GUID found\n"); | ||
130 | return -ENODEV; | 362 | return -ENODEV; |
131 | } | 363 | } |
132 | 364 | ||
133 | err = eeepc_wmi_input_setup(); | 365 | eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL); |
134 | if (err) | 366 | if (!eeepc) |
135 | return err; | 367 | return -ENOMEM; |
136 | 368 | ||
137 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | 369 | platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1); |
138 | eeepc_wmi_notify, NULL); | 370 | if (!platform_device) { |
139 | if (ACPI_FAILURE(status)) { | 371 | pr_warning("Unable to allocate platform device\n"); |
140 | sparse_keymap_free(eeepc_wmi_input_dev); | 372 | err = -ENOMEM; |
141 | input_unregister_device(eeepc_wmi_input_dev); | 373 | goto fail_platform; |
142 | pr_err("EEEPC WMI: Unable to register notify handler - %d\n", | 374 | } |
143 | status); | 375 | |
144 | return -ENODEV; | 376 | err = platform_device_add(platform_device); |
377 | if (err) { | ||
378 | pr_warning("Unable to add platform device\n"); | ||
379 | goto put_dev; | ||
380 | } | ||
381 | |||
382 | platform_set_drvdata(platform_device, eeepc); | ||
383 | |||
384 | err = platform_driver_register(&platform_driver); | ||
385 | if (err) { | ||
386 | pr_warning("Unable to register platform driver\n"); | ||
387 | goto del_dev; | ||
145 | } | 388 | } |
146 | 389 | ||
147 | return 0; | 390 | return 0; |
391 | |||
392 | del_dev: | ||
393 | platform_device_del(platform_device); | ||
394 | put_dev: | ||
395 | platform_device_put(platform_device); | ||
396 | fail_platform: | ||
397 | kfree(eeepc); | ||
398 | |||
399 | return err; | ||
148 | } | 400 | } |
149 | 401 | ||
150 | static void __exit eeepc_wmi_exit(void) | 402 | static void __exit eeepc_wmi_exit(void) |
151 | { | 403 | { |
152 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | 404 | struct eeepc_wmi *eeepc; |
153 | sparse_keymap_free(eeepc_wmi_input_dev); | 405 | |
154 | input_unregister_device(eeepc_wmi_input_dev); | 406 | eeepc = platform_get_drvdata(platform_device); |
407 | platform_driver_unregister(&platform_driver); | ||
408 | platform_device_unregister(platform_device); | ||
409 | kfree(eeepc); | ||
155 | } | 410 | } |
156 | 411 | ||
157 | module_init(eeepc_wmi_init); | 412 | module_init(eeepc_wmi_init); |
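The rewritten eeepc-wmi driver above hangs its state off a platform device and registers a platform driver, unwinding in reverse order when any step fails. A condensed kernel-style sketch of that registration/unwind shape, with placeholder "example" names rather than the driver itself; it assumes a kernel-module build environment and is not a standalone program:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_device *example_pdev;

    static int example_probe(struct platform_device *pdev)
    {
            return 0;       /* bind per-device resources here */
    }

    static int example_remove(struct platform_device *pdev)
    {
            return 0;       /* tear them down here */
    }

    static struct platform_driver example_driver = {
            .driver = {
                    .name  = "example-wmi",
                    .owner = THIS_MODULE,
            },
            .probe  = example_probe,
            .remove = example_remove,
    };

    static int __init example_init(void)
    {
            int err;

            example_pdev = platform_device_alloc("example-wmi", -1);
            if (!example_pdev)
                    return -ENOMEM;

            err = platform_device_add(example_pdev);
            if (err)
                    goto put_dev;           /* allocated but never added */

            err = platform_driver_register(&example_driver);
            if (err)
                    goto del_dev;           /* added: delete, then drop the ref */

            return 0;

    del_dev:
            platform_device_del(example_pdev);
    put_dev:
            platform_device_put(example_pdev);
            return err;
    }

    static void __exit example_exit(void)
    {
            platform_driver_unregister(&example_driver);
            platform_device_unregister(example_pdev);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");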
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index b6218f11c957..552cad85ae5a 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c | |||
@@ -109,7 +109,7 @@ static int max8925_is_enabled(struct regulator_dev *rdev) | |||
109 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); | 109 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); |
110 | int ret; | 110 | int ret; |
111 | 111 | ||
112 | ret = max8925_reg_read(info->i2c, info->vol_reg); | 112 | ret = max8925_reg_read(info->i2c, info->enable_reg); |
113 | if (ret < 0) | 113 | if (ret < 0) |
114 | return ret; | 114 | return ret; |
115 | 115 | ||
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index a681f5e8f786..ad036dd8da13 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c | |||
@@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev) | |||
618 | dev_get_platdata(&pdev->dev); | 618 | dev_get_platdata(&pdev->dev); |
619 | int i; | 619 | int i; |
620 | 620 | ||
621 | platform_set_drvdata(pdev, NULL); | ||
622 | |||
621 | for (i = 0; i < pdata->num_regulators; i++) | 623 | for (i = 0; i < pdata->num_regulators; i++) |
622 | regulator_unregister(priv->regulators[i]); | 624 | regulator_unregister(priv->regulators[i]); |
623 | 625 | ||
626 | kfree(priv); | ||
624 | return 0; | 627 | return 0; |
625 | } | 628 | } |
626 | 629 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index bbea90baf98f..acf222f91f5a 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1899,7 +1899,8 @@ restart: | |||
1899 | /* Process requests that may be recovered */ | 1899 | /* Process requests that may be recovered */ |
1900 | if (cqr->status == DASD_CQR_NEED_ERP) { | 1900 | if (cqr->status == DASD_CQR_NEED_ERP) { |
1901 | erp_fn = base->discipline->erp_action(cqr); | 1901 | erp_fn = base->discipline->erp_action(cqr); |
1902 | erp_fn(cqr); | 1902 | if (IS_ERR(erp_fn(cqr))) |
1903 | continue; | ||
1903 | goto restart; | 1904 | goto restart; |
1904 | } | 1905 | } |
1905 | 1906 | ||
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 6927e751ce3e..6632649dd6aa 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2309,7 +2309,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) | |||
2309 | cqr->retries); | 2309 | cqr->retries); |
2310 | dasd_block_set_timer(device->block, (HZ << 3)); | 2310 | dasd_block_set_timer(device->block, (HZ << 3)); |
2311 | } | 2311 | } |
2312 | return cqr; | 2312 | return erp; |
2313 | } | 2313 | } |
2314 | 2314 | ||
2315 | ccw = cqr->cpaddr; | 2315 | ccw = cqr->cpaddr; |
@@ -2372,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr) | |||
2372 | /* add erp and initialize with default TIC */ | 2372 | /* add erp and initialize with default TIC */ |
2373 | erp = dasd_3990_erp_add_erp(cqr); | 2373 | erp = dasd_3990_erp_add_erp(cqr); |
2374 | 2374 | ||
2375 | if (IS_ERR(erp)) | ||
2376 | return erp; | ||
2377 | |||
2375 | /* inspect sense, determine specific ERP if possible */ | 2378 | /* inspect sense, determine specific ERP if possible */ |
2376 | if (erp != cqr) { | 2379 | if (erp != cqr) { |
2377 | 2380 | ||
@@ -2711,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2711 | if (erp == NULL) { | 2714 | if (erp == NULL) { |
2712 | /* no matching erp found - set up erp */ | 2715 | /* no matching erp found - set up erp */ |
2713 | erp = dasd_3990_erp_additional_erp(cqr); | 2716 | erp = dasd_3990_erp_additional_erp(cqr); |
2717 | if (IS_ERR(erp)) | ||
2718 | return erp; | ||
2714 | } else { | 2719 | } else { |
2715 | /* matching erp found - set all leading erp's to DONE */ | 2720 | /* matching erp found - set all leading erp's to DONE */ |
2716 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); | 2721 | erp = dasd_3990_erp_handle_match_erp(cqr, erp); |
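The dasd hunks above make the ERP setup path return an ERR_PTR-encoded pointer when allocation fails and have every caller check IS_ERR() before using the result. A small userspace re-creation of that convention so the propagation pattern can be compiled and run; the helpers below mimic, but are not, the <linux/err.h> macros, and the struct names are invented:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO       4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct request { int id; };

    static struct request *add_erp(struct request *orig, int fail)
    {
            if (fail)
                    return ERR_PTR(-ENOMEM);        /* allocation failed */
            return orig;                            /* normally: a new request */
    }

    static struct request *build_erp(struct request *orig, int fail)
    {
            struct request *erp = add_erp(orig, fail);

            if (IS_ERR(erp))
                    return erp;     /* hand the encoded error straight back */

            /* ... normally: inspect sense data, pick a specific recovery ... */
            return erp;
    }

    int main(void)
    {
            struct request r = { .id = 1 };
            struct request *erp = build_erp(&r, 1);

            if (IS_ERR(erp))
                    printf("erp setup failed: %ld\n", PTR_ERR(erp));
            return 0;
    }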
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 18daf16aa357..7217966f7d31 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -638,11 +638,7 @@ static int __init zcore_reipl_init(void) | |||
638 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); | 638 | rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); |
639 | else | 639 | else |
640 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); | 640 | rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); |
641 | if (rc) { | 641 | if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) != |
642 | free_page((unsigned long) ipl_block); | ||
643 | return rc; | ||
644 | } | ||
645 | if (csum_partial(ipl_block, ipl_block->hdr.len, 0) != | ||
646 | ipib_info.checksum) { | 642 | ipib_info.checksum) { |
647 | TRACE("Checksum does not match\n"); | 643 | TRACE("Checksum does not match\n"); |
648 | free_page((unsigned long) ipl_block); | 644 | free_page((unsigned long) ipl_block); |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4038f5b4f144..ce7cb87479fe 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | 30 | ||
31 | static void *sei_page; | 31 | static void *sei_page; |
32 | static DEFINE_SPINLOCK(sda_lock); | ||
32 | 33 | ||
33 | /** | 34 | /** |
34 | * chsc_error_from_response() - convert a chsc response to an error | 35 | * chsc_error_from_response() - convert a chsc response to an error |
@@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void) | |||
832 | kfree(sei_page); | 833 | kfree(sei_page); |
833 | } | 834 | } |
834 | 835 | ||
835 | int __init | 836 | int chsc_enable_facility(int operation_code) |
836 | chsc_enable_facility(int operation_code) | ||
837 | { | 837 | { |
838 | int ret; | 838 | int ret; |
839 | struct { | 839 | static struct { |
840 | struct chsc_header request; | 840 | struct chsc_header request; |
841 | u8 reserved1:4; | 841 | u8 reserved1:4; |
842 | u8 format:4; | 842 | u8 format:4; |
@@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code) | |||
849 | u32 reserved5:4; | 849 | u32 reserved5:4; |
850 | u32 format2:4; | 850 | u32 format2:4; |
851 | u32 reserved6:24; | 851 | u32 reserved6:24; |
852 | } __attribute__ ((packed)) *sda_area; | 852 | } __attribute__ ((packed, aligned(4096))) sda_area; |
853 | 853 | ||
854 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); | 854 | spin_lock(&sda_lock); |
855 | if (!sda_area) | 855 | memset(&sda_area, 0, sizeof(sda_area)); |
856 | return -ENOMEM; | 856 | sda_area.request.length = 0x0400; |
857 | sda_area->request.length = 0x0400; | 857 | sda_area.request.code = 0x0031; |
858 | sda_area->request.code = 0x0031; | 858 | sda_area.operation_code = operation_code; |
859 | sda_area->operation_code = operation_code; | ||
860 | 859 | ||
861 | ret = chsc(sda_area); | 860 | ret = chsc(&sda_area); |
862 | if (ret > 0) { | 861 | if (ret > 0) { |
863 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 862 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
864 | goto out; | 863 | goto out; |
865 | } | 864 | } |
866 | 865 | ||
867 | switch (sda_area->response.code) { | 866 | switch (sda_area.response.code) { |
868 | case 0x0101: | 867 | case 0x0101: |
869 | ret = -EOPNOTSUPP; | 868 | ret = -EOPNOTSUPP; |
870 | break; | 869 | break; |
871 | default: | 870 | default: |
872 | ret = chsc_error_from_response(sda_area->response.code); | 871 | ret = chsc_error_from_response(sda_area.response.code); |
873 | } | 872 | } |
874 | if (ret != 0) | 873 | if (ret != 0) |
875 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 874 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
876 | operation_code, sda_area->response.code); | 875 | operation_code, sda_area.response.code); |
877 | out: | 876 | out: |
878 | free_page((unsigned long)sda_area); | 877 | spin_unlock(&sda_lock); |
879 | return ret; | 878 | return ret; |
880 | } | 879 | } |
881 | 880 | ||
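chsc_enable_facility() above stops allocating a DMA page per call and instead uses one static, page-aligned request buffer serialized by a spinlock, which is why the __init marking could be dropped. A rough userspace analogue of the static-scratch-buffer-plus-lock idea; the field names and sizes below are invented, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <string.h>
    #include <stdio.h>

    static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct {
            unsigned short length;
            unsigned short code;
            unsigned char  operation_code;
            unsigned char  payload[64];
    } __attribute__((packed, aligned(4096))) scratch;

    static int enable_facility(unsigned char operation_code)
    {
            pthread_mutex_lock(&scratch_lock);
            memset(&scratch, 0, sizeof(scratch));
            scratch.length = 0x0400;
            scratch.code = 0x0031;
            scratch.operation_code = operation_code;
            /* a real implementation would issue the request here */
            pthread_mutex_unlock(&scratch_lock);
            return 0;
    }

    int main(void)
    {
            printf("enable: %d (buffer at %p)\n",
                   enable_facility(2), (void *)&scratch);
            return 0;
    }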
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 404f630c27ca..3b6f4adc5094 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -124,7 +124,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch) | |||
124 | * since we don't have a way to clear the subchannel and | 124 | * since we don't have a way to clear the subchannel and |
125 | * cannot disable it with a request running. | 125 | * cannot disable it with a request running. |
126 | */ | 126 | */ |
127 | cc = stsch(sch->schid, &schib); | 127 | cc = stsch_err(sch->schid, &schib); |
128 | if (!cc && scsw_stctl(&schib.scsw)) | 128 | if (!cc && scsw_stctl(&schib.scsw)) |
129 | return -EAGAIN; | 129 | return -EAGAIN; |
130 | return 0; | 130 | return 0; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index f736cdcf08ad..5feea1a371e1 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel *sch) | |||
361 | struct schib schib; | 361 | struct schib schib; |
362 | int ccode, retry, ret = 0; | 362 | int ccode, retry, ret = 0; |
363 | 363 | ||
364 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 364 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
365 | return -ENODEV; | 365 | return -ENODEV; |
366 | 366 | ||
367 | for (retry = 0; retry < 5; retry++) { | 367 | for (retry = 0; retry < 5; retry++) { |
@@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel *sch) | |||
372 | return ccode; | 372 | return ccode; |
373 | switch (ccode) { | 373 | switch (ccode) { |
374 | case 0: /* successful */ | 374 | case 0: /* successful */ |
375 | if (stsch(sch->schid, &schib) || | 375 | if (stsch_err(sch->schid, &schib) || |
376 | !css_sch_is_valid(&schib)) | 376 | !css_sch_is_valid(&schib)) |
377 | return -ENODEV; | 377 | return -ENODEV; |
378 | if (cio_check_config(sch, &schib)) { | 378 | if (cio_check_config(sch, &schib)) { |
@@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel *sch) | |||
404 | { | 404 | { |
405 | struct schib schib; | 405 | struct schib schib; |
406 | 406 | ||
407 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | 407 | if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) |
408 | return -ENODEV; | 408 | return -ENODEV; |
409 | 409 | ||
410 | memcpy(&sch->schib, &schib, sizeof(schib)); | 410 | memcpy(&sch->schib, &schib, sizeof(schib)); |
@@ -771,7 +771,7 @@ cio_get_console_sch_no(void) | |||
771 | if (console_irq != -1) { | 771 | if (console_irq != -1) { |
772 | /* VM provided us with the irq number of the console. */ | 772 | /* VM provided us with the irq number of the console. */ |
773 | schid.sch_no = console_irq; | 773 | schid.sch_no = console_irq; |
774 | if (stsch(schid, &console_subchannel.schib) != 0 || | 774 | if (stsch_err(schid, &console_subchannel.schib) != 0 || |
775 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || | 775 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || |
776 | !console_subchannel.schib.pmcw.dnv) | 776 | !console_subchannel.schib.pmcw.dnv) |
777 | return -1; | 777 | return -1; |
@@ -863,10 +863,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
863 | cc = 0; | 863 | cc = 0; |
864 | for (retry=0;retry<3;retry++) { | 864 | for (retry=0;retry<3;retry++) { |
865 | schib->pmcw.ena = 0; | 865 | schib->pmcw.ena = 0; |
866 | cc = msch(schid, schib); | 866 | cc = msch_err(schid, schib); |
867 | if (cc) | 867 | if (cc) |
868 | return (cc==3?-ENODEV:-EBUSY); | 868 | return (cc==3?-ENODEV:-EBUSY); |
869 | if (stsch(schid, schib) || !css_sch_is_valid(schib)) | 869 | if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) |
870 | return -ENODEV; | 870 | return -ENODEV; |
871 | if (!schib->pmcw.ena) | 871 | if (!schib->pmcw.ena) |
872 | return 0; | 872 | return 0; |
@@ -913,7 +913,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr) | |||
913 | 913 | ||
914 | pgm_check_occured = 0; | 914 | pgm_check_occured = 0; |
915 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; | 915 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; |
916 | rc = stsch(schid, addr); | 916 | rc = stsch_err(schid, addr); |
917 | s390_base_pgm_handler_fn = NULL; | 917 | s390_base_pgm_handler_fn = NULL; |
918 | 918 | ||
919 | /* The program check handler could have changed pgm_check_occured. */ | 919 | /* The program check handler could have changed pgm_check_occured. */ |
@@ -950,7 +950,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) | |||
950 | /* No default clear strategy */ | 950 | /* No default clear strategy */ |
951 | break; | 951 | break; |
952 | } | 952 | } |
953 | stsch(schid, &schib); | 953 | stsch_err(schid, &schib); |
954 | __disable_subchannel_easy(schid, &schib); | 954 | __disable_subchannel_easy(schid, &schib); |
955 | } | 955 | } |
956 | out: | 956 | out: |
@@ -1086,7 +1086,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; | 1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; |
1087 | if (!schid.one) | 1087 | if (!schid.one) |
1088 | return -ENODEV; | 1088 | return -ENODEV; |
1089 | if (stsch(schid, &schib)) | 1089 | if (stsch_err(schid, &schib)) |
1090 | return -ENODEV; | 1090 | return -ENODEV; |
1091 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) | 1091 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) |
1092 | return -ENODEV; | 1092 | return -ENODEV; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2769da54f2b9..511649115bd7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -870,15 +870,10 @@ static int __init css_bus_init(void) | |||
870 | 870 | ||
871 | /* Try to enable MSS. */ | 871 | /* Try to enable MSS. */ |
872 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); | 872 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); |
873 | switch (ret) { | 873 | if (ret) |
874 | case 0: /* Success. */ | ||
875 | max_ssid = __MAX_SSID; | ||
876 | break; | ||
877 | case -ENOMEM: | ||
878 | goto out; | ||
879 | default: | ||
880 | max_ssid = 0; | 874 | max_ssid = 0; |
881 | } | 875 | else /* Success. */ |
876 | max_ssid = __MAX_SSID; | ||
882 | 877 | ||
883 | ret = slow_subchannel_init(); | 878 | ret = slow_subchannel_init(); |
884 | if (ret) | 879 | if (ret) |
@@ -1048,6 +1043,11 @@ static int __init channel_subsystem_init_sync(void) | |||
1048 | } | 1043 | } |
1049 | subsys_initcall_sync(channel_subsystem_init_sync); | 1044 | subsys_initcall_sync(channel_subsystem_init_sync); |
1050 | 1045 | ||
1046 | void channel_subsystem_reinit(void) | ||
1047 | { | ||
1048 | chsc_enable_facility(CHSC_SDA_OC_MSS); | ||
1049 | } | ||
1050 | |||
1051 | #ifdef CONFIG_PROC_FS | 1051 | #ifdef CONFIG_PROC_FS |
1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, | 1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, |
1053 | size_t count, loff_t *ppos) | 1053 | size_t count, loff_t *ppos) |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c56ab94612f9..c9b852647f01 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) | |||
45 | sch = to_subchannel(cdev->dev.parent); | 45 | sch = to_subchannel(cdev->dev.parent); |
46 | private = to_io_private(sch); | 46 | private = to_io_private(sch); |
47 | orb = &private->orb; | 47 | orb = &private->orb; |
48 | cc = stsch(sch->schid, &schib); | 48 | cc = stsch_err(sch->schid, &schib); |
49 | 49 | ||
50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " | 50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " |
51 | "device information:\n", get_clock()); | 51 | "device information:\n", get_clock()); |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 18564891ea61..b3b1d2f79398 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -2105,7 +2105,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2105 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; | 2105 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; |
2106 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; | 2106 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; |
2107 | 2107 | ||
2108 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { | 2108 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && |
2109 | !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { | ||
2109 | blktrc.flags |= ZFCP_BLK_LAT_VALID; | 2110 | blktrc.flags |= ZFCP_BLK_LAT_VALID; |
2110 | blktrc.channel_lat = lat_in->channel_lat * ticks; | 2111 | blktrc.channel_lat = lat_in->channel_lat * ticks; |
2111 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; | 2112 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; |
@@ -2157,9 +2158,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | |||
2157 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; | 2158 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; |
2158 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); | 2159 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); |
2159 | 2160 | ||
2160 | zfcp_fsf_req_trace(req, scpnt); | ||
2161 | |||
2162 | skip_fsfstatus: | 2161 | skip_fsfstatus: |
2162 | zfcp_fsf_req_trace(req, scpnt); | ||
2163 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); | 2163 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); |
2164 | 2164 | ||
2165 | scpnt->host_scribble = NULL; | 2165 | scpnt->host_scribble = NULL; |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 72617b650a7e..e641922f20bc 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -169,6 +169,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, | |||
169 | SE_DEBUG(DBG_LVL_1, | 169 | SE_DEBUG(DBG_LVL_1, |
170 | "Failed to allocate memory for" | 170 | "Failed to allocate memory for" |
171 | "mgmt_invalidate_icds \n"); | 171 | "mgmt_invalidate_icds \n"); |
172 | spin_unlock(&ctrl->mbox_lock); | ||
172 | return -1; | 173 | return -1; |
173 | } | 174 | } |
174 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); | 175 | nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); |
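The be_mgmt fix above adds the missing spin_unlock() on the allocation-failure path, which would otherwise leave mbox_lock held forever. A runnable userspace sketch of keeping lock release paired with every early exit, with a pthread mutex standing in for the spinlock and invented names:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

    static int issue_command(size_t size)
    {
            void *cmd;

            pthread_mutex_lock(&mbox_lock);
            cmd = malloc(size);
            if (!cmd) {
                    pthread_mutex_unlock(&mbox_lock);       /* don't leak the lock */
                    return -1;
            }

            /* ... build and post the command ... */
            free(cmd);
            pthread_mutex_unlock(&mbox_lock);
            return 0;
    }

    int main(void)
    {
            return issue_command(128) ? 1 : 0;
    }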
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 6cf9dc37d78b..6b624e767d3b 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h | |||
@@ -362,6 +362,7 @@ struct bnx2i_hba { | |||
362 | u32 num_ccell; | 362 | u32 num_ccell; |
363 | 363 | ||
364 | int ofld_conns_active; | 364 | int ofld_conns_active; |
365 | wait_queue_head_t eh_wait; | ||
365 | 366 | ||
366 | int max_active_conns; | 367 | int max_active_conns; |
367 | struct iscsi_cid_queue cid_que; | 368 | struct iscsi_cid_queue cid_que; |
@@ -381,6 +382,7 @@ struct bnx2i_hba { | |||
381 | spinlock_t lock; /* protects hba structure access */ | 382 | spinlock_t lock; /* protects hba structure access */ |
382 | struct mutex net_dev_lock;/* sync net device access */ | 383 | struct mutex net_dev_lock;/* sync net device access */ |
383 | 384 | ||
385 | int hba_shutdown_tmo; | ||
384 | /* | 386 | /* |
385 | * PCI related info. | 387 | * PCI related info. |
386 | */ | 388 | */ |
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 6d8172e781cf..5d9296c599f6 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
@@ -177,11 +177,22 @@ void bnx2i_stop(void *handle) | |||
177 | struct bnx2i_hba *hba = handle; | 177 | struct bnx2i_hba *hba = handle; |
178 | 178 | ||
179 | /* check if cleanup happened in GOING_DOWN context */ | 179 | /* check if cleanup happened in GOING_DOWN context */ |
180 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | ||
181 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, | 180 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, |
182 | &hba->adapter_state)) | 181 | &hba->adapter_state)) |
183 | iscsi_host_for_each_session(hba->shost, | 182 | iscsi_host_for_each_session(hba->shost, |
184 | bnx2i_drop_session); | 183 | bnx2i_drop_session); |
184 | |||
185 | /* Wait for all endpoints to be torn down; the chip will be reset once | ||
186 | * control returns to the network driver. So all connection resources | ||
187 | * must be cleaned up and released before returning from this routine. | ||
188 | */ | ||
189 | wait_event_interruptible_timeout(hba->eh_wait, | ||
190 | (hba->ofld_conns_active == 0), | ||
191 | hba->hba_shutdown_tmo); | ||
192 | /* This flag should be cleared last so that ep_disconnect() gracefully | ||
193 | * cleans up connection context | ||
194 | */ | ||
195 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | ||
185 | } | 196 | } |
186 | 197 | ||
187 | /** | 198 | /** |
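bnx2i_stop() above now waits, with a per-chip timeout, for all offloaded connections to drain before clearing the adapter-up flag, and the connection-teardown path wakes that waiter. A kernel-style sketch of the wait/wake pairing; it is not standalone-runnable, and the struct and function names are invented:

    #include <linux/wait.h>
    #include <linux/sched.h>

    struct example_hba {
            wait_queue_head_t eh_wait;
            int conns_active;
            unsigned long shutdown_tmo;     /* e.g. 30 * HZ */
    };

    static void example_stop(struct example_hba *hba)
    {
            /* Returns early if interrupted or if the timeout expires; either
             * way the caller proceeds with teardown afterwards. */
            wait_event_interruptible_timeout(hba->eh_wait,
                                             hba->conns_active == 0,
                                             hba->shutdown_tmo);
    }

    static void example_conn_destroy(struct example_hba *hba)
    {
            hba->conns_active--;            /* real code serializes this update */
            wake_up_interruptible(&hba->eh_wait);
    }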
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f2e9b18fe76c..fa68ab34b998 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
@@ -820,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) | |||
820 | 820 | ||
821 | spin_lock_init(&hba->lock); | 821 | spin_lock_init(&hba->lock); |
822 | mutex_init(&hba->net_dev_lock); | 822 | mutex_init(&hba->net_dev_lock); |
823 | init_waitqueue_head(&hba->eh_wait); | ||
824 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) | ||
825 | hba->hba_shutdown_tmo = 240 * HZ; | ||
826 | else /* 5706/5708/5709 */ | ||
827 | hba->hba_shutdown_tmo = 30 * HZ; | ||
823 | 828 | ||
824 | if (iscsi_host_add(shost, &hba->pcidev->dev)) | 829 | if (iscsi_host_add(shost, &hba->pcidev->dev)) |
825 | goto free_dump_mem; | 830 | goto free_dump_mem; |
@@ -1658,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1658 | */ | 1663 | */ |
1659 | hba = bnx2i_check_route(dst_addr); | 1664 | hba = bnx2i_check_route(dst_addr); |
1660 | 1665 | ||
1661 | if (!hba) { | 1666 | if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { |
1662 | rc = -ENOMEM; | 1667 | rc = -EINVAL; |
1663 | goto check_busy; | 1668 | goto check_busy; |
1664 | } | 1669 | } |
1665 | 1670 | ||
@@ -1804,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
1804 | (bnx2i_ep->state == | 1809 | (bnx2i_ep->state == |
1805 | EP_STATE_CONNECT_COMPL)), | 1810 | EP_STATE_CONNECT_COMPL)), |
1806 | msecs_to_jiffies(timeout_ms)); | 1811 | msecs_to_jiffies(timeout_ms)); |
1807 | if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) | 1812 | if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) |
1808 | rc = -1; | 1813 | rc = -1; |
1809 | 1814 | ||
1810 | if (rc > 0) | 1815 | if (rc > 0) |
@@ -1957,6 +1962,8 @@ return_bnx2i_ep: | |||
1957 | 1962 | ||
1958 | if (!hba->ofld_conns_active) | 1963 | if (!hba->ofld_conns_active) |
1959 | bnx2i_unreg_dev_all(); | 1964 | bnx2i_unreg_dev_all(); |
1965 | |||
1966 | wake_up_interruptible(&hba->eh_wait); | ||
1960 | } | 1967 | } |
1961 | 1968 | ||
1962 | 1969 | ||
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 496764349c41..0435d044c9da 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids); | |||
188 | static int adpt_detect(struct scsi_host_template* sht) | 188 | static int adpt_detect(struct scsi_host_template* sht) |
189 | { | 189 | { |
190 | struct pci_dev *pDev = NULL; | 190 | struct pci_dev *pDev = NULL; |
191 | adpt_hba* pHba; | 191 | adpt_hba *pHba; |
192 | adpt_hba *next; | ||
192 | 193 | ||
193 | PINFO("Detecting Adaptec I2O RAID controllers...\n"); | 194 | PINFO("Detecting Adaptec I2O RAID controllers...\n"); |
194 | 195 | ||
@@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_template* sht) | |||
206 | } | 207 | } |
207 | 208 | ||
208 | /* In INIT state, Activate IOPs */ | 209 | /* In INIT state, Activate IOPs */ |
209 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 210 | for (pHba = hba_chain; pHba; pHba = next) { |
211 | next = pHba->next; | ||
210 | // Activate does get status , init outbound, and get hrt | 212 | // Activate does get status , init outbound, and get hrt |
211 | if (adpt_i2o_activate_hba(pHba) < 0) { | 213 | if (adpt_i2o_activate_hba(pHba) < 0) { |
212 | adpt_i2o_delete_hba(pHba); | 214 | adpt_i2o_delete_hba(pHba); |
@@ -243,7 +245,8 @@ rebuild_sys_tab: | |||
243 | PDEBUG("HBA's in OPERATIONAL state\n"); | 245 | PDEBUG("HBA's in OPERATIONAL state\n"); |
244 | 246 | ||
245 | printk("dpti: If you have a lot of devices this could take a few minutes.\n"); | 247 | printk("dpti: If you have a lot of devices this could take a few minutes.\n"); |
246 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 248 | for (pHba = hba_chain; pHba; pHba = next) { |
249 | next = pHba->next; | ||
247 | printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); | 250 | printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); |
248 | if (adpt_i2o_lct_get(pHba) < 0){ | 251 | if (adpt_i2o_lct_get(pHba) < 0){ |
249 | adpt_i2o_delete_hba(pHba); | 252 | adpt_i2o_delete_hba(pHba); |
@@ -263,7 +266,8 @@ rebuild_sys_tab: | |||
263 | adpt_sysfs_class = NULL; | 266 | adpt_sysfs_class = NULL; |
264 | } | 267 | } |
265 | 268 | ||
266 | for (pHba = hba_chain; pHba; pHba = pHba->next) { | 269 | for (pHba = hba_chain; pHba; pHba = next) { |
270 | next = pHba->next; | ||
267 | if (adpt_scsi_host_alloc(pHba, sht) < 0){ | 271 | if (adpt_scsi_host_alloc(pHba, sht) < 0){ |
268 | adpt_i2o_delete_hba(pHba); | 272 | adpt_i2o_delete_hba(pHba); |
269 | continue; | 273 | continue; |
@@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba) | |||
1229 | } | 1233 | } |
1230 | } | 1234 | } |
1231 | pci_dev_put(pHba->pDev); | 1235 | pci_dev_put(pHba->pDev); |
1232 | kfree(pHba); | ||
1233 | |||
1234 | if (adpt_sysfs_class) | 1236 | if (adpt_sysfs_class) |
1235 | device_destroy(adpt_sysfs_class, | 1237 | device_destroy(adpt_sysfs_class, |
1236 | MKDEV(DPTI_I2O_MAJOR, pHba->unit)); | 1238 | MKDEV(DPTI_I2O_MAJOR, pHba->unit)); |
1239 | kfree(pHba); | ||
1237 | 1240 | ||
1238 | if(hba_count <= 0){ | 1241 | if(hba_count <= 0){ |
1239 | unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); | 1242 | unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); |
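The dpt_i2o hunks above cache pHba->next before the loop body runs, because adpt_i2o_delete_hba() can free the node currently being visited. A minimal userspace sketch of that deletion-safe traversal follows; struct hba, prune_broken and push are illustrative names, not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    struct hba {
        int id;
        int broken;                 /* nonzero: drop this node during the walk */
        struct hba *next;
    };

    /* Walk the chain, unlinking and freeing nodes that fail. The successor is
     * cached before the body runs, so freeing the current node cannot break
     * the iteration -- the same idea as caching pHba->next above. */
    static void prune_broken(struct hba **head)
    {
        struct hba **link = head;
        struct hba *cur, *next;

        for (cur = *head; cur; cur = next) {
            next = cur->next;       /* cache before cur may be freed */
            if (cur->broken) {
                *link = next;
                free(cur);
            } else {
                printf("kept %d\n", cur->id);
                link = &cur->next;
            }
        }
    }

    static struct hba *push(struct hba *head, int id, int broken)
    {
        struct hba *n = malloc(sizeof(*n));
        n->id = id;
        n->broken = broken;
        n->next = head;
        return n;
    }

    int main(void)
    {
        struct hba *head = push(push(push(NULL, 3, 0), 2, 1), 1, 0);
        prune_broken(&head);        /* keeps 1 and 3, frees 2 */
        return 0;
    }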
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index ff5ec5ac1fb5..88bad0e81bdd 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -323,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd, | |||
323 | srp_cmd->buf_fmt = fmt; | 323 | srp_cmd->buf_fmt = fmt; |
324 | } | 324 | } |
325 | 325 | ||
326 | static void unmap_sg_list(int num_entries, | ||
327 | struct device *dev, | ||
328 | struct srp_direct_buf *md) | ||
329 | { | ||
330 | int i; | ||
331 | |||
332 | for (i = 0; i < num_entries; ++i) | ||
333 | dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL); | ||
334 | } | ||
335 | |||
336 | /** | 326 | /** |
337 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format | 327 | * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format |
338 | * @cmd: srp_cmd whose additional_data member will be unmapped | 328 | * @cmd: srp_cmd whose additional_data member will be unmapped |
@@ -350,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd, | |||
350 | 340 | ||
351 | if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) | 341 | if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) |
352 | return; | 342 | return; |
353 | else if (out_fmt == SRP_DATA_DESC_DIRECT || | ||
354 | in_fmt == SRP_DATA_DESC_DIRECT) { | ||
355 | struct srp_direct_buf *data = | ||
356 | (struct srp_direct_buf *) cmd->add_data; | ||
357 | dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL); | ||
358 | } else { | ||
359 | struct srp_indirect_buf *indirect = | ||
360 | (struct srp_indirect_buf *) cmd->add_data; | ||
361 | int num_mapped = indirect->table_desc.len / | ||
362 | sizeof(struct srp_direct_buf); | ||
363 | 343 | ||
364 | if (num_mapped <= MAX_INDIRECT_BUFS) { | 344 | if (evt_struct->cmnd) |
365 | unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]); | 345 | scsi_dma_unmap(evt_struct->cmnd); |
366 | return; | ||
367 | } | ||
368 | |||
369 | unmap_sg_list(num_mapped, dev, evt_struct->ext_list); | ||
370 | } | ||
371 | } | 346 | } |
372 | 347 | ||
373 | static int map_sg_list(struct scsi_cmnd *cmd, int nseg, | 348 | static int map_sg_list(struct scsi_cmnd *cmd, int nseg, |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 0ee725ced511..02143af7c1af 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -599,7 +599,7 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
599 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 599 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
600 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); | 600 | write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); |
601 | 601 | ||
602 | if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { | 602 | if (sock->sk->sk_sleep) { |
603 | sock->sk->sk_err = EIO; | 603 | sock->sk->sk_err = EIO; |
604 | wake_up_interruptible(sock->sk->sk_sleep); | 604 | wake_up_interruptible(sock->sk->sk_sleep); |
605 | } | 605 | } |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index ec3723831e89..d62b3e467926 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -433,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, | |||
433 | dd_data = cmdiocbq->context1; | 433 | dd_data = cmdiocbq->context1; |
434 | /* normal completion and timeout crossed paths, already done */ | 434 | /* normal completion and timeout crossed paths, already done */ |
435 | if (!dd_data) { | 435 | if (!dd_data) { |
436 | spin_unlock_irqrestore(&phba->hbalock, flags); | 436 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
437 | return; | 437 | return; |
438 | } | 438 | } |
439 | 439 | ||
@@ -1196,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | |||
1196 | dd_data = cmdiocbq->context1; | 1196 | dd_data = cmdiocbq->context1; |
1197 | /* normal completion and timeout crossed paths, already done */ | 1197 | /* normal completion and timeout crossed paths, already done */ |
1198 | if (!dd_data) { | 1198 | if (!dd_data) { |
1199 | spin_unlock_irqrestore(&phba->hbalock, flags); | 1199 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
1200 | return; | 1200 | return; |
1201 | } | 1201 | } |
1202 | 1202 | ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 359e9a71a021..1c7ef55966fb 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -2393,6 +2393,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
2393 | return 0; | 2393 | return 0; |
2394 | 2394 | ||
2395 | done: | 2395 | done: |
2396 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
2396 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) | 2397 | if (bsg_job->request->msgcode == FC_BSG_HST_CT) |
2397 | kfree(sp->fcport); | 2398 | kfree(sp->fcport); |
2398 | kfree(sp->ctx); | 2399 | kfree(sp->ctx); |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 09d6d4b76f39..caeb7d10ae04 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha, | |||
467 | if (conn_err_detail) | 467 | if (conn_err_detail) |
468 | *conn_err_detail = mbox_sts[5]; | 468 | *conn_err_detail = mbox_sts[5]; |
469 | if (tcp_source_port_num) | 469 | if (tcp_source_port_num) |
470 | *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16; | 470 | *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16); |
471 | if (connection_id) | 471 | if (connection_id) |
472 | *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; | 472 | *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; |
473 | status = QLA_SUCCESS; | 473 | status = QLA_SUCCESS; |
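The ql4_mbx.c change is a precedence fix: a cast binds tighter than a shift, so the original expression truncated the mailbox word to 16 bits before shifting and always yielded zero. A standalone demonstration with a made-up mailbox value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mbox = 0xABCD1234;          /* made-up mailbox register value */

        /* The cast binds tighter than >>, so the 32-bit value is cut down to
         * 16 bits *before* the shift and the result is always 0. */
        uint16_t wrong = (uint16_t) mbox >> 16;

        /* Shift first, then truncate: this extracts the high 16 bits. */
        uint16_t right = (uint16_t) (mbox >> 16);

        printf("wrong=0x%04x right=0x%04x\n", wrong, right);
        return 0;
    }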
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index d0b7d2ff9ac5..333580bf37c5 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
@@ -1587,7 +1587,7 @@ static int wd7000_host_reset(struct scsi_cmnd *SCpnt) | |||
1587 | { | 1587 | { |
1588 | Adapter *host = (Adapter *) SCpnt->device->host->hostdata; | 1588 | Adapter *host = (Adapter *) SCpnt->device->host->hostdata; |
1589 | 1589 | ||
1590 | spin_unlock_irq(SCpnt->device->host->host_lock); | 1590 | spin_lock_irq(SCpnt->device->host->host_lock); |
1591 | 1591 | ||
1592 | if (wd7000_adapter_reset(host) < 0) { | 1592 | if (wd7000_adapter_reset(host) < 0) { |
1593 | spin_unlock_irq(SCpnt->device->host->host_lock); | 1593 | spin_unlock_irq(SCpnt->device->host->host_lock); |
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c index 7bb5fee639e3..b5aaef965f24 100644 --- a/drivers/serial/mcf.c +++ b/drivers/serial/mcf.c | |||
@@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios, | |||
263 | } | 263 | } |
264 | 264 | ||
265 | spin_lock_irqsave(&port->lock, flags); | 265 | spin_lock_irqsave(&port->lock, flags); |
266 | uart_update_timeout(port, termios->c_cflag, baud); | ||
266 | writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); | 267 | writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); |
267 | writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); | 268 | writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); |
268 | writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); | 269 | writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); |
@@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data) | |||
379 | static void mcf_config_port(struct uart_port *port, int flags) | 380 | static void mcf_config_port(struct uart_port *port, int flags) |
380 | { | 381 | { |
381 | port->type = PORT_MCF; | 382 | port->type = PORT_MCF; |
383 | port->fifosize = MCFUART_TXFIFOSIZE; | ||
382 | 384 | ||
383 | /* Clear mask, so no surprise interrupts. */ | 385 | /* Clear mask, so no surprise interrupts. */ |
384 | writeb(0, port->membase + MCFUART_UIMR); | 386 | writeb(0, port->membase + MCFUART_UIMR); |
@@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser) | |||
424 | /* | 426 | /* |
425 | * Define the basic serial functions we support. | 427 | * Define the basic serial functions we support. |
426 | */ | 428 | */ |
427 | static struct uart_ops mcf_uart_ops = { | 429 | static const struct uart_ops mcf_uart_ops = { |
428 | .tx_empty = mcf_tx_empty, | 430 | .tx_empty = mcf_tx_empty, |
429 | .get_mctrl = mcf_get_mctrl, | 431 | .get_mctrl = mcf_get_mctrl, |
430 | .set_mctrl = mcf_set_mctrl, | 432 | .set_mctrl = mcf_set_mctrl, |
@@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = { | |||
443 | .verify_port = mcf_verify_port, | 445 | .verify_port = mcf_verify_port, |
444 | }; | 446 | }; |
445 | 447 | ||
446 | static struct mcf_uart mcf_ports[3]; | 448 | static struct mcf_uart mcf_ports[4]; |
447 | 449 | ||
448 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) | 450 | #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) |
449 | 451 | ||
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 175d202ab37e..8cfa5b12ea7a 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -105,6 +105,10 @@ struct serial_cfg_mem { | |||
105 | * manfid 0x0160, 0x0104 | 105 | * manfid 0x0160, 0x0104 |
106 | * This card appears to have a 14.7456MHz clock. | 106 | * This card appears to have a 14.7456MHz clock. |
107 | */ | 107 | */ |
108 | /* Generic Modem: MD55x (GPRS/EDGE) have | ||
109 | * Elan VPU16551 UART with 14.7456MHz oscillator | ||
110 | * manfid 0x015D, 0x4C45 | ||
111 | */ | ||
108 | static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) | 112 | static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) |
109 | { | 113 | { |
110 | port->uartclk = 14745600; | 114 | port->uartclk = 14745600; |
@@ -196,6 +200,11 @@ static const struct serial_quirk quirks[] = { | |||
196 | .multi = -1, | 200 | .multi = -1, |
197 | .setup = quirk_setup_brainboxes_0104, | 201 | .setup = quirk_setup_brainboxes_0104, |
198 | }, { | 202 | }, { |
203 | .manfid = 0x015D, | ||
204 | .prodid = 0x4C45, | ||
205 | .multi = -1, | ||
206 | .setup = quirk_setup_brainboxes_0104, | ||
207 | }, { | ||
199 | .manfid = MANFID_IBM, | 208 | .manfid = MANFID_IBM, |
200 | .prodid = ~0, | 209 | .prodid = ~0, |
201 | .multi = -1, | 210 | .multi = -1, |
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c index a67c622869d2..e2c44ec6fc45 100644 --- a/drivers/staging/dt3155/dt3155_drv.c +++ b/drivers/staging/dt3155/dt3155_drv.c | |||
@@ -57,19 +57,8 @@ MA 02111-1307 USA | |||
57 | 57 | ||
58 | extern void printques(int); | 58 | extern void printques(int); |
59 | 59 | ||
60 | #ifdef MODULE | ||
61 | #include <linux/module.h> | 60 | #include <linux/module.h> |
62 | #include <linux/interrupt.h> | 61 | #include <linux/interrupt.h> |
63 | |||
64 | |||
65 | MODULE_LICENSE("GPL"); | ||
66 | |||
67 | #endif | ||
68 | |||
69 | #ifndef CONFIG_PCI | ||
70 | #error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)" | ||
71 | #endif | ||
72 | |||
73 | #include <linux/pci.h> | 62 | #include <linux/pci.h> |
74 | #include <linux/types.h> | 63 | #include <linux/types.h> |
75 | #include <linux/poll.h> | 64 | #include <linux/poll.h> |
@@ -84,6 +73,9 @@ MODULE_LICENSE("GPL"); | |||
84 | #include "dt3155_io.h" | 73 | #include "dt3155_io.h" |
85 | #include "allocator.h" | 74 | #include "allocator.h" |
86 | 75 | ||
76 | |||
77 | MODULE_LICENSE("GPL"); | ||
78 | |||
87 | /* Error variable. Zero means no error. */ | 79 | /* Error variable. Zero means no error. */ |
88 | int dt3155_errno = 0; | 80 | int dt3155_errno = 0; |
89 | 81 | ||
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 6a3b5cae3a6e..2f3dc4cdf79b 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -301,7 +301,7 @@ static int usb_probe_interface(struct device *dev) | |||
301 | 301 | ||
302 | intf->condition = USB_INTERFACE_BINDING; | 302 | intf->condition = USB_INTERFACE_BINDING; |
303 | 303 | ||
304 | /* Bound interfaces are initially active. They are | 304 | /* Probed interfaces are initially active. They are |
305 | * runtime-PM-enabled only if the driver has autosuspend support. | 305 | * runtime-PM-enabled only if the driver has autosuspend support. |
306 | * They are sensitive to their children's power states. | 306 | * They are sensitive to their children's power states. |
307 | */ | 307 | */ |
@@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
437 | 437 | ||
438 | iface->condition = USB_INTERFACE_BOUND; | 438 | iface->condition = USB_INTERFACE_BOUND; |
439 | 439 | ||
440 | /* Bound interfaces are initially active. They are | 440 | /* Claimed interfaces are initially inactive (suspended). They are |
441 | * runtime-PM-enabled only if the driver has autosuspend support. | 441 | * runtime-PM-enabled only if the driver has autosuspend support. |
442 | * They are sensitive to their children's power states. | 442 | * They are sensitive to their children's power states. |
443 | */ | 443 | */ |
444 | pm_runtime_set_active(dev); | 444 | pm_runtime_set_suspended(dev); |
445 | pm_suspend_ignore_children(dev, false); | 445 | pm_suspend_ignore_children(dev, false); |
446 | if (driver->supports_autosuspend) | 446 | if (driver->supports_autosuspend) |
447 | pm_runtime_enable(dev); | 447 | pm_runtime_enable(dev); |
@@ -1170,7 +1170,7 @@ done: | |||
1170 | static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | 1170 | static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) |
1171 | { | 1171 | { |
1172 | int status = 0; | 1172 | int status = 0; |
1173 | int i = 0; | 1173 | int i = 0, n = 0; |
1174 | struct usb_interface *intf; | 1174 | struct usb_interface *intf; |
1175 | 1175 | ||
1176 | if (udev->state == USB_STATE_NOTATTACHED || | 1176 | if (udev->state == USB_STATE_NOTATTACHED || |
@@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1179 | 1179 | ||
1180 | /* Suspend all the interfaces and then udev itself */ | 1180 | /* Suspend all the interfaces and then udev itself */ |
1181 | if (udev->actconfig) { | 1181 | if (udev->actconfig) { |
1182 | for (; i < udev->actconfig->desc.bNumInterfaces; i++) { | 1182 | n = udev->actconfig->desc.bNumInterfaces; |
1183 | for (i = n - 1; i >= 0; --i) { | ||
1183 | intf = udev->actconfig->interface[i]; | 1184 | intf = udev->actconfig->interface[i]; |
1184 | status = usb_suspend_interface(udev, intf, msg); | 1185 | status = usb_suspend_interface(udev, intf, msg); |
1185 | if (status != 0) | 1186 | if (status != 0) |
@@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) | |||
1192 | /* If the suspend failed, resume interfaces that did get suspended */ | 1193 | /* If the suspend failed, resume interfaces that did get suspended */ |
1193 | if (status != 0) { | 1194 | if (status != 0) { |
1194 | msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); | 1195 | msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); |
1195 | while (--i >= 0) { | 1196 | while (++i < n) { |
1196 | intf = udev->actconfig->interface[i]; | 1197 | intf = udev->actconfig->interface[i]; |
1197 | usb_resume_interface(udev, intf, msg, 0); | 1198 | usb_resume_interface(udev, intf, msg, 0); |
1198 | } | 1199 | } |
@@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) | |||
1263 | return status; | 1264 | return status; |
1264 | } | 1265 | } |
1265 | 1266 | ||
1267 | static void choose_wakeup(struct usb_device *udev, pm_message_t msg) | ||
1268 | { | ||
1269 | int w, i; | ||
1270 | struct usb_interface *intf; | ||
1271 | |||
1272 | /* Remote wakeup is needed only when we actually go to sleep. | ||
1273 | * For things like FREEZE and QUIESCE, if the device is already | ||
1274 | * autosuspended then its current wakeup setting is okay. | ||
1275 | */ | ||
1276 | if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { | ||
1277 | if (udev->state != USB_STATE_SUSPENDED) | ||
1278 | udev->do_remote_wakeup = 0; | ||
1279 | return; | ||
1280 | } | ||
1281 | |||
1282 | /* If remote wakeup is permitted, see whether any interface drivers | ||
1283 | * actually want it. | ||
1284 | */ | ||
1285 | w = 0; | ||
1286 | if (device_may_wakeup(&udev->dev) && udev->actconfig) { | ||
1287 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { | ||
1288 | intf = udev->actconfig->interface[i]; | ||
1289 | w |= intf->needs_remote_wakeup; | ||
1290 | } | ||
1291 | } | ||
1292 | |||
1293 | /* If the device is autosuspended with the wrong wakeup setting, | ||
1294 | * autoresume now so the setting can be changed. | ||
1295 | */ | ||
1296 | if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup) | ||
1297 | pm_runtime_resume(&udev->dev); | ||
1298 | udev->do_remote_wakeup = w; | ||
1299 | } | ||
1300 | |||
1266 | /* The device lock is held by the PM core */ | 1301 | /* The device lock is held by the PM core */ |
1267 | int usb_suspend(struct device *dev, pm_message_t msg) | 1302 | int usb_suspend(struct device *dev, pm_message_t msg) |
1268 | { | 1303 | { |
1269 | struct usb_device *udev = to_usb_device(dev); | 1304 | struct usb_device *udev = to_usb_device(dev); |
1270 | 1305 | ||
1271 | do_unbind_rebind(udev, DO_UNBIND); | 1306 | do_unbind_rebind(udev, DO_UNBIND); |
1272 | udev->do_remote_wakeup = device_may_wakeup(&udev->dev); | 1307 | choose_wakeup(udev, msg); |
1273 | return usb_suspend_both(udev, msg); | 1308 | return usb_suspend_both(udev, msg); |
1274 | } | 1309 | } |
1275 | 1310 | ||
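The usb_suspend_both() change walks the interfaces last-to-first and, when one refuses to suspend, resumes only the ones already suspended by continuing forward from the failing index. A compact userspace sketch of that unwind pattern; suspend_one and resume_one are stand-ins for the real interface callbacks:

    #include <stdio.h>

    #define N 4

    static int suspend_one(int i)
    {
        if (i == 1)                         /* pretend interface 1 refuses */
            return -1;
        printf("suspended %d\n", i);
        return 0;
    }

    static void resume_one(int i)
    {
        printf("resumed %d\n", i);
    }

    int main(void)
    {
        int i, status = 0;

        /* Suspend children last-to-first ... */
        for (i = N - 1; i >= 0; --i) {
            status = suspend_one(i);
            if (status != 0)
                break;
        }

        /* ... and on failure resume only those already suspended, walking
         * forward from the one that failed. */
        if (status != 0) {
            while (++i < N)
                resume_one(i);
        }
        return 0;
    }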
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 207e7a85aeb0..13ead00aecd5 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd) | |||
543 | */ | 543 | */ |
544 | ehci->periodic_size = DEFAULT_I_TDPS; | 544 | ehci->periodic_size = DEFAULT_I_TDPS; |
545 | INIT_LIST_HEAD(&ehci->cached_itd_list); | 545 | INIT_LIST_HEAD(&ehci->cached_itd_list); |
546 | INIT_LIST_HEAD(&ehci->cached_sitd_list); | ||
546 | if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) | 547 | if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) |
547 | return retval; | 548 | return retval; |
548 | 549 | ||
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 19372673bf09..c7178bcde67a 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -801,7 +801,7 @@ static int ehci_hub_control ( | |||
801 | * this bit; seems too long to spin routinely... | 801 | * this bit; seems too long to spin routinely... |
802 | */ | 802 | */ |
803 | retval = handshake(ehci, status_reg, | 803 | retval = handshake(ehci, status_reg, |
804 | PORT_RESET, 0, 750); | 804 | PORT_RESET, 0, 1000); |
805 | if (retval != 0) { | 805 | if (retval != 0) { |
806 | ehci_err (ehci, "port %d reset error %d\n", | 806 | ehci_err (ehci, "port %d reset error %d\n", |
807 | wIndex + 1, retval); | 807 | wIndex + 1, retval); |
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index aeda96e0af67..1f3f01eacaf0 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c | |||
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh) | |||
136 | 136 | ||
137 | static void ehci_mem_cleanup (struct ehci_hcd *ehci) | 137 | static void ehci_mem_cleanup (struct ehci_hcd *ehci) |
138 | { | 138 | { |
139 | free_cached_itd_list(ehci); | 139 | free_cached_lists(ehci); |
140 | if (ehci->async) | 140 | if (ehci->async) |
141 | qh_put (ehci->async); | 141 | qh_put (ehci->async); |
142 | ehci->async = NULL; | 142 | ehci->async = NULL; |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index a67a0030dd57..40a858335035 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
@@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
629 | } | 629 | } |
630 | snprintf(supply, sizeof(supply), "hsusb%d", i); | 630 | snprintf(supply, sizeof(supply), "hsusb%d", i); |
631 | omap->regulator[i] = regulator_get(omap->dev, supply); | 631 | omap->regulator[i] = regulator_get(omap->dev, supply); |
632 | if (IS_ERR(omap->regulator[i])) | 632 | if (IS_ERR(omap->regulator[i])) { |
633 | omap->regulator[i] = NULL; | ||
633 | dev_dbg(&pdev->dev, | 634 | dev_dbg(&pdev->dev, |
634 | "failed to get ehci port%d regulator\n", i); | 635 | "failed to get ehci port%d regulator\n", i); |
635 | else | 636 | } else { |
636 | regulator_enable(omap->regulator[i]); | 637 | regulator_enable(omap->regulator[i]); |
638 | } | ||
637 | } | 639 | } |
638 | 640 | ||
639 | ret = omap_start_ehc(omap, hcd); | 641 | ret = omap_start_ehc(omap, hcd); |
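The ehci-omap hunk turns a failed regulator lookup into a plain NULL pointer, so later code only has to test the pointer instead of the error encoding. A rough userspace sketch of that idea; ERR_PTR and IS_ERR here are simplified stand-ins for the kernel macros in linux/err.h:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel's error-pointer convention. */
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)

    static void *get_optional_resource(int i)
    {
        return (i == 2) ? ERR_PTR(-ENODEV) : malloc(1);  /* port 2: missing */
    }

    int main(void)
    {
        void *res[3];
        int i;

        for (i = 0; i < 3; i++) {
            res[i] = get_optional_resource(i);
            if (IS_ERR(res[i])) {
                /* Normalize to NULL so every later user only tests the
                 * pointer, never the error encoding. */
                res[i] = NULL;
                printf("port %d: resource missing, continuing\n", i);
            }
        }
        for (i = 0; i < 3; i++)
            if (res[i])
                free(res[i]);
        return 0;
    }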
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a0aaaaff2560..805ec633a652 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -510,7 +510,7 @@ static int disable_periodic (struct ehci_hcd *ehci) | |||
510 | ehci_writel(ehci, cmd, &ehci->regs->command); | 510 | ehci_writel(ehci, cmd, &ehci->regs->command); |
511 | /* posted write ... */ | 511 | /* posted write ... */ |
512 | 512 | ||
513 | free_cached_itd_list(ehci); | 513 | free_cached_lists(ehci); |
514 | 514 | ||
515 | ehci->next_uframe = -1; | 515 | ehci->next_uframe = -1; |
516 | return 0; | 516 | return 0; |
@@ -2139,13 +2139,27 @@ sitd_complete ( | |||
2139 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); | 2139 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); |
2140 | } | 2140 | } |
2141 | iso_stream_put (ehci, stream); | 2141 | iso_stream_put (ehci, stream); |
2142 | /* OK to recycle this SITD now that its completion callback ran. */ | 2142 | |
2143 | done: | 2143 | done: |
2144 | sitd->urb = NULL; | 2144 | sitd->urb = NULL; |
2145 | sitd->stream = NULL; | 2145 | if (ehci->clock_frame != sitd->frame) { |
2146 | list_move(&sitd->sitd_list, &stream->free_list); | 2146 | /* OK to recycle this SITD now. */ |
2147 | iso_stream_put(ehci, stream); | 2147 | sitd->stream = NULL; |
2148 | 2148 | list_move(&sitd->sitd_list, &stream->free_list); | |
2149 | iso_stream_put(ehci, stream); | ||
2150 | } else { | ||
2151 | /* HW might remember this SITD, so we can't recycle it yet. | ||
2152 | * Move it to a safe place until a new frame starts. | ||
2153 | */ | ||
2154 | list_move(&sitd->sitd_list, &ehci->cached_sitd_list); | ||
2155 | if (stream->refcount == 2) { | ||
2156 | /* If iso_stream_put() were called here, stream | ||
2157 | * would be freed. Instead, just prevent reuse. | ||
2158 | */ | ||
2159 | stream->ep->hcpriv = NULL; | ||
2160 | stream->ep = NULL; | ||
2161 | } | ||
2162 | } | ||
2149 | return retval; | 2163 | return retval; |
2150 | } | 2164 | } |
2151 | 2165 | ||
@@ -2211,9 +2225,10 @@ done: | |||
2211 | 2225 | ||
2212 | /*-------------------------------------------------------------------------*/ | 2226 | /*-------------------------------------------------------------------------*/ |
2213 | 2227 | ||
2214 | static void free_cached_itd_list(struct ehci_hcd *ehci) | 2228 | static void free_cached_lists(struct ehci_hcd *ehci) |
2215 | { | 2229 | { |
2216 | struct ehci_itd *itd, *n; | 2230 | struct ehci_itd *itd, *n; |
2231 | struct ehci_sitd *sitd, *sn; | ||
2217 | 2232 | ||
2218 | list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { | 2233 | list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { |
2219 | struct ehci_iso_stream *stream = itd->stream; | 2234 | struct ehci_iso_stream *stream = itd->stream; |
@@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci) | |||
2221 | list_move(&itd->itd_list, &stream->free_list); | 2236 | list_move(&itd->itd_list, &stream->free_list); |
2222 | iso_stream_put(ehci, stream); | 2237 | iso_stream_put(ehci, stream); |
2223 | } | 2238 | } |
2239 | |||
2240 | list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { | ||
2241 | struct ehci_iso_stream *stream = sitd->stream; | ||
2242 | sitd->stream = NULL; | ||
2243 | list_move(&sitd->sitd_list, &stream->free_list); | ||
2244 | iso_stream_put(ehci, stream); | ||
2245 | } | ||
2224 | } | 2246 | } |
2225 | 2247 | ||
2226 | /*-------------------------------------------------------------------------*/ | 2248 | /*-------------------------------------------------------------------------*/ |
@@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci) | |||
2247 | clock_frame = -1; | 2269 | clock_frame = -1; |
2248 | } | 2270 | } |
2249 | if (ehci->clock_frame != clock_frame) { | 2271 | if (ehci->clock_frame != clock_frame) { |
2250 | free_cached_itd_list(ehci); | 2272 | free_cached_lists(ehci); |
2251 | ehci->clock_frame = clock_frame; | 2273 | ehci->clock_frame = clock_frame; |
2252 | } | 2274 | } |
2253 | clock %= mod; | 2275 | clock %= mod; |
@@ -2414,7 +2436,7 @@ restart: | |||
2414 | clock = now; | 2436 | clock = now; |
2415 | clock_frame = clock >> 3; | 2437 | clock_frame = clock >> 3; |
2416 | if (ehci->clock_frame != clock_frame) { | 2438 | if (ehci->clock_frame != clock_frame) { |
2417 | free_cached_itd_list(ehci); | 2439 | free_cached_lists(ehci); |
2418 | ehci->clock_frame = clock_frame; | 2440 | ehci->clock_frame = clock_frame; |
2419 | } | 2441 | } |
2420 | } else { | 2442 | } else { |
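The sitd handling now mirrors what itds already did: a descriptor that completes in the frame the controller is still working on is parked on a cached list and recycled only after the frame counter advances. The sketch below is a heavily simplified userspace illustration of that defer-then-recycle idea; struct desc, complete_desc and frame_advanced are invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct desc {
        int id;
        struct desc *next;
    };

    static struct desc *cached_list;        /* done, but maybe still referenced */
    static unsigned clock_frame;

    static void complete_desc(struct desc *d, unsigned frame_of_desc)
    {
        if (clock_frame != frame_of_desc) {
            free(d);                        /* hardware is past it: recycle now */
        } else {
            d->next = cached_list;          /* same frame: defer the recycle */
            cached_list = d;
        }
    }

    static void frame_advanced(unsigned new_frame)
    {
        while (cached_list) {               /* now safe to recycle everything */
            struct desc *d = cached_list;
            cached_list = d->next;
            printf("recycled deferred %d\n", d->id);
            free(d);
        }
        clock_frame = new_frame;
    }

    int main(void)
    {
        struct desc *d = malloc(sizeof(*d));

        d->id = 7;
        clock_frame = 100;
        complete_desc(d, 100);              /* deferred: same frame */
        frame_advanced(101);                /* reclaimed here */
        return 0;
    }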
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index b1dce96dd621..556c0b48f3ab 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */ | |||
87 | int next_uframe; /* scan periodic, start here */ | 87 | int next_uframe; /* scan periodic, start here */ |
88 | unsigned periodic_sched; /* periodic activity count */ | 88 | unsigned periodic_sched; /* periodic activity count */ |
89 | 89 | ||
90 | /* list of itds completed while clock_frame was still active */ | 90 | /* list of itds & sitds completed while clock_frame was still active */ |
91 | struct list_head cached_itd_list; | 91 | struct list_head cached_itd_list; |
92 | struct list_head cached_sitd_list; | ||
92 | unsigned clock_frame; | 93 | unsigned clock_frame; |
93 | 94 | ||
94 | /* per root hub port */ | 95 | /* per root hub port */ |
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action) | |||
195 | clear_bit (action, &ehci->actions); | 196 | clear_bit (action, &ehci->actions); |
196 | } | 197 | } |
197 | 198 | ||
198 | static void free_cached_itd_list(struct ehci_hcd *ehci); | 199 | static void free_cached_lists(struct ehci_hcd *ehci); |
199 | 200 | ||
200 | /*-------------------------------------------------------------------------*/ | 201 | /*-------------------------------------------------------------------------*/ |
201 | 202 | ||
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 4aa08d36d077..d22fb4d577b7 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." | 23 | #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG) | 26 | #define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG) |
27 | 27 | ||
28 | static struct clk *usb11_clk; | 28 | static struct clk *usb11_clk; |
29 | static struct clk *usb20_clk; | 29 | static struct clk *usb20_clk; |
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index a9555cb901a1..de8ef945b536 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c | |||
@@ -49,6 +49,7 @@ struct usb_sevsegdev { | |||
49 | u16 textlength; | 49 | u16 textlength; |
50 | 50 | ||
51 | u8 shadow_power; /* for PM */ | 51 | u8 shadow_power; /* for PM */ |
52 | u8 has_interface_pm; | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | /* sysfs_streq can't replace this completely | 55 | /* sysfs_streq can't replace this completely |
@@ -68,12 +69,16 @@ static void update_display_powered(struct usb_sevsegdev *mydev) | |||
68 | { | 69 | { |
69 | int rc; | 70 | int rc; |
70 | 71 | ||
71 | if (!mydev->shadow_power && mydev->powered) { | 72 | if (mydev->powered && !mydev->has_interface_pm) { |
72 | rc = usb_autopm_get_interface(mydev->intf); | 73 | rc = usb_autopm_get_interface(mydev->intf); |
73 | if (rc < 0) | 74 | if (rc < 0) |
74 | return; | 75 | return; |
76 | mydev->has_interface_pm = 1; | ||
75 | } | 77 | } |
76 | 78 | ||
79 | if (mydev->shadow_power != 1) | ||
80 | return; | ||
81 | |||
77 | rc = usb_control_msg(mydev->udev, | 82 | rc = usb_control_msg(mydev->udev, |
78 | usb_sndctrlpipe(mydev->udev, 0), | 83 | usb_sndctrlpipe(mydev->udev, 0), |
79 | 0x12, | 84 | 0x12, |
@@ -86,8 +91,10 @@ static void update_display_powered(struct usb_sevsegdev *mydev) | |||
86 | if (rc < 0) | 91 | if (rc < 0) |
87 | dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); | 92 | dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); |
88 | 93 | ||
89 | if (mydev->shadow_power && !mydev->powered) | 94 | if (!mydev->powered && mydev->has_interface_pm) { |
90 | usb_autopm_put_interface(mydev->intf); | 95 | usb_autopm_put_interface(mydev->intf); |
96 | mydev->has_interface_pm = 0; | ||
97 | } | ||
91 | } | 98 | } |
92 | 99 | ||
93 | static void update_display_mode(struct usb_sevsegdev *mydev) | 100 | static void update_display_mode(struct usb_sevsegdev *mydev) |
@@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_interface *interface, | |||
351 | mydev->intf = interface; | 358 | mydev->intf = interface; |
352 | usb_set_intfdata(interface, mydev); | 359 | usb_set_intfdata(interface, mydev); |
353 | 360 | ||
361 | /* PM */ | ||
362 | mydev->shadow_power = 1; /* currently active */ | ||
363 | mydev->has_interface_pm = 0; /* have not issued autopm_get */ | ||
364 | |||
354 | /*set defaults */ | 365 | /*set defaults */ |
355 | mydev->textmode = 0x02; /* ascii mode */ | 366 | mydev->textmode = 0x02; /* ascii mode */ |
356 | mydev->mode_msb = 0x06; /* 6 characters */ | 367 | mydev->mode_msb = 0x06; /* 6 characters */ |
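The usbsevseg change records whether the driver currently holds the autosuspend reference in its own flag, so the get/put calls stay balanced no matter how the powered state is toggled. A tiny sketch of that balanced-reference pattern; pm_get and pm_put are placeholders for usb_autopm_get_interface/usb_autopm_put_interface:

    #include <stdio.h>

    static int ref_held;                    /* do we currently hold the reference? */
    static int refcount;

    static void pm_get(void) { refcount++; }
    static void pm_put(void) { refcount--; }

    /* Take the reference only when turning on and not already holding it;
     * drop it only when actually holding it. Repeated "on" or "off" requests
     * therefore cannot unbalance the count. */
    static void set_powered(int powered)
    {
        if (powered && !ref_held) {
            pm_get();
            ref_held = 1;
        }
        if (!powered && ref_held) {
            pm_put();
            ref_held = 0;
        }
    }

    int main(void)
    {
        set_powered(1);
        set_powered(1);
        set_powered(0);
        printf("refcount=%d\n", refcount);  /* balanced back to 0 */
        return 0;
    }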
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 73d5f346d3e0..c97a0bb5b6db 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -97,6 +97,7 @@ static const struct usb_device_id id_table[] = { | |||
97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, | 97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, |
98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, | 98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, |
99 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, | 99 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, |
100 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, | ||
100 | { } /* Terminating entry */ | 101 | { } /* Terminating entry */ |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index d640dc951568..a352d5f3a59c 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -134,3 +134,7 @@ | |||
134 | /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ | 134 | /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ |
135 | #define SANWA_VENDOR_ID 0x11ad | 135 | #define SANWA_VENDOR_ID 0x11ad |
136 | #define SANWA_PRODUCT_ID 0x0001 | 136 | #define SANWA_PRODUCT_ID 0x0001 |
137 | |||
138 | /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ | ||
139 | #define ADLINK_VENDOR_ID 0x0b63 | ||
140 | #define ADLINK_ND6530_PRODUCT_ID 0x6530 | ||
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 0b9362061713..7e3bea23600b 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c | |||
@@ -42,6 +42,14 @@ | |||
42 | #define CMOTECH_PRODUCT_CDU550 0x5553 | 42 | #define CMOTECH_PRODUCT_CDU550 0x5553 |
43 | #define CMOTECH_PRODUCT_CDX650 0x6512 | 43 | #define CMOTECH_PRODUCT_CDX650 0x6512 |
44 | 44 | ||
45 | /* LG devices */ | ||
46 | #define LG_VENDOR_ID 0x1004 | ||
47 | #define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */ | ||
48 | |||
49 | /* Sanyo devices */ | ||
50 | #define SANYO_VENDOR_ID 0x0474 | ||
51 | #define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */ | ||
52 | |||
45 | static struct usb_device_id id_table[] = { | 53 | static struct usb_device_id id_table[] = { |
46 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, | 54 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, |
47 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, | 55 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, |
@@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = { | |||
51 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, | 59 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, |
52 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, | 60 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, |
53 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, | 61 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, |
62 | { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, | ||
63 | { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, | ||
54 | { }, | 64 | { }, |
55 | }; | 65 | }; |
56 | MODULE_DEVICE_TABLE(usb, id_table); | 66 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 9202f94505e6..ef0bdb08d788 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -230,6 +230,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = { | |||
230 | static const struct usb_device_id id_table[] = { | 230 | static const struct usb_device_id id_table[] = { |
231 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ | 231 | { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ |
232 | { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ | 232 | { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ |
233 | { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */ | ||
233 | { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ | 234 | { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ |
234 | 235 | ||
235 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ | 236 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 0afe5c71c17e..880e990abb07 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -172,7 +172,7 @@ static unsigned int product_5052_count; | |||
172 | /* the array dimension is the number of default entries plus */ | 172 | /* the array dimension is the number of default entries plus */ |
173 | /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ | 173 | /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ |
174 | /* null entry */ | 174 | /* null entry */ |
175 | static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { | 175 | static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { |
176 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, | 176 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, |
177 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, | 177 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, |
178 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, | 178 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, |
@@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { | |||
180 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, | 180 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, |
181 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, | 181 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, |
182 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, | 182 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, |
183 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, | ||
184 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, | ||
185 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, | ||
183 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, | 186 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, |
184 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, | 187 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, |
185 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, | 188 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, |
@@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { | |||
192 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, | 195 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, |
193 | }; | 196 | }; |
194 | 197 | ||
195 | static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = { | 198 | static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = { |
196 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, | 199 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, |
197 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, | 200 | { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, |
198 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, | 201 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, |
@@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] | |||
200 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, | 203 | { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, |
201 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, | 204 | { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, |
202 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, | 205 | { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, |
206 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, | ||
207 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, | ||
208 | { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, | ||
203 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, | 209 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, |
204 | { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, | 210 | { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, |
205 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, | 211 | { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, |
@@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw"); | |||
287 | MODULE_FIRMWARE("mts_cdma.fw"); | 293 | MODULE_FIRMWARE("mts_cdma.fw"); |
288 | MODULE_FIRMWARE("mts_gsm.fw"); | 294 | MODULE_FIRMWARE("mts_gsm.fw"); |
289 | MODULE_FIRMWARE("mts_edge.fw"); | 295 | MODULE_FIRMWARE("mts_edge.fw"); |
296 | MODULE_FIRMWARE("mts_mt9234mu.fw"); | ||
297 | MODULE_FIRMWARE("mts_mt9234zba.fw"); | ||
290 | 298 | ||
291 | module_param(debug, bool, S_IRUGO | S_IWUSR); | 299 | module_param(debug, bool, S_IRUGO | S_IWUSR); |
292 | MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); | 300 | MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); |
@@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1687 | const struct firmware *fw_p; | 1695 | const struct firmware *fw_p; |
1688 | char buf[32]; | 1696 | char buf[32]; |
1689 | 1697 | ||
1698 | dbg("%s\n", __func__); | ||
1690 | /* try ID specific firmware first, then try generic firmware */ | 1699 | /* try ID specific firmware first, then try generic firmware */ |
1691 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1700 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, |
1692 | dev->descriptor.idProduct); | 1701 | dev->descriptor.idProduct); |
@@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1703 | case MTS_EDGE_PRODUCT_ID: | 1712 | case MTS_EDGE_PRODUCT_ID: |
1704 | strcpy(buf, "mts_edge.fw"); | 1713 | strcpy(buf, "mts_edge.fw"); |
1705 | break; | 1714 | break; |
1706 | } | 1715 | case MTS_MT9234MU_PRODUCT_ID: |
1716 | strcpy(buf, "mts_mt9234mu.fw"); | ||
1717 | break; | ||
1718 | case MTS_MT9234ZBA_PRODUCT_ID: | ||
1719 | strcpy(buf, "mts_mt9234zba.fw"); | ||
1720 | break; | ||
1721 | case MTS_MT9234ZBAOLD_PRODUCT_ID: | ||
1722 | strcpy(buf, "mts_mt9234zba.fw"); | ||
1723 | break; } | ||
1707 | } | 1724 | } |
1708 | if (buf[0] == '\0') { | 1725 | if (buf[0] == '\0') { |
1709 | if (tdev->td_is_3410) | 1726 | if (tdev->td_is_3410) |
@@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1718 | return -ENOENT; | 1735 | return -ENOENT; |
1719 | } | 1736 | } |
1720 | if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { | 1737 | if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { |
1721 | dev_err(&dev->dev, "%s - firmware too large\n", __func__); | 1738 | dev_err(&dev->dev, "%s - firmware too large %d \n", __func__, fw_p->size); |
1722 | return -ENOENT; | 1739 | return -ENOENT; |
1723 | } | 1740 | } |
1724 | 1741 | ||
@@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1730 | status = ti_do_download(dev, pipe, buffer, fw_p->size); | 1747 | status = ti_do_download(dev, pipe, buffer, fw_p->size); |
1731 | kfree(buffer); | 1748 | kfree(buffer); |
1732 | } else { | 1749 | } else { |
1750 | dbg("%s ENOMEM\n", __func__); | ||
1733 | status = -ENOMEM; | 1751 | status = -ENOMEM; |
1734 | } | 1752 | } |
1735 | release_firmware(fw_p); | 1753 | release_firmware(fw_p); |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index f323c6025858..2aac1953993b 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h | |||
@@ -45,6 +45,9 @@ | |||
45 | #define MTS_CDMA_PRODUCT_ID 0xF110 | 45 | #define MTS_CDMA_PRODUCT_ID 0xF110 |
46 | #define MTS_GSM_PRODUCT_ID 0xF111 | 46 | #define MTS_GSM_PRODUCT_ID 0xF111 |
47 | #define MTS_EDGE_PRODUCT_ID 0xF112 | 47 | #define MTS_EDGE_PRODUCT_ID 0xF112 |
48 | #define MTS_MT9234MU_PRODUCT_ID 0xF114 | ||
49 | #define MTS_MT9234ZBA_PRODUCT_ID 0xF115 | ||
50 | #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 | ||
48 | 51 | ||
49 | /* Commands */ | 52 | /* Commands */ |
50 | #define TI_GET_VERSION 0x01 | 53 | #define TI_GET_VERSION 0x01 |
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index 46e79d349498..7ec24e46b34b 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c | |||
@@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) | |||
438 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); | 438 | old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); |
439 | keep_alives = 0; | 439 | keep_alives = 0; |
440 | for (cnt = 0; | 440 | for (cnt = 0; |
441 | keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; | 441 | keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; |
442 | cnt++) { | 442 | cnt++) { |
443 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); | 443 | unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); |
444 | 444 | ||
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5be11c99e18f..e69d238c5af0 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, | |||
236 | int log_all) | 236 | int log_all) |
237 | { | 237 | { |
238 | int i; | 238 | int i; |
239 | |||
240 | if (!mem) | ||
241 | return 0; | ||
242 | |||
239 | for (i = 0; i < mem->nregions; ++i) { | 243 | for (i = 0; i < mem->nregions; ++i) { |
240 | struct vhost_memory_region *m = mem->regions + i; | 244 | struct vhost_memory_region *m = mem->regions + i; |
241 | unsigned long a = m->userspace_addr; | 245 | unsigned long a = m->userspace_addr; |
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 581d2dbf675a..ecf405562f5c 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c | |||
@@ -49,6 +49,7 @@ enum { | |||
49 | M_MBP_2, /* MacBook Pro 2nd gen */ | 49 | M_MBP_2, /* MacBook Pro 2nd gen */ |
50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ | 50 | M_MBP_SR, /* MacBook Pro (Santa Rosa) */ |
51 | M_MBP_4, /* MacBook Pro, 4th gen */ | 51 | M_MBP_4, /* MacBook Pro, 4th gen */ |
52 | M_MBP_5_1, /* MacBook Pro, 5,1th gen */ | ||
52 | M_UNKNOWN /* placeholder */ | 53 | M_UNKNOWN /* placeholder */ |
53 | }; | 54 | }; |
54 | 55 | ||
@@ -70,6 +71,7 @@ static struct efifb_dmi_info { | |||
70 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ | 71 | [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ |
71 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, | 72 | [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, |
72 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, | 73 | [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, |
74 | [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, | ||
73 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } | 75 | [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } |
74 | }; | 76 | }; |
75 | 77 | ||
@@ -106,6 +108,7 @@ static struct dmi_system_id __initdata dmi_system_table[] = { | |||
106 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), | 108 | EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), |
107 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), | 109 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), |
108 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), | 110 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), |
111 | EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), | ||
109 | {}, | 112 | {}, |
110 | }; | 113 | }; |
111 | 114 | ||
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 3aed38886f94..bfec7c29486d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -103,7 +103,8 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num) | |||
103 | num = min(num, ARRAY_SIZE(vb->pfns)); | 103 | num = min(num, ARRAY_SIZE(vb->pfns)); |
104 | 104 | ||
105 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { | 105 | for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { |
106 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY); | 106 | struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | |
107 | __GFP_NOMEMALLOC | __GFP_NOWARN); | ||
107 | if (!page) { | 108 | if (!page) { |
108 | if (printk_ratelimit()) | 109 | if (printk_ratelimit()) |
109 | dev_printk(KERN_INFO, &vb->vdev->dev, | 110 | dev_printk(KERN_INFO, &vb->vdev->dev, |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index ef36fca2eed4..3a7e9ff8a746 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/sched.h> | ||
19 | 20 | ||
20 | #include <asm/irq.h> | 21 | #include <asm/irq.h> |
21 | #include <mach/hardware.h> | 22 | #include <mach/hardware.h> |
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 1ed3d554e372..17726a05a0a6 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c | |||
@@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = { | |||
115 | 115 | ||
116 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) | 116 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) |
117 | { | 117 | { |
118 | int t = ((s16)rom[1] << 8) | rom[0]; | 118 | s16 t = le16_to_cpup((__le16 *)rom); |
119 | t = t*1000/16; | 119 | return t*1000/16; |
120 | return t; | ||
121 | } | 120 | } |
122 | 121 | ||
123 | static inline int w1_DS18S20_convert_temp(u8 rom[9]) | 122 | static inline int w1_DS18S20_convert_temp(u8 rom[9]) |
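The w1_therm change reads the first two scratchpad bytes as a little-endian signed 16-bit quantity instead of assembling them by hand. A portable userspace equivalent; the raw value 0xFFF8 (-8 sixteenths of a degree) is just an illustrative sample:

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a signed 16-bit value from two little-endian bytes without
     * depending on host byte order or buffer alignment. */
    static int16_t le16_signed(const uint8_t *p)
    {
        return (int16_t)((uint16_t)p[0] | ((uint16_t)p[1] << 8));
    }

    int main(void)
    {
        uint8_t rom[2] = { 0xF8, 0xFF };    /* little-endian 0xFFF8 == -8 */
        int milli_c = le16_signed(rom) * 1000 / 16;

        printf("%d milli-degrees C\n", milli_c);    /* prints -500 */
        return 0;
    }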
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0e8468ffd100..0bf5020d0d32 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -194,10 +194,10 @@ config EP93XX_WATCHDOG | |||
194 | 194 | ||
195 | config OMAP_WATCHDOG | 195 | config OMAP_WATCHDOG |
196 | tristate "OMAP Watchdog" | 196 | tristate "OMAP Watchdog" |
197 | depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3 | 197 | depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS |
198 | help | 198 | help |
199 | Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y' | 199 | Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y' |
200 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer. | 200 | here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. |
201 | 201 | ||
202 | config PNX4008_WATCHDOG | 202 | config PNX4008_WATCHDOG |
203 | tristate "PNX4008 Watchdog" | 203 | tristate "PNX4008 Watchdog" |
@@ -302,7 +302,7 @@ config TS72XX_WATCHDOG | |||
302 | 302 | ||
303 | config MAX63XX_WATCHDOG | 303 | config MAX63XX_WATCHDOG |
304 | tristate "Max63xx watchdog" | 304 | tristate "Max63xx watchdog" |
305 | depends on ARM | 305 | depends on ARM && HAS_IOMEM |
306 | help | 306 | help |
307 | Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. | 307 | Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. |
308 | 308 | ||
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 8b724aad6825..801ead191499 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c | |||
@@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT; | |||
44 | 44 | ||
45 | #ifdef CONFIG_FSL_BOOKE | 45 | #ifdef CONFIG_FSL_BOOKE |
46 | #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) | 46 | #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) |
47 | #define WDTP_MASK (WDTP(0)) | 47 | #define WDTP_MASK (WDTP(0x3f)) |
48 | #else | 48 | #else |
49 | #define WDTP(x) (TCR_WP(x)) | 49 | #define WDTP(x) (TCR_WP(x)) |
50 | #define WDTP_MASK (TCR_WP_MASK) | 50 | #define WDTP_MASK (TCR_WP_MASK) |
@@ -121,7 +121,7 @@ static ssize_t booke_wdt_write(struct file *file, const char __user *buf, | |||
121 | return count; | 121 | return count; |
122 | } | 122 | } |
123 | 123 | ||
124 | static const struct watchdog_info ident = { | 124 | static struct watchdog_info ident = { |
125 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | 125 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, |
126 | .identity = "PowerPC Book-E Watchdog", | 126 | .identity = "PowerPC Book-E Watchdog", |
127 | }; | 127 | }; |
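The booke_wdt fix builds the mask by feeding an all-ones field value through the same packing macro; WDTP(0) evaluates to 0, so clearing the old period bits was a no-op. A standalone example of the pattern; only the WDTP bit layout comes from the hunk above, the register and period values are made up:

    #include <stdio.h>

    /* Scattered 6-bit field: bits 31:30 hold x[1:0], bits 20:17 hold x[5:2]. */
    #define WDTP(x)    ((((x) & 0x3u) << 30) | (((x) & 0x3cu) << 15))
    #define WDTP_MASK  (WDTP(0x3f))         /* all field bits; WDTP(0) would be 0 */

    int main(void)
    {
        unsigned tcr = 0xdeadbeef;          /* made-up current register value */
        unsigned period = 0x2a;             /* made-up new period */

        tcr = (tcr & ~WDTP_MASK) | WDTP(period);    /* replace just the field */
        printf("mask=0x%08x tcr=0x%08x\n", WDTP_MASK, tcr);
        return 0;
    }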
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 75f3a83c0361..3053ff05ca41 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
@@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry) | |||
154 | 154 | ||
155 | static void max63xx_wdt_disable(void) | 155 | static void max63xx_wdt_disable(void) |
156 | { | 156 | { |
157 | u8 val; | ||
158 | |||
157 | spin_lock(&io_lock); | 159 | spin_lock(&io_lock); |
158 | 160 | ||
159 | __raw_writeb(3, wdt_base); | 161 | val = __raw_readb(wdt_base); |
162 | val &= ~MAX6369_WDSET; | ||
163 | val |= 3; | ||
164 | __raw_writeb(val, wdt_base); | ||
160 | 165 | ||
161 | spin_unlock(&io_lock); | 166 | spin_unlock(&io_lock); |
162 | 167 | ||
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index c8eadd478175..88c83aa57303 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c | |||
@@ -67,8 +67,8 @@ static DEFINE_SPINLOCK(sbwd_lock); | |||
67 | void sbwdog_set(char __iomem *wdog, unsigned long t) | 67 | void sbwdog_set(char __iomem *wdog, unsigned long t) |
68 | { | 68 | { |
69 | spin_lock(&sbwd_lock); | 69 | spin_lock(&sbwd_lock); |
70 | __raw_writeb(0, wdog - 0x10); | 70 | __raw_writeb(0, wdog); |
71 | __raw_writeq(t & 0x7fffffUL, wdog); | 71 | __raw_writeq(t & 0x7fffffUL, wdog - 0x10); |
72 | spin_unlock(&sbwd_lock); | 72 | spin_unlock(&sbwd_lock); |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 8d44c9b6fb5b..c7d67e9a7465 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c | |||
@@ -30,7 +30,7 @@ | |||
30 | static int nowayout = WATCHDOG_NOWAYOUT; | 30 | static int nowayout = WATCHDOG_NOWAYOUT; |
31 | static unsigned int margin = 60; /* (secs) Default is 1 minute */ | 31 | static unsigned int margin = 60; /* (secs) Default is 1 minute */ |
32 | static unsigned long wdt_status; | 32 | static unsigned long wdt_status; |
33 | static DEFINE_SPINLOCK(wdt_lock); | 33 | static DEFINE_MUTEX(wdt_lock); |
34 | 34 | ||
35 | #define WDT_IN_USE 0 | 35 | #define WDT_IN_USE 0 |
36 | #define WDT_OK_TO_CLOSE 1 | 36 | #define WDT_OK_TO_CLOSE 1 |
@@ -45,26 +45,26 @@ static DEFINE_SPINLOCK(wdt_lock); | |||
45 | 45 | ||
46 | static void wdt_send_data(unsigned char command, unsigned char data) | 46 | static void wdt_send_data(unsigned char command, unsigned char data) |
47 | { | 47 | { |
48 | outb(command, COMMAND_PORT); | ||
49 | msleep(100); | ||
50 | outb(data, DATA_PORT); | 48 | outb(data, DATA_PORT); |
51 | msleep(200); | 49 | msleep(200); |
50 | outb(command, COMMAND_PORT); | ||
51 | msleep(100); | ||
52 | } | 52 | } |
53 | 53 | ||
54 | static void wdt_enable(void) | 54 | static void wdt_enable(void) |
55 | { | 55 | { |
56 | spin_lock(&wdt_lock); | 56 | mutex_lock(&wdt_lock); |
57 | wdt_send_data(IFACE_ON_COMMAND, 1); | 57 | wdt_send_data(IFACE_ON_COMMAND, 1); |
58 | wdt_send_data(REBOOT_COMMAND, margin); | 58 | wdt_send_data(REBOOT_COMMAND, margin); |
59 | spin_unlock(&wdt_lock); | 59 | mutex_unlock(&wdt_lock); |
60 | } | 60 | } |
61 | 61 | ||
62 | static void wdt_disable(void) | 62 | static void wdt_disable(void) |
63 | { | 63 | { |
64 | spin_lock(&wdt_lock); | 64 | mutex_lock(&wdt_lock); |
65 | wdt_send_data(IFACE_ON_COMMAND, 0); | 65 | wdt_send_data(IFACE_ON_COMMAND, 0); |
66 | wdt_send_data(REBOOT_COMMAND, 0); | 66 | wdt_send_data(REBOOT_COMMAND, 0); |
67 | spin_unlock(&wdt_lock); | 67 | mutex_unlock(&wdt_lock); |
68 | } | 68 | } |
69 | 69 | ||
70 | static int fitpc2_wdt_open(struct inode *inode, struct file *file) | 70 | static int fitpc2_wdt_open(struct inode *inode, struct file *file) |
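The sbc_fitpc2_wdt change replaces the spinlock with a mutex because wdt_send_data() sleeps (msleep) inside the critical section, which a spinlock does not allow. A userspace sketch of the same shape using a pthread mutex; the port writes are stubbed out with printf and the command codes are placeholders, not the board's real values:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t wdt_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Each watchdog command is a two-step transaction with delays in the
     * middle, so the whole sequence is serialized under a sleeping lock. */
    static void wdt_send_data(unsigned char command, unsigned char data)
    {
        printf("data port <- 0x%02x\n", data);
        usleep(200 * 1000);                 /* the hardware needs settle time */
        printf("cmd  port <- 0x%02x\n", command);
        usleep(100 * 1000);
    }

    static void wdt_enable_sketch(unsigned char margin)
    {
        pthread_mutex_lock(&wdt_lock);      /* may sleep: fine for a mutex */
        wdt_send_data(0x01 /* IFACE_ON */, 1);
        wdt_send_data(0x02 /* REBOOT */, margin);
        pthread_mutex_unlock(&wdt_lock);
    }

    int main(void)
    {
        wdt_enable_sketch(60);
        return 0;
    }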