Diffstat (limited to 'drivers')
356 files changed, 8007 insertions, 3770 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8a07363417ed..368ae6d3a096 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
| @@ -28,7 +28,7 @@ source "drivers/md/Kconfig" | |||
| 28 | 28 | ||
| 29 | source "drivers/message/fusion/Kconfig" | 29 | source "drivers/message/fusion/Kconfig" |
| 30 | 30 | ||
| 31 | source "drivers/ieee1394/Kconfig" | 31 | source "drivers/firewire/Kconfig" |
| 32 | 32 | ||
| 33 | source "drivers/message/i2o/Kconfig" | 33 | source "drivers/message/i2o/Kconfig" |
| 34 | 34 | ||
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5f2c379ab7bf..79d33d908b5a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
| @@ -81,6 +81,23 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
| 81 | #ifdef CONFIG_ACPI_SLEEP | 81 | #ifdef CONFIG_ACPI_SLEEP |
| 82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 82 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
| 83 | /* | 83 | /* |
| 84 | * According to the ACPI specification the BIOS should make sure that ACPI is | ||
| 85 | * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, | ||
| 86 | * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI | ||
| 87 | * on such systems during resume. Unfortunately that doesn't help in | ||
| 88 | * particularly pathological cases in which SCI_EN has to be set directly on | ||
| 89 | * resume, although the specification states very clearly that this flag is | ||
| 90 | * owned by the hardware. The set_sci_en_on_resume variable will be set in such | ||
| 91 | * cases. | ||
| 92 | */ | ||
| 93 | static bool set_sci_en_on_resume; | ||
| 94 | |||
| 95 | void __init acpi_set_sci_en_on_resume(void) | ||
| 96 | { | ||
| 97 | set_sci_en_on_resume = true; | ||
| 98 | } | ||
| 99 | |||
| 100 | /* | ||
| 84 | * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the | 101 | * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the |
| 85 | * user to request that behavior by using the 'acpi_old_suspend_ordering' | 102 | * user to request that behavior by using the 'acpi_old_suspend_ordering' |
| 86 | * kernel command line option that causes the following variable to be set. | 103 | * kernel command line option that causes the following variable to be set. |
| @@ -170,18 +187,6 @@ static void acpi_pm_end(void) | |||
| 170 | #endif /* CONFIG_ACPI_SLEEP */ | 187 | #endif /* CONFIG_ACPI_SLEEP */ |
| 171 | 188 | ||
| 172 | #ifdef CONFIG_SUSPEND | 189 | #ifdef CONFIG_SUSPEND |
| 173 | /* | ||
| 174 | * According to the ACPI specification the BIOS should make sure that ACPI is | ||
| 175 | * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, | ||
| 176 | * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI | ||
| 177 | * on such systems during resume. Unfortunately that doesn't help in | ||
| 178 | * particularly pathological cases in which SCI_EN has to be set directly on | ||
| 179 | * resume, although the specification states very clearly that this flag is | ||
| 180 | * owned by the hardware. The set_sci_en_on_resume variable will be set in such | ||
| 181 | * cases. | ||
| 182 | */ | ||
| 183 | static bool set_sci_en_on_resume; | ||
| 184 | |||
| 185 | extern void do_suspend_lowlevel(void); | 190 | extern void do_suspend_lowlevel(void); |
| 186 | 191 | ||
| 187 | static u32 acpi_suspend_states[] = { | 192 | static u32 acpi_suspend_states[] = { |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 05dff631591c..72e76b4b6538 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
| @@ -999,8 +999,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
| 999 | sprintf(name, "acpi_video%d", count++); | 999 | sprintf(name, "acpi_video%d", count++); |
| 1000 | device->backlight = backlight_device_register(name, | 1000 | device->backlight = backlight_device_register(name, |
| 1001 | NULL, device, &acpi_backlight_ops); | 1001 | NULL, device, &acpi_backlight_ops); |
| 1002 | device->backlight->props.max_brightness = device->brightness->count-3; | ||
| 1003 | kfree(name); | 1002 | kfree(name); |
| 1003 | if (IS_ERR(device->backlight)) | ||
| 1004 | return; | ||
| 1005 | device->backlight->props.max_brightness = device->brightness->count-3; | ||
| 1004 | 1006 | ||
| 1005 | result = sysfs_create_link(&device->backlight->dev.kobj, | 1007 | result = sysfs_create_link(&device->backlight->dev.kobj, |
| 1006 | &device->dev->dev.kobj, "device"); | 1008 | &device->dev->dev.kobj, "device"); |
| @@ -1979,6 +1981,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event) | |||
| 1979 | unsigned long long level_current, level_next; | 1981 | unsigned long long level_current, level_next; |
| 1980 | int result = -EINVAL; | 1982 | int result = -EINVAL; |
| 1981 | 1983 | ||
| 1984 | /* no warning message if acpi_backlight=vendor is used */ | ||
| 1985 | if (!acpi_video_backlight_support()) | ||
| 1986 | return 0; | ||
| 1987 | |||
| 1982 | if (!device->brightness) | 1988 | if (!device->brightness) |
| 1983 | goto out; | 1989 | goto out; |
| 1984 | 1990 | ||
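The video.c hunk above moves the max_brightness assignment below an IS_ERR() check, because backlight_device_register() reports failure by returning an error value encoded in the pointer itself; dereferencing the result without that check is the bug being fixed. A small self-contained sketch of the encode-the-error-in-the-pointer idiom, using simplified stand-ins for ERR_PTR/IS_ERR/PTR_ERR rather than the kernel's actual definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
static void *err_ptr(long error)      { return (void *)error; }
static int   is_err(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *ptr) { return (long)ptr; }

/* A register function that either allocates an object or fails. */
static void *register_thing(int fail)
{
    if (fail)
        return err_ptr(-ENOMEM);   /* the error travels inside the pointer */
    return malloc(16);
}

int main(void)
{
    void *dev = register_thing(1);

    if (is_err(dev)) {             /* must check before any dereference */
        printf("registration failed: %ld\n", ptr_err(dev));
        return 1;
    }
    free(dev);
    return 0;
}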
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 19136a7e1064..6f3f2257d0f0 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
| @@ -329,7 +329,7 @@ static struct ata_port_operations ich_pata_ops = { | |||
| 329 | }; | 329 | }; |
| 330 | 330 | ||
| 331 | static struct ata_port_operations piix_sata_ops = { | 331 | static struct ata_port_operations piix_sata_ops = { |
| 332 | .inherits = &ata_bmdma_port_ops, | 332 | .inherits = &ata_bmdma32_port_ops, |
| 333 | }; | 333 | }; |
| 334 | 334 | ||
| 335 | static struct ata_port_operations piix_sidpr_sata_ops = { | 335 | static struct ata_port_operations piix_sidpr_sata_ops = { |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 22ff51bdbc8a..6728328f3bea 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
| @@ -3790,21 +3790,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, | |||
| 3790 | int sata_link_resume(struct ata_link *link, const unsigned long *params, | 3790 | int sata_link_resume(struct ata_link *link, const unsigned long *params, |
| 3791 | unsigned long deadline) | 3791 | unsigned long deadline) |
| 3792 | { | 3792 | { |
| 3793 | int tries = ATA_LINK_RESUME_TRIES; | ||
| 3793 | u32 scontrol, serror; | 3794 | u32 scontrol, serror; |
| 3794 | int rc; | 3795 | int rc; |
| 3795 | 3796 | ||
| 3796 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) | 3797 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
| 3797 | return rc; | 3798 | return rc; |
| 3798 | 3799 | ||
| 3799 | scontrol = (scontrol & 0x0f0) | 0x300; | 3800 | /* |
| 3801 | * Writes to SControl sometimes get ignored under certain | ||
| 3802 | * controllers (ata_piix SIDPR). Make sure DET actually is | ||
| 3803 | * cleared. | ||
| 3804 | */ | ||
| 3805 | do { | ||
| 3806 | scontrol = (scontrol & 0x0f0) | 0x300; | ||
| 3807 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) | ||
| 3808 | return rc; | ||
| 3809 | /* | ||
| 3810 | * Some PHYs react badly if SStatus is pounded | ||
| 3811 | * immediately after resuming. Delay 200ms before | ||
| 3812 | * debouncing. | ||
| 3813 | */ | ||
| 3814 | msleep(200); | ||
| 3800 | 3815 | ||
| 3801 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) | 3816 | /* is SControl restored correctly? */ |
| 3802 | return rc; | 3817 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
| 3818 | return rc; | ||
| 3819 | } while ((scontrol & 0xf0f) != 0x300 && --tries); | ||
| 3803 | 3820 | ||
| 3804 | /* Some PHYs react badly if SStatus is pounded immediately | 3821 | if ((scontrol & 0xf0f) != 0x300) { |
| 3805 | * after resuming. Delay 200ms before debouncing. | 3822 | ata_link_printk(link, KERN_ERR, |
| 3806 | */ | 3823 | "failed to resume link (SControl %X)\n", |
| 3807 | msleep(200); | 3824 | scontrol); |
| 3825 | return 0; | ||
| 3826 | } | ||
| 3827 | |||
| 3828 | if (tries < ATA_LINK_RESUME_TRIES) | ||
| 3829 | ata_link_printk(link, KERN_WARNING, | ||
| 3830 | "link resume succeeded after %d retries\n", | ||
| 3831 | ATA_LINK_RESUME_TRIES - tries); | ||
| 3808 | 3832 | ||
| 3809 | if ((rc = sata_link_debounce(link, params, deadline))) | 3833 | if ((rc = sata_link_debounce(link, params, deadline))) |
| 3810 | return rc; | 3834 | return rc; |
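The sata_link_resume() change above replaces a single SControl write with a bounded write-and-verify loop, since some controllers (the new comment names ata_piix SIDPR) can silently ignore the write; it retries up to ATA_LINK_RESUME_TRIES times and warns if retries were needed. The same pattern reduced to a runnable userspace sketch, with an invented flaky register standing in for the SCR access:

#include <stdio.h>

#define RESUME_TRIES 5   /* stands in for ATA_LINK_RESUME_TRIES */

static unsigned reg;     /* pretend hardware register */
static int drops = 2;    /* the first writes are silently ignored */

static void write_reg(unsigned v) { if (drops-- <= 0) reg = v; }
static unsigned read_reg(void)    { return reg; }

int main(void)
{
    int tries = RESUME_TRIES;
    unsigned want = 0x300;

    do {
        write_reg(want);
        /* read back: did the write actually stick? */
    } while (read_reg() != want && --tries);

    if (read_reg() != want)
        printf("failed to program register after %d tries\n", RESUME_TRIES);
    else if (tries < RESUME_TRIES)
        printf("succeeded after %d retries\n", RESUME_TRIES - tries);
    else
        printf("succeeded on first attempt\n");
    return 0;
}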
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 07d8d00b4d34..63306285c843 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
| @@ -862,7 +862,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
| 862 | if (port_status & PDC_DRIVE_ERR) | 862 | if (port_status & PDC_DRIVE_ERR) |
| 863 | ac_err_mask |= AC_ERR_DEV; | 863 | ac_err_mask |= AC_ERR_DEV; |
| 864 | if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR)) | 864 | if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR)) |
| 865 | ac_err_mask |= AC_ERR_HSM; | 865 | ac_err_mask |= AC_ERR_OTHER; |
| 866 | if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR)) | 866 | if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR)) |
| 867 | ac_err_mask |= AC_ERR_ATA_BUS; | 867 | ac_err_mask |= AC_ERR_ATA_BUS; |
| 868 | if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR | 868 | if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 48adf80926a0..a5142bddef41 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
| @@ -446,8 +446,8 @@ EXPORT_SYMBOL_GPL(dpm_resume_noirq); | |||
| 446 | 446 | ||
| 447 | /** | 447 | /** |
| 448 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | 448 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. |
| 449 | * dev: Device to resume. | 449 | * @dev: Device to resume. |
| 450 | * cb: Resume callback to execute. | 450 | * @cb: Resume callback to execute. |
| 451 | */ | 451 | */ |
| 452 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | 452 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) |
| 453 | { | 453 | { |
| @@ -711,8 +711,9 @@ EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | |||
| 711 | 711 | ||
| 712 | /** | 712 | /** |
| 713 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | 713 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. |
| 714 | * dev: Device to suspend. | 714 | * @dev: Device to suspend. |
| 715 | * cb: Suspend callback to execute. | 715 | * @state: PM transition of the system being carried out. |
| 716 | * @cb: Suspend callback to execute. | ||
| 716 | */ | 717 | */ |
| 717 | static int legacy_suspend(struct device *dev, pm_message_t state, | 718 | static int legacy_suspend(struct device *dev, pm_message_t state, |
| 718 | int (*cb)(struct device *dev, pm_message_t state)) | 719 | int (*cb)(struct device *dev, pm_message_t state)) |
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb4fa1943944..ce1fa923c414 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
| @@ -7101,7 +7101,7 @@ static struct DAC960_privdata DAC960_BA_privdata = { | |||
| 7101 | 7101 | ||
| 7102 | static struct DAC960_privdata DAC960_LP_privdata = { | 7102 | static struct DAC960_privdata DAC960_LP_privdata = { |
| 7103 | .HardwareType = DAC960_LP_Controller, | 7103 | .HardwareType = DAC960_LP_Controller, |
| 7104 | .FirmwareType = DAC960_LP_Controller, | 7104 | .FirmwareType = DAC960_V2_Controller, |
| 7105 | .InterruptHandler = DAC960_LP_InterruptHandler, | 7105 | .InterruptHandler = DAC960_LP_InterruptHandler, |
| 7106 | .MemoryWindowSize = DAC960_LP_RegisterWindowSize, | 7106 | .MemoryWindowSize = DAC960_LP_RegisterWindowSize, |
| 7107 | }; | 7107 | }; |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 13bb69d2abb3..64a223b0cc22 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
| @@ -735,21 +735,6 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector | |||
| 735 | part_stat_unlock(); | 735 | part_stat_unlock(); |
| 736 | } | 736 | } |
| 737 | 737 | ||
| 738 | /* | ||
| 739 | * Ensure we don't create aliases in VI caches | ||
| 740 | */ | ||
| 741 | static inline void | ||
| 742 | killalias(struct bio *bio) | ||
| 743 | { | ||
| 744 | struct bio_vec *bv; | ||
| 745 | int i; | ||
| 746 | |||
| 747 | if (bio_data_dir(bio) == READ) | ||
| 748 | __bio_for_each_segment(bv, bio, i, 0) { | ||
| 749 | flush_dcache_page(bv->bv_page); | ||
| 750 | } | ||
| 751 | } | ||
| 752 | |||
| 753 | void | 738 | void |
| 754 | aoecmd_ata_rsp(struct sk_buff *skb) | 739 | aoecmd_ata_rsp(struct sk_buff *skb) |
| 755 | { | 740 | { |
| @@ -871,7 +856,7 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
| 871 | if (buf->flags & BUFFL_FAIL) | 856 | if (buf->flags & BUFFL_FAIL) |
| 872 | bio_endio(buf->bio, -EIO); | 857 | bio_endio(buf->bio, -EIO); |
| 873 | else { | 858 | else { |
| 874 | killalias(buf->bio); | 859 | bio_flush_dcache_pages(buf->bio); |
| 875 | bio_endio(buf->bio, 0); | 860 | bio_endio(buf->bio, 0); |
| 876 | } | 861 | } |
| 877 | mempool_free(buf, d->bufpool); | 862 | mempool_free(buf, d->bufpool); |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2312d782fe99..c97558763430 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
| @@ -1490,7 +1490,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo); | |||
| 1490 | 1490 | ||
| 1491 | /* drbd_proc.c */ | 1491 | /* drbd_proc.c */ |
| 1492 | extern struct proc_dir_entry *drbd_proc; | 1492 | extern struct proc_dir_entry *drbd_proc; |
| 1493 | extern struct file_operations drbd_proc_fops; | 1493 | extern const struct file_operations drbd_proc_fops; |
| 1494 | extern const char *drbd_conn_str(enum drbd_conns s); | 1494 | extern const char *drbd_conn_str(enum drbd_conns s); |
| 1495 | extern const char *drbd_role_str(enum drbd_role s); | 1495 | extern const char *drbd_role_str(enum drbd_role s); |
| 1496 | 1496 | ||
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 157d1e4343c2..9348f33f6242 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
| @@ -27,7 +27,6 @@ | |||
| 27 | */ | 27 | */ |
| 28 | 28 | ||
| 29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 30 | #include <linux/version.h> | ||
| 31 | #include <linux/drbd.h> | 30 | #include <linux/drbd.h> |
| 32 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
| 33 | #include <asm/types.h> | 32 | #include <asm/types.h> |
| @@ -151,7 +150,7 @@ wait_queue_head_t drbd_pp_wait; | |||
| 151 | 150 | ||
| 152 | DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); | 151 | DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); |
| 153 | 152 | ||
| 154 | static struct block_device_operations drbd_ops = { | 153 | static const struct block_device_operations drbd_ops = { |
| 155 | .owner = THIS_MODULE, | 154 | .owner = THIS_MODULE, |
| 156 | .open = drbd_open, | 155 | .open = drbd_open, |
| 157 | .release = drbd_release, | 156 | .release = drbd_release, |
| @@ -3623,7 +3622,7 @@ _drbd_fault_random(struct fault_random_state *rsp) | |||
| 3623 | { | 3622 | { |
| 3624 | long refresh; | 3623 | long refresh; |
| 3625 | 3624 | ||
| 3626 | if (--rsp->count < 0) { | 3625 | if (!rsp->count--) { |
| 3627 | get_random_bytes(&refresh, sizeof(refresh)); | 3626 | get_random_bytes(&refresh, sizeof(refresh)); |
| 3628 | rsp->state += refresh; | 3627 | rsp->state += refresh; |
| 3629 | rsp->count = FAULT_RANDOM_REFRESH; | 3628 | rsp->count = FAULT_RANDOM_REFRESH; |
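The small-looking change in _drbd_fault_random() above swaps "--rsp->count < 0" for "!rsp->count--". The old form only fires when the counter goes negative, which can never happen if the counter has an unsigned type (compilers flag such a comparison as always false), so the fault-injection state would never be re-seeded; the new form fires exactly when the counter reaches zero regardless of signedness. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
    unsigned long count = 0;

    /*
     * With an unsigned counter, "--count < 0" is never true: the
     * decrement wraps to ULONG_MAX instead of going negative, so a
     * refresh branch guarded this way is dead code.
     */
    if (--count < 0)
        printf("old test fired\n");
    else
        printf("old test never fires, count wrapped to %lu\n", count);

    /* "!count--" tests for zero before decrementing and works for any type. */
    count = 0;
    if (!count--)
        printf("new test fires when the counter hits zero\n");

    return 0;
}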
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bdd0b4943b10..df8ad9660d8f 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
| @@ -38,7 +38,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file); | |||
| 38 | 38 | ||
| 39 | 39 | ||
| 40 | struct proc_dir_entry *drbd_proc; | 40 | struct proc_dir_entry *drbd_proc; |
| 41 | struct file_operations drbd_proc_fops = { | 41 | const struct file_operations drbd_proc_fops = { |
| 42 | .owner = THIS_MODULE, | 42 | .owner = THIS_MODULE, |
| 43 | .open = drbd_proc_open, | 43 | .open = drbd_proc_open, |
| 44 | .read = seq_read, | 44 | .read = seq_read, |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c548f24f54a1..259c1351b152 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
| 29 | #include <net/sock.h> | 29 | #include <net/sock.h> |
| 30 | 30 | ||
| 31 | #include <linux/version.h> | ||
| 32 | #include <linux/drbd.h> | 31 | #include <linux/drbd.h> |
| 33 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
| 34 | #include <linux/file.h> | 33 | #include <linux/file.h> |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ed8796f1112d..b453c2bca3be 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
| @@ -24,7 +24,6 @@ | |||
| 24 | */ | 24 | */ |
| 25 | 25 | ||
| 26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 27 | #include <linux/version.h> | ||
| 28 | #include <linux/drbd.h> | 27 | #include <linux/drbd.h> |
| 29 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
| 30 | #include <linux/smp_lock.h> | 29 | #include <linux/smp_lock.h> |
| @@ -34,7 +33,6 @@ | |||
| 34 | #include <linux/mm_inline.h> | 33 | #include <linux/mm_inline.h> |
| 35 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 36 | #include <linux/random.h> | 35 | #include <linux/random.h> |
| 37 | #include <linux/mm.h> | ||
| 38 | #include <linux/string.h> | 36 | #include <linux/string.h> |
| 39 | #include <linux/scatterlist.h> | 37 | #include <linux/scatterlist.h> |
| 40 | 38 | ||
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e0339aaa1815..02b2583df7fc 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
| @@ -860,7 +860,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
| 860 | err = -EINVAL; | 860 | err = -EINVAL; |
| 861 | goto probe_err_2; | 861 | goto probe_err_2; |
| 862 | } | 862 | } |
| 863 | host->dev_base = ioremap(rsc->start , rsc->end + 1); | 863 | host->dev_base = ioremap(rsc->start, resource_size(rsc)); |
| 864 | if (!host->dev_base) { | 864 | if (!host->dev_base) { |
| 865 | printk(KERN_ERR "%s:%d ioremap fail\n", | 865 | printk(KERN_ERR "%s:%d ioremap fail\n", |
| 866 | __func__, __LINE__); | 866 | __func__, __LINE__); |
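The mg_disk fix above maps resource_size(rsc) bytes instead of rsc->end + 1. A struct resource describes an inclusive [start, end] range, so its length is end - start + 1; using end + 1 as a length only gives the right number when start happens to be 0, and otherwise requests a mapping far larger than the device window. The arithmetic, with made-up addresses:

#include <stdio.h>

struct resource { unsigned long start, end; };

/* Length of an inclusive [start, end] range, as resource_size() computes it. */
static unsigned long resource_size(const struct resource *r)
{
    return r->end - r->start + 1;
}

int main(void)
{
    struct resource rsc = { .start = 0xf0000000UL, .end = 0xf0000fffUL };

    printf("correct size : 0x%lx\n", resource_size(&rsc)); /* 0x1000     */
    printf("end + 1      : 0x%lx\n", rsc.end + 1);         /* 0xf0001000 */
    return 0;
}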
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 2fb2e6cc322a..5aa7a586a7ff 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
| @@ -725,9 +725,14 @@ static struct pci_driver agp_amd64_pci_driver = { | |||
| 725 | int __init agp_amd64_init(void) | 725 | int __init agp_amd64_init(void) |
| 726 | { | 726 | { |
| 727 | int err = 0; | 727 | int err = 0; |
| 728 | static int done = 0; | ||
| 728 | 729 | ||
| 729 | if (agp_off) | 730 | if (agp_off) |
| 730 | return -EINVAL; | 731 | return -EINVAL; |
| 732 | |||
| 733 | if (done++) | ||
| 734 | return agp_bridges_found ? 0 : -ENODEV; | ||
| 735 | |||
| 731 | err = pci_register_driver(&agp_amd64_pci_driver); | 736 | err = pci_register_driver(&agp_amd64_pci_driver); |
| 732 | if (err < 0) | 737 | if (err < 0) |
| 733 | return err; | 738 | return err; |
| @@ -771,12 +776,8 @@ static void __exit agp_amd64_cleanup(void) | |||
| 771 | pci_unregister_driver(&agp_amd64_pci_driver); | 776 | pci_unregister_driver(&agp_amd64_pci_driver); |
| 772 | } | 777 | } |
| 773 | 778 | ||
| 774 | /* On AMD64 the PCI driver needs to initialize this driver early | ||
| 775 | for the IOMMU, so it has to be called via a backdoor. */ | ||
| 776 | #ifndef CONFIG_GART_IOMMU | ||
| 777 | module_init(agp_amd64_init); | 779 | module_init(agp_amd64_init); |
| 778 | module_exit(agp_amd64_cleanup); | 780 | module_exit(agp_amd64_cleanup); |
| 779 | #endif | ||
| 780 | 781 | ||
| 781 | MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); | 782 | MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); |
| 782 | module_param(agp_try_unsupported, bool, 0); | 783 | module_param(agp_try_unsupported, bool, 0); |
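agp_amd64_init() above gains a static done counter, and the #ifndef CONFIG_GART_IOMMU around module_init()/module_exit() goes away, so the function may now run once early for the IOMMU and again as a normal module initcall; the second call just reports whether the first one found a bridge. A reduced userspace sketch of that call-once initialisation pattern (the function and variable names here are placeholders, not the AGP API):

#include <stdio.h>

static int bridges_found;   /* stands in for agp_bridges_found */

static int probe_hardware(void)
{
    bridges_found = 1;      /* pretend the probe found a bridge */
    return 0;
}

static int my_driver_init(void)
{
    static int done;

    if (done++)             /* later calls: just report the first outcome */
        return bridges_found ? 0 : -1;

    return probe_hardware();
}

int main(void)
{
    printf("first  call: %d\n", my_driver_init()); /* does the real work */
    printf("second call: %d\n", my_driver_init()); /* cheap no-op */
    return 0;
}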
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index a56ca080e108..c3ab46da51a3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
| @@ -285,18 +285,22 @@ int agp_add_bridge(struct agp_bridge_data *bridge) | |||
| 285 | { | 285 | { |
| 286 | int error; | 286 | int error; |
| 287 | 287 | ||
| 288 | if (agp_off) | 288 | if (agp_off) { |
| 289 | return -ENODEV; | 289 | error = -ENODEV; |
| 290 | goto err_put_bridge; | ||
| 291 | } | ||
| 290 | 292 | ||
| 291 | if (!bridge->dev) { | 293 | if (!bridge->dev) { |
| 292 | printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); | 294 | printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); |
| 293 | return -EINVAL; | 295 | error = -EINVAL; |
| 296 | goto err_put_bridge; | ||
| 294 | } | 297 | } |
| 295 | 298 | ||
| 296 | /* Grab reference on the chipset driver. */ | 299 | /* Grab reference on the chipset driver. */ |
| 297 | if (!try_module_get(bridge->driver->owner)) { | 300 | if (!try_module_get(bridge->driver->owner)) { |
| 298 | dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); | 301 | dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); |
| 299 | return -EINVAL; | 302 | error = -EINVAL; |
| 303 | goto err_put_bridge; | ||
| 300 | } | 304 | } |
| 301 | 305 | ||
| 302 | error = agp_backend_initialize(bridge); | 306 | error = agp_backend_initialize(bridge); |
| @@ -326,6 +330,7 @@ frontend_err: | |||
| 326 | agp_backend_cleanup(bridge); | 330 | agp_backend_cleanup(bridge); |
| 327 | err_out: | 331 | err_out: |
| 328 | module_put(bridge->driver->owner); | 332 | module_put(bridge->driver->owner); |
| 333 | err_put_bridge: | ||
| 329 | agp_put_bridge(bridge); | 334 | agp_put_bridge(bridge); |
| 330 | return error; | 335 | return error; |
| 331 | } | 336 | } |
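The agp_add_bridge() hunk turns early returns into gotos so that agp_put_bridge() drops the caller's reference on every failure path, not only the late ones. The general goto-unwind shape, as a standalone sketch with an illustrative get/put pair rather than the real AGP refcounting:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
static void obj_put(struct obj *o)        { if (--o->refs == 0) free(o); }

static int add_object(struct obj *o, int disabled, int has_dev)
{
    int error;

    if (disabled) {
        error = -ENODEV;
        goto err_put;            /* still drop the caller's reference */
    }
    if (!has_dev) {
        error = -EINVAL;
        goto err_put;
    }
    return 0;                    /* success: keep the reference */

err_put:
    obj_put(o);
    return error;
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));

    if (!o)
        return 1;
    obj_get(o);                                       /* caller's reference */
    printf("add_object: %d\n", add_object(o, 1, 1));  /* fails, ref dropped */
    return 0;
}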
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 9047b2714653..58752b70efea 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
| @@ -488,9 +488,8 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) | |||
| 488 | handle = obj; | 488 | handle = obj; |
| 489 | do { | 489 | do { |
| 490 | status = acpi_get_object_info(handle, &info); | 490 | status = acpi_get_object_info(handle, &info); |
| 491 | if (ACPI_SUCCESS(status)) { | 491 | if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) { |
| 492 | /* TBD check _CID also */ | 492 | /* TBD check _CID also */ |
| 493 | info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0'; | ||
| 494 | match = (strcmp(info->hardware_id.string, "HWP0001") == 0); | 493 | match = (strcmp(info->hardware_id.string, "HWP0001") == 0); |
| 495 | kfree(info); | 494 | kfree(info); |
| 496 | if (match) { | 495 | if (match) { |
| @@ -509,6 +508,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) | |||
| 509 | handle = parent; | 508 | handle = parent; |
| 510 | } while (ACPI_SUCCESS(status)); | 509 | } while (ACPI_SUCCESS(status)); |
| 511 | 510 | ||
| 511 | if (ACPI_FAILURE(status)) | ||
| 512 | return AE_OK; /* found no enclosing IOC */ | ||
| 513 | |||
| 512 | if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) | 514 | if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) |
| 513 | return AE_OK; | 515 | return AE_OK; |
| 514 | 516 | ||
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e989f67bb61f..3d9c61e5acbf 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
| @@ -158,10 +158,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
| 158 | goto out; | 158 | goto out; |
| 159 | } | 159 | } |
| 160 | } | 160 | } |
| 161 | out_unlock: | ||
| 162 | mutex_unlock(&rng_mutex); | ||
| 163 | out: | 161 | out: |
| 164 | return ret ? : err; | 162 | return ret ? : err; |
| 163 | out_unlock: | ||
| 164 | mutex_unlock(&rng_mutex); | ||
| 165 | goto out; | ||
| 165 | } | 166 | } |
| 166 | 167 | ||
| 167 | 168 | ||
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 679cd08b80b4..176f1751237f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
| @@ -3204,7 +3204,7 @@ static __devinit int init_ipmi_si(void) | |||
| 3204 | #ifdef CONFIG_ACPI | 3204 | #ifdef CONFIG_ACPI |
| 3205 | spmi_find_bmc(); | 3205 | spmi_find_bmc(); |
| 3206 | #endif | 3206 | #endif |
| 3207 | #ifdef CONFIG_PNP | 3207 | #ifdef CONFIG_ACPI |
| 3208 | pnp_register_driver(&ipmi_pnp_driver); | 3208 | pnp_register_driver(&ipmi_pnp_driver); |
| 3209 | #endif | 3209 | #endif |
| 3210 | 3210 | ||
| @@ -3330,7 +3330,7 @@ static __exit void cleanup_ipmi_si(void) | |||
| 3330 | #ifdef CONFIG_PCI | 3330 | #ifdef CONFIG_PCI |
| 3331 | pci_unregister_driver(&ipmi_pci_driver); | 3331 | pci_unregister_driver(&ipmi_pci_driver); |
| 3332 | #endif | 3332 | #endif |
| 3333 | #ifdef CONFIG_PNP | 3333 | #ifdef CONFIG_ACPI |
| 3334 | pnp_unregister_driver(&ipmi_pnp_driver); | 3334 | pnp_unregister_driver(&ipmi_pnp_driver); |
| 3335 | #endif | 3335 | #endif |
| 3336 | 3336 | ||
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 0798754a607c..bba727c3807e 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
| @@ -50,7 +50,6 @@ | |||
| 50 | #include <linux/err.h> | 50 | #include <linux/err.h> |
| 51 | #include <linux/kfifo.h> | 51 | #include <linux/kfifo.h> |
| 52 | #include <linux/platform_device.h> | 52 | #include <linux/platform_device.h> |
| 53 | #include <linux/smp_lock.h> | ||
| 54 | 53 | ||
| 55 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
| 56 | #include <asm/io.h> | 55 | #include <asm/io.h> |
| @@ -905,14 +904,13 @@ static int sonypi_misc_release(struct inode *inode, struct file *file) | |||
| 905 | 904 | ||
| 906 | static int sonypi_misc_open(struct inode *inode, struct file *file) | 905 | static int sonypi_misc_open(struct inode *inode, struct file *file) |
| 907 | { | 906 | { |
| 908 | lock_kernel(); | ||
| 909 | mutex_lock(&sonypi_device.lock); | 907 | mutex_lock(&sonypi_device.lock); |
| 910 | /* Flush input queue on first open */ | 908 | /* Flush input queue on first open */ |
| 911 | if (!sonypi_device.open_count) | 909 | if (!sonypi_device.open_count) |
| 912 | kfifo_reset(&sonypi_device.fifo); | 910 | kfifo_reset(&sonypi_device.fifo); |
| 913 | sonypi_device.open_count++; | 911 | sonypi_device.open_count++; |
| 914 | mutex_unlock(&sonypi_device.lock); | 912 | mutex_unlock(&sonypi_device.lock); |
| 915 | unlock_kernel(); | 913 | |
| 916 | return 0; | 914 | return 0; |
| 917 | } | 915 | } |
| 918 | 916 | ||
| @@ -955,10 +953,10 @@ static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) | |||
| 955 | return 0; | 953 | return 0; |
| 956 | } | 954 | } |
| 957 | 955 | ||
| 958 | static int sonypi_misc_ioctl(struct inode *ip, struct file *fp, | 956 | static long sonypi_misc_ioctl(struct file *fp, |
| 959 | unsigned int cmd, unsigned long arg) | 957 | unsigned int cmd, unsigned long arg) |
| 960 | { | 958 | { |
| 961 | int ret = 0; | 959 | long ret = 0; |
| 962 | void __user *argp = (void __user *)arg; | 960 | void __user *argp = (void __user *)arg; |
| 963 | u8 val8; | 961 | u8 val8; |
| 964 | u16 val16; | 962 | u16 val16; |
| @@ -1074,7 +1072,8 @@ static const struct file_operations sonypi_misc_fops = { | |||
| 1074 | .open = sonypi_misc_open, | 1072 | .open = sonypi_misc_open, |
| 1075 | .release = sonypi_misc_release, | 1073 | .release = sonypi_misc_release, |
| 1076 | .fasync = sonypi_misc_fasync, | 1074 | .fasync = sonypi_misc_fasync, |
| 1077 | .ioctl = sonypi_misc_ioctl, | 1075 | .unlocked_ioctl = sonypi_misc_ioctl, |
| 1076 | .llseek = no_llseek, | ||
| 1078 | }; | 1077 | }; |
| 1079 | 1078 | ||
| 1080 | static struct miscdevice sonypi_misc_device = { | 1079 | static struct miscdevice sonypi_misc_device = { |
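sonypi here, and toshiba below, drop the legacy .ioctl hook, which the VFS called under the big kernel lock, in favour of .unlocked_ioctl: the handler loses the inode argument, returns long, and must provide its own serialisation (sonypi already has a device mutex; toshiba instead pushes lock_kernel() down into the handler, as its hunk shows). A rough userspace analogue of a handler that owns its locking, with pthreads standing in for the kernel mutex:

#include <stdio.h>
#include <pthread.h>
#include <errno.h>

/*
 * With .unlocked_ioctl the core no longer wraps the handler in a global
 * lock; the driver serialises access to its own state itself.
 */
struct dev {
    pthread_mutex_t lock;
    int brightness;
};

static long dev_ioctl(struct dev *d, unsigned int cmd, unsigned long arg)
{
    long ret = 0;

    pthread_mutex_lock(&d->lock);
    switch (cmd) {
    case 1:                       /* made-up "set brightness" command */
        d->brightness = (int)arg;
        break;
    default:
        ret = -EINVAL;
    }
    pthread_mutex_unlock(&d->lock);
    return ret;
}

int main(void)
{
    static struct dev d = { .lock = PTHREAD_MUTEX_INITIALIZER };
    long ret = dev_ioctl(&d, 1, 42);

    printf("ioctl ret=%ld, brightness=%d\n", ret, d.brightness);
    return 0;
}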
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 663cd15d7c78..f8bc79f6de34 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
| @@ -68,7 +68,7 @@ | |||
| 68 | #include <linux/stat.h> | 68 | #include <linux/stat.h> |
| 69 | #include <linux/proc_fs.h> | 69 | #include <linux/proc_fs.h> |
| 70 | #include <linux/seq_file.h> | 70 | #include <linux/seq_file.h> |
| 71 | 71 | #include <linux/smp_lock.h> | |
| 72 | #include <linux/toshiba.h> | 72 | #include <linux/toshiba.h> |
| 73 | 73 | ||
| 74 | #define TOSH_MINOR_DEV 181 | 74 | #define TOSH_MINOR_DEV 181 |
| @@ -88,13 +88,13 @@ static int tosh_date; | |||
| 88 | static int tosh_sci; | 88 | static int tosh_sci; |
| 89 | static int tosh_fan; | 89 | static int tosh_fan; |
| 90 | 90 | ||
| 91 | static int tosh_ioctl(struct inode *, struct file *, unsigned int, | 91 | static long tosh_ioctl(struct file *, unsigned int, |
| 92 | unsigned long); | 92 | unsigned long); |
| 93 | 93 | ||
| 94 | 94 | ||
| 95 | static const struct file_operations tosh_fops = { | 95 | static const struct file_operations tosh_fops = { |
| 96 | .owner = THIS_MODULE, | 96 | .owner = THIS_MODULE, |
| 97 | .ioctl = tosh_ioctl, | 97 | .unlocked_ioctl = tosh_ioctl, |
| 98 | }; | 98 | }; |
| 99 | 99 | ||
| 100 | static struct miscdevice tosh_device = { | 100 | static struct miscdevice tosh_device = { |
| @@ -252,8 +252,7 @@ int tosh_smm(SMMRegisters *regs) | |||
| 252 | EXPORT_SYMBOL(tosh_smm); | 252 | EXPORT_SYMBOL(tosh_smm); |
| 253 | 253 | ||
| 254 | 254 | ||
| 255 | static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, | 255 | static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) |
| 256 | unsigned long arg) | ||
| 257 | { | 256 | { |
| 258 | SMMRegisters regs; | 257 | SMMRegisters regs; |
| 259 | SMMRegisters __user *argp = (SMMRegisters __user *)arg; | 258 | SMMRegisters __user *argp = (SMMRegisters __user *)arg; |
| @@ -275,13 +274,16 @@ static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd, | |||
| 275 | return -EINVAL; | 274 | return -EINVAL; |
| 276 | 275 | ||
| 277 | /* do we need to emulate the fan ? */ | 276 | /* do we need to emulate the fan ? */ |
| 277 | lock_kernel(); | ||
| 278 | if (tosh_fan==1) { | 278 | if (tosh_fan==1) { |
| 279 | if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) { | 279 | if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) { |
| 280 | err = tosh_emulate_fan(®s); | 280 | err = tosh_emulate_fan(®s); |
| 281 | unlock_kernel(); | ||
| 281 | break; | 282 | break; |
| 282 | } | 283 | } |
| 283 | } | 284 | } |
| 284 | err = tosh_smm(®s); | 285 | err = tosh_smm(®s); |
| 286 | unlock_kernel(); | ||
| 285 | break; | 287 | break; |
| 286 | default: | 288 | default: |
| 287 | return -EINVAL; | 289 | return -EINVAL; |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 68104434ebb5..73655aeb3a60 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/hrtimer.h> | 18 | #include <linux/hrtimer.h> |
| 19 | #include <linux/tick.h> | 19 | #include <linux/tick.h> |
| 20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
| 21 | #include <linux/math64.h> | ||
| 21 | 22 | ||
| 22 | #define BUCKETS 12 | 23 | #define BUCKETS 12 |
| 23 | #define RESOLUTION 1024 | 24 | #define RESOLUTION 1024 |
| @@ -169,6 +170,12 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices); | |||
| 169 | 170 | ||
| 170 | static void menu_update(struct cpuidle_device *dev); | 171 | static void menu_update(struct cpuidle_device *dev); |
| 171 | 172 | ||
| 173 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ | ||
| 174 | static u64 div_round64(u64 dividend, u32 divisor) | ||
| 175 | { | ||
| 176 | return div_u64(dividend + (divisor / 2), divisor); | ||
| 177 | } | ||
| 178 | |||
| 172 | /** | 179 | /** |
| 173 | * menu_select - selects the next idle state to enter | 180 | * menu_select - selects the next idle state to enter |
| 174 | * @dev: the CPU | 181 | * @dev: the CPU |
| @@ -209,9 +216,8 @@ static int menu_select(struct cpuidle_device *dev) | |||
| 209 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | 216 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; |
| 210 | 217 | ||
| 211 | /* Make sure to round up for half microseconds */ | 218 | /* Make sure to round up for half microseconds */ |
| 212 | data->predicted_us = DIV_ROUND_CLOSEST( | 219 | data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], |
| 213 | data->expected_us * data->correction_factor[data->bucket], | 220 | RESOLUTION * DECAY); |
| 214 | RESOLUTION * DECAY); | ||
| 215 | 221 | ||
| 216 | /* | 222 | /* |
| 217 | * We want to default to C1 (hlt), not to busy polling | 223 | * We want to default to C1 (hlt), not to busy polling |
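div_round64() in the menu governor hunk reimplements round-to-nearest division for a u64 dividend on top of div_u64(); as the added comment says, this avoids the 64-bit division that DIV_ROUND_CLOSEST would otherwise generate, which 32-bit kernels cannot do with a plain '/'. Adding half the divisor before a truncating divide is what produces the rounding. A quick check of the arithmetic in ordinary C, with a plain 64-bit division modelling div_u64():

#include <stdio.h>
#include <stdint.h>

/* Round-to-nearest division: add half the divisor, then truncate. */
static uint64_t div_round64(uint64_t dividend, uint32_t divisor)
{
    return (dividend + divisor / 2) / divisor;
}

int main(void)
{
    /* 7/2 = 3.5 -> 4, 9/4 = 2.25 -> 2, 10/4 = 2.5 -> 3 (halves round up) */
    printf("%llu %llu %llu\n",
           (unsigned long long)div_round64(7, 2),
           (unsigned long long)div_round64(9, 4),
           (unsigned long long)div_round64(10, 4));
    return 0;
}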
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f15112569c1d..efc1a61ca231 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
| @@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan, | |||
| 815 | dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", | 815 | dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", |
| 816 | cookie, done ? *done : 0, used ? *used : 0); | 816 | cookie, done ? *done : 0, used ? *used : 0); |
| 817 | 817 | ||
| 818 | spin_lock_bh(atchan->lock); | 818 | spin_lock_bh(&atchan->lock); |
| 819 | 819 | ||
| 820 | last_complete = atchan->completed_cookie; | 820 | last_complete = atchan->completed_cookie; |
| 821 | last_used = chan->cookie; | 821 | last_used = chan->cookie; |
| @@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan, | |||
| 830 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 830 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
| 831 | } | 831 | } |
| 832 | 832 | ||
| 833 | spin_unlock_bh(atchan->lock); | 833 | spin_unlock_bh(&atchan->lock); |
| 834 | 834 | ||
| 835 | if (done) | 835 | if (done) |
| 836 | *done = last_complete; | 836 | *done = last_complete; |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4a99cd94536b..b5f2ee0f8e2c 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
| @@ -1294,8 +1294,8 @@ static int __exit coh901318_remove(struct platform_device *pdev) | |||
| 1294 | dma_async_device_unregister(&base->dma_slave); | 1294 | dma_async_device_unregister(&base->dma_slave); |
| 1295 | coh901318_pool_destroy(&base->pool); | 1295 | coh901318_pool_destroy(&base->pool); |
| 1296 | free_irq(platform_get_irq(pdev, 0), base); | 1296 | free_irq(platform_get_irq(pdev, 0), base); |
| 1297 | kfree(base); | ||
| 1298 | iounmap(base->virtbase); | 1297 | iounmap(base->virtbase); |
| 1298 | kfree(base); | ||
| 1299 | release_mem_region(pdev->resource->start, | 1299 | release_mem_region(pdev->resource->start, |
| 1300 | resource_size(pdev->resource)); | 1300 | resource_size(pdev->resource)); |
| 1301 | return 0; | 1301 | return 0; |
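The coh901318_remove() fix swaps kfree(base) and iounmap(base->virtbase): the old order freed the structure and then read base->virtbase out of it, a use-after-free. The rule it illustrates is to release everything reachable through an object before freeing the object itself; a minimal userspace version of the bug and the fix, with malloc/free standing in for the kzalloc/ioremap pairings:

#include <stdio.h>
#include <stdlib.h>

struct base {
    void *virtbase;      /* resource owned by the object */
};

static void remove_wrong(struct base *b)
{
    free(b);
    /* free(b->virtbase); here would now be a use-after-free of b */
}

static void remove_right(struct base *b)
{
    free(b->virtbase);   /* release what the object points to first */
    free(b);             /* then the object itself */
}

int main(void)
{
    struct base *a = malloc(sizeof(*a));
    struct base *b = malloc(sizeof(*b));

    if (!a || !b)
        return 1;
    a->virtbase = malloc(64);
    b->virtbase = malloc(64);

    remove_right(a);     /* everything released */
    remove_wrong(b);     /* b is gone; its virtbase leaks */
    return 0;
}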
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 285bed0fe17b..d28369f7afd2 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
| @@ -1270,8 +1270,6 @@ static int __init dw_probe(struct platform_device *pdev) | |||
| 1270 | goto err_kfree; | 1270 | goto err_kfree; |
| 1271 | } | 1271 | } |
| 1272 | 1272 | ||
| 1273 | memset(dw, 0, sizeof *dw); | ||
| 1274 | |||
| 1275 | dw->regs = ioremap(io->start, DW_REGLEN); | 1273 | dw->regs = ioremap(io->start, DW_REGLEN); |
| 1276 | if (!dw->regs) { | 1274 | if (!dw->regs) { |
| 1277 | err = -ENOMEM; | 1275 | err = -ENOMEM; |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36d3c2e..dcc4ab78b32b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
| @@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device) | |||
| 1032 | dma->dev = &pdev->dev; | 1032 | dma->dev = &pdev->dev; |
| 1033 | 1033 | ||
| 1034 | if (!dma->chancnt) { | 1034 | if (!dma->chancnt) { |
| 1035 | dev_err(dev, "zero channels detected\n"); | 1035 | dev_err(dev, "channel enumeration error\n"); |
| 1036 | goto err_setup_interrupts; | 1036 | goto err_setup_interrupts; |
| 1037 | } | 1037 | } |
| 1038 | 1038 | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 45edde996480..bbc3e78ef333 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
| @@ -60,6 +60,7 @@ | |||
| 60 | * @dca: direct cache access context | 60 | * @dca: direct cache access context |
| 61 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) | 61 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) |
| 62 | * @enumerate_channels: hw version specific channel enumeration | 62 | * @enumerate_channels: hw version specific channel enumeration |
| 63 | * @reset_hw: hw version specific channel (re)initialization | ||
| 63 | * @cleanup_tasklet: select between the v2 and v3 cleanup routines | 64 | * @cleanup_tasklet: select between the v2 and v3 cleanup routines |
| 64 | * @timer_fn: select between the v2 and v3 timer watchdog routines | 65 | * @timer_fn: select between the v2 and v3 timer watchdog routines |
| 65 | * @self_test: hardware version specific self test for each supported op type | 66 | * @self_test: hardware version specific self test for each supported op type |
| @@ -78,6 +79,7 @@ struct ioatdma_device { | |||
| 78 | struct dca_provider *dca; | 79 | struct dca_provider *dca; |
| 79 | void (*intr_quirk)(struct ioatdma_device *device); | 80 | void (*intr_quirk)(struct ioatdma_device *device); |
| 80 | int (*enumerate_channels)(struct ioatdma_device *device); | 81 | int (*enumerate_channels)(struct ioatdma_device *device); |
| 82 | int (*reset_hw)(struct ioat_chan_common *chan); | ||
| 81 | void (*cleanup_tasklet)(unsigned long data); | 83 | void (*cleanup_tasklet)(unsigned long data); |
| 82 | void (*timer_fn)(unsigned long data); | 84 | void (*timer_fn)(unsigned long data); |
| 83 | int (*self_test)(struct ioatdma_device *device); | 85 | int (*self_test)(struct ioatdma_device *device); |
| @@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan) | |||
| 264 | writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | 266 | writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); |
| 265 | } | 267 | } |
| 266 | 268 | ||
| 269 | static inline void ioat_reset(struct ioat_chan_common *chan) | ||
| 270 | { | ||
| 271 | u8 ver = chan->device->version; | ||
| 272 | |||
| 273 | writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
| 274 | } | ||
| 275 | |||
| 276 | static inline bool ioat_reset_pending(struct ioat_chan_common *chan) | ||
| 277 | { | ||
| 278 | u8 ver = chan->device->version; | ||
| 279 | u8 cmd; | ||
| 280 | |||
| 281 | cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
| 282 | return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; | ||
| 283 | } | ||
| 284 | |||
| 267 | static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) | 285 | static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) |
| 268 | { | 286 | { |
| 269 | struct ioat_chan_common *chan = &ioat->base; | 287 | struct ioat_chan_common *chan = &ioat->base; |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8f1f7f05deaa..5f7a500e18d0 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
| @@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) | |||
| 239 | __ioat2_start_null_desc(ioat); | 239 | __ioat2_start_null_desc(ioat); |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | 242 | int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) |
| 243 | { | 243 | { |
| 244 | struct ioat_chan_common *chan = &ioat->base; | 244 | unsigned long end = jiffies + tmo; |
| 245 | unsigned long phys_complete; | 245 | int err = 0; |
| 246 | u32 status; | 246 | u32 status; |
| 247 | 247 | ||
| 248 | status = ioat_chansts(chan); | 248 | status = ioat_chansts(chan); |
| 249 | if (is_ioat_active(status) || is_ioat_idle(status)) | 249 | if (is_ioat_active(status) || is_ioat_idle(status)) |
| 250 | ioat_suspend(chan); | 250 | ioat_suspend(chan); |
| 251 | while (is_ioat_active(status) || is_ioat_idle(status)) { | 251 | while (is_ioat_active(status) || is_ioat_idle(status)) { |
| 252 | if (end && time_after(jiffies, end)) { | ||
| 253 | err = -ETIMEDOUT; | ||
| 254 | break; | ||
| 255 | } | ||
| 252 | status = ioat_chansts(chan); | 256 | status = ioat_chansts(chan); |
| 253 | cpu_relax(); | 257 | cpu_relax(); |
| 254 | } | 258 | } |
| 255 | 259 | ||
| 260 | return err; | ||
| 261 | } | ||
| 262 | |||
| 263 | int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo) | ||
| 264 | { | ||
| 265 | unsigned long end = jiffies + tmo; | ||
| 266 | int err = 0; | ||
| 267 | |||
| 268 | ioat_reset(chan); | ||
| 269 | while (ioat_reset_pending(chan)) { | ||
| 270 | if (end && time_after(jiffies, end)) { | ||
| 271 | err = -ETIMEDOUT; | ||
| 272 | break; | ||
| 273 | } | ||
| 274 | cpu_relax(); | ||
| 275 | } | ||
| 276 | |||
| 277 | return err; | ||
| 278 | } | ||
| 279 | |||
| 280 | static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | ||
| 281 | { | ||
| 282 | struct ioat_chan_common *chan = &ioat->base; | ||
| 283 | unsigned long phys_complete; | ||
| 284 | |||
| 285 | ioat2_quiesce(chan, 0); | ||
| 256 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 286 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
| 257 | __cleanup(ioat, phys_complete); | 287 | __cleanup(ioat, phys_complete); |
| 258 | 288 | ||
| @@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data) | |||
| 318 | spin_unlock_bh(&chan->cleanup_lock); | 348 | spin_unlock_bh(&chan->cleanup_lock); |
| 319 | } | 349 | } |
| 320 | 350 | ||
| 351 | static int ioat2_reset_hw(struct ioat_chan_common *chan) | ||
| 352 | { | ||
| 353 | /* throw away whatever the channel was doing and get it initialized */ | ||
| 354 | u32 chanerr; | ||
| 355 | |||
| 356 | ioat2_quiesce(chan, msecs_to_jiffies(100)); | ||
| 357 | |||
| 358 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 359 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 360 | |||
| 361 | return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | ||
| 362 | } | ||
| 363 | |||
| 321 | /** | 364 | /** |
| 322 | * ioat2_enumerate_channels - find and initialize the device's channels | 365 | * ioat2_enumerate_channels - find and initialize the device's channels |
| 323 | * @device: the device to be enumerated | 366 | * @device: the device to be enumerated |
| @@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) | |||
| 360 | (unsigned long) ioat); | 403 | (unsigned long) ioat); |
| 361 | ioat->xfercap_log = xfercap_log; | 404 | ioat->xfercap_log = xfercap_log; |
| 362 | spin_lock_init(&ioat->ring_lock); | 405 | spin_lock_init(&ioat->ring_lock); |
| 406 | if (device->reset_hw(&ioat->base)) { | ||
| 407 | i = 0; | ||
| 408 | break; | ||
| 409 | } | ||
| 363 | } | 410 | } |
| 364 | dma->chancnt = i; | 411 | dma->chancnt = i; |
| 365 | return i; | 412 | return i; |
| @@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
| 467 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 514 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
| 468 | struct ioat_chan_common *chan = &ioat->base; | 515 | struct ioat_chan_common *chan = &ioat->base; |
| 469 | struct ioat_ring_ent **ring; | 516 | struct ioat_ring_ent **ring; |
| 470 | u32 chanerr; | ||
| 471 | int order; | 517 | int order; |
| 472 | 518 | ||
| 473 | /* have we already been set up? */ | 519 | /* have we already been set up? */ |
| @@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
| 477 | /* Setup register to interrupt and write completion status on error */ | 523 | /* Setup register to interrupt and write completion status on error */ |
| 478 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); | 524 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); |
| 479 | 525 | ||
| 480 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 481 | if (chanerr) { | ||
| 482 | dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); | ||
| 483 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 484 | } | ||
| 485 | |||
| 486 | /* allocate a completion writeback area */ | 526 | /* allocate a completion writeback area */ |
| 487 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | 527 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ |
| 488 | chan->completion = pci_pool_alloc(chan->device->completion_pool, | 528 | chan->completion = pci_pool_alloc(chan->device->completion_pool, |
| @@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) | |||
| 746 | tasklet_disable(&chan->cleanup_task); | 786 | tasklet_disable(&chan->cleanup_task); |
| 747 | del_timer_sync(&chan->timer); | 787 | del_timer_sync(&chan->timer); |
| 748 | device->cleanup_tasklet((unsigned long) ioat); | 788 | device->cleanup_tasklet((unsigned long) ioat); |
| 749 | 789 | device->reset_hw(chan); | |
| 750 | /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
| 751 | * before removing DMA descriptor resources. | ||
| 752 | */ | ||
| 753 | writeb(IOAT_CHANCMD_RESET, | ||
| 754 | chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
| 755 | mdelay(100); | ||
| 756 | 790 | ||
| 757 | spin_lock_bh(&ioat->ring_lock); | 791 | spin_lock_bh(&ioat->ring_lock); |
| 758 | descs = ioat2_ring_space(ioat); | 792 | descs = ioat2_ring_space(ioat); |
| @@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | |||
| 839 | int err; | 873 | int err; |
| 840 | 874 | ||
| 841 | device->enumerate_channels = ioat2_enumerate_channels; | 875 | device->enumerate_channels = ioat2_enumerate_channels; |
| 876 | device->reset_hw = ioat2_reset_hw; | ||
| 842 | device->cleanup_tasklet = ioat2_cleanup_tasklet; | 877 | device->cleanup_tasklet = ioat2_cleanup_tasklet; |
| 843 | device->timer_fn = ioat2_timer_event; | 878 | device->timer_fn = ioat2_timer_event; |
| 844 | device->self_test = ioat_dma_self_test; | 879 | device->self_test = ioat_dma_self_test; |
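ioat2_quiesce() and ioat2_reset_sync() added above share one pattern: issue a command, then poll the hardware status with cpu_relax() until it reports done or a jiffies deadline passes, returning -ETIMEDOUT on expiry. The same bounded-polling skeleton in portable C, with time() standing in for jiffies and a simulated device that stays busy for a few polls:

#include <stdio.h>
#include <time.h>

static int busy = 3;                 /* simulated device: busy for 3 polls */
static int device_busy(void) { return busy-- > 0; }

/* Poll until the device is idle or the deadline (0 = no deadline here) expires. */
static int wait_idle(unsigned int tmo_seconds)
{
    time_t end = time(NULL) + tmo_seconds;

    while (device_busy()) {
        if (tmo_seconds && time(NULL) > end)
            return -1;               /* -ETIMEDOUT in the kernel version */
        /* cpu_relax() or a short sleep would go here */
    }
    return 0;
}

int main(void)
{
    printf("wait_idle: %d\n", wait_idle(1));
    return 0;
}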
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef74d5f..3afad8da43cc 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
| @@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order); | |||
| 185 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); | 185 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); |
| 186 | void ioat2_cleanup_tasklet(unsigned long data); | 186 | void ioat2_cleanup_tasklet(unsigned long data); |
| 187 | void ioat2_timer_event(unsigned long data); | 187 | void ioat2_timer_event(unsigned long data); |
| 188 | int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); | ||
| 189 | int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); | ||
| 188 | extern struct kobj_type ioat2_ktype; | 190 | extern struct kobj_type ioat2_ktype; |
| 189 | extern struct kmem_cache *ioat2_cache; | 191 | extern struct kmem_cache *ioat2_cache; |
| 190 | #endif /* IOATDMA_V2_H */ | 192 | #endif /* IOATDMA_V2_H */ |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 42f6f10fb0cc..9908c9e94b2d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
| @@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
| 650 | 650 | ||
| 651 | num_descs = ioat2_xferlen_to_descs(ioat, len); | 651 | num_descs = ioat2_xferlen_to_descs(ioat, len); |
| 652 | /* we need 2x the number of descriptors to cover greater than 3 | 652 | /* we need 2x the number of descriptors to cover greater than 3 |
| 653 | * sources | 653 | * sources (we need 1 extra source in the q-only continuation |
| 654 | * case and 3 extra sources in the p+q continuation case. | ||
| 654 | */ | 655 | */ |
| 655 | if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) { | 656 | if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || |
| 657 | (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { | ||
| 656 | with_ext = 1; | 658 | with_ext = 1; |
| 657 | num_descs *= 2; | 659 | num_descs *= 2; |
| 658 | } else | 660 | } else |
| @@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) | |||
| 1128 | return 0; | 1130 | return 0; |
| 1129 | } | 1131 | } |
| 1130 | 1132 | ||
| 1133 | static int ioat3_reset_hw(struct ioat_chan_common *chan) | ||
| 1134 | { | ||
| 1135 | /* throw away whatever the channel was doing and get it | ||
| 1136 | * initialized, with ioat3 specific workarounds | ||
| 1137 | */ | ||
| 1138 | struct ioatdma_device *device = chan->device; | ||
| 1139 | struct pci_dev *pdev = device->pdev; | ||
| 1140 | u32 chanerr; | ||
| 1141 | u16 dev_id; | ||
| 1142 | int err; | ||
| 1143 | |||
| 1144 | ioat2_quiesce(chan, msecs_to_jiffies(100)); | ||
| 1145 | |||
| 1146 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 1147 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
| 1148 | |||
| 1149 | /* -= IOAT ver.3 workarounds =- */ | ||
| 1150 | /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
| 1151 | * that can cause stability issues for IOAT ver.3, and clear any | ||
| 1152 | * pending errors | ||
| 1153 | */ | ||
| 1154 | pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
| 1155 | err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | ||
| 1156 | if (err) { | ||
| 1157 | dev_err(&pdev->dev, "channel error register unreachable\n"); | ||
| 1158 | return err; | ||
| 1159 | } | ||
| 1160 | pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | ||
| 1161 | |||
| 1162 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
| 1163 | * (workaround for spurious config parity error after restart) | ||
| 1164 | */ | ||
| 1165 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
| 1166 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
| 1167 | pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
| 1168 | |||
| 1169 | return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | ||
| 1170 | } | ||
| 1171 | |||
| 1131 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | 1172 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) |
| 1132 | { | 1173 | { |
| 1133 | struct pci_dev *pdev = device->pdev; | 1174 | struct pci_dev *pdev = device->pdev; |
| @@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
| 1137 | struct ioat_chan_common *chan; | 1178 | struct ioat_chan_common *chan; |
| 1138 | bool is_raid_device = false; | 1179 | bool is_raid_device = false; |
| 1139 | int err; | 1180 | int err; |
| 1140 | u16 dev_id; | ||
| 1141 | u32 cap; | 1181 | u32 cap; |
| 1142 | 1182 | ||
| 1143 | device->enumerate_channels = ioat2_enumerate_channels; | 1183 | device->enumerate_channels = ioat2_enumerate_channels; |
| 1184 | device->reset_hw = ioat3_reset_hw; | ||
| 1144 | device->self_test = ioat3_dma_self_test; | 1185 | device->self_test = ioat3_dma_self_test; |
| 1145 | dma = &device->common; | 1186 | dma = &device->common; |
| 1146 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | 1187 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; |
| @@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
| 1216 | dma->device_prep_dma_xor_val = NULL; | 1257 | dma->device_prep_dma_xor_val = NULL; |
| 1217 | #endif | 1258 | #endif |
| 1218 | 1259 | ||
| 1219 | /* -= IOAT ver.3 workarounds =- */ | ||
| 1220 | /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
| 1221 | * that can cause stability issues for IOAT ver.3 | ||
| 1222 | */ | ||
| 1223 | pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
| 1224 | |||
| 1225 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
| 1226 | * (workaround for spurious config parity error after restart) | ||
| 1227 | */ | ||
| 1228 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
| 1229 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
| 1230 | pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
| 1231 | |||
| 1232 | err = ioat_probe(device); | 1260 | err = ioat_probe(device); |
| 1233 | if (err) | 1261 | if (err) |
| 1234 | return err; | 1262 | return err; |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f015ec196700..e8ae63baf588 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #define IOAT_PCI_DEVICE_ID_OFFSET 0x02 | 28 | #define IOAT_PCI_DEVICE_ID_OFFSET 0x02 |
| 29 | #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 | 29 | #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 |
| 30 | #define IOAT_PCI_CHANERR_INT_OFFSET 0x180 | ||
| 30 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 | 31 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 |
| 31 | 32 | ||
| 32 | /* MMIO Device Registers */ | 33 | /* MMIO Device Registers */ |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2e4a54c8afeb..d10cc899c460 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
| @@ -23,16 +23,19 @@ | |||
| 23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
| 24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
| 25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
| 26 | #include <linux/dmapool.h> | ||
| 27 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
| 28 | #include <cpu/dma.h> | 27 | #include <cpu/dma.h> |
| 29 | #include <asm/dma-sh.h> | 28 | #include <asm/dma-sh.h> |
| 30 | #include "shdma.h" | 29 | #include "shdma.h" |
| 31 | 30 | ||
| 32 | /* DMA descriptor control */ | 31 | /* DMA descriptor control */ |
| 33 | #define DESC_LAST (-1) | 32 | enum sh_dmae_desc_status { |
| 34 | #define DESC_COMP (1) | 33 | DESC_IDLE, |
| 35 | #define DESC_NCOMP (0) | 34 | DESC_PREPARED, |
| 35 | DESC_SUBMITTED, | ||
| 36 | DESC_COMPLETED, /* completed, have to call callback */ | ||
| 37 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | ||
| 38 | }; | ||
| 36 | 39 | ||
| 37 | #define NR_DESCS_PER_CHANNEL 32 | 40 | #define NR_DESCS_PER_CHANNEL 32 |
| 38 | /* | 41 | /* |
| @@ -45,6 +48,8 @@ | |||
| 45 | */ | 48 | */ |
| 46 | #define RS_DEFAULT (RS_DUAL) | 49 | #define RS_DEFAULT (RS_DUAL) |
| 47 | 50 | ||
| 51 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | ||
| 52 | |||
| 48 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | 53 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) |
| 49 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 54 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
| 50 | { | 55 | { |
| @@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) | |||
| 106 | return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; | 111 | return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; |
| 107 | } | 112 | } |
| 108 | 113 | ||
| 109 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw) | 114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
| 110 | { | 115 | { |
| 111 | sh_dmae_writel(sh_chan, hw.sar, SAR); | 116 | sh_dmae_writel(sh_chan, hw->sar, SAR); |
| 112 | sh_dmae_writel(sh_chan, hw.dar, DAR); | 117 | sh_dmae_writel(sh_chan, hw->dar, DAR); |
| 113 | sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR); | 118 | sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR); |
| 114 | } | 119 | } |
| 115 | 120 | ||
| 116 | static void dmae_start(struct sh_dmae_chan *sh_chan) | 121 | static void dmae_start(struct sh_dmae_chan *sh_chan) |
| @@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | |||
| 184 | 189 | ||
| 185 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | 190 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) |
| 186 | { | 191 | { |
| 187 | struct sh_desc *desc = tx_to_sh_desc(tx); | 192 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; |
| 188 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); | 193 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); |
| 194 | dma_async_tx_callback callback = tx->callback; | ||
| 189 | dma_cookie_t cookie; | 195 | dma_cookie_t cookie; |
| 190 | 196 | ||
| 191 | spin_lock_bh(&sh_chan->desc_lock); | 197 | spin_lock_bh(&sh_chan->desc_lock); |
| @@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 195 | if (cookie < 0) | 201 | if (cookie < 0) |
| 196 | cookie = 1; | 202 | cookie = 1; |
| 197 | 203 | ||
| 198 | /* If desc only in the case of 1 */ | 204 | sh_chan->common.cookie = cookie; |
| 199 | if (desc->async_tx.cookie != -EBUSY) | 205 | tx->cookie = cookie; |
| 200 | desc->async_tx.cookie = cookie; | 206 | |
| 201 | sh_chan->common.cookie = desc->async_tx.cookie; | 207 | /* Mark all chunks of this descriptor as submitted, move to the queue */ |
| 208 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | ||
| 209 | /* | ||
| 210 | * All chunks are on the global ld_free, so, we have to find | ||
| 211 | * the end of the chain ourselves | ||
| 212 | */ | ||
| 213 | if (chunk != desc && (chunk->mark == DESC_IDLE || | ||
| 214 | chunk->async_tx.cookie > 0 || | ||
| 215 | chunk->async_tx.cookie == -EBUSY || | ||
| 216 | &chunk->node == &sh_chan->ld_free)) | ||
| 217 | break; | ||
| 218 | chunk->mark = DESC_SUBMITTED; | ||
| 219 | /* Callback goes to the last chunk */ | ||
| 220 | chunk->async_tx.callback = NULL; | ||
| 221 | chunk->cookie = cookie; | ||
| 222 | list_move_tail(&chunk->node, &sh_chan->ld_queue); | ||
| 223 | last = chunk; | ||
| 224 | } | ||
| 225 | |||
| 226 | last->async_tx.callback = callback; | ||
| 227 | last->async_tx.callback_param = tx->callback_param; | ||
| 202 | 228 | ||
| 203 | list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev); | 229 | dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", |
| 230 | tx->cookie, &last->async_tx, sh_chan->id, | ||
| 231 | desc->hw.sar, desc->hw.tcr, desc->hw.dar); | ||
| 204 | 232 | ||
| 205 | spin_unlock_bh(&sh_chan->desc_lock); | 233 | spin_unlock_bh(&sh_chan->desc_lock); |
| 206 | 234 | ||
| 207 | return cookie; | 235 | return cookie; |
| 208 | } | 236 | } |
| 209 | 237 | ||
| 238 | /* Called with desc_lock held */ | ||
| 210 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | 239 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) |
| 211 | { | 240 | { |
| 212 | struct sh_desc *desc, *_desc, *ret = NULL; | 241 | struct sh_desc *desc; |
| 213 | 242 | ||
| 214 | spin_lock_bh(&sh_chan->desc_lock); | 243 | list_for_each_entry(desc, &sh_chan->ld_free, node) |
| 215 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) { | 244 | if (desc->mark != DESC_PREPARED) { |
| 216 | if (async_tx_test_ack(&desc->async_tx)) { | 245 | BUG_ON(desc->mark != DESC_IDLE); |
| 217 | list_del(&desc->node); | 246 | list_del(&desc->node); |
| 218 | ret = desc; | 247 | return desc; |
| 219 | break; | ||
| 220 | } | 248 | } |
| 221 | } | ||
| 222 | spin_unlock_bh(&sh_chan->desc_lock); | ||
| 223 | |||
| 224 | return ret; | ||
| 225 | } | ||
| 226 | |||
| 227 | static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc) | ||
| 228 | { | ||
| 229 | if (desc) { | ||
| 230 | spin_lock_bh(&sh_chan->desc_lock); | ||
| 231 | |||
| 232 | list_splice_init(&desc->tx_list, &sh_chan->ld_free); | ||
| 233 | list_add(&desc->node, &sh_chan->ld_free); | ||
| 234 | 249 | ||
| 235 | spin_unlock_bh(&sh_chan->desc_lock); | 250 | return NULL; |
| 236 | } | ||
| 237 | } | 251 | } |
| 238 | 252 | ||
| 239 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | 253 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) |
| @@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
| 252 | dma_async_tx_descriptor_init(&desc->async_tx, | 266 | dma_async_tx_descriptor_init(&desc->async_tx, |
| 253 | &sh_chan->common); | 267 | &sh_chan->common); |
| 254 | desc->async_tx.tx_submit = sh_dmae_tx_submit; | 268 | desc->async_tx.tx_submit = sh_dmae_tx_submit; |
| 255 | desc->async_tx.flags = DMA_CTRL_ACK; | 269 | desc->mark = DESC_IDLE; |
| 256 | INIT_LIST_HEAD(&desc->tx_list); | ||
| 257 | sh_dmae_put_desc(sh_chan, desc); | ||
| 258 | 270 | ||
| 259 | spin_lock_bh(&sh_chan->desc_lock); | 271 | spin_lock_bh(&sh_chan->desc_lock); |
| 272 | list_add(&desc->node, &sh_chan->ld_free); | ||
| 260 | sh_chan->descs_allocated++; | 273 | sh_chan->descs_allocated++; |
| 261 | } | 274 | } |
| 262 | spin_unlock_bh(&sh_chan->desc_lock); | 275 | spin_unlock_bh(&sh_chan->desc_lock); |
| @@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
| 273 | struct sh_desc *desc, *_desc; | 286 | struct sh_desc *desc, *_desc; |
| 274 | LIST_HEAD(list); | 287 | LIST_HEAD(list); |
| 275 | 288 | ||
| 276 | BUG_ON(!list_empty(&sh_chan->ld_queue)); | 289 | /* Prepared and not submitted descriptors can still be on the queue */ |
| 290 | if (!list_empty(&sh_chan->ld_queue)) | ||
| 291 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
| 292 | |||
| 277 | spin_lock_bh(&sh_chan->desc_lock); | 293 | spin_lock_bh(&sh_chan->desc_lock); |
| 278 | 294 | ||
| 279 | list_splice_init(&sh_chan->ld_free, &list); | 295 | list_splice_init(&sh_chan->ld_free, &list); |
| @@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
| 292 | struct sh_dmae_chan *sh_chan; | 308 | struct sh_dmae_chan *sh_chan; |
| 293 | struct sh_desc *first = NULL, *prev = NULL, *new; | 309 | struct sh_desc *first = NULL, *prev = NULL, *new; |
| 294 | size_t copy_size; | 310 | size_t copy_size; |
| 311 | LIST_HEAD(tx_list); | ||
| 312 | int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1); | ||
| 295 | 313 | ||
| 296 | if (!chan) | 314 | if (!chan) |
| 297 | return NULL; | 315 | return NULL; |
| @@ -301,108 +319,189 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
| 301 | 319 | ||
| 302 | sh_chan = to_sh_chan(chan); | 320 | sh_chan = to_sh_chan(chan); |
| 303 | 321 | ||
| 322 | /* Have to lock the whole loop to protect against concurrent release */ | ||
| 323 | spin_lock_bh(&sh_chan->desc_lock); | ||
| 324 | |||
| 325 | /* | ||
| 326 | * Chaining: | ||
| 327 | * first descriptor is what user is dealing with in all API calls, its | ||
| 328 | * cookie is at first set to -EBUSY, at tx-submit to a positive | ||
| 329 | * number | ||
| 330 | * if more than one chunk is needed further chunks have cookie = -EINVAL | ||
| 331 | * the last chunk, if not equal to the first, has cookie = -ENOSPC | ||
| 332 | * all chunks are linked onto the tx_list head with their .node heads | ||
| 333 | * only during this function, then they are immediately spliced | ||
| 334 | * back onto the free list in form of a chain | ||
| 335 | */ | ||
| 304 | do { | 336 | do { |
| 305 | /* Allocate the link descriptor from DMA pool */ | 337 | /* Allocate the link descriptor from the free list */ |
| 306 | new = sh_dmae_get_desc(sh_chan); | 338 | new = sh_dmae_get_desc(sh_chan); |
| 307 | if (!new) { | 339 | if (!new) { |
| 308 | dev_err(sh_chan->dev, | 340 | dev_err(sh_chan->dev, |
| 309 | "No free memory for link descriptor\n"); | 341 | "No free memory for link descriptor\n"); |
| 310 | goto err_get_desc; | 342 | list_for_each_entry(new, &tx_list, node) |
| 343 | new->mark = DESC_IDLE; | ||
| 344 | list_splice(&tx_list, &sh_chan->ld_free); | ||
| 345 | spin_unlock_bh(&sh_chan->desc_lock); | ||
| 346 | return NULL; | ||
| 311 | } | 347 | } |
| 312 | 348 | ||
| 313 | copy_size = min(len, (size_t)SH_DMA_TCR_MAX); | 349 | copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1); |
| 314 | 350 | ||
| 315 | new->hw.sar = dma_src; | 351 | new->hw.sar = dma_src; |
| 316 | new->hw.dar = dma_dest; | 352 | new->hw.dar = dma_dest; |
| 317 | new->hw.tcr = copy_size; | 353 | new->hw.tcr = copy_size; |
| 318 | if (!first) | 354 | if (!first) { |
| 355 | /* First desc */ | ||
| 356 | new->async_tx.cookie = -EBUSY; | ||
| 319 | first = new; | 357 | first = new; |
| 358 | } else { | ||
| 359 | /* Other desc - invisible to the user */ | ||
| 360 | new->async_tx.cookie = -EINVAL; | ||
| 361 | } | ||
| 320 | 362 | ||
| 321 | new->mark = DESC_NCOMP; | 363 | dev_dbg(sh_chan->dev, |
| 322 | async_tx_ack(&new->async_tx); | 364 | "chaining %u of %u with %p, dst %x, cookie %d\n", |
| 365 | copy_size, len, &new->async_tx, dma_dest, | ||
| 366 | new->async_tx.cookie); | ||
| 367 | |||
| 368 | new->mark = DESC_PREPARED; | ||
| 369 | new->async_tx.flags = flags; | ||
| 370 | new->chunks = chunks--; | ||
| 323 | 371 | ||
| 324 | prev = new; | 372 | prev = new; |
| 325 | len -= copy_size; | 373 | len -= copy_size; |
| 326 | dma_src += copy_size; | 374 | dma_src += copy_size; |
| 327 | dma_dest += copy_size; | 375 | dma_dest += copy_size; |
| 328 | /* Insert the link descriptor to the LD ring */ | 376 | /* Insert the link descriptor to the LD ring */ |
| 329 | list_add_tail(&new->node, &first->tx_list); | 377 | list_add_tail(&new->node, &tx_list); |
| 330 | } while (len); | 378 | } while (len); |
| 331 | 379 | ||
| 332 | new->async_tx.flags = flags; /* client is in control of this ack */ | 380 | if (new != first) |
| 333 | new->async_tx.cookie = -EBUSY; /* Last desc */ | 381 | new->async_tx.cookie = -ENOSPC; |
| 334 | 382 | ||
| 335 | return &first->async_tx; | 383 | /* Put them back on the free list, so, they don't get lost */ |
| 384 | list_splice_tail(&tx_list, &sh_chan->ld_free); | ||
| 336 | 385 | ||
| 337 | err_get_desc: | 386 | spin_unlock_bh(&sh_chan->desc_lock); |
| 338 | sh_dmae_put_desc(sh_chan, first); | ||
| 339 | return NULL; | ||
| 340 | 387 | ||
| 388 | return &first->async_tx; | ||
| 341 | } | 389 | } |
| 342 | 390 | ||
| 343 | /* | 391 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) |
| 344 | * sh_chan_ld_cleanup - Clean up link descriptors | ||
| 345 | * | ||
| 346 | * This function clean up the ld_queue of DMA channel. | ||
| 347 | */ | ||
| 348 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan) | ||
| 349 | { | 392 | { |
| 350 | struct sh_desc *desc, *_desc; | 393 | struct sh_desc *desc, *_desc; |
| 394 | /* Is the "exposed" head of a chain acked? */ | ||
| 395 | bool head_acked = false; | ||
| 396 | dma_cookie_t cookie = 0; | ||
| 397 | dma_async_tx_callback callback = NULL; | ||
| 398 | void *param = NULL; | ||
| 351 | 399 | ||
| 352 | spin_lock_bh(&sh_chan->desc_lock); | 400 | spin_lock_bh(&sh_chan->desc_lock); |
| 353 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { | 401 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { |
| 354 | dma_async_tx_callback callback; | 402 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
| 355 | void *callback_param; | 403 | |
| 356 | 404 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | |
| 357 | /* non send data */ | 405 | BUG_ON(desc->mark != DESC_SUBMITTED && |
| 358 | if (desc->mark == DESC_NCOMP) | 406 | desc->mark != DESC_COMPLETED && |
| 407 | desc->mark != DESC_WAITING); | ||
| 408 | |||
| 409 | /* | ||
| 410 | * queue is ordered, and we use this loop to (1) clean up all | ||
| 411 | * completed descriptors, and to (2) update descriptor flags of | ||
| 412 | * any chunks in a (partially) completed chain | ||
| 413 | */ | ||
| 414 | if (!all && desc->mark == DESC_SUBMITTED && | ||
| 415 | desc->cookie != cookie) | ||
| 359 | break; | 416 | break; |
| 360 | 417 | ||
| 361 | /* send data sesc */ | 418 | if (tx->cookie > 0) |
| 362 | callback = desc->async_tx.callback; | 419 | cookie = tx->cookie; |
| 363 | callback_param = desc->async_tx.callback_param; | ||
| 364 | 420 | ||
| 365 | /* Remove from ld_queue list */ | 421 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { |
| 366 | list_splice_init(&desc->tx_list, &sh_chan->ld_free); | 422 | BUG_ON(sh_chan->completed_cookie != desc->cookie - 1); |
| 423 | sh_chan->completed_cookie = desc->cookie; | ||
| 424 | } | ||
| 367 | 425 | ||
| 368 | dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n", | 426 | /* Call callback on the last chunk */ |
| 369 | desc); | 427 | if (desc->mark == DESC_COMPLETED && tx->callback) { |
| 428 | desc->mark = DESC_WAITING; | ||
| 429 | callback = tx->callback; | ||
| 430 | param = tx->callback_param; | ||
| 431 | dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", | ||
| 432 | tx->cookie, tx, sh_chan->id); | ||
| 433 | BUG_ON(desc->chunks != 1); | ||
| 434 | break; | ||
| 435 | } | ||
| 370 | 436 | ||
| 371 | list_move(&desc->node, &sh_chan->ld_free); | 437 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { |
| 372 | /* Run the link descriptor callback function */ | 438 | if (desc->mark == DESC_COMPLETED) { |
| 373 | if (callback) { | 439 | BUG_ON(tx->cookie < 0); |
| 374 | spin_unlock_bh(&sh_chan->desc_lock); | 440 | desc->mark = DESC_WAITING; |
| 375 | dev_dbg(sh_chan->dev, "link descriptor %p callback\n", | 441 | } |
| 376 | desc); | 442 | head_acked = async_tx_test_ack(tx); |
| 377 | callback(callback_param); | 443 | } else { |
| 378 | spin_lock_bh(&sh_chan->desc_lock); | 444 | switch (desc->mark) { |
| 445 | case DESC_COMPLETED: | ||
| 446 | desc->mark = DESC_WAITING; | ||
| 447 | /* Fall through */ | ||
| 448 | case DESC_WAITING: | ||
| 449 | if (head_acked) | ||
| 450 | async_tx_ack(&desc->async_tx); | ||
| 451 | } | ||
| 452 | } | ||
| 453 | |||
| 454 | dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", | ||
| 455 | tx, tx->cookie); | ||
| 456 | |||
| 457 | if (((desc->mark == DESC_COMPLETED || | ||
| 458 | desc->mark == DESC_WAITING) && | ||
| 459 | async_tx_test_ack(&desc->async_tx)) || all) { | ||
| 460 | /* Remove from ld_queue list */ | ||
| 461 | desc->mark = DESC_IDLE; | ||
| 462 | list_move(&desc->node, &sh_chan->ld_free); | ||
| 379 | } | 463 | } |
| 380 | } | 464 | } |
| 381 | spin_unlock_bh(&sh_chan->desc_lock); | 465 | spin_unlock_bh(&sh_chan->desc_lock); |
| 466 | |||
| 467 | if (callback) | ||
| 468 | callback(param); | ||
| 469 | |||
| 470 | return callback; | ||
| 471 | } | ||
| 472 | |||
| 473 | /* | ||
| 474 | * sh_chan_ld_cleanup - Clean up link descriptors | ||
| 475 | * | ||
| 476 | * This function cleans up the ld_queue of DMA channel. | ||
| 477 | */ | ||
| 478 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | ||
| 479 | { | ||
| 480 | while (__ld_cleanup(sh_chan, all)) | ||
| 481 | ; | ||
| 382 | } | 482 | } |
| 383 | 483 | ||
| 384 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 484 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) |
| 385 | { | 485 | { |
| 386 | struct list_head *ld_node; | 486 | struct sh_desc *sd; |
| 387 | struct sh_dmae_regs hw; | ||
| 388 | 487 | ||
| 488 | spin_lock_bh(&sh_chan->desc_lock); | ||
| 389 | /* DMA work check */ | 489 | /* DMA work check */ |
| 390 | if (dmae_is_busy(sh_chan)) | 490 | if (dmae_is_busy(sh_chan)) { |
| 491 | spin_unlock_bh(&sh_chan->desc_lock); | ||
| 391 | return; | 492 | return; |
| 493 | } | ||
| 392 | 494 | ||
| 393 | /* Find the first un-transfer desciptor */ | 495 | /* Find the first un-transfer desciptor */ |
| 394 | for (ld_node = sh_chan->ld_queue.next; | 496 | list_for_each_entry(sd, &sh_chan->ld_queue, node) |
| 395 | (ld_node != &sh_chan->ld_queue) | 497 | if (sd->mark == DESC_SUBMITTED) { |
| 396 | && (to_sh_desc(ld_node)->mark == DESC_COMP); | 498 | /* Get the ld start address from ld_queue */ |
| 397 | ld_node = ld_node->next) | 499 | dmae_set_reg(sh_chan, &sd->hw); |
| 398 | cpu_relax(); | 500 | dmae_start(sh_chan); |
| 399 | 501 | break; | |
| 400 | if (ld_node != &sh_chan->ld_queue) { | 502 | } |
| 401 | /* Get the ld start address from ld_queue */ | 503 | |
| 402 | hw = to_sh_desc(ld_node)->hw; | 504 | spin_unlock_bh(&sh_chan->desc_lock); |
| 403 | dmae_set_reg(sh_chan, hw); | ||
| 404 | dmae_start(sh_chan); | ||
| 405 | } | ||
| 406 | } | 505 | } |
| 407 | 506 | ||
| 408 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) | 507 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) |
| @@ -420,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, | |||
| 420 | dma_cookie_t last_used; | 519 | dma_cookie_t last_used; |
| 421 | dma_cookie_t last_complete; | 520 | dma_cookie_t last_complete; |
| 422 | 521 | ||
| 423 | sh_dmae_chan_ld_cleanup(sh_chan); | 522 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
| 424 | 523 | ||
| 425 | last_used = chan->cookie; | 524 | last_used = chan->cookie; |
| 426 | last_complete = sh_chan->completed_cookie; | 525 | last_complete = sh_chan->completed_cookie; |
| 427 | if (last_complete == -EBUSY) | 526 | BUG_ON(last_complete < 0); |
| 428 | last_complete = last_used; | ||
| 429 | 527 | ||
| 430 | if (done) | 528 | if (done) |
| 431 | *done = last_complete; | 529 | *done = last_complete; |
| @@ -480,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data) | |||
| 480 | err = sh_dmae_rst(0); | 578 | err = sh_dmae_rst(0); |
| 481 | if (err) | 579 | if (err) |
| 482 | return err; | 580 | return err; |
| 581 | #ifdef SH_DMAC_BASE1 | ||
| 483 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | 582 | if (shdev->pdata.mode & SHDMA_DMAOR1) { |
| 484 | err = sh_dmae_rst(1); | 583 | err = sh_dmae_rst(1); |
| 485 | if (err) | 584 | if (err) |
| 486 | return err; | 585 | return err; |
| 487 | } | 586 | } |
| 587 | #endif | ||
| 488 | disable_irq(irq); | 588 | disable_irq(irq); |
| 489 | return IRQ_HANDLED; | 589 | return IRQ_HANDLED; |
| 490 | } | 590 | } |
| @@ -494,35 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data) | |||
| 494 | static void dmae_do_tasklet(unsigned long data) | 594 | static void dmae_do_tasklet(unsigned long data) |
| 495 | { | 595 | { |
| 496 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 596 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; |
| 497 | struct sh_desc *desc, *_desc, *cur_desc = NULL; | 597 | struct sh_desc *desc; |
| 498 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 598 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); |
| 499 | 599 | ||
| 500 | list_for_each_entry_safe(desc, _desc, | 600 | spin_lock(&sh_chan->desc_lock); |
| 501 | &sh_chan->ld_queue, node) { | 601 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
| 502 | if ((desc->hw.sar + desc->hw.tcr) == sar_buf) { | 602 | if ((desc->hw.sar + desc->hw.tcr) == sar_buf && |
| 503 | cur_desc = desc; | 603 | desc->mark == DESC_SUBMITTED) { |
| 604 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | ||
| 605 | desc->async_tx.cookie, &desc->async_tx, | ||
| 606 | desc->hw.dar); | ||
| 607 | desc->mark = DESC_COMPLETED; | ||
| 504 | break; | 608 | break; |
| 505 | } | 609 | } |
| 506 | } | 610 | } |
| 611 | spin_unlock(&sh_chan->desc_lock); | ||
| 507 | 612 | ||
| 508 | if (cur_desc) { | ||
| 509 | switch (cur_desc->async_tx.cookie) { | ||
| 510 | case 0: /* other desc data */ | ||
| 511 | break; | ||
| 512 | case -EBUSY: /* last desc */ | ||
| 513 | sh_chan->completed_cookie = | ||
| 514 | cur_desc->async_tx.cookie; | ||
| 515 | break; | ||
| 516 | default: /* first desc ( 0 < )*/ | ||
| 517 | sh_chan->completed_cookie = | ||
| 518 | cur_desc->async_tx.cookie - 1; | ||
| 519 | break; | ||
| 520 | } | ||
| 521 | cur_desc->mark = DESC_COMP; | ||
| 522 | } | ||
| 523 | /* Next desc */ | 613 | /* Next desc */ |
| 524 | sh_chan_xfer_ld_queue(sh_chan); | 614 | sh_chan_xfer_ld_queue(sh_chan); |
| 525 | sh_dmae_chan_ld_cleanup(sh_chan); | 615 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
| 526 | } | 616 | } |
| 527 | 617 | ||
| 528 | static unsigned int get_dmae_irq(unsigned int id) | 618 | static unsigned int get_dmae_irq(unsigned int id) |
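A stand-alone sketch, not part of the patch, of the descriptor life cycle that the shdma.c changes above introduce. The enumerator names are the ones added by the patch; the next_state() helper is purely illustrative of the transitions performed by sh_dmae_prep_memcpy(), sh_dmae_tx_submit(), dmae_do_tasklet() and __ld_cleanup(), which the real driver tracks in sh_desc->mark under sh_chan->desc_lock.

enum sh_dmae_desc_status {
	DESC_IDLE,	/* on ld_free, reusable */
	DESC_PREPARED,	/* returned by prep_memcpy(), not yet submitted */
	DESC_SUBMITTED,	/* on ld_queue, owned by the hardware */
	DESC_COMPLETED,	/* transfer done, callback still to be called */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

/* One forward transition per event; anything else would be a driver bug. */
static enum sh_dmae_desc_status next_state(enum sh_dmae_desc_status s)
{
	switch (s) {
	case DESC_IDLE:		return DESC_PREPARED;	/* prep_memcpy() */
	case DESC_PREPARED:	return DESC_SUBMITTED;	/* tx_submit() */
	case DESC_SUBMITTED:	return DESC_COMPLETED;	/* dmae_do_tasklet() */
	case DESC_COMPLETED:	return DESC_WAITING;	/* __ld_cleanup() */
	default:		return DESC_IDLE;	/* acked, back on ld_free */
	}
}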
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 60b81e529b42..108f1cffb6f5 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h | |||
| @@ -13,9 +13,9 @@ | |||
| 13 | #ifndef __DMA_SHDMA_H | 13 | #ifndef __DMA_SHDMA_H |
| 14 | #define __DMA_SHDMA_H | 14 | #define __DMA_SHDMA_H |
| 15 | 15 | ||
| 16 | #include <linux/device.h> | ||
| 17 | #include <linux/dmapool.h> | ||
| 18 | #include <linux/dmaengine.h> | 16 | #include <linux/dmaengine.h> |
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/list.h> | ||
| 19 | 19 | ||
| 20 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | 20 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ |
| 21 | 21 | ||
| @@ -26,13 +26,16 @@ struct sh_dmae_regs { | |||
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | struct sh_desc { | 28 | struct sh_desc { |
| 29 | struct list_head tx_list; | ||
| 30 | struct sh_dmae_regs hw; | 29 | struct sh_dmae_regs hw; |
| 31 | struct list_head node; | 30 | struct list_head node; |
| 32 | struct dma_async_tx_descriptor async_tx; | 31 | struct dma_async_tx_descriptor async_tx; |
| 32 | dma_cookie_t cookie; | ||
| 33 | int chunks; | ||
| 33 | int mark; | 34 | int mark; |
| 34 | }; | 35 | }; |
| 35 | 36 | ||
| 37 | struct device; | ||
| 38 | |||
| 36 | struct sh_dmae_chan { | 39 | struct sh_dmae_chan { |
| 37 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | 40 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ |
| 38 | spinlock_t desc_lock; /* Descriptor operation lock */ | 41 | spinlock_t desc_lock; /* Descriptor operation lock */ |
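A side note on the chunking arithmetic used by sh_dmae_prep_memcpy() above: SH_DMA_TCR_MAX is 0x00FFFFFF, so one hardware descriptor can move at most SH_DMA_TCR_MAX + 1 bytes, and the chunk count is a plain ceiling division. A small sketch, with an illustrative helper name:

#include <stddef.h>

#define SH_DMA_TCR_MAX 0x00FFFFFF

static size_t sh_dmae_chunks(size_t len)
{
	/* e.g. len = 40 MiB -> 3 chunks: 16 MiB + 16 MiB + 8 MiB */
	return (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
}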
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 13efcd362072..a9371b36a9b9 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig | |||
| @@ -1,5 +1,10 @@ | |||
| 1 | menu "IEEE 1394 (FireWire) support" | ||
| 2 | depends on PCI || BROKEN | ||
| 3 | # firewire-core does not depend on PCI but is | ||
| 4 | # not useful without PCI controller driver | ||
| 5 | |||
| 1 | comment "You can enable one or both FireWire driver stacks." | 6 | comment "You can enable one or both FireWire driver stacks." |
| 2 | comment "See the help texts for more information." | 7 | comment "The newer stack is recommended." |
| 3 | 8 | ||
| 4 | config FIREWIRE | 9 | config FIREWIRE |
| 5 | tristate "FireWire driver stack" | 10 | tristate "FireWire driver stack" |
| @@ -15,16 +20,6 @@ config FIREWIRE | |||
| 15 | To compile this driver as a module, say M here: the module will be | 20 | To compile this driver as a module, say M here: the module will be |
| 16 | called firewire-core. | 21 | called firewire-core. |
| 17 | 22 | ||
| 18 | This module functionally replaces ieee1394, raw1394, and video1394. | ||
| 19 | To access it from application programs, you generally need at least | ||
| 20 | libraw1394 v2. IIDC/DCAM applications need libdc1394 v2. | ||
| 21 | No libraries are required to access storage devices through the | ||
| 22 | firewire-sbp2 driver. | ||
| 23 | |||
| 24 | NOTE: | ||
| 25 | FireWire audio devices currently require the old drivers (ieee1394, | ||
| 26 | ohci1394, raw1394). | ||
| 27 | |||
| 28 | config FIREWIRE_OHCI | 23 | config FIREWIRE_OHCI |
| 29 | tristate "OHCI-1394 controllers" | 24 | tristate "OHCI-1394 controllers" |
| 30 | depends on PCI && FIREWIRE | 25 | depends on PCI && FIREWIRE |
| @@ -34,22 +29,7 @@ config FIREWIRE_OHCI | |||
| 34 | is the only chipset in use, so say Y here. | 29 | is the only chipset in use, so say Y here. |
| 35 | 30 | ||
| 36 | To compile this driver as a module, say M here: The module will be | 31 | To compile this driver as a module, say M here: The module will be |
| 37 | called firewire-ohci. It replaces ohci1394 of the classic IEEE 1394 | 32 | called firewire-ohci. |
| 38 | stack. | ||
| 39 | |||
| 40 | NOTE: | ||
| 41 | If you want to install firewire-ohci and ohci1394 together, you | ||
| 42 | should configure them only as modules and blacklist the driver(s) | ||
| 43 | which you don't want to have auto-loaded. Add either | ||
| 44 | |||
| 45 | blacklist firewire-ohci | ||
| 46 | or | ||
| 47 | blacklist ohci1394 | ||
| 48 | blacklist video1394 | ||
| 49 | blacklist dv1394 | ||
| 50 | |||
| 51 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf | ||
| 52 | depending on your distribution. | ||
| 53 | 33 | ||
| 54 | config FIREWIRE_OHCI_DEBUG | 34 | config FIREWIRE_OHCI_DEBUG |
| 55 | bool | 35 | bool |
| @@ -66,8 +46,7 @@ config FIREWIRE_SBP2 | |||
| 66 | like scanners. | 46 | like scanners. |
| 67 | 47 | ||
| 68 | To compile this driver as a module, say M here: The module will be | 48 | To compile this driver as a module, say M here: The module will be |
| 69 | called firewire-sbp2. It replaces sbp2 of the classic IEEE 1394 | 49 | called firewire-sbp2. |
| 70 | stack. | ||
| 71 | 50 | ||
| 72 | You should also enable support for disks, CD-ROMs, etc. in the SCSI | 51 | You should also enable support for disks, CD-ROMs, etc. in the SCSI |
| 73 | configuration section. | 52 | configuration section. |
| @@ -83,5 +62,8 @@ config FIREWIRE_NET | |||
| 83 | NOTE, this driver is not stable yet! | 62 | NOTE, this driver is not stable yet! |
| 84 | 63 | ||
| 85 | To compile this driver as a module, say M here: The module will be | 64 | To compile this driver as a module, say M here: The module will be |
| 86 | called firewire-net. It replaces eth1394 of the classic IEEE 1394 | 65 | called firewire-net. |
| 87 | stack. | 66 | |
| 67 | source "drivers/ieee1394/Kconfig" | ||
| 68 | |||
| 69 | endmenu | ||
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 231e6ee5ba43..e6d63849e78e 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
| @@ -601,8 +601,9 @@ static void release_request(struct client *client, | |||
| 601 | struct inbound_transaction_resource *r = container_of(resource, | 601 | struct inbound_transaction_resource *r = container_of(resource, |
| 602 | struct inbound_transaction_resource, resource); | 602 | struct inbound_transaction_resource, resource); |
| 603 | 603 | ||
| 604 | fw_send_response(client->device->card, r->request, | 604 | if (r->request) |
| 605 | RCODE_CONFLICT_ERROR); | 605 | fw_send_response(client->device->card, r->request, |
| 606 | RCODE_CONFLICT_ERROR); | ||
| 606 | kfree(r); | 607 | kfree(r); |
| 607 | } | 608 | } |
| 608 | 609 | ||
| @@ -645,7 +646,8 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
| 645 | failed: | 646 | failed: |
| 646 | kfree(r); | 647 | kfree(r); |
| 647 | kfree(e); | 648 | kfree(e); |
| 648 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | 649 | if (request) |
| 650 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | ||
| 649 | } | 651 | } |
| 650 | 652 | ||
| 651 | static void release_address_handler(struct client *client, | 653 | static void release_address_handler(struct client *client, |
| @@ -715,15 +717,18 @@ static int ioctl_send_response(struct client *client, void *buffer) | |||
| 715 | 717 | ||
| 716 | r = container_of(resource, struct inbound_transaction_resource, | 718 | r = container_of(resource, struct inbound_transaction_resource, |
| 717 | resource); | 719 | resource); |
| 718 | if (request->length < r->length) | 720 | if (r->request) { |
| 719 | r->length = request->length; | 721 | if (request->length < r->length) |
| 720 | 722 | r->length = request->length; | |
| 721 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { | 723 | if (copy_from_user(r->data, u64_to_uptr(request->data), |
| 722 | ret = -EFAULT; | 724 | r->length)) { |
| 723 | goto out; | 725 | ret = -EFAULT; |
| 726 | kfree(r->request); | ||
| 727 | goto out; | ||
| 728 | } | ||
| 729 | fw_send_response(client->device->card, r->request, | ||
| 730 | request->rcode); | ||
| 724 | } | 731 | } |
| 725 | |||
| 726 | fw_send_response(client->device->card, r->request, request->rcode); | ||
| 727 | out: | 732 | out: |
| 728 | kfree(r); | 733 | kfree(r); |
| 729 | 734 | ||
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 842739df23e2..495849eb13cc 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c | |||
| @@ -432,14 +432,20 @@ static struct fw_address_handler *lookup_overlapping_address_handler( | |||
| 432 | return NULL; | 432 | return NULL; |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static bool is_enclosing_handler(struct fw_address_handler *handler, | ||
| 436 | unsigned long long offset, size_t length) | ||
| 437 | { | ||
| 438 | return handler->offset <= offset && | ||
| 439 | offset + length <= handler->offset + handler->length; | ||
| 440 | } | ||
| 441 | |||
| 435 | static struct fw_address_handler *lookup_enclosing_address_handler( | 442 | static struct fw_address_handler *lookup_enclosing_address_handler( |
| 436 | struct list_head *list, unsigned long long offset, size_t length) | 443 | struct list_head *list, unsigned long long offset, size_t length) |
| 437 | { | 444 | { |
| 438 | struct fw_address_handler *handler; | 445 | struct fw_address_handler *handler; |
| 439 | 446 | ||
| 440 | list_for_each_entry(handler, list, link) { | 447 | list_for_each_entry(handler, list, link) { |
| 441 | if (handler->offset <= offset && | 448 | if (is_enclosing_handler(handler, offset, length)) |
| 442 | offset + length <= handler->offset + handler->length) | ||
| 443 | return handler; | 449 | return handler; |
| 444 | } | 450 | } |
| 445 | 451 | ||
| @@ -465,6 +471,12 @@ const struct fw_address_region fw_unit_space_region = | |||
| 465 | { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; | 471 | { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; |
| 466 | #endif /* 0 */ | 472 | #endif /* 0 */ |
| 467 | 473 | ||
| 474 | static bool is_in_fcp_region(u64 offset, size_t length) | ||
| 475 | { | ||
| 476 | return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && | ||
| 477 | offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END); | ||
| 478 | } | ||
| 479 | |||
| 468 | /** | 480 | /** |
| 469 | * fw_core_add_address_handler - register for incoming requests | 481 | * fw_core_add_address_handler - register for incoming requests |
| 470 | * @handler: callback | 482 | * @handler: callback |
| @@ -477,8 +489,11 @@ const struct fw_address_region fw_unit_space_region = | |||
| 477 | * give the details of the particular request. | 489 | * give the details of the particular request. |
| 478 | * | 490 | * |
| 479 | * Return value: 0 on success, non-zero otherwise. | 491 | * Return value: 0 on success, non-zero otherwise. |
| 492 | * | ||
| 480 | * The start offset of the handler's address region is determined by | 493 | * The start offset of the handler's address region is determined by |
| 481 | * fw_core_add_address_handler() and is returned in handler->offset. | 494 | * fw_core_add_address_handler() and is returned in handler->offset. |
| 495 | * | ||
| 496 | * Address allocations are exclusive, except for the FCP registers. | ||
| 482 | */ | 497 | */ |
| 483 | int fw_core_add_address_handler(struct fw_address_handler *handler, | 498 | int fw_core_add_address_handler(struct fw_address_handler *handler, |
| 484 | const struct fw_address_region *region) | 499 | const struct fw_address_region *region) |
| @@ -498,10 +513,12 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, | |||
| 498 | 513 | ||
| 499 | handler->offset = region->start; | 514 | handler->offset = region->start; |
| 500 | while (handler->offset + handler->length <= region->end) { | 515 | while (handler->offset + handler->length <= region->end) { |
| 501 | other = | 516 | if (is_in_fcp_region(handler->offset, handler->length)) |
| 502 | lookup_overlapping_address_handler(&address_handler_list, | 517 | other = NULL; |
| 503 | handler->offset, | 518 | else |
| 504 | handler->length); | 519 | other = lookup_overlapping_address_handler |
| 520 | (&address_handler_list, | ||
| 521 | handler->offset, handler->length); | ||
| 505 | if (other != NULL) { | 522 | if (other != NULL) { |
| 506 | handler->offset += other->length; | 523 | handler->offset += other->length; |
| 507 | } else { | 524 | } else { |
| @@ -668,6 +685,9 @@ static struct fw_request *allocate_request(struct fw_packet *p) | |||
| 668 | void fw_send_response(struct fw_card *card, | 685 | void fw_send_response(struct fw_card *card, |
| 669 | struct fw_request *request, int rcode) | 686 | struct fw_request *request, int rcode) |
| 670 | { | 687 | { |
| 688 | if (WARN_ONCE(!request, "invalid for FCP address handlers")) | ||
| 689 | return; | ||
| 690 | |||
| 671 | /* unified transaction or broadcast transaction: don't respond */ | 691 | /* unified transaction or broadcast transaction: don't respond */ |
| 672 | if (request->ack != ACK_PENDING || | 692 | if (request->ack != ACK_PENDING || |
| 673 | HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { | 693 | HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) { |
| @@ -686,26 +706,15 @@ void fw_send_response(struct fw_card *card, | |||
| 686 | } | 706 | } |
| 687 | EXPORT_SYMBOL(fw_send_response); | 707 | EXPORT_SYMBOL(fw_send_response); |
| 688 | 708 | ||
| 689 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | 709 | static void handle_exclusive_region_request(struct fw_card *card, |
| 710 | struct fw_packet *p, | ||
| 711 | struct fw_request *request, | ||
| 712 | unsigned long long offset) | ||
| 690 | { | 713 | { |
| 691 | struct fw_address_handler *handler; | 714 | struct fw_address_handler *handler; |
| 692 | struct fw_request *request; | ||
| 693 | unsigned long long offset; | ||
| 694 | unsigned long flags; | 715 | unsigned long flags; |
| 695 | int tcode, destination, source; | 716 | int tcode, destination, source; |
| 696 | 717 | ||
| 697 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) | ||
| 698 | return; | ||
| 699 | |||
| 700 | request = allocate_request(p); | ||
| 701 | if (request == NULL) { | ||
| 702 | /* FIXME: send statically allocated busy packet. */ | ||
| 703 | return; | ||
| 704 | } | ||
| 705 | |||
| 706 | offset = | ||
| 707 | ((unsigned long long) | ||
| 708 | HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2]; | ||
| 709 | tcode = HEADER_GET_TCODE(p->header[0]); | 718 | tcode = HEADER_GET_TCODE(p->header[0]); |
| 710 | destination = HEADER_GET_DESTINATION(p->header[0]); | 719 | destination = HEADER_GET_DESTINATION(p->header[0]); |
| 711 | source = HEADER_GET_SOURCE(p->header[1]); | 720 | source = HEADER_GET_SOURCE(p->header[1]); |
| @@ -732,6 +741,73 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | |||
| 732 | request->data, request->length, | 741 | request->data, request->length, |
| 733 | handler->callback_data); | 742 | handler->callback_data); |
| 734 | } | 743 | } |
| 744 | |||
| 745 | static void handle_fcp_region_request(struct fw_card *card, | ||
| 746 | struct fw_packet *p, | ||
| 747 | struct fw_request *request, | ||
| 748 | unsigned long long offset) | ||
| 749 | { | ||
| 750 | struct fw_address_handler *handler; | ||
| 751 | unsigned long flags; | ||
| 752 | int tcode, destination, source; | ||
| 753 | |||
| 754 | if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && | ||
| 755 | offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || | ||
| 756 | request->length > 0x200) { | ||
| 757 | fw_send_response(card, request, RCODE_ADDRESS_ERROR); | ||
| 758 | |||
| 759 | return; | ||
| 760 | } | ||
| 761 | |||
| 762 | tcode = HEADER_GET_TCODE(p->header[0]); | ||
| 763 | destination = HEADER_GET_DESTINATION(p->header[0]); | ||
| 764 | source = HEADER_GET_SOURCE(p->header[1]); | ||
| 765 | |||
| 766 | if (tcode != TCODE_WRITE_QUADLET_REQUEST && | ||
| 767 | tcode != TCODE_WRITE_BLOCK_REQUEST) { | ||
| 768 | fw_send_response(card, request, RCODE_TYPE_ERROR); | ||
| 769 | |||
| 770 | return; | ||
| 771 | } | ||
| 772 | |||
| 773 | spin_lock_irqsave(&address_handler_lock, flags); | ||
| 774 | list_for_each_entry(handler, &address_handler_list, link) { | ||
| 775 | if (is_enclosing_handler(handler, offset, request->length)) | ||
| 776 | handler->address_callback(card, NULL, tcode, | ||
| 777 | destination, source, | ||
| 778 | p->generation, p->speed, | ||
| 779 | offset, request->data, | ||
| 780 | request->length, | ||
| 781 | handler->callback_data); | ||
| 782 | } | ||
| 783 | spin_unlock_irqrestore(&address_handler_lock, flags); | ||
| 784 | |||
| 785 | fw_send_response(card, request, RCODE_COMPLETE); | ||
| 786 | } | ||
| 787 | |||
| 788 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | ||
| 789 | { | ||
| 790 | struct fw_request *request; | ||
| 791 | unsigned long long offset; | ||
| 792 | |||
| 793 | if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE) | ||
| 794 | return; | ||
| 795 | |||
| 796 | request = allocate_request(p); | ||
| 797 | if (request == NULL) { | ||
| 798 | /* FIXME: send statically allocated busy packet. */ | ||
| 799 | return; | ||
| 800 | } | ||
| 801 | |||
| 802 | offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | | ||
| 803 | p->header[2]; | ||
| 804 | |||
| 805 | if (!is_in_fcp_region(offset, request->length)) | ||
| 806 | handle_exclusive_region_request(card, p, request, offset); | ||
| 807 | else | ||
| 808 | handle_fcp_region_request(card, p, request, offset); | ||
| 809 | |||
| 810 | } | ||
| 735 | EXPORT_SYMBOL(fw_core_handle_request); | 811 | EXPORT_SYMBOL(fw_core_handle_request); |
| 736 | 812 | ||
| 737 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) | 813 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) |
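A sketch of how a client could register for the now shared FCP response range, assuming the fw_address_handler API of this series (the callback still takes generation and speed here). Since handle_fcp_region_request() above passes request == NULL and answers the write itself, such a handler only consumes the payload and must not call fw_send_response():

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static void fcp_response_callback(struct fw_card *card,
				  struct fw_request *request,
				  int tcode, int destination, int source,
				  int generation, int speed,
				  unsigned long long offset,
				  void *data, size_t length,
				  void *callback_data)
{
	/* request is NULL for FCP addresses: the core already responded,
	 * so only parse the frame in data[0..length) here */
}

static const struct fw_address_region fcp_response_region = {
	.start = CSR_REGISTER_BASE | CSR_FCP_RESPONSE,
	.end   = CSR_REGISTER_BASE | CSR_FCP_END,
};

static struct fw_address_handler fcp_response_handler = {
	.length           = CSR_FCP_END - CSR_FCP_RESPONSE,
	.address_callback = fcp_response_callback,
};

/* registration:
 * fw_core_add_address_handler(&fcp_response_handler, &fcp_response_region);
 */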
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 96768e160866..a61571c63c59 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
| @@ -2226,7 +2226,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, | |||
| 2226 | if (rest == 0) | 2226 | if (rest == 0) |
| 2227 | return -EINVAL; | 2227 | return -EINVAL; |
| 2228 | 2228 | ||
| 2229 | /* FIXME: make packet-per-buffer/dual-buffer a context option */ | ||
| 2230 | while (rest > 0) { | 2229 | while (rest > 0) { |
| 2231 | d = context_get_descriptors(&ctx->context, | 2230 | d = context_get_descriptors(&ctx->context, |
| 2232 | z + header_z, &d_bus); | 2231 | z + header_z, &d_bus); |
| @@ -2470,7 +2469,10 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
| 2470 | } | 2469 | } |
| 2471 | 2470 | ||
| 2472 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; | 2471 | version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; |
| 2472 | #if 0 | ||
| 2473 | /* FIXME: make it a context option or remove dual-buffer mode */ | ||
| 2473 | ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; | 2474 | ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; |
| 2475 | #endif | ||
| 2474 | 2476 | ||
| 2475 | /* dual-buffer mode is broken if more than one IR context is active */ | 2477 | /* dual-buffer mode is broken if more than one IR context is active */ |
| 2476 | if (dev->vendor == PCI_VENDOR_ID_AGERE && | 2478 | if (dev->vendor == PCI_VENDOR_ID_AGERE && |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index a019b49ecc9b..1f1d88ae68d6 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -172,6 +172,15 @@ config GPIO_ADP5520 | |||
| 172 | To compile this driver as a module, choose M here: the module will | 172 | To compile this driver as a module, choose M here: the module will |
| 173 | be called adp5520-gpio. | 173 | be called adp5520-gpio. |
| 174 | 174 | ||
| 175 | config GPIO_ADP5588 | ||
| 176 | tristate "ADP5588 I2C GPIO expander" | ||
| 177 | depends on I2C | ||
| 178 | help | ||
| 179 | This option enables support for 18 GPIOs found | ||
| 180 | on Analog Devices ADP5588 GPIO Expanders. | ||
| 181 | To compile this driver as a module, choose M here: the module will be | ||
| 182 | called adp5588-gpio. | ||
| 183 | |||
| 175 | comment "PCI GPIO expanders:" | 184 | comment "PCI GPIO expanders:" |
| 176 | 185 | ||
| 177 | config GPIO_CS5535 | 186 | config GPIO_CS5535 |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 52fe4cf734c7..48687238edb1 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
| @@ -5,6 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | |||
| 5 | obj-$(CONFIG_GPIOLIB) += gpiolib.o | 5 | obj-$(CONFIG_GPIOLIB) += gpiolib.o |
| 6 | 6 | ||
| 7 | obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o | 7 | obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o |
| 8 | obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o | ||
| 8 | obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o | 9 | obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o |
| 9 | obj-$(CONFIG_GPIO_MAX7301) += max7301.o | 10 | obj-$(CONFIG_GPIO_MAX7301) += max7301.o |
| 10 | obj-$(CONFIG_GPIO_MAX732X) += max732x.o | 11 | obj-$(CONFIG_GPIO_MAX732X) += max732x.o |
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c new file mode 100644 index 000000000000..afc097a16b33 --- /dev/null +++ b/drivers/gpio/adp5588-gpio.c | |||
| @@ -0,0 +1,266 @@ | |||
| 1 | /* | ||
| 2 | * GPIO Chip driver for Analog Devices | ||
| 3 | * ADP5588 I/O Expander and QWERTY Keypad Controller | ||
| 4 | * | ||
| 5 | * Copyright 2009 Analog Devices Inc. | ||
| 6 | * | ||
| 7 | * Licensed under the GPL-2 or later. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/i2c.h> | ||
| 14 | #include <linux/gpio.h> | ||
| 15 | |||
| 16 | #include <linux/i2c/adp5588.h> | ||
| 17 | |||
| 18 | #define DRV_NAME "adp5588-gpio" | ||
| 19 | #define MAXGPIO 18 | ||
| 20 | #define ADP_BANK(offs) ((offs) >> 3) | ||
| 21 | #define ADP_BIT(offs) (1u << ((offs) & 0x7)) | ||
| 22 | |||
| 23 | struct adp5588_gpio { | ||
| 24 | struct i2c_client *client; | ||
| 25 | struct gpio_chip gpio_chip; | ||
| 26 | struct mutex lock; /* protect cached dir, dat_out */ | ||
| 27 | unsigned gpio_start; | ||
| 28 | uint8_t dat_out[3]; | ||
| 29 | uint8_t dir[3]; | ||
| 30 | }; | ||
| 31 | |||
| 32 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) | ||
| 33 | { | ||
| 34 | int ret = i2c_smbus_read_byte_data(client, reg); | ||
| 35 | |||
| 36 | if (ret < 0) | ||
| 37 | dev_err(&client->dev, "Read Error\n"); | ||
| 38 | |||
| 39 | return ret; | ||
| 40 | } | ||
| 41 | |||
| 42 | static int adp5588_gpio_write(struct i2c_client *client, u8 reg, u8 val) | ||
| 43 | { | ||
| 44 | int ret = i2c_smbus_write_byte_data(client, reg, val); | ||
| 45 | |||
| 46 | if (ret < 0) | ||
| 47 | dev_err(&client->dev, "Write Error\n"); | ||
| 48 | |||
| 49 | return ret; | ||
| 50 | } | ||
| 51 | |||
| 52 | static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) | ||
| 53 | { | ||
| 54 | struct adp5588_gpio *dev = | ||
| 55 | container_of(chip, struct adp5588_gpio, gpio_chip); | ||
| 56 | |||
| 57 | return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off)) | ||
| 58 | & ADP_BIT(off)); | ||
| 59 | } | ||
| 60 | |||
| 61 | static void adp5588_gpio_set_value(struct gpio_chip *chip, | ||
| 62 | unsigned off, int val) | ||
| 63 | { | ||
| 64 | unsigned bank, bit; | ||
| 65 | struct adp5588_gpio *dev = | ||
| 66 | container_of(chip, struct adp5588_gpio, gpio_chip); | ||
| 67 | |||
| 68 | bank = ADP_BANK(off); | ||
| 69 | bit = ADP_BIT(off); | ||
| 70 | |||
| 71 | mutex_lock(&dev->lock); | ||
| 72 | if (val) | ||
| 73 | dev->dat_out[bank] |= bit; | ||
| 74 | else | ||
| 75 | dev->dat_out[bank] &= ~bit; | ||
| 76 | |||
| 77 | adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank, | ||
| 78 | dev->dat_out[bank]); | ||
| 79 | mutex_unlock(&dev->lock); | ||
| 80 | } | ||
| 81 | |||
| 82 | static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) | ||
| 83 | { | ||
| 84 | int ret; | ||
| 85 | unsigned bank; | ||
| 86 | struct adp5588_gpio *dev = | ||
| 87 | container_of(chip, struct adp5588_gpio, gpio_chip); | ||
| 88 | |||
| 89 | bank = ADP_BANK(off); | ||
| 90 | |||
| 91 | mutex_lock(&dev->lock); | ||
| 92 | dev->dir[bank] &= ~ADP_BIT(off); | ||
| 93 | ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]); | ||
| 94 | mutex_unlock(&dev->lock); | ||
| 95 | |||
| 96 | return ret; | ||
| 97 | } | ||
| 98 | |||
| 99 | static int adp5588_gpio_direction_output(struct gpio_chip *chip, | ||
| 100 | unsigned off, int val) | ||
| 101 | { | ||
| 102 | int ret; | ||
| 103 | unsigned bank, bit; | ||
| 104 | struct adp5588_gpio *dev = | ||
| 105 | container_of(chip, struct adp5588_gpio, gpio_chip); | ||
| 106 | |||
| 107 | bank = ADP_BANK(off); | ||
| 108 | bit = ADP_BIT(off); | ||
| 109 | |||
| 110 | mutex_lock(&dev->lock); | ||
| 111 | dev->dir[bank] |= bit; | ||
| 112 | |||
| 113 | if (val) | ||
| 114 | dev->dat_out[bank] |= bit; | ||
| 115 | else | ||
| 116 | dev->dat_out[bank] &= ~bit; | ||
| 117 | |||
| 118 | ret = adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank, | ||
| 119 | dev->dat_out[bank]); | ||
| 120 | ret |= adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, | ||
| 121 | dev->dir[bank]); | ||
| 122 | mutex_unlock(&dev->lock); | ||
| 123 | |||
| 124 | return ret; | ||
| 125 | } | ||
| 126 | |||
| 127 | static int __devinit adp5588_gpio_probe(struct i2c_client *client, | ||
| 128 | const struct i2c_device_id *id) | ||
| 129 | { | ||
| 130 | struct adp5588_gpio_platform_data *pdata = client->dev.platform_data; | ||
| 131 | struct adp5588_gpio *dev; | ||
| 132 | struct gpio_chip *gc; | ||
| 133 | int ret, i, revid; | ||
| 134 | |||
| 135 | if (pdata == NULL) { | ||
| 136 | dev_err(&client->dev, "missing platform data\n"); | ||
| 137 | return -ENODEV; | ||
| 138 | } | ||
| 139 | |||
| 140 | if (!i2c_check_functionality(client->adapter, | ||
| 141 | I2C_FUNC_SMBUS_BYTE_DATA)) { | ||
| 142 | dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); | ||
| 143 | return -EIO; | ||
| 144 | } | ||
| 145 | |||
| 146 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
| 147 | if (dev == NULL) { | ||
| 148 | dev_err(&client->dev, "failed to alloc memory\n"); | ||
| 149 | return -ENOMEM; | ||
| 150 | } | ||
| 151 | |||
| 152 | dev->client = client; | ||
| 153 | |||
| 154 | gc = &dev->gpio_chip; | ||
| 155 | gc->direction_input = adp5588_gpio_direction_input; | ||
| 156 | gc->direction_output = adp5588_gpio_direction_output; | ||
| 157 | gc->get = adp5588_gpio_get_value; | ||
| 158 | gc->set = adp5588_gpio_set_value; | ||
| 159 | gc->can_sleep = 1; | ||
| 160 | |||
| 161 | gc->base = pdata->gpio_start; | ||
| 162 | gc->ngpio = MAXGPIO; | ||
| 163 | gc->label = client->name; | ||
| 164 | gc->owner = THIS_MODULE; | ||
| 165 | |||
| 166 | mutex_init(&dev->lock); | ||
| 167 | |||
| 168 | |||
| 169 | ret = adp5588_gpio_read(dev->client, DEV_ID); | ||
| 170 | if (ret < 0) | ||
| 171 | goto err; | ||
| 172 | |||
| 173 | revid = ret & ADP5588_DEVICE_ID_MASK; | ||
| 174 | |||
| 175 | for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) { | ||
| 176 | dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i); | ||
| 177 | dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i); | ||
| 178 | ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0); | ||
| 179 | ret |= adp5588_gpio_write(client, GPIO_PULL1 + i, | ||
| 180 | (pdata->pullup_dis_mask >> (8 * i)) & 0xFF); | ||
| 181 | |||
| 182 | if (ret) | ||
| 183 | goto err; | ||
| 184 | } | ||
| 185 | |||
| 186 | ret = gpiochip_add(&dev->gpio_chip); | ||
| 187 | if (ret) | ||
| 188 | goto err; | ||
| 189 | |||
| 190 | dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n", | ||
| 191 | gc->base, gc->base + gc->ngpio - 1, | ||
| 192 | client->name, revid); | ||
| 193 | |||
| 194 | if (pdata->setup) { | ||
| 195 | ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context); | ||
| 196 | if (ret < 0) | ||
| 197 | dev_warn(&client->dev, "setup failed, %d\n", ret); | ||
| 198 | } | ||
| 199 | |||
| 200 | i2c_set_clientdata(client, dev); | ||
| 201 | return 0; | ||
| 202 | |||
| 203 | err: | ||
| 204 | kfree(dev); | ||
| 205 | return ret; | ||
| 206 | } | ||
| 207 | |||
| 208 | static int __devexit adp5588_gpio_remove(struct i2c_client *client) | ||
| 209 | { | ||
| 210 | struct adp5588_gpio_platform_data *pdata = client->dev.platform_data; | ||
| 211 | struct adp5588_gpio *dev = i2c_get_clientdata(client); | ||
| 212 | int ret; | ||
| 213 | |||
| 214 | if (pdata->teardown) { | ||
| 215 | ret = pdata->teardown(client, | ||
| 216 | dev->gpio_chip.base, dev->gpio_chip.ngpio, | ||
| 217 | pdata->context); | ||
| 218 | if (ret < 0) { | ||
| 219 | dev_err(&client->dev, "teardown failed %d\n", ret); | ||
| 220 | return ret; | ||
| 221 | } | ||
| 222 | } | ||
| 223 | |||
| 224 | ret = gpiochip_remove(&dev->gpio_chip); | ||
| 225 | if (ret) { | ||
| 226 | dev_err(&client->dev, "gpiochip_remove failed %d\n", ret); | ||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | kfree(dev); | ||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | |||
| 234 | static const struct i2c_device_id adp5588_gpio_id[] = { | ||
| 235 | {DRV_NAME, 0}, | ||
| 236 | {} | ||
| 237 | }; | ||
| 238 | |||
| 239 | MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id); | ||
| 240 | |||
| 241 | static struct i2c_driver adp5588_gpio_driver = { | ||
| 242 | .driver = { | ||
| 243 | .name = DRV_NAME, | ||
| 244 | }, | ||
| 245 | .probe = adp5588_gpio_probe, | ||
| 246 | .remove = __devexit_p(adp5588_gpio_remove), | ||
| 247 | .id_table = adp5588_gpio_id, | ||
| 248 | }; | ||
| 249 | |||
| 250 | static int __init adp5588_gpio_init(void) | ||
| 251 | { | ||
| 252 | return i2c_add_driver(&adp5588_gpio_driver); | ||
| 253 | } | ||
| 254 | |||
| 255 | module_init(adp5588_gpio_init); | ||
| 256 | |||
| 257 | static void __exit adp5588_gpio_exit(void) | ||
| 258 | { | ||
| 259 | i2c_del_driver(&adp5588_gpio_driver); | ||
| 260 | } | ||
| 261 | |||
| 262 | module_exit(adp5588_gpio_exit); | ||
| 263 | |||
| 264 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); | ||
| 265 | MODULE_DESCRIPTION("GPIO ADP5588 Driver"); | ||
| 266 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a25ad284a272..350842ad3632 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -858,8 +858,6 @@ int gpio_sysfs_set_active_low(unsigned gpio, int value) | |||
| 858 | desc = &gpio_desc[gpio]; | 858 | desc = &gpio_desc[gpio]; |
| 859 | 859 | ||
| 860 | if (test_bit(FLAG_EXPORT, &desc->flags)) { | 860 | if (test_bit(FLAG_EXPORT, &desc->flags)) { |
| 861 | struct device *dev; | ||
| 862 | |||
| 863 | dev = class_find_device(&gpio_class, NULL, desc, match_export); | 861 | dev = class_find_device(&gpio_class, NULL, desc, match_export); |
| 864 | if (dev == NULL) { | 862 | if (dev == NULL) { |
| 865 | status = -ENODEV; | 863 | status = -ENODEV; |
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index 628eae3e9b83..a1fce68e3bbe 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c | |||
| @@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev, | |||
| 39 | struct drm_ati_pcigart_info *gart_info) | 39 | struct drm_ati_pcigart_info *gart_info) |
| 40 | { | 40 | { |
| 41 | gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, | 41 | gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, |
| 42 | PAGE_SIZE, | 42 | PAGE_SIZE); |
| 43 | gart_info->table_mask); | ||
| 44 | if (gart_info->table_handle == NULL) | 43 | if (gart_info->table_handle == NULL) |
| 45 | return -ENOMEM; | 44 | return -ENOMEM; |
| 46 | 45 | ||
| @@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga | |||
| 112 | if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { | 111 | if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { |
| 113 | DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); | 112 | DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); |
| 114 | 113 | ||
| 114 | if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { | ||
| 115 | DRM_ERROR("fail to set dma mask to 0x%Lx\n", | ||
| 116 | gart_info->table_mask); | ||
| 117 | ret = 1; | ||
| 118 | goto done; | ||
| 119 | } | ||
| 120 | |||
| 115 | ret = drm_ati_alloc_pcigart_table(dev, gart_info); | 121 | ret = drm_ati_alloc_pcigart_table(dev, gart_info); |
| 116 | if (ret) { | 122 | if (ret) { |
| 117 | DRM_ERROR("cannot allocate PCI GART page!\n"); | 123 | DRM_ERROR("cannot allocate PCI GART page!\n"); |
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 3d09e304f6f4..8417cc4c43f1 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c | |||
| @@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, | |||
| 326 | * As we're limiting the address to 2^32-1 (or less), | 326 | * As we're limiting the address to 2^32-1 (or less), |
| 327 | * casting it down to 32 bits is no problem, but we | 327 | * casting it down to 32 bits is no problem, but we |
| 328 | * need to point to a 64bit variable first. */ | 328 | * need to point to a 64bit variable first. */ |
| 329 | dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); | 329 | dmah = drm_pci_alloc(dev, map->size, map->size); |
| 330 | if (!dmah) { | 330 | if (!dmah) { |
| 331 | kfree(map); | 331 | kfree(map); |
| 332 | return -ENOMEM; | 332 | return -ENOMEM; |
| @@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) | |||
| 885 | 885 | ||
| 886 | while (entry->buf_count < count) { | 886 | while (entry->buf_count < count) { |
| 887 | 887 | ||
| 888 | dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); | 888 | dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000); |
| 889 | 889 | ||
| 890 | if (!dmah) { | 890 | if (!dmah) { |
| 891 | /* Set count correctly so we free the proper amount. */ | 891 | /* Set count correctly so we free the proper amount. */ |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5124401f266a..d91fb8c0b7b3 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = | |||
| 158 | { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, | 158 | { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, |
| 159 | { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, | 159 | { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, |
| 160 | { DRM_MODE_CONNECTOR_TV, "TV", 0 }, | 160 | { DRM_MODE_CONNECTOR_TV, "TV", 0 }, |
| 161 | { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 }, | ||
| 161 | }; | 162 | }; |
| 162 | 163 | ||
| 163 | static struct drm_prop_enum_list drm_encoder_enum_list[] = | 164 | static struct drm_prop_enum_list drm_encoder_enum_list[] = |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 4231d6db72ec..077313f0d47f 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) | |||
| 216 | EXPORT_SYMBOL(drm_helper_crtc_in_use); | 216 | EXPORT_SYMBOL(drm_helper_crtc_in_use); |
| 217 | 217 | ||
| 218 | /** | 218 | /** |
| 219 | * drm_disable_unused_functions - disable unused objects | 219 | * drm_helper_disable_unused_functions - disable unused objects |
| 220 | * @dev: DRM device | 220 | * @dev: DRM device |
| 221 | * | 221 | * |
| 222 | * LOCKING: | 222 | * LOCKING: |
| @@ -1032,7 +1032,7 @@ bool drm_helper_initial_config(struct drm_device *dev) | |||
| 1032 | /* | 1032 | /* |
| 1033 | * we shouldn't end up with no modes here. | 1033 | * we shouldn't end up with no modes here. |
| 1034 | */ | 1034 | */ |
| 1035 | WARN(!count, "No connectors reported connected with modes\n"); | 1035 | printk(KERN_INFO "No connectors reported connected with modes\n");
| 1036 | 1036 | ||
| 1037 | drm_setup_crtcs(dev); | 1037 | drm_setup_crtcs(dev); |
| 1038 | 1038 | ||
| @@ -1162,6 +1162,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); | |||
| 1162 | int drm_helper_resume_force_mode(struct drm_device *dev) | 1162 | int drm_helper_resume_force_mode(struct drm_device *dev) |
| 1163 | { | 1163 | { |
| 1164 | struct drm_crtc *crtc; | 1164 | struct drm_crtc *crtc; |
| 1165 | struct drm_encoder *encoder; | ||
| 1166 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
| 1167 | struct drm_crtc_helper_funcs *crtc_funcs; | ||
| 1165 | int ret; | 1168 | int ret; |
| 1166 | 1169 | ||
| 1167 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 1170 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| @@ -1174,6 +1177,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev) | |||
| 1174 | 1177 | ||
| 1175 | if (ret == false) | 1178 | if (ret == false) |
| 1176 | DRM_ERROR("failed to set mode on crtc %p\n", crtc); | 1179 | DRM_ERROR("failed to set mode on crtc %p\n", crtc); |
| 1180 | |||
| 1181 | /* Turn off outputs that were already powered off */ | ||
| 1182 | if (drm_helper_choose_crtc_dpms(crtc)) { | ||
| 1183 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 1184 | |||
| 1185 | if(encoder->crtc != crtc) | ||
| 1186 | continue; | ||
| 1187 | |||
| 1188 | encoder_funcs = encoder->helper_private; | ||
| 1189 | if (encoder_funcs->dpms) | ||
| 1190 | (*encoder_funcs->dpms) (encoder, | ||
| 1191 | drm_helper_choose_encoder_dpms(encoder)); | ||
| 1192 | |||
| 1193 | crtc_funcs = crtc->helper_private; | ||
| 1194 | if (crtc_funcs->dpms) | ||
| 1195 | (*crtc_funcs->dpms) (crtc, | ||
| 1196 | drm_helper_choose_crtc_dpms(crtc)); | ||
| 1197 | } | ||
| 1198 | } | ||
| 1177 | } | 1199 | } |
| 1178 | /* disable the unused connectors while restoring the modesetting */ | 1200 | /* disable the unused connectors while restoring the modesetting */ |
| 1179 | drm_helper_disable_unused_functions(dev); | 1201 | drm_helper_disable_unused_functions(dev); |
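The new resume path above only walks the encoders when drm_helper_choose_crtc_dpms() returns a non-zero value. A brief sketch of why that reads as "already powered off", assuming the standard DRM DPMS encoding in which full power is the zero value (the helper's return convention is the assumption here, not something quoted from the patch):

    /* DPMS power levels from the DRM mode-setting UAPI. */
    #define DRM_MODE_DPMS_ON      0
    #define DRM_MODE_DPMS_STANDBY 1
    #define DRM_MODE_DPMS_SUSPEND 2
    #define DRM_MODE_DPMS_OFF     3

    /* Non-zero means some level of power-down was chosen, so the resume
     * code re-applies that state instead of leaving the output lit. */
    static int crtc_chosen_dark(int chosen_dpms)
    {
            return chosen_dpms != DRM_MODE_DPMS_ON;
    }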
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5c9f79877cbf..defcaf108460 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -911,23 +911,27 @@ static int drm_cvt_modes(struct drm_connector *connector, | |||
| 911 | struct drm_device *dev = connector->dev; | 911 | struct drm_device *dev = connector->dev; |
| 912 | struct cvt_timing *cvt; | 912 | struct cvt_timing *cvt; |
| 913 | const int rates[] = { 60, 85, 75, 60, 50 }; | 913 | const int rates[] = { 60, 85, 75, 60, 50 }; |
| 914 | const u8 empty[3] = { 0, 0, 0 }; | ||
| 914 | 915 | ||
| 915 | for (i = 0; i < 4; i++) { | 916 | for (i = 0; i < 4; i++) { |
| 916 | int uninitialized_var(width), height; | 917 | int uninitialized_var(width), height; |
| 917 | cvt = &(timing->data.other_data.data.cvt[i]); | 918 | cvt = &(timing->data.other_data.data.cvt[i]); |
| 918 | 919 | ||
| 919 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; | 920 | if (!memcmp(cvt->code, empty, 3)) |
| 920 | switch (cvt->code[1] & 0xc0) { | 921 | continue; |
| 922 | |||
| 923 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; | ||
| 924 | switch (cvt->code[1] & 0x0c) { | ||
| 921 | case 0x00: | 925 | case 0x00: |
| 922 | width = height * 4 / 3; | 926 | width = height * 4 / 3; |
| 923 | break; | 927 | break; |
| 924 | case 0x40: | 928 | case 0x04: |
| 925 | width = height * 16 / 9; | 929 | width = height * 16 / 9; |
| 926 | break; | 930 | break; |
| 927 | case 0x80: | 931 | case 0x08: |
| 928 | width = height * 16 / 10; | 932 | width = height * 16 / 10; |
| 929 | break; | 933 | break; |
| 930 | case 0xc0: | 934 | case 0x0c: |
| 931 | width = height * 15 / 9; | 935 | width = height * 15 / 9; |
| 932 | break; | 936 | break; |
| 933 | } | 937 | } |
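The EDID fix above corrects two details of the 3-byte CVT descriptor: the upper nibble of byte 1 supplies bits 11:8 of the vertical size (shift by 4, not 8), and the aspect ratio lives in bits 3:2 (mask 0x0c, not 0xc0). A small self-contained sketch of the corrected arithmetic with a worked 1920x1080 example; it mirrors the patched logic rather than quoting it:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode one 3-byte EDID CVT descriptor the way the fixed code does:
     * 12-bit value = upper nibble of byte 1 (shifted left by 4) plus byte 0,
     * height = (value + 1) * 2, aspect ratio in bits 3:2 of byte 1. */
    static void decode_cvt(const uint8_t code[3])
    {
            int height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;
            int width;

            switch (code[1] & 0x0c) {
            case 0x00: width = height * 4 / 3;   break;
            case 0x04: width = height * 16 / 9;  break;
            case 0x08: width = height * 16 / 10; break;
            default:   width = height * 15 / 9;  break; /* 0x0c */
            }
            printf("%dx%d\n", width, height);
    }

    int main(void)
    {
            /* 1080 lines -> 1080/2 - 1 = 539 = 0x21b; 16:9 -> bits 3:2 = 01 */
            const uint8_t code[3] = { 0x1b, 0x24, 0x00 };

            decode_cvt(code);   /* prints 1920x1080 */
            return 0;
    }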
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1b49fa055f4f..1c2b7d44ec05 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con | |||
| 156 | force = DRM_FORCE_ON; | 156 | force = DRM_FORCE_ON; |
| 157 | break; | 157 | break; |
| 158 | case 'D': | 158 | case 'D': |
| 159 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) || | 159 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) && |
| 160 | (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) | 160 | (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) |
| 161 | force = DRM_FORCE_ON; | 161 | force = DRM_FORCE_ON; |
| 162 | else | 162 | else |
| @@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
| 606 | return -EINVAL; | 606 | return -EINVAL; |
| 607 | 607 | ||
| 608 | /* Need to resize the fb object !!! */ | 608 | /* Need to resize the fb object !!! */ |
| 609 | if (var->xres > fb->width || var->yres > fb->height) { | 609 | if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) { |
| 610 | DRM_ERROR("Requested width/height is greater than current fb " | 610 | DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " |
| 611 | "object %dx%d > %dx%d\n", var->xres, var->yres, | 611 | "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, |
| 612 | fb->width, fb->height); | 612 | fb->width, fb->height, fb->bits_per_pixel); |
| 613 | DRM_ERROR("Need resizing code.\n"); | ||
| 614 | return -EINVAL; | 613 | return -EINVAL; |
| 615 | } | 614 | } |
| 616 | 615 | ||
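The 'D' (force digital) parsing fix above replaces || with &&: a connector type always differs from at least one of DVI-I and HDMI-B, so the old test was unconditionally true and the DRM_FORCE_ON_DIGITAL branch could never be taken. A tiny sketch of the two predicates; the numeric connector-type values are illustrative stand-ins chosen to match the DRM UAPI numbering:

    #include <stdio.h>

    enum { VGA = 1, DVII = 2, HDMIB = 12 };  /* illustrative values */

    int main(void)
    {
            int types[] = { DVII, HDMIB, VGA };

            for (int i = 0; i < 3; i++) {
                    int t = types[i];
                    int buggy = (t != DVII) || (t != HDMIB); /* always 1 */
                    int fixed = (t != DVII) && (t != HDMIB); /* 0 for DVI-I/HDMI-B */
                    printf("type %2d: ||=%d &&=%d\n", t, buggy, fixed);
            }
            return 0;
    }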
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 7998ee66b317..b98384dbd9a7 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
| @@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev) | |||
| 115 | 115 | ||
| 116 | dev->num_crtcs = 0; | 116 | dev->num_crtcs = 0; |
| 117 | } | 117 | } |
| 118 | EXPORT_SYMBOL(drm_vblank_cleanup); | ||
| 118 | 119 | ||
| 119 | int drm_vblank_init(struct drm_device *dev, int num_crtcs) | 120 | int drm_vblank_init(struct drm_device *dev, int num_crtcs) |
| 120 | { | 121 | { |
| @@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) | |||
| 163 | } | 164 | } |
| 164 | 165 | ||
| 165 | dev->vblank_disable_allowed = 0; | 166 | dev->vblank_disable_allowed = 0; |
| 166 | |||
| 167 | return 0; | 167 | return 0; |
| 168 | 168 | ||
| 169 | err: | 169 | err: |
| @@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off); | |||
| 493 | */ | 493 | */ |
| 494 | void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) | 494 | void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) |
| 495 | { | 495 | { |
| 496 | /* vblank is not initialized (IRQ not installed ?) */ | ||
| 497 | if (!dev->num_crtcs) | ||
| 498 | return; | ||
| 496 | /* | 499 | /* |
| 497 | * To avoid all the problems that might happen if interrupts | 500 | * To avoid all the problems that might happen if interrupts |
| 498 | * were enabled/disabled around or between these calls, we just | 501 | * were enabled/disabled around or between these calls, we just |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 6d81a02463a3..76d63394c776 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
| @@ -1,9 +1,4 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * The list_sort function is (presumably) licensed under the GPL (see the | ||
| 3 | * top level "COPYING" file for details). | ||
| 4 | * | ||
| 5 | * The remainder of this file is: | ||
| 6 | * | ||
| 7 | * Copyright © 1997-2003 by The XFree86 Project, Inc. | 2 | * Copyright © 1997-2003 by The XFree86 Project, Inc. |
| 8 | * Copyright © 2007 Dave Airlie | 3 | * Copyright © 2007 Dave Airlie |
| 9 | * Copyright © 2007-2008 Intel Corporation | 4 | * Copyright © 2007-2008 Intel Corporation |
| @@ -36,6 +31,7 @@ | |||
| 36 | */ | 31 | */ |
| 37 | 32 | ||
| 38 | #include <linux/list.h> | 33 | #include <linux/list.h> |
| 34 | #include <linux/list_sort.h> | ||
| 39 | #include "drmP.h" | 35 | #include "drmP.h" |
| 40 | #include "drm.h" | 36 | #include "drm.h" |
| 41 | #include "drm_crtc.h" | 37 | #include "drm_crtc.h" |
| @@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid); | |||
| 855 | 851 | ||
| 856 | /** | 852 | /** |
| 857 | * drm_mode_compare - compare modes for favorability | 853 | * drm_mode_compare - compare modes for favorability |
| 854 | * @priv: unused | ||
| 858 | * @lh_a: list_head for first mode | 855 | * @lh_a: list_head for first mode |
| 859 | * @lh_b: list_head for second mode | 856 | * @lh_b: list_head for second mode |
| 860 | * | 857 | * |
| @@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid); | |||
| 868 | * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or | 865 | * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or |
| 869 | * positive if @lh_b is better than @lh_a. | 866 | * positive if @lh_b is better than @lh_a. |
| 870 | */ | 867 | */ |
| 871 | static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b) | 868 | static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b) |
| 872 | { | 869 | { |
| 873 | struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); | 870 | struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); |
| 874 | struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head); | 871 | struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head); |
| @@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b) | |||
| 885 | return diff; | 882 | return diff; |
| 886 | } | 883 | } |
| 887 | 884 | ||
| 888 | /* FIXME: what we don't have a list sort function? */ | ||
| 889 | /* list sort from Mark J Roberts (mjr@znex.org) */ | ||
| 890 | void list_sort(struct list_head *head, | ||
| 891 | int (*cmp)(struct list_head *a, struct list_head *b)) | ||
| 892 | { | ||
| 893 | struct list_head *p, *q, *e, *list, *tail, *oldhead; | ||
| 894 | int insize, nmerges, psize, qsize, i; | ||
| 895 | |||
| 896 | list = head->next; | ||
| 897 | list_del(head); | ||
| 898 | insize = 1; | ||
| 899 | for (;;) { | ||
| 900 | p = oldhead = list; | ||
| 901 | list = tail = NULL; | ||
| 902 | nmerges = 0; | ||
| 903 | |||
| 904 | while (p) { | ||
| 905 | nmerges++; | ||
| 906 | q = p; | ||
| 907 | psize = 0; | ||
| 908 | for (i = 0; i < insize; i++) { | ||
| 909 | psize++; | ||
| 910 | q = q->next == oldhead ? NULL : q->next; | ||
| 911 | if (!q) | ||
| 912 | break; | ||
| 913 | } | ||
| 914 | |||
| 915 | qsize = insize; | ||
| 916 | while (psize > 0 || (qsize > 0 && q)) { | ||
| 917 | if (!psize) { | ||
| 918 | e = q; | ||
| 919 | q = q->next; | ||
| 920 | qsize--; | ||
| 921 | if (q == oldhead) | ||
| 922 | q = NULL; | ||
| 923 | } else if (!qsize || !q) { | ||
| 924 | e = p; | ||
| 925 | p = p->next; | ||
| 926 | psize--; | ||
| 927 | if (p == oldhead) | ||
| 928 | p = NULL; | ||
| 929 | } else if (cmp(p, q) <= 0) { | ||
| 930 | e = p; | ||
| 931 | p = p->next; | ||
| 932 | psize--; | ||
| 933 | if (p == oldhead) | ||
| 934 | p = NULL; | ||
| 935 | } else { | ||
| 936 | e = q; | ||
| 937 | q = q->next; | ||
| 938 | qsize--; | ||
| 939 | if (q == oldhead) | ||
| 940 | q = NULL; | ||
| 941 | } | ||
| 942 | if (tail) | ||
| 943 | tail->next = e; | ||
| 944 | else | ||
| 945 | list = e; | ||
| 946 | e->prev = tail; | ||
| 947 | tail = e; | ||
| 948 | } | ||
| 949 | p = q; | ||
| 950 | } | ||
| 951 | |||
| 952 | tail->next = list; | ||
| 953 | list->prev = tail; | ||
| 954 | |||
| 955 | if (nmerges <= 1) | ||
| 956 | break; | ||
| 957 | |||
| 958 | insize *= 2; | ||
| 959 | } | ||
| 960 | |||
| 961 | head->next = list; | ||
| 962 | head->prev = list->prev; | ||
| 963 | list->prev->next = head; | ||
| 964 | list->prev = head; | ||
| 965 | } | ||
| 966 | |||
| 967 | /** | 885 | /** |
| 968 | * drm_mode_sort - sort mode list | 886 | * drm_mode_sort - sort mode list |
| 969 | * @mode_list: list to sort | 887 | * @mode_list: list to sort |
| @@ -975,7 +893,7 @@ void list_sort(struct list_head *head, | |||
| 975 | */ | 893 | */ |
| 976 | void drm_mode_sort(struct list_head *mode_list) | 894 | void drm_mode_sort(struct list_head *mode_list) |
| 977 | { | 895 | { |
| 978 | list_sort(mode_list, drm_mode_compare); | 896 | list_sort(NULL, mode_list, drm_mode_compare); |
| 979 | } | 897 | } |
| 980 | EXPORT_SYMBOL(drm_mode_sort); | 898 | EXPORT_SYMBOL(drm_mode_sort); |
| 981 | 899 | ||
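drm_modes.c drops its private merge sort in favour of the generic list_sort() from <linux/list_sort.h>, whose comparator carries an extra priv pointer (unused here, hence the NULL passed in drm_mode_sort()). A minimal usage sketch against the list_sort() prototype of this kernel generation, using a made-up element type:

    #include <linux/list.h>
    #include <linux/list_sort.h>

    struct item {
            struct list_head head;
            int key;
    };

    /* Comparator matching list_sort()'s expectations: negative/zero/positive
     * like memcmp; @priv is passed through from the caller and unused here. */
    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
            struct item *ia = list_entry(a, struct item, head);
            struct item *ib = list_entry(b, struct item, head);

            return ia->key - ib->key;
    }

    static void sort_items(struct list_head *items)
    {
            list_sort(NULL, items, item_cmp);  /* no private data needed */
    }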
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 577094fb1995..e68ebf92fa2a 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
| @@ -47,8 +47,7 @@ | |||
| 47 | /** | 47 | /** |
| 48 | * \brief Allocate a PCI consistent memory block, for DMA. | 48 | * \brief Allocate a PCI consistent memory block, for DMA. |
| 49 | */ | 49 | */ |
| 50 | drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, | 50 | drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) |
| 51 | dma_addr_t maxaddr) | ||
| 52 | { | 51 | { |
| 53 | drm_dma_handle_t *dmah; | 52 | drm_dma_handle_t *dmah; |
| 54 | #if 1 | 53 | #if 1 |
| @@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali | |||
| 63 | if (align > size) | 62 | if (align > size) |
| 64 | return NULL; | 63 | return NULL; |
| 65 | 64 | ||
| 66 | if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) { | ||
| 67 | DRM_ERROR("Setting pci dma mask failed\n"); | ||
| 68 | return NULL; | ||
| 69 | } | ||
| 70 | |||
| 71 | dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); | 65 | dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); |
| 72 | if (!dmah) | 66 | if (!dmah) |
| 73 | return NULL; | 67 | return NULL; |
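drm_pci_alloc() loses its maxaddr parameter along with the per-allocation pci_set_dma_mask() call, which could silently narrow the device's DMA mask on every buffer allocation. Callers now pass only size and alignment, and the mask becomes a one-time, device-wide setting. A hedged sketch of that split; the helper name is made up, and the old/new calls are the ones visible in the hunks above:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical init-time helper: set the device-wide DMA mask once,
     * instead of re-setting it inside every drm_pci_alloc() call. */
    static int example_set_dma_mask(struct pci_dev *pdev)
    {
            return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
    }

    /* Old call:  dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
     * New call:  dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);           */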
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 18476bf0b580..9c9998c4dceb 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co | |||
| 272 | mem = kmap_atomic(pages[page], KM_USER0); | 272 | mem = kmap_atomic(pages[page], KM_USER0); |
| 273 | for (i = 0; i < PAGE_SIZE; i += 4) | 273 | for (i = 0; i < PAGE_SIZE; i += 4) |
| 274 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); | 274 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); |
| 275 | kunmap_atomic(pages[page], KM_USER0); | 275 | kunmap_atomic(mem, KM_USER0); |
| 276 | } | 276 | } |
| 277 | } | 277 | } |
| 278 | 278 | ||
| @@ -386,34 +386,6 @@ out: | |||
| 386 | return 0; | 386 | return 0; |
| 387 | } | 387 | } |
| 388 | 388 | ||
| 389 | static int i915_registers_info(struct seq_file *m, void *data) { | ||
| 390 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 391 | struct drm_device *dev = node->minor->dev; | ||
| 392 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 393 | uint32_t reg; | ||
| 394 | |||
| 395 | #define DUMP_RANGE(start, end) \ | ||
| 396 | for (reg=start; reg < end; reg += 4) \ | ||
| 397 | seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg)); | ||
| 398 | |||
| 399 | DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */ | ||
| 400 | DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */ | ||
| 401 | DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */ | ||
| 402 | DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */ | ||
| 403 | DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */ | ||
| 404 | DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */ | ||
| 405 | DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */ | ||
| 406 | DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */ | ||
| 407 | DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */ | ||
| 408 | DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */ | ||
| 409 | DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */ | ||
| 410 | DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */ | ||
| 411 | DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */ | ||
| 412 | DUMP_RANGE(0x73000, 0x73fff); /* performance counters */ | ||
| 413 | |||
| 414 | return 0; | ||
| 415 | } | ||
| 416 | |||
| 417 | static int | 389 | static int |
| 418 | i915_wedged_open(struct inode *inode, | 390 | i915_wedged_open(struct inode *inode, |
| 419 | struct file *filp) | 391 | struct file *filp) |
| @@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | |||
| 519 | } | 491 | } |
| 520 | 492 | ||
| 521 | static struct drm_info_list i915_debugfs_list[] = { | 493 | static struct drm_info_list i915_debugfs_list[] = { |
| 522 | {"i915_regs", i915_registers_info, 0}, | ||
| 523 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 494 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
| 524 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | 495 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
| 525 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | 496 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 701bfeac7f57..bbe47812e4b6 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
| 123 | drm_i915_private_t *dev_priv = dev->dev_private; | 123 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 124 | /* Program Hardware Status Page */ | 124 | /* Program Hardware Status Page */ |
| 125 | dev_priv->status_page_dmah = | 125 | dev_priv->status_page_dmah = |
| 126 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); | 126 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); |
| 127 | 127 | ||
| 128 | if (!dev_priv->status_page_dmah) { | 128 | if (!dev_priv->status_page_dmah) { |
| 129 | DRM_ERROR("Can not allocate hardware status page\n"); | 129 | DRM_ERROR("Can not allocate hardware status page\n"); |
| @@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
| 813 | case I915_PARAM_HAS_PAGEFLIPPING: | 813 | case I915_PARAM_HAS_PAGEFLIPPING: |
| 814 | value = 1; | 814 | value = 1; |
| 815 | break; | 815 | break; |
| 816 | case I915_PARAM_HAS_EXECBUF2: | ||
| 817 | /* depends on GEM */ | ||
| 818 | value = dev_priv->has_gem; | ||
| 819 | break; | ||
| 816 | default: | 820 | default: |
| 817 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 821 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
| 818 | param->param); | 822 | param->param); |
| 819 | return -EINVAL; | 823 | return -EINVAL; |
| 820 | } | 824 | } |
| 821 | 825 | ||
| @@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
| 1117 | { | 1121 | { |
| 1118 | struct drm_i915_private *dev_priv = dev->dev_private; | 1122 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1119 | struct drm_mm_node *compressed_fb, *compressed_llb; | 1123 | struct drm_mm_node *compressed_fb, *compressed_llb; |
| 1120 | unsigned long cfb_base, ll_base; | 1124 | unsigned long cfb_base; |
| 1125 | unsigned long ll_base = 0; | ||
| 1121 | 1126 | ||
| 1122 | /* Leave 1M for line length buffer & misc. */ | 1127 | /* Leave 1M for line length buffer & misc. */ |
| 1123 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); | 1128 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); |
| @@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1200 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & | 1205 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & |
| 1201 | 0xff000000; | 1206 | 0xff000000; |
| 1202 | 1207 | ||
| 1203 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | ||
| 1204 | dev_priv->cursor_needs_physical = true; | ||
| 1205 | else | ||
| 1206 | dev_priv->cursor_needs_physical = false; | ||
| 1207 | |||
| 1208 | if (IS_I965G(dev) || IS_G33(dev)) | ||
| 1209 | dev_priv->cursor_needs_physical = false; | ||
| 1210 | |||
| 1211 | /* Basic memrange allocator for stolen space (aka vram) */ | 1208 | /* Basic memrange allocator for stolen space (aka vram) */ |
| 1212 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); | 1209 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); |
| 1213 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); | 1210 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); |
| @@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1257 | if (ret) | 1254 | if (ret) |
| 1258 | goto destroy_ringbuffer; | 1255 | goto destroy_ringbuffer; |
| 1259 | 1256 | ||
| 1257 | intel_modeset_init(dev); | ||
| 1258 | |||
| 1260 | ret = drm_irq_install(dev); | 1259 | ret = drm_irq_install(dev); |
| 1261 | if (ret) | 1260 | if (ret) |
| 1262 | goto destroy_ringbuffer; | 1261 | goto destroy_ringbuffer; |
| @@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
| 1271 | 1270 | ||
| 1272 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | 1271 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); |
| 1273 | 1272 | ||
| 1274 | intel_modeset_init(dev); | ||
| 1275 | |||
| 1276 | drm_helper_initial_config(dev); | 1273 | drm_helper_initial_config(dev); |
| 1277 | 1274 | ||
| 1278 | return 0; | 1275 | return 0; |
| @@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1360 | { | 1357 | { |
| 1361 | struct drm_i915_private *dev_priv = dev->dev_private; | 1358 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1362 | resource_size_t base, size; | 1359 | resource_size_t base, size; |
| 1363 | int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 1360 | int ret = 0, mmio_bar; |
| 1364 | uint32_t agp_size, prealloc_size, prealloc_start; | 1361 | uint32_t agp_size, prealloc_size, prealloc_start; |
| 1365 | 1362 | ||
| 1366 | /* i915 has 4 more counters */ | 1363 | /* i915 has 4 more counters */ |
| @@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1376 | 1373 | ||
| 1377 | dev->dev_private = (void *)dev_priv; | 1374 | dev->dev_private = (void *)dev_priv; |
| 1378 | dev_priv->dev = dev; | 1375 | dev_priv->dev = dev; |
| 1376 | dev_priv->info = (struct intel_device_info *) flags; | ||
| 1379 | 1377 | ||
| 1380 | /* Add register map (needed for suspend/resume) */ | 1378 | /* Add register map (needed for suspend/resume) */ |
| 1379 | mmio_bar = IS_I9XX(dev) ? 0 : 1; | ||
| 1381 | base = drm_get_resource_start(dev, mmio_bar); | 1380 | base = drm_get_resource_start(dev, mmio_bar); |
| 1382 | size = drm_get_resource_len(dev, mmio_bar); | 1381 | size = drm_get_resource_len(dev, mmio_bar); |
| 1383 | 1382 | ||
| @@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
| 1652 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1651 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1653 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1652 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1654 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | 1653 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), |
| 1654 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH), | ||
| 1655 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | 1655 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
| 1656 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | 1656 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
| 1657 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), | 1657 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), |
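i915_getparam() gains I915_PARAM_HAS_EXECBUF2 and the ioctl table gains DRM_I915_GEM_EXECBUFFER2, so userspace can probe for the new entry point before using it. A sketch of that probe from userspace, assuming libdrm's drmIoctl() and the i915_drm.h shipped with this kernel; older headers will not define the parameter, in which case the fallback is the legacy execbuffer path:

    #include <sys/ioctl.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Returns non-zero when the kernel advertises execbuffer2 support. */
    static int has_execbuf2(int fd)
    {
            struct drm_i915_getparam gp;
            int value = 0;

            gp.param = I915_PARAM_HAS_EXECBUF2;
            gp.value = &value;
            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return 0;
            return value;
    }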
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 24286ca168fc..be631cc3e4dc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
| 34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
| 35 | 35 | ||
| 36 | #include "drm_pciids.h" | ||
| 37 | #include <linux/console.h> | 36 | #include <linux/console.h> |
| 38 | #include "drm_crtc_helper.h" | 37 | #include "drm_crtc_helper.h" |
| 39 | 38 | ||
| @@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400); | |||
| 48 | 47 | ||
| 49 | static struct drm_driver driver; | 48 | static struct drm_driver driver; |
| 50 | 49 | ||
| 51 | static struct pci_device_id pciidlist[] = { | 50 | #define INTEL_VGA_DEVICE(id, info) { \ |
| 52 | i915_PCI_IDS | 51 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ |
| 52 | .class_mask = 0xffff00, \ | ||
| 53 | .vendor = 0x8086, \ | ||
| 54 | .device = id, \ | ||
| 55 | .subvendor = PCI_ANY_ID, \ | ||
| 56 | .subdevice = PCI_ANY_ID, \ | ||
| 57 | .driver_data = (unsigned long) info } | ||
| 58 | |||
| 59 | const static struct intel_device_info intel_i830_info = { | ||
| 60 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | ||
| 61 | }; | ||
| 62 | |||
| 63 | const static struct intel_device_info intel_845g_info = { | ||
| 64 | .is_i8xx = 1, | ||
| 65 | }; | ||
| 66 | |||
| 67 | const static struct intel_device_info intel_i85x_info = { | ||
| 68 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | ||
| 69 | }; | ||
| 70 | |||
| 71 | const static struct intel_device_info intel_i865g_info = { | ||
| 72 | .is_i8xx = 1, | ||
| 73 | }; | ||
| 74 | |||
| 75 | const static struct intel_device_info intel_i915g_info = { | ||
| 76 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | ||
| 77 | }; | ||
| 78 | const static struct intel_device_info intel_i915gm_info = { | ||
| 79 | .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | ||
| 80 | .cursor_needs_physical = 1, | ||
| 81 | }; | ||
| 82 | const static struct intel_device_info intel_i945g_info = { | ||
| 83 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | ||
| 84 | }; | ||
| 85 | const static struct intel_device_info intel_i945gm_info = { | ||
| 86 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | ||
| 87 | .has_hotplug = 1, .cursor_needs_physical = 1, | ||
| 88 | }; | ||
| 89 | |||
| 90 | const static struct intel_device_info intel_i965g_info = { | ||
| 91 | .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, | ||
| 92 | }; | ||
| 93 | |||
| 94 | const static struct intel_device_info intel_i965gm_info = { | ||
| 95 | .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, | ||
| 96 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, | ||
| 97 | .has_hotplug = 1, | ||
| 98 | }; | ||
| 99 | |||
| 100 | const static struct intel_device_info intel_g33_info = { | ||
| 101 | .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, | ||
| 102 | .has_hotplug = 1, | ||
| 103 | }; | ||
| 104 | |||
| 105 | const static struct intel_device_info intel_g45_info = { | ||
| 106 | .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, | ||
| 107 | .has_pipe_cxsr = 1, | ||
| 108 | .has_hotplug = 1, | ||
| 109 | }; | ||
| 110 | |||
| 111 | const static struct intel_device_info intel_gm45_info = { | ||
| 112 | .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, | ||
| 113 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | ||
| 114 | .has_pipe_cxsr = 1, | ||
| 115 | .has_hotplug = 1, | ||
| 116 | }; | ||
| 117 | |||
| 118 | const static struct intel_device_info intel_pineview_info = { | ||
| 119 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | ||
| 120 | .has_pipe_cxsr = 1, | ||
| 121 | .has_hotplug = 1, | ||
| 122 | }; | ||
| 123 | |||
| 124 | const static struct intel_device_info intel_ironlake_d_info = { | ||
| 125 | .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | ||
| 126 | .has_pipe_cxsr = 1, | ||
| 127 | .has_hotplug = 1, | ||
| 128 | }; | ||
| 129 | |||
| 130 | const static struct intel_device_info intel_ironlake_m_info = { | ||
| 131 | .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, | ||
| 132 | .need_gfx_hws = 1, .has_rc6 = 1, | ||
| 133 | .has_hotplug = 1, | ||
| 134 | }; | ||
| 135 | |||
| 136 | const static struct pci_device_id pciidlist[] = { | ||
| 137 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), | ||
| 138 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), | ||
| 139 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), | ||
| 140 | INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), | ||
| 141 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), | ||
| 142 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), | ||
| 143 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), | ||
| 144 | INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), | ||
| 145 | INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), | ||
| 146 | INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), | ||
| 147 | INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), | ||
| 148 | INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), | ||
| 149 | INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), | ||
| 150 | INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), | ||
| 151 | INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), | ||
| 152 | INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), | ||
| 153 | INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), | ||
| 154 | INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), | ||
| 155 | INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), | ||
| 156 | INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), | ||
| 157 | INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), | ||
| 158 | INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), | ||
| 159 | INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), | ||
| 160 | INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), | ||
| 161 | INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), | ||
| 162 | INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), | ||
| 163 | INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), | ||
| 164 | INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), | ||
| 165 | INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), | ||
| 166 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), | ||
| 167 | {0, 0, 0} | ||
| 53 | }; | 168 | }; |
| 54 | 169 | ||
| 55 | #if defined(CONFIG_DRM_I915_KMS) | 170 | #if defined(CONFIG_DRM_I915_KMS) |
| @@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev) | |||
| 284 | return i915_resume(dev); | 399 | return i915_resume(dev); |
| 285 | } | 400 | } |
| 286 | 401 | ||
| 402 | static int | ||
| 403 | i915_pm_suspend(struct device *dev) | ||
| 404 | { | ||
| 405 | return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND); | ||
| 406 | } | ||
| 407 | |||
| 408 | static int | ||
| 409 | i915_pm_resume(struct device *dev) | ||
| 410 | { | ||
| 411 | return i915_pci_resume(to_pci_dev(dev)); | ||
| 412 | } | ||
| 413 | |||
| 414 | static int | ||
| 415 | i915_pm_freeze(struct device *dev) | ||
| 416 | { | ||
| 417 | return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE); | ||
| 418 | } | ||
| 419 | |||
| 420 | static int | ||
| 421 | i915_pm_thaw(struct device *dev) | ||
| 422 | { | ||
| 423 | /* thaw during hibernate, do nothing! */ | ||
| 424 | return 0; | ||
| 425 | } | ||
| 426 | |||
| 427 | static int | ||
| 428 | i915_pm_poweroff(struct device *dev) | ||
| 429 | { | ||
| 430 | return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE); | ||
| 431 | } | ||
| 432 | |||
| 433 | static int | ||
| 434 | i915_pm_restore(struct device *dev) | ||
| 435 | { | ||
| 436 | return i915_pci_resume(to_pci_dev(dev)); | ||
| 437 | } | ||
| 438 | |||
| 439 | const struct dev_pm_ops i915_pm_ops = { | ||
| 440 | .suspend = i915_pm_suspend, | ||
| 441 | .resume = i915_pm_resume, | ||
| 442 | .freeze = i915_pm_freeze, | ||
| 443 | .thaw = i915_pm_thaw, | ||
| 444 | .poweroff = i915_pm_poweroff, | ||
| 445 | .restore = i915_pm_restore, | ||
| 446 | }; | ||
| 447 | |||
| 287 | static struct vm_operations_struct i915_gem_vm_ops = { | 448 | static struct vm_operations_struct i915_gem_vm_ops = { |
| 288 | .fault = i915_gem_fault, | 449 | .fault = i915_gem_fault, |
| 289 | .open = drm_gem_vm_open, | 450 | .open = drm_gem_vm_open, |
| @@ -344,10 +505,7 @@ static struct drm_driver driver = { | |||
| 344 | .id_table = pciidlist, | 505 | .id_table = pciidlist, |
| 345 | .probe = i915_pci_probe, | 506 | .probe = i915_pci_probe, |
| 346 | .remove = i915_pci_remove, | 507 | .remove = i915_pci_remove, |
| 347 | #ifdef CONFIG_PM | 508 | .driver.pm = &i915_pm_ops, |
| 348 | .resume = i915_pci_resume, | ||
| 349 | .suspend = i915_pci_suspend, | ||
| 350 | #endif | ||
| 351 | }, | 509 | }, |
| 352 | 510 | ||
| 353 | .name = DRIVER_NAME, | 511 | .name = DRIVER_NAME, |
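Two idioms arrive in i915_drv.c at once: each PCI ID now carries a pointer to a per-chipset intel_device_info in driver_data, and the legacy pci_driver suspend/resume hooks give way to a struct dev_pm_ops so freeze/thaw/poweroff/restore each get their own handler. A sketch of how a plain PCI probe would recover the info pointer from the matched table entry; i915 itself routes the pointer through drm_get_dev()'s flags argument into i915_driver_load(), so treat this purely as illustration:

    #include <linux/pci.h>
    #include "i915_drv.h"   /* assumed to provide struct intel_device_info */

    /* Generic PCI pattern the table above relies on: the matched entry's
     * driver_data identifies the chipset, so probe code never has to
     * switch on raw PCI device IDs again. */
    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
    {
            const struct intel_device_info *info =
                    (const struct intel_device_info *)ent->driver_data;

            return info ? 0 : -ENODEV;
    }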
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fbecac72f5bb..29dd67626967 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -172,9 +172,31 @@ struct drm_i915_display_funcs { | |||
| 172 | 172 | ||
| 173 | struct intel_overlay; | 173 | struct intel_overlay; |
| 174 | 174 | ||
| 175 | struct intel_device_info { | ||
| 176 | u8 is_mobile : 1; | ||
| 177 | u8 is_i8xx : 1; | ||
| 178 | u8 is_i915g : 1; | ||
| 179 | u8 is_i9xx : 1; | ||
| 180 | u8 is_i945gm : 1; | ||
| 181 | u8 is_i965g : 1; | ||
| 182 | u8 is_i965gm : 1; | ||
| 183 | u8 is_g33 : 1; | ||
| 184 | u8 need_gfx_hws : 1; | ||
| 185 | u8 is_g4x : 1; | ||
| 186 | u8 is_pineview : 1; | ||
| 187 | u8 is_ironlake : 1; | ||
| 188 | u8 has_fbc : 1; | ||
| 189 | u8 has_rc6 : 1; | ||
| 190 | u8 has_pipe_cxsr : 1; | ||
| 191 | u8 has_hotplug : 1; | ||
| 192 | u8 cursor_needs_physical : 1; | ||
| 193 | }; | ||
| 194 | |||
| 175 | typedef struct drm_i915_private { | 195 | typedef struct drm_i915_private { |
| 176 | struct drm_device *dev; | 196 | struct drm_device *dev; |
| 177 | 197 | ||
| 198 | const struct intel_device_info *info; | ||
| 199 | |||
| 178 | int has_gem; | 200 | int has_gem; |
| 179 | 201 | ||
| 180 | void __iomem *regs; | 202 | void __iomem *regs; |
| @@ -232,8 +254,6 @@ typedef struct drm_i915_private { | |||
| 232 | int hangcheck_count; | 254 | int hangcheck_count; |
| 233 | uint32_t last_acthd; | 255 | uint32_t last_acthd; |
| 234 | 256 | ||
| 235 | bool cursor_needs_physical; | ||
| 236 | |||
| 237 | struct drm_mm vram; | 257 | struct drm_mm vram; |
| 238 | 258 | ||
| 239 | unsigned long cfb_size; | 259 | unsigned long cfb_size; |
| @@ -287,8 +307,6 @@ typedef struct drm_i915_private { | |||
| 287 | u32 saveDSPACNTR; | 307 | u32 saveDSPACNTR; |
| 288 | u32 saveDSPBCNTR; | 308 | u32 saveDSPBCNTR; |
| 289 | u32 saveDSPARB; | 309 | u32 saveDSPARB; |
| 290 | u32 saveRENDERSTANDBY; | ||
| 291 | u32 savePWRCTXA; | ||
| 292 | u32 saveHWS; | 310 | u32 saveHWS; |
| 293 | u32 savePIPEACONF; | 311 | u32 savePIPEACONF; |
| 294 | u32 savePIPEBCONF; | 312 | u32 savePIPEBCONF; |
| @@ -561,6 +579,7 @@ typedef struct drm_i915_private { | |||
| 561 | u16 orig_clock; | 579 | u16 orig_clock; |
| 562 | int child_dev_num; | 580 | int child_dev_num; |
| 563 | struct child_device_config *child_dev; | 581 | struct child_device_config *child_dev; |
| 582 | struct drm_connector *int_lvds_connector; | ||
| 564 | } drm_i915_private_t; | 583 | } drm_i915_private_t; |
| 565 | 584 | ||
| 566 | /** driver private structure attached to each drm_gem_object */ | 585 | /** driver private structure attached to each drm_gem_object */ |
| @@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
| 794 | struct drm_file *file_priv); | 813 | struct drm_file *file_priv); |
| 795 | int i915_gem_execbuffer(struct drm_device *dev, void *data, | 814 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
| 796 | struct drm_file *file_priv); | 815 | struct drm_file *file_priv); |
| 816 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
| 817 | struct drm_file *file_priv); | ||
| 797 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 818 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
| 798 | struct drm_file *file_priv); | 819 | struct drm_file *file_priv); |
| 799 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 820 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
| @@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void); | |||
| 860 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 881 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
| 861 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 882 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); |
| 862 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | 883 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); |
| 884 | bool i915_tiling_ok(struct drm_device *dev, int stride, int size, | ||
| 885 | int tiling_mode); | ||
| 886 | bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj); | ||
| 863 | 887 | ||
| 864 | /* i915_gem_debug.c */ | 888 | /* i915_gem_debug.c */ |
| 865 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 889 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, |
| @@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev); | |||
| 982 | extern int i915_wrap_ring(struct drm_device * dev); | 1006 | extern int i915_wrap_ring(struct drm_device * dev); |
| 983 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | 1007 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); |
| 984 | 1008 | ||
| 985 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 1009 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
| 986 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 1010 | |
| 987 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) | 1011 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
| 988 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 1012 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
| 989 | #define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) | 1013 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) |
| 990 | 1014 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | |
| 991 | #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) | 1015 | #define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) |
| 992 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | 1016 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
| 993 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | 1017 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
| 994 | #define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ | 1018 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) |
| 995 | (dev)->pci_device == 0x27AE) | 1019 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
| 996 | #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ | 1020 | #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) |
| 997 | (dev)->pci_device == 0x2982 || \ | 1021 | #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) |
| 998 | (dev)->pci_device == 0x2992 || \ | 1022 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) |
| 999 | (dev)->pci_device == 0x29A2 || \ | 1023 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
| 1000 | (dev)->pci_device == 0x2A02 || \ | 1024 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) |
| 1001 | (dev)->pci_device == 0x2A12 || \ | 1025 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) |
| 1002 | (dev)->pci_device == 0x2A42 || \ | 1026 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
| 1003 | (dev)->pci_device == 0x2E02 || \ | 1027 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
| 1004 | (dev)->pci_device == 0x2E12 || \ | ||
| 1005 | (dev)->pci_device == 0x2E22 || \ | ||
| 1006 | (dev)->pci_device == 0x2E32 || \ | ||
| 1007 | (dev)->pci_device == 0x2E42 || \ | ||
| 1008 | (dev)->pci_device == 0x0042 || \ | ||
| 1009 | (dev)->pci_device == 0x0046) | ||
| 1010 | |||
| 1011 | #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ | ||
| 1012 | (dev)->pci_device == 0x2A12) | ||
| 1013 | |||
| 1014 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | ||
| 1015 | |||
| 1016 | #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ | ||
| 1017 | (dev)->pci_device == 0x2E12 || \ | ||
| 1018 | (dev)->pci_device == 0x2E22 || \ | ||
| 1019 | (dev)->pci_device == 0x2E32 || \ | ||
| 1020 | (dev)->pci_device == 0x2E42 || \ | ||
| 1021 | IS_GM45(dev)) | ||
| 1022 | |||
| 1023 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | ||
| 1024 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | ||
| 1025 | #define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev)) | ||
| 1026 | |||
| 1027 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ | ||
| 1028 | (dev)->pci_device == 0x29B2 || \ | ||
| 1029 | (dev)->pci_device == 0x29D2 || \ | ||
| 1030 | (IS_PINEVIEW(dev))) | ||
| 1031 | |||
| 1032 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | 1028 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
| 1033 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | 1029 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
| 1034 | #define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) | 1030 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) |
| 1035 | 1031 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) | |
| 1036 | #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ | 1032 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
| 1037 | IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ | ||
| 1038 | IS_IRONLAKE(dev)) | ||
| 1039 | 1033 | ||
| 1040 | #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ | 1034 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
| 1041 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ | ||
| 1042 | IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev)) | ||
| 1043 | 1035 | ||
| 1044 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ | ||
| 1045 | IS_IRONLAKE(dev)) | ||
| 1046 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1036 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
| 1047 | * rows, which changed the alignment requirements and fence programming. | 1037 | * rows, which changed the alignment requirements and fence programming. |
| 1048 | */ | 1038 | */ |
| @@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
| 1054 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | 1044 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) |
| 1055 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ | 1045 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ |
| 1056 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) | 1046 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) |
| 1057 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) | 1047 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
| 1058 | /* dsparb controlled by hw only */ | 1048 | /* dsparb controlled by hw only */ |
| 1059 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1049 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
| 1060 | 1050 | ||
| 1061 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) | 1051 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) |
| 1062 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1052 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
| 1063 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ | 1053 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
| 1064 | (IS_I9XX(dev) || IS_GM45(dev)) && \ | 1054 | #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) |
| 1065 | !IS_PINEVIEW(dev) && \ | ||
| 1066 | !IS_IRONLAKE(dev)) | ||
| 1067 | #define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev)) | ||
| 1068 | 1055 | ||
| 1069 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1056 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
| 1070 | 1057 | ||
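With intel_device_info in place, the long PCI-ID comparison chains collapse into single-bit feature flags read through INTEL_INFO(dev), and supporting a new chipset becomes one struct initializer plus one table entry. A one-liner sketch of what such a check now amounts to, conceptually sitting next to the macros in i915_drv.h:

    /* A feature test like I915_HAS_FBC(dev) is a single bitfield load
     * instead of a chain of PCI-ID comparisons. */
    static inline bool example_supports_fbc(struct drm_device *dev)
    {
            return INTEL_INFO(dev)->has_fbc;
    }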
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8c463cf2050a..2748609f05b3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
| 2021 | /* blow away mappings if mapped through GTT */ | 2021 | /* blow away mappings if mapped through GTT */ |
| 2022 | i915_gem_release_mmap(obj); | 2022 | i915_gem_release_mmap(obj); |
| 2023 | 2023 | ||
| 2024 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
| 2025 | i915_gem_clear_fence_reg(obj); | ||
| 2026 | |||
| 2027 | /* Move the object to the CPU domain to ensure that | 2024 | /* Move the object to the CPU domain to ensure that |
| 2028 | * any possible CPU writes while it's not in the GTT | 2025 | * any possible CPU writes while it's not in the GTT |
| 2029 | * are flushed when we go to remap it. This will | 2026 | * are flushed when we go to remap it. This will |
| @@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
| 2039 | 2036 | ||
| 2040 | BUG_ON(obj_priv->active); | 2037 | BUG_ON(obj_priv->active); |
| 2041 | 2038 | ||
| 2039 | /* release the fence reg _after_ flushing */ | ||
| 2040 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
| 2041 | i915_gem_clear_fence_reg(obj); | ||
| 2042 | |||
| 2042 | if (obj_priv->agp_mem != NULL) { | 2043 | if (obj_priv->agp_mem != NULL) { |
| 2043 | drm_unbind_agp(obj_priv->agp_mem); | 2044 | drm_unbind_agp(obj_priv->agp_mem); |
| 2044 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 2045 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); |
| @@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
| 2581 | bool retry_alloc = false; | 2582 | bool retry_alloc = false; |
| 2582 | int ret; | 2583 | int ret; |
| 2583 | 2584 | ||
| 2584 | if (dev_priv->mm.suspended) | ||
| 2585 | return -EBUSY; | ||
| 2586 | |||
| 2587 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2585 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
| 2588 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2586 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
| 2589 | return -EINVAL; | 2587 | return -EINVAL; |
| @@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
| 3198 | static int | 3196 | static int |
| 3199 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 3197 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, |
| 3200 | struct drm_file *file_priv, | 3198 | struct drm_file *file_priv, |
| 3201 | struct drm_i915_gem_exec_object *entry, | 3199 | struct drm_i915_gem_exec_object2 *entry, |
| 3202 | struct drm_i915_gem_relocation_entry *relocs) | 3200 | struct drm_i915_gem_relocation_entry *relocs) |
| 3203 | { | 3201 | { |
| 3204 | struct drm_device *dev = obj->dev; | 3202 | struct drm_device *dev = obj->dev; |
| @@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3206 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3204 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
| 3207 | int i, ret; | 3205 | int i, ret; |
| 3208 | void __iomem *reloc_page; | 3206 | void __iomem *reloc_page; |
| 3207 | bool need_fence; | ||
| 3208 | |||
| 3209 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
| 3210 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
| 3211 | |||
| 3212 | /* Check fence reg constraints and rebind if necessary */ | ||
| 3213 | if (need_fence && !i915_obj_fenceable(dev, obj)) | ||
| 3214 | i915_gem_object_unbind(obj); | ||
| 3209 | 3215 | ||
| 3210 | /* Choose the GTT offset for our buffer and put it there. */ | 3216 | /* Choose the GTT offset for our buffer and put it there. */ |
| 3211 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 3217 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); |
| 3212 | if (ret) | 3218 | if (ret) |
| 3213 | return ret; | 3219 | return ret; |
| 3214 | 3220 | ||
| 3221 | /* | ||
| 3222 | * Pre-965 chips need a fence register set up in order to | ||
| 3223 | * properly handle blits to/from tiled surfaces. | ||
| 3224 | */ | ||
| 3225 | if (need_fence) { | ||
| 3226 | ret = i915_gem_object_get_fence_reg(obj); | ||
| 3227 | if (ret != 0) { | ||
| 3228 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
| 3229 | DRM_ERROR("Failure to install fence: %d\n", | ||
| 3230 | ret); | ||
| 3231 | i915_gem_object_unpin(obj); | ||
| 3232 | return ret; | ||
| 3233 | } | ||
| 3234 | } | ||
| 3235 | |||
| 3215 | entry->offset = obj_priv->gtt_offset; | 3236 | entry->offset = obj_priv->gtt_offset; |
| 3216 | 3237 | ||
| 3217 | /* Apply the relocations, using the GTT aperture to avoid cache | 3238 | /* Apply the relocations, using the GTT aperture to avoid cache |
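The pin-and-relocate path now honours a per-object EXEC_OBJECT_NEEDS_FENCE flag: pre-965 hardware needs a fence register programmed before blitting to or from tiled surfaces, and the flag lets userspace say which buffers that applies to (the legacy execbuffer wrapper later in this file simply sets it on every object for pre-965 chips). A userspace-side sketch of filling in one execbuffer2 entry, using the struct and flag names from this kernel's i915_drm.h:

    #include <stdint.h>
    #include <string.h>
    #include <i915_drm.h>

    /* Fill one execbuffer2 object entry for a tiled buffer; the kernel only
     * sets up a fence register when this flag is present and the object is
     * actually tiled. */
    static void fill_exec_entry(struct drm_i915_gem_exec_object2 *entry,
                                uint32_t handle, uint64_t relocs_ptr,
                                uint32_t reloc_count)
    {
            memset(entry, 0, sizeof(*entry));
            entry->handle = handle;
            entry->relocs_ptr = relocs_ptr;
            entry->relocation_count = reloc_count;
            entry->flags = EXEC_OBJECT_NEEDS_FENCE; /* tiled source/destination */
    }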
| @@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
| 3373 | */ | 3394 | */ |
| 3374 | static int | 3395 | static int |
| 3375 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | 3396 | i915_dispatch_gem_execbuffer(struct drm_device *dev, |
| 3376 | struct drm_i915_gem_execbuffer *exec, | 3397 | struct drm_i915_gem_execbuffer2 *exec, |
| 3377 | struct drm_clip_rect *cliprects, | 3398 | struct drm_clip_rect *cliprects, |
| 3378 | uint64_t exec_offset) | 3399 | uint64_t exec_offset) |
| 3379 | { | 3400 | { |
| @@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
| 3463 | } | 3484 | } |
| 3464 | 3485 | ||
| 3465 | static int | 3486 | static int |
| 3466 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | 3487 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, |
| 3467 | uint32_t buffer_count, | 3488 | uint32_t buffer_count, |
| 3468 | struct drm_i915_gem_relocation_entry **relocs) | 3489 | struct drm_i915_gem_relocation_entry **relocs) |
| 3469 | { | 3490 | { |
| @@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
| 3478 | } | 3499 | } |
| 3479 | 3500 | ||
| 3480 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | 3501 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); |
| 3481 | if (*relocs == NULL) | 3502 | if (*relocs == NULL) { |
| 3503 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | ||
| 3482 | return -ENOMEM; | 3504 | return -ENOMEM; |
| 3505 | } | ||
| 3483 | 3506 | ||
| 3484 | for (i = 0; i < buffer_count; i++) { | 3507 | for (i = 0; i < buffer_count; i++) { |
| 3485 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3508 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
| @@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
| 3503 | } | 3526 | } |
| 3504 | 3527 | ||
| 3505 | static int | 3528 | static int |
| 3506 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | 3529 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, |
| 3507 | uint32_t buffer_count, | 3530 | uint32_t buffer_count, |
| 3508 | struct drm_i915_gem_relocation_entry *relocs) | 3531 | struct drm_i915_gem_relocation_entry *relocs) |
| 3509 | { | 3532 | { |
| @@ -3536,7 +3559,7 @@ err: | |||
| 3536 | } | 3559 | } |
| 3537 | 3560 | ||
| 3538 | static int | 3561 | static int |
| 3539 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, | 3562 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, |
| 3540 | uint64_t exec_offset) | 3563 | uint64_t exec_offset) |
| 3541 | { | 3564 | { |
| 3542 | uint32_t exec_start, exec_len; | 3565 | uint32_t exec_start, exec_len; |
| @@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
| 3589 | } | 3612 | } |
| 3590 | 3613 | ||
| 3591 | int | 3614 | int |
| 3592 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3615 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
| 3593 | struct drm_file *file_priv) | 3616 | struct drm_file *file_priv, |
| 3617 | struct drm_i915_gem_execbuffer2 *args, | ||
| 3618 | struct drm_i915_gem_exec_object2 *exec_list) | ||
| 3594 | { | 3619 | { |
| 3595 | drm_i915_private_t *dev_priv = dev->dev_private; | 3620 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 3596 | struct drm_i915_gem_execbuffer *args = data; | ||
| 3597 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
| 3598 | struct drm_gem_object **object_list = NULL; | 3621 | struct drm_gem_object **object_list = NULL; |
| 3599 | struct drm_gem_object *batch_obj; | 3622 | struct drm_gem_object *batch_obj; |
| 3600 | struct drm_i915_gem_object *obj_priv; | 3623 | struct drm_i915_gem_object *obj_priv; |
| 3601 | struct drm_clip_rect *cliprects = NULL; | 3624 | struct drm_clip_rect *cliprects = NULL; |
| 3602 | struct drm_i915_gem_relocation_entry *relocs; | 3625 | struct drm_i915_gem_relocation_entry *relocs; |
| 3603 | int ret, ret2, i, pinned = 0; | 3626 | int ret = 0, ret2, i, pinned = 0; |
| 3604 | uint64_t exec_offset; | 3627 | uint64_t exec_offset; |
| 3605 | uint32_t seqno, flush_domains, reloc_index; | 3628 | uint32_t seqno, flush_domains, reloc_index; |
| 3606 | int pin_tries, flips; | 3629 | int pin_tries, flips; |
| @@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
| 3614 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 3637 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
| 3615 | return -EINVAL; | 3638 | return -EINVAL; |
| 3616 | } | 3639 | } |
| 3617 | /* Copy in the exec list from userland */ | ||
| 3618 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
| 3619 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | 3640 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); |
| 3620 | if (exec_list == NULL || object_list == NULL) { | 3641 | if (object_list == NULL) { |
| 3621 | DRM_ERROR("Failed to allocate exec or object list " | 3642 | DRM_ERROR("Failed to allocate object list for %d buffers\n", |
| 3622 | "for %d buffers\n", | ||
| 3623 | args->buffer_count); | 3643 | args->buffer_count); |
| 3624 | ret = -ENOMEM; | 3644 | ret = -ENOMEM; |
| 3625 | goto pre_mutex_err; | 3645 | goto pre_mutex_err; |
| 3626 | } | 3646 | } |
| 3627 | ret = copy_from_user(exec_list, | ||
| 3628 | (struct drm_i915_relocation_entry __user *) | ||
| 3629 | (uintptr_t) args->buffers_ptr, | ||
| 3630 | sizeof(*exec_list) * args->buffer_count); | ||
| 3631 | if (ret != 0) { | ||
| 3632 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
| 3633 | args->buffer_count, ret); | ||
| 3634 | goto pre_mutex_err; | ||
| 3635 | } | ||
| 3636 | 3647 | ||
| 3637 | if (args->num_cliprects != 0) { | 3648 | if (args->num_cliprects != 0) { |
| 3638 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 3649 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), |
| @@ -3884,20 +3895,6 @@ err: | |||
| 3884 | 3895 | ||
| 3885 | mutex_unlock(&dev->struct_mutex); | 3896 | mutex_unlock(&dev->struct_mutex); |
| 3886 | 3897 | ||
| 3887 | if (!ret) { | ||
| 3888 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
| 3889 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
| 3890 | (uintptr_t) args->buffers_ptr, | ||
| 3891 | exec_list, | ||
| 3892 | sizeof(*exec_list) * args->buffer_count); | ||
| 3893 | if (ret) { | ||
| 3894 | ret = -EFAULT; | ||
| 3895 | DRM_ERROR("failed to copy %d exec entries " | ||
| 3896 | "back to user (%d)\n", | ||
| 3897 | args->buffer_count, ret); | ||
| 3898 | } | ||
| 3899 | } | ||
| 3900 | |||
| 3901 | /* Copy the updated relocations out regardless of current error | 3898 | /* Copy the updated relocations out regardless of current error |
| 3902 | * state. Failure to update the relocs would mean that the next | 3899 | * state. Failure to update the relocs would mean that the next |
| 3903 | * time userland calls execbuf, it would do so with presumed offset | 3900 | * time userland calls execbuf, it would do so with presumed offset |
| @@ -3914,12 +3911,158 @@ err: | |||
| 3914 | 3911 | ||
| 3915 | pre_mutex_err: | 3912 | pre_mutex_err: |
| 3916 | drm_free_large(object_list); | 3913 | drm_free_large(object_list); |
| 3917 | drm_free_large(exec_list); | ||
| 3918 | kfree(cliprects); | 3914 | kfree(cliprects); |
| 3919 | 3915 | ||
| 3920 | return ret; | 3916 | return ret; |
| 3921 | } | 3917 | } |
| 3922 | 3918 | ||
| 3919 | /* | ||
| 3920 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
| 3921 | * list array and passes it to the real function. | ||
| 3922 | */ | ||
| 3923 | int | ||
| 3924 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
| 3925 | struct drm_file *file_priv) | ||
| 3926 | { | ||
| 3927 | struct drm_i915_gem_execbuffer *args = data; | ||
| 3928 | struct drm_i915_gem_execbuffer2 exec2; | ||
| 3929 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
| 3930 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
| 3931 | int ret, i; | ||
| 3932 | |||
| 3933 | #if WATCH_EXEC | ||
| 3934 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
| 3935 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
| 3936 | #endif | ||
| 3937 | |||
| 3938 | if (args->buffer_count < 1) { | ||
| 3939 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
| 3940 | return -EINVAL; | ||
| 3941 | } | ||
| 3942 | |||
| 3943 | /* Copy in the exec list from userland */ | ||
| 3944 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
| 3945 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
| 3946 | if (exec_list == NULL || exec2_list == NULL) { | ||
| 3947 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
| 3948 | args->buffer_count); | ||
| 3949 | drm_free_large(exec_list); | ||
| 3950 | drm_free_large(exec2_list); | ||
| 3951 | return -ENOMEM; | ||
| 3952 | } | ||
| 3953 | ret = copy_from_user(exec_list, | ||
| 3954 | (struct drm_i915_relocation_entry __user *) | ||
| 3955 | (uintptr_t) args->buffers_ptr, | ||
| 3956 | sizeof(*exec_list) * args->buffer_count); | ||
| 3957 | if (ret != 0) { | ||
| 3958 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
| 3959 | args->buffer_count, ret); | ||
| 3960 | drm_free_large(exec_list); | ||
| 3961 | drm_free_large(exec2_list); | ||
| 3962 | return -EFAULT; | ||
| 3963 | } | ||
| 3964 | |||
| 3965 | for (i = 0; i < args->buffer_count; i++) { | ||
| 3966 | exec2_list[i].handle = exec_list[i].handle; | ||
| 3967 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
| 3968 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
| 3969 | exec2_list[i].alignment = exec_list[i].alignment; | ||
| 3970 | exec2_list[i].offset = exec_list[i].offset; | ||
| 3971 | if (!IS_I965G(dev)) | ||
| 3972 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
| 3973 | else | ||
| 3974 | exec2_list[i].flags = 0; | ||
| 3975 | } | ||
| 3976 | |||
| 3977 | exec2.buffers_ptr = args->buffers_ptr; | ||
| 3978 | exec2.buffer_count = args->buffer_count; | ||
| 3979 | exec2.batch_start_offset = args->batch_start_offset; | ||
| 3980 | exec2.batch_len = args->batch_len; | ||
| 3981 | exec2.DR1 = args->DR1; | ||
| 3982 | exec2.DR4 = args->DR4; | ||
| 3983 | exec2.num_cliprects = args->num_cliprects; | ||
| 3984 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
| 3985 | exec2.flags = 0; | ||
| 3986 | |||
| 3987 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | ||
| 3988 | if (!ret) { | ||
| 3989 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
| 3990 | for (i = 0; i < args->buffer_count; i++) | ||
| 3991 | exec_list[i].offset = exec2_list[i].offset; | ||
| 3992 | /* ... and back out to userspace */ | ||
| 3993 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
| 3994 | (uintptr_t) args->buffers_ptr, | ||
| 3995 | exec_list, | ||
| 3996 | sizeof(*exec_list) * args->buffer_count); | ||
| 3997 | if (ret) { | ||
| 3998 | ret = -EFAULT; | ||
| 3999 | DRM_ERROR("failed to copy %d exec entries " | ||
| 4000 | "back to user (%d)\n", | ||
| 4001 | args->buffer_count, ret); | ||
| 4002 | } | ||
| 4003 | } else { | ||
| 4004 | DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret); | ||
| 4005 | } | ||
| 4006 | |||
| 4007 | drm_free_large(exec_list); | ||
| 4008 | drm_free_large(exec2_list); | ||
| 4009 | return ret; | ||
| 4010 | } | ||
| 4011 | |||
| 4012 | int | ||
| 4013 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
| 4014 | struct drm_file *file_priv) | ||
| 4015 | { | ||
| 4016 | struct drm_i915_gem_execbuffer2 *args = data; | ||
| 4017 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
| 4018 | int ret; | ||
| 4019 | |||
| 4020 | #if WATCH_EXEC | ||
| 4021 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
| 4022 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
| 4023 | #endif | ||
| 4024 | |||
| 4025 | if (args->buffer_count < 1) { | ||
| 4026 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
| 4027 | return -EINVAL; | ||
| 4028 | } | ||
| 4029 | |||
| 4030 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
| 4031 | if (exec2_list == NULL) { | ||
| 4032 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
| 4033 | args->buffer_count); | ||
| 4034 | return -ENOMEM; | ||
| 4035 | } | ||
| 4036 | ret = copy_from_user(exec2_list, | ||
| 4037 | (struct drm_i915_relocation_entry __user *) | ||
| 4038 | (uintptr_t) args->buffers_ptr, | ||
| 4039 | sizeof(*exec2_list) * args->buffer_count); | ||
| 4040 | if (ret != 0) { | ||
| 4041 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
| 4042 | args->buffer_count, ret); | ||
| 4043 | drm_free_large(exec2_list); | ||
| 4044 | return -EFAULT; | ||
| 4045 | } | ||
| 4046 | |||
| 4047 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | ||
| 4048 | if (!ret) { | ||
| 4049 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
| 4050 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
| 4051 | (uintptr_t) args->buffers_ptr, | ||
| 4052 | exec2_list, | ||
| 4053 | sizeof(*exec2_list) * args->buffer_count); | ||
| 4054 | if (ret) { | ||
| 4055 | ret = -EFAULT; | ||
| 4056 | DRM_ERROR("failed to copy %d exec entries " | ||
| 4057 | "back to user (%d)\n", | ||
| 4058 | args->buffer_count, ret); | ||
| 4059 | } | ||
| 4060 | } | ||
| 4061 | |||
| 4062 | drm_free_large(exec2_list); | ||
| 4063 | return ret; | ||
| 4064 | } | ||
| 4065 | |||
| 3923 | int | 4066 | int |
| 3924 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 4067 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
| 3925 | { | 4068 | { |
| @@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
| 3933 | if (ret) | 4076 | if (ret) |
| 3934 | return ret; | 4077 | return ret; |
| 3935 | } | 4078 | } |
| 3936 | /* | 4079 | |
| 3937 | * Pre-965 chips need a fence register set up in order to | ||
| 3938 | * properly handle tiled surfaces. | ||
| 3939 | */ | ||
| 3940 | if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { | ||
| 3941 | ret = i915_gem_object_get_fence_reg(obj); | ||
| 3942 | if (ret != 0) { | ||
| 3943 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
| 3944 | DRM_ERROR("Failure to install fence: %d\n", | ||
| 3945 | ret); | ||
| 3946 | return ret; | ||
| 3947 | } | ||
| 3948 | } | ||
| 3949 | obj_priv->pin_count++; | 4080 | obj_priv->pin_count++; |
| 3950 | 4081 | ||
| 3951 | /* If the object is not active and not pending a flush, | 4082 | /* If the object is not active and not pending a flush, |
| @@ -4708,7 +4839,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, | |||
| 4708 | 4839 | ||
| 4709 | phys_obj->id = id; | 4840 | phys_obj->id = id; |
| 4710 | 4841 | ||
| 4711 | phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); | 4842 | phys_obj->handle = drm_pci_alloc(dev, size, 0); |
| 4712 | if (!phys_obj->handle) { | 4843 | if (!phys_obj->handle) { |
| 4713 | ret = -ENOMEM; | 4844 | ret = -ENOMEM; |
| 4714 | goto kfree_obj; | 4845 | goto kfree_obj; |
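Editor's note: for orientation, a minimal caller-side sketch of the execbuffer2 interface introduced in the hunks above. The struct and field names come straight from this patch; the buffer handle, relocation array, and batch length are placeholders, and the ioctl plumbing that reaches i915_gem_execbuffer2() is not part of this excerpt.

/* Hypothetical example, not part of the patch: one exec object plus the
 * top-level request. On pre-965 hardware the legacy wrapper above sets
 * EXEC_OBJECT_NEEDS_FENCE for every buffer; an execbuffer2 caller sets it
 * per object as needed. Assumes the i915 UAPI definitions added here. */
struct drm_i915_gem_exec_object2 obj = {
	.handle           = bo_handle,            /* placeholder GEM handle */
	.relocation_count = nr_relocs,            /* placeholder count */
	.relocs_ptr       = (uintptr_t)relocs,    /* placeholder reloc array */
	.alignment        = 0,
	.offset           = 0,                    /* written back on success */
	.flags            = EXEC_OBJECT_NEEDS_FENCE,
};
struct drm_i915_gem_execbuffer2 exec2 = {
	.buffers_ptr        = (uintptr_t)&obj,
	.buffer_count       = 1,
	.batch_start_offset = 0,
	.batch_len          = batch_bytes,        /* placeholder length */
	.num_cliprects      = 0,
	.flags              = 0,
};
/* The request is then handed to i915_gem_do_execbuffer() through the new
 * i915_gem_execbuffer2() entry point shown above. */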
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 30d6af6c09bb..df278b2685bf 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
| 304 | 304 | ||
| 305 | 305 | ||
| 306 | /** | 306 | /** |
| 307 | * Returns the size of the fence for a tiled object of the given size. | 307 | * Returns whether an object is currently fenceable. If not, it may need |
| 308 | * to be unbound and have its pitch adjusted. | ||
| 308 | */ | 309 | */ |
| 309 | static int | 310 | bool |
| 310 | i915_get_fence_size(struct drm_device *dev, int size) | 311 | i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj) |
| 311 | { | 312 | { |
| 312 | int i; | 313 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
| 313 | int start; | ||
| 314 | 314 | ||
| 315 | if (IS_I965G(dev)) { | 315 | if (IS_I965G(dev)) { |
| 316 | /* The 965 can have fences at any page boundary. */ | 316 | /* The 965 can have fences at any page boundary. */ |
| 317 | return ALIGN(size, 4096); | 317 | if (obj->size & 4095) |
| 318 | return false; | ||
| 319 | return true; | ||
| 320 | } else if (IS_I9XX(dev)) { | ||
| 321 | if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) | ||
| 322 | return false; | ||
| 318 | } else { | 323 | } else { |
| 319 | /* Align the size to a power of two greater than the smallest | 324 | if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) |
| 320 | * fence size. | 325 | return false; |
| 321 | */ | 326 | } |
| 322 | if (IS_I9XX(dev)) | ||
| 323 | start = 1024 * 1024; | ||
| 324 | else | ||
| 325 | start = 512 * 1024; | ||
| 326 | 327 | ||
| 327 | for (i = start; i < size; i <<= 1) | 328 | /* Power of two sized... */ |
| 328 | ; | 329 | if (obj->size & (obj->size - 1)) |
| 330 | return false; | ||
| 329 | 331 | ||
| 330 | return i; | 332 | /* Objects must be size aligned as well */ |
| 331 | } | 333 | if (obj_priv->gtt_offset & (obj->size - 1)) |
| 334 | return false; | ||
| 335 | return true; | ||
| 332 | } | 336 | } |
| 333 | 337 | ||
| 334 | /* Check pitch constraints for all chips & tiling formats */ | 338 | /* Check pitch constraints for all chips & tiling formats */ |
| 335 | static bool | 339 | bool |
| 336 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | 340 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
| 337 | { | 341 | { |
| 338 | int tile_width; | 342 | int tile_width; |
| @@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
| 384 | if (stride & (stride - 1)) | 388 | if (stride & (stride - 1)) |
| 385 | return false; | 389 | return false; |
| 386 | 390 | ||
| 387 | /* We don't handle the aperture area covered by the fence being bigger | ||
| 388 | * than the object size. | ||
| 389 | */ | ||
| 390 | if (i915_get_fence_size(dev, size) != size) | ||
| 391 | return false; | ||
| 392 | |||
| 393 | return true; | 391 | return true; |
| 394 | } | 392 | } |
| 395 | 393 | ||
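Editor's note: the new i915_obj_fenceable() helper above reduces to three checks on pre-965 parts (965 only needs 4 KiB size alignment). A condensed sketch of the same logic, with the generation-specific start mask passed in rather than chosen by the IS_I9XX()/I830 tests; this is illustrative only, not the driver's code.

/* u32 is the kernel's unsigned 32-bit typedef; start_mask would be
 * I915_FENCE_START_MASK or I830_FENCE_START_MASK as in the hunk above. */
static bool fits_in_fence(u32 gtt_offset, u32 size, u32 start_mask)
{
	if (gtt_offset & ~start_mask)	/* placed beyond the fenceable range */
		return false;
	if (size & (size - 1))		/* size must be a power of two */
		return false;
	if (gtt_offset & (size - 1))	/* and the offset aligned to that size */
		return false;
	return true;
}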
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85f4c5de97e2..7cd8110051b6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
| 313 | dev_priv->mm.irq_gem_seqno = seqno; | 313 | dev_priv->mm.irq_gem_seqno = seqno; |
| 314 | trace_i915_gem_request_complete(dev, seqno); | 314 | trace_i915_gem_request_complete(dev, seqno); |
| 315 | DRM_WAKEUP(&dev_priv->irq_queue); | 315 | DRM_WAKEUP(&dev_priv->irq_queue); |
| 316 | dev_priv->hangcheck_count = 0; | ||
| 317 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
| 316 | } | 318 | } |
| 317 | 319 | ||
| 318 | if (de_iir & DE_GSE) | 320 | if (de_iir & DE_GSE) |
| @@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
| 1084 | (void) I915_READ(IER); | 1086 | (void) I915_READ(IER); |
| 1085 | } | 1087 | } |
| 1086 | 1088 | ||
| 1089 | /* | ||
| 1090 | * Must be called after intel_modeset_init or hotplug interrupts won't be | ||
| 1091 | * enabled correctly. | ||
| 1092 | */ | ||
| 1087 | int i915_driver_irq_postinstall(struct drm_device *dev) | 1093 | int i915_driver_irq_postinstall(struct drm_device *dev) |
| 1088 | { | 1094 | { |
| 1089 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1095 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| @@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
| 1106 | if (I915_HAS_HOTPLUG(dev)) { | 1112 | if (I915_HAS_HOTPLUG(dev)) { |
| 1107 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1113 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
| 1108 | 1114 | ||
| 1109 | /* Leave other bits alone */ | 1115 | /* Note HDMI and DP share bits */ |
| 1110 | hotplug_en |= HOTPLUG_EN_MASK; | 1116 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
| 1117 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
| 1118 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
| 1119 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
| 1120 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
| 1121 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
| 1122 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
| 1123 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
| 1124 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
| 1125 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
| 1126 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
| 1127 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
| 1128 | /* Ignore TV since it's buggy */ | ||
| 1129 | |||
| 1111 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 1130 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
| 1112 | 1131 | ||
| 1113 | dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | | ||
| 1114 | TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | | ||
| 1115 | SDVOB_HOTPLUG_INT_STATUS; | ||
| 1116 | if (IS_G4X(dev)) { | ||
| 1117 | dev_priv->hotplug_supported_mask |= | ||
| 1118 | HDMIB_HOTPLUG_INT_STATUS | | ||
| 1119 | HDMIC_HOTPLUG_INT_STATUS | | ||
| 1120 | HDMID_HOTPLUG_INT_STATUS; | ||
| 1121 | } | ||
| 1122 | /* Enable in IER... */ | 1132 | /* Enable in IER... */ |
| 1123 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1133 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
| 1124 | /* and unmask in IMR */ | 1134 | /* and unmask in IMR */ |
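Editor's note: the counterpart of this postinstall change lives in the connector init paths later in this patch (intel_crt.c, intel_dp.c, intel_hdmi.c): each output that is actually instantiated ORs its status bit into dev_priv->hotplug_supported_mask, for example:

/* Taken from the hunks below; shown here only to connect the two halves. */
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;   /* intel_crt_init() */
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; /* intel_hdmi_init(dev, SDVOB) */

so only the bits for outputs that exist are translated into *_HOTPLUG_INT_EN above, instead of enabling the whole HOTPLUG_EN_MASK unconditionally.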
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 974b3cf70618..149d360d64a3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -879,13 +879,6 @@ | |||
| 879 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 879 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
| 880 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | 880 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ |
| 881 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f | 881 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f |
| 882 | #define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ | ||
| 883 | HDMIC_HOTPLUG_INT_EN | \ | ||
| 884 | HDMID_HOTPLUG_INT_EN | \ | ||
| 885 | SDVOB_HOTPLUG_INT_EN | \ | ||
| 886 | SDVOC_HOTPLUG_INT_EN | \ | ||
| 887 | CRT_HOTPLUG_INT_EN) | ||
| 888 | |||
| 889 | 882 | ||
| 890 | #define PORT_HOTPLUG_STAT 0x61114 | 883 | #define PORT_HOTPLUG_STAT 0x61114 |
| 891 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 884 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
| @@ -982,6 +975,8 @@ | |||
| 982 | #define LVDS_PORT_EN (1 << 31) | 975 | #define LVDS_PORT_EN (1 << 31) |
| 983 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 976 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
| 984 | #define LVDS_PIPEB_SELECT (1 << 30) | 977 | #define LVDS_PIPEB_SELECT (1 << 30) |
| 978 | /* LVDS dithering flag on 965/g4x platform */ | ||
| 979 | #define LVDS_ENABLE_DITHER (1 << 25) | ||
| 985 | /* Enable border for unscaled (or aspect-scaled) display */ | 980 | /* Enable border for unscaled (or aspect-scaled) display */ |
| 986 | #define LVDS_BORDER_ENABLE (1 << 15) | 981 | #define LVDS_BORDER_ENABLE (1 << 15) |
| 987 | /* | 982 | /* |
| @@ -1751,6 +1746,8 @@ | |||
| 1751 | 1746 | ||
| 1752 | /* Display & cursor control */ | 1747 | /* Display & cursor control */ |
| 1753 | 1748 | ||
| 1749 | /* dithering flag on Ironlake */ | ||
| 1750 | #define PIPE_ENABLE_DITHER (1 << 4) | ||
| 1754 | /* Pipe A */ | 1751 | /* Pipe A */ |
| 1755 | #define PIPEADSL 0x70000 | 1752 | #define PIPEADSL 0x70000 |
| 1756 | #define PIPEACONF 0x70008 | 1753 | #define PIPEACONF 0x70008 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d5ebb00a9d49..a3b90c9561dc 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
| @@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev) | |||
| 732 | 732 | ||
| 733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
| 734 | 734 | ||
| 735 | /* Render Standby */ | ||
| 736 | if (I915_HAS_RC6(dev)) { | ||
| 737 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
| 738 | dev_priv->savePWRCTXA = I915_READ(PWRCTXA); | ||
| 739 | } | ||
| 740 | |||
| 741 | /* Hardware status page */ | 735 | /* Hardware status page */ |
| 742 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 736 | dev_priv->saveHWS = I915_READ(HWS_PGA); |
| 743 | 737 | ||
| @@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev) | |||
| 793 | 787 | ||
| 794 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 788 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
| 795 | 789 | ||
| 796 | /* Render Standby */ | ||
| 797 | if (I915_HAS_RC6(dev)) { | ||
| 798 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
| 799 | I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA); | ||
| 800 | } | ||
| 801 | |||
| 802 | /* Hardware status page */ | 790 | /* Hardware status page */ |
| 803 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 791 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
| 804 | 792 | ||
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9f3d3e563414..ddefc871edfe 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev) | |||
| 548 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 548 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
| 549 | 549 | ||
| 550 | drm_sysfs_connector_add(connector); | 550 | drm_sysfs_connector_add(connector); |
| 551 | |||
| 552 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | ||
| 551 | } | 553 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 52cd9b006da2..002612fae717 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -262,6 +262,14 @@ struct intel_limit { | |||
| 262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ | 262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ |
| 263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ | 263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ |
| 264 | 264 | ||
| 265 | #define IRONLAKE_P_DISPLAY_PORT_MIN 10 | ||
| 266 | #define IRONLAKE_P_DISPLAY_PORT_MAX 20 | ||
| 267 | #define IRONLAKE_P2_DISPLAY_PORT_FAST 10 | ||
| 268 | #define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 | ||
| 269 | #define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 | ||
| 270 | #define IRONLAKE_P1_DISPLAY_PORT_MIN 1 | ||
| 271 | #define IRONLAKE_P1_DISPLAY_PORT_MAX 2 | ||
| 272 | |||
| 265 | static bool | 273 | static bool |
| 266 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 274 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
| 267 | int target, int refclk, intel_clock_t *best_clock); | 275 | int target, int refclk, intel_clock_t *best_clock); |
| @@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
| 271 | static bool | 279 | static bool |
| 272 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 280 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
| 273 | int target, int refclk, intel_clock_t *best_clock); | 281 | int target, int refclk, intel_clock_t *best_clock); |
| 274 | static bool | ||
| 275 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
| 276 | int target, int refclk, intel_clock_t *best_clock); | ||
| 277 | 282 | ||
| 278 | static bool | 283 | static bool |
| 279 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 284 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
| @@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = { | |||
| 496 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 501 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
| 497 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, | 502 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, |
| 498 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, | 503 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, |
| 499 | .find_pll = intel_ironlake_find_best_PLL, | 504 | .find_pll = intel_g4x_find_best_PLL, |
| 500 | }; | 505 | }; |
| 501 | 506 | ||
| 502 | static const intel_limit_t intel_limits_ironlake_lvds = { | 507 | static const intel_limit_t intel_limits_ironlake_lvds = { |
| @@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = { | |||
| 511 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 516 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
| 512 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, | 517 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, |
| 513 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, | 518 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, |
| 514 | .find_pll = intel_ironlake_find_best_PLL, | 519 | .find_pll = intel_g4x_find_best_PLL, |
| 520 | }; | ||
| 521 | |||
| 522 | static const intel_limit_t intel_limits_ironlake_display_port = { | ||
| 523 | .dot = { .min = IRONLAKE_DOT_MIN, | ||
| 524 | .max = IRONLAKE_DOT_MAX }, | ||
| 525 | .vco = { .min = IRONLAKE_VCO_MIN, | ||
| 526 | .max = IRONLAKE_VCO_MAX}, | ||
| 527 | .n = { .min = IRONLAKE_N_MIN, | ||
| 528 | .max = IRONLAKE_N_MAX }, | ||
| 529 | .m = { .min = IRONLAKE_M_MIN, | ||
| 530 | .max = IRONLAKE_M_MAX }, | ||
| 531 | .m1 = { .min = IRONLAKE_M1_MIN, | ||
| 532 | .max = IRONLAKE_M1_MAX }, | ||
| 533 | .m2 = { .min = IRONLAKE_M2_MIN, | ||
| 534 | .max = IRONLAKE_M2_MAX }, | ||
| 535 | .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, | ||
| 536 | .max = IRONLAKE_P_DISPLAY_PORT_MAX }, | ||
| 537 | .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, | ||
| 538 | .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, | ||
| 539 | .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, | ||
| 540 | .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, | ||
| 541 | .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, | ||
| 542 | .find_pll = intel_find_pll_ironlake_dp, | ||
| 515 | }; | 543 | }; |
| 516 | 544 | ||
| 517 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 545 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) |
| @@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | |||
| 519 | const intel_limit_t *limit; | 547 | const intel_limit_t *limit; |
| 520 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 548 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
| 521 | limit = &intel_limits_ironlake_lvds; | 549 | limit = &intel_limits_ironlake_lvds; |
| 550 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | ||
| 551 | HAS_eDP) | ||
| 552 | limit = &intel_limits_ironlake_display_port; | ||
| 522 | else | 553 | else |
| 523 | limit = &intel_limits_ironlake_sdvo; | 554 | limit = &intel_limits_ironlake_sdvo; |
| 524 | 555 | ||
| @@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
| 791 | found = false; | 822 | found = false; |
| 792 | 823 | ||
| 793 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 824 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
| 794 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 825 | int lvds_reg; |
| 826 | |||
| 827 | if (IS_IRONLAKE(dev)) | ||
| 828 | lvds_reg = PCH_LVDS; | ||
| 829 | else | ||
| 830 | lvds_reg = LVDS; | ||
| 831 | if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == | ||
| 795 | LVDS_CLKB_POWER_UP) | 832 | LVDS_CLKB_POWER_UP) |
| 796 | clock.p2 = limit->p2.p2_fast; | 833 | clock.p2 = limit->p2.p2_fast; |
| 797 | else | 834 | else |
| @@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
| 839 | { | 876 | { |
| 840 | struct drm_device *dev = crtc->dev; | 877 | struct drm_device *dev = crtc->dev; |
| 841 | intel_clock_t clock; | 878 | intel_clock_t clock; |
| 879 | |||
| 880 | /* return directly when it is eDP */ | ||
| 881 | if (HAS_eDP) | ||
| 882 | return true; | ||
| 883 | |||
| 842 | if (target < 200000) { | 884 | if (target < 200000) { |
| 843 | clock.n = 1; | 885 | clock.n = 1; |
| 844 | clock.p1 = 2; | 886 | clock.p1 = 2; |
| @@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
| 857 | return true; | 899 | return true; |
| 858 | } | 900 | } |
| 859 | 901 | ||
| 860 | static bool | ||
| 861 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
| 862 | int target, int refclk, intel_clock_t *best_clock) | ||
| 863 | { | ||
| 864 | struct drm_device *dev = crtc->dev; | ||
| 865 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 866 | intel_clock_t clock; | ||
| 867 | int err_most = 47; | ||
| 868 | int err_min = 10000; | ||
| 869 | |||
| 870 | /* eDP has only 2 clock choice, no n/m/p setting */ | ||
| 871 | if (HAS_eDP) | ||
| 872 | return true; | ||
| 873 | |||
| 874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
| 875 | return intel_find_pll_ironlake_dp(limit, crtc, target, | ||
| 876 | refclk, best_clock); | ||
| 877 | |||
| 878 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
| 879 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
| 880 | LVDS_CLKB_POWER_UP) | ||
| 881 | clock.p2 = limit->p2.p2_fast; | ||
| 882 | else | ||
| 883 | clock.p2 = limit->p2.p2_slow; | ||
| 884 | } else { | ||
| 885 | if (target < limit->p2.dot_limit) | ||
| 886 | clock.p2 = limit->p2.p2_slow; | ||
| 887 | else | ||
| 888 | clock.p2 = limit->p2.p2_fast; | ||
| 889 | } | ||
| 890 | |||
| 891 | memset(best_clock, 0, sizeof(*best_clock)); | ||
| 892 | for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { | ||
| 893 | /* based on hardware requirement prefer smaller n for precision */ | ||
| 894 | for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { | ||
| 895 | /* based on hardware requirement prefer larger m1,m2 */ | ||
| 896 | for (clock.m1 = limit->m1.max; | ||
| 897 | clock.m1 >= limit->m1.min; clock.m1--) { | ||
| 898 | for (clock.m2 = limit->m2.max; | ||
| 899 | clock.m2 >= limit->m2.min; clock.m2--) { | ||
| 900 | int this_err; | ||
| 901 | |||
| 902 | intel_clock(dev, refclk, &clock); | ||
| 903 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
| 904 | continue; | ||
| 905 | this_err = abs((10000 - (target*10000/clock.dot))); | ||
| 906 | if (this_err < err_most) { | ||
| 907 | *best_clock = clock; | ||
| 908 | /* found on first matching */ | ||
| 909 | goto out; | ||
| 910 | } else if (this_err < err_min) { | ||
| 911 | *best_clock = clock; | ||
| 912 | err_min = this_err; | ||
| 913 | } | ||
| 914 | } | ||
| 915 | } | ||
| 916 | } | ||
| 917 | } | ||
| 918 | out: | ||
| 919 | return true; | ||
| 920 | } | ||
| 921 | |||
| 922 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | 902 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
| 923 | static bool | 903 | static bool |
| 924 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 904 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
| @@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 1493 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1473 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
| 1494 | u32 temp; | 1474 | u32 temp; |
| 1495 | int tries = 5, j, n; | 1475 | int tries = 5, j, n; |
| 1476 | u32 pipe_bpc; | ||
| 1477 | |||
| 1478 | temp = I915_READ(pipeconf_reg); | ||
| 1479 | pipe_bpc = temp & PIPE_BPC_MASK; | ||
| 1496 | 1480 | ||
| 1497 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 1481 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
| 1498 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 1482 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
| @@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 1524 | 1508 | ||
| 1525 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1509 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
| 1526 | temp = I915_READ(fdi_rx_reg); | 1510 | temp = I915_READ(fdi_rx_reg); |
| 1511 | /* | ||
| 1512 | * make the BPC in FDI Rx be consistent with that in | ||
| 1513 | * pipeconf reg. | ||
| 1514 | */ | ||
| 1515 | temp &= ~(0x7 << 16); | ||
| 1516 | temp |= (pipe_bpc << 11); | ||
| 1527 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1517 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | |
| 1528 | FDI_SEL_PCDCLK | | 1518 | FDI_SEL_PCDCLK | |
| 1529 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | 1519 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ |
| @@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 1666 | 1656 | ||
| 1667 | /* enable PCH transcoder */ | 1657 | /* enable PCH transcoder */ |
| 1668 | temp = I915_READ(transconf_reg); | 1658 | temp = I915_READ(transconf_reg); |
| 1659 | /* | ||
| 1660 | * make the BPC in transcoder be consistent with | ||
| 1661 | * that in pipeconf reg. | ||
| 1662 | */ | ||
| 1663 | temp &= ~PIPE_BPC_MASK; | ||
| 1664 | temp |= pipe_bpc; | ||
| 1669 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 1665 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
| 1670 | I915_READ(transconf_reg); | 1666 | I915_READ(transconf_reg); |
| 1671 | 1667 | ||
| @@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 1745 | I915_READ(fdi_tx_reg); | 1741 | I915_READ(fdi_tx_reg); |
| 1746 | 1742 | ||
| 1747 | temp = I915_READ(fdi_rx_reg); | 1743 | temp = I915_READ(fdi_rx_reg); |
| 1744 | /* BPC in FDI rx is consistent with that in pipeconf */ | ||
| 1745 | temp &= ~(0x07 << 16); | ||
| 1746 | temp |= (pipe_bpc << 11); | ||
| 1748 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | 1747 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); |
| 1749 | I915_READ(fdi_rx_reg); | 1748 | I915_READ(fdi_rx_reg); |
| 1750 | 1749 | ||
| @@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 1789 | } | 1788 | } |
| 1790 | } | 1789 | } |
| 1791 | } | 1790 | } |
| 1792 | 1791 | temp = I915_READ(transconf_reg); | |
| 1792 | /* BPC in transcoder is consistent with that in pipeconf */ | ||
| 1793 | temp &= ~PIPE_BPC_MASK; | ||
| 1794 | temp |= pipe_bpc; | ||
| 1795 | I915_WRITE(transconf_reg, temp); | ||
| 1796 | I915_READ(transconf_reg); | ||
| 1793 | udelay(100); | 1797 | udelay(100); |
| 1794 | 1798 | ||
| 1795 | /* disable PCH DPLL */ | 1799 | /* disable PCH DPLL */ |
| @@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
| 2448 | * A value of 5us seems to be a good balance; safe for very low end | 2452 | * A value of 5us seems to be a good balance; safe for very low end |
| 2449 | * platforms but not overly aggressive on lower latency configs. | 2453 | * platforms but not overly aggressive on lower latency configs. |
| 2450 | */ | 2454 | */ |
| 2451 | const static int latency_ns = 5000; | 2455 | static const int latency_ns = 5000; |
| 2452 | 2456 | ||
| 2453 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | 2457 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
| 2454 | { | 2458 | { |
| @@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
| 2559 | /* Calc sr entries for one plane configs */ | 2563 | /* Calc sr entries for one plane configs */ |
| 2560 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2564 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
| 2561 | /* self-refresh has much higher latency */ | 2565 | /* self-refresh has much higher latency */ |
| 2562 | const static int sr_latency_ns = 12000; | 2566 | static const int sr_latency_ns = 12000; |
| 2563 | 2567 | ||
| 2564 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2568 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
| 2565 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2569 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
| @@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
| 2598 | /* Calc sr entries for one plane configs */ | 2602 | /* Calc sr entries for one plane configs */ |
| 2599 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2603 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
| 2600 | /* self-refresh has much higher latency */ | 2604 | /* self-refresh has much higher latency */ |
| 2601 | const static int sr_latency_ns = 12000; | 2605 | static const int sr_latency_ns = 12000; |
| 2602 | 2606 | ||
| 2603 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2607 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
| 2604 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2608 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
| @@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
| 2667 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 2671 | if (HAS_FW_BLC(dev) && sr_hdisplay && |
| 2668 | (!planea_clock || !planeb_clock)) { | 2672 | (!planea_clock || !planeb_clock)) { |
| 2669 | /* self-refresh has much higher latency */ | 2673 | /* self-refresh has much higher latency */ |
| 2670 | const static int sr_latency_ns = 6000; | 2674 | static const int sr_latency_ns = 6000; |
| 2671 | 2675 | ||
| 2672 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2676 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
| 2673 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2677 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
| @@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 2969 | 2973 | ||
| 2970 | /* determine panel color depth */ | 2974 | /* determine panel color depth */ |
| 2971 | temp = I915_READ(pipeconf_reg); | 2975 | temp = I915_READ(pipeconf_reg); |
| 2976 | temp &= ~PIPE_BPC_MASK; | ||
| 2977 | if (is_lvds) { | ||
| 2978 | int lvds_reg = I915_READ(PCH_LVDS); | ||
| 2979 | /* the BPC will be 6 if it is 18-bit LVDS panel */ | ||
| 2980 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
| 2981 | temp |= PIPE_8BPC; | ||
| 2982 | else | ||
| 2983 | temp |= PIPE_6BPC; | ||
| 2984 | } else | ||
| 2985 | temp |= PIPE_8BPC; | ||
| 2986 | I915_WRITE(pipeconf_reg, temp); | ||
| 2987 | I915_READ(pipeconf_reg); | ||
| 2972 | 2988 | ||
| 2973 | switch (temp & PIPE_BPC_MASK) { | 2989 | switch (temp & PIPE_BPC_MASK) { |
| 2974 | case PIPE_8BPC: | 2990 | case PIPE_8BPC: |
| @@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 3195 | * appropriately here, but we need to look more thoroughly into how | 3211 | * appropriately here, but we need to look more thoroughly into how |
| 3196 | * panels behave in the two modes. | 3212 | * panels behave in the two modes. |
| 3197 | */ | 3213 | */ |
| 3198 | 3214 | /* set the dithering flag */ | |
| 3215 | if (IS_I965G(dev)) { | ||
| 3216 | if (dev_priv->lvds_dither) { | ||
| 3217 | if (IS_IRONLAKE(dev)) | ||
| 3218 | pipeconf |= PIPE_ENABLE_DITHER; | ||
| 3219 | else | ||
| 3220 | lvds |= LVDS_ENABLE_DITHER; | ||
| 3221 | } else { | ||
| 3222 | if (IS_IRONLAKE(dev)) | ||
| 3223 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
| 3224 | else | ||
| 3225 | lvds &= ~LVDS_ENABLE_DITHER; | ||
| 3226 | } | ||
| 3227 | } | ||
| 3199 | I915_WRITE(lvds_reg, lvds); | 3228 | I915_WRITE(lvds_reg, lvds); |
| 3200 | I915_READ(lvds_reg); | 3229 | I915_READ(lvds_reg); |
| 3201 | } | 3230 | } |
| @@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 3385 | 3414 | ||
| 3386 | /* we only need to pin inside GTT if cursor is non-phy */ | 3415 | /* we only need to pin inside GTT if cursor is non-phy */ |
| 3387 | mutex_lock(&dev->struct_mutex); | 3416 | mutex_lock(&dev->struct_mutex); |
| 3388 | if (!dev_priv->cursor_needs_physical) { | 3417 | if (!dev_priv->info->cursor_needs_physical) { |
| 3389 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 3418 | ret = i915_gem_object_pin(bo, PAGE_SIZE); |
| 3390 | if (ret) { | 3419 | if (ret) { |
| 3391 | DRM_ERROR("failed to pin cursor bo\n"); | 3420 | DRM_ERROR("failed to pin cursor bo\n"); |
| @@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 3420 | I915_WRITE(base, addr); | 3449 | I915_WRITE(base, addr); |
| 3421 | 3450 | ||
| 3422 | if (intel_crtc->cursor_bo) { | 3451 | if (intel_crtc->cursor_bo) { |
| 3423 | if (dev_priv->cursor_needs_physical) { | 3452 | if (dev_priv->info->cursor_needs_physical) { |
| 3424 | if (intel_crtc->cursor_bo != bo) | 3453 | if (intel_crtc->cursor_bo != bo) |
| 3425 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 3454 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
| 3426 | } else | 3455 | } else |
| @@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
| 3779 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 3808 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
| 3780 | } | 3809 | } |
| 3781 | 3810 | ||
| 3782 | void intel_increase_renderclock(struct drm_device *dev, bool schedule) | ||
| 3783 | { | ||
| 3784 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 3785 | |||
| 3786 | if (IS_IRONLAKE(dev)) | ||
| 3787 | return; | ||
| 3788 | |||
| 3789 | if (!dev_priv->render_reclock_avail) { | ||
| 3790 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
| 3791 | return; | ||
| 3792 | } | ||
| 3793 | |||
| 3794 | /* Restore render clock frequency to original value */ | ||
| 3795 | if (IS_G4X(dev) || IS_I9XX(dev)) | ||
| 3796 | pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); | ||
| 3797 | else if (IS_I85X(dev)) | ||
| 3798 | pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); | ||
| 3799 | DRM_DEBUG_DRIVER("increasing render clock frequency\n"); | ||
| 3800 | |||
| 3801 | /* Schedule downclock */ | ||
| 3802 | if (schedule) | ||
| 3803 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
| 3804 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
| 3805 | } | ||
| 3806 | |||
| 3807 | void intel_decrease_renderclock(struct drm_device *dev) | ||
| 3808 | { | ||
| 3809 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 3810 | |||
| 3811 | if (IS_IRONLAKE(dev)) | ||
| 3812 | return; | ||
| 3813 | |||
| 3814 | if (!dev_priv->render_reclock_avail) { | ||
| 3815 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
| 3816 | return; | ||
| 3817 | } | ||
| 3818 | |||
| 3819 | if (IS_G4X(dev)) { | ||
| 3820 | u16 gcfgc; | ||
| 3821 | |||
| 3822 | /* Adjust render clock... */ | ||
| 3823 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
| 3824 | |||
| 3825 | /* Down to minimum... */ | ||
| 3826 | gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; | ||
| 3827 | gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; | ||
| 3828 | |||
| 3829 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
| 3830 | } else if (IS_I965G(dev)) { | ||
| 3831 | u16 gcfgc; | ||
| 3832 | |||
| 3833 | /* Adjust render clock... */ | ||
| 3834 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
| 3835 | |||
| 3836 | /* Down to minimum... */ | ||
| 3837 | gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; | ||
| 3838 | gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; | ||
| 3839 | |||
| 3840 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
| 3841 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
| 3842 | u16 gcfgc; | ||
| 3843 | |||
| 3844 | /* Adjust render clock... */ | ||
| 3845 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
| 3846 | |||
| 3847 | /* Down to minimum... */ | ||
| 3848 | gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; | ||
| 3849 | gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; | ||
| 3850 | |||
| 3851 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
| 3852 | } else if (IS_I915G(dev)) { | ||
| 3853 | u16 gcfgc; | ||
| 3854 | |||
| 3855 | /* Adjust render clock... */ | ||
| 3856 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
| 3857 | |||
| 3858 | /* Down to minimum... */ | ||
| 3859 | gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; | ||
| 3860 | gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; | ||
| 3861 | |||
| 3862 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
| 3863 | } else if (IS_I85X(dev)) { | ||
| 3864 | u16 hpllcc; | ||
| 3865 | |||
| 3866 | /* Adjust render clock... */ | ||
| 3867 | pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); | ||
| 3868 | |||
| 3869 | /* Up to maximum... */ | ||
| 3870 | hpllcc &= ~GC_CLOCK_CONTROL_MASK; | ||
| 3871 | hpllcc |= GC_CLOCK_133_200; | ||
| 3872 | |||
| 3873 | pci_write_config_word(dev->pdev, HPLLCC, hpllcc); | ||
| 3874 | } | ||
| 3875 | DRM_DEBUG_DRIVER("decreasing render clock frequency\n"); | ||
| 3876 | } | ||
| 3877 | |||
| 3878 | /* Note that no increase function is needed for this - increase_renderclock() | ||
| 3879 | * will also rewrite these bits | ||
| 3880 | */ | ||
| 3881 | void intel_decrease_displayclock(struct drm_device *dev) | ||
| 3882 | { | ||
| 3883 | if (IS_IRONLAKE(dev)) | ||
| 3884 | return; | ||
| 3885 | |||
| 3886 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || | ||
| 3887 | IS_I915GM(dev)) { | ||
| 3888 | u16 gcfgc; | ||
| 3889 | |||
| 3890 | /* Adjust render clock... */ | ||
| 3891 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
| 3892 | |||
| 3893 | /* Down to minimum... */ | ||
| 3894 | gcfgc &= ~0xf0; | ||
| 3895 | gcfgc |= 0x80; | ||
| 3896 | |||
| 3897 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
| 3898 | } | ||
| 3899 | } | ||
| 3900 | |||
| 3901 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | 3811 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
| 3902 | 3812 | ||
| 3903 | static void intel_crtc_idle_timer(unsigned long arg) | 3813 | static void intel_crtc_idle_timer(unsigned long arg) |
| @@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work) | |||
| 4011 | 3921 | ||
| 4012 | mutex_lock(&dev->struct_mutex); | 3922 | mutex_lock(&dev->struct_mutex); |
| 4013 | 3923 | ||
| 4014 | /* GPU isn't processing, downclock it. */ | ||
| 4015 | if (!dev_priv->busy) { | ||
| 4016 | intel_decrease_renderclock(dev); | ||
| 4017 | intel_decrease_displayclock(dev); | ||
| 4018 | } | ||
| 4019 | |||
| 4020 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 3924 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 4021 | /* Skip inactive CRTCs */ | 3925 | /* Skip inactive CRTCs */ |
| 4022 | if (!crtc->fb) | 3926 | if (!crtc->fb) |
| @@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
| 4050 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3954 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 4051 | return; | 3955 | return; |
| 4052 | 3956 | ||
| 4053 | if (!dev_priv->busy) { | 3957 | if (!dev_priv->busy) |
| 4054 | dev_priv->busy = true; | 3958 | dev_priv->busy = true; |
| 4055 | intel_increase_renderclock(dev, true); | 3959 | else |
| 4056 | } else { | ||
| 4057 | mod_timer(&dev_priv->idle_timer, jiffies + | 3960 | mod_timer(&dev_priv->idle_timer, jiffies + |
| 4058 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 3961 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
| 4059 | } | ||
| 4060 | 3962 | ||
| 4061 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 3963 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 4062 | if (!crtc->fb) | 3964 | if (!crtc->fb) |
| @@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 4400 | bool found = false; | 4302 | bool found = false; |
| 4401 | 4303 | ||
| 4402 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 4304 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
| 4305 | DRM_DEBUG_KMS("probing SDVOB\n"); | ||
| 4403 | found = intel_sdvo_init(dev, SDVOB); | 4306 | found = intel_sdvo_init(dev, SDVOB); |
| 4404 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 4307 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
| 4308 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | ||
| 4405 | intel_hdmi_init(dev, SDVOB); | 4309 | intel_hdmi_init(dev, SDVOB); |
| 4310 | } | ||
| 4406 | 4311 | ||
| 4407 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 4312 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
| 4313 | DRM_DEBUG_KMS("probing DP_B\n"); | ||
| 4408 | intel_dp_init(dev, DP_B); | 4314 | intel_dp_init(dev, DP_B); |
| 4315 | } | ||
| 4409 | } | 4316 | } |
| 4410 | 4317 | ||
| 4411 | /* Before G4X SDVOC doesn't have its own detect register */ | 4318 | /* Before G4X SDVOC doesn't have its own detect register */ |
| 4412 | 4319 | ||
| 4413 | if (I915_READ(SDVOB) & SDVO_DETECTED) | 4320 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
| 4321 | DRM_DEBUG_KMS("probing SDVOC\n"); | ||
| 4414 | found = intel_sdvo_init(dev, SDVOC); | 4322 | found = intel_sdvo_init(dev, SDVOC); |
| 4323 | } | ||
| 4415 | 4324 | ||
| 4416 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | 4325 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
| 4417 | 4326 | ||
| 4418 | if (SUPPORTS_INTEGRATED_HDMI(dev)) | 4327 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
| 4328 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | ||
| 4419 | intel_hdmi_init(dev, SDVOC); | 4329 | intel_hdmi_init(dev, SDVOC); |
| 4420 | if (SUPPORTS_INTEGRATED_DP(dev)) | 4330 | } |
| 4331 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
| 4332 | DRM_DEBUG_KMS("probing DP_C\n"); | ||
| 4421 | intel_dp_init(dev, DP_C); | 4333 | intel_dp_init(dev, DP_C); |
| 4334 | } | ||
| 4422 | } | 4335 | } |
| 4423 | 4336 | ||
| 4424 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | 4337 | if (SUPPORTS_INTEGRATED_DP(dev) && |
| 4338 | (I915_READ(DP_D) & DP_DETECTED)) { | ||
| 4339 | DRM_DEBUG_KMS("probing DP_D\n"); | ||
| 4425 | intel_dp_init(dev, DP_D); | 4340 | intel_dp_init(dev, DP_D); |
| 4341 | } | ||
| 4426 | } else if (IS_I8XX(dev)) | 4342 | } else if (IS_I8XX(dev)) |
| 4427 | intel_dvo_init(dev); | 4343 | intel_dvo_init(dev); |
| 4428 | 4344 | ||
| @@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
| 4527 | .fb_changed = intelfb_probe, | 4443 | .fb_changed = intelfb_probe, |
| 4528 | }; | 4444 | }; |
| 4529 | 4445 | ||
| 4446 | static struct drm_gem_object * | ||
| 4447 | intel_alloc_power_context(struct drm_device *dev) | ||
| 4448 | { | ||
| 4449 | struct drm_gem_object *pwrctx; | ||
| 4450 | int ret; | ||
| 4451 | |||
| 4452 | pwrctx = drm_gem_object_alloc(dev, 4096); | ||
| 4453 | if (!pwrctx) { | ||
| 4454 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | ||
| 4455 | return NULL; | ||
| 4456 | } | ||
| 4457 | |||
| 4458 | mutex_lock(&dev->struct_mutex); | ||
| 4459 | ret = i915_gem_object_pin(pwrctx, 4096); | ||
| 4460 | if (ret) { | ||
| 4461 | DRM_ERROR("failed to pin power context: %d\n", ret); | ||
| 4462 | goto err_unref; | ||
| 4463 | } | ||
| 4464 | |||
| 4465 | ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
| 4466 | if (ret) { | ||
| 4467 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | ||
| 4468 | goto err_unpin; | ||
| 4469 | } | ||
| 4470 | mutex_unlock(&dev->struct_mutex); | ||
| 4471 | |||
| 4472 | return pwrctx; | ||
| 4473 | |||
| 4474 | err_unpin: | ||
| 4475 | i915_gem_object_unpin(pwrctx); | ||
| 4476 | err_unref: | ||
| 4477 | drm_gem_object_unreference(pwrctx); | ||
| 4478 | mutex_unlock(&dev->struct_mutex); | ||
| 4479 | return NULL; | ||
| 4480 | } | ||
| 4481 | |||
| 4530 | void intel_init_clock_gating(struct drm_device *dev) | 4482 | void intel_init_clock_gating(struct drm_device *dev) |
| 4531 | { | 4483 | { |
| 4532 | struct drm_i915_private *dev_priv = dev->dev_private; | 4484 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
| 4579 | * GPU can automatically power down the render unit if given a page | 4531 | * GPU can automatically power down the render unit if given a page |
| 4580 | * to save state. | 4532 | * to save state. |
| 4581 | */ | 4533 | */ |
| 4582 | if (I915_HAS_RC6(dev)) { | 4534 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 4583 | struct drm_gem_object *pwrctx; | 4535 | struct drm_i915_gem_object *obj_priv = NULL; |
| 4584 | struct drm_i915_gem_object *obj_priv; | ||
| 4585 | int ret; | ||
| 4586 | 4536 | ||
| 4587 | if (dev_priv->pwrctx) { | 4537 | if (dev_priv->pwrctx) { |
| 4588 | obj_priv = dev_priv->pwrctx->driver_private; | 4538 | obj_priv = dev_priv->pwrctx->driver_private; |
| 4589 | } else { | 4539 | } else { |
| 4590 | pwrctx = drm_gem_object_alloc(dev, 4096); | 4540 | struct drm_gem_object *pwrctx; |
| 4591 | if (!pwrctx) { | ||
| 4592 | DRM_DEBUG("failed to alloc power context, " | ||
| 4593 | "RC6 disabled\n"); | ||
| 4594 | goto out; | ||
| 4595 | } | ||
| 4596 | 4541 | ||
| 4597 | ret = i915_gem_object_pin(pwrctx, 4096); | 4542 | pwrctx = intel_alloc_power_context(dev); |
| 4598 | if (ret) { | 4543 | if (pwrctx) { |
| 4599 | DRM_ERROR("failed to pin power context: %d\n", | 4544 | dev_priv->pwrctx = pwrctx; |
| 4600 | ret); | 4545 | obj_priv = pwrctx->driver_private; |
| 4601 | drm_gem_object_unreference(pwrctx); | ||
| 4602 | goto out; | ||
| 4603 | } | 4546 | } |
| 4604 | |||
| 4605 | i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
| 4606 | |||
| 4607 | dev_priv->pwrctx = pwrctx; | ||
| 4608 | obj_priv = pwrctx->driver_private; | ||
| 4609 | } | 4547 | } |
| 4610 | 4548 | ||
| 4611 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 4549 | if (obj_priv) { |
| 4612 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 4550 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); |
| 4613 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 4551 | I915_WRITE(MCHBAR_RENDER_STANDBY, |
| 4552 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | ||
| 4553 | } | ||
| 4614 | } | 4554 | } |
| 4615 | |||
| 4616 | out: | ||
| 4617 | return; | ||
| 4618 | } | 4555 | } |
| 4619 | 4556 | ||
| 4620 | /* Set up chip specific display functions */ | 4557 | /* Set up chip specific display functions */ |
| @@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
| 4770 | del_timer_sync(&intel_crtc->idle_timer); | 4707 | del_timer_sync(&intel_crtc->idle_timer); |
| 4771 | } | 4708 | } |
| 4772 | 4709 | ||
| 4773 | intel_increase_renderclock(dev, false); | ||
| 4774 | del_timer_sync(&dev_priv->idle_timer); | 4710 | del_timer_sync(&dev_priv->idle_timer); |
| 4775 | 4711 | ||
| 4776 | if (dev_priv->display.disable_fbc) | 4712 | if (dev_priv->display.disable_fbc) |
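Editor's note: read together with the i915_suspend.c hunk earlier, the RC6 setup in intel_init_clock_gating() now looks roughly like the following (a condensed sketch with error handling and locking elided; names as introduced above, not a verbatim copy of the result):

if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
	struct drm_gem_object *pwrctx = dev_priv->pwrctx ?
		dev_priv->pwrctx : intel_alloc_power_context(dev);

	if (pwrctx) {
		struct drm_i915_gem_object *obj_priv = pwrctx->driver_private;

		dev_priv->pwrctx = pwrctx;
		I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
		I915_WRITE(MCHBAR_RENDER_STANDBY,
			   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
	}
}

Since these registers are reprogrammed here, this is presumably why the explicit PWRCTXA/RENDER_STANDBY save and restore could be dropped from i915_suspend.c.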
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4e7aa8b7b938..1349d9fd01c4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
| 1402 | break; | 1402 | break; |
| 1403 | case DP_B: | 1403 | case DP_B: |
| 1404 | case PCH_DP_B: | 1404 | case PCH_DP_B: |
| 1405 | dev_priv->hotplug_supported_mask |= | ||
| 1406 | HDMIB_HOTPLUG_INT_STATUS; | ||
| 1405 | name = "DPDDC-B"; | 1407 | name = "DPDDC-B"; |
| 1406 | break; | 1408 | break; |
| 1407 | case DP_C: | 1409 | case DP_C: |
| 1408 | case PCH_DP_C: | 1410 | case PCH_DP_C: |
| 1411 | dev_priv->hotplug_supported_mask |= | ||
| 1412 | HDMIC_HOTPLUG_INT_STATUS; | ||
| 1409 | name = "DPDDC-C"; | 1413 | name = "DPDDC-C"; |
| 1410 | break; | 1414 | break; |
| 1411 | case DP_D: | 1415 | case DP_D: |
| 1412 | case PCH_DP_D: | 1416 | case PCH_DP_D: |
| 1417 | dev_priv->hotplug_supported_mask |= | ||
| 1418 | HDMID_HOTPLUG_INT_STATUS; | ||
| 1413 | name = "DPDDC-D"; | 1419 | name = "DPDDC-D"; |
| 1414 | break; | 1420 | break; |
| 1415 | } | 1421 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f04dbbe7d400..06431941b233 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
| 303 | if (sdvox_reg == SDVOB) { | 303 | if (sdvox_reg == SDVOB) { |
| 304 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 304 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
| 305 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 305 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
| 306 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
| 306 | } else if (sdvox_reg == SDVOC) { | 307 | } else if (sdvox_reg == SDVOC) { |
| 307 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 308 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
| 308 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 309 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
| 310 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
| 309 | } else if (sdvox_reg == HDMIB) { | 311 | } else if (sdvox_reg == HDMIB) { |
| 310 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 312 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
| 311 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 313 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
| 312 | "HDMIB"); | 314 | "HDMIB"); |
| 315 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
| 313 | } else if (sdvox_reg == HDMIC) { | 316 | } else if (sdvox_reg == HDMIC) { |
| 314 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 317 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
| 315 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 318 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
| 316 | "HDMIC"); | 319 | "HDMIC"); |
| 320 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
| 317 | } else if (sdvox_reg == HDMID) { | 321 | } else if (sdvox_reg == HDMID) { |
| 318 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 322 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
| 319 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 323 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
| 320 | "HDMID"); | 324 | "HDMID"); |
| 325 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | ||
| 321 | } | 326 | } |
| 322 | if (!intel_output->ddc_bus) | 327 | if (!intel_output->ddc_bus) |
| 323 | goto err_connector; | 328 | goto err_connector; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 3118ce274e67..f4b4aa242df1 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
| 608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | 608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), |
| 609 | }, | 609 | }, |
| 610 | }, | 610 | }, |
| 611 | { | ||
| 612 | .ident = "PC-81005", | ||
| 613 | .matches = { | ||
| 614 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | ||
| 615 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | ||
| 616 | }, | ||
| 617 | }, | ||
| 611 | { } | 618 | { } |
| 612 | }; | 619 | }; |
| 613 | 620 | ||
| @@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
| 679 | struct drm_i915_private *dev_priv = | 686 | struct drm_i915_private *dev_priv = |
| 680 | container_of(nb, struct drm_i915_private, lid_notifier); | 687 | container_of(nb, struct drm_i915_private, lid_notifier); |
| 681 | struct drm_device *dev = dev_priv->dev; | 688 | struct drm_device *dev = dev_priv->dev; |
| 689 | struct drm_connector *connector = dev_priv->int_lvds_connector; | ||
| 682 | 690 | ||
| 691 | /* | ||
| 692 | * check and update the status of LVDS connector after receiving | ||
| 693 | * the LID notification event. | ||
| 694 | */ | ||
| 695 | if (connector) | ||
| 696 | connector->status = connector->funcs->detect(connector); | ||
| 683 | if (!acpi_lid_open()) { | 697 | if (!acpi_lid_open()) { |
| 684 | dev_priv->modeset_on_lid = 1; | 698 | dev_priv->modeset_on_lid = 1; |
| 685 | return NOTIFY_OK; | 699 | return NOTIFY_OK; |
| @@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
| 854 | { } /* terminating entry */ | 868 | { } /* terminating entry */ |
| 855 | }; | 869 | }; |
| 856 | 870 | ||
| 857 | #ifdef CONFIG_ACPI | ||
| 858 | /* | ||
| 859 | * check_lid_device -- check whether @handle is an ACPI LID device. | ||
| 860 | * @handle: ACPI device handle | ||
| 861 | * @level : depth in the ACPI namespace tree | ||
| 862 | * @context: the number of LID device when we find the device | ||
| 863 | * @rv: a return value to fill if desired (Not use) | ||
| 864 | */ | ||
| 865 | static acpi_status | ||
| 866 | check_lid_device(acpi_handle handle, u32 level, void *context, | ||
| 867 | void **return_value) | ||
| 868 | { | ||
| 869 | struct acpi_device *acpi_dev; | ||
| 870 | int *lid_present = context; | ||
| 871 | |||
| 872 | acpi_dev = NULL; | ||
| 873 | /* Get the acpi device for device handle */ | ||
| 874 | if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { | ||
| 875 | /* If there is no ACPI device for handle, return */ | ||
| 876 | return AE_OK; | ||
| 877 | } | ||
| 878 | |||
| 879 | if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) | ||
| 880 | *lid_present = 1; | ||
| 881 | |||
| 882 | return AE_OK; | ||
| 883 | } | ||
| 884 | |||
| 885 | /** | ||
| 886 | * check whether there exists the ACPI LID device by enumerating the ACPI | ||
| 887 | * device tree. | ||
| 888 | */ | ||
| 889 | static int intel_lid_present(void) | ||
| 890 | { | ||
| 891 | int lid_present = 0; | ||
| 892 | |||
| 893 | if (acpi_disabled) { | ||
| 894 | /* If ACPI is disabled, there is no ACPI device tree to | ||
| 895 | * check, so assume the LID device would have been present. | ||
| 896 | */ | ||
| 897 | return 1; | ||
| 898 | } | ||
| 899 | |||
| 900 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
| 901 | ACPI_UINT32_MAX, | ||
| 902 | check_lid_device, NULL, &lid_present, NULL); | ||
| 903 | |||
| 904 | return lid_present; | ||
| 905 | } | ||
| 906 | #else | ||
| 907 | static int intel_lid_present(void) | ||
| 908 | { | ||
| 909 | /* In the absence of ACPI built in, assume that the LID device would | ||
| 910 | * have been present. | ||
| 911 | */ | ||
| 912 | return 1; | ||
| 913 | } | ||
| 914 | #endif | ||
| 915 | |||
| 916 | /** | 871 | /** |
| 917 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID | 872 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID |
| 918 | * @dev: drm device | 873 | * @dev: drm device |
| @@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1031 | if (dmi_check_system(intel_no_lvds)) | 986 | if (dmi_check_system(intel_no_lvds)) |
| 1032 | return; | 987 | return; |
| 1033 | 988 | ||
| 1034 | /* | 989 | if (!lvds_is_present_in_vbt(dev)) { |
| 1035 | * Assume LVDS is present if there's an ACPI lid device or if the | 990 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
| 1036 | * device is present in the VBT. | ||
| 1037 | */ | ||
| 1038 | if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { | ||
| 1039 | DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); | ||
| 1040 | return; | 991 | return; |
| 1041 | } | 992 | } |
| 1042 | 993 | ||
| @@ -1180,6 +1131,8 @@ out: | |||
| 1180 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | 1131 | DRM_DEBUG_KMS("lid notifier registration failed\n"); |
| 1181 | dev_priv->lid_notifier.notifier_call = NULL; | 1132 | dev_priv->lid_notifier.notifier_call = NULL; |
| 1182 | } | 1133 | } |
| 1134 | /* keep the LVDS connector */ | ||
| 1135 | dev_priv->int_lvds_connector = connector; | ||
| 1183 | drm_sysfs_connector_add(connector); | 1136 | drm_sysfs_connector_add(connector); |
| 1184 | return; | 1137 | return; |
| 1185 | 1138 | ||
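The intel_lvds.c hunks above cache the LVDS connector in dev_priv->int_lvds_connector and re-run its detect() callback from the lid notifier before acting on the event. A minimal standalone sketch of that refresh-before-handling pattern (hypothetical types and names, plain C rather than the DRM structures) is:

    #include <stdio.h>

    /* Hypothetical connector with a detect() hook; illustrative only. */
    enum status { STATUS_UNKNOWN, STATUS_CONNECTED, STATUS_DISCONNECTED };

    struct connector {
        enum status status;
        enum status (*detect)(struct connector *c);
    };

    static enum status fake_detect(struct connector *c)
    {
        (void)c;
        return STATUS_CONNECTED;    /* pretend the panel is present */
    }

    /* Notifier callback: refresh cached state first, then handle the event. */
    static int lid_notify(struct connector *c, int lid_open)
    {
        if (c)
            c->status = c->detect(c);

        if (!lid_open) {
            printf("lid closed, defer modeset\n");
            return 0;
        }

        printf("lid open, connector %sconnected\n",
               c && c->status == STATUS_CONNECTED ? "" : "dis");
        return 0;
    }

    int main(void)
    {
        struct connector lvds = { STATUS_UNKNOWN, fake_detect };

        lid_notify(&lvds, 0);
        lid_notify(&lvds, 1);
        return 0;
    }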
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 24a3dc99716c..de5144c8c153 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
| 2662 | 2662 | ||
| 2663 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2663 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
| 2664 | { | 2664 | { |
| 2665 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2665 | struct drm_connector *connector; | 2666 | struct drm_connector *connector; |
| 2666 | struct intel_output *intel_output; | 2667 | struct intel_output *intel_output; |
| 2667 | struct intel_sdvo_priv *sdvo_priv; | 2668 | struct intel_sdvo_priv *sdvo_priv; |
| @@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
| 2708 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2709 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
| 2709 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2710 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
| 2710 | "SDVOB/VGA DDC BUS"); | 2711 | "SDVOB/VGA DDC BUS"); |
| 2712 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | ||
| 2711 | } else { | 2713 | } else { |
| 2712 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2714 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
| 2713 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2715 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
| 2714 | "SDVOC/VGA DDC BUS"); | 2716 | "SDVOC/VGA DDC BUS"); |
| 2717 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | ||
| 2715 | } | 2718 | } |
| 2716 | 2719 | ||
| 2717 | if (intel_output->ddc_bus == NULL) | 2720 | if (intel_output->ddc_bus == NULL) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b741..1d5b9b7b033f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
| @@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev) | |||
| 1840 | drm_connector_attach_property(connector, | 1840 | drm_connector_attach_property(connector, |
| 1841 | dev->mode_config.tv_bottom_margin_property, | 1841 | dev->mode_config.tv_bottom_margin_property, |
| 1842 | tv_priv->margin[TV_MARGIN_BOTTOM]); | 1842 | tv_priv->margin[TV_MARGIN_BOTTOM]); |
| 1843 | |||
| 1844 | dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS; | ||
| 1843 | out: | 1845 | out: |
| 1844 | drm_sysfs_connector_add(connector); | 1846 | drm_sysfs_connector_add(connector); |
| 1845 | } | 1847 | } |
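The intel_hdmi.c, intel_sdvo.c and intel_tv.c hunks above all OR a per-output status bit into dev_priv->hotplug_supported_mask as each connector is registered. The sketch below shows only that bitmask bookkeeping; the bit values are invented for illustration and do not match the real *_HOTPLUG_INT_STATUS definitions in i915_reg.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bits only; the real values live in i915_reg.h. */
    #define HDMI_HOTPLUG    (1u << 0)
    #define SDVO_HOTPLUG    (1u << 1)
    #define TV_HOTPLUG      (1u << 2)

    int main(void)
    {
        uint32_t hotplug_supported_mask = 0;

        /* Each output init path contributes its own bit. */
        hotplug_supported_mask |= HDMI_HOTPLUG;
        hotplug_supported_mask |= TV_HOTPLUG;

        /* Interrupt setup can then enable only what was registered. */
        if (hotplug_supported_mask & SDVO_HOTPLUG)
            printf("enable SDVO hotplug interrupt\n");
        else
            printf("SDVO hotplug left masked\n");

        printf("mask = 0x%08x\n", hotplug_supported_mask);
        return 0;
    }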
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index b1bc1ea182b8..1175429da102 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig | |||
| @@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG | |||
| 30 | via debugfs. | 30 | via debugfs. |
| 31 | 31 | ||
| 32 | menu "I2C encoder or helper chips" | 32 | menu "I2C encoder or helper chips" |
| 33 | depends on DRM && I2C | 33 | depends on DRM && DRM_KMS_HELPER && I2C |
| 34 | 34 | ||
| 35 | config DRM_I2C_CH7006 | 35 | config DRM_I2C_CH7006 |
| 36 | tristate "Chrontel ch7006 TV encoder" | 36 | tristate "Chrontel ch7006 TV encoder" |
| 37 | depends on DRM_NOUVEAU | 37 | default m if DRM_NOUVEAU |
| 38 | default m | ||
| 39 | help | 38 | help |
| 40 | Support for Chrontel ch7006 and similar TV encoders, found | 39 | Support for Chrontel ch7006 and similar TV encoders, found |
| 41 | on some nVidia video cards. | 40 | on some nVidia video cards. |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 0cad6d834eb2..e342a418d434 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -33,10 +33,13 @@ | |||
| 33 | #include "nouveau_drv.h" | 33 | #include "nouveau_drv.h" |
| 34 | #include "nouveau_dma.h" | 34 | #include "nouveau_dma.h" |
| 35 | 35 | ||
| 36 | #include <linux/log2.h> | ||
| 37 | |||
| 36 | static void | 38 | static void |
| 37 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | 39 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) |
| 38 | { | 40 | { |
| 39 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | 41 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); |
| 42 | struct drm_device *dev = dev_priv->dev; | ||
| 40 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 43 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 41 | 44 | ||
| 42 | ttm_bo_kunmap(&nvbo->kmap); | 45 | ttm_bo_kunmap(&nvbo->kmap); |
| @@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
| 44 | if (unlikely(nvbo->gem)) | 47 | if (unlikely(nvbo->gem)) |
| 45 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 48 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
| 46 | 49 | ||
| 50 | if (nvbo->tile) | ||
| 51 | nv10_mem_expire_tiling(dev, nvbo->tile, NULL); | ||
| 52 | |||
| 47 | spin_lock(&dev_priv->ttm.bo_list_lock); | 53 | spin_lock(&dev_priv->ttm.bo_list_lock); |
| 48 | list_del(&nvbo->head); | 54 | list_del(&nvbo->head); |
| 49 | spin_unlock(&dev_priv->ttm.bo_list_lock); | 55 | spin_unlock(&dev_priv->ttm.bo_list_lock); |
| 50 | kfree(nvbo); | 56 | kfree(nvbo); |
| 51 | } | 57 | } |
| 52 | 58 | ||
| 59 | static void | ||
| 60 | nouveau_bo_fixup_align(struct drm_device *dev, | ||
| 61 | uint32_t tile_mode, uint32_t tile_flags, | ||
| 62 | int *align, int *size) | ||
| 63 | { | ||
| 64 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Some of the tile_flags have a periodic structure of N*4096 bytes, | ||
| 68 | * align to that as well as the page size. Overallocate memory to | ||
| 69 | * avoid corruption of other buffer objects. | ||
| 70 | */ | ||
| 71 | if (dev_priv->card_type == NV_50) { | ||
| 72 | uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; | ||
| 73 | int i; | ||
| 74 | |||
| 75 | switch (tile_flags) { | ||
| 76 | case 0x1800: | ||
| 77 | case 0x2800: | ||
| 78 | case 0x4800: | ||
| 79 | case 0x7a00: | ||
| 80 | *size = roundup(*size, block_size); | ||
| 81 | if (is_power_of_2(block_size)) { | ||
| 82 | *size += 3 * block_size; | ||
| 83 | for (i = 1; i < 10; i++) { | ||
| 84 | *align = 12 * i * block_size; | ||
| 85 | if (!(*align % 65536)) | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } else { | ||
| 89 | *size += 6 * block_size; | ||
| 90 | for (i = 1; i < 10; i++) { | ||
| 91 | *align = 8 * i * block_size; | ||
| 92 | if (!(*align % 65536)) | ||
| 93 | break; | ||
| 94 | } | ||
| 95 | } | ||
| 96 | break; | ||
| 97 | default: | ||
| 98 | break; | ||
| 99 | } | ||
| 100 | |||
| 101 | } else { | ||
| 102 | if (tile_mode) { | ||
| 103 | if (dev_priv->chipset >= 0x40) { | ||
| 104 | *align = 65536; | ||
| 105 | *size = roundup(*size, 64 * tile_mode); | ||
| 106 | |||
| 107 | } else if (dev_priv->chipset >= 0x30) { | ||
| 108 | *align = 32768; | ||
| 109 | *size = roundup(*size, 64 * tile_mode); | ||
| 110 | |||
| 111 | } else if (dev_priv->chipset >= 0x20) { | ||
| 112 | *align = 16384; | ||
| 113 | *size = roundup(*size, 64 * tile_mode); | ||
| 114 | |||
| 115 | } else if (dev_priv->chipset >= 0x10) { | ||
| 116 | *align = 16384; | ||
| 117 | *size = roundup(*size, 32 * tile_mode); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | /* ALIGN works only on powers of two. */ | ||
| 123 | *size = roundup(*size, PAGE_SIZE); | ||
| 124 | |||
| 125 | if (dev_priv->card_type == NV_50) { | ||
| 126 | *size = roundup(*size, 65536); | ||
| 127 | *align = max(65536, *align); | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 53 | int | 131 | int |
| 54 | nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | 132 | nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, |
| 55 | int size, int align, uint32_t flags, uint32_t tile_mode, | 133 | int size, int align, uint32_t flags, uint32_t tile_mode, |
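nouveau_bo_fixup_align() above rounds the buffer size up to a block size that is not necessarily a power of two, which is why it uses roundup() and notes that ALIGN() only works for powers of two. A small self-contained sketch of the two calculations (generic C, not the kernel macros; the 28672-byte period is just one example value):

    #include <stdio.h>
    #include <stdint.h>

    /* roundup() works for any positive multiple, like the kernel helper. */
    static uint32_t roundup_any(uint32_t x, uint32_t mult)
    {
        return ((x + mult - 1) / mult) * mult;
    }

    /* ALIGN()-style masking is only correct when 'a' is a power of two. */
    static uint32_t align_pow2(uint32_t x, uint32_t a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        uint32_t size = 100000;

        /* Non-power-of-two block size: only roundup() gives a valid result. */
        printf("roundup(%u, 28672) = %u\n", size, roundup_any(size, 28672));

        /* Power-of-two page size: both forms agree. */
        printf("roundup(%u, 4096)  = %u\n", size, roundup_any(size, 4096));
        printf("ALIGN(%u, 4096)    = %u\n", size, align_pow2(size, 4096));
        return 0;
    }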
| @@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
| 58 | { | 136 | { |
| 59 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 137 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 60 | struct nouveau_bo *nvbo; | 138 | struct nouveau_bo *nvbo; |
| 61 | int ret, n = 0; | 139 | int ret = 0; |
| 62 | 140 | ||
| 63 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); | 141 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); |
| 64 | if (!nvbo) | 142 | if (!nvbo) |
| @@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
| 70 | nvbo->tile_mode = tile_mode; | 148 | nvbo->tile_mode = tile_mode; |
| 71 | nvbo->tile_flags = tile_flags; | 149 | nvbo->tile_flags = tile_flags; |
| 72 | 150 | ||
| 73 | /* | 151 | nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); |
| 74 | * Some of the tile_flags have a periodic structure of N*4096 bytes, | ||
| 75 | * align to to that as well as the page size. Overallocate memory to | ||
| 76 | * avoid corruption of other buffer objects. | ||
| 77 | */ | ||
| 78 | switch (tile_flags) { | ||
| 79 | case 0x1800: | ||
| 80 | case 0x2800: | ||
| 81 | case 0x4800: | ||
| 82 | case 0x7a00: | ||
| 83 | if (dev_priv->chipset >= 0xA0) { | ||
| 84 | /* This is based on high end cards with 448 bits | ||
| 85 | * memory bus, could be different elsewhere.*/ | ||
| 86 | size += 6 * 28672; | ||
| 87 | /* 8 * 28672 is the actual alignment requirement, | ||
| 88 | * but we must also align to page size. */ | ||
| 89 | align = 2 * 8 * 28672; | ||
| 90 | } else if (dev_priv->chipset >= 0x90) { | ||
| 91 | size += 3 * 16384; | ||
| 92 | align = 12 * 16834; | ||
| 93 | } else { | ||
| 94 | size += 3 * 8192; | ||
| 95 | /* 12 * 8192 is the actual alignment requirement, | ||
| 96 | * but we must also align to page size. */ | ||
| 97 | align = 2 * 12 * 8192; | ||
| 98 | } | ||
| 99 | break; | ||
| 100 | default: | ||
| 101 | break; | ||
| 102 | } | ||
| 103 | |||
| 104 | align >>= PAGE_SHIFT; | 152 | align >>= PAGE_SHIFT; |
| 105 | 153 | ||
| 106 | size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); | ||
| 107 | if (dev_priv->card_type == NV_50) { | ||
| 108 | size = (size + 65535) & ~65535; | ||
| 109 | if (align < (65536 / PAGE_SIZE)) | ||
| 110 | align = (65536 / PAGE_SIZE); | ||
| 111 | } | ||
| 112 | |||
| 113 | if (flags & TTM_PL_FLAG_VRAM) | ||
| 114 | nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; | ||
| 115 | if (flags & TTM_PL_FLAG_TT) | ||
| 116 | nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; | ||
| 117 | nvbo->placement.fpfn = 0; | 154 | nvbo->placement.fpfn = 0; |
| 118 | nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; | 155 | nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; |
| 119 | nvbo->placement.placement = nvbo->placements; | 156 | nouveau_bo_placement_set(nvbo, flags); |
| 120 | nvbo->placement.busy_placement = nvbo->placements; | ||
| 121 | nvbo->placement.num_placement = n; | ||
| 122 | nvbo->placement.num_busy_placement = n; | ||
| 123 | 157 | ||
| 124 | nvbo->channel = chan; | 158 | nvbo->channel = chan; |
| 125 | nouveau_bo_placement_set(nvbo, flags); | ||
| 126 | ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, | 159 | ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, |
| 127 | ttm_bo_type_device, &nvbo->placement, align, 0, | 160 | ttm_bo_type_device, &nvbo->placement, align, 0, |
| 128 | false, NULL, size, nouveau_bo_del_ttm); | 161 | false, NULL, size, nouveau_bo_del_ttm); |
| @@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |||
| 421 | /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access | 454 | /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access |
| 422 | * TTM_PL_{VRAM,TT} directly. | 455 | * TTM_PL_{VRAM,TT} directly. |
| 423 | */ | 456 | */ |
| 457 | |||
| 424 | static int | 458 | static int |
| 425 | nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, | 459 | nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, |
| 426 | struct nouveau_bo *nvbo, bool evict, bool no_wait, | 460 | struct nouveau_bo *nvbo, bool evict, bool no_wait, |
| @@ -455,11 +489,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, | |||
| 455 | } | 489 | } |
| 456 | 490 | ||
| 457 | static int | 491 | static int |
| 458 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait, | 492 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, |
| 459 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | 493 | int no_wait, struct ttm_mem_reg *new_mem) |
| 460 | { | 494 | { |
| 461 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 495 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 462 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | 496 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); |
| 497 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 463 | struct nouveau_channel *chan; | 498 | struct nouveau_channel *chan; |
| 464 | uint64_t src_offset, dst_offset; | 499 | uint64_t src_offset, dst_offset; |
| 465 | uint32_t page_count; | 500 | uint32_t page_count; |
| @@ -547,7 +582,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 547 | 582 | ||
| 548 | placement.fpfn = placement.lpfn = 0; | 583 | placement.fpfn = placement.lpfn = 0; |
| 549 | placement.num_placement = placement.num_busy_placement = 1; | 584 | placement.num_placement = placement.num_busy_placement = 1; |
| 550 | placement.placement = &placement_memtype; | 585 | placement.placement = placement.busy_placement = &placement_memtype; |
| 551 | 586 | ||
| 552 | tmp_mem = *new_mem; | 587 | tmp_mem = *new_mem; |
| 553 | tmp_mem.mm_node = NULL; | 588 | tmp_mem.mm_node = NULL; |
| @@ -559,7 +594,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 559 | if (ret) | 594 | if (ret) |
| 560 | goto out; | 595 | goto out; |
| 561 | 596 | ||
| 562 | ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem); | 597 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem); |
| 563 | if (ret) | 598 | if (ret) |
| 564 | goto out; | 599 | goto out; |
| 565 | 600 | ||
| @@ -585,7 +620,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 585 | 620 | ||
| 586 | placement.fpfn = placement.lpfn = 0; | 621 | placement.fpfn = placement.lpfn = 0; |
| 587 | placement.num_placement = placement.num_busy_placement = 1; | 622 | placement.num_placement = placement.num_busy_placement = 1; |
| 588 | placement.placement = &placement_memtype; | 623 | placement.placement = placement.busy_placement = &placement_memtype; |
| 589 | 624 | ||
| 590 | tmp_mem = *new_mem; | 625 | tmp_mem = *new_mem; |
| 591 | tmp_mem.mm_node = NULL; | 626 | tmp_mem.mm_node = NULL; |
| @@ -597,7 +632,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 597 | if (ret) | 632 | if (ret) |
| 598 | goto out; | 633 | goto out; |
| 599 | 634 | ||
| 600 | ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem); | 635 | ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); |
| 601 | if (ret) | 636 | if (ret) |
| 602 | goto out; | 637 | goto out; |
| 603 | 638 | ||
| @@ -612,52 +647,106 @@ out: | |||
| 612 | } | 647 | } |
| 613 | 648 | ||
| 614 | static int | 649 | static int |
| 615 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | 650 | nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, |
| 616 | bool no_wait, struct ttm_mem_reg *new_mem) | 651 | struct nouveau_tile_reg **new_tile) |
| 617 | { | 652 | { |
| 618 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | 653 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); |
| 619 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
| 620 | struct drm_device *dev = dev_priv->dev; | 654 | struct drm_device *dev = dev_priv->dev; |
| 621 | struct ttm_mem_reg *old_mem = &bo->mem; | 655 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 656 | uint64_t offset; | ||
| 622 | int ret; | 657 | int ret; |
| 623 | 658 | ||
| 624 | if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM && | 659 | if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { |
| 625 | !nvbo->no_vm) { | 660 | /* Nothing to do. */ |
| 626 | uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT; | 661 | *new_tile = NULL; |
| 662 | return 0; | ||
| 663 | } | ||
| 664 | |||
| 665 | offset = new_mem->mm_node->start << PAGE_SHIFT; | ||
| 627 | 666 | ||
| 667 | if (dev_priv->card_type == NV_50) { | ||
| 628 | ret = nv50_mem_vm_bind_linear(dev, | 668 | ret = nv50_mem_vm_bind_linear(dev, |
| 629 | offset + dev_priv->vm_vram_base, | 669 | offset + dev_priv->vm_vram_base, |
| 630 | new_mem->size, nvbo->tile_flags, | 670 | new_mem->size, nvbo->tile_flags, |
| 631 | offset); | 671 | offset); |
| 632 | if (ret) | 672 | if (ret) |
| 633 | return ret; | 673 | return ret; |
| 674 | |||
| 675 | } else if (dev_priv->card_type >= NV_10) { | ||
| 676 | *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, | ||
| 677 | nvbo->tile_mode); | ||
| 634 | } | 678 | } |
| 635 | 679 | ||
| 680 | return 0; | ||
| 681 | } | ||
| 682 | |||
| 683 | static void | ||
| 684 | nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, | ||
| 685 | struct nouveau_tile_reg *new_tile, | ||
| 686 | struct nouveau_tile_reg **old_tile) | ||
| 687 | { | ||
| 688 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | ||
| 689 | struct drm_device *dev = dev_priv->dev; | ||
| 690 | |||
| 691 | if (dev_priv->card_type >= NV_10 && | ||
| 692 | dev_priv->card_type < NV_50) { | ||
| 693 | if (*old_tile) | ||
| 694 | nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj); | ||
| 695 | |||
| 696 | *old_tile = new_tile; | ||
| 697 | } | ||
| 698 | } | ||
| 699 | |||
| 700 | static int | ||
| 701 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | ||
| 702 | bool no_wait, struct ttm_mem_reg *new_mem) | ||
| 703 | { | ||
| 704 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); | ||
| 705 | struct nouveau_bo *nvbo = nouveau_bo(bo); | ||
| 706 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
| 707 | struct nouveau_tile_reg *new_tile = NULL; | ||
| 708 | int ret = 0; | ||
| 709 | |||
| 710 | ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); | ||
| 711 | if (ret) | ||
| 712 | return ret; | ||
| 713 | |||
| 714 | /* Software copy if the card isn't up and running yet. */ | ||
| 636 | if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || | 715 | if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || |
| 637 | !dev_priv->channel) | 716 | !dev_priv->channel) { |
| 638 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 717 | ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); |
| 718 | goto out; | ||
| 719 | } | ||
| 639 | 720 | ||
| 721 | /* Fake bo copy. */ | ||
| 640 | if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { | 722 | if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { |
| 641 | BUG_ON(bo->mem.mm_node != NULL); | 723 | BUG_ON(bo->mem.mm_node != NULL); |
| 642 | bo->mem = *new_mem; | 724 | bo->mem = *new_mem; |
| 643 | new_mem->mm_node = NULL; | 725 | new_mem->mm_node = NULL; |
| 644 | return 0; | 726 | goto out; |
| 645 | } | 727 | } |
| 646 | 728 | ||
| 647 | if (new_mem->mem_type == TTM_PL_SYSTEM) { | 729 | /* Hardware assisted copy. */ |
| 648 | if (old_mem->mem_type == TTM_PL_SYSTEM) | 730 | if (new_mem->mem_type == TTM_PL_SYSTEM) |
| 649 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 731 | ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem); |
| 650 | if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem)) | 732 | else if (old_mem->mem_type == TTM_PL_SYSTEM) |
| 651 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 733 | ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem); |
| 652 | } else if (old_mem->mem_type == TTM_PL_SYSTEM) { | 734 | else |
| 653 | if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem)) | 735 | ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem); |
| 654 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | ||
| 655 | } else { | ||
| 656 | if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem)) | ||
| 657 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | ||
| 658 | } | ||
| 659 | 736 | ||
| 660 | return 0; | 737 | if (!ret) |
| 738 | goto out; | ||
| 739 | |||
| 740 | /* Fallback to software copy. */ | ||
| 741 | ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | ||
| 742 | |||
| 743 | out: | ||
| 744 | if (ret) | ||
| 745 | nouveau_bo_vm_cleanup(bo, NULL, &new_tile); | ||
| 746 | else | ||
| 747 | nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); | ||
| 748 | |||
| 749 | return ret; | ||
| 661 | } | 750 | } |
| 662 | 751 | ||
| 663 | static int | 752 | static int |
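The reworked nouveau_bo_move() above goes straight to the software copy while the card is not yet initialised, otherwise tries one of the accelerated paths (flipd, flips or m2mf) and only falls back to ttm_bo_move_memcpy() when that fails, cleaning up the tile binding according to the outcome. Reduced to a schematic with stub functions and made-up return codes, the control flow is roughly:

    #include <stdio.h>

    /* Stubs standing in for the accelerated and software copy paths. */
    static int accel_copy(int works) { return works ? 0 : -1; }
    static int memcpy_copy(void)     { return 0; }

    /* Try hardware first, fall back to the CPU copy. */
    static int move_buffer(int hw_ready, int accel_ok)
    {
        int ret;

        if (!hw_ready)
            return memcpy_copy();   /* card not up yet: software only */

        ret = accel_copy(accel_ok);
        if (!ret)
            return 0;               /* accelerated move succeeded */

        return memcpy_copy();       /* fallback to software copy */
    }

    int main(void)
    {
        printf("no hw:      %d\n", move_buffer(0, 0));
        printf("accel ok:   %d\n", move_buffer(1, 1));
        printf("accel fail: %d\n", move_buffer(1, 0));
        return 0;
    }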
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 9aaa972f8822..343d718a9667 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
| @@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
| 158 | return ret; | 158 | return ret; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | nouveau_dma_pre_init(chan); | ||
| 162 | |||
| 161 | /* Locate channel's user control regs */ | 163 | /* Locate channel's user control regs */ |
| 162 | if (dev_priv->card_type < NV_40) | 164 | if (dev_priv->card_type < NV_40) |
| 163 | user = NV03_USER(channel); | 165 | user = NV03_USER(channel); |
| @@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
| 235 | return 0; | 237 | return 0; |
| 236 | } | 238 | } |
| 237 | 239 | ||
| 238 | int | ||
| 239 | nouveau_channel_idle(struct nouveau_channel *chan) | ||
| 240 | { | ||
| 241 | struct drm_device *dev = chan->dev; | ||
| 242 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 243 | struct nouveau_engine *engine = &dev_priv->engine; | ||
| 244 | uint32_t caches; | ||
| 245 | int idle; | ||
| 246 | |||
| 247 | if (!chan) { | ||
| 248 | NV_ERROR(dev, "no channel...\n"); | ||
| 249 | return 1; | ||
| 250 | } | ||
| 251 | |||
| 252 | caches = nv_rd32(dev, NV03_PFIFO_CACHES); | ||
| 253 | nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1); | ||
| 254 | |||
| 255 | if (engine->fifo.channel_id(dev) != chan->id) { | ||
| 256 | struct nouveau_gpuobj *ramfc = | ||
| 257 | chan->ramfc ? chan->ramfc->gpuobj : NULL; | ||
| 258 | |||
| 259 | if (!ramfc) { | ||
| 260 | NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id); | ||
| 261 | return 1; | ||
| 262 | } | ||
| 263 | |||
| 264 | engine->instmem.prepare_access(dev, false); | ||
| 265 | if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1)) | ||
| 266 | idle = 0; | ||
| 267 | else | ||
| 268 | idle = 1; | ||
| 269 | engine->instmem.finish_access(dev); | ||
| 270 | } else { | ||
| 271 | idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) == | ||
| 272 | nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); | ||
| 273 | } | ||
| 274 | |||
| 275 | nv_wr32(dev, NV03_PFIFO_CACHES, caches); | ||
| 276 | return idle; | ||
| 277 | } | ||
| 278 | |||
| 279 | /* stops a fifo */ | 240 | /* stops a fifo */ |
| 280 | void | 241 | void |
| 281 | nouveau_channel_free(struct nouveau_channel *chan) | 242 | nouveau_channel_free(struct nouveau_channel *chan) |
| @@ -414,7 +375,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, | |||
| 414 | init->subchan[0].grclass = 0x0039; | 375 | init->subchan[0].grclass = 0x0039; |
| 415 | else | 376 | else |
| 416 | init->subchan[0].grclass = 0x5039; | 377 | init->subchan[0].grclass = 0x5039; |
| 417 | init->nr_subchan = 1; | 378 | init->subchan[1].handle = NvSw; |
| 379 | init->subchan[1].grclass = NV_SW; | ||
| 380 | init->nr_subchan = 2; | ||
| 418 | 381 | ||
| 419 | /* Named memory object area */ | 382 | /* Named memory object area */ |
| 420 | ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, | 383 | ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 703553687b20..7afbe8b40d51 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
| @@ -29,12 +29,22 @@ | |||
| 29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
| 30 | #include "nouveau_dma.h" | 30 | #include "nouveau_dma.h" |
| 31 | 31 | ||
| 32 | void | ||
| 33 | nouveau_dma_pre_init(struct nouveau_channel *chan) | ||
| 34 | { | ||
| 35 | chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; | ||
| 36 | chan->dma.put = 0; | ||
| 37 | chan->dma.cur = chan->dma.put; | ||
| 38 | chan->dma.free = chan->dma.max - chan->dma.cur; | ||
| 39 | } | ||
| 40 | |||
| 32 | int | 41 | int |
| 33 | nouveau_dma_init(struct nouveau_channel *chan) | 42 | nouveau_dma_init(struct nouveau_channel *chan) |
| 34 | { | 43 | { |
| 35 | struct drm_device *dev = chan->dev; | 44 | struct drm_device *dev = chan->dev; |
| 36 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 45 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 37 | struct nouveau_gpuobj *m2mf = NULL; | 46 | struct nouveau_gpuobj *m2mf = NULL; |
| 47 | struct nouveau_gpuobj *nvsw = NULL; | ||
| 38 | int ret, i; | 48 | int ret, i; |
| 39 | 49 | ||
| 40 | /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ | 50 | /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ |
| @@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
| 47 | if (ret) | 57 | if (ret) |
| 48 | return ret; | 58 | return ret; |
| 49 | 59 | ||
| 60 | /* Create an NV_SW object for various sync purposes */ | ||
| 61 | ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw); | ||
| 62 | if (ret) | ||
| 63 | return ret; | ||
| 64 | |||
| 65 | ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL); | ||
| 66 | if (ret) | ||
| 67 | return ret; | ||
| 68 | |||
| 50 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ | 69 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ |
| 51 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); | 70 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); |
| 52 | if (ret) | 71 | if (ret) |
| @@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
| 64 | return ret; | 83 | return ret; |
| 65 | } | 84 | } |
| 66 | 85 | ||
| 67 | /* Initialise DMA vars */ | ||
| 68 | chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; | ||
| 69 | chan->dma.put = 0; | ||
| 70 | chan->dma.cur = chan->dma.put; | ||
| 71 | chan->dma.free = chan->dma.max - chan->dma.cur; | ||
| 72 | |||
| 73 | /* Insert NOPS for NOUVEAU_DMA_SKIPS */ | 86 | /* Insert NOPS for NOUVEAU_DMA_SKIPS */ |
| 74 | ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); | 87 | ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); |
| 75 | if (ret) | 88 | if (ret) |
| @@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
| 87 | BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); | 100 | BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); |
| 88 | OUT_RING(chan, NvNotify0); | 101 | OUT_RING(chan, NvNotify0); |
| 89 | 102 | ||
| 103 | /* Initialise NV_SW */ | ||
| 104 | ret = RING_SPACE(chan, 2); | ||
| 105 | if (ret) | ||
| 106 | return ret; | ||
| 107 | BEGIN_RING(chan, NvSubSw, 0, 1); | ||
| 108 | OUT_RING(chan, NvSw); | ||
| 109 | |||
| 90 | /* Sit back and pray the channel works.. */ | 110 | /* Sit back and pray the channel works.. */ |
| 91 | FIRE_RING(chan); | 111 | FIRE_RING(chan); |
| 92 | 112 | ||
| @@ -113,7 +133,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *get) | |||
| 113 | 133 | ||
| 114 | val = nvchan_rd32(chan, chan->user_get); | 134 | val = nvchan_rd32(chan, chan->user_get); |
| 115 | if (val < chan->pushbuf_base || | 135 | if (val < chan->pushbuf_base || |
| 116 | val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) { | 136 | val > chan->pushbuf_base + (chan->dma.max << 2)) { |
| 117 | /* meaningless to dma_wait() except to know whether the | 137 | /* meaningless to dma_wait() except to know whether the |
| 118 | * GPU has stalled or not | 138 | * GPU has stalled or not |
| 119 | */ | 139 | */ |
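nouveau_dma_pre_init() above only precomputes the pushbuffer bookkeeping (max, put, cur, free) from the buffer size so it is valid before the first submission. The sketch below shows the same kind of ring accounting in isolation; the wrap handling is a generic circular-buffer illustration, not a copy of nouveau's exact logic.

    #include <stdio.h>
    #include <stdint.h>

    struct ring {
        uint32_t max;   /* last usable slot index */
        uint32_t put;   /* where the CPU writes next */
        uint32_t get;   /* where the consumer reads (simulated) */
    };

    /* Free slots between put and get in a circular buffer (illustrative). */
    static uint32_t ring_free(const struct ring *r)
    {
        if (r->get > r->put)
            return r->get - r->put - 1;
        return r->max - r->put + r->get;
    }

    int main(void)
    {
        /* Pretend a 4096-byte pushbuf of 32-bit words, minus 2 reserved. */
        struct ring r = { .max = (4096 >> 2) - 2, .put = 0, .get = 0 };

        printf("free at start: %u words\n", ring_free(&r));

        r.put = 1000;   /* CPU queued 1000 words */
        r.get = 100;    /* consumer has read 100 of them */
        printf("free later:    %u words\n", ring_free(&r));
        return 0;
    }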
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 04e85d8f757e..dabfd655f93e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h | |||
| @@ -46,10 +46,11 @@ | |||
| 46 | /* Hardcoded object assignments to subchannels (subchannel id). */ | 46 | /* Hardcoded object assignments to subchannels (subchannel id). */ |
| 47 | enum { | 47 | enum { |
| 48 | NvSubM2MF = 0, | 48 | NvSubM2MF = 0, |
| 49 | NvSub2D = 1, | 49 | NvSubSw = 1, |
| 50 | NvSubCtxSurf2D = 1, | 50 | NvSub2D = 2, |
| 51 | NvSubGdiRect = 2, | 51 | NvSubCtxSurf2D = 2, |
| 52 | NvSubImageBlit = 3 | 52 | NvSubGdiRect = 3, |
| 53 | NvSubImageBlit = 4 | ||
| 53 | }; | 54 | }; |
| 54 | 55 | ||
| 55 | /* Object handles. */ | 56 | /* Object handles. */ |
| @@ -67,6 +68,7 @@ enum { | |||
| 67 | NvClipRect = 0x8000000b, | 68 | NvClipRect = 0x8000000b, |
| 68 | NvGdiRect = 0x8000000c, | 69 | NvGdiRect = 0x8000000c, |
| 69 | NvImageBlit = 0x8000000d, | 70 | NvImageBlit = 0x8000000d, |
| 71 | NvSw = 0x8000000e, | ||
| 70 | 72 | ||
| 71 | /* G80+ display objects */ | 73 | /* G80+ display objects */ |
| 72 | NvEvoVRAM = 0x01000000, | 74 | NvEvoVRAM = 0x01000000, |
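The nouveau_dma.h hunk above reserves subchannel 1 for the new NvSw object and shifts the 2D objects up, and nouveau_dma_init() binds NvSw by writing method 0 on that subchannel. A toy model of the subchannel-to-handle binding table (nothing here is real hardware state, and the NvM2MF handle value is hypothetical) could look like:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SUBCHANNELS 8

    enum { NvSubM2MF = 0, NvSubSw = 1 };
    #define NvM2MF  0x80000001u     /* hypothetical handle value */
    #define NvSw    0x8000000eu     /* handle as in the diff above */

    static uint32_t bound[NUM_SUBCHANNELS];

    /* Method 0 on a subchannel conventionally binds an object handle to it. */
    static void bind_object(int subc, uint32_t handle)
    {
        bound[subc] = handle;
        printf("subchannel %d -> 0x%08x\n", subc, handle);
    }

    int main(void)
    {
        bind_object(NvSubM2MF, NvM2MF);
        bind_object(NvSubSw, NvSw);
        return 0;
    }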
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5f8cbb79c499..026419fe8791 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -59,11 +59,19 @@ struct nouveau_grctx; | |||
| 59 | #define MAX_NUM_DCB_ENTRIES 16 | 59 | #define MAX_NUM_DCB_ENTRIES 16 |
| 60 | 60 | ||
| 61 | #define NOUVEAU_MAX_CHANNEL_NR 128 | 61 | #define NOUVEAU_MAX_CHANNEL_NR 128 |
| 62 | #define NOUVEAU_MAX_TILE_NR 15 | ||
| 62 | 63 | ||
| 63 | #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) | 64 | #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) |
| 64 | #define NV50_VM_BLOCK (512*1024*1024ULL) | 65 | #define NV50_VM_BLOCK (512*1024*1024ULL) |
| 65 | #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) | 66 | #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) |
| 66 | 67 | ||
| 68 | struct nouveau_tile_reg { | ||
| 69 | struct nouveau_fence *fence; | ||
| 70 | uint32_t addr; | ||
| 71 | uint32_t size; | ||
| 72 | bool used; | ||
| 73 | }; | ||
| 74 | |||
| 67 | struct nouveau_bo { | 75 | struct nouveau_bo { |
| 68 | struct ttm_buffer_object bo; | 76 | struct ttm_buffer_object bo; |
| 69 | struct ttm_placement placement; | 77 | struct ttm_placement placement; |
| @@ -83,6 +91,7 @@ struct nouveau_bo { | |||
| 83 | 91 | ||
| 84 | uint32_t tile_mode; | 92 | uint32_t tile_mode; |
| 85 | uint32_t tile_flags; | 93 | uint32_t tile_flags; |
| 94 | struct nouveau_tile_reg *tile; | ||
| 86 | 95 | ||
| 87 | struct drm_gem_object *gem; | 96 | struct drm_gem_object *gem; |
| 88 | struct drm_file *cpu_filp; | 97 | struct drm_file *cpu_filp; |
| @@ -277,8 +286,13 @@ struct nouveau_timer_engine { | |||
| 277 | }; | 286 | }; |
| 278 | 287 | ||
| 279 | struct nouveau_fb_engine { | 288 | struct nouveau_fb_engine { |
| 289 | int num_tiles; | ||
| 290 | |||
| 280 | int (*init)(struct drm_device *dev); | 291 | int (*init)(struct drm_device *dev); |
| 281 | void (*takedown)(struct drm_device *dev); | 292 | void (*takedown)(struct drm_device *dev); |
| 293 | |||
| 294 | void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, | ||
| 295 | uint32_t size, uint32_t pitch); | ||
| 282 | }; | 296 | }; |
| 283 | 297 | ||
| 284 | struct nouveau_fifo_engine { | 298 | struct nouveau_fifo_engine { |
| @@ -292,6 +306,8 @@ struct nouveau_fifo_engine { | |||
| 292 | void (*disable)(struct drm_device *); | 306 | void (*disable)(struct drm_device *); |
| 293 | void (*enable)(struct drm_device *); | 307 | void (*enable)(struct drm_device *); |
| 294 | bool (*reassign)(struct drm_device *, bool enable); | 308 | bool (*reassign)(struct drm_device *, bool enable); |
| 309 | bool (*cache_flush)(struct drm_device *dev); | ||
| 310 | bool (*cache_pull)(struct drm_device *dev, bool enable); | ||
| 295 | 311 | ||
| 296 | int (*channel_id)(struct drm_device *); | 312 | int (*channel_id)(struct drm_device *); |
| 297 | 313 | ||
| @@ -330,6 +346,9 @@ struct nouveau_pgraph_engine { | |||
| 330 | void (*destroy_context)(struct nouveau_channel *); | 346 | void (*destroy_context)(struct nouveau_channel *); |
| 331 | int (*load_context)(struct nouveau_channel *); | 347 | int (*load_context)(struct nouveau_channel *); |
| 332 | int (*unload_context)(struct drm_device *); | 348 | int (*unload_context)(struct drm_device *); |
| 349 | |||
| 350 | void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, | ||
| 351 | uint32_t size, uint32_t pitch); | ||
| 333 | }; | 352 | }; |
| 334 | 353 | ||
| 335 | struct nouveau_engine { | 354 | struct nouveau_engine { |
| @@ -548,6 +567,12 @@ struct drm_nouveau_private { | |||
| 548 | unsigned long sg_handle; | 567 | unsigned long sg_handle; |
| 549 | } gart_info; | 568 | } gart_info; |
| 550 | 569 | ||
| 570 | /* nv10-nv40 tiling regions */ | ||
| 571 | struct { | ||
| 572 | struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; | ||
| 573 | spinlock_t lock; | ||
| 574 | } tile; | ||
| 575 | |||
| 551 | /* G8x/G9x virtual address space */ | 576 | /* G8x/G9x virtual address space */ |
| 552 | uint64_t vm_gart_base; | 577 | uint64_t vm_gart_base; |
| 553 | uint64_t vm_gart_size; | 578 | uint64_t vm_gart_size; |
| @@ -685,6 +710,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); | |||
| 685 | extern int nouveau_mem_init(struct drm_device *); | 710 | extern int nouveau_mem_init(struct drm_device *); |
| 686 | extern int nouveau_mem_init_agp(struct drm_device *); | 711 | extern int nouveau_mem_init_agp(struct drm_device *); |
| 687 | extern void nouveau_mem_close(struct drm_device *); | 712 | extern void nouveau_mem_close(struct drm_device *); |
| 713 | extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, | ||
| 714 | uint32_t addr, | ||
| 715 | uint32_t size, | ||
| 716 | uint32_t pitch); | ||
| 717 | extern void nv10_mem_expire_tiling(struct drm_device *dev, | ||
| 718 | struct nouveau_tile_reg *tile, | ||
| 719 | struct nouveau_fence *fence); | ||
| 688 | extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, | 720 | extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, |
| 689 | uint32_t size, uint32_t flags, | 721 | uint32_t size, uint32_t flags, |
| 690 | uint64_t phys); | 722 | uint64_t phys); |
| @@ -713,7 +745,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev, | |||
| 713 | struct drm_file *file_priv, | 745 | struct drm_file *file_priv, |
| 714 | uint32_t fb_ctxdma, uint32_t tt_ctxdma); | 746 | uint32_t fb_ctxdma, uint32_t tt_ctxdma); |
| 715 | extern void nouveau_channel_free(struct nouveau_channel *); | 747 | extern void nouveau_channel_free(struct nouveau_channel *); |
| 716 | extern int nouveau_channel_idle(struct nouveau_channel *chan); | ||
| 717 | 748 | ||
| 718 | /* nouveau_object.c */ | 749 | /* nouveau_object.c */ |
| 719 | extern int nouveau_gpuobj_early_init(struct drm_device *); | 750 | extern int nouveau_gpuobj_early_init(struct drm_device *); |
| @@ -756,6 +787,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, | |||
| 756 | uint32_t *o_ret); | 787 | uint32_t *o_ret); |
| 757 | extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, | 788 | extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, |
| 758 | struct nouveau_gpuobj **); | 789 | struct nouveau_gpuobj **); |
| 790 | extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, | ||
| 791 | struct nouveau_gpuobj **); | ||
| 759 | extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, | 792 | extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, |
| 760 | struct drm_file *); | 793 | struct drm_file *); |
| 761 | extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, | 794 | extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, |
| @@ -804,6 +837,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan) | |||
| 804 | #endif | 837 | #endif |
| 805 | 838 | ||
| 806 | /* nouveau_dma.c */ | 839 | /* nouveau_dma.c */ |
| 840 | extern void nouveau_dma_pre_init(struct nouveau_channel *); | ||
| 807 | extern int nouveau_dma_init(struct nouveau_channel *); | 841 | extern int nouveau_dma_init(struct nouveau_channel *); |
| 808 | extern int nouveau_dma_wait(struct nouveau_channel *, int size); | 842 | extern int nouveau_dma_wait(struct nouveau_channel *, int size); |
| 809 | 843 | ||
| @@ -879,16 +913,22 @@ extern void nv04_fb_takedown(struct drm_device *); | |||
| 879 | /* nv10_fb.c */ | 913 | /* nv10_fb.c */ |
| 880 | extern int nv10_fb_init(struct drm_device *); | 914 | extern int nv10_fb_init(struct drm_device *); |
| 881 | extern void nv10_fb_takedown(struct drm_device *); | 915 | extern void nv10_fb_takedown(struct drm_device *); |
| 916 | extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, | ||
| 917 | uint32_t, uint32_t); | ||
| 882 | 918 | ||
| 883 | /* nv40_fb.c */ | 919 | /* nv40_fb.c */ |
| 884 | extern int nv40_fb_init(struct drm_device *); | 920 | extern int nv40_fb_init(struct drm_device *); |
| 885 | extern void nv40_fb_takedown(struct drm_device *); | 921 | extern void nv40_fb_takedown(struct drm_device *); |
| 922 | extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, | ||
| 923 | uint32_t, uint32_t); | ||
| 886 | 924 | ||
| 887 | /* nv04_fifo.c */ | 925 | /* nv04_fifo.c */ |
| 888 | extern int nv04_fifo_init(struct drm_device *); | 926 | extern int nv04_fifo_init(struct drm_device *); |
| 889 | extern void nv04_fifo_disable(struct drm_device *); | 927 | extern void nv04_fifo_disable(struct drm_device *); |
| 890 | extern void nv04_fifo_enable(struct drm_device *); | 928 | extern void nv04_fifo_enable(struct drm_device *); |
| 891 | extern bool nv04_fifo_reassign(struct drm_device *, bool); | 929 | extern bool nv04_fifo_reassign(struct drm_device *, bool); |
| 930 | extern bool nv04_fifo_cache_flush(struct drm_device *); | ||
| 931 | extern bool nv04_fifo_cache_pull(struct drm_device *, bool); | ||
| 892 | extern int nv04_fifo_channel_id(struct drm_device *); | 932 | extern int nv04_fifo_channel_id(struct drm_device *); |
| 893 | extern int nv04_fifo_create_context(struct nouveau_channel *); | 933 | extern int nv04_fifo_create_context(struct nouveau_channel *); |
| 894 | extern void nv04_fifo_destroy_context(struct nouveau_channel *); | 934 | extern void nv04_fifo_destroy_context(struct nouveau_channel *); |
| @@ -941,6 +981,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *); | |||
| 941 | extern int nv10_graph_load_context(struct nouveau_channel *); | 981 | extern int nv10_graph_load_context(struct nouveau_channel *); |
| 942 | extern int nv10_graph_unload_context(struct drm_device *); | 982 | extern int nv10_graph_unload_context(struct drm_device *); |
| 943 | extern void nv10_graph_context_switch(struct drm_device *); | 983 | extern void nv10_graph_context_switch(struct drm_device *); |
| 984 | extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, | ||
| 985 | uint32_t, uint32_t); | ||
| 944 | 986 | ||
| 945 | /* nv20_graph.c */ | 987 | /* nv20_graph.c */ |
| 946 | extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; | 988 | extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; |
| @@ -952,6 +994,8 @@ extern int nv20_graph_unload_context(struct drm_device *); | |||
| 952 | extern int nv20_graph_init(struct drm_device *); | 994 | extern int nv20_graph_init(struct drm_device *); |
| 953 | extern void nv20_graph_takedown(struct drm_device *); | 995 | extern void nv20_graph_takedown(struct drm_device *); |
| 954 | extern int nv30_graph_init(struct drm_device *); | 996 | extern int nv30_graph_init(struct drm_device *); |
| 997 | extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, | ||
| 998 | uint32_t, uint32_t); | ||
| 955 | 999 | ||
| 956 | /* nv40_graph.c */ | 1000 | /* nv40_graph.c */ |
| 957 | extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; | 1001 | extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; |
| @@ -963,6 +1007,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *); | |||
| 963 | extern int nv40_graph_load_context(struct nouveau_channel *); | 1007 | extern int nv40_graph_load_context(struct nouveau_channel *); |
| 964 | extern int nv40_graph_unload_context(struct drm_device *); | 1008 | extern int nv40_graph_unload_context(struct drm_device *); |
| 965 | extern void nv40_grctx_init(struct nouveau_grctx *); | 1009 | extern void nv40_grctx_init(struct nouveau_grctx *); |
| 1010 | extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, | ||
| 1011 | uint32_t, uint32_t); | ||
| 966 | 1012 | ||
| 967 | /* nv50_graph.c */ | 1013 | /* nv50_graph.c */ |
| 968 | extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; | 1014 | extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; |
| @@ -1030,8 +1076,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, | |||
| 1030 | 1076 | ||
| 1031 | /* nv04_dac.c */ | 1077 | /* nv04_dac.c */ |
| 1032 | extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); | 1078 | extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); |
| 1033 | extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | 1079 | extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); |
| 1034 | struct drm_connector *connector); | ||
| 1035 | extern int nv04_dac_output_offset(struct drm_encoder *encoder); | 1080 | extern int nv04_dac_output_offset(struct drm_encoder *encoder); |
| 1036 | extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); | 1081 | extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); |
| 1037 | 1082 | ||
| @@ -1049,9 +1094,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry); | |||
| 1049 | 1094 | ||
| 1050 | /* nv17_tv.c */ | 1095 | /* nv17_tv.c */ |
| 1051 | extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); | 1096 | extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); |
| 1052 | extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | ||
| 1053 | struct drm_connector *connector, | ||
| 1054 | uint32_t pin_mask); | ||
| 1055 | 1097 | ||
| 1056 | /* nv04_display.c */ | 1098 | /* nv04_display.c */ |
| 1057 | extern int nv04_display_create(struct drm_device *); | 1099 | extern int nv04_display_create(struct drm_device *); |
| @@ -1290,14 +1332,14 @@ nv_two_reg_pll(struct drm_device *dev) | |||
| 1290 | return false; | 1332 | return false; |
| 1291 | } | 1333 | } |
| 1292 | 1334 | ||
| 1293 | #define NV50_NVSW 0x0000506e | 1335 | #define NV_SW 0x0000506e |
| 1294 | #define NV50_NVSW_DMA_SEMAPHORE 0x00000060 | 1336 | #define NV_SW_DMA_SEMAPHORE 0x00000060 |
| 1295 | #define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064 | 1337 | #define NV_SW_SEMAPHORE_OFFSET 0x00000064 |
| 1296 | #define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068 | 1338 | #define NV_SW_SEMAPHORE_ACQUIRE 0x00000068 |
| 1297 | #define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c | 1339 | #define NV_SW_SEMAPHORE_RELEASE 0x0000006c |
| 1298 | #define NV50_NVSW_DMA_VBLSEM 0x0000018c | 1340 | #define NV_SW_DMA_VBLSEM 0x0000018c |
| 1299 | #define NV50_NVSW_VBLSEM_OFFSET 0x00000400 | 1341 | #define NV_SW_VBLSEM_OFFSET 0x00000400 |
| 1300 | #define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404 | 1342 | #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 |
| 1301 | #define NV50_NVSW_VBLSEM_RELEASE 0x00000408 | 1343 | #define NV_SW_VBLSEM_RELEASE 0x00000408 |
| 1302 | 1344 | ||
| 1303 | #endif /* __NOUVEAU_DRV_H__ */ | 1345 | #endif /* __NOUVEAU_DRV_H__ */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 84af25c238b6..0b05c869e0e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
| 64 | return 0; | 64 | return 0; |
| 65 | 65 | ||
| 66 | if (RING_SPACE(chan, 4)) { | 66 | if (RING_SPACE(chan, 4)) { |
| 67 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 67 | nouveau_fbcon_gpu_lockup(info); |
| 68 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 69 | return 0; | 68 | return 0; |
| 70 | } | 69 | } |
| 71 | 70 | ||
| @@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
| 86 | } | 85 | } |
| 87 | 86 | ||
| 88 | if (ret) { | 87 | if (ret) { |
| 89 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 88 | nouveau_fbcon_gpu_lockup(info); |
| 90 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 91 | return 0; | 89 | return 0; |
| 92 | } | 90 | } |
| 93 | 91 | ||
| @@ -212,11 +210,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
| 212 | 210 | ||
| 213 | mode_cmd.bpp = surface_bpp; | 211 | mode_cmd.bpp = surface_bpp; |
| 214 | mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); | 212 | mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); |
| 215 | mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256); | 213 | mode_cmd.pitch = roundup(mode_cmd.pitch, 256); |
| 216 | mode_cmd.depth = surface_depth; | 214 | mode_cmd.depth = surface_depth; |
| 217 | 215 | ||
| 218 | size = mode_cmd.pitch * mode_cmd.height; | 216 | size = mode_cmd.pitch * mode_cmd.height; |
| 219 | size = ALIGN(size, PAGE_SIZE); | 217 | size = roundup(size, PAGE_SIZE); |
| 220 | 218 | ||
| 221 | ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, | 219 | ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, |
| 222 | 0, 0x0000, false, true, &nvbo); | 220 | 0, 0x0000, false, true, &nvbo); |
| @@ -380,3 +378,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) | |||
| 380 | 378 | ||
| 381 | return 0; | 379 | return 0; |
| 382 | } | 380 | } |
| 381 | |||
| 382 | void nouveau_fbcon_gpu_lockup(struct fb_info *info) | ||
| 383 | { | ||
| 384 | struct nouveau_fbcon_par *par = info->par; | ||
| 385 | struct drm_device *dev = par->dev; | ||
| 386 | |||
| 387 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | ||
| 388 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 389 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 8531140fedbc..462e0b87b4bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
| @@ -43,5 +43,6 @@ void nouveau_fbcon_zfill(struct drm_device *dev); | |||
| 43 | int nv04_fbcon_accel_init(struct fb_info *info); | 43 | int nv04_fbcon_accel_init(struct fb_info *info); |
| 44 | int nv50_fbcon_accel_init(struct fb_info *info); | 44 | int nv50_fbcon_accel_init(struct fb_info *info); |
| 45 | 45 | ||
| 46 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); | ||
| 46 | #endif /* __NV50_FBCON_H__ */ | 47 | #endif /* __NV50_FBCON_H__ */ |
| 47 | 48 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index dacac9a0842a..faddf53ff9ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
| @@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) | |||
| 142 | list_add_tail(&fence->entry, &chan->fence.pending); | 142 | list_add_tail(&fence->entry, &chan->fence.pending); |
| 143 | spin_unlock_irqrestore(&chan->fence.lock, flags); | 143 | spin_unlock_irqrestore(&chan->fence.lock, flags); |
| 144 | 144 | ||
| 145 | BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1); | 145 | BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); |
| 146 | OUT_RING(chan, fence->sequence); | 146 | OUT_RING(chan, fence->sequence); |
| 147 | FIRE_RING(chan); | 147 | FIRE_RING(chan); |
| 148 | 148 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 18fd8ac9fca7..2009db2426c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, | |||
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | struct validate_op { | 222 | struct validate_op { |
| 223 | struct nouveau_fence *fence; | ||
| 224 | struct list_head vram_list; | 223 | struct list_head vram_list; |
| 225 | struct list_head gart_list; | 224 | struct list_head gart_list; |
| 226 | struct list_head both_list; | 225 | struct list_head both_list; |
| @@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) | |||
| 252 | } | 251 | } |
| 253 | 252 | ||
| 254 | static void | 253 | static void |
| 255 | validate_fini(struct validate_op *op, bool success) | 254 | validate_fini(struct validate_op *op, struct nouveau_fence* fence) |
| 256 | { | 255 | { |
| 257 | struct nouveau_fence *fence = op->fence; | 256 | validate_fini_list(&op->vram_list, fence); |
| 258 | 257 | validate_fini_list(&op->gart_list, fence); | |
| 259 | if (unlikely(!success)) | 258 | validate_fini_list(&op->both_list, fence); |
| 260 | op->fence = NULL; | ||
| 261 | |||
| 262 | validate_fini_list(&op->vram_list, op->fence); | ||
| 263 | validate_fini_list(&op->gart_list, op->fence); | ||
| 264 | validate_fini_list(&op->both_list, op->fence); | ||
| 265 | nouveau_fence_unref((void *)&fence); | ||
| 266 | } | 259 | } |
| 267 | 260 | ||
| 268 | static int | 261 | static int |
| @@ -420,10 +413,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, | |||
| 420 | INIT_LIST_HEAD(&op->gart_list); | 413 | INIT_LIST_HEAD(&op->gart_list); |
| 421 | INIT_LIST_HEAD(&op->both_list); | 414 | INIT_LIST_HEAD(&op->both_list); |
| 422 | 415 | ||
| 423 | ret = nouveau_fence_new(chan, &op->fence, false); | ||
| 424 | if (ret) | ||
| 425 | return ret; | ||
| 426 | |||
| 427 | if (nr_buffers == 0) | 416 | if (nr_buffers == 0) |
| 428 | return 0; | 417 | return 0; |
| 429 | 418 | ||
| @@ -541,6 +530,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 541 | struct drm_nouveau_gem_pushbuf_bo *bo = NULL; | 530 | struct drm_nouveau_gem_pushbuf_bo *bo = NULL; |
| 542 | struct nouveau_channel *chan; | 531 | struct nouveau_channel *chan; |
| 543 | struct validate_op op; | 532 | struct validate_op op; |
| 533 | struct nouveau_fence* fence = 0; | ||
| 544 | uint32_t *pushbuf = NULL; | 534 | uint32_t *pushbuf = NULL; |
| 545 | int ret = 0, do_reloc = 0, i; | 535 | int ret = 0, do_reloc = 0, i; |
| 546 | 536 | ||
| @@ -597,7 +587,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 597 | 587 | ||
| 598 | OUT_RINGp(chan, pushbuf, req->nr_dwords); | 588 | OUT_RINGp(chan, pushbuf, req->nr_dwords); |
| 599 | 589 | ||
| 600 | ret = nouveau_fence_emit(op.fence); | 590 | ret = nouveau_fence_new(chan, &fence, true); |
| 601 | if (ret) { | 591 | if (ret) { |
| 602 | NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); | 592 | NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); |
| 603 | WIND_RING(chan); | 593 | WIND_RING(chan); |
| @@ -605,7 +595,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 605 | } | 595 | } |
| 606 | 596 | ||
| 607 | if (nouveau_gem_pushbuf_sync(chan)) { | 597 | if (nouveau_gem_pushbuf_sync(chan)) { |
| 608 | ret = nouveau_fence_wait(op.fence, NULL, false, false); | 598 | ret = nouveau_fence_wait(fence, NULL, false, false); |
| 609 | if (ret) { | 599 | if (ret) { |
| 610 | for (i = 0; i < req->nr_dwords; i++) | 600 | for (i = 0; i < req->nr_dwords; i++) |
| 611 | NV_ERROR(dev, "0x%08x\n", pushbuf[i]); | 601 | NV_ERROR(dev, "0x%08x\n", pushbuf[i]); |
| @@ -614,7 +604,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 614 | } | 604 | } |
| 615 | 605 | ||
| 616 | out: | 606 | out: |
| 617 | validate_fini(&op, ret == 0); | 607 | validate_fini(&op, fence); |
| 608 | nouveau_fence_unref((void**)&fence); | ||
| 618 | mutex_unlock(&dev->struct_mutex); | 609 | mutex_unlock(&dev->struct_mutex); |
| 619 | kfree(pushbuf); | 610 | kfree(pushbuf); |
| 620 | kfree(bo); | 611 | kfree(bo); |
| @@ -634,6 +625,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, | |||
| 634 | struct drm_gem_object *gem; | 625 | struct drm_gem_object *gem; |
| 635 | struct nouveau_bo *pbbo; | 626 | struct nouveau_bo *pbbo; |
| 636 | struct validate_op op; | 627 | struct validate_op op; |
| 628 | struct nouveau_fence* fence = 0; | ||
| 637 | int i, ret = 0, do_reloc = 0; | 629 | int i, ret = 0, do_reloc = 0; |
| 638 | 630 | ||
| 639 | NOUVEAU_CHECK_INITIALISED_WITH_RETURN; | 631 | NOUVEAU_CHECK_INITIALISED_WITH_RETURN; |
| @@ -772,7 +764,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, | |||
| 772 | OUT_RING(chan, 0); | 764 | OUT_RING(chan, 0); |
| 773 | } | 765 | } |
| 774 | 766 | ||
| 775 | ret = nouveau_fence_emit(op.fence); | 767 | ret = nouveau_fence_new(chan, &fence, true); |
| 776 | if (ret) { | 768 | if (ret) { |
| 777 | NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); | 769 | NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); |
| 778 | WIND_RING(chan); | 770 | WIND_RING(chan); |
| @@ -780,7 +772,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, | |||
| 780 | } | 772 | } |
| 781 | 773 | ||
| 782 | out: | 774 | out: |
| 783 | validate_fini(&op, ret == 0); | 775 | validate_fini(&op, fence); |
| 776 | nouveau_fence_unref((void**)&fence); | ||
| 784 | mutex_unlock(&dev->struct_mutex); | 777 | mutex_unlock(&dev->struct_mutex); |
| 785 | kfree(bo); | 778 | kfree(bo); |
| 786 | 779 | ||
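The two pushbuf hunks above change fence ownership: instead of emitting a pre-allocated op.fence, the ioctl now creates a fence with nouveau_fence_new() once the commands are in the ring, hands it to validate_fini() so the validated buffers can take their own references, and finally drops its local reference with nouveau_fence_unref(). A minimal standalone sketch of that reference-counting handoff follows; the fence type, the buffer list and the helper names are simplified stand-ins, not the driver's real API.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct nouveau_fence: just a refcount. */
struct fence {
	int refcount;
};

static int fence_new(struct fence **pfence)
{
	struct fence *f = calloc(1, sizeof(*f));

	if (!f)
		return -1;
	f->refcount = 1;                     /* caller owns one reference */
	*pfence = f;
	return 0;
}

static void fence_ref(struct fence *f)
{
	f->refcount++;
}

static void fence_unref(struct fence **pfence)
{
	struct fence *f = *pfence;

	*pfence = NULL;
	if (f && --f->refcount == 0)
		free(f);
}

/* Stand-in for validate_fini(): each validated buffer keeps a reference. */
static void validate_fini(int nr_buffers, struct fence *f, struct fence **per_bo)
{
	int i;

	for (i = 0; i < nr_buffers; i++) {
		per_bo[i] = f;
		if (f)
			fence_ref(f);
	}
}

int main(void)
{
	struct fence *fence = NULL;
	struct fence *bo_fences[2] = { NULL, NULL };

	/* ... commands would be emitted to the ring here ... */
	if (fence_new(&fence))
		return 1;

	validate_fini(2, fence, bo_fences);  /* buffers now hold extra refs */
	fence_unref(&fence);                 /* drop the submitter's ref    */

	assert(bo_fences[0]->refcount == 2); /* still alive for the BOs     */
	fence_unref(&bo_fences[0]);
	fence_unref(&bo_fences[1]);          /* last unref frees the fence  */
	printf("fence lifecycle ok\n");
	return 0;
}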
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 370c72c968d1..919a619ca7fa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
| @@ -635,6 +635,7 @@ nv50_pgraph_irq_handler(struct drm_device *dev) | |||
| 635 | 635 | ||
| 636 | if ((nv_rd32(dev, 0x400500) & isb) != isb) | 636 | if ((nv_rd32(dev, 0x400500) & isb) != isb) |
| 637 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); | 637 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); |
| 638 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
| 638 | } | 639 | } |
| 639 | 640 | ||
| 640 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 641 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 5158a12f7844..fb9bdd6edf1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) | |||
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | /* | 194 | /* |
| 195 | * NV10-NV40 tiling helpers | ||
| 196 | */ | ||
| 197 | |||
| 198 | static void | ||
| 199 | nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
| 200 | uint32_t size, uint32_t pitch) | ||
| 201 | { | ||
| 202 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 203 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; | ||
| 204 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
| 205 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 206 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | ||
| 207 | |||
| 208 | tile->addr = addr; | ||
| 209 | tile->size = size; | ||
| 210 | tile->used = !!pitch; | ||
| 211 | nouveau_fence_unref((void **)&tile->fence); | ||
| 212 | |||
| 213 | if (!pfifo->cache_flush(dev)) | ||
| 214 | return; | ||
| 215 | |||
| 216 | pfifo->reassign(dev, false); | ||
| 217 | pfifo->cache_flush(dev); | ||
| 218 | pfifo->cache_pull(dev, false); | ||
| 219 | |||
| 220 | nouveau_wait_for_idle(dev); | ||
| 221 | |||
| 222 | pgraph->set_region_tiling(dev, i, addr, size, pitch); | ||
| 223 | pfb->set_region_tiling(dev, i, addr, size, pitch); | ||
| 224 | |||
| 225 | pfifo->cache_pull(dev, true); | ||
| 226 | pfifo->reassign(dev, true); | ||
| 227 | } | ||
| 228 | |||
| 229 | struct nouveau_tile_reg * | ||
| 230 | nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, | ||
| 231 | uint32_t pitch) | ||
| 232 | { | ||
| 233 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 234 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
| 235 | struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL; | ||
| 236 | int i; | ||
| 237 | |||
| 238 | spin_lock(&dev_priv->tile.lock); | ||
| 239 | |||
| 240 | for (i = 0; i < pfb->num_tiles; i++) { | ||
| 241 | if (tile[i].used) | ||
| 242 | /* Tile region in use. */ | ||
| 243 | continue; | ||
| 244 | |||
| 245 | if (tile[i].fence && | ||
| 246 | !nouveau_fence_signalled(tile[i].fence, NULL)) | ||
| 247 | /* Pending tile region. */ | ||
| 248 | continue; | ||
| 249 | |||
| 250 | if (max(tile[i].addr, addr) < | ||
| 251 | min(tile[i].addr + tile[i].size, addr + size)) | ||
| 252 | /* Kill an intersecting tile region. */ | ||
| 253 | nv10_mem_set_region_tiling(dev, i, 0, 0, 0); | ||
| 254 | |||
| 255 | if (pitch && !found) { | ||
| 256 | /* Free tile region. */ | ||
| 257 | nv10_mem_set_region_tiling(dev, i, addr, size, pitch); | ||
| 258 | found = &tile[i]; | ||
| 259 | } | ||
| 260 | } | ||
| 261 | |||
| 262 | spin_unlock(&dev_priv->tile.lock); | ||
| 263 | |||
| 264 | return found; | ||
| 265 | } | ||
| 266 | |||
| 267 | void | ||
| 268 | nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, | ||
| 269 | struct nouveau_fence *fence) | ||
| 270 | { | ||
| 271 | if (fence) { | ||
| 272 | /* Mark it as pending. */ | ||
| 273 | tile->fence = fence; | ||
| 274 | nouveau_fence_ref(fence); | ||
| 275 | } | ||
| 276 | |||
| 277 | tile->used = false; | ||
| 278 | } | ||
| 279 | |||
| 280 | /* | ||
| 195 | * NV50 VM helpers | 281 | * NV50 VM helpers |
| 196 | */ | 282 | */ |
| 197 | int | 283 | int |
| @@ -513,6 +599,7 @@ nouveau_mem_init(struct drm_device *dev) | |||
| 513 | 599 | ||
| 514 | INIT_LIST_HEAD(&dev_priv->ttm.bo_list); | 600 | INIT_LIST_HEAD(&dev_priv->ttm.bo_list); |
| 515 | spin_lock_init(&dev_priv->ttm.bo_list_lock); | 601 | spin_lock_init(&dev_priv->ttm.bo_list_lock); |
| 602 | spin_lock_init(&dev_priv->tile.lock); | ||
| 516 | 603 | ||
| 517 | dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); | 604 | dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); |
| 518 | 605 | ||
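nv10_mem_set_tiling() above walks the PFB tile regions, skips slots that are in use or still fenced, kills any slot whose address range intersects the requested one, and claims the first free slot. The intersection test is the usual half-open interval rule: [a, a+as) and [b, b+bs) overlap iff max(a, b) < min(a+as, b+bs). A small self-contained check of that rule, with illustrative values only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t u32_max(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t u32_min(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Half-open interval overlap, as used for the tile regions. */
static int ranges_overlap(uint32_t addr0, uint32_t size0,
			  uint32_t addr1, uint32_t size1)
{
	return u32_max(addr0, addr1) < u32_min(addr0 + size0, addr1 + size1);
}

int main(void)
{
	/* [0x1000, 0x3000) vs [0x2000, 0x4000): overlap */
	assert(ranges_overlap(0x1000, 0x2000, 0x2000, 0x2000));
	/* [0x1000, 0x2000) vs [0x2000, 0x3000): adjacent, no overlap */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));
	printf("overlap checks ok\n");
	return 0;
}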
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 93379bb81bea..6c2cf81716df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
| @@ -881,7 +881,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, | |||
| 881 | return 0; | 881 | return 0; |
| 882 | } | 882 | } |
| 883 | 883 | ||
| 884 | static int | 884 | int |
| 885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | 885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, |
| 886 | struct nouveau_gpuobj **gpuobj_ret) | 886 | struct nouveau_gpuobj **gpuobj_ret) |
| 887 | { | 887 | { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index fa1b0e7165b9..251f1b3b38b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h | |||
| @@ -349,19 +349,19 @@ | |||
| 349 | #define NV04_PGRAPH_BLEND 0x00400824 | 349 | #define NV04_PGRAPH_BLEND 0x00400824 |
| 350 | #define NV04_PGRAPH_STORED_FMT 0x00400830 | 350 | #define NV04_PGRAPH_STORED_FMT 0x00400830 |
| 351 | #define NV04_PGRAPH_PATT_COLORRAM 0x00400900 | 351 | #define NV04_PGRAPH_PATT_COLORRAM 0x00400900 |
| 352 | #define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) | 352 | #define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16)) |
| 353 | #define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) | 353 | #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) |
| 354 | #define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) | 354 | #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) |
| 355 | #define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) | 355 | #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) |
| 356 | #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) | 356 | #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) |
| 357 | #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) | 357 | #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) |
| 358 | #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) | 358 | #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) |
| 359 | #define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) | 359 | #define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) |
| 360 | #define NV04_PGRAPH_U_RAM 0x00400D00 | 360 | #define NV04_PGRAPH_U_RAM 0x00400D00 |
| 361 | #define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) | 361 | #define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16)) |
| 362 | #define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) | 362 | #define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16)) |
| 363 | #define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) | 363 | #define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16)) |
| 364 | #define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) | 364 | #define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) |
| 365 | #define NV04_PGRAPH_V_RAM 0x00400D40 | 365 | #define NV04_PGRAPH_V_RAM 0x00400D40 |
| 366 | #define NV04_PGRAPH_W_RAM 0x00400D80 | 366 | #define NV04_PGRAPH_W_RAM 0x00400D80 |
| 367 | #define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 | 367 | #define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e76ec2d207a9..09b9a46dfc0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 76 | engine->fifo.disable = nv04_fifo_disable; | 76 | engine->fifo.disable = nv04_fifo_disable; |
| 77 | engine->fifo.enable = nv04_fifo_enable; | 77 | engine->fifo.enable = nv04_fifo_enable; |
| 78 | engine->fifo.reassign = nv04_fifo_reassign; | 78 | engine->fifo.reassign = nv04_fifo_reassign; |
| 79 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
| 80 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
| 79 | engine->fifo.channel_id = nv04_fifo_channel_id; | 81 | engine->fifo.channel_id = nv04_fifo_channel_id; |
| 80 | engine->fifo.create_context = nv04_fifo_create_context; | 82 | engine->fifo.create_context = nv04_fifo_create_context; |
| 81 | engine->fifo.destroy_context = nv04_fifo_destroy_context; | 83 | engine->fifo.destroy_context = nv04_fifo_destroy_context; |
| @@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 100 | engine->timer.takedown = nv04_timer_takedown; | 102 | engine->timer.takedown = nv04_timer_takedown; |
| 101 | engine->fb.init = nv10_fb_init; | 103 | engine->fb.init = nv10_fb_init; |
| 102 | engine->fb.takedown = nv10_fb_takedown; | 104 | engine->fb.takedown = nv10_fb_takedown; |
| 105 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | ||
| 103 | engine->graph.grclass = nv10_graph_grclass; | 106 | engine->graph.grclass = nv10_graph_grclass; |
| 104 | engine->graph.init = nv10_graph_init; | 107 | engine->graph.init = nv10_graph_init; |
| 105 | engine->graph.takedown = nv10_graph_takedown; | 108 | engine->graph.takedown = nv10_graph_takedown; |
| @@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 109 | engine->graph.fifo_access = nv04_graph_fifo_access; | 112 | engine->graph.fifo_access = nv04_graph_fifo_access; |
| 110 | engine->graph.load_context = nv10_graph_load_context; | 113 | engine->graph.load_context = nv10_graph_load_context; |
| 111 | engine->graph.unload_context = nv10_graph_unload_context; | 114 | engine->graph.unload_context = nv10_graph_unload_context; |
| 115 | engine->graph.set_region_tiling = nv10_graph_set_region_tiling; | ||
| 112 | engine->fifo.channels = 32; | 116 | engine->fifo.channels = 32; |
| 113 | engine->fifo.init = nv10_fifo_init; | 117 | engine->fifo.init = nv10_fifo_init; |
| 114 | engine->fifo.takedown = nouveau_stub_takedown; | 118 | engine->fifo.takedown = nouveau_stub_takedown; |
| 115 | engine->fifo.disable = nv04_fifo_disable; | 119 | engine->fifo.disable = nv04_fifo_disable; |
| 116 | engine->fifo.enable = nv04_fifo_enable; | 120 | engine->fifo.enable = nv04_fifo_enable; |
| 117 | engine->fifo.reassign = nv04_fifo_reassign; | 121 | engine->fifo.reassign = nv04_fifo_reassign; |
| 122 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
| 123 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
| 118 | engine->fifo.channel_id = nv10_fifo_channel_id; | 124 | engine->fifo.channel_id = nv10_fifo_channel_id; |
| 119 | engine->fifo.create_context = nv10_fifo_create_context; | 125 | engine->fifo.create_context = nv10_fifo_create_context; |
| 120 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 126 | engine->fifo.destroy_context = nv10_fifo_destroy_context; |
| @@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 139 | engine->timer.takedown = nv04_timer_takedown; | 145 | engine->timer.takedown = nv04_timer_takedown; |
| 140 | engine->fb.init = nv10_fb_init; | 146 | engine->fb.init = nv10_fb_init; |
| 141 | engine->fb.takedown = nv10_fb_takedown; | 147 | engine->fb.takedown = nv10_fb_takedown; |
| 148 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | ||
| 142 | engine->graph.grclass = nv20_graph_grclass; | 149 | engine->graph.grclass = nv20_graph_grclass; |
| 143 | engine->graph.init = nv20_graph_init; | 150 | engine->graph.init = nv20_graph_init; |
| 144 | engine->graph.takedown = nv20_graph_takedown; | 151 | engine->graph.takedown = nv20_graph_takedown; |
| @@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 148 | engine->graph.fifo_access = nv04_graph_fifo_access; | 155 | engine->graph.fifo_access = nv04_graph_fifo_access; |
| 149 | engine->graph.load_context = nv20_graph_load_context; | 156 | engine->graph.load_context = nv20_graph_load_context; |
| 150 | engine->graph.unload_context = nv20_graph_unload_context; | 157 | engine->graph.unload_context = nv20_graph_unload_context; |
| 158 | engine->graph.set_region_tiling = nv20_graph_set_region_tiling; | ||
| 151 | engine->fifo.channels = 32; | 159 | engine->fifo.channels = 32; |
| 152 | engine->fifo.init = nv10_fifo_init; | 160 | engine->fifo.init = nv10_fifo_init; |
| 153 | engine->fifo.takedown = nouveau_stub_takedown; | 161 | engine->fifo.takedown = nouveau_stub_takedown; |
| 154 | engine->fifo.disable = nv04_fifo_disable; | 162 | engine->fifo.disable = nv04_fifo_disable; |
| 155 | engine->fifo.enable = nv04_fifo_enable; | 163 | engine->fifo.enable = nv04_fifo_enable; |
| 156 | engine->fifo.reassign = nv04_fifo_reassign; | 164 | engine->fifo.reassign = nv04_fifo_reassign; |
| 165 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
| 166 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
| 157 | engine->fifo.channel_id = nv10_fifo_channel_id; | 167 | engine->fifo.channel_id = nv10_fifo_channel_id; |
| 158 | engine->fifo.create_context = nv10_fifo_create_context; | 168 | engine->fifo.create_context = nv10_fifo_create_context; |
| 159 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 169 | engine->fifo.destroy_context = nv10_fifo_destroy_context; |
| @@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 178 | engine->timer.takedown = nv04_timer_takedown; | 188 | engine->timer.takedown = nv04_timer_takedown; |
| 179 | engine->fb.init = nv10_fb_init; | 189 | engine->fb.init = nv10_fb_init; |
| 180 | engine->fb.takedown = nv10_fb_takedown; | 190 | engine->fb.takedown = nv10_fb_takedown; |
| 191 | engine->fb.set_region_tiling = nv10_fb_set_region_tiling; | ||
| 181 | engine->graph.grclass = nv30_graph_grclass; | 192 | engine->graph.grclass = nv30_graph_grclass; |
| 182 | engine->graph.init = nv30_graph_init; | 193 | engine->graph.init = nv30_graph_init; |
| 183 | engine->graph.takedown = nv20_graph_takedown; | 194 | engine->graph.takedown = nv20_graph_takedown; |
| @@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 187 | engine->graph.destroy_context = nv20_graph_destroy_context; | 198 | engine->graph.destroy_context = nv20_graph_destroy_context; |
| 188 | engine->graph.load_context = nv20_graph_load_context; | 199 | engine->graph.load_context = nv20_graph_load_context; |
| 189 | engine->graph.unload_context = nv20_graph_unload_context; | 200 | engine->graph.unload_context = nv20_graph_unload_context; |
| 201 | engine->graph.set_region_tiling = nv20_graph_set_region_tiling; | ||
| 190 | engine->fifo.channels = 32; | 202 | engine->fifo.channels = 32; |
| 191 | engine->fifo.init = nv10_fifo_init; | 203 | engine->fifo.init = nv10_fifo_init; |
| 192 | engine->fifo.takedown = nouveau_stub_takedown; | 204 | engine->fifo.takedown = nouveau_stub_takedown; |
| 193 | engine->fifo.disable = nv04_fifo_disable; | 205 | engine->fifo.disable = nv04_fifo_disable; |
| 194 | engine->fifo.enable = nv04_fifo_enable; | 206 | engine->fifo.enable = nv04_fifo_enable; |
| 195 | engine->fifo.reassign = nv04_fifo_reassign; | 207 | engine->fifo.reassign = nv04_fifo_reassign; |
| 208 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
| 209 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
| 196 | engine->fifo.channel_id = nv10_fifo_channel_id; | 210 | engine->fifo.channel_id = nv10_fifo_channel_id; |
| 197 | engine->fifo.create_context = nv10_fifo_create_context; | 211 | engine->fifo.create_context = nv10_fifo_create_context; |
| 198 | engine->fifo.destroy_context = nv10_fifo_destroy_context; | 212 | engine->fifo.destroy_context = nv10_fifo_destroy_context; |
| @@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 218 | engine->timer.takedown = nv04_timer_takedown; | 232 | engine->timer.takedown = nv04_timer_takedown; |
| 219 | engine->fb.init = nv40_fb_init; | 233 | engine->fb.init = nv40_fb_init; |
| 220 | engine->fb.takedown = nv40_fb_takedown; | 234 | engine->fb.takedown = nv40_fb_takedown; |
| 235 | engine->fb.set_region_tiling = nv40_fb_set_region_tiling; | ||
| 221 | engine->graph.grclass = nv40_graph_grclass; | 236 | engine->graph.grclass = nv40_graph_grclass; |
| 222 | engine->graph.init = nv40_graph_init; | 237 | engine->graph.init = nv40_graph_init; |
| 223 | engine->graph.takedown = nv40_graph_takedown; | 238 | engine->graph.takedown = nv40_graph_takedown; |
| @@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 227 | engine->graph.destroy_context = nv40_graph_destroy_context; | 242 | engine->graph.destroy_context = nv40_graph_destroy_context; |
| 228 | engine->graph.load_context = nv40_graph_load_context; | 243 | engine->graph.load_context = nv40_graph_load_context; |
| 229 | engine->graph.unload_context = nv40_graph_unload_context; | 244 | engine->graph.unload_context = nv40_graph_unload_context; |
| 245 | engine->graph.set_region_tiling = nv40_graph_set_region_tiling; | ||
| 230 | engine->fifo.channels = 32; | 246 | engine->fifo.channels = 32; |
| 231 | engine->fifo.init = nv40_fifo_init; | 247 | engine->fifo.init = nv40_fifo_init; |
| 232 | engine->fifo.takedown = nouveau_stub_takedown; | 248 | engine->fifo.takedown = nouveau_stub_takedown; |
| 233 | engine->fifo.disable = nv04_fifo_disable; | 249 | engine->fifo.disable = nv04_fifo_disable; |
| 234 | engine->fifo.enable = nv04_fifo_enable; | 250 | engine->fifo.enable = nv04_fifo_enable; |
| 235 | engine->fifo.reassign = nv04_fifo_reassign; | 251 | engine->fifo.reassign = nv04_fifo_reassign; |
| 252 | engine->fifo.cache_flush = nv04_fifo_cache_flush; | ||
| 253 | engine->fifo.cache_pull = nv04_fifo_cache_pull; | ||
| 236 | engine->fifo.channel_id = nv10_fifo_channel_id; | 254 | engine->fifo.channel_id = nv10_fifo_channel_id; |
| 237 | engine->fifo.create_context = nv40_fifo_create_context; | 255 | engine->fifo.create_context = nv40_fifo_create_context; |
| 238 | engine->fifo.destroy_context = nv40_fifo_destroy_context; | 256 | engine->fifo.destroy_context = nv40_fifo_destroy_context; |
| @@ -624,7 +642,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
| 624 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; | 642 | dev_priv->chipset = (reg0 & 0xff00000) >> 20; |
| 625 | /* NV04 or NV05 */ | 643 | /* NV04 or NV05 */ |
| 626 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { | 644 | } else if ((reg0 & 0xff00fff0) == 0x20004000) { |
| 627 | dev_priv->chipset = 0x04; | 645 | if (reg0 & 0x00f00000) |
| 646 | dev_priv->chipset = 0x05; | ||
| 647 | else | ||
| 648 | dev_priv->chipset = 0x04; | ||
| 628 | } else | 649 | } else |
| 629 | dev_priv->chipset = 0xff; | 650 | dev_priv->chipset = 0xff; |
| 630 | 651 | ||
| @@ -704,8 +725,8 @@ static void nouveau_close(struct drm_device *dev) | |||
| 704 | { | 725 | { |
| 705 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 726 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 706 | 727 | ||
| 707 | /* In the case of an error dev_priv may not be be allocated yet */ | 728 | /* In the case of an error dev_priv may not be allocated yet */ |
| 708 | if (dev_priv && dev_priv->card_type) | 729 | if (dev_priv) |
| 709 | nouveau_card_takedown(dev); | 730 | nouveau_card_takedown(dev); |
| 710 | } | 731 | } |
| 711 | 732 | ||
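The nouveau_load() hunk refines chipset detection from the boot register: when (reg0 & 0xff00fff0) == 0x20004000, a non-zero 0x00f00000 field now means NV05 rather than NV04. A standalone sketch of just that branch (other chip families are elided, and the test register values are synthetic, chosen only to exercise both cases):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* NV04 vs NV05 decode from the boot register, mirroring only the
 * branch visible in the hunk above; 0xff means "not this family". */
static unsigned decode_nv04_family(uint32_t reg0)
{
	if ((reg0 & 0xff00fff0) != 0x20004000)
		return 0xff;
	return (reg0 & 0x00f00000) ? 0x05 : 0x04;
}

int main(void)
{
	assert(decode_nv04_family(0x20004000) == 0x04);
	assert(decode_nv04_family(0x20104000) == 0x05);
	printf("chipset decode ok\n");
	return 0;
}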
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 187eb84e4da5..c385d50f041b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
| @@ -28,45 +28,17 @@ | |||
| 28 | 28 | ||
| 29 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
| 30 | 30 | ||
| 31 | static struct vm_operations_struct nouveau_ttm_vm_ops; | ||
| 32 | static const struct vm_operations_struct *ttm_vm_ops; | ||
| 33 | |||
| 34 | static int | ||
| 35 | nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
| 36 | { | ||
| 37 | struct ttm_buffer_object *bo = vma->vm_private_data; | ||
| 38 | int ret; | ||
| 39 | |||
| 40 | if (unlikely(bo == NULL)) | ||
| 41 | return VM_FAULT_NOPAGE; | ||
| 42 | |||
| 43 | ret = ttm_vm_ops->fault(vma, vmf); | ||
| 44 | return ret; | ||
| 45 | } | ||
| 46 | |||
| 47 | int | 31 | int |
| 48 | nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) | 32 | nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) |
| 49 | { | 33 | { |
| 50 | struct drm_file *file_priv = filp->private_data; | 34 | struct drm_file *file_priv = filp->private_data; |
| 51 | struct drm_nouveau_private *dev_priv = | 35 | struct drm_nouveau_private *dev_priv = |
| 52 | file_priv->minor->dev->dev_private; | 36 | file_priv->minor->dev->dev_private; |
| 53 | int ret; | ||
| 54 | 37 | ||
| 55 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) | 38 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) |
| 56 | return drm_mmap(filp, vma); | 39 | return drm_mmap(filp, vma); |
| 57 | 40 | ||
| 58 | ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); | 41 | return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); |
| 59 | if (unlikely(ret != 0)) | ||
| 60 | return ret; | ||
| 61 | |||
| 62 | if (unlikely(ttm_vm_ops == NULL)) { | ||
| 63 | ttm_vm_ops = vma->vm_ops; | ||
| 64 | nouveau_ttm_vm_ops = *ttm_vm_ops; | ||
| 65 | nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault; | ||
| 66 | } | ||
| 67 | |||
| 68 | vma->vm_ops = &nouveau_ttm_vm_ops; | ||
| 69 | return 0; | ||
| 70 | } | 42 | } |
| 71 | 43 | ||
| 72 | static int | 44 | static int |
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index d9f32879ba38..d0e038d28948 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
| @@ -212,16 +212,15 @@ out: | |||
| 212 | return connector_status_disconnected; | 212 | return connector_status_disconnected; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | 215 | uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) |
| 216 | struct drm_connector *connector) | ||
| 217 | { | 216 | { |
| 218 | struct drm_device *dev = encoder->dev; | 217 | struct drm_device *dev = encoder->dev; |
| 219 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 218 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 220 | struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; | 219 | struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; |
| 221 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); | 220 | uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); |
| 222 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, | 221 | uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, |
| 223 | saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; | 222 | saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; |
| 224 | int head, present = 0; | 223 | int head; |
| 225 | 224 | ||
| 226 | #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) | 225 | #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) |
| 227 | if (dcb->type == OUTPUT_TV) { | 226 | if (dcb->type == OUTPUT_TV) { |
| @@ -287,13 +286,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | |||
| 287 | temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); | 286 | temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); |
| 288 | msleep(5); | 287 | msleep(5); |
| 289 | 288 | ||
| 290 | temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); | 289 | sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); |
| 291 | |||
| 292 | if (dcb->type == OUTPUT_TV) | ||
| 293 | present = (nv17_tv_detect(encoder, connector, temp) | ||
| 294 | == connector_status_connected); | ||
| 295 | else | ||
| 296 | present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI; | ||
| 297 | 290 | ||
| 298 | temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); | 291 | temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); |
| 299 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, | 292 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, |
| @@ -310,15 +303,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder, | |||
| 310 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); | 303 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); |
| 311 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); | 304 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); |
| 312 | 305 | ||
| 313 | if (present) { | 306 | return sample; |
| 314 | NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); | 307 | } |
| 308 | |||
| 309 | static enum drm_connector_status | ||
| 310 | nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
| 311 | { | ||
| 312 | struct drm_device *dev = encoder->dev; | ||
| 313 | struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; | ||
| 314 | uint32_t sample = nv17_dac_sample_load(encoder); | ||
| 315 | |||
| 316 | if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { | ||
| 317 | NV_INFO(dev, "Load detected on output %c\n", | ||
| 318 | '@' + ffs(dcb->or)); | ||
| 315 | return connector_status_connected; | 319 | return connector_status_connected; |
| 320 | } else { | ||
| 321 | return connector_status_disconnected; | ||
| 316 | } | 322 | } |
| 317 | |||
| 318 | return connector_status_disconnected; | ||
| 319 | } | 323 | } |
| 320 | 324 | ||
| 321 | |||
| 322 | static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, | 325 | static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, |
| 323 | struct drm_display_mode *mode, | 326 | struct drm_display_mode *mode, |
| 324 | struct drm_display_mode *adjusted_mode) | 327 | struct drm_display_mode *adjusted_mode) |
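The nv04_dac.c hunks split load detection in two: nv17_dac_sample_load() now only drives the test pattern and returns the raw NV_PRAMDAC_TEST_CONTROL sample, and a new static nv17_dac_detect() interprets it, so the TV encoder can reuse the sampling path. A tiny sketch of that sample/interpret split; the sense-bit position below is hypothetical, standing in for NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SENSE_ALLHI (1u << 12)   /* hypothetical bit position */

/* Stand-in for nv17_dac_sample_load(): return the raw sampled bits. */
static uint32_t sample_load(uint32_t fake_hw_sample)
{
	return fake_hw_sample;
}

/* Stand-in for nv17_dac_detect(): interpret the sample. */
static bool load_detected(uint32_t fake_hw_sample)
{
	return (sample_load(fake_hw_sample) & SENSE_ALLHI) != 0;
}

int main(void)
{
	assert(load_detected(SENSE_ALLHI));
	assert(!load_detected(0));
	printf("load detect split ok\n");
	return 0;
}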
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 09a31071ee58..d910873c1368 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
| 39 | return; | 39 | return; |
| 40 | 40 | ||
| 41 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { | 41 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { |
| 42 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 42 | nouveau_fbcon_gpu_lockup(info); |
| 43 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 44 | } | 43 | } |
| 45 | 44 | ||
| 46 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 45 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
| @@ -62,14 +61,12 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 62 | struct drm_device *dev = par->dev; | 61 | struct drm_device *dev = par->dev; |
| 63 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 62 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 64 | struct nouveau_channel *chan = dev_priv->channel; | 63 | struct nouveau_channel *chan = dev_priv->channel; |
| 65 | uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color]; | ||
| 66 | 64 | ||
| 67 | if (info->state != FBINFO_STATE_RUNNING) | 65 | if (info->state != FBINFO_STATE_RUNNING) |
| 68 | return; | 66 | return; |
| 69 | 67 | ||
| 70 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { | 68 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { |
| 71 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 69 | nouveau_fbcon_gpu_lockup(info); |
| 72 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 73 | } | 70 | } |
| 74 | 71 | ||
| 75 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 72 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
| @@ -80,7 +77,11 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 80 | BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); | 77 | BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); |
| 81 | OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); | 78 | OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); |
| 82 | BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); | 79 | BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); |
| 83 | OUT_RING(chan, color); | 80 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 81 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) | ||
| 82 | OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); | ||
| 83 | else | ||
| 84 | OUT_RING(chan, rect->color); | ||
| 84 | BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); | 85 | BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); |
| 85 | OUT_RING(chan, (rect->dx << 16) | rect->dy); | 86 | OUT_RING(chan, (rect->dx << 16) | rect->dy); |
| 86 | OUT_RING(chan, (rect->width << 16) | rect->height); | 87 | OUT_RING(chan, (rect->width << 16) | rect->height); |
| @@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 109 | } | 110 | } |
| 110 | 111 | ||
| 111 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { | 112 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { |
| 112 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 113 | nouveau_fbcon_gpu_lockup(info); |
| 113 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 116 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
| @@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 144 | int iter_len = dsize > 128 ? 128 : dsize; | 144 | int iter_len = dsize > 128 ? 128 : dsize; |
| 145 | 145 | ||
| 146 | if (RING_SPACE(chan, iter_len + 1)) { | 146 | if (RING_SPACE(chan, iter_len + 1)) { |
| 147 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 147 | nouveau_fbcon_gpu_lockup(info); |
| 148 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 149 | cfb_imageblit(info, image); | 148 | cfb_imageblit(info, image); |
| 150 | return; | 149 | return; |
| 151 | } | 150 | } |
| @@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
| 184 | struct drm_device *dev = par->dev; | 183 | struct drm_device *dev = par->dev; |
| 185 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 184 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 186 | struct nouveau_channel *chan = dev_priv->channel; | 185 | struct nouveau_channel *chan = dev_priv->channel; |
| 186 | const int sub = NvSubCtxSurf2D; | ||
| 187 | int surface_fmt, pattern_fmt, rect_fmt; | 187 | int surface_fmt, pattern_fmt, rect_fmt; |
| 188 | int ret; | 188 | int ret; |
| 189 | 189 | ||
| @@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
| 242 | return ret; | 242 | return ret; |
| 243 | 243 | ||
| 244 | if (RING_SPACE(chan, 49)) { | 244 | if (RING_SPACE(chan, 49)) { |
| 245 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 245 | nouveau_fbcon_gpu_lockup(info); |
| 246 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 247 | return 0; | 246 | return 0; |
| 248 | } | 247 | } |
| 249 | 248 | ||
| 250 | BEGIN_RING(chan, 1, 0x0000, 1); | 249 | BEGIN_RING(chan, sub, 0x0000, 1); |
| 251 | OUT_RING(chan, NvCtxSurf2D); | 250 | OUT_RING(chan, NvCtxSurf2D); |
| 252 | BEGIN_RING(chan, 1, 0x0184, 2); | 251 | BEGIN_RING(chan, sub, 0x0184, 2); |
| 253 | OUT_RING(chan, NvDmaFB); | 252 | OUT_RING(chan, NvDmaFB); |
| 254 | OUT_RING(chan, NvDmaFB); | 253 | OUT_RING(chan, NvDmaFB); |
| 255 | BEGIN_RING(chan, 1, 0x0300, 4); | 254 | BEGIN_RING(chan, sub, 0x0300, 4); |
| 256 | OUT_RING(chan, surface_fmt); | 255 | OUT_RING(chan, surface_fmt); |
| 257 | OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); | 256 | OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); |
| 258 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); | 257 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); |
| 259 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); | 258 | OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); |
| 260 | 259 | ||
| 261 | BEGIN_RING(chan, 1, 0x0000, 1); | 260 | BEGIN_RING(chan, sub, 0x0000, 1); |
| 262 | OUT_RING(chan, NvRop); | 261 | OUT_RING(chan, NvRop); |
| 263 | BEGIN_RING(chan, 1, 0x0300, 1); | 262 | BEGIN_RING(chan, sub, 0x0300, 1); |
| 264 | OUT_RING(chan, 0x55); | 263 | OUT_RING(chan, 0x55); |
| 265 | 264 | ||
| 266 | BEGIN_RING(chan, 1, 0x0000, 1); | 265 | BEGIN_RING(chan, sub, 0x0000, 1); |
| 267 | OUT_RING(chan, NvImagePatt); | 266 | OUT_RING(chan, NvImagePatt); |
| 268 | BEGIN_RING(chan, 1, 0x0300, 8); | 267 | BEGIN_RING(chan, sub, 0x0300, 8); |
| 269 | OUT_RING(chan, pattern_fmt); | 268 | OUT_RING(chan, pattern_fmt); |
| 270 | #ifdef __BIG_ENDIAN | 269 | #ifdef __BIG_ENDIAN |
| 271 | OUT_RING(chan, 2); | 270 | OUT_RING(chan, 2); |
| @@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
| 279 | OUT_RING(chan, ~0); | 278 | OUT_RING(chan, ~0); |
| 280 | OUT_RING(chan, ~0); | 279 | OUT_RING(chan, ~0); |
| 281 | 280 | ||
| 282 | BEGIN_RING(chan, 1, 0x0000, 1); | 281 | BEGIN_RING(chan, sub, 0x0000, 1); |
| 283 | OUT_RING(chan, NvClipRect); | 282 | OUT_RING(chan, NvClipRect); |
| 284 | BEGIN_RING(chan, 1, 0x0300, 2); | 283 | BEGIN_RING(chan, sub, 0x0300, 2); |
| 285 | OUT_RING(chan, 0); | 284 | OUT_RING(chan, 0); |
| 286 | OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); | 285 | OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); |
| 287 | 286 | ||
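The fillrect hunk above stops resolving the color unconditionally through the pseudo-palette: for truecolor and directcolor visuals rect->color indexes the palette, while for other visuals it already is the raw value to write. A self-contained sketch of that selection; the visual enum here is illustrative rather than the fb.h FB_VISUAL_* constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative visuals; the kernel uses the FB_VISUAL_* constants. */
enum visual { VIS_TRUECOLOR, VIS_DIRECTCOLOR, VIS_PSEUDOCOLOR };

static uint32_t fill_color(enum visual v, uint32_t color,
			   const uint32_t *pseudo_palette)
{
	if (v == VIS_TRUECOLOR || v == VIS_DIRECTCOLOR)
		return pseudo_palette[color];   /* color is a palette index */
	return color;                           /* color is the raw value   */
}

int main(void)
{
	uint32_t palette[16] = { 0 };

	palette[7] = 0x00ffffff;                /* white in a 32bpp mode */
	assert(fill_color(VIS_TRUECOLOR, 7, palette) == 0x00ffffff);
	assert(fill_color(VIS_PSEUDOCOLOR, 7, palette) == 7);
	printf("fill color selection ok\n");
	return 0;
}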
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 0c3cd53c7313..f31347b8c9b0 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c | |||
| @@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable) | |||
| 71 | return (reassign == 1); | 71 | return (reassign == 1); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | bool | ||
| 75 | nv04_fifo_cache_flush(struct drm_device *dev) | ||
| 76 | { | ||
| 77 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 78 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | ||
| 79 | uint64_t start = ptimer->read(dev); | ||
| 80 | |||
| 81 | do { | ||
| 82 | if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) == | ||
| 83 | nv_rd32(dev, NV03_PFIFO_CACHE1_PUT)) | ||
| 84 | return true; | ||
| 85 | |||
| 86 | } while (ptimer->read(dev) - start < 100000000); | ||
| 87 | |||
| 88 | NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n"); | ||
| 89 | |||
| 90 | return false; | ||
| 91 | } | ||
| 92 | |||
| 93 | bool | ||
| 94 | nv04_fifo_cache_pull(struct drm_device *dev, bool enable) | ||
| 95 | { | ||
| 96 | uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0); | ||
| 97 | |||
| 98 | if (enable) { | ||
| 99 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1); | ||
| 100 | } else { | ||
| 101 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1); | ||
| 102 | nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); | ||
| 103 | } | ||
| 104 | |||
| 105 | return !!(pull & 1); | ||
| 106 | } | ||
| 107 | |||
| 74 | int | 108 | int |
| 75 | nv04_fifo_channel_id(struct drm_device *dev) | 109 | nv04_fifo_channel_id(struct drm_device *dev) |
| 76 | { | 110 | { |
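nv04_fifo_cache_flush() above is a bounded poll: it compares the PFIFO CACHE1 GET and PUT pointers until they match, or gives up after 100 ms measured by the PTIMER engine. A userspace sketch of the same pattern, using CLOCK_MONOTONIC in place of the GPU timer and a dummy predicate in place of the register reads:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Poll a condition until it holds or ~100 ms elapse (the same budget
 * the flush helper uses).  Returns true on success, false on timeout. */
static bool poll_until(bool (*done)(void *), void *arg)
{
	const uint64_t budget_ns = 100000000ull;  /* 100 ms */
	uint64_t start = now_ns();

	do {
		if (done(arg))
			return true;
	} while (now_ns() - start < budget_ns);

	return false;
}

/* Dummy predicate standing in for "CACHE1 GET == CACHE1 PUT". */
static bool drained(void *arg)
{
	unsigned *countdown = arg;

	return (*countdown)-- == 0;
}

int main(void)
{
	unsigned work = 1000;

	printf("flush %s\n", poll_until(drained, &work) ? "ok" : "timed out");
	return 0;
}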
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index d561d773c0f4..e260986ea65a 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
| @@ -28,6 +28,10 @@ | |||
| 28 | #include "nouveau_drv.h" | 28 | #include "nouveau_drv.h" |
| 29 | 29 | ||
| 30 | static uint32_t nv04_graph_ctx_regs[] = { | 30 | static uint32_t nv04_graph_ctx_regs[] = { |
| 31 | 0x0040053c, | ||
| 32 | 0x00400544, | ||
| 33 | 0x00400540, | ||
| 34 | 0x00400548, | ||
| 31 | NV04_PGRAPH_CTX_SWITCH1, | 35 | NV04_PGRAPH_CTX_SWITCH1, |
| 32 | NV04_PGRAPH_CTX_SWITCH2, | 36 | NV04_PGRAPH_CTX_SWITCH2, |
| 33 | NV04_PGRAPH_CTX_SWITCH3, | 37 | NV04_PGRAPH_CTX_SWITCH3, |
| @@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = { | |||
| 102 | NV04_PGRAPH_PATT_COLOR0, | 106 | NV04_PGRAPH_PATT_COLOR0, |
| 103 | NV04_PGRAPH_PATT_COLOR1, | 107 | NV04_PGRAPH_PATT_COLOR1, |
| 104 | NV04_PGRAPH_PATT_COLORRAM+0x00, | 108 | NV04_PGRAPH_PATT_COLORRAM+0x00, |
| 105 | NV04_PGRAPH_PATT_COLORRAM+0x01, | ||
| 106 | NV04_PGRAPH_PATT_COLORRAM+0x02, | ||
| 107 | NV04_PGRAPH_PATT_COLORRAM+0x03, | ||
| 108 | NV04_PGRAPH_PATT_COLORRAM+0x04, | 109 | NV04_PGRAPH_PATT_COLORRAM+0x04, |
| 109 | NV04_PGRAPH_PATT_COLORRAM+0x05, | ||
| 110 | NV04_PGRAPH_PATT_COLORRAM+0x06, | ||
| 111 | NV04_PGRAPH_PATT_COLORRAM+0x07, | ||
| 112 | NV04_PGRAPH_PATT_COLORRAM+0x08, | 110 | NV04_PGRAPH_PATT_COLORRAM+0x08, |
| 113 | NV04_PGRAPH_PATT_COLORRAM+0x09, | 111 | NV04_PGRAPH_PATT_COLORRAM+0x0c, |
| 114 | NV04_PGRAPH_PATT_COLORRAM+0x0A, | ||
| 115 | NV04_PGRAPH_PATT_COLORRAM+0x0B, | ||
| 116 | NV04_PGRAPH_PATT_COLORRAM+0x0C, | ||
| 117 | NV04_PGRAPH_PATT_COLORRAM+0x0D, | ||
| 118 | NV04_PGRAPH_PATT_COLORRAM+0x0E, | ||
| 119 | NV04_PGRAPH_PATT_COLORRAM+0x0F, | ||
| 120 | NV04_PGRAPH_PATT_COLORRAM+0x10, | 112 | NV04_PGRAPH_PATT_COLORRAM+0x10, |
| 121 | NV04_PGRAPH_PATT_COLORRAM+0x11, | ||
| 122 | NV04_PGRAPH_PATT_COLORRAM+0x12, | ||
| 123 | NV04_PGRAPH_PATT_COLORRAM+0x13, | ||
| 124 | NV04_PGRAPH_PATT_COLORRAM+0x14, | 113 | NV04_PGRAPH_PATT_COLORRAM+0x14, |
| 125 | NV04_PGRAPH_PATT_COLORRAM+0x15, | ||
| 126 | NV04_PGRAPH_PATT_COLORRAM+0x16, | ||
| 127 | NV04_PGRAPH_PATT_COLORRAM+0x17, | ||
| 128 | NV04_PGRAPH_PATT_COLORRAM+0x18, | 114 | NV04_PGRAPH_PATT_COLORRAM+0x18, |
| 129 | NV04_PGRAPH_PATT_COLORRAM+0x19, | 115 | NV04_PGRAPH_PATT_COLORRAM+0x1c, |
| 130 | NV04_PGRAPH_PATT_COLORRAM+0x1A, | ||
| 131 | NV04_PGRAPH_PATT_COLORRAM+0x1B, | ||
| 132 | NV04_PGRAPH_PATT_COLORRAM+0x1C, | ||
| 133 | NV04_PGRAPH_PATT_COLORRAM+0x1D, | ||
| 134 | NV04_PGRAPH_PATT_COLORRAM+0x1E, | ||
| 135 | NV04_PGRAPH_PATT_COLORRAM+0x1F, | ||
| 136 | NV04_PGRAPH_PATT_COLORRAM+0x20, | 116 | NV04_PGRAPH_PATT_COLORRAM+0x20, |
| 137 | NV04_PGRAPH_PATT_COLORRAM+0x21, | ||
| 138 | NV04_PGRAPH_PATT_COLORRAM+0x22, | ||
| 139 | NV04_PGRAPH_PATT_COLORRAM+0x23, | ||
| 140 | NV04_PGRAPH_PATT_COLORRAM+0x24, | 117 | NV04_PGRAPH_PATT_COLORRAM+0x24, |
| 141 | NV04_PGRAPH_PATT_COLORRAM+0x25, | ||
| 142 | NV04_PGRAPH_PATT_COLORRAM+0x26, | ||
| 143 | NV04_PGRAPH_PATT_COLORRAM+0x27, | ||
| 144 | NV04_PGRAPH_PATT_COLORRAM+0x28, | 118 | NV04_PGRAPH_PATT_COLORRAM+0x28, |
| 145 | NV04_PGRAPH_PATT_COLORRAM+0x29, | 119 | NV04_PGRAPH_PATT_COLORRAM+0x2c, |
| 146 | NV04_PGRAPH_PATT_COLORRAM+0x2A, | ||
| 147 | NV04_PGRAPH_PATT_COLORRAM+0x2B, | ||
| 148 | NV04_PGRAPH_PATT_COLORRAM+0x2C, | ||
| 149 | NV04_PGRAPH_PATT_COLORRAM+0x2D, | ||
| 150 | NV04_PGRAPH_PATT_COLORRAM+0x2E, | ||
| 151 | NV04_PGRAPH_PATT_COLORRAM+0x2F, | ||
| 152 | NV04_PGRAPH_PATT_COLORRAM+0x30, | 120 | NV04_PGRAPH_PATT_COLORRAM+0x30, |
| 153 | NV04_PGRAPH_PATT_COLORRAM+0x31, | ||
| 154 | NV04_PGRAPH_PATT_COLORRAM+0x32, | ||
| 155 | NV04_PGRAPH_PATT_COLORRAM+0x33, | ||
| 156 | NV04_PGRAPH_PATT_COLORRAM+0x34, | 121 | NV04_PGRAPH_PATT_COLORRAM+0x34, |
| 157 | NV04_PGRAPH_PATT_COLORRAM+0x35, | ||
| 158 | NV04_PGRAPH_PATT_COLORRAM+0x36, | ||
| 159 | NV04_PGRAPH_PATT_COLORRAM+0x37, | ||
| 160 | NV04_PGRAPH_PATT_COLORRAM+0x38, | 122 | NV04_PGRAPH_PATT_COLORRAM+0x38, |
| 161 | NV04_PGRAPH_PATT_COLORRAM+0x39, | 123 | NV04_PGRAPH_PATT_COLORRAM+0x3c, |
| 162 | NV04_PGRAPH_PATT_COLORRAM+0x3A, | 124 | NV04_PGRAPH_PATT_COLORRAM+0x40, |
| 163 | NV04_PGRAPH_PATT_COLORRAM+0x3B, | 125 | NV04_PGRAPH_PATT_COLORRAM+0x44, |
| 164 | NV04_PGRAPH_PATT_COLORRAM+0x3C, | 126 | NV04_PGRAPH_PATT_COLORRAM+0x48, |
| 165 | NV04_PGRAPH_PATT_COLORRAM+0x3D, | 127 | NV04_PGRAPH_PATT_COLORRAM+0x4c, |
| 166 | NV04_PGRAPH_PATT_COLORRAM+0x3E, | 128 | NV04_PGRAPH_PATT_COLORRAM+0x50, |
| 167 | NV04_PGRAPH_PATT_COLORRAM+0x3F, | 129 | NV04_PGRAPH_PATT_COLORRAM+0x54, |
| 130 | NV04_PGRAPH_PATT_COLORRAM+0x58, | ||
| 131 | NV04_PGRAPH_PATT_COLORRAM+0x5c, | ||
| 132 | NV04_PGRAPH_PATT_COLORRAM+0x60, | ||
| 133 | NV04_PGRAPH_PATT_COLORRAM+0x64, | ||
| 134 | NV04_PGRAPH_PATT_COLORRAM+0x68, | ||
| 135 | NV04_PGRAPH_PATT_COLORRAM+0x6c, | ||
| 136 | NV04_PGRAPH_PATT_COLORRAM+0x70, | ||
| 137 | NV04_PGRAPH_PATT_COLORRAM+0x74, | ||
| 138 | NV04_PGRAPH_PATT_COLORRAM+0x78, | ||
| 139 | NV04_PGRAPH_PATT_COLORRAM+0x7c, | ||
| 140 | NV04_PGRAPH_PATT_COLORRAM+0x80, | ||
| 141 | NV04_PGRAPH_PATT_COLORRAM+0x84, | ||
| 142 | NV04_PGRAPH_PATT_COLORRAM+0x88, | ||
| 143 | NV04_PGRAPH_PATT_COLORRAM+0x8c, | ||
| 144 | NV04_PGRAPH_PATT_COLORRAM+0x90, | ||
| 145 | NV04_PGRAPH_PATT_COLORRAM+0x94, | ||
| 146 | NV04_PGRAPH_PATT_COLORRAM+0x98, | ||
| 147 | NV04_PGRAPH_PATT_COLORRAM+0x9c, | ||
| 148 | NV04_PGRAPH_PATT_COLORRAM+0xa0, | ||
| 149 | NV04_PGRAPH_PATT_COLORRAM+0xa4, | ||
| 150 | NV04_PGRAPH_PATT_COLORRAM+0xa8, | ||
| 151 | NV04_PGRAPH_PATT_COLORRAM+0xac, | ||
| 152 | NV04_PGRAPH_PATT_COLORRAM+0xb0, | ||
| 153 | NV04_PGRAPH_PATT_COLORRAM+0xb4, | ||
| 154 | NV04_PGRAPH_PATT_COLORRAM+0xb8, | ||
| 155 | NV04_PGRAPH_PATT_COLORRAM+0xbc, | ||
| 156 | NV04_PGRAPH_PATT_COLORRAM+0xc0, | ||
| 157 | NV04_PGRAPH_PATT_COLORRAM+0xc4, | ||
| 158 | NV04_PGRAPH_PATT_COLORRAM+0xc8, | ||
| 159 | NV04_PGRAPH_PATT_COLORRAM+0xcc, | ||
| 160 | NV04_PGRAPH_PATT_COLORRAM+0xd0, | ||
| 161 | NV04_PGRAPH_PATT_COLORRAM+0xd4, | ||
| 162 | NV04_PGRAPH_PATT_COLORRAM+0xd8, | ||
| 163 | NV04_PGRAPH_PATT_COLORRAM+0xdc, | ||
| 164 | NV04_PGRAPH_PATT_COLORRAM+0xe0, | ||
| 165 | NV04_PGRAPH_PATT_COLORRAM+0xe4, | ||
| 166 | NV04_PGRAPH_PATT_COLORRAM+0xe8, | ||
| 167 | NV04_PGRAPH_PATT_COLORRAM+0xec, | ||
| 168 | NV04_PGRAPH_PATT_COLORRAM+0xf0, | ||
| 169 | NV04_PGRAPH_PATT_COLORRAM+0xf4, | ||
| 170 | NV04_PGRAPH_PATT_COLORRAM+0xf8, | ||
| 171 | NV04_PGRAPH_PATT_COLORRAM+0xfc, | ||
| 168 | NV04_PGRAPH_PATTERN, | 172 | NV04_PGRAPH_PATTERN, |
| 169 | 0x0040080c, | 173 | 0x0040080c, |
| 170 | NV04_PGRAPH_PATTERN_SHAPE, | 174 | NV04_PGRAPH_PATTERN_SHAPE, |
| @@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = { | |||
| 247 | 0x004004f8, | 251 | 0x004004f8, |
| 248 | 0x0040047c, | 252 | 0x0040047c, |
| 249 | 0x004004fc, | 253 | 0x004004fc, |
| 250 | 0x0040053c, | ||
| 251 | 0x00400544, | ||
| 252 | 0x00400540, | ||
| 253 | 0x00400548, | ||
| 254 | 0x00400560, | ||
| 255 | 0x00400568, | ||
| 256 | 0x00400564, | ||
| 257 | 0x0040056c, | ||
| 258 | 0x00400534, | 254 | 0x00400534, |
| 259 | 0x00400538, | 255 | 0x00400538, |
| 260 | 0x00400514, | 256 | 0x00400514, |
| @@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = { | |||
| 341 | 0x00400500, | 337 | 0x00400500, |
| 342 | 0x00400504, | 338 | 0x00400504, |
| 343 | NV04_PGRAPH_VALID1, | 339 | NV04_PGRAPH_VALID1, |
| 344 | NV04_PGRAPH_VALID2 | 340 | NV04_PGRAPH_VALID2, |
| 345 | 341 | NV04_PGRAPH_DEBUG_3 | |
| 346 | |||
| 347 | }; | 342 | }; |
| 348 | 343 | ||
| 349 | struct graph_state { | 344 | struct graph_state { |
| @@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev) | |||
| 388 | pgraph->fifo_access(dev, true); | 383 | pgraph->fifo_access(dev, true); |
| 389 | } | 384 | } |
| 390 | 385 | ||
| 386 | static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) | ||
| 387 | { | ||
| 388 | int i; | ||
| 389 | |||
| 390 | for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) { | ||
| 391 | if (nv04_graph_ctx_regs[i] == reg) | ||
| 392 | return &ctx->nv04[i]; | ||
| 393 | } | ||
| 394 | |||
| 395 | return NULL; | ||
| 396 | } | ||
| 397 | |||
| 391 | int nv04_graph_create_context(struct nouveau_channel *chan) | 398 | int nv04_graph_create_context(struct nouveau_channel *chan) |
| 392 | { | 399 | { |
| 393 | struct graph_state *pgraph_ctx; | 400 | struct graph_state *pgraph_ctx; |
| @@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan) | |||
| 398 | if (pgraph_ctx == NULL) | 405 | if (pgraph_ctx == NULL) |
| 399 | return -ENOMEM; | 406 | return -ENOMEM; |
| 400 | 407 | ||
| 401 | /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */ | 408 | *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; |
| 402 | pgraph_ctx->nv04[0] = 0x0001ffff; | 409 | |
| 403 | /* is it really needed ??? */ | ||
| 404 | #if 0 | ||
| 405 | dev_priv->fifos[channel].pgraph_ctx[1] = | ||
| 406 | nv_rd32(dev, NV_PGRAPH_DEBUG_4); | ||
| 407 | dev_priv->fifos[channel].pgraph_ctx[2] = | ||
| 408 | nv_rd32(dev, 0x004006b0); | ||
| 409 | #endif | ||
| 410 | return 0; | 410 | return 0; |
| 411 | } | 411 | } |
| 412 | 412 | ||
| @@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan) | |||
| 429 | nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); | 429 | nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); |
| 430 | 430 | ||
| 431 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); | 431 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100); |
| 432 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24); | 432 | |
| 433 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; | ||
| 434 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24); | ||
| 435 | |||
| 433 | tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); | 436 | tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2); |
| 434 | nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); | 437 | nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff); |
| 438 | |||
| 435 | return 0; | 439 | return 0; |
| 436 | } | 440 | } |
| 437 | 441 | ||
| @@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev) | |||
| 494 | nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); | 498 | nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF); |
| 495 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); | 499 | nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100); |
| 496 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; | 500 | tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff; |
| 497 | tmp |= dev_priv->engine.fifo.channels << 24; | 501 | tmp |= (dev_priv->engine.fifo.channels - 1) << 24; |
| 498 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); | 502 | nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp); |
| 499 | 503 | ||
| 500 | /* These don't belong here, they're part of a per-channel context */ | 504 | /* These don't belong here, they're part of a per-channel context */ |
| @@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | |||
| 533 | int mthd, uint32_t data) | 537 | int mthd, uint32_t data) |
| 534 | { | 538 | { |
| 535 | struct drm_device *dev = chan->dev; | 539 | struct drm_device *dev = chan->dev; |
| 536 | uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff; | 540 | uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; |
| 537 | int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; | 541 | int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; |
| 538 | uint32_t tmp; | 542 | uint32_t tmp; |
| 539 | 543 | ||
| @@ -547,7 +551,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | |||
| 547 | return 0; | 551 | return 0; |
| 548 | } | 552 | } |
| 549 | 553 | ||
| 550 | static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = { | 554 | static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { |
| 551 | { 0x0150, nv04_graph_mthd_set_ref }, | 555 | { 0x0150, nv04_graph_mthd_set_ref }, |
| 552 | {} | 556 | {} |
| 553 | }; | 557 | }; |
| @@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = { | |||
| 558 | }; | 562 | }; |
| 559 | 563 | ||
| 560 | struct nouveau_pgraph_object_class nv04_graph_grclass[] = { | 564 | struct nouveau_pgraph_object_class nv04_graph_grclass[] = { |
| 561 | { 0x0039, false, nv04_graph_mthds_m2mf }, | 565 | { 0x0039, false, NULL }, |
| 562 | { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ | 566 | { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */ |
| 563 | { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ | 567 | { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */ |
| 564 | { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ | 568 | { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */ |
| @@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = { | |||
| 574 | { 0x0053, false, NULL }, /* surf3d */ | 578 | { 0x0053, false, NULL }, /* surf3d */ |
| 575 | { 0x0054, false, NULL }, /* tex_tri */ | 579 | { 0x0054, false, NULL }, /* tex_tri */ |
| 576 | { 0x0055, false, NULL }, /* multitex_tri */ | 580 | { 0x0055, false, NULL }, /* multitex_tri */ |
| 581 | { 0x506e, true, nv04_graph_mthds_sw }, | ||
| 577 | {} | 582 | {} |
| 578 | }; | 583 | }; |
| 579 | 584 | ||
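nv04_graph.c now locates a saved context slot by register offset with the new ctx_reg() helper: a linear scan over nv04_graph_ctx_regs[] returning a pointer into the per-channel save area, so single registers such as NV04_PGRAPH_DEBUG_3 can be seeded without hard-coding array indices. A standalone sketch of that register-to-slot mapping, with a four-entry placeholder table instead of the driver's full list:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder register offsets; the driver's table has a few hundred. */
static const uint32_t ctx_regs[] = {
	0x0040053c,
	0x00400544,
	0x00400540,
	0x00400548,
};

#define NUM_CTX_REGS (sizeof(ctx_regs) / sizeof(ctx_regs[0]))

struct graph_state {
	uint32_t saved[NUM_CTX_REGS];
};

/* Map a register offset to its slot in the save area, NULL if absent. */
static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
{
	size_t i;

	for (i = 0; i < NUM_CTX_REGS; i++) {
		if (ctx_regs[i] == reg)
			return &ctx->saved[i];
	}
	return NULL;
}

int main(void)
{
	struct graph_state ctx = { { 0 } };

	*ctx_reg(&ctx, 0x00400544) = 0xfad4ff31;  /* seed one register */
	assert(ctx.saved[1] == 0xfad4ff31);
	assert(ctx_reg(&ctx, 0xdeadbeef) == NULL);
	printf("ctx_reg lookup ok\n");
	return 0;
}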
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index 79e2d104d70a..cc5cda44e501 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c | |||
| @@ -3,17 +3,37 @@ | |||
| 3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
| 4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
| 5 | 5 | ||
| 6 | void | ||
| 7 | nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
| 8 | uint32_t size, uint32_t pitch) | ||
| 9 | { | ||
| 10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 11 | uint32_t limit = max(1u, addr + size) - 1; | ||
| 12 | |||
| 13 | if (pitch) { | ||
| 14 | if (dev_priv->card_type >= NV_20) | ||
| 15 | addr |= 1; | ||
| 16 | else | ||
| 17 | addr |= 1 << 31; | ||
| 18 | } | ||
| 19 | |||
| 20 | nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); | ||
| 21 | nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); | ||
| 22 | nv_wr32(dev, NV10_PFB_TILE(i), addr); | ||
| 23 | } | ||
| 24 | |||
| 6 | int | 25 | int |
| 7 | nv10_fb_init(struct drm_device *dev) | 26 | nv10_fb_init(struct drm_device *dev) |
| 8 | { | 27 | { |
| 9 | uint32_t fb_bar_size; | 28 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 29 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; | ||
| 10 | int i; | 30 | int i; |
| 11 | 31 | ||
| 12 | fb_bar_size = drm_get_resource_len(dev, 0) - 1; | 32 | pfb->num_tiles = NV10_PFB_TILE__SIZE; |
| 13 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | 33 | |
| 14 | nv_wr32(dev, NV10_PFB_TILE(i), 0); | 34 | /* Turn all the tiling regions off. */ |
| 15 | nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); | 35 | for (i = 0; i < pfb->num_tiles; i++) |
| 16 | } | 36 | pfb->set_region_tiling(dev, i, 0, 0, 0); |
| 17 | 37 | ||
| 18 | return 0; | 38 | return 0; |
| 19 | } | 39 | } |
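nv10_fb_set_region_tiling() above derives the tile registers from (addr, size, pitch): the limit register gets max(1u, addr + size) - 1 so a zero-sized disable request still produces a sane value, and a non-zero pitch sets the enable bit in the address, which lives at bit 0 on NV20+ and bit 31 on older cards. A standalone sketch of that register math, with the chip generation passed as a flag and illustrative addresses:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tile_regs {
	uint32_t tile;    /* base address, plus the enable bit if active */
	uint32_t limit;   /* inclusive end of the region */
	uint32_t pitch;
};

static uint32_t u32_max(uint32_t a, uint32_t b) { return a > b ? a : b; }

static struct tile_regs tile_region(uint32_t addr, uint32_t size,
				    uint32_t pitch, bool nv20_or_newer)
{
	struct tile_regs r;

	r.limit = u32_max(1u, addr + size) - 1;
	r.pitch = pitch;
	if (pitch)
		addr |= nv20_or_newer ? 1u : 1u << 31;
	r.tile = addr;
	return r;
}

int main(void)
{
	/* Disable request: addr = size = pitch = 0. */
	struct tile_regs off = tile_region(0, 0, 0, true);
	/* Enable a 1 MiB region at 16 MiB with a 4 KiB pitch on NV20+. */
	struct tile_regs on = tile_region(0x01000000, 0x00100000, 0x1000, true);

	assert(off.tile == 0 && off.limit == 0 && off.pitch == 0);
	assert(on.tile == 0x01000001 && on.limit == 0x010fffff);
	printf("tile register math ok\n");
	return 0;
}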
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 6870e0ee2e7e..fcf2cdd19493 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
| @@ -807,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) | |||
| 807 | chan->pgraph_ctx = NULL; | 807 | chan->pgraph_ctx = NULL; |
| 808 | } | 808 | } |
| 809 | 809 | ||
| 810 | void | ||
| 811 | nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
| 812 | uint32_t size, uint32_t pitch) | ||
| 813 | { | ||
| 814 | uint32_t limit = max(1u, addr + size) - 1; | ||
| 815 | |||
| 816 | if (pitch) | ||
| 817 | addr |= 1 << 31; | ||
| 818 | |||
| 819 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); | ||
| 820 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); | ||
| 821 | nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); | ||
| 822 | } | ||
| 823 | |||
| 810 | int nv10_graph_init(struct drm_device *dev) | 824 | int nv10_graph_init(struct drm_device *dev) |
| 811 | { | 825 | { |
| 812 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 826 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| @@ -838,17 +852,9 @@ int nv10_graph_init(struct drm_device *dev) | |||
| 838 | } else | 852 | } else |
| 839 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); | 853 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); |
| 840 | 854 | ||
| 841 | /* copy tile info from PFB */ | 855 | /* Turn all the tiling regions off. */ |
| 842 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | 856 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
| 843 | nv_wr32(dev, NV10_PGRAPH_TILE(i), | 857 | nv10_graph_set_region_tiling(dev, i, 0, 0, 0); |
| 844 | nv_rd32(dev, NV10_PFB_TILE(i))); | ||
| 845 | nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), | ||
| 846 | nv_rd32(dev, NV10_PFB_TLIMIT(i))); | ||
| 847 | nv_wr32(dev, NV10_PGRAPH_TSIZE(i), | ||
| 848 | nv_rd32(dev, NV10_PFB_TSIZE(i))); | ||
| 849 | nv_wr32(dev, NV10_PGRAPH_TSTATUS(i), | ||
| 850 | nv_rd32(dev, NV10_PFB_TSTATUS(i))); | ||
| 851 | } | ||
| 852 | 858 | ||
| 853 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); | 859 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000); |
| 854 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); | 860 | nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 81c01353a9f9..58b917c3341b 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
| @@ -33,13 +33,103 @@ | |||
| 33 | #include "nouveau_hw.h" | 33 | #include "nouveau_hw.h" |
| 34 | #include "nv17_tv.h" | 34 | #include "nv17_tv.h" |
| 35 | 35 | ||
| 36 | enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | 36 | static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) |
| 37 | struct drm_connector *connector, | ||
| 38 | uint32_t pin_mask) | ||
| 39 | { | 37 | { |
| 38 | struct drm_device *dev = encoder->dev; | ||
| 39 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 40 | uint32_t testval, regoffset = nv04_dac_output_offset(encoder); | ||
| 41 | uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, | ||
| 42 | fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; | ||
| 43 | uint32_t sample = 0; | ||
| 44 | int head; | ||
| 45 | |||
| 46 | #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) | ||
| 47 | testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); | ||
| 48 | if (dev_priv->vbios->tvdactestval) | ||
| 49 | testval = dev_priv->vbios->tvdactestval; | ||
| 50 | |||
| 51 | dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); | ||
| 52 | head = (dacclk & 0x100) >> 8; | ||
| 53 | |||
| 54 | /* Save the previous state. */ | ||
| 55 | gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1); | ||
| 56 | gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0); | ||
| 57 | fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); | ||
| 58 | fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); | ||
| 59 | fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); | ||
| 60 | fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); | ||
| 61 | test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); | ||
| 62 | ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); | ||
| 63 | ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); | ||
| 64 | ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); | ||
| 65 | |||
| 66 | /* Prepare the DAC for load detection. */ | ||
| 67 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true); | ||
| 68 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true); | ||
| 69 | |||
| 70 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); | ||
| 71 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); | ||
| 72 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); | ||
| 73 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, | ||
| 74 | NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | | ||
| 75 | NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 | | ||
| 76 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | | ||
| 77 | NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | | ||
| 78 | NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS); | ||
| 79 | |||
| 80 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0); | ||
| 81 | |||
| 82 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, | ||
| 83 | (dacclk & ~0xff) | 0x22); | ||
| 84 | msleep(1); | ||
| 85 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, | ||
| 86 | (dacclk & ~0xff) | 0x21); | ||
| 87 | |||
| 88 | NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); | ||
| 89 | NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); | ||
| 90 | |||
| 91 | /* Sample pin 0x4 (usually S-video luma). */ | ||
| 92 | NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); | ||
| 93 | msleep(20); | ||
| 94 | sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) | ||
| 95 | & 0x4 << 28; | ||
| 96 | |||
| 97 | /* Sample the remaining pins. */ | ||
| 98 | NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); | ||
| 99 | msleep(20); | ||
| 100 | sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) | ||
| 101 | & 0xa << 28; | ||
| 102 | |||
| 103 | /* Restore the previous state. */ | ||
| 104 | NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); | ||
| 105 | NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); | ||
| 106 | NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); | ||
| 107 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk); | ||
| 108 | NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl); | ||
| 109 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); | ||
| 110 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); | ||
| 111 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); | ||
| 112 | NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); | ||
| 113 | nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1); | ||
| 114 | nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0); | ||
| 115 | |||
| 116 | return sample; | ||
| 117 | } | ||
| 118 | |||
| 119 | static enum drm_connector_status | ||
| 120 | nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
| 121 | { | ||
| 122 | struct drm_device *dev = encoder->dev; | ||
| 123 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 124 | struct drm_mode_config *conf = &dev->mode_config; | ||
| 40 | struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); | 125 | struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); |
| 126 | struct dcb_entry *dcb = tv_enc->base.dcb; | ||
| 41 | 127 | ||
| 42 | tv_enc->pin_mask = pin_mask >> 28 & 0xe; | 128 | if (dev_priv->chipset == 0x42 || |
| 129 | dev_priv->chipset == 0x43) | ||
| 130 | tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; | ||
| 131 | else | ||
| 132 | tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; | ||
| 43 | 133 | ||
| 44 | switch (tv_enc->pin_mask) { | 134 | switch (tv_enc->pin_mask) { |
| 45 | case 0x2: | 135 | case 0x2: |
| @@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | |||
| 50 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; | 140 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; |
| 51 | break; | 141 | break; |
| 52 | case 0xe: | 142 | case 0xe: |
| 53 | if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output) | 143 | if (dcb->tvconf.has_component_output) |
| 54 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; | 144 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; |
| 55 | else | 145 | else |
| 56 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; | 146 | tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; |
| @@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, | |||
| 61 | } | 151 | } |
| 62 | 152 | ||
| 63 | drm_connector_property_set_value(connector, | 153 | drm_connector_property_set_value(connector, |
| 64 | encoder->dev->mode_config.tv_subconnector_property, | 154 | conf->tv_subconnector_property, |
| 65 | tv_enc->subconnector); | 155 | tv_enc->subconnector); |
| 66 | 156 | ||
| 67 | return tv_enc->subconnector ? connector_status_connected : | 157 | if (tv_enc->subconnector) { |
| 68 | connector_status_disconnected; | 158 | NV_INFO(dev, "Load detected on output %c\n", |
| 159 | '@' + ffs(dcb->or)); | ||
| 160 | return connector_status_connected; | ||
| 161 | } else { | ||
| 162 | return connector_status_disconnected; | ||
| 163 | } | ||
| 69 | } | 164 | } |
| 70 | 165 | ||
| 71 | static const struct { | 166 | static const struct { |
| @@ -633,7 +728,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { | |||
| 633 | .prepare = nv17_tv_prepare, | 728 | .prepare = nv17_tv_prepare, |
| 634 | .commit = nv17_tv_commit, | 729 | .commit = nv17_tv_commit, |
| 635 | .mode_set = nv17_tv_mode_set, | 730 | .mode_set = nv17_tv_mode_set, |
| 636 | .detect = nv17_dac_detect, | 731 | .detect = nv17_tv_detect, |
| 637 | }; | 732 | }; |
| 638 | 733 | ||
| 639 | static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { | 734 | static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { |
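A note on the detect path above: the load sample is reduced to a pin mask with sample >> 28 & 0xe and then mapped to a DRM subconnector. The sketch below models that decode standalone; only the 0xe case is shown verbatim in the hunk, the composite and S-video cases follow the rest of nv17_tv.c, and the enum values are local stand-ins for the DRM_MODE_SUBCONNECTOR_* constants.

/* Standalone sketch of the pin-mask decode in nv17_tv_detect(); the enum
 * values are local stand-ins, not the real DRM constants. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

enum subconnector { SUB_UNKNOWN, SUB_COMPOSITE, SUB_SVIDEO, SUB_COMPONENT, SUB_SCART };

static enum subconnector decode_tv_pins(uint32_t sample, bool has_component)
{
	uint32_t pin_mask = sample >> 28 & 0xe;	/* keep sample bits 29..31 */

	switch (pin_mask) {
	case 0x2:
		return SUB_COMPOSITE;	/* single DAC pin loaded */
	case 0xc:
		return SUB_SVIDEO;	/* luma + chroma pins loaded */
	case 0xe:
		return has_component ? SUB_COMPONENT : SUB_SCART;
	default:
		return SUB_UNKNOWN;	/* nothing detected */
	}
}

int main(void)
{
	printf("%d\n", decode_tv_pins(0xe0000000u, true));	/* -> SUB_COMPONENT */
	return 0;
}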
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 18ba74f19703..d6fc0a82f03d 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c | |||
| @@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev) | |||
| 514 | nouveau_wait_for_idle(dev); | 514 | nouveau_wait_for_idle(dev); |
| 515 | } | 515 | } |
| 516 | 516 | ||
| 517 | void | ||
| 518 | nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
| 519 | uint32_t size, uint32_t pitch) | ||
| 520 | { | ||
| 521 | uint32_t limit = max(1u, addr + size) - 1; | ||
| 522 | |||
| 523 | if (pitch) | ||
| 524 | addr |= 1; | ||
| 525 | |||
| 526 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); | ||
| 527 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); | ||
| 528 | nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); | ||
| 529 | |||
| 530 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); | ||
| 531 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit); | ||
| 532 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); | ||
| 533 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch); | ||
| 534 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); | ||
| 535 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr); | ||
| 536 | } | ||
| 537 | |||
| 517 | int | 538 | int |
| 518 | nv20_graph_init(struct drm_device *dev) | 539 | nv20_graph_init(struct drm_device *dev) |
| 519 | { | 540 | { |
| @@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev) | |||
| 572 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); | 593 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030); |
| 573 | } | 594 | } |
| 574 | 595 | ||
| 575 | /* copy tile info from PFB */ | 596 | /* Turn all the tiling regions off. */ |
| 576 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | 597 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
| 577 | nv_wr32(dev, 0x00400904 + i * 0x10, | 598 | nv20_graph_set_region_tiling(dev, i, 0, 0, 0); |
| 578 | nv_rd32(dev, NV10_PFB_TLIMIT(i))); | 599 | |
| 579 | /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ | ||
| 580 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4); | ||
| 581 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, | ||
| 582 | nv_rd32(dev, NV10_PFB_TLIMIT(i))); | ||
| 583 | nv_wr32(dev, 0x00400908 + i * 0x10, | ||
| 584 | nv_rd32(dev, NV10_PFB_TSIZE(i))); | ||
| 585 | /* which is NV40_PGRAPH_TSIZE0(i) ?? */ | ||
| 586 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4); | ||
| 587 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, | ||
| 588 | nv_rd32(dev, NV10_PFB_TSIZE(i))); | ||
| 589 | nv_wr32(dev, 0x00400900 + i * 0x10, | ||
| 590 | nv_rd32(dev, NV10_PFB_TILE(i))); | ||
| 591 | /* which is NV40_PGRAPH_TILE0(i) ?? */ | ||
| 592 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4); | ||
| 593 | nv_wr32(dev, NV10_PGRAPH_RDI_DATA, | ||
| 594 | nv_rd32(dev, NV10_PFB_TILE(i))); | ||
| 595 | } | ||
| 596 | for (i = 0; i < 8; i++) { | 600 | for (i = 0; i < 8; i++) { |
| 597 | nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); | 601 | nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); |
| 598 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); | 602 | nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); |
| @@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev) | |||
| 704 | 708 | ||
| 705 | nv_wr32(dev, 0x4000c0, 0x00000016); | 709 | nv_wr32(dev, 0x4000c0, 0x00000016); |
| 706 | 710 | ||
| 707 | /* copy tile info from PFB */ | 711 | /* Turn all the tiling regions off. */ |
| 708 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | 712 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) |
| 709 | nv_wr32(dev, 0x00400904 + i * 0x10, | 713 | nv20_graph_set_region_tiling(dev, i, 0, 0, 0); |
| 710 | nv_rd32(dev, NV10_PFB_TLIMIT(i))); | ||
| 711 | /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ | ||
| 712 | nv_wr32(dev, 0x00400908 + i * 0x10, | ||
| 713 | nv_rd32(dev, NV10_PFB_TSIZE(i))); | ||
| 714 | /* which is NV40_PGRAPH_TSIZE0(i) ?? */ | ||
| 715 | nv_wr32(dev, 0x00400900 + i * 0x10, | ||
| 716 | nv_rd32(dev, NV10_PFB_TILE(i))); | ||
| 717 | /* which is NV40_PGRAPH_TILE0(i) ?? */ | ||
| 718 | } | ||
| 719 | 714 | ||
| 720 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); | 715 | nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); |
| 721 | nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); | 716 | nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); |
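Both of the new tiling helpers encode a region the same way: the limit register takes max(1u, addr + size) - 1, and the low address bit doubles as the enable flag whenever a pitch is supplied, so the (i, 0, 0, 0) calls above disable a region cleanly. A standalone sketch of just that arithmetic, with the nv_wr32() calls replaced by prints:

/* Sketch of the addr/limit/pitch encoding used by the set_region_tiling
 * helpers; register writes are replaced by printf so this runs standalone. */
#include <stdint.h>
#include <stdio.h>

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

static void encode_tile_region(int i, uint32_t addr, uint32_t size, uint32_t pitch)
{
	/* max(1u, ...) keeps the all-zero disable case from underflowing */
	uint32_t limit = max_u32(1u, addr + size) - 1;

	if (pitch)
		addr |= 1;	/* low address bit acts as the region enable */

	printf("region %d: TILE=0x%08x TLIMIT=0x%08x TSIZE=0x%08x\n",
	       i, addr, limit, pitch);
}

int main(void)
{
	encode_tile_region(0, 0x00100000, 0x00200000, 0x1000);	/* enabled region */
	encode_tile_region(1, 0, 0, 0);				/* "turn it off" case */
	return 0;
}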
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c index ca1d27107a8e..3cd07d8d5bd7 100644 --- a/drivers/gpu/drm/nouveau/nv40_fb.c +++ b/drivers/gpu/drm/nouveau/nv40_fb.c | |||
| @@ -3,12 +3,37 @@ | |||
| 3 | #include "nouveau_drv.h" | 3 | #include "nouveau_drv.h" |
| 4 | #include "nouveau_drm.h" | 4 | #include "nouveau_drm.h" |
| 5 | 5 | ||
| 6 | void | ||
| 7 | nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
| 8 | uint32_t size, uint32_t pitch) | ||
| 9 | { | ||
| 10 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 11 | uint32_t limit = max(1u, addr + size) - 1; | ||
| 12 | |||
| 13 | if (pitch) | ||
| 14 | addr |= 1; | ||
| 15 | |||
| 16 | switch (dev_priv->chipset) { | ||
| 17 | case 0x40: | ||
| 18 | nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); | ||
| 19 | nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); | ||
| 20 | nv_wr32(dev, NV10_PFB_TILE(i), addr); | ||
| 21 | break; | ||
| 22 | |||
| 23 | default: | ||
| 24 | nv_wr32(dev, NV40_PFB_TLIMIT(i), limit); | ||
| 25 | nv_wr32(dev, NV40_PFB_TSIZE(i), pitch); | ||
| 26 | nv_wr32(dev, NV40_PFB_TILE(i), addr); | ||
| 27 | break; | ||
| 28 | } | ||
| 29 | } | ||
| 30 | |||
| 6 | int | 31 | int |
| 7 | nv40_fb_init(struct drm_device *dev) | 32 | nv40_fb_init(struct drm_device *dev) |
| 8 | { | 33 | { |
| 9 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 34 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 10 | uint32_t fb_bar_size, tmp; | 35 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
| 11 | int num_tiles; | 36 | uint32_t tmp; |
| 12 | int i; | 37 | int i; |
| 13 | 38 | ||
| 14 | /* This is strictly a NV4x register (don't know about NV5x). */ | 39 | /* This is strictly a NV4x register (don't know about NV5x). */ |
| @@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev) | |||
| 23 | case 0x45: | 48 | case 0x45: |
| 24 | tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2); | 49 | tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2); |
| 25 | nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15)); | 50 | nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15)); |
| 26 | num_tiles = NV10_PFB_TILE__SIZE; | 51 | pfb->num_tiles = NV10_PFB_TILE__SIZE; |
| 27 | break; | 52 | break; |
| 28 | case 0x46: /* G72 */ | 53 | case 0x46: /* G72 */ |
| 29 | case 0x47: /* G70 */ | 54 | case 0x47: /* G70 */ |
| 30 | case 0x49: /* G71 */ | 55 | case 0x49: /* G71 */ |
| 31 | case 0x4b: /* G73 */ | 56 | case 0x4b: /* G73 */ |
| 32 | case 0x4c: /* C51 (G7X version) */ | 57 | case 0x4c: /* C51 (G7X version) */ |
| 33 | num_tiles = NV40_PFB_TILE__SIZE_1; | 58 | pfb->num_tiles = NV40_PFB_TILE__SIZE_1; |
| 34 | break; | 59 | break; |
| 35 | default: | 60 | default: |
| 36 | num_tiles = NV40_PFB_TILE__SIZE_0; | 61 | pfb->num_tiles = NV40_PFB_TILE__SIZE_0; |
| 37 | break; | 62 | break; |
| 38 | } | 63 | } |
| 39 | 64 | ||
| 40 | fb_bar_size = drm_get_resource_len(dev, 0) - 1; | 65 | /* Turn all the tiling regions off. */ |
| 41 | switch (dev_priv->chipset) { | 66 | for (i = 0; i < pfb->num_tiles; i++) |
| 42 | case 0x40: | 67 | pfb->set_region_tiling(dev, i, 0, 0, 0); |
| 43 | for (i = 0; i < num_tiles; i++) { | ||
| 44 | nv_wr32(dev, NV10_PFB_TILE(i), 0); | ||
| 45 | nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size); | ||
| 46 | } | ||
| 47 | break; | ||
| 48 | default: | ||
| 49 | for (i = 0; i < num_tiles; i++) { | ||
| 50 | nv_wr32(dev, NV40_PFB_TILE(i), 0); | ||
| 51 | nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size); | ||
| 52 | } | ||
| 53 | break; | ||
| 54 | } | ||
| 55 | 68 | ||
| 56 | return 0; | 69 | return 0; |
| 57 | } | 70 | } |
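nv40_fb_init() now stores the tile-region count in the per-device fb engine and disables every region through the engine's set_region_tiling hook rather than open-coding the register loop. A simplified model of that hook-and-loop shape; the struct below is a stand-in, not the real nouveau_fb_engine layout:

/* Simplified model of the fb-engine hook used above; only the call
 * pattern mirrors the driver, the types are invented for the sketch. */
#include <stdint.h>
#include <stdio.h>

struct fb_engine {
	int num_tiles;
	void (*set_region_tiling)(int i, uint32_t addr, uint32_t size,
				  uint32_t pitch);
};

static void fake_set_region_tiling(int i, uint32_t addr, uint32_t size,
				   uint32_t pitch)
{
	printf("tile %d: addr=0x%x size=0x%x pitch=0x%x\n", i, addr, size, pitch);
}

int main(void)
{
	struct fb_engine pfb = { .num_tiles = 8,
				 .set_region_tiling = fake_set_region_tiling };

	/* Turn all the tiling regions off, as nv40_fb_init() now does. */
	for (int i = 0; i < pfb.num_tiles; i++)
		pfb.set_region_tiling(i, 0, 0, 0);
	return 0;
}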
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 2b332bb55acf..53e8afe1dcd1 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
| @@ -181,6 +181,48 @@ nv40_graph_unload_context(struct drm_device *dev) | |||
| 181 | return ret; | 181 | return ret; |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | void | ||
| 185 | nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, | ||
| 186 | uint32_t size, uint32_t pitch) | ||
| 187 | { | ||
| 188 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 189 | uint32_t limit = max(1u, addr + size) - 1; | ||
| 190 | |||
| 191 | if (pitch) | ||
| 192 | addr |= 1; | ||
| 193 | |||
| 194 | switch (dev_priv->chipset) { | ||
| 195 | case 0x44: | ||
| 196 | case 0x4a: | ||
| 197 | case 0x4e: | ||
| 198 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); | ||
| 199 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); | ||
| 200 | nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); | ||
| 201 | break; | ||
| 202 | |||
| 203 | case 0x46: | ||
| 204 | case 0x47: | ||
| 205 | case 0x49: | ||
| 206 | case 0x4b: | ||
| 207 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch); | ||
| 208 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit); | ||
| 209 | nv_wr32(dev, NV47_PGRAPH_TILE(i), addr); | ||
| 210 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); | ||
| 211 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); | ||
| 212 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); | ||
| 213 | break; | ||
| 214 | |||
| 215 | default: | ||
| 216 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); | ||
| 217 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); | ||
| 218 | nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); | ||
| 219 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); | ||
| 220 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); | ||
| 221 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); | ||
| 222 | break; | ||
| 223 | } | ||
| 224 | } | ||
| 225 | |||
| 184 | /* | 226 | /* |
| 185 | * G70 0x47 | 227 | * G70 0x47 |
| 186 | * G71 0x49 | 228 | * G71 0x49 |
| @@ -195,7 +237,8 @@ nv40_graph_init(struct drm_device *dev) | |||
| 195 | { | 237 | { |
| 196 | struct drm_nouveau_private *dev_priv = | 238 | struct drm_nouveau_private *dev_priv = |
| 197 | (struct drm_nouveau_private *)dev->dev_private; | 239 | (struct drm_nouveau_private *)dev->dev_private; |
| 198 | uint32_t vramsz, tmp; | 240 | struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; |
| 241 | uint32_t vramsz; | ||
| 199 | int i, j; | 242 | int i, j; |
| 200 | 243 | ||
| 201 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & | 244 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & |
| @@ -292,74 +335,9 @@ nv40_graph_init(struct drm_device *dev) | |||
| 292 | nv_wr32(dev, 0x400b38, 0x2ffff800); | 335 | nv_wr32(dev, 0x400b38, 0x2ffff800); |
| 293 | nv_wr32(dev, 0x400b3c, 0x00006000); | 336 | nv_wr32(dev, 0x400b3c, 0x00006000); |
| 294 | 337 | ||
| 295 | /* copy tile info from PFB */ | 338 | /* Turn all the tiling regions off. */ |
| 296 | switch (dev_priv->chipset) { | 339 | for (i = 0; i < pfb->num_tiles; i++) |
| 297 | case 0x40: /* vanilla NV40 */ | 340 | nv40_graph_set_region_tiling(dev, i, 0, 0, 0); |
| 298 | for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { | ||
| 299 | tmp = nv_rd32(dev, NV10_PFB_TILE(i)); | ||
| 300 | nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp); | ||
| 301 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp); | ||
| 302 | tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i)); | ||
| 303 | nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp); | ||
| 304 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp); | ||
| 305 | tmp = nv_rd32(dev, NV10_PFB_TSIZE(i)); | ||
| 306 | nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp); | ||
| 307 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp); | ||
| 308 | tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i)); | ||
| 309 | nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp); | ||
| 310 | nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp); | ||
| 311 | } | ||
| 312 | break; | ||
| 313 | case 0x44: | ||
| 314 | case 0x4a: | ||
| 315 | case 0x4e: /* NV44-based cores don't have 0x406900? */ | ||
| 316 | for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) { | ||
| 317 | tmp = nv_rd32(dev, NV40_PFB_TILE(i)); | ||
| 318 | nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp); | ||
| 319 | tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i)); | ||
| 320 | nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp); | ||
| 321 | tmp = nv_rd32(dev, NV40_PFB_TSIZE(i)); | ||
| 322 | nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp); | ||
| 323 | tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i)); | ||
| 324 | nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp); | ||
| 325 | } | ||
| 326 | break; | ||
| 327 | case 0x46: | ||
| 328 | case 0x47: | ||
| 329 | case 0x49: | ||
| 330 | case 0x4b: /* G7X-based cores */ | ||
| 331 | for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) { | ||
| 332 | tmp = nv_rd32(dev, NV40_PFB_TILE(i)); | ||
| 333 | nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp); | ||
| 334 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp); | ||
| 335 | tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i)); | ||
| 336 | nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp); | ||
| 337 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp); | ||
| 338 | tmp = nv_rd32(dev, NV40_PFB_TSIZE(i)); | ||
| 339 | nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp); | ||
| 340 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp); | ||
| 341 | tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i)); | ||
| 342 | nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp); | ||
| 343 | nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp); | ||
| 344 | } | ||
| 345 | break; | ||
| 346 | default: /* everything else */ | ||
| 347 | for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) { | ||
| 348 | tmp = nv_rd32(dev, NV40_PFB_TILE(i)); | ||
| 349 | nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp); | ||
| 350 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp); | ||
| 351 | tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i)); | ||
| 352 | nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp); | ||
| 353 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp); | ||
| 354 | tmp = nv_rd32(dev, NV40_PFB_TSIZE(i)); | ||
| 355 | nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp); | ||
| 356 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp); | ||
| 357 | tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i)); | ||
| 358 | nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp); | ||
| 359 | nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp); | ||
| 360 | } | ||
| 361 | break; | ||
| 362 | } | ||
| 363 | 341 | ||
| 364 | /* begin RAM config */ | 342 | /* begin RAM config */ |
| 365 | vramsz = drm_get_resource_len(dev, 0) - 1; | 343 | vramsz = drm_get_resource_len(dev, 0) - 1; |
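The chipset switch in nv40_graph_set_region_tiling() boils down to which PGRAPH tile banks exist: NV44-family cores get only the primary bank, the G7x cores use the NV47 offsets plus the mirror bank, and everything else mirrors the NV20 offsets. A small lookup-only sketch of that dispatch (no register access, names are illustrative):

/* Lookup-only sketch of the chipset dispatch above. */
#include <stdio.h>

enum tile_banks { BANK_NV20_ONLY, BANK_NV20_PLUS_MIRROR, BANK_NV47_PLUS_MIRROR };

static enum tile_banks pgraph_tile_banks(int chipset)
{
	switch (chipset) {
	case 0x44: case 0x4a: case 0x4e:
		return BANK_NV20_ONLY;		/* no TILE1 mirror on NV44 cores */
	case 0x46: case 0x47: case 0x49: case 0x4b:
		return BANK_NV47_PLUS_MIRROR;	/* G7x register layout */
	default:
		return BANK_NV20_PLUS_MIRROR;
	}
}

int main(void)
{
	printf("0x47 -> %d, 0x44 -> %d\n",
	       pgraph_tile_banks(0x47), pgraph_tile_banks(0x44));
	return 0;
}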
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index a9263d92a231..90f0bf59fbcd 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 690 | int pxclk) | 690 | int pxclk) |
| 691 | { | 691 | { |
| 692 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 692 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 693 | struct nouveau_connector *nv_connector = NULL; | ||
| 694 | struct drm_encoder *encoder; | ||
| 693 | struct nvbios *bios = &dev_priv->VBIOS; | 695 | struct nvbios *bios = &dev_priv->VBIOS; |
| 694 | uint32_t mc, script = 0, or; | 696 | uint32_t mc, script = 0, or; |
| 695 | 697 | ||
| 698 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 699 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
| 700 | |||
| 701 | if (nv_encoder->dcb != dcbent) | ||
| 702 | continue; | ||
| 703 | |||
| 704 | nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
| 705 | break; | ||
| 706 | } | ||
| 707 | |||
| 696 | or = ffs(dcbent->or) - 1; | 708 | or = ffs(dcbent->or) - 1; |
| 697 | mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); | 709 | mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); |
| 698 | switch (dcbent->type) { | 710 | switch (dcbent->type) { |
| @@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 711 | } else | 723 | } else |
| 712 | if (bios->fp.strapless_is_24bit & 1) | 724 | if (bios->fp.strapless_is_24bit & 1) |
| 713 | script |= 0x0200; | 725 | script |= 0x0200; |
| 726 | |||
| 727 | if (nv_connector && nv_connector->edid && | ||
| 728 | (nv_connector->edid->revision >= 4) && | ||
| 729 | (nv_connector->edid->input & 0x70) >= 0x20) | ||
| 730 | script |= 0x0200; | ||
| 714 | } | 731 | } |
| 715 | 732 | ||
| 716 | if (nouveau_uscript_lvds >= 0) { | 733 | if (nouveau_uscript_lvds >= 0) { |
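The added EDID test treats a panel as 24-bit when the EDID is revision 1.4 or newer and bits 6:4 of the video input byte report a depth of at least 8 bits per channel (0x20 in that field). A standalone sketch of that check, using a two-field stub instead of the kernel's edid struct:

/* Standalone sketch of the EDID 24-bit panel test added above. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct edid_stub {		/* only the two fields the check looks at */
	uint8_t revision;	/* EDID minor revision */
	uint8_t input;		/* video input definition byte */
};

static bool lvds_panel_is_24bit(const struct edid_stub *edid)
{
	if (!edid || edid->revision < 4)
		return false;			/* depth field undefined before EDID 1.4 */
	return (edid->input & 0x70) >= 0x20;	/* 8 bpc or deeper */
}

int main(void)
{
	struct edid_stub e = { .revision = 4, .input = 0xa5 };	/* digital, 8 bpc */

	printf("24-bit: %d\n", lvds_panel_is_24bit(&e));
	return 0;
}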
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6bcc6d39e9b0..e4f279ee61cf 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 16 | 16 | ||
| 17 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && | 17 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && |
| 18 | RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { | 18 | RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { |
| 19 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 19 | nouveau_fbcon_gpu_lockup(info); |
| 20 | |||
| 21 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 22 | } | 20 | } |
| 23 | 21 | ||
| 24 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 22 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
| @@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 31 | OUT_RING(chan, 1); | 29 | OUT_RING(chan, 1); |
| 32 | } | 30 | } |
| 33 | BEGIN_RING(chan, NvSub2D, 0x0588, 1); | 31 | BEGIN_RING(chan, NvSub2D, 0x0588, 1); |
| 34 | OUT_RING(chan, rect->color); | 32 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 33 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) | ||
| 34 | OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); | ||
| 35 | else | ||
| 36 | OUT_RING(chan, rect->color); | ||
| 35 | BEGIN_RING(chan, NvSub2D, 0x0600, 4); | 37 | BEGIN_RING(chan, NvSub2D, 0x0600, 4); |
| 36 | OUT_RING(chan, rect->dx); | 38 | OUT_RING(chan, rect->dx); |
| 37 | OUT_RING(chan, rect->dy); | 39 | OUT_RING(chan, rect->dy); |
| @@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
| 56 | return; | 58 | return; |
| 57 | 59 | ||
| 58 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { | 60 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { |
| 59 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 61 | nouveau_fbcon_gpu_lockup(info); |
| 60 | |||
| 61 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 64 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
| @@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { | 103 | if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { |
| 104 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 104 | nouveau_fbcon_gpu_lockup(info); |
| 105 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 106 | } | 105 | } |
| 107 | 106 | ||
| 108 | if (info->flags & FBINFO_HWACCEL_DISABLED) { | 107 | if (info->flags & FBINFO_HWACCEL_DISABLED) { |
| @@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 135 | int push = dwords > 2047 ? 2047 : dwords; | 134 | int push = dwords > 2047 ? 2047 : dwords; |
| 136 | 135 | ||
| 137 | if (RING_SPACE(chan, push + 1)) { | 136 | if (RING_SPACE(chan, push + 1)) { |
| 138 | NV_ERROR(dev, | 137 | nouveau_fbcon_gpu_lockup(info); |
| 139 | "GPU lockup - switching to software fbcon\n"); | ||
| 140 | info->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 141 | cfb_imageblit(info, image); | 138 | cfb_imageblit(info, image); |
| 142 | return; | 139 | return; |
| 143 | } | 140 | } |
| @@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
| 199 | 196 | ||
| 200 | ret = RING_SPACE(chan, 59); | 197 | ret = RING_SPACE(chan, 59); |
| 201 | if (ret) { | 198 | if (ret) { |
| 202 | NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); | 199 | nouveau_fbcon_gpu_lockup(info); |
| 203 | return ret; | 200 | return ret; |
| 204 | } | 201 | } |
| 205 | 202 | ||
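The fillrect change above also fixes the fill colour for truecolor and directcolor framebuffers, where rect->color is an index into the 16-entry pseudo_palette rather than a raw pixel value. A minimal sketch of that selection, with the FB_VISUAL_* constants and types reduced to local stand-ins:

/* Sketch of the fill-colour selection added to nv50_fbcon_fillrect();
 * the visual constants below are local stand-ins for the fb.h values. */
#include <stdint.h>
#include <stdio.h>

#define VISUAL_TRUECOLOR	2
#define VISUAL_DIRECTCOLOR	4

static uint32_t fill_colour(int visual, const uint32_t *pseudo_palette,
			    uint32_t color)
{
	if (visual == VISUAL_TRUECOLOR || visual == VISUAL_DIRECTCOLOR)
		return pseudo_palette[color];	/* palette entry, index 0..15 */
	return color;				/* already a raw pixel value */
}

int main(void)
{
	uint32_t palette[16] = { [1] = 0x00aaaaaa };	/* console grey, say */

	printf("0x%08x\n", fill_colour(VISUAL_TRUECOLOR, palette, 1));
	printf("0x%08x\n", fill_colour(0 /* pseudocolor */, palette, 1));
	return 0;
}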
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index b7282284f080..39caf167587d 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
| @@ -384,8 +384,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan) | |||
| 384 | nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr), | 384 | nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr), |
| 385 | nv_ro32(dev, cache, (ptr * 2) + 1)); | 385 | nv_ro32(dev, cache, (ptr * 2) + 1)); |
| 386 | } | 386 | } |
| 387 | nv_wr32(dev, 0x3210, cnt << 2); | 387 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2); |
| 388 | nv_wr32(dev, 0x3270, 0); | 388 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); |
| 389 | 389 | ||
| 390 | /* guessing that all the 0x34xx regs aren't on NV50 */ | 390 | /* guessing that all the 0x34xx regs aren't on NV50 */ |
| 391 | if (!IS_G80) { | 391 | if (!IS_G80) { |
| @@ -398,8 +398,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) | |||
| 398 | 398 | ||
| 399 | dev_priv->engine.instmem.finish_access(dev); | 399 | dev_priv->engine.instmem.finish_access(dev); |
| 400 | 400 | ||
| 401 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); | ||
| 402 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); | ||
| 403 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); | 401 | nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); |
| 404 | return 0; | 402 | return 0; |
| 405 | } | 403 | } |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index b5f5fe75e6af..1cc7b937b1ea 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
| @@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable | |||
| 24 | $(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable | 24 | $(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable |
| 25 | $(call if_changed,mkregtable) | 25 | $(call if_changed,mkregtable) |
| 26 | 26 | ||
| 27 | $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable | ||
| 28 | $(call if_changed,mkregtable) | ||
| 29 | |||
| 27 | $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable | 30 | $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable |
| 28 | $(call if_changed,mkregtable) | 31 | $(call if_changed,mkregtable) |
| 29 | 32 | ||
| @@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h | |||
| 35 | 38 | ||
| 36 | $(obj)/r300.o: $(obj)/r300_reg_safe.h | 39 | $(obj)/r300.o: $(obj)/r300_reg_safe.h |
| 37 | 40 | ||
| 41 | $(obj)/r420.o: $(obj)/r420_reg_safe.h | ||
| 42 | |||
| 38 | $(obj)/rs600.o: $(obj)/rs600_reg_safe.h | 43 | $(obj)/rs600.o: $(obj)/rs600_reg_safe.h |
| 39 | 44 | ||
| 40 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ | 45 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ |
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h index 6d0183c61d3b..c714179d1bfa 100644 --- a/drivers/gpu/drm/radeon/ObjectID.h +++ b/drivers/gpu/drm/radeon/ObjectID.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright 2006-2007 Advanced Micro Devices, Inc. | 2 | * Copyright 2006-2007 Advanced Micro Devices, Inc. |
| 3 | * | 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
| @@ -41,14 +41,14 @@ | |||
| 41 | /****************************************************/ | 41 | /****************************************************/ |
| 42 | /* Encoder Object ID Definition */ | 42 | /* Encoder Object ID Definition */ |
| 43 | /****************************************************/ | 43 | /****************************************************/ |
| 44 | #define ENCODER_OBJECT_ID_NONE 0x00 | 44 | #define ENCODER_OBJECT_ID_NONE 0x00 |
| 45 | 45 | ||
| 46 | /* Radeon Class Display Hardware */ | 46 | /* Radeon Class Display Hardware */ |
| 47 | #define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01 | 47 | #define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01 |
| 48 | #define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02 | 48 | #define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02 |
| 49 | #define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03 | 49 | #define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03 |
| 50 | #define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04 | 50 | #define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04 |
| 51 | #define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */ | 51 | #define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */ |
| 52 | #define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06 | 52 | #define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06 |
| 53 | #define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07 | 53 | #define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07 |
| 54 | 54 | ||
| @@ -56,11 +56,11 @@ | |||
| 56 | #define ENCODER_OBJECT_ID_SI170B 0x08 | 56 | #define ENCODER_OBJECT_ID_SI170B 0x08 |
| 57 | #define ENCODER_OBJECT_ID_CH7303 0x09 | 57 | #define ENCODER_OBJECT_ID_CH7303 0x09 |
| 58 | #define ENCODER_OBJECT_ID_CH7301 0x0A | 58 | #define ENCODER_OBJECT_ID_CH7301 0x0A |
| 59 | #define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */ | 59 | #define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */ |
| 60 | #define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C | 60 | #define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C |
| 61 | #define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D | 61 | #define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D |
| 62 | #define ENCODER_OBJECT_ID_TITFP513 0x0E | 62 | #define ENCODER_OBJECT_ID_TITFP513 0x0E |
| 63 | #define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */ | 63 | #define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */ |
| 64 | #define ENCODER_OBJECT_ID_VT1623 0x10 | 64 | #define ENCODER_OBJECT_ID_VT1623 0x10 |
| 65 | #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 | 65 | #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 |
| 66 | #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 | 66 | #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 |
| @@ -68,9 +68,9 @@ | |||
| 68 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 | 68 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 |
| 69 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 | 69 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 |
| 70 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15 | 70 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15 |
| 71 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */ | 71 | #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */ |
| 72 | #define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */ | 72 | #define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */ |
| 73 | #define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */ | 73 | #define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */ |
| 74 | #define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19 | 74 | #define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19 |
| 75 | #define ENCODER_OBJECT_ID_VT1625 0x1A | 75 | #define ENCODER_OBJECT_ID_VT1625 0x1A |
| 76 | #define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B | 76 | #define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B |
| @@ -86,7 +86,7 @@ | |||
| 86 | /****************************************************/ | 86 | /****************************************************/ |
| 87 | /* Connector Object ID Definition */ | 87 | /* Connector Object ID Definition */ |
| 88 | /****************************************************/ | 88 | /****************************************************/ |
| 89 | #define CONNECTOR_OBJECT_ID_NONE 0x00 | 89 | #define CONNECTOR_OBJECT_ID_NONE 0x00 |
| 90 | #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01 | 90 | #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01 |
| 91 | #define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02 | 91 | #define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02 |
| 92 | #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03 | 92 | #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03 |
| @@ -96,7 +96,7 @@ | |||
| 96 | #define CONNECTOR_OBJECT_ID_SVIDEO 0x07 | 96 | #define CONNECTOR_OBJECT_ID_SVIDEO 0x07 |
| 97 | #define CONNECTOR_OBJECT_ID_YPbPr 0x08 | 97 | #define CONNECTOR_OBJECT_ID_YPbPr 0x08 |
| 98 | #define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09 | 98 | #define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09 |
| 99 | #define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */ | 99 | #define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */ |
| 100 | #define CONNECTOR_OBJECT_ID_SCART 0x0B | 100 | #define CONNECTOR_OBJECT_ID_SCART 0x0B |
| 101 | #define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C | 101 | #define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C |
| 102 | #define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D | 102 | #define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D |
| @@ -106,6 +106,8 @@ | |||
| 106 | #define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11 | 106 | #define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11 |
| 107 | #define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12 | 107 | #define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12 |
| 108 | #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 | 108 | #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 |
| 109 | #define CONNECTOR_OBJECT_ID_eDP 0x14 | ||
| 110 | #define CONNECTOR_OBJECT_ID_MXM 0x15 | ||
| 109 | 111 | ||
| 110 | /* deleted */ | 112 | /* deleted */ |
| 111 | 113 | ||
| @@ -116,6 +118,14 @@ | |||
| 116 | #define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01 | 118 | #define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01 |
| 117 | 119 | ||
| 118 | /****************************************************/ | 120 | /****************************************************/ |
| 121 | /* Generic Object ID Definition */ | ||
| 122 | /****************************************************/ | ||
| 123 | #define GENERIC_OBJECT_ID_NONE 0x00 | ||
| 124 | #define GENERIC_OBJECT_ID_GLSYNC 0x01 | ||
| 125 | #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 | ||
| 126 | #define GENERIC_OBJECT_ID_MXM_OPM 0x03 | ||
| 127 | |||
| 128 | /****************************************************/ | ||
| 119 | /* Graphics Object ENUM ID Definition */ | 129 | /* Graphics Object ENUM ID Definition */ |
| 120 | /****************************************************/ | 130 | /****************************************************/ |
| 121 | #define GRAPH_OBJECT_ENUM_ID1 0x01 | 131 | #define GRAPH_OBJECT_ENUM_ID1 0x01 |
| @@ -124,6 +134,7 @@ | |||
| 124 | #define GRAPH_OBJECT_ENUM_ID4 0x04 | 134 | #define GRAPH_OBJECT_ENUM_ID4 0x04 |
| 125 | #define GRAPH_OBJECT_ENUM_ID5 0x05 | 135 | #define GRAPH_OBJECT_ENUM_ID5 0x05 |
| 126 | #define GRAPH_OBJECT_ENUM_ID6 0x06 | 136 | #define GRAPH_OBJECT_ENUM_ID6 0x06 |
| 137 | #define GRAPH_OBJECT_ENUM_ID7 0x07 | ||
| 127 | 138 | ||
| 128 | /****************************************************/ | 139 | /****************************************************/ |
| 129 | /* Graphics Object ID Bit definition */ | 140 | /* Graphics Object ID Bit definition */ |
| @@ -133,35 +144,35 @@ | |||
| 133 | #define RESERVED1_ID_MASK 0x0800 | 144 | #define RESERVED1_ID_MASK 0x0800 |
| 134 | #define OBJECT_TYPE_MASK 0x7000 | 145 | #define OBJECT_TYPE_MASK 0x7000 |
| 135 | #define RESERVED2_ID_MASK 0x8000 | 146 | #define RESERVED2_ID_MASK 0x8000 |
| 136 | 147 | ||
| 137 | #define OBJECT_ID_SHIFT 0x00 | 148 | #define OBJECT_ID_SHIFT 0x00 |
| 138 | #define ENUM_ID_SHIFT 0x08 | 149 | #define ENUM_ID_SHIFT 0x08 |
| 139 | #define OBJECT_TYPE_SHIFT 0x0C | 150 | #define OBJECT_TYPE_SHIFT 0x0C |
| 140 | 151 | ||
| 152 | |||
| 141 | /****************************************************/ | 153 | /****************************************************/ |
| 142 | /* Graphics Object family definition */ | 154 | /* Graphics Object family definition */ |
| 143 | /****************************************************/ | 155 | /****************************************************/ |
| 144 | #define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \ | 156 | #define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \ |
| 145 | (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \ | 157 | GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT) |
| 146 | GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT) | ||
| 147 | /****************************************************/ | 158 | /****************************************************/ |
| 148 | /* GPU Object ID definition - Shared with BIOS */ | 159 | /* GPU Object ID definition - Shared with BIOS */ |
| 149 | /****************************************************/ | 160 | /****************************************************/ |
| 150 | #define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\ | 161 | #define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\ |
| 151 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) | 162 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) |
| 152 | 163 | ||
| 153 | /****************************************************/ | 164 | /****************************************************/ |
| 154 | /* Encoder Object ID definition - Shared with BIOS */ | 165 | /* Encoder Object ID definition - Shared with BIOS */ |
| 155 | /****************************************************/ | 166 | /****************************************************/ |
| 156 | /* | 167 | /* |
| 157 | #define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101 | 168 | #define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101 |
| 158 | #define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102 | 169 | #define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102 |
| 159 | #define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103 | 170 | #define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103 |
| 160 | #define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104 | 171 | #define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104 |
| 161 | #define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105 | 172 | #define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105 |
| 162 | #define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106 | 173 | #define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106 |
| 163 | #define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107 | 174 | #define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107 |
| 164 | #define ENCODER_SIL170B_ENUM_ID1 0x2108 | 175 | #define ENCODER_SIL170B_ENUM_ID1 0x2108 |
| 165 | #define ENCODER_CH7303_ENUM_ID1 0x2109 | 176 | #define ENCODER_CH7303_ENUM_ID1 0x2109 |
| 166 | #define ENCODER_CH7301_ENUM_ID1 0x210A | 177 | #define ENCODER_CH7301_ENUM_ID1 0x210A |
| 167 | #define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B | 178 | #define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B |
| @@ -175,8 +186,8 @@ | |||
| 175 | #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113 | 186 | #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113 |
| 176 | #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114 | 187 | #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114 |
| 177 | #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115 | 188 | #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115 |
| 178 | #define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116 | 189 | #define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116 |
| 179 | #define ENCODER_SI178_ENUM_ID1 0x2117 | 190 | #define ENCODER_SI178_ENUM_ID1 0x2117 |
| 180 | #define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118 | 191 | #define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118 |
| 181 | #define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119 | 192 | #define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119 |
| 182 | #define ENCODER_VT1625_ENUM_ID1 0x211A | 193 | #define ENCODER_VT1625_ENUM_ID1 0x211A |
| @@ -185,205 +196,169 @@ | |||
| 185 | #define ENCODER_DP_DP501_ENUM_ID1 0x211D | 196 | #define ENCODER_DP_DP501_ENUM_ID1 0x211D |
| 186 | #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E | 197 | #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E |
| 187 | */ | 198 | */ |
| 188 | #define ENCODER_INTERNAL_LVDS_ENUM_ID1 \ | 199 | #define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 189 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 200 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 190 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 201 | ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) |
| 191 | ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) | 202 | |
| 192 | 203 | #define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 193 | #define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \ | 204 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 194 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 205 | ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT) |
| 195 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 206 | |
| 196 | ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT) | 207 | #define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 197 | 208 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 198 | #define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \ | 209 | ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT) |
| 199 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 210 | |
| 200 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 211 | #define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 201 | ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT) | 212 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 202 | 213 | ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT) | |
| 203 | #define ENCODER_INTERNAL_DAC1_ENUM_ID1 \ | 214 | |
| 204 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 215 | #define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 205 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 216 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 206 | ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT) | 217 | ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT) |
| 207 | 218 | ||
| 208 | #define ENCODER_INTERNAL_DAC2_ENUM_ID1 \ | 219 | #define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 209 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 220 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 210 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 221 | ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) |
| 211 | ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT) | 222 | |
| 212 | 223 | #define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 213 | #define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \ | 224 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 214 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 225 | ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) |
| 215 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 226 | |
| 216 | ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) | 227 | #define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 217 | 228 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 218 | #define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \ | 229 | ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT) |
| 219 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 230 | |
| 220 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 231 | #define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 221 | ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT) | 232 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 222 | 233 | ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT) | |
| 223 | #define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \ | 234 | |
| 224 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 235 | #define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 225 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 236 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 226 | ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT) | 237 | ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT) |
| 227 | 238 | ||
| 228 | #define ENCODER_SIL170B_ENUM_ID1 \ | 239 | #define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 229 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 240 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 230 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 241 | ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT) |
| 231 | ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT) | 242 | |
| 232 | 243 | #define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 233 | #define ENCODER_CH7303_ENUM_ID1 \ | 244 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 234 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 245 | ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT) |
| 235 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 246 | |
| 236 | ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT) | 247 | #define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 237 | 248 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 238 | #define ENCODER_CH7301_ENUM_ID1 \ | 249 | ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) |
| 239 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 250 | |
| 240 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 251 | #define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 241 | ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT) | 252 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 242 | 253 | ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) | |
| 243 | #define ENCODER_INTERNAL_DVO1_ENUM_ID1 \ | 254 | |
| 244 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 255 | |
| 245 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 256 | #define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 246 | ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT) | 257 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 247 | 258 | ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT) | |
| 248 | #define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \ | 259 | |
| 249 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 260 | |
| 250 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 261 | #define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 251 | ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) | 262 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 252 | 263 | ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT) | |
| 253 | #define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \ | 264 | |
| 254 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 265 | #define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 255 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 266 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 256 | ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT) | 267 | ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT) |
| 257 | 268 | ||
| 258 | #define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \ | 269 | #define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 259 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 270 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 260 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 271 | ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT) |
| 261 | ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT) | 272 | |
| 262 | 273 | #define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 263 | #define ENCODER_TITFP513_ENUM_ID1 \ | 274 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 264 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 275 | ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT) |
| 265 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 276 | |
| 266 | ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT) | 277 | #define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 267 | 278 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 268 | #define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \ | 279 | ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT) |
| 269 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 280 | |
| 270 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 281 | #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 271 | ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT) | 282 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 272 | 283 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) | |
| 273 | #define ENCODER_VT1623_ENUM_ID1 \ | 284 | |
| 274 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 285 | |
| 275 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 286 | #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 276 | ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT) | 287 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 277 | 288 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) | |
| 278 | #define ENCODER_HDMI_SI1930_ENUM_ID1 \ | 289 | |
| 279 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 290 | |
| 280 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 291 | #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 281 | ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT) | 292 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 282 | 293 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT) | |
| 283 | #define ENCODER_HDMI_INTERNAL_ENUM_ID1 \ | 294 | |
| 284 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 295 | #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 285 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 296 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 286 | ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT) | 297 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT) |
| 287 | 298 | ||
| 288 | #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \ | 299 | #define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 289 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 300 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 290 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 301 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT |
| 291 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) | 302 | |
| 292 | 303 | #define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 293 | #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \ | 304 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 294 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 305 | ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT) |
| 295 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 306 | |
| 296 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT) | 307 | #define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 297 | 308 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 298 | #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \ | 309 | ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT) |
| 299 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 310 | |
| 300 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 311 | #define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 301 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT) | 312 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 302 | 313 | ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) | |
| 303 | #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \ | 314 | |
| 304 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 315 | #define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 305 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 316 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 306 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT) | 317 | ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) |
| 307 | 318 | ||
| 308 | #define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \ | 319 | #define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 309 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 320 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 310 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 321 | ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) |
| 311 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */ | 322 | |
| 312 | 323 | #define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 313 | #define ENCODER_SI178_ENUM_ID1 \ | 324 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 314 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 325 | ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) |
| 315 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 326 | |
| 316 | ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT) | 327 | #define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 317 | 328 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 318 | #define ENCODER_MVPU_FPGA_ENUM_ID1 \ | 329 | ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) |
| 319 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 330 | |
| 320 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 331 | #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 321 | ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT) | 332 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 322 | 333 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) | |
| 323 | #define ENCODER_INTERNAL_DDI_ENUM_ID1 \ | 334 | |
| 324 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 335 | #define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 325 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 336 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 326 | ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) | 337 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) |
| 327 | 338 | ||
| 328 | #define ENCODER_VT1625_ENUM_ID1 \ | 339 | #define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 329 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 340 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 330 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 341 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT) |
| 331 | ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT) | 342 | |
| 332 | 343 | #define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | |
| 333 | #define ENCODER_HDMI_SI1932_ENUM_ID1 \ | 344 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 334 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 345 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) |
| 335 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 346 | |
| 336 | ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT) | 347 | #define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 337 | 348 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | |
| 338 | #define ENCODER_DP_DP501_ENUM_ID1 \ | 349 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) |
| 339 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 350 | |
| 340 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 351 | #define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 341 | ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT) | 352 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 342 | 353 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) | |
| 343 | #define ENCODER_DP_AN9801_ENUM_ID1 \ | 354 | |
| 344 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 355 | #define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 345 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 356 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 346 | ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT) | 357 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) |
| 347 | 358 | ||
| 348 | #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \ | 359 | #define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ |
| 349 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | 360 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 350 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 361 | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) |
| 351 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) | ||
| 352 | |||
| 353 | #define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \ | ||
| 354 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 355 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 356 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT) | ||
| 357 | |||
| 358 | #define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \ | ||
| 359 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 360 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 361 | ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT) | ||
| 362 | |||
| 363 | #define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \ | ||
| 364 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 365 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 366 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) | ||
| 367 | |||
| 368 | #define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \ | ||
| 369 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 370 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 371 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT) | ||
| 372 | |||
| 373 | #define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \ | ||
| 374 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 375 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 376 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) | ||
| 377 | |||
| 378 | #define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \ | ||
| 379 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 380 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 381 | ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT) | ||
| 382 | |||
| 383 | #define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \ | ||
| 384 | (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ | ||
| 385 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 386 | ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) | ||
| 387 | 362 | ||
| 388 | /****************************************************/ | 363 | /****************************************************/ |
| 389 | /* Connector Object ID definition - Shared with BIOS */ | 364 | /* Connector Object ID definition - Shared with BIOS */ |
| @@ -406,167 +381,253 @@ | |||
| 406 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F | 381 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F |
| 407 | #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110 | 382 | #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110 |
| 408 | */ | 383 | */ |
| 409 | #define CONNECTOR_LVDS_ENUM_ID1 \ | 384 | #define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 410 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 385 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 411 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 386 | CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) |
| 412 | CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) | 387 | |
| 413 | 388 | #define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 414 | #define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \ | 389 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 415 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 390 | CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT) |
| 416 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 391 | |
| 417 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) | 392 | #define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 418 | 393 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 419 | #define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \ | 394 | CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT) |
| 420 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 395 | |
| 421 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 396 | #define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 422 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) | 397 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 423 | 398 | CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT) | |
| 424 | #define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \ | 399 | |
| 425 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 400 | #define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 426 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 401 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 427 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) | 402 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) |
| 428 | 403 | ||
| 429 | #define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \ | 404 | #define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 430 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 405 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 431 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 406 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT) |
| 432 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) | 407 | |
| 433 | 408 | #define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 434 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \ | 409 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 435 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 410 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) |
| 436 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 411 | |
| 437 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | 412 | #define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 438 | 413 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | |
| 439 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \ | 414 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT) |
| 440 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 415 | |
| 441 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 416 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 442 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | 417 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 443 | 418 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) | |
| 444 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \ | 419 | |
| 445 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 420 | #define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 446 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 421 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 447 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) | 422 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) |
| 448 | 423 | ||
| 449 | #define CONNECTOR_VGA_ENUM_ID1 \ | 424 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 450 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 425 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 451 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 426 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) |
| 452 | CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) | 427 | |
| 453 | 428 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 454 | #define CONNECTOR_VGA_ENUM_ID2 \ | 429 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 455 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 430 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) |
| 456 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 431 | |
| 457 | CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) | 432 | #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 458 | 433 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ | |
| 459 | #define CONNECTOR_COMPOSITE_ENUM_ID1 \ | 434 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) |
| 460 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 435 | |
| 461 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 436 | #define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 462 | CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) | 437 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 463 | 438 | CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) | |
| 464 | #define CONNECTOR_SVIDEO_ENUM_ID1 \ | 439 | |
| 465 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 440 | #define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 466 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 441 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 467 | CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) | 442 | CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT) |
| 468 | 443 | ||
| 469 | #define CONNECTOR_YPbPr_ENUM_ID1 \ | 444 | #define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 470 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 445 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 471 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 446 | CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) |
| 472 | CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) | 447 | |
| 473 | 448 | #define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 474 | #define CONNECTOR_D_CONNECTOR_ENUM_ID1 \ | 449 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 475 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 450 | CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT) |
| 476 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 451 | |
| 477 | CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) | 452 | #define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 478 | 453 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 479 | #define CONNECTOR_9PIN_DIN_ENUM_ID1 \ | 454 | CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) |
| 480 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 455 | |
| 481 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 456 | #define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 482 | CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) | 457 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 483 | 458 | CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT) | |
| 484 | #define CONNECTOR_SCART_ENUM_ID1 \ | 459 | |
| 485 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 460 | #define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 486 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 461 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 487 | CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) | 462 | CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) |
| 488 | 463 | ||
| 489 | #define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \ | 464 | #define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 490 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 465 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 491 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 466 | CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT) |
| 492 | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) | 467 | |
| 493 | 468 | #define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 494 | #define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \ | 469 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 495 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 470 | CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) |
| 496 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 471 | |
| 497 | CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) | 472 | #define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 498 | 473 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | |
| 499 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 \ | 474 | CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT) |
| 500 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 475 | |
| 501 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 476 | #define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 502 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) | 477 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 503 | 478 | CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) | |
| 504 | #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \ | 479 | |
| 505 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 480 | #define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 506 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 481 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 507 | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) | 482 | CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT) |
| 508 | 483 | ||
| 509 | #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \ | 484 | #define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 510 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 485 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 511 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 486 | CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) |
| 512 | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) | 487 | |
| 513 | 488 | #define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 514 | #define CONNECTOR_CROSSFIRE_ENUM_ID1 \ | 489 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 515 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 490 | CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT) |
| 516 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 491 | |
| 517 | CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) | 492 | #define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 518 | 493 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 519 | #define CONNECTOR_CROSSFIRE_ENUM_ID2 \ | 494 | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) |
| 520 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 495 | |
| 521 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 496 | #define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 522 | CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) | 497 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 523 | 498 | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) | |
| 524 | #define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \ | 499 | |
| 525 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 500 | #define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 526 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 501 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ |
| 527 | CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) | 502 | CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT) |
| 528 | 503 | ||
| 529 | #define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \ | 504 | #define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 530 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 505 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 531 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 506 | CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) |
| 532 | CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) | 507 | |
| 533 | 508 | #define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 534 | #define CONNECTOR_DISPLAYPORT_ENUM_ID1 \ | 509 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 535 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 510 | CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT) |
| 536 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 511 | |
| 537 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | 512 | #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 538 | 513 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | |
| 539 | #define CONNECTOR_DISPLAYPORT_ENUM_ID2 \ | 514 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) |
| 540 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 515 | #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 541 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | 516 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 542 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | 517 | CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) |
| 543 | 518 | ||
| 544 | #define CONNECTOR_DISPLAYPORT_ENUM_ID3 \ | 519 | #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 545 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 520 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 546 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ | 521 | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) |
| 547 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | 522 | |
| 548 | 523 | #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | |
| 549 | #define CONNECTOR_DISPLAYPORT_ENUM_ID4 \ | 524 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ |
| 550 | (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | 525 | CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT) |
| 551 | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ | 526 | |
| 552 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | 527 | #define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ |
| 528 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 529 | CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) | ||
| 530 | |||
| 531 | #define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 532 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 533 | CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT) | ||
| 534 | |||
| 535 | |||
| 536 | #define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 537 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 538 | CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) | ||
| 539 | |||
| 540 | #define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 541 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 542 | CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT) | ||
| 543 | |||
| 544 | #define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 545 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 546 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | ||
| 547 | |||
| 548 | #define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 549 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 550 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | ||
| 551 | |||
| 552 | #define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 553 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ | ||
| 554 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | ||
| 555 | |||
| 556 | #define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 557 | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ | ||
| 558 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | ||
| 559 | |||
| 560 | #define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 561 | GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\ | ||
| 562 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | ||
| 563 | |||
| 564 | #define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 565 | GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\ | ||
| 566 | CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT) | ||
| 567 | |||
| 568 | #define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 569 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 570 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A | ||
| 571 | |||
| 572 | #define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 573 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 574 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B | ||
| 575 | |||
| 576 | #define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 577 | GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ | ||
| 578 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C | ||
| 579 | |||
| 580 | #define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 581 | GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ | ||
| 582 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D | ||
| 583 | |||
| 584 | #define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 585 | GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\ | ||
| 586 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx | ||
| 587 | |||
| 588 | #define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 589 | GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\ | ||
| 590 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx | ||
| 591 | |||
| 592 | #define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ | ||
| 593 | GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ | ||
| 594 | CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC | ||
| 553 | 595 | ||
| 554 | /****************************************************/ | 596 | /****************************************************/ |
| 555 | /* Router Object ID definition - Shared with BIOS */ | 597 | /* Router Object ID definition - Shared with BIOS */ |
| 556 | /****************************************************/ | 598 | /****************************************************/ |
| 557 | #define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \ | 599 | #define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ |
| 558 | (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\ | 600 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ |
| 559 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | 601 | ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT) |
| 560 | ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT) | ||
| 561 | 602 | ||
| 562 | /* deleted */ | 603 | /* deleted */ |
| 563 | 604 | ||
| 564 | /****************************************************/ | 605 | /****************************************************/ |
| 606 | /* Generic Object ID definition - Shared with BIOS */ | ||
| 607 | /****************************************************/ | ||
| 608 | #define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ | ||
| 609 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 610 | GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT) | ||
| 611 | |||
| 612 | #define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ | ||
| 613 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 614 | GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT) | ||
| 615 | |||
| 616 | #define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ | ||
| 617 | GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ | ||
| 618 | GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT) | ||
| 619 | |||
| 620 | #define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ | ||
| 621 | GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ | ||
| 622 | GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) | ||
| 623 | |||
| 624 | /****************************************************/ | ||
| 565 | /* Object Cap definition - Shared with BIOS */ | 625 | /* Object Cap definition - Shared with BIOS */ |
| 566 | /****************************************************/ | 626 | /****************************************************/ |
| 567 | #define GRAPHICS_OBJECT_CAP_I2C 0x00000001L | 627 | #define GRAPHICS_OBJECT_CAP_I2C 0x00000001L |
| 568 | #define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L | 628 | #define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L |
| 569 | 629 | ||
| 630 | |||
| 570 | #define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01 | 631 | #define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01 |
| 571 | #define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02 | 632 | #define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02 |
| 572 | #define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03 | 633 | #define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03 |
| @@ -575,4 +636,8 @@ | |||
| 575 | #pragma pack() | 636 | #pragma pack() |
| 576 | #endif | 637 | #endif |
| 577 | 638 | ||
| 578 | #endif /*GRAPHICTYPE */ | 639 | #endif /*GRAPHICTYPE */ |
| 640 | |||
| 641 | |||
| 642 | |||
| 643 | |||
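Each composite ID above packs an object type, an enum instance and an object ID into one value. As a reading aid, a small decode sketch follows; the shift values and field widths are assumptions inferred from the raw connector IDs quoted in the comment block (e.g. 0x310F), the authoritative definitions live earlier in ObjectID.h:

        /* Assumed layout: type in bits 12+, enum in bits 8+, object id in bits 0-7. */
        #define ASSUMED_OBJECT_TYPE_SHIFT 12
        #define ASSUMED_ENUM_ID_SHIFT      8
        #define ASSUMED_OBJECT_ID_SHIFT    0

        static inline void decode_graph_object_id(unsigned int id,
                                                  unsigned int *type,
                                                  unsigned int *enum_id,
                                                  unsigned int *object_id)
        {
                *type      = (id >> ASSUMED_OBJECT_TYPE_SHIFT) & 0xf;  /* e.g. GRAPH_OBJECT_TYPE_CONNECTOR */
                *enum_id   = (id >> ASSUMED_ENUM_ID_SHIFT)     & 0xf;  /* GRAPH_OBJECT_ENUM_ID1, ID2, ...  */
                *object_id = (id >> ASSUMED_OBJECT_ID_SHIFT)   & 0xff; /* CONNECTOR_OBJECT_ID_*, etc.      */
        }

Under those assumptions, CONNECTOR_7PIN_DIN_ENUM_ID1 (0x310F, from the comment block above) decodes to type 0x3, enum instance 1, object ID 0x0F.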
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 0d63c4436e7c..3eb0ca5b3d73 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
| @@ -468,7 +468,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector, | |||
| 468 | struct radeon_connector *radeon_connector; | 468 | struct radeon_connector *radeon_connector; |
| 469 | struct radeon_connector_atom_dig *dig_connector; | 469 | struct radeon_connector_atom_dig *dig_connector; |
| 470 | 470 | ||
| 471 | if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) | 471 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || |
| 472 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) | ||
| 472 | return; | 473 | return; |
| 473 | 474 | ||
| 474 | radeon_connector = to_radeon_connector(connector); | 475 | radeon_connector = to_radeon_connector(connector); |
| @@ -582,7 +583,8 @@ void dp_link_train(struct drm_encoder *encoder, | |||
| 582 | u8 train_set[4]; | 583 | u8 train_set[4]; |
| 583 | int i; | 584 | int i; |
| 584 | 585 | ||
| 585 | if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) | 586 | if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) || |
| 587 | (connector->connector_type != DRM_MODE_CONNECTOR_eDP)) | ||
| 586 | return; | 588 | return; |
| 587 | 589 | ||
| 588 | if (!radeon_encoder->enc_priv) | 590 | if (!radeon_encoder->enc_priv) |
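Both hunks above extend the guard that decides whether the DP link-configuration and link-training code runs at all, so that eDP panels are handled as well as regular DisplayPort. Written out on its own, the "skip anything that is neither DisplayPort nor eDP" early-out looks like the following minimal sketch; note that excluding both connector types requires combining the two inequalities with a logical AND, since a single connector_type value can never differ from only one of them:

        /* hedged sketch of the intended early-out, not the exact hunk above */
        if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
            (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
                return;     /* neither DP nor eDP: nothing to configure */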
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 0d79577c1576..607241c6a8a9 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
| @@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename) | |||
| 661 | fseek(file, 0, SEEK_SET); | 661 | fseek(file, 0, SEEK_SET); |
| 662 | 662 | ||
| 663 | /* get header */ | 663 | /* get header */ |
| 664 | if (fgets(buf, 1024, file) == NULL) | 664 | if (fgets(buf, 1024, file) == NULL) { |
| 665 | fclose(file); | ||
| 665 | return -1; | 666 | return -1; |
| 667 | } | ||
| 666 | 668 | ||
| 667 | /* first line will contain the last register | 669 | /* first line will contain the last register |
| 668 | * and gpu name */ | 670 | * and gpu name */ |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 71727460968f..8760d66e058a 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev) | |||
| 131 | break; | 131 | break; |
| 132 | } | 132 | } |
| 133 | } | 133 | } |
| 134 | r100_irq_set(rdev); | 134 | if (rdev->irq.installed) |
| 135 | r100_irq_set(rdev); | ||
| 135 | } | 136 | } |
| 136 | 137 | ||
| 137 | void r100_hpd_fini(struct radeon_device *rdev) | 138 | void r100_hpd_fini(struct radeon_device *rdev) |
| @@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev) | |||
| 243 | { | 244 | { |
| 244 | uint32_t tmp = 0; | 245 | uint32_t tmp = 0; |
| 245 | 246 | ||
| 247 | if (!rdev->irq.installed) { | ||
| 248 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | ||
| 249 | WREG32(R_000040_GEN_INT_CNTL, 0); | ||
| 250 | return -EINVAL; | ||
| 251 | } | ||
| 246 | if (rdev->irq.sw_int) { | 252 | if (rdev->irq.sw_int) { |
| 247 | tmp |= RADEON_SW_INT_ENABLE; | 253 | tmp |= RADEON_SW_INT_ENABLE; |
| 248 | } | 254 | } |
| @@ -356,6 +362,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev, | |||
| 356 | /* Wait until IDLE & CLEAN */ | 362 | /* Wait until IDLE & CLEAN */ |
| 357 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); | 363 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); |
| 358 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); | 364 | radeon_ring_write(rdev, (1 << 16) | (1 << 17)); |
| 365 | radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | ||
| 366 | radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | | ||
| 367 | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
| 368 | radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | ||
| 369 | radeon_ring_write(rdev, rdev->config.r100.hdp_cntl); | ||
| 359 | /* Emit fence sequence & fire IRQ */ | 370 | /* Emit fence sequence & fire IRQ */ |
| 360 | radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); | 371 | radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); |
| 361 | radeon_ring_write(rdev, fence->seq); | 372 | radeon_ring_write(rdev, fence->seq); |
| @@ -1713,14 +1724,6 @@ void r100_gpu_init(struct radeon_device *rdev) | |||
| 1713 | r100_hdp_reset(rdev); | 1724 | r100_hdp_reset(rdev); |
| 1714 | } | 1725 | } |
| 1715 | 1726 | ||
| 1716 | void r100_hdp_flush(struct radeon_device *rdev) | ||
| 1717 | { | ||
| 1718 | u32 tmp; | ||
| 1719 | tmp = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 1720 | tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE; | ||
| 1721 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
| 1722 | } | ||
| 1723 | |||
| 1724 | void r100_hdp_reset(struct radeon_device *rdev) | 1727 | void r100_hdp_reset(struct radeon_device *rdev) |
| 1725 | { | 1728 | { |
| 1726 | uint32_t tmp; | 1729 | uint32_t tmp; |
| @@ -3313,6 +3316,7 @@ static int r100_startup(struct radeon_device *rdev) | |||
| 3313 | } | 3316 | } |
| 3314 | /* Enable IRQ */ | 3317 | /* Enable IRQ */ |
| 3315 | r100_irq_set(rdev); | 3318 | r100_irq_set(rdev); |
| 3319 | rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 3316 | /* 1M ring buffer */ | 3320 | /* 1M ring buffer */ |
| 3317 | r = r100_cp_init(rdev, 1024 * 1024); | 3321 | r = r100_cp_init(rdev, 1024 * 1024); |
| 3318 | if (r) { | 3322 | if (r) { |
| @@ -3371,6 +3375,7 @@ void r100_fini(struct radeon_device *rdev) | |||
| 3371 | radeon_gem_fini(rdev); | 3375 | radeon_gem_fini(rdev); |
| 3372 | if (rdev->flags & RADEON_IS_PCI) | 3376 | if (rdev->flags & RADEON_IS_PCI) |
| 3373 | r100_pci_gart_fini(rdev); | 3377 | r100_pci_gart_fini(rdev); |
| 3378 | radeon_agp_fini(rdev); | ||
| 3374 | radeon_irq_kms_fini(rdev); | 3379 | radeon_irq_kms_fini(rdev); |
| 3375 | radeon_fence_driver_fini(rdev); | 3380 | radeon_fence_driver_fini(rdev); |
| 3376 | radeon_bo_fini(rdev); | 3381 | radeon_bo_fini(rdev); |
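r100_irq_set() (and, further down, r600_irq_set()) now share a defensive pattern: refuse to program the interrupt sources, and warn, if no IRQ handler has been installed yet, which is what lets the hpd_init paths above run safely before IRQ setup. The shape of the guard, shown here only for reference with the names used in the hunks:

        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
                return -EINVAL;
        }
        /* only past this point are the interrupt control registers touched */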
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 3f2cc9e2e8d9..0051d11b907c 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -36,7 +36,15 @@ | |||
| 36 | #include "rv350d.h" | 36 | #include "rv350d.h" |
| 37 | #include "r300_reg_safe.h" | 37 | #include "r300_reg_safe.h" |
| 38 | 38 | ||
| 39 | /* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ | 39 | /* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 |
| 40 | * | ||
| 41 | * GPU Errata: | ||
| 42 | * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL | ||
| 43 | * via MMIO to flush the host path read cache; this leads to a HARDLOCKUP. | ||
| 44 | * However, scheduling such a write through the ring seems harmless; I suspect | ||
| 45 | * the CP read collides with the flush somehow, or maybe the MC does; hard to | ||
| 46 | * tell. (Jerome Glisse) | ||
| 47 | */ | ||
| 40 | 48 | ||
| 41 | /* | 49 | /* |
| 42 | * rv370,rv380 PCIE GART | 50 | * rv370,rv380 PCIE GART |
| @@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev, | |||
| 178 | /* Wait until IDLE & CLEAN */ | 186 | /* Wait until IDLE & CLEAN */ |
| 179 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); | 187 | radeon_ring_write(rdev, PACKET0(0x1720, 0)); |
| 180 | radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); | 188 | radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); |
| 189 | radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | ||
| 190 | radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | | ||
| 191 | RADEON_HDP_READ_BUFFER_INVALIDATE); | ||
| 192 | radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | ||
| 193 | radeon_ring_write(rdev, rdev->config.r300.hdp_cntl); | ||
| 181 | /* Emit fence sequence & fire IRQ */ | 194 | /* Emit fence sequence & fire IRQ */ |
| 182 | radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); | 195 | radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0)); |
| 183 | radeon_ring_write(rdev, fence->seq); | 196 | radeon_ring_write(rdev, fence->seq); |
| @@ -1258,6 +1271,7 @@ static int r300_startup(struct radeon_device *rdev) | |||
| 1258 | } | 1271 | } |
| 1259 | /* Enable IRQ */ | 1272 | /* Enable IRQ */ |
| 1260 | r100_irq_set(rdev); | 1273 | r100_irq_set(rdev); |
| 1274 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 1261 | /* 1M ring buffer */ | 1275 | /* 1M ring buffer */ |
| 1262 | r = r100_cp_init(rdev, 1024 * 1024); | 1276 | r = r100_cp_init(rdev, 1024 * 1024); |
| 1263 | if (r) { | 1277 | if (r) { |
| @@ -1322,6 +1336,7 @@ void r300_fini(struct radeon_device *rdev) | |||
| 1322 | rv370_pcie_gart_fini(rdev); | 1336 | rv370_pcie_gart_fini(rdev); |
| 1323 | if (rdev->flags & RADEON_IS_PCI) | 1337 | if (rdev->flags & RADEON_IS_PCI) |
| 1324 | r100_pci_gart_fini(rdev); | 1338 | r100_pci_gart_fini(rdev); |
| 1339 | radeon_agp_fini(rdev); | ||
| 1325 | radeon_irq_kms_fini(rdev); | 1340 | radeon_irq_kms_fini(rdev); |
| 1326 | radeon_fence_driver_fini(rdev); | 1341 | radeon_fence_driver_fini(rdev); |
| 1327 | radeon_bo_fini(rdev); | 1342 | radeon_bo_fini(rdev); |
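The GPU errata note at the top of r300.c explains the pattern used in both fence-emit hunks: instead of poking HOST_PATH_CNTL over MMIO (the old r100_hdp_flush(), removed above), the read-buffer invalidate is scheduled through the CP as part of every fence. A hedged sketch of that workaround, pulled out into a hypothetical helper; the real code inlines these four writes in r100_fence_ring_emit() and r300_fence_ring_emit(), using the hdp_cntl value cached from RADEON_HOST_PATH_CNTL at startup:

        /* hypothetical helper name; illustrates the flush-via-ring workaround */
        static void rxxx_hdp_flush_on_ring(struct radeon_device *rdev, u32 hdp_cntl)
        {
                /* toggle HDP_READ_BUFFER_INVALIDATE via the CP instead of MMIO */
                radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
                radeon_ring_write(rdev, hdp_cntl | RADEON_HDP_READ_BUFFER_INVALIDATE);
                radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
                radeon_ring_write(rdev, hdp_cntl);
        }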
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index c05a7270cf0c..053404e71a9d 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -30,7 +30,15 @@ | |||
| 30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
| 31 | #include "radeon.h" | 31 | #include "radeon.h" |
| 32 | #include "atom.h" | 32 | #include "atom.h" |
| 33 | #include "r100d.h" | ||
| 33 | #include "r420d.h" | 34 | #include "r420d.h" |
| 35 | #include "r420_reg_safe.h" | ||
| 36 | |||
| 37 | static void r420_set_reg_safe(struct radeon_device *rdev) | ||
| 38 | { | ||
| 39 | rdev->config.r300.reg_safe_bm = r420_reg_safe_bm; | ||
| 40 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); | ||
| 41 | } | ||
| 34 | 42 | ||
| 35 | int r420_mc_init(struct radeon_device *rdev) | 43 | int r420_mc_init(struct radeon_device *rdev) |
| 36 | { | 44 | { |
| @@ -165,6 +173,34 @@ static void r420_clock_resume(struct radeon_device *rdev) | |||
| 165 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); | 173 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); |
| 166 | } | 174 | } |
| 167 | 175 | ||
| 176 | static void r420_cp_errata_init(struct radeon_device *rdev) | ||
| 177 | { | ||
| 178 | /* RV410 and R420 can lock up if CP DMA to host memory happens | ||
| 179 | * while the 2D engine is busy. | ||
| 180 | * | ||
| 181 | * The proper workaround is to queue a RESYNC at the beginning | ||
| 182 | * of the CP init, apparently. | ||
| 183 | */ | ||
| 184 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); | ||
| 185 | radeon_ring_lock(rdev, 8); | ||
| 186 | radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1)); | ||
| 187 | radeon_ring_write(rdev, rdev->config.r300.resync_scratch); | ||
| 188 | radeon_ring_write(rdev, 0xDEADBEEF); | ||
| 189 | radeon_ring_unlock_commit(rdev); | ||
| 190 | } | ||
| 191 | |||
| 192 | static void r420_cp_errata_fini(struct radeon_device *rdev) | ||
| 193 | { | ||
| 194 | /* Catch the RESYNC we dispatched all the way back, | ||
| 195 | * at the very beginning of the CP init. | ||
| 196 | */ | ||
| 197 | radeon_ring_lock(rdev, 8); | ||
| 198 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
| 199 | radeon_ring_write(rdev, R300_RB3D_DC_FINISH); | ||
| 200 | radeon_ring_unlock_commit(rdev); | ||
| 201 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); | ||
| 202 | } | ||
| 203 | |||
| 168 | static int r420_startup(struct radeon_device *rdev) | 204 | static int r420_startup(struct radeon_device *rdev) |
| 169 | { | 205 | { |
| 170 | int r; | 206 | int r; |
| @@ -190,12 +226,14 @@ static int r420_startup(struct radeon_device *rdev) | |||
| 190 | r420_pipes_init(rdev); | 226 | r420_pipes_init(rdev); |
| 191 | /* Enable IRQ */ | 227 | /* Enable IRQ */ |
| 192 | r100_irq_set(rdev); | 228 | r100_irq_set(rdev); |
| 229 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 193 | /* 1M ring buffer */ | 230 | /* 1M ring buffer */ |
| 194 | r = r100_cp_init(rdev, 1024 * 1024); | 231 | r = r100_cp_init(rdev, 1024 * 1024); |
| 195 | if (r) { | 232 | if (r) { |
| 196 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | 233 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); |
| 197 | return r; | 234 | return r; |
| 198 | } | 235 | } |
| 236 | r420_cp_errata_init(rdev); | ||
| 199 | r = r100_wb_init(rdev); | 237 | r = r100_wb_init(rdev); |
| 200 | if (r) { | 238 | if (r) { |
| 201 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | 239 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); |
| @@ -238,6 +276,7 @@ int r420_resume(struct radeon_device *rdev) | |||
| 238 | 276 | ||
| 239 | int r420_suspend(struct radeon_device *rdev) | 277 | int r420_suspend(struct radeon_device *rdev) |
| 240 | { | 278 | { |
| 279 | r420_cp_errata_fini(rdev); | ||
| 241 | r100_cp_disable(rdev); | 280 | r100_cp_disable(rdev); |
| 242 | r100_wb_disable(rdev); | 281 | r100_wb_disable(rdev); |
| 243 | r100_irq_disable(rdev); | 282 | r100_irq_disable(rdev); |
| @@ -346,7 +385,7 @@ int r420_init(struct radeon_device *rdev) | |||
| 346 | if (r) | 385 | if (r) |
| 347 | return r; | 386 | return r; |
| 348 | } | 387 | } |
| 349 | r300_set_reg_safe(rdev); | 388 | r420_set_reg_safe(rdev); |
| 350 | rdev->accel_working = true; | 389 | rdev->accel_working = true; |
| 351 | r = r420_startup(rdev); | 390 | r = r420_startup(rdev); |
| 352 | if (r) { | 391 | if (r) { |
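The two errata helpers above are meant to bracket the CP's lifetime: the RESYNC marker is queued right after the CP comes up and retired just before it is shut down. Condensed from the startup and suspend hunks above, the pairing looks like this:

        /* r420_startup(): bring the CP up, then immediately queue the RESYNC */
        r = r100_cp_init(rdev, 1024 * 1024);
        r420_cp_errata_init(rdev);

        /* r420_suspend(): retire the RESYNC before the CP is disabled */
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);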
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 0f3843b6dac7..9a189072f2b9 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev) | |||
| 186 | } | 186 | } |
| 187 | /* Enable IRQ */ | 187 | /* Enable IRQ */ |
| 188 | rs600_irq_set(rdev); | 188 | rs600_irq_set(rdev); |
| 189 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 189 | /* 1M ring buffer */ | 190 | /* 1M ring buffer */ |
| 190 | r = r100_cp_init(rdev, 1024 * 1024); | 191 | r = r100_cp_init(rdev, 1024 * 1024); |
| 191 | if (r) { | 192 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a0ac3c134b1b..c0651991c3e4 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
| 285 | } | 285 | } |
| 286 | } | 286 | } |
| 287 | } | 287 | } |
| 288 | r600_irq_set(rdev); | 288 | if (rdev->irq.installed) |
| 289 | r600_irq_set(rdev); | ||
| 289 | } | 290 | } |
| 290 | 291 | ||
| 291 | void r600_hpd_fini(struct radeon_device *rdev) | 292 | void r600_hpd_fini(struct radeon_device *rdev) |
| @@ -726,6 +727,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
| 726 | a.full = rfixed_const(100); | 727 | a.full = rfixed_const(100); |
| 727 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | 728 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); |
| 728 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | 729 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
| 730 | |||
| 731 | if (rdev->flags & RADEON_IS_IGP) | ||
| 732 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | ||
| 733 | |||
| 729 | return 0; | 734 | return 0; |
| 730 | } | 735 | } |
| 731 | 736 | ||
| @@ -1384,11 +1389,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
| 1384 | (void)RREG32(PCIE_PORT_DATA); | 1389 | (void)RREG32(PCIE_PORT_DATA); |
| 1385 | } | 1390 | } |
| 1386 | 1391 | ||
| 1387 | void r600_hdp_flush(struct radeon_device *rdev) | ||
| 1388 | { | ||
| 1389 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
| 1390 | } | ||
| 1391 | |||
| 1392 | /* | 1392 | /* |
| 1393 | * CP & Ring | 1393 | * CP & Ring |
| 1394 | */ | 1394 | */ |
| @@ -1785,6 +1785,8 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
| 1785 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 1785 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
| 1786 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 1786 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
| 1787 | radeon_ring_write(rdev, fence->seq); | 1787 | radeon_ring_write(rdev, fence->seq); |
| 1788 | radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); | ||
| 1789 | radeon_ring_write(rdev, 1); | ||
| 1788 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | 1790 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ |
| 1789 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | 1791 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); |
| 1790 | radeon_ring_write(rdev, RB_INT_STAT); | 1792 | radeon_ring_write(rdev, RB_INT_STAT); |
| @@ -2089,8 +2091,7 @@ void r600_fini(struct radeon_device *rdev) | |||
| 2089 | radeon_gem_fini(rdev); | 2091 | radeon_gem_fini(rdev); |
| 2090 | radeon_fence_driver_fini(rdev); | 2092 | radeon_fence_driver_fini(rdev); |
| 2091 | radeon_clocks_fini(rdev); | 2093 | radeon_clocks_fini(rdev); |
| 2092 | if (rdev->flags & RADEON_IS_AGP) | 2094 | radeon_agp_fini(rdev); |
| 2093 | radeon_agp_fini(rdev); | ||
| 2094 | radeon_bo_fini(rdev); | 2095 | radeon_bo_fini(rdev); |
| 2095 | radeon_atombios_fini(rdev); | 2096 | radeon_atombios_fini(rdev); |
| 2096 | kfree(rdev->bios); | 2097 | kfree(rdev->bios); |
| @@ -2461,6 +2462,10 @@ int r600_irq_set(struct radeon_device *rdev) | |||
| 2461 | u32 mode_int = 0; | 2462 | u32 mode_int = 0; |
| 2462 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 2463 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
| 2463 | 2464 | ||
| 2465 | if (!rdev->irq.installed) { | ||
| 2466 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | ||
| 2467 | return -EINVAL; | ||
| 2468 | } | ||
| 2464 | /* don't enable anything if the ih is disabled */ | 2469 | /* don't enable anything if the ih is disabled */ |
| 2465 | if (!rdev->ih.enabled) | 2470 | if (!rdev->ih.enabled) |
| 2466 | return 0; | 2471 | return 0; |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 9aecafb51b66..8787ea89dc6e 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
| @@ -577,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
| 577 | ring_size = num_loops * dwords_per_loop; | 577 | ring_size = num_loops * dwords_per_loop; |
| 578 | /* set default + shaders */ | 578 | /* set default + shaders */ |
| 579 | ring_size += 40; /* shaders + def state */ | 579 | ring_size += 40; /* shaders + def state */ |
| 580 | ring_size += 5; /* fence emit for VB IB */ | 580 | ring_size += 7; /* fence emit for VB IB */ |
| 581 | ring_size += 5; /* done copy */ | 581 | ring_size += 5; /* done copy */ |
| 582 | ring_size += 5; /* fence emit for done copy */ | 582 | ring_size += 7; /* fence emit for done copy */ |
| 583 | r = radeon_ring_lock(rdev, ring_size); | 583 | r = radeon_ring_lock(rdev, ring_size); |
| 584 | WARN_ON(r); | 584 | WARN_ON(r); |
| 585 | 585 | ||
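The two bumps from 5 to 7 dwords follow from the fence-emit change earlier in this series: r600_fence_ring_emit() now writes two extra dwords (the PACKET0 header for HDP_MEM_COHERENCY_FLUSH_CNTL plus its value), so a fence costs 3 dwords for the scratch-register write, 2 for the HDP coherency flush and 2 for the CP interrupt packet, 7 in total, and r600_blit_prepare_copy() has to reserve ring space accordingly for both of its fence emits.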
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 53b55608102b..eb5f99b9469d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -319,10 +319,12 @@ struct radeon_mc { | |||
| 319 | u64 real_vram_size; | 319 | u64 real_vram_size; |
| 320 | int vram_mtrr; | 320 | int vram_mtrr; |
| 321 | bool vram_is_ddr; | 321 | bool vram_is_ddr; |
| 322 | bool igp_sideport_enabled; | ||
| 322 | }; | 323 | }; |
| 323 | 324 | ||
| 324 | int radeon_mc_setup(struct radeon_device *rdev); | 325 | int radeon_mc_setup(struct radeon_device *rdev); |
| 325 | 326 | bool radeon_combios_sideport_present(struct radeon_device *rdev); | |
| 327 | bool radeon_atombios_sideport_present(struct radeon_device *rdev); | ||
| 326 | 328 | ||
| 327 | /* | 329 | /* |
| 328 | * GPU scratch registers structures, functions & helpers | 330 | * GPU scratch registers structures, functions & helpers |
| @@ -654,7 +656,6 @@ struct radeon_asic { | |||
| 654 | uint32_t offset, uint32_t obj_size); | 656 | uint32_t offset, uint32_t obj_size); |
| 655 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | 657 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
| 656 | void (*bandwidth_update)(struct radeon_device *rdev); | 658 | void (*bandwidth_update)(struct radeon_device *rdev); |
| 657 | void (*hdp_flush)(struct radeon_device *rdev); | ||
| 658 | void (*hpd_init)(struct radeon_device *rdev); | 659 | void (*hpd_init)(struct radeon_device *rdev); |
| 659 | void (*hpd_fini)(struct radeon_device *rdev); | 660 | void (*hpd_fini)(struct radeon_device *rdev); |
| 660 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 661 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| @@ -667,11 +668,14 @@ struct radeon_asic { | |||
| 667 | struct r100_asic { | 668 | struct r100_asic { |
| 668 | const unsigned *reg_safe_bm; | 669 | const unsigned *reg_safe_bm; |
| 669 | unsigned reg_safe_bm_size; | 670 | unsigned reg_safe_bm_size; |
| 671 | u32 hdp_cntl; | ||
| 670 | }; | 672 | }; |
| 671 | 673 | ||
| 672 | struct r300_asic { | 674 | struct r300_asic { |
| 673 | const unsigned *reg_safe_bm; | 675 | const unsigned *reg_safe_bm; |
| 674 | unsigned reg_safe_bm_size; | 676 | unsigned reg_safe_bm_size; |
| 677 | u32 resync_scratch; | ||
| 678 | u32 hdp_cntl; | ||
| 675 | }; | 679 | }; |
| 676 | 680 | ||
| 677 | struct r600_asic { | 681 | struct r600_asic { |
| @@ -1007,7 +1011,6 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
| 1007 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | 1011 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) |
| 1008 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | 1012 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) |
| 1009 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | 1013 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) |
| 1010 | #define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev)) | ||
| 1011 | #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) | 1014 | #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) |
| 1012 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) | 1015 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) |
| 1013 | #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) | 1016 | #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 54bf49a6d676..220f454ea9fa 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
| @@ -252,10 +252,8 @@ void radeon_agp_resume(struct radeon_device *rdev) | |||
| 252 | void radeon_agp_fini(struct radeon_device *rdev) | 252 | void radeon_agp_fini(struct radeon_device *rdev) |
| 253 | { | 253 | { |
| 254 | #if __OS_HAS_AGP | 254 | #if __OS_HAS_AGP |
| 255 | if (rdev->flags & RADEON_IS_AGP) { | 255 | if (rdev->ddev->agp && rdev->ddev->agp->acquired) { |
| 256 | if (rdev->ddev->agp && rdev->ddev->agp->acquired) { | 256 | drm_agp_release(rdev->ddev); |
| 257 | drm_agp_release(rdev->ddev); | ||
| 258 | } | ||
| 259 | } | 257 | } |
| 260 | #endif | 258 | #endif |
| 261 | } | 259 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index eb29217bbf1d..f2fbd2e4e9df 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -77,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | |||
| 77 | void r100_bandwidth_update(struct radeon_device *rdev); | 77 | void r100_bandwidth_update(struct radeon_device *rdev); |
| 78 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 78 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
| 79 | int r100_ring_test(struct radeon_device *rdev); | 79 | int r100_ring_test(struct radeon_device *rdev); |
| 80 | void r100_hdp_flush(struct radeon_device *rdev); | ||
| 81 | void r100_hpd_init(struct radeon_device *rdev); | 80 | void r100_hpd_init(struct radeon_device *rdev); |
| 82 | void r100_hpd_fini(struct radeon_device *rdev); | 81 | void r100_hpd_fini(struct radeon_device *rdev); |
| 83 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 82 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| @@ -114,7 +113,6 @@ static struct radeon_asic r100_asic = { | |||
| 114 | .set_surface_reg = r100_set_surface_reg, | 113 | .set_surface_reg = r100_set_surface_reg, |
| 115 | .clear_surface_reg = r100_clear_surface_reg, | 114 | .clear_surface_reg = r100_clear_surface_reg, |
| 116 | .bandwidth_update = &r100_bandwidth_update, | 115 | .bandwidth_update = &r100_bandwidth_update, |
| 117 | .hdp_flush = &r100_hdp_flush, | ||
| 118 | .hpd_init = &r100_hpd_init, | 116 | .hpd_init = &r100_hpd_init, |
| 119 | .hpd_fini = &r100_hpd_fini, | 117 | .hpd_fini = &r100_hpd_fini, |
| 120 | .hpd_sense = &r100_hpd_sense, | 118 | .hpd_sense = &r100_hpd_sense, |
| @@ -174,7 +172,6 @@ static struct radeon_asic r300_asic = { | |||
| 174 | .set_surface_reg = r100_set_surface_reg, | 172 | .set_surface_reg = r100_set_surface_reg, |
| 175 | .clear_surface_reg = r100_clear_surface_reg, | 173 | .clear_surface_reg = r100_clear_surface_reg, |
| 176 | .bandwidth_update = &r100_bandwidth_update, | 174 | .bandwidth_update = &r100_bandwidth_update, |
| 177 | .hdp_flush = &r100_hdp_flush, | ||
| 178 | .hpd_init = &r100_hpd_init, | 175 | .hpd_init = &r100_hpd_init, |
| 179 | .hpd_fini = &r100_hpd_fini, | 176 | .hpd_fini = &r100_hpd_fini, |
| 180 | .hpd_sense = &r100_hpd_sense, | 177 | .hpd_sense = &r100_hpd_sense, |
| @@ -218,7 +215,6 @@ static struct radeon_asic r420_asic = { | |||
| 218 | .set_surface_reg = r100_set_surface_reg, | 215 | .set_surface_reg = r100_set_surface_reg, |
| 219 | .clear_surface_reg = r100_clear_surface_reg, | 216 | .clear_surface_reg = r100_clear_surface_reg, |
| 220 | .bandwidth_update = &r100_bandwidth_update, | 217 | .bandwidth_update = &r100_bandwidth_update, |
| 221 | .hdp_flush = &r100_hdp_flush, | ||
| 222 | .hpd_init = &r100_hpd_init, | 218 | .hpd_init = &r100_hpd_init, |
| 223 | .hpd_fini = &r100_hpd_fini, | 219 | .hpd_fini = &r100_hpd_fini, |
| 224 | .hpd_sense = &r100_hpd_sense, | 220 | .hpd_sense = &r100_hpd_sense, |
| @@ -267,7 +263,6 @@ static struct radeon_asic rs400_asic = { | |||
| 267 | .set_surface_reg = r100_set_surface_reg, | 263 | .set_surface_reg = r100_set_surface_reg, |
| 268 | .clear_surface_reg = r100_clear_surface_reg, | 264 | .clear_surface_reg = r100_clear_surface_reg, |
| 269 | .bandwidth_update = &r100_bandwidth_update, | 265 | .bandwidth_update = &r100_bandwidth_update, |
| 270 | .hdp_flush = &r100_hdp_flush, | ||
| 271 | .hpd_init = &r100_hpd_init, | 266 | .hpd_init = &r100_hpd_init, |
| 272 | .hpd_fini = &r100_hpd_fini, | 267 | .hpd_fini = &r100_hpd_fini, |
| 273 | .hpd_sense = &r100_hpd_sense, | 268 | .hpd_sense = &r100_hpd_sense, |
| @@ -324,7 +319,6 @@ static struct radeon_asic rs600_asic = { | |||
| 324 | .set_pcie_lanes = NULL, | 319 | .set_pcie_lanes = NULL, |
| 325 | .set_clock_gating = &radeon_atom_set_clock_gating, | 320 | .set_clock_gating = &radeon_atom_set_clock_gating, |
| 326 | .bandwidth_update = &rs600_bandwidth_update, | 321 | .bandwidth_update = &rs600_bandwidth_update, |
| 327 | .hdp_flush = &r100_hdp_flush, | ||
| 328 | .hpd_init = &rs600_hpd_init, | 322 | .hpd_init = &rs600_hpd_init, |
| 329 | .hpd_fini = &rs600_hpd_fini, | 323 | .hpd_fini = &rs600_hpd_fini, |
| 330 | .hpd_sense = &rs600_hpd_sense, | 324 | .hpd_sense = &rs600_hpd_sense, |
| @@ -372,7 +366,6 @@ static struct radeon_asic rs690_asic = { | |||
| 372 | .set_surface_reg = r100_set_surface_reg, | 366 | .set_surface_reg = r100_set_surface_reg, |
| 373 | .clear_surface_reg = r100_clear_surface_reg, | 367 | .clear_surface_reg = r100_clear_surface_reg, |
| 374 | .bandwidth_update = &rs690_bandwidth_update, | 368 | .bandwidth_update = &rs690_bandwidth_update, |
| 375 | .hdp_flush = &r100_hdp_flush, | ||
| 376 | .hpd_init = &rs600_hpd_init, | 369 | .hpd_init = &rs600_hpd_init, |
| 377 | .hpd_fini = &rs600_hpd_fini, | 370 | .hpd_fini = &rs600_hpd_fini, |
| 378 | .hpd_sense = &rs600_hpd_sense, | 371 | .hpd_sense = &rs600_hpd_sense, |
| @@ -424,7 +417,6 @@ static struct radeon_asic rv515_asic = { | |||
| 424 | .set_surface_reg = r100_set_surface_reg, | 417 | .set_surface_reg = r100_set_surface_reg, |
| 425 | .clear_surface_reg = r100_clear_surface_reg, | 418 | .clear_surface_reg = r100_clear_surface_reg, |
| 426 | .bandwidth_update = &rv515_bandwidth_update, | 419 | .bandwidth_update = &rv515_bandwidth_update, |
| 427 | .hdp_flush = &r100_hdp_flush, | ||
| 428 | .hpd_init = &rs600_hpd_init, | 420 | .hpd_init = &rs600_hpd_init, |
| 429 | .hpd_fini = &rs600_hpd_fini, | 421 | .hpd_fini = &rs600_hpd_fini, |
| 430 | .hpd_sense = &rs600_hpd_sense, | 422 | .hpd_sense = &rs600_hpd_sense, |
| @@ -467,7 +459,6 @@ static struct radeon_asic r520_asic = { | |||
| 467 | .set_surface_reg = r100_set_surface_reg, | 459 | .set_surface_reg = r100_set_surface_reg, |
| 468 | .clear_surface_reg = r100_clear_surface_reg, | 460 | .clear_surface_reg = r100_clear_surface_reg, |
| 469 | .bandwidth_update = &rv515_bandwidth_update, | 461 | .bandwidth_update = &rv515_bandwidth_update, |
| 470 | .hdp_flush = &r100_hdp_flush, | ||
| 471 | .hpd_init = &rs600_hpd_init, | 462 | .hpd_init = &rs600_hpd_init, |
| 472 | .hpd_fini = &rs600_hpd_fini, | 463 | .hpd_fini = &rs600_hpd_fini, |
| 473 | .hpd_sense = &rs600_hpd_sense, | 464 | .hpd_sense = &rs600_hpd_sense, |
| @@ -508,7 +499,6 @@ int r600_ring_test(struct radeon_device *rdev); | |||
| 508 | int r600_copy_blit(struct radeon_device *rdev, | 499 | int r600_copy_blit(struct radeon_device *rdev, |
| 509 | uint64_t src_offset, uint64_t dst_offset, | 500 | uint64_t src_offset, uint64_t dst_offset, |
| 510 | unsigned num_pages, struct radeon_fence *fence); | 501 | unsigned num_pages, struct radeon_fence *fence); |
| 511 | void r600_hdp_flush(struct radeon_device *rdev); | ||
| 512 | void r600_hpd_init(struct radeon_device *rdev); | 502 | void r600_hpd_init(struct radeon_device *rdev); |
| 513 | void r600_hpd_fini(struct radeon_device *rdev); | 503 | void r600_hpd_fini(struct radeon_device *rdev); |
| 514 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 504 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
| @@ -544,7 +534,6 @@ static struct radeon_asic r600_asic = { | |||
| 544 | .set_surface_reg = r600_set_surface_reg, | 534 | .set_surface_reg = r600_set_surface_reg, |
| 545 | .clear_surface_reg = r600_clear_surface_reg, | 535 | .clear_surface_reg = r600_clear_surface_reg, |
| 546 | .bandwidth_update = &rv515_bandwidth_update, | 536 | .bandwidth_update = &rv515_bandwidth_update, |
| 547 | .hdp_flush = &r600_hdp_flush, | ||
| 548 | .hpd_init = &r600_hpd_init, | 537 | .hpd_init = &r600_hpd_init, |
| 549 | .hpd_fini = &r600_hpd_fini, | 538 | .hpd_fini = &r600_hpd_fini, |
| 550 | .hpd_sense = &r600_hpd_sense, | 539 | .hpd_sense = &r600_hpd_sense, |
| @@ -589,7 +578,6 @@ static struct radeon_asic rv770_asic = { | |||
| 589 | .set_surface_reg = r600_set_surface_reg, | 578 | .set_surface_reg = r600_set_surface_reg, |
| 590 | .clear_surface_reg = r600_clear_surface_reg, | 579 | .clear_surface_reg = r600_clear_surface_reg, |
| 591 | .bandwidth_update = &rv515_bandwidth_update, | 580 | .bandwidth_update = &rv515_bandwidth_update, |
| 592 | .hdp_flush = &r600_hdp_flush, | ||
| 593 | .hpd_init = &r600_hpd_init, | 581 | .hpd_init = &r600_hpd_init, |
| 594 | .hpd_fini = &r600_hpd_fini, | 582 | .hpd_fini = &r600_hpd_fini, |
| 595 | .hpd_sense = &r600_hpd_sense, | 583 | .hpd_sense = &r600_hpd_sense, |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 321044bef71c..fa82ca74324e 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
| 114 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | 114 | i2c.i2c_id = gpio->sucI2cId.ucAccess; |
| 115 | 115 | ||
| 116 | i2c.valid = true; | 116 | i2c.valid = true; |
| 117 | break; | ||
| 117 | } | 118 | } |
| 118 | } | 119 | } |
| 119 | 120 | ||
| @@ -345,7 +346,9 @@ const int object_connector_convert[] = { | |||
| 345 | DRM_MODE_CONNECTOR_Unknown, | 346 | DRM_MODE_CONNECTOR_Unknown, |
| 346 | DRM_MODE_CONNECTOR_Unknown, | 347 | DRM_MODE_CONNECTOR_Unknown, |
| 347 | DRM_MODE_CONNECTOR_Unknown, | 348 | DRM_MODE_CONNECTOR_Unknown, |
| 348 | DRM_MODE_CONNECTOR_DisplayPort | 349 | DRM_MODE_CONNECTOR_DisplayPort, |
| 350 | DRM_MODE_CONNECTOR_eDP, | ||
| 351 | DRM_MODE_CONNECTOR_Unknown | ||
| 349 | }; | 352 | }; |
| 350 | 353 | ||
| 351 | bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | 354 | bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) |
| @@ -935,6 +938,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
| 935 | return false; | 938 | return false; |
| 936 | } | 939 | } |
| 937 | 940 | ||
| 941 | union igp_info { | ||
| 942 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
| 943 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; | ||
| 944 | }; | ||
| 945 | |||
| 946 | bool radeon_atombios_sideport_present(struct radeon_device *rdev) | ||
| 947 | { | ||
| 948 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
| 949 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
| 950 | union igp_info *igp_info; | ||
| 951 | u8 frev, crev; | ||
| 952 | u16 data_offset; | ||
| 953 | |||
| 954 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, | ||
| 955 | &crev, &data_offset); | ||
| 956 | |||
| 957 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | ||
| 958 | data_offset); | ||
| 959 | |||
| 960 | if (igp_info) { | ||
| 961 | switch (crev) { | ||
| 962 | case 1: | ||
| 963 | if (igp_info->info.ucMemoryType & 0xf0) | ||
| 964 | return true; | ||
| 965 | break; | ||
| 966 | case 2: | ||
| 967 | if (igp_info->info_2.ucMemoryType & 0x0f) | ||
| 968 | return true; | ||
| 969 | break; | ||
| 970 | default: | ||
| 971 | DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); | ||
| 972 | break; | ||
| 973 | } | ||
| 974 | } | ||
| 975 | return false; | ||
| 976 | } | ||
| 977 | |||
| 938 | bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | 978 | bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, |
| 939 | struct radeon_encoder_int_tmds *tmds) | 979 | struct radeon_encoder_int_tmds *tmds) |
| 940 | { | 980 | { |
| @@ -1026,6 +1066,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct | |||
| 1026 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; | 1066 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; |
| 1027 | ss->range = ss_info->asSS_Info[i].ucSS_Range; | 1067 | ss->range = ss_info->asSS_Info[i].ucSS_Range; |
| 1028 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; | 1068 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; |
| 1069 | break; | ||
| 1029 | } | 1070 | } |
| 1030 | } | 1071 | } |
| 1031 | } | 1072 | } |
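The radeon_atombios.c hunks above add radeon_atombios_sideport_present(), which reads the IntegratedSystemInfo data table through a union covering both table revisions, and add a break to two lookup loops so they stop at the first match. As a rough sketch of how the new probe might be consumed: the hypothetical caller below (the name radeon_mc_init_sideport and the use of rdev->is_atom_bios are assumptions, not part of this patch) fills in the igp_sideport_enabled flag that the radeon_bo_evict_vram() hunk later in this diff tests.

    /* Hypothetical caller, not in this patch: cache the sideport probe result
     * in the memory-controller state once, at init time. */
    static void radeon_mc_init_sideport(struct radeon_device *rdev)
    {
    	if (!(rdev->flags & RADEON_IS_IGP)) {
    		rdev->mc.igp_sideport_enabled = false;
    		return;
    	}
    	/* Assumption: rdev->is_atom_bios distinguishes ATOM from legacy COMBIOS parts. */
    	if (rdev->is_atom_bios)
    		rdev->mc.igp_sideport_enabled =
    			radeon_atombios_sideport_present(rdev);
    	else
    		rdev->mc.igp_sideport_enabled =
    			radeon_combios_sideport_present(rdev);
    }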
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index fd94dbca33ac..7914455c96ca 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) | |||
| 595 | return false; | 595 | return false; |
| 596 | } | 596 | } |
| 597 | 597 | ||
| 598 | bool radeon_combios_sideport_present(struct radeon_device *rdev) | ||
| 599 | { | ||
| 600 | struct drm_device *dev = rdev->ddev; | ||
| 601 | u16 igp_info; | ||
| 602 | |||
| 603 | igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE); | ||
| 604 | |||
| 605 | if (igp_info) { | ||
| 606 | if (RBIOS16(igp_info + 0x4)) | ||
| 607 | return true; | ||
| 608 | } | ||
| 609 | return false; | ||
| 610 | } | ||
| 611 | |||
| 612 | static const uint32_t default_primarydac_adj[CHIP_LAST] = { | ||
| 613 | 0x00000808, /* r100 */ | ||
| 614 | 0x00000808, /* rv100 */ | ||
| 615 | 0x00000808, /* rs100 */ | ||
| 616 | 0x00000808, /* rv200 */ | ||
| 617 | 0x00000808, /* rs200 */ | ||
| 618 | 0x00000808, /* r200 */ | ||
| 619 | 0x00000808, /* rv250 */ | ||
| 620 | 0x00000000, /* rs300 */ | ||
| 621 | 0x00000808, /* rv280 */ | ||
| 622 | 0x00000808, /* r300 */ | ||
| 623 | 0x00000808, /* r350 */ | ||
| 624 | 0x00000808, /* rv350 */ | ||
| 625 | 0x00000808, /* rv380 */ | ||
| 626 | 0x00000808, /* r420 */ | ||
| 627 | 0x00000808, /* r423 */ | ||
| 628 | 0x00000808, /* rv410 */ | ||
| 629 | 0x00000000, /* rs400 */ | ||
| 630 | 0x00000000, /* rs480 */ | ||
| 631 | }; | ||
| 632 | |||
| 633 | static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev, | ||
| 634 | struct radeon_encoder_primary_dac *p_dac) | ||
| 635 | { | ||
| 636 | p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family]; | ||
| 637 | return; | ||
| 638 | } | ||
| 639 | |||
| 598 | struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | 640 | struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct |
| 599 | radeon_encoder | 641 | radeon_encoder |
| 600 | *encoder) | 642 | *encoder) |
| @@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
| 604 | uint16_t dac_info; | 646 | uint16_t dac_info; |
| 605 | uint8_t rev, bg, dac; | 647 | uint8_t rev, bg, dac; |
| 606 | struct radeon_encoder_primary_dac *p_dac = NULL; | 648 | struct radeon_encoder_primary_dac *p_dac = NULL; |
| 649 | int found = 0; | ||
| 607 | 650 | ||
| 608 | if (rdev->bios == NULL) | 651 | p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), |
| 652 | GFP_KERNEL); | ||
| 653 | |||
| 654 | if (!p_dac) | ||
| 609 | return NULL; | 655 | return NULL; |
| 610 | 656 | ||
| 657 | if (rdev->bios == NULL) | ||
| 658 | goto out; | ||
| 659 | |||
| 611 | /* check CRT table */ | 660 | /* check CRT table */ |
| 612 | dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); | 661 | dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); |
| 613 | if (dac_info) { | 662 | if (dac_info) { |
| 614 | p_dac = | ||
| 615 | kzalloc(sizeof(struct radeon_encoder_primary_dac), | ||
| 616 | GFP_KERNEL); | ||
| 617 | |||
| 618 | if (!p_dac) | ||
| 619 | return NULL; | ||
| 620 | |||
| 621 | rev = RBIOS8(dac_info) & 0x3; | 663 | rev = RBIOS8(dac_info) & 0x3; |
| 622 | if (rev < 2) { | 664 | if (rev < 2) { |
| 623 | bg = RBIOS8(dac_info + 0x2) & 0xf; | 665 | bg = RBIOS8(dac_info + 0x2) & 0xf; |
| @@ -628,9 +670,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
| 628 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 670 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
| 629 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 671 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
| 630 | } | 672 | } |
| 631 | 673 | found = 1; | |
| 632 | } | 674 | } |
| 633 | 675 | ||
| 676 | out: | ||
| 677 | if (!found) /* fallback to defaults */ | ||
| 678 | radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); | ||
| 679 | |||
| 634 | return p_dac; | 680 | return p_dac; |
| 635 | } | 681 | } |
| 636 | 682 | ||
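In radeon_combios.c above, the primary DAC query now allocates its result first, jumps to a common exit label when the BIOS image or the CRT info table is missing, and falls back to the new per-family default_primarydac_adj table instead of returning NULL. The shape of the resulting function, condensed from the hunk (all names are taken from the patch itself):

    p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
    if (!p_dac)
    	return NULL;

    if (rdev->bios == NULL)
    	goto out;				/* no BIOS image: use defaults */

    dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
    if (dac_info) {
    	/* ... read the bg/dac adjustment from the CRT info table ... */
    	found = 1;
    }
    out:
    if (!found)				/* fall back to per-family defaults */
    	radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
    return p_dac;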
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 20161567dbff..9da10dd5df80 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
| 49 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | 49 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) |
| 50 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | 50 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); |
| 51 | 51 | ||
| 52 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 52 | if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
| 53 | if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | 53 | (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { |
| 54 | if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
| 55 | (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) { | ||
| 54 | if (radeon_dp_needs_link_train(radeon_connector)) { | 56 | if (radeon_dp_needs_link_train(radeon_connector)) { |
| 55 | if (connector->encoder) | 57 | if (connector->encoder) |
| 56 | dp_link_train(connector->encoder, connector); | 58 | dp_link_train(connector->encoder, connector); |
| @@ -615,7 +617,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
| 615 | ret = connector_status_connected; | 617 | ret = connector_status_connected; |
| 616 | } | 618 | } |
| 617 | } else { | 619 | } else { |
| 618 | if (radeon_connector->dac_load_detect) { | 620 | if (radeon_connector->dac_load_detect && encoder) { |
| 619 | encoder_funcs = encoder->helper_private; | 621 | encoder_funcs = encoder->helper_private; |
| 620 | ret = encoder_funcs->detect(encoder, connector); | 622 | ret = encoder_funcs->detect(encoder, connector); |
| 621 | } | 623 | } |
| @@ -967,7 +969,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto | |||
| 967 | } | 969 | } |
| 968 | 970 | ||
| 969 | sink_type = radeon_dp_getsinktype(radeon_connector); | 971 | sink_type = radeon_dp_getsinktype(radeon_connector); |
| 970 | if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | 972 | if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
| 973 | (sink_type == CONNECTOR_OBJECT_ID_eDP)) { | ||
| 971 | if (radeon_dp_getdpcd(radeon_connector)) { | 974 | if (radeon_dp_getdpcd(radeon_connector)) { |
| 972 | radeon_dig_connector->dp_sink_type = sink_type; | 975 | radeon_dig_connector->dp_sink_type = sink_type; |
| 973 | ret = connector_status_connected; | 976 | ret = connector_status_connected; |
| @@ -992,7 +995,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, | |||
| 992 | 995 | ||
| 993 | /* XXX check mode bandwidth */ | 996 | /* XXX check mode bandwidth */ |
| 994 | 997 | ||
| 995 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) | 998 | if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
| 999 | (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | ||
| 996 | return radeon_dp_mode_valid_helper(radeon_connector, mode); | 1000 | return radeon_dp_mode_valid_helper(radeon_connector, mode); |
| 997 | else | 1001 | else |
| 998 | return MODE_OK; | 1002 | return MODE_OK; |
| @@ -1145,6 +1149,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1145 | subpixel_order = SubPixelHorizontalRGB; | 1149 | subpixel_order = SubPixelHorizontalRGB; |
| 1146 | break; | 1150 | break; |
| 1147 | case DRM_MODE_CONNECTOR_DisplayPort: | 1151 | case DRM_MODE_CONNECTOR_DisplayPort: |
| 1152 | case DRM_MODE_CONNECTOR_eDP: | ||
| 1148 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1153 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
| 1149 | if (!radeon_dig_connector) | 1154 | if (!radeon_dig_connector) |
| 1150 | goto failed; | 1155 | goto failed; |
| @@ -1157,10 +1162,16 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1157 | goto failed; | 1162 | goto failed; |
| 1158 | if (i2c_bus->valid) { | 1163 | if (i2c_bus->valid) { |
| 1159 | /* add DP i2c bus */ | 1164 | /* add DP i2c bus */ |
| 1160 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | 1165 | if (connector_type == DRM_MODE_CONNECTOR_eDP) |
| 1166 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); | ||
| 1167 | else | ||
| 1168 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | ||
| 1161 | if (!radeon_dig_connector->dp_i2c_bus) | 1169 | if (!radeon_dig_connector->dp_i2c_bus) |
| 1162 | goto failed; | 1170 | goto failed; |
| 1163 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); | 1171 | if (connector_type == DRM_MODE_CONNECTOR_eDP) |
| 1172 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP"); | ||
| 1173 | else | ||
| 1174 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); | ||
| 1164 | if (!radeon_connector->ddc_bus) | 1175 | if (!radeon_connector->ddc_bus) |
| 1165 | goto failed; | 1176 | goto failed; |
| 1166 | } | 1177 | } |
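The radeon_connectors.c hunks above extend every DisplayPort-specific path to also cover eDP: hotplug handling, sink detection, mode validation, and the aux-channel/DDC bus naming in radeon_add_atom_connector(). The same two-way comparison is repeated in each place; if one wanted to factor it out, a pair of small helpers along these lines would work (purely illustrative, not part of the patch):

    /* Illustrative only: centralize the "DP-like" tests repeated in the hunks above. */
    static inline bool radeon_connector_is_dp_like(int connector_type)
    {
    	return (connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
    	       (connector_type == DRM_MODE_CONNECTOR_eDP);
    }

    static inline bool radeon_sink_is_dp_like(int sink_type)
    {
    	return (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
    	       (sink_type == CONNECTOR_OBJECT_ID_eDP);
    }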
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 0b2f9c2ad2c1..06123ba31d31 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
| @@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master) | |||
| 2145 | &master_priv->sarea); | 2145 | &master_priv->sarea); |
| 2146 | if (ret) { | 2146 | if (ret) { |
| 2147 | DRM_ERROR("SAREA setup failed\n"); | 2147 | DRM_ERROR("SAREA setup failed\n"); |
| 2148 | kfree(master_priv); | ||
| 2148 | return ret; | 2149 | return ret; |
| 2149 | } | 2150 | } |
| 2150 | master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); | 2151 | master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 7c6848096bcd..0c51f8e46613 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -733,16 +733,18 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
| 733 | */ | 733 | */ |
| 734 | int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | 734 | int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) |
| 735 | { | 735 | { |
| 736 | struct radeon_device *rdev = dev->dev_private; | 736 | struct radeon_device *rdev; |
| 737 | struct drm_crtc *crtc; | 737 | struct drm_crtc *crtc; |
| 738 | int r; | 738 | int r; |
| 739 | 739 | ||
| 740 | if (dev == NULL || rdev == NULL) { | 740 | if (dev == NULL || dev->dev_private == NULL) { |
| 741 | return -ENODEV; | 741 | return -ENODEV; |
| 742 | } | 742 | } |
| 743 | if (state.event == PM_EVENT_PRETHAW) { | 743 | if (state.event == PM_EVENT_PRETHAW) { |
| 744 | return 0; | 744 | return 0; |
| 745 | } | 745 | } |
| 746 | rdev = dev->dev_private; | ||
| 747 | |||
| 746 | /* unpin the front buffers */ | 748 | /* unpin the front buffers */ |
| 747 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 749 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 748 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); | 750 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 91d72b70abc9..0ec491ead2ff 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -234,7 +234,7 @@ static const char *encoder_names[34] = { | |||
| 234 | "INTERNAL_UNIPHY2", | 234 | "INTERNAL_UNIPHY2", |
| 235 | }; | 235 | }; |
| 236 | 236 | ||
| 237 | static const char *connector_names[13] = { | 237 | static const char *connector_names[15] = { |
| 238 | "Unknown", | 238 | "Unknown", |
| 239 | "VGA", | 239 | "VGA", |
| 240 | "DVI-I", | 240 | "DVI-I", |
| @@ -248,6 +248,8 @@ static const char *connector_names[13] = { | |||
| 248 | "DisplayPort", | 248 | "DisplayPort", |
| 249 | "HDMI-A", | 249 | "HDMI-A", |
| 250 | "HDMI-B", | 250 | "HDMI-B", |
| 251 | "TV", | ||
| 252 | "eDP", | ||
| 251 | }; | 253 | }; |
| 252 | 254 | ||
| 253 | static const char *hpd_names[7] = { | 255 | static const char *hpd_names[7] = { |
| @@ -329,8 +331,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
| 329 | ret = radeon_get_atom_connector_info_from_object_table(dev); | 331 | ret = radeon_get_atom_connector_info_from_object_table(dev); |
| 330 | else | 332 | else |
| 331 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); | 333 | ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); |
| 332 | } else | 334 | } else { |
| 333 | ret = radeon_get_legacy_connector_info_from_bios(dev); | 335 | ret = radeon_get_legacy_connector_info_from_bios(dev); |
| 336 | if (ret == false) | ||
| 337 | ret = radeon_get_legacy_connector_info_from_table(dev); | ||
| 338 | } | ||
| 334 | } else { | 339 | } else { |
| 335 | if (!ASIC_IS_AVIVO(rdev)) | 340 | if (!ASIC_IS_AVIVO(rdev)) |
| 336 | ret = radeon_get_legacy_connector_info_from_table(dev); | 341 | ret = radeon_get_legacy_connector_info_from_table(dev); |
| @@ -349,7 +354,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
| 349 | { | 354 | { |
| 350 | int ret = 0; | 355 | int ret = 0; |
| 351 | 356 | ||
| 352 | if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 357 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
| 358 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | ||
| 353 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | 359 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
| 354 | if (dig->dp_i2c_bus) | 360 | if (dig->dp_i2c_bus) |
| 355 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); | 361 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index ccba95f83d11..82eb551970b9 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -596,21 +596,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
| 596 | return ATOM_ENCODER_MODE_LVDS; | 596 | return ATOM_ENCODER_MODE_LVDS; |
| 597 | break; | 597 | break; |
| 598 | case DRM_MODE_CONNECTOR_DisplayPort: | 598 | case DRM_MODE_CONNECTOR_DisplayPort: |
| 599 | case DRM_MODE_CONNECTOR_eDP: | ||
| 599 | radeon_dig_connector = radeon_connector->con_priv; | 600 | radeon_dig_connector = radeon_connector->con_priv; |
| 600 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) | 601 | if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
| 602 | (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | ||
| 601 | return ATOM_ENCODER_MODE_DP; | 603 | return ATOM_ENCODER_MODE_DP; |
| 602 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 604 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
| 603 | return ATOM_ENCODER_MODE_HDMI; | 605 | return ATOM_ENCODER_MODE_HDMI; |
| 604 | else | 606 | else |
| 605 | return ATOM_ENCODER_MODE_DVI; | 607 | return ATOM_ENCODER_MODE_DVI; |
| 606 | break; | 608 | break; |
| 607 | case CONNECTOR_DVI_A: | 609 | case DRM_MODE_CONNECTOR_DVIA: |
| 608 | case CONNECTOR_VGA: | 610 | case DRM_MODE_CONNECTOR_VGA: |
| 609 | return ATOM_ENCODER_MODE_CRT; | 611 | return ATOM_ENCODER_MODE_CRT; |
| 610 | break; | 612 | break; |
| 611 | case CONNECTOR_STV: | 613 | case DRM_MODE_CONNECTOR_Composite: |
| 612 | case CONNECTOR_CTV: | 614 | case DRM_MODE_CONNECTOR_SVIDEO: |
| 613 | case CONNECTOR_DIN: | 615 | case DRM_MODE_CONNECTOR_9PinDIN: |
| 614 | /* fix me */ | 616 | /* fix me */ |
| 615 | return ATOM_ENCODER_MODE_TV; | 617 | return ATOM_ENCODER_MODE_TV; |
| 616 | /*return ATOM_ENCODER_MODE_CV;*/ | 618 | /*return ATOM_ENCODER_MODE_CV;*/ |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 4cdd8b4f7549..8495d4e32e18 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence) | |||
| 140 | 140 | ||
| 141 | bool radeon_fence_signaled(struct radeon_fence *fence) | 141 | bool radeon_fence_signaled(struct radeon_fence *fence) |
| 142 | { | 142 | { |
| 143 | struct radeon_device *rdev = fence->rdev; | ||
| 144 | unsigned long irq_flags; | 143 | unsigned long irq_flags; |
| 145 | bool signaled = false; | 144 | bool signaled = false; |
| 146 | 145 | ||
| 147 | if (rdev->gpu_lockup) { | 146 | if (!fence) |
| 148 | return true; | 147 | return true; |
| 149 | } | 148 | |
| 150 | if (fence == NULL) { | 149 | if (fence->rdev->gpu_lockup) |
| 151 | return true; | 150 | return true; |
| 152 | } | 151 | |
| 153 | write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); | 152 | write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); |
| 154 | signaled = fence->signaled; | 153 | signaled = fence->signaled; |
| 155 | /* if we are shuting down report all fence as signaled */ | 154 | /* if we are shuting down report all fence as signaled */ |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 60df2d7e7e4c..0e1325e18534 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, | |||
| 131 | printk(KERN_ERR "Failed to wait for object !\n"); | 131 | printk(KERN_ERR "Failed to wait for object !\n"); |
| 132 | return r; | 132 | return r; |
| 133 | } | 133 | } |
| 134 | radeon_hdp_flush(robj->rdev); | ||
| 135 | } | 134 | } |
| 136 | return 0; | 135 | return 0; |
| 137 | } | 136 | } |
| @@ -312,7 +311,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
| 312 | mutex_lock(&dev->struct_mutex); | 311 | mutex_lock(&dev->struct_mutex); |
| 313 | drm_gem_object_unreference(gobj); | 312 | drm_gem_object_unreference(gobj); |
| 314 | mutex_unlock(&dev->struct_mutex); | 313 | mutex_unlock(&dev->struct_mutex); |
| 315 | radeon_hdp_flush(robj->rdev); | ||
| 316 | return r; | 314 | return r; |
| 317 | } | 315 | } |
| 318 | 316 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index b79ecc4a7cc4..2f349a300195 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c | |||
| @@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr | |||
| 289 | drm_radeon_irq_emit_t *emit = data; | 289 | drm_radeon_irq_emit_t *emit = data; |
| 290 | int result; | 290 | int result; |
| 291 | 291 | ||
| 292 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
| 293 | return -EINVAL; | ||
| 294 | |||
| 295 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
| 296 | |||
| 297 | if (!dev_priv) { | 292 | if (!dev_priv) { |
| 298 | DRM_ERROR("called with no initialization\n"); | 293 | DRM_ERROR("called with no initialization\n"); |
| 299 | return -EINVAL; | 294 | return -EINVAL; |
| 300 | } | 295 | } |
| 301 | 296 | ||
| 297 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
| 298 | return -EINVAL; | ||
| 299 | |||
| 300 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
| 301 | |||
| 302 | result = radeon_emit_irq(dev); | 302 | result = radeon_emit_irq(dev); |
| 303 | 303 | ||
| 304 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { | 304 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 9223296fe37b..3cfd60fd0083 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
| 97 | rdev->irq.sw_int = false; | 97 | rdev->irq.sw_int = false; |
| 98 | for (i = 0; i < 2; i++) { | 98 | for (i = 0; i < 2; i++) { |
| 99 | rdev->irq.crtc_vblank_int[i] = false; | 99 | rdev->irq.crtc_vblank_int[i] = false; |
| 100 | rdev->irq.hpd[i] = false; | ||
| 100 | } | 101 | } |
| 101 | radeon_irq_set(rdev); | 102 | radeon_irq_set(rdev); |
| 102 | } | 103 | } |
| @@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
| 128 | DRM_INFO("radeon: using MSI.\n"); | 129 | DRM_INFO("radeon: using MSI.\n"); |
| 129 | } | 130 | } |
| 130 | } | 131 | } |
| 131 | drm_irq_install(rdev->ddev); | ||
| 132 | rdev->irq.installed = true; | 132 | rdev->irq.installed = true; |
| 133 | r = drm_irq_install(rdev->ddev); | ||
| 134 | if (r) { | ||
| 135 | rdev->irq.installed = false; | ||
| 136 | return r; | ||
| 137 | } | ||
| 133 | DRM_INFO("radeon: irq initialized.\n"); | 138 | DRM_INFO("radeon: irq initialized.\n"); |
| 134 | return 0; | 139 | return 0; |
| 135 | } | 140 | } |
| 136 | 141 | ||
| 137 | void radeon_irq_kms_fini(struct radeon_device *rdev) | 142 | void radeon_irq_kms_fini(struct radeon_device *rdev) |
| 138 | { | 143 | { |
| 144 | drm_vblank_cleanup(rdev->ddev); | ||
| 139 | if (rdev->irq.installed) { | 145 | if (rdev->irq.installed) { |
| 140 | rdev->irq.installed = false; | ||
| 141 | drm_irq_uninstall(rdev->ddev); | 146 | drm_irq_uninstall(rdev->ddev); |
| 147 | rdev->irq.installed = false; | ||
| 142 | if (rdev->msi_enabled) | 148 | if (rdev->msi_enabled) |
| 143 | pci_disable_msi(rdev->pdev); | 149 | pci_disable_msi(rdev->pdev); |
| 144 | } | 150 | } |
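The radeon_irq_kms.c hunks set rdev->irq.installed before calling drm_irq_install() and clear it again if the call fails, propagate that error to the caller instead of ignoring it, and run drm_vblank_cleanup() unconditionally before tearing down the interrupt. Reassembled from the hunk, the resulting teardown path reads:

    void radeon_irq_kms_fini(struct radeon_device *rdev)
    {
    	drm_vblank_cleanup(rdev->ddev);		/* now done first, unconditionally */
    	if (rdev->irq.installed) {
    		drm_irq_uninstall(rdev->ddev);
    		rdev->irq.installed = false;
    		if (rdev->msi_enabled)
    			pci_disable_msi(rdev->pdev);
    	}
    }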
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 3a12bb0c0563..417684daef4c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
| @@ -77,7 +77,7 @@ struct radeon_tv_mode_constants { | |||
| 77 | unsigned pix_to_tv; | 77 | unsigned pix_to_tv; |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | static const uint16_t hor_timing_NTSC[] = { | 80 | static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = { |
| 81 | 0x0007, | 81 | 0x0007, |
| 82 | 0x003f, | 82 | 0x003f, |
| 83 | 0x0263, | 83 | 0x0263, |
| @@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = { | |||
| 98 | 0 | 98 | 0 |
| 99 | }; | 99 | }; |
| 100 | 100 | ||
| 101 | static const uint16_t vert_timing_NTSC[] = { | 101 | static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = { |
| 102 | 0x2001, | 102 | 0x2001, |
| 103 | 0x200d, | 103 | 0x200d, |
| 104 | 0x1006, | 104 | 0x1006, |
| @@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = { | |||
| 115 | 0 | 115 | 0 |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | static const uint16_t hor_timing_PAL[] = { | 118 | static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = { |
| 119 | 0x0007, | 119 | 0x0007, |
| 120 | 0x0058, | 120 | 0x0058, |
| 121 | 0x027c, | 121 | 0x027c, |
| @@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = { | |||
| 136 | 0 | 136 | 0 |
| 137 | }; | 137 | }; |
| 138 | 138 | ||
| 139 | static const uint16_t vert_timing_PAL[] = { | 139 | static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = { |
| 140 | 0x2001, | 140 | 0x2001, |
| 141 | 0x200c, | 141 | 0x200c, |
| 142 | 0x1005, | 142 | 0x1005, |
| @@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, | |||
| 623 | } | 623 | } |
| 624 | flicker_removal = (tmp + 500) / 1000; | 624 | flicker_removal = (tmp + 500) / 1000; |
| 625 | 625 | ||
| 626 | if (flicker_removal < 3) | 626 | if (flicker_removal < 2) |
| 627 | flicker_removal = 3; | 627 | flicker_removal = 2; |
| 628 | for (i = 0; i < 6; ++i) { | 628 | for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) { |
| 629 | if (flicker_removal == SLOPE_limit[i]) | 629 | if (flicker_removal == SLOPE_limit[i]) |
| 630 | break; | 630 | break; |
| 631 | } | 631 | } |
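The radeon_legacy_tv.c changes size the NTSC/PAL timing tables explicitly as MAX_H_CODE_TIMING_LEN/MAX_V_CODE_TIMING_LEN entries, lower the flicker-removal clamp from 3 to 2, and bound the slope lookup with ARRAY_SIZE(SLOPE_limit) instead of a hard-coded 6. ARRAY_SIZE ties the loop bound to the table definition; a small self-contained illustration (the table values below are invented for the example, not the driver's real SLOPE_limit data):

    #include <linux/kernel.h>	/* for ARRAY_SIZE() */

    /* Invented example values; the real SLOPE_limit table lives in radeon_legacy_tv.c. */
    static const unsigned int slope_limit_example[] = { 2, 3, 4, 5, 6, 8 };

    static unsigned int find_slope_index(unsigned int flicker_removal)
    {
    	unsigned int i;

    	for (i = 0; i < ARRAY_SIZE(slope_limit_example); ++i)
    		if (flicker_removal == slope_limit_example[i])
    			break;
    	return i;	/* equals ARRAY_SIZE(...) when nothing matched */
    }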
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 402369db5ba0..91cb041cb40d 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -46,32 +46,6 @@ struct radeon_device; | |||
| 46 | #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) | 46 | #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) |
| 47 | #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) | 47 | #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) |
| 48 | 48 | ||
| 49 | enum radeon_connector_type { | ||
| 50 | CONNECTOR_NONE, | ||
| 51 | CONNECTOR_VGA, | ||
| 52 | CONNECTOR_DVI_I, | ||
| 53 | CONNECTOR_DVI_D, | ||
| 54 | CONNECTOR_DVI_A, | ||
| 55 | CONNECTOR_STV, | ||
| 56 | CONNECTOR_CTV, | ||
| 57 | CONNECTOR_LVDS, | ||
| 58 | CONNECTOR_DIGITAL, | ||
| 59 | CONNECTOR_SCART, | ||
| 60 | CONNECTOR_HDMI_TYPE_A, | ||
| 61 | CONNECTOR_HDMI_TYPE_B, | ||
| 62 | CONNECTOR_0XC, | ||
| 63 | CONNECTOR_0XD, | ||
| 64 | CONNECTOR_DIN, | ||
| 65 | CONNECTOR_DISPLAY_PORT, | ||
| 66 | CONNECTOR_UNSUPPORTED | ||
| 67 | }; | ||
| 68 | |||
| 69 | enum radeon_dvi_type { | ||
| 70 | DVI_AUTO, | ||
| 71 | DVI_DIGITAL, | ||
| 72 | DVI_ANALOG | ||
| 73 | }; | ||
| 74 | |||
| 75 | enum radeon_rmx_type { | 49 | enum radeon_rmx_type { |
| 76 | RMX_OFF, | 50 | RMX_OFF, |
| 77 | RMX_FULL, | 51 | RMX_FULL, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d9ffe1f56e8f..4e636de877b2 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -221,8 +221,9 @@ int radeon_bo_unpin(struct radeon_bo *bo) | |||
| 221 | int radeon_bo_evict_vram(struct radeon_device *rdev) | 221 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
| 222 | { | 222 | { |
| 223 | if (rdev->flags & RADEON_IS_IGP) { | 223 | if (rdev->flags & RADEON_IS_IGP) { |
| 224 | /* Useless to evict on IGP chips */ | 224 | if (rdev->mc.igp_sideport_enabled == false) |
| 225 | return 0; | 225 | /* Useless to evict on IGP chips */ |
| 226 | return 0; | ||
| 226 | } | 227 | } |
| 227 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 228 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
| 228 | } | 229 | } |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 new file mode 100644 index 000000000000..989f7a020832 --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
| @@ -0,0 +1,795 @@ | |||
| 1 | r420 0x4f60 | ||
| 2 | 0x1434 SRC_Y_X | ||
| 3 | 0x1438 DST_Y_X | ||
| 4 | 0x143C DST_HEIGHT_WIDTH | ||
| 5 | 0x146C DP_GUI_MASTER_CNTL | ||
| 6 | 0x1474 BRUSH_Y_X | ||
| 7 | 0x1478 DP_BRUSH_BKGD_CLR | ||
| 8 | 0x147C DP_BRUSH_FRGD_CLR | ||
| 9 | 0x1480 BRUSH_DATA0 | ||
| 10 | 0x1484 BRUSH_DATA1 | ||
| 11 | 0x1598 DST_WIDTH_HEIGHT | ||
| 12 | 0x15C0 CLR_CMP_CNTL | ||
| 13 | 0x15C4 CLR_CMP_CLR_SRC | ||
| 14 | 0x15C8 CLR_CMP_CLR_DST | ||
| 15 | 0x15CC CLR_CMP_MSK | ||
| 16 | 0x15D8 DP_SRC_FRGD_CLR | ||
| 17 | 0x15DC DP_SRC_BKGD_CLR | ||
| 18 | 0x1600 DST_LINE_START | ||
| 19 | 0x1604 DST_LINE_END | ||
| 20 | 0x1608 DST_LINE_PATCOUNT | ||
| 21 | 0x16C0 DP_CNTL | ||
| 22 | 0x16CC DP_WRITE_MSK | ||
| 23 | 0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR | ||
| 24 | 0x16E8 DEFAULT_SC_BOTTOM_RIGHT | ||
| 25 | 0x16EC SC_TOP_LEFT | ||
| 26 | 0x16F0 SC_BOTTOM_RIGHT | ||
| 27 | 0x16F4 SRC_SC_BOTTOM_RIGHT | ||
| 28 | 0x1714 DSTCACHE_CTLSTAT | ||
| 29 | 0x1720 WAIT_UNTIL | ||
| 30 | 0x172C RBBM_GUICNTL | ||
| 31 | 0x1D98 VAP_VPORT_XSCALE | ||
| 32 | 0x1D9C VAP_VPORT_XOFFSET | ||
| 33 | 0x1DA0 VAP_VPORT_YSCALE | ||
| 34 | 0x1DA4 VAP_VPORT_YOFFSET | ||
| 35 | 0x1DA8 VAP_VPORT_ZSCALE | ||
| 36 | 0x1DAC VAP_VPORT_ZOFFSET | ||
| 37 | 0x2080 VAP_CNTL | ||
| 38 | 0x2090 VAP_OUT_VTX_FMT_0 | ||
| 39 | 0x2094 VAP_OUT_VTX_FMT_1 | ||
| 40 | 0x20B0 VAP_VTE_CNTL | ||
| 41 | 0x2138 VAP_VF_MIN_VTX_INDX | ||
| 42 | 0x2140 VAP_CNTL_STATUS | ||
| 43 | 0x2150 VAP_PROG_STREAM_CNTL_0 | ||
| 44 | 0x2154 VAP_PROG_STREAM_CNTL_1 | ||
| 45 | 0x2158 VAP_PROG_STREAM_CNTL_2 | ||
| 46 | 0x215C VAP_PROG_STREAM_CNTL_3 | ||
| 47 | 0x2160 VAP_PROG_STREAM_CNTL_4 | ||
| 48 | 0x2164 VAP_PROG_STREAM_CNTL_5 | ||
| 49 | 0x2168 VAP_PROG_STREAM_CNTL_6 | ||
| 50 | 0x216C VAP_PROG_STREAM_CNTL_7 | ||
| 51 | 0x2180 VAP_VTX_STATE_CNTL | ||
| 52 | 0x2184 VAP_VSM_VTX_ASSM | ||
| 53 | 0x2188 VAP_VTX_STATE_IND_REG_0 | ||
| 54 | 0x218C VAP_VTX_STATE_IND_REG_1 | ||
| 55 | 0x2190 VAP_VTX_STATE_IND_REG_2 | ||
| 56 | 0x2194 VAP_VTX_STATE_IND_REG_3 | ||
| 57 | 0x2198 VAP_VTX_STATE_IND_REG_4 | ||
| 58 | 0x219C VAP_VTX_STATE_IND_REG_5 | ||
| 59 | 0x21A0 VAP_VTX_STATE_IND_REG_6 | ||
| 60 | 0x21A4 VAP_VTX_STATE_IND_REG_7 | ||
| 61 | 0x21A8 VAP_VTX_STATE_IND_REG_8 | ||
| 62 | 0x21AC VAP_VTX_STATE_IND_REG_9 | ||
| 63 | 0x21B0 VAP_VTX_STATE_IND_REG_10 | ||
| 64 | 0x21B4 VAP_VTX_STATE_IND_REG_11 | ||
| 65 | 0x21B8 VAP_VTX_STATE_IND_REG_12 | ||
| 66 | 0x21BC VAP_VTX_STATE_IND_REG_13 | ||
| 67 | 0x21C0 VAP_VTX_STATE_IND_REG_14 | ||
| 68 | 0x21C4 VAP_VTX_STATE_IND_REG_15 | ||
| 69 | 0x21DC VAP_PSC_SGN_NORM_CNTL | ||
| 70 | 0x21E0 VAP_PROG_STREAM_CNTL_EXT_0 | ||
| 71 | 0x21E4 VAP_PROG_STREAM_CNTL_EXT_1 | ||
| 72 | 0x21E8 VAP_PROG_STREAM_CNTL_EXT_2 | ||
| 73 | 0x21EC VAP_PROG_STREAM_CNTL_EXT_3 | ||
| 74 | 0x21F0 VAP_PROG_STREAM_CNTL_EXT_4 | ||
| 75 | 0x21F4 VAP_PROG_STREAM_CNTL_EXT_5 | ||
| 76 | 0x21F8 VAP_PROG_STREAM_CNTL_EXT_6 | ||
| 77 | 0x21FC VAP_PROG_STREAM_CNTL_EXT_7 | ||
| 78 | 0x2200 VAP_PVS_VECTOR_INDX_REG | ||
| 79 | 0x2204 VAP_PVS_VECTOR_DATA_REG | ||
| 80 | 0x2208 VAP_PVS_VECTOR_DATA_REG_128 | ||
| 81 | 0x221C VAP_CLIP_CNTL | ||
| 82 | 0x2220 VAP_GB_VERT_CLIP_ADJ | ||
| 83 | 0x2224 VAP_GB_VERT_DISC_ADJ | ||
| 84 | 0x2228 VAP_GB_HORZ_CLIP_ADJ | ||
| 85 | 0x222C VAP_GB_HORZ_DISC_ADJ | ||
| 86 | 0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0 | ||
| 87 | 0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1 | ||
| 88 | 0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2 | ||
| 89 | 0x223C VAP_PVS_FLOW_CNTL_ADDRS_3 | ||
| 90 | 0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4 | ||
| 91 | 0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5 | ||
| 92 | 0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6 | ||
| 93 | 0x224C VAP_PVS_FLOW_CNTL_ADDRS_7 | ||
| 94 | 0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8 | ||
| 95 | 0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9 | ||
| 96 | 0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10 | ||
| 97 | 0x225C VAP_PVS_FLOW_CNTL_ADDRS_11 | ||
| 98 | 0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12 | ||
| 99 | 0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13 | ||
| 100 | 0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14 | ||
| 101 | 0x226C VAP_PVS_FLOW_CNTL_ADDRS_15 | ||
| 102 | 0x2284 VAP_PVS_STATE_FLUSH_REG | ||
| 103 | 0x2288 VAP_PVS_VTX_TIMEOUT_REG | ||
| 104 | 0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0 | ||
| 105 | 0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1 | ||
| 106 | 0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2 | ||
| 107 | 0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3 | ||
| 108 | 0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4 | ||
| 109 | 0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5 | ||
| 110 | 0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6 | ||
| 111 | 0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7 | ||
| 112 | 0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8 | ||
| 113 | 0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9 | ||
| 114 | 0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10 | ||
| 115 | 0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11 | ||
| 116 | 0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12 | ||
| 117 | 0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13 | ||
| 118 | 0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14 | ||
| 119 | 0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15 | ||
| 120 | 0x22D0 VAP_PVS_CODE_CNTL_0 | ||
| 121 | 0x22D4 VAP_PVS_CONST_CNTL | ||
| 122 | 0x22D8 VAP_PVS_CODE_CNTL_1 | ||
| 123 | 0x22DC VAP_PVS_FLOW_CNTL_OPC | ||
| 124 | 0x342C RB2D_DSTCACHE_CTLSTAT | ||
| 125 | 0x4000 GB_VAP_RASTER_VTX_FMT_0 | ||
| 126 | 0x4004 GB_VAP_RASTER_VTX_FMT_1 | ||
| 127 | 0x4008 GB_ENABLE | ||
| 128 | 0x401C GB_SELECT | ||
| 129 | 0x4020 GB_AA_CONFIG | ||
| 130 | 0x4024 GB_FIFO_SIZE | ||
| 131 | 0x4100 TX_INVALTAGS | ||
| 132 | 0x4200 GA_POINT_S0 | ||
| 133 | 0x4204 GA_POINT_T0 | ||
| 134 | 0x4208 GA_POINT_S1 | ||
| 135 | 0x420C GA_POINT_T1 | ||
| 136 | 0x4214 GA_TRIANGLE_STIPPLE | ||
| 137 | 0x421C GA_POINT_SIZE | ||
| 138 | 0x4230 GA_POINT_MINMAX | ||
| 139 | 0x4234 GA_LINE_CNTL | ||
| 140 | 0x4238 GA_LINE_STIPPLE_CONFIG | ||
| 141 | 0x4260 GA_LINE_STIPPLE_VALUE | ||
| 142 | 0x4264 GA_LINE_S0 | ||
| 143 | 0x4268 GA_LINE_S1 | ||
| 144 | 0x4278 GA_COLOR_CONTROL | ||
| 145 | 0x427C GA_SOLID_RG | ||
| 146 | 0x4280 GA_SOLID_BA | ||
| 147 | 0x4288 GA_POLY_MODE | ||
| 148 | 0x428C GA_ROUND_MODE | ||
| 149 | 0x4290 GA_OFFSET | ||
| 150 | 0x4294 GA_FOG_SCALE | ||
| 151 | 0x4298 GA_FOG_OFFSET | ||
| 152 | 0x42A0 SU_TEX_WRAP | ||
| 153 | 0x42A4 SU_POLY_OFFSET_FRONT_SCALE | ||
| 154 | 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET | ||
| 155 | 0x42AC SU_POLY_OFFSET_BACK_SCALE | ||
| 156 | 0x42B0 SU_POLY_OFFSET_BACK_OFFSET | ||
| 157 | 0x42B4 SU_POLY_OFFSET_ENABLE | ||
| 158 | 0x42B8 SU_CULL_MODE | ||
| 159 | 0x42C0 SU_DEPTH_SCALE | ||
| 160 | 0x42C4 SU_DEPTH_OFFSET | ||
| 161 | 0x42C8 SU_REG_DEST | ||
| 162 | 0x4300 RS_COUNT | ||
| 163 | 0x4304 RS_INST_COUNT | ||
| 164 | 0x4310 RS_IP_0 | ||
| 165 | 0x4314 RS_IP_1 | ||
| 166 | 0x4318 RS_IP_2 | ||
| 167 | 0x431C RS_IP_3 | ||
| 168 | 0x4320 RS_IP_4 | ||
| 169 | 0x4324 RS_IP_5 | ||
| 170 | 0x4328 RS_IP_6 | ||
| 171 | 0x432C RS_IP_7 | ||
| 172 | 0x4330 RS_INST_0 | ||
| 173 | 0x4334 RS_INST_1 | ||
| 174 | 0x4338 RS_INST_2 | ||
| 175 | 0x433C RS_INST_3 | ||
| 176 | 0x4340 RS_INST_4 | ||
| 177 | 0x4344 RS_INST_5 | ||
| 178 | 0x4348 RS_INST_6 | ||
| 179 | 0x434C RS_INST_7 | ||
| 180 | 0x4350 RS_INST_8 | ||
| 181 | 0x4354 RS_INST_9 | ||
| 182 | 0x4358 RS_INST_10 | ||
| 183 | 0x435C RS_INST_11 | ||
| 184 | 0x4360 RS_INST_12 | ||
| 185 | 0x4364 RS_INST_13 | ||
| 186 | 0x4368 RS_INST_14 | ||
| 187 | 0x436C RS_INST_15 | ||
| 188 | 0x43A4 SC_HYPERZ_EN | ||
| 189 | 0x43A8 SC_EDGERULE | ||
| 190 | 0x43B0 SC_CLIP_0_A | ||
| 191 | 0x43B4 SC_CLIP_0_B | ||
| 192 | 0x43B8 SC_CLIP_1_A | ||
| 193 | 0x43BC SC_CLIP_1_B | ||
| 194 | 0x43C0 SC_CLIP_2_A | ||
| 195 | 0x43C4 SC_CLIP_2_B | ||
| 196 | 0x43C8 SC_CLIP_3_A | ||
| 197 | 0x43CC SC_CLIP_3_B | ||
| 198 | 0x43D0 SC_CLIP_RULE | ||
| 199 | 0x43E0 SC_SCISSOR0 | ||
| 200 | 0x43E8 SC_SCREENDOOR | ||
| 201 | 0x4440 TX_FILTER1_0 | ||
| 202 | 0x4444 TX_FILTER1_1 | ||
| 203 | 0x4448 TX_FILTER1_2 | ||
| 204 | 0x444C TX_FILTER1_3 | ||
| 205 | 0x4450 TX_FILTER1_4 | ||
| 206 | 0x4454 TX_FILTER1_5 | ||
| 207 | 0x4458 TX_FILTER1_6 | ||
| 208 | 0x445C TX_FILTER1_7 | ||
| 209 | 0x4460 TX_FILTER1_8 | ||
| 210 | 0x4464 TX_FILTER1_9 | ||
| 211 | 0x4468 TX_FILTER1_10 | ||
| 212 | 0x446C TX_FILTER1_11 | ||
| 213 | 0x4470 TX_FILTER1_12 | ||
| 214 | 0x4474 TX_FILTER1_13 | ||
| 215 | 0x4478 TX_FILTER1_14 | ||
| 216 | 0x447C TX_FILTER1_15 | ||
| 217 | 0x4580 TX_CHROMA_KEY_0 | ||
| 218 | 0x4584 TX_CHROMA_KEY_1 | ||
| 219 | 0x4588 TX_CHROMA_KEY_2 | ||
| 220 | 0x458C TX_CHROMA_KEY_3 | ||
| 221 | 0x4590 TX_CHROMA_KEY_4 | ||
| 222 | 0x4594 TX_CHROMA_KEY_5 | ||
| 223 | 0x4598 TX_CHROMA_KEY_6 | ||
| 224 | 0x459C TX_CHROMA_KEY_7 | ||
| 225 | 0x45A0 TX_CHROMA_KEY_8 | ||
| 226 | 0x45A4 TX_CHROMA_KEY_9 | ||
| 227 | 0x45A8 TX_CHROMA_KEY_10 | ||
| 228 | 0x45AC TX_CHROMA_KEY_11 | ||
| 229 | 0x45B0 TX_CHROMA_KEY_12 | ||
| 230 | 0x45B4 TX_CHROMA_KEY_13 | ||
| 231 | 0x45B8 TX_CHROMA_KEY_14 | ||
| 232 | 0x45BC TX_CHROMA_KEY_15 | ||
| 233 | 0x45C0 TX_BORDER_COLOR_0 | ||
| 234 | 0x45C4 TX_BORDER_COLOR_1 | ||
| 235 | 0x45C8 TX_BORDER_COLOR_2 | ||
| 236 | 0x45CC TX_BORDER_COLOR_3 | ||
| 237 | 0x45D0 TX_BORDER_COLOR_4 | ||
| 238 | 0x45D4 TX_BORDER_COLOR_5 | ||
| 239 | 0x45D8 TX_BORDER_COLOR_6 | ||
| 240 | 0x45DC TX_BORDER_COLOR_7 | ||
| 241 | 0x45E0 TX_BORDER_COLOR_8 | ||
| 242 | 0x45E4 TX_BORDER_COLOR_9 | ||
| 243 | 0x45E8 TX_BORDER_COLOR_10 | ||
| 244 | 0x45EC TX_BORDER_COLOR_11 | ||
| 245 | 0x45F0 TX_BORDER_COLOR_12 | ||
| 246 | 0x45F4 TX_BORDER_COLOR_13 | ||
| 247 | 0x45F8 TX_BORDER_COLOR_14 | ||
| 248 | 0x45FC TX_BORDER_COLOR_15 | ||
| 249 | 0x4600 US_CONFIG | ||
| 250 | 0x4604 US_PIXSIZE | ||
| 251 | 0x4608 US_CODE_OFFSET | ||
| 252 | 0x460C US_RESET | ||
| 253 | 0x4610 US_CODE_ADDR_0 | ||
| 254 | 0x4614 US_CODE_ADDR_1 | ||
| 255 | 0x4618 US_CODE_ADDR_2 | ||
| 256 | 0x461C US_CODE_ADDR_3 | ||
| 257 | 0x4620 US_TEX_INST_0 | ||
| 258 | 0x4624 US_TEX_INST_1 | ||
| 259 | 0x4628 US_TEX_INST_2 | ||
| 260 | 0x462C US_TEX_INST_3 | ||
| 261 | 0x4630 US_TEX_INST_4 | ||
| 262 | 0x4634 US_TEX_INST_5 | ||
| 263 | 0x4638 US_TEX_INST_6 | ||
| 264 | 0x463C US_TEX_INST_7 | ||
| 265 | 0x4640 US_TEX_INST_8 | ||
| 266 | 0x4644 US_TEX_INST_9 | ||
| 267 | 0x4648 US_TEX_INST_10 | ||
| 268 | 0x464C US_TEX_INST_11 | ||
| 269 | 0x4650 US_TEX_INST_12 | ||
| 270 | 0x4654 US_TEX_INST_13 | ||
| 271 | 0x4658 US_TEX_INST_14 | ||
| 272 | 0x465C US_TEX_INST_15 | ||
| 273 | 0x4660 US_TEX_INST_16 | ||
| 274 | 0x4664 US_TEX_INST_17 | ||
| 275 | 0x4668 US_TEX_INST_18 | ||
| 276 | 0x466C US_TEX_INST_19 | ||
| 277 | 0x4670 US_TEX_INST_20 | ||
| 278 | 0x4674 US_TEX_INST_21 | ||
| 279 | 0x4678 US_TEX_INST_22 | ||
| 280 | 0x467C US_TEX_INST_23 | ||
| 281 | 0x4680 US_TEX_INST_24 | ||
| 282 | 0x4684 US_TEX_INST_25 | ||
| 283 | 0x4688 US_TEX_INST_26 | ||
| 284 | 0x468C US_TEX_INST_27 | ||
| 285 | 0x4690 US_TEX_INST_28 | ||
| 286 | 0x4694 US_TEX_INST_29 | ||
| 287 | 0x4698 US_TEX_INST_30 | ||
| 288 | 0x469C US_TEX_INST_31 | ||
| 289 | 0x46A4 US_OUT_FMT_0 | ||
| 290 | 0x46A8 US_OUT_FMT_1 | ||
| 291 | 0x46AC US_OUT_FMT_2 | ||
| 292 | 0x46B0 US_OUT_FMT_3 | ||
| 293 | 0x46B4 US_W_FMT | ||
| 294 | 0x46B8 US_CODE_BANK | ||
| 295 | 0x46BC US_CODE_EXT | ||
| 296 | 0x46C0 US_ALU_RGB_ADDR_0 | ||
| 297 | 0x46C4 US_ALU_RGB_ADDR_1 | ||
| 298 | 0x46C8 US_ALU_RGB_ADDR_2 | ||
| 299 | 0x46CC US_ALU_RGB_ADDR_3 | ||
| 300 | 0x46D0 US_ALU_RGB_ADDR_4 | ||
| 301 | 0x46D4 US_ALU_RGB_ADDR_5 | ||
| 302 | 0x46D8 US_ALU_RGB_ADDR_6 | ||
| 303 | 0x46DC US_ALU_RGB_ADDR_7 | ||
| 304 | 0x46E0 US_ALU_RGB_ADDR_8 | ||
| 305 | 0x46E4 US_ALU_RGB_ADDR_9 | ||
| 306 | 0x46E8 US_ALU_RGB_ADDR_10 | ||
| 307 | 0x46EC US_ALU_RGB_ADDR_11 | ||
| 308 | 0x46F0 US_ALU_RGB_ADDR_12 | ||
| 309 | 0x46F4 US_ALU_RGB_ADDR_13 | ||
| 310 | 0x46F8 US_ALU_RGB_ADDR_14 | ||
| 311 | 0x46FC US_ALU_RGB_ADDR_15 | ||
| 312 | 0x4700 US_ALU_RGB_ADDR_16 | ||
| 313 | 0x4704 US_ALU_RGB_ADDR_17 | ||
| 314 | 0x4708 US_ALU_RGB_ADDR_18 | ||
| 315 | 0x470C US_ALU_RGB_ADDR_19 | ||
| 316 | 0x4710 US_ALU_RGB_ADDR_20 | ||
| 317 | 0x4714 US_ALU_RGB_ADDR_21 | ||
| 318 | 0x4718 US_ALU_RGB_ADDR_22 | ||
| 319 | 0x471C US_ALU_RGB_ADDR_23 | ||
| 320 | 0x4720 US_ALU_RGB_ADDR_24 | ||
| 321 | 0x4724 US_ALU_RGB_ADDR_25 | ||
| 322 | 0x4728 US_ALU_RGB_ADDR_26 | ||
| 323 | 0x472C US_ALU_RGB_ADDR_27 | ||
| 324 | 0x4730 US_ALU_RGB_ADDR_28 | ||
| 325 | 0x4734 US_ALU_RGB_ADDR_29 | ||
| 326 | 0x4738 US_ALU_RGB_ADDR_30 | ||
| 327 | 0x473C US_ALU_RGB_ADDR_31 | ||
| 328 | 0x4740 US_ALU_RGB_ADDR_32 | ||
| 329 | 0x4744 US_ALU_RGB_ADDR_33 | ||
| 330 | 0x4748 US_ALU_RGB_ADDR_34 | ||
| 331 | 0x474C US_ALU_RGB_ADDR_35 | ||
| 332 | 0x4750 US_ALU_RGB_ADDR_36 | ||
| 333 | 0x4754 US_ALU_RGB_ADDR_37 | ||
| 334 | 0x4758 US_ALU_RGB_ADDR_38 | ||
| 335 | 0x475C US_ALU_RGB_ADDR_39 | ||
| 336 | 0x4760 US_ALU_RGB_ADDR_40 | ||
| 337 | 0x4764 US_ALU_RGB_ADDR_41 | ||
| 338 | 0x4768 US_ALU_RGB_ADDR_42 | ||
| 339 | 0x476C US_ALU_RGB_ADDR_43 | ||
| 340 | 0x4770 US_ALU_RGB_ADDR_44 | ||
| 341 | 0x4774 US_ALU_RGB_ADDR_45 | ||
| 342 | 0x4778 US_ALU_RGB_ADDR_46 | ||
| 343 | 0x477C US_ALU_RGB_ADDR_47 | ||
| 344 | 0x4780 US_ALU_RGB_ADDR_48 | ||
| 345 | 0x4784 US_ALU_RGB_ADDR_49 | ||
| 346 | 0x4788 US_ALU_RGB_ADDR_50 | ||
| 347 | 0x478C US_ALU_RGB_ADDR_51 | ||
| 348 | 0x4790 US_ALU_RGB_ADDR_52 | ||
| 349 | 0x4794 US_ALU_RGB_ADDR_53 | ||
| 350 | 0x4798 US_ALU_RGB_ADDR_54 | ||
| 351 | 0x479C US_ALU_RGB_ADDR_55 | ||
| 352 | 0x47A0 US_ALU_RGB_ADDR_56 | ||
| 353 | 0x47A4 US_ALU_RGB_ADDR_57 | ||
| 354 | 0x47A8 US_ALU_RGB_ADDR_58 | ||
| 355 | 0x47AC US_ALU_RGB_ADDR_59 | ||
| 356 | 0x47B0 US_ALU_RGB_ADDR_60 | ||
| 357 | 0x47B4 US_ALU_RGB_ADDR_61 | ||
| 358 | 0x47B8 US_ALU_RGB_ADDR_62 | ||
| 359 | 0x47BC US_ALU_RGB_ADDR_63 | ||
| 360 | 0x47C0 US_ALU_ALPHA_ADDR_0 | ||
| 361 | 0x47C4 US_ALU_ALPHA_ADDR_1 | ||
| 362 | 0x47C8 US_ALU_ALPHA_ADDR_2 | ||
| 363 | 0x47CC US_ALU_ALPHA_ADDR_3 | ||
| 364 | 0x47D0 US_ALU_ALPHA_ADDR_4 | ||
| 365 | 0x47D4 US_ALU_ALPHA_ADDR_5 | ||
| 366 | 0x47D8 US_ALU_ALPHA_ADDR_6 | ||
| 367 | 0x47DC US_ALU_ALPHA_ADDR_7 | ||
| 368 | 0x47E0 US_ALU_ALPHA_ADDR_8 | ||
| 369 | 0x47E4 US_ALU_ALPHA_ADDR_9 | ||
| 370 | 0x47E8 US_ALU_ALPHA_ADDR_10 | ||
| 371 | 0x47EC US_ALU_ALPHA_ADDR_11 | ||
| 372 | 0x47F0 US_ALU_ALPHA_ADDR_12 | ||
| 373 | 0x47F4 US_ALU_ALPHA_ADDR_13 | ||
| 374 | 0x47F8 US_ALU_ALPHA_ADDR_14 | ||
| 375 | 0x47FC US_ALU_ALPHA_ADDR_15 | ||
| 376 | 0x4800 US_ALU_ALPHA_ADDR_16 | ||
| 377 | 0x4804 US_ALU_ALPHA_ADDR_17 | ||
| 378 | 0x4808 US_ALU_ALPHA_ADDR_18 | ||
| 379 | 0x480C US_ALU_ALPHA_ADDR_19 | ||
| 380 | 0x4810 US_ALU_ALPHA_ADDR_20 | ||
| 381 | 0x4814 US_ALU_ALPHA_ADDR_21 | ||
| 382 | 0x4818 US_ALU_ALPHA_ADDR_22 | ||
| 383 | 0x481C US_ALU_ALPHA_ADDR_23 | ||
| 384 | 0x4820 US_ALU_ALPHA_ADDR_24 | ||
| 385 | 0x4824 US_ALU_ALPHA_ADDR_25 | ||
| 386 | 0x4828 US_ALU_ALPHA_ADDR_26 | ||
| 387 | 0x482C US_ALU_ALPHA_ADDR_27 | ||
| 388 | 0x4830 US_ALU_ALPHA_ADDR_28 | ||
| 389 | 0x4834 US_ALU_ALPHA_ADDR_29 | ||
| 390 | 0x4838 US_ALU_ALPHA_ADDR_30 | ||
| 391 | 0x483C US_ALU_ALPHA_ADDR_31 | ||
| 392 | 0x4840 US_ALU_ALPHA_ADDR_32 | ||
| 393 | 0x4844 US_ALU_ALPHA_ADDR_33 | ||
| 394 | 0x4848 US_ALU_ALPHA_ADDR_34 | ||
| 395 | 0x484C US_ALU_ALPHA_ADDR_35 | ||
| 396 | 0x4850 US_ALU_ALPHA_ADDR_36 | ||
| 397 | 0x4854 US_ALU_ALPHA_ADDR_37 | ||
| 398 | 0x4858 US_ALU_ALPHA_ADDR_38 | ||
| 399 | 0x485C US_ALU_ALPHA_ADDR_39 | ||
| 400 | 0x4860 US_ALU_ALPHA_ADDR_40 | ||
| 401 | 0x4864 US_ALU_ALPHA_ADDR_41 | ||
| 402 | 0x4868 US_ALU_ALPHA_ADDR_42 | ||
| 403 | 0x486C US_ALU_ALPHA_ADDR_43 | ||
| 404 | 0x4870 US_ALU_ALPHA_ADDR_44 | ||
| 405 | 0x4874 US_ALU_ALPHA_ADDR_45 | ||
| 406 | 0x4878 US_ALU_ALPHA_ADDR_46 | ||
| 407 | 0x487C US_ALU_ALPHA_ADDR_47 | ||
| 408 | 0x4880 US_ALU_ALPHA_ADDR_48 | ||
| 409 | 0x4884 US_ALU_ALPHA_ADDR_49 | ||
| 410 | 0x4888 US_ALU_ALPHA_ADDR_50 | ||
| 411 | 0x488C US_ALU_ALPHA_ADDR_51 | ||
| 412 | 0x4890 US_ALU_ALPHA_ADDR_52 | ||
| 413 | 0x4894 US_ALU_ALPHA_ADDR_53 | ||
| 414 | 0x4898 US_ALU_ALPHA_ADDR_54 | ||
| 415 | 0x489C US_ALU_ALPHA_ADDR_55 | ||
| 416 | 0x48A0 US_ALU_ALPHA_ADDR_56 | ||
| 417 | 0x48A4 US_ALU_ALPHA_ADDR_57 | ||
| 418 | 0x48A8 US_ALU_ALPHA_ADDR_58 | ||
| 419 | 0x48AC US_ALU_ALPHA_ADDR_59 | ||
| 420 | 0x48B0 US_ALU_ALPHA_ADDR_60 | ||
| 421 | 0x48B4 US_ALU_ALPHA_ADDR_61 | ||
| 422 | 0x48B8 US_ALU_ALPHA_ADDR_62 | ||
| 423 | 0x48BC US_ALU_ALPHA_ADDR_63 | ||
| 424 | 0x48C0 US_ALU_RGB_INST_0 | ||
| 425 | 0x48C4 US_ALU_RGB_INST_1 | ||
| 426 | 0x48C8 US_ALU_RGB_INST_2 | ||
| 427 | 0x48CC US_ALU_RGB_INST_3 | ||
| 428 | 0x48D0 US_ALU_RGB_INST_4 | ||
| 429 | 0x48D4 US_ALU_RGB_INST_5 | ||
| 430 | 0x48D8 US_ALU_RGB_INST_6 | ||
| 431 | 0x48DC US_ALU_RGB_INST_7 | ||
| 432 | 0x48E0 US_ALU_RGB_INST_8 | ||
| 433 | 0x48E4 US_ALU_RGB_INST_9 | ||
| 434 | 0x48E8 US_ALU_RGB_INST_10 | ||
| 435 | 0x48EC US_ALU_RGB_INST_11 | ||
| 436 | 0x48F0 US_ALU_RGB_INST_12 | ||
| 437 | 0x48F4 US_ALU_RGB_INST_13 | ||
| 438 | 0x48F8 US_ALU_RGB_INST_14 | ||
| 439 | 0x48FC US_ALU_RGB_INST_15 | ||
| 440 | 0x4900 US_ALU_RGB_INST_16 | ||
| 441 | 0x4904 US_ALU_RGB_INST_17 | ||
| 442 | 0x4908 US_ALU_RGB_INST_18 | ||
| 443 | 0x490C US_ALU_RGB_INST_19 | ||
| 444 | 0x4910 US_ALU_RGB_INST_20 | ||
| 445 | 0x4914 US_ALU_RGB_INST_21 | ||
| 446 | 0x4918 US_ALU_RGB_INST_22 | ||
| 447 | 0x491C US_ALU_RGB_INST_23 | ||
| 448 | 0x4920 US_ALU_RGB_INST_24 | ||
| 449 | 0x4924 US_ALU_RGB_INST_25 | ||
| 450 | 0x4928 US_ALU_RGB_INST_26 | ||
| 451 | 0x492C US_ALU_RGB_INST_27 | ||
| 452 | 0x4930 US_ALU_RGB_INST_28 | ||
| 453 | 0x4934 US_ALU_RGB_INST_29 | ||
| 454 | 0x4938 US_ALU_RGB_INST_30 | ||
| 455 | 0x493C US_ALU_RGB_INST_31 | ||
| 456 | 0x4940 US_ALU_RGB_INST_32 | ||
| 457 | 0x4944 US_ALU_RGB_INST_33 | ||
| 458 | 0x4948 US_ALU_RGB_INST_34 | ||
| 459 | 0x494C US_ALU_RGB_INST_35 | ||
| 460 | 0x4950 US_ALU_RGB_INST_36 | ||
| 461 | 0x4954 US_ALU_RGB_INST_37 | ||
| 462 | 0x4958 US_ALU_RGB_INST_38 | ||
| 463 | 0x495C US_ALU_RGB_INST_39 | ||
| 464 | 0x4960 US_ALU_RGB_INST_40 | ||
| 465 | 0x4964 US_ALU_RGB_INST_41 | ||
| 466 | 0x4968 US_ALU_RGB_INST_42 | ||
| 467 | 0x496C US_ALU_RGB_INST_43 | ||
| 468 | 0x4970 US_ALU_RGB_INST_44 | ||
| 469 | 0x4974 US_ALU_RGB_INST_45 | ||
| 470 | 0x4978 US_ALU_RGB_INST_46 | ||
| 471 | 0x497C US_ALU_RGB_INST_47 | ||
| 472 | 0x4980 US_ALU_RGB_INST_48 | ||
| 473 | 0x4984 US_ALU_RGB_INST_49 | ||
| 474 | 0x4988 US_ALU_RGB_INST_50 | ||
| 475 | 0x498C US_ALU_RGB_INST_51 | ||
| 476 | 0x4990 US_ALU_RGB_INST_52 | ||
| 477 | 0x4994 US_ALU_RGB_INST_53 | ||
| 478 | 0x4998 US_ALU_RGB_INST_54 | ||
| 479 | 0x499C US_ALU_RGB_INST_55 | ||
| 480 | 0x49A0 US_ALU_RGB_INST_56 | ||
| 481 | 0x49A4 US_ALU_RGB_INST_57 | ||
| 482 | 0x49A8 US_ALU_RGB_INST_58 | ||
| 483 | 0x49AC US_ALU_RGB_INST_59 | ||
| 484 | 0x49B0 US_ALU_RGB_INST_60 | ||
| 485 | 0x49B4 US_ALU_RGB_INST_61 | ||
| 486 | 0x49B8 US_ALU_RGB_INST_62 | ||
| 487 | 0x49BC US_ALU_RGB_INST_63 | ||
| 488 | 0x49C0 US_ALU_ALPHA_INST_0 | ||
| 489 | 0x49C4 US_ALU_ALPHA_INST_1 | ||
| 490 | 0x49C8 US_ALU_ALPHA_INST_2 | ||
| 491 | 0x49CC US_ALU_ALPHA_INST_3 | ||
| 492 | 0x49D0 US_ALU_ALPHA_INST_4 | ||
| 493 | 0x49D4 US_ALU_ALPHA_INST_5 | ||
| 494 | 0x49D8 US_ALU_ALPHA_INST_6 | ||
| 495 | 0x49DC US_ALU_ALPHA_INST_7 | ||
| 496 | 0x49E0 US_ALU_ALPHA_INST_8 | ||
| 497 | 0x49E4 US_ALU_ALPHA_INST_9 | ||
| 498 | 0x49E8 US_ALU_ALPHA_INST_10 | ||
| 499 | 0x49EC US_ALU_ALPHA_INST_11 | ||
| 500 | 0x49F0 US_ALU_ALPHA_INST_12 | ||
| 501 | 0x49F4 US_ALU_ALPHA_INST_13 | ||
| 502 | 0x49F8 US_ALU_ALPHA_INST_14 | ||
| 503 | 0x49FC US_ALU_ALPHA_INST_15 | ||
| 504 | 0x4A00 US_ALU_ALPHA_INST_16 | ||
| 505 | 0x4A04 US_ALU_ALPHA_INST_17 | ||
| 506 | 0x4A08 US_ALU_ALPHA_INST_18 | ||
| 507 | 0x4A0C US_ALU_ALPHA_INST_19 | ||
| 508 | 0x4A10 US_ALU_ALPHA_INST_20 | ||
| 509 | 0x4A14 US_ALU_ALPHA_INST_21 | ||
| 510 | 0x4A18 US_ALU_ALPHA_INST_22 | ||
| 511 | 0x4A1C US_ALU_ALPHA_INST_23 | ||
| 512 | 0x4A20 US_ALU_ALPHA_INST_24 | ||
| 513 | 0x4A24 US_ALU_ALPHA_INST_25 | ||
| 514 | 0x4A28 US_ALU_ALPHA_INST_26 | ||
| 515 | 0x4A2C US_ALU_ALPHA_INST_27 | ||
| 516 | 0x4A30 US_ALU_ALPHA_INST_28 | ||
| 517 | 0x4A34 US_ALU_ALPHA_INST_29 | ||
| 518 | 0x4A38 US_ALU_ALPHA_INST_30 | ||
| 519 | 0x4A3C US_ALU_ALPHA_INST_31 | ||
| 520 | 0x4A40 US_ALU_ALPHA_INST_32 | ||
| 521 | 0x4A44 US_ALU_ALPHA_INST_33 | ||
| 522 | 0x4A48 US_ALU_ALPHA_INST_34 | ||
| 523 | 0x4A4C US_ALU_ALPHA_INST_35 | ||
| 524 | 0x4A50 US_ALU_ALPHA_INST_36 | ||
| 525 | 0x4A54 US_ALU_ALPHA_INST_37 | ||
| 526 | 0x4A58 US_ALU_ALPHA_INST_38 | ||
| 527 | 0x4A5C US_ALU_ALPHA_INST_39 | ||
| 528 | 0x4A60 US_ALU_ALPHA_INST_40 | ||
| 529 | 0x4A64 US_ALU_ALPHA_INST_41 | ||
| 530 | 0x4A68 US_ALU_ALPHA_INST_42 | ||
| 531 | 0x4A6C US_ALU_ALPHA_INST_43 | ||
| 532 | 0x4A70 US_ALU_ALPHA_INST_44 | ||
| 533 | 0x4A74 US_ALU_ALPHA_INST_45 | ||
| 534 | 0x4A78 US_ALU_ALPHA_INST_46 | ||
| 535 | 0x4A7C US_ALU_ALPHA_INST_47 | ||
| 536 | 0x4A80 US_ALU_ALPHA_INST_48 | ||
| 537 | 0x4A84 US_ALU_ALPHA_INST_49 | ||
| 538 | 0x4A88 US_ALU_ALPHA_INST_50 | ||
| 539 | 0x4A8C US_ALU_ALPHA_INST_51 | ||
| 540 | 0x4A90 US_ALU_ALPHA_INST_52 | ||
| 541 | 0x4A94 US_ALU_ALPHA_INST_53 | ||
| 542 | 0x4A98 US_ALU_ALPHA_INST_54 | ||
| 543 | 0x4A9C US_ALU_ALPHA_INST_55 | ||
| 544 | 0x4AA0 US_ALU_ALPHA_INST_56 | ||
| 545 | 0x4AA4 US_ALU_ALPHA_INST_57 | ||
| 546 | 0x4AA8 US_ALU_ALPHA_INST_58 | ||
| 547 | 0x4AAC US_ALU_ALPHA_INST_59 | ||
| 548 | 0x4AB0 US_ALU_ALPHA_INST_60 | ||
| 549 | 0x4AB4 US_ALU_ALPHA_INST_61 | ||
| 550 | 0x4AB8 US_ALU_ALPHA_INST_62 | ||
| 551 | 0x4ABC US_ALU_ALPHA_INST_63 | ||
| 552 | 0x4AC0 US_ALU_EXT_ADDR_0 | ||
| 553 | 0x4AC4 US_ALU_EXT_ADDR_1 | ||
| 554 | 0x4AC8 US_ALU_EXT_ADDR_2 | ||
| 555 | 0x4ACC US_ALU_EXT_ADDR_3 | ||
| 556 | 0x4AD0 US_ALU_EXT_ADDR_4 | ||
| 557 | 0x4AD4 US_ALU_EXT_ADDR_5 | ||
| 558 | 0x4AD8 US_ALU_EXT_ADDR_6 | ||
| 559 | 0x4ADC US_ALU_EXT_ADDR_7 | ||
| 560 | 0x4AE0 US_ALU_EXT_ADDR_8 | ||
| 561 | 0x4AE4 US_ALU_EXT_ADDR_9 | ||
| 562 | 0x4AE8 US_ALU_EXT_ADDR_10 | ||
| 563 | 0x4AEC US_ALU_EXT_ADDR_11 | ||
| 564 | 0x4AF0 US_ALU_EXT_ADDR_12 | ||
| 565 | 0x4AF4 US_ALU_EXT_ADDR_13 | ||
| 566 | 0x4AF8 US_ALU_EXT_ADDR_14 | ||
| 567 | 0x4AFC US_ALU_EXT_ADDR_15 | ||
| 568 | 0x4B00 US_ALU_EXT_ADDR_16 | ||
| 569 | 0x4B04 US_ALU_EXT_ADDR_17 | ||
| 570 | 0x4B08 US_ALU_EXT_ADDR_18 | ||
| 571 | 0x4B0C US_ALU_EXT_ADDR_19 | ||
| 572 | 0x4B10 US_ALU_EXT_ADDR_20 | ||
| 573 | 0x4B14 US_ALU_EXT_ADDR_21 | ||
| 574 | 0x4B18 US_ALU_EXT_ADDR_22 | ||
| 575 | 0x4B1C US_ALU_EXT_ADDR_23 | ||
| 576 | 0x4B20 US_ALU_EXT_ADDR_24 | ||
| 577 | 0x4B24 US_ALU_EXT_ADDR_25 | ||
| 578 | 0x4B28 US_ALU_EXT_ADDR_26 | ||
| 579 | 0x4B2C US_ALU_EXT_ADDR_27 | ||
| 580 | 0x4B30 US_ALU_EXT_ADDR_28 | ||
| 581 | 0x4B34 US_ALU_EXT_ADDR_29 | ||
| 582 | 0x4B38 US_ALU_EXT_ADDR_30 | ||
| 583 | 0x4B3C US_ALU_EXT_ADDR_31 | ||
| 584 | 0x4B40 US_ALU_EXT_ADDR_32 | ||
| 585 | 0x4B44 US_ALU_EXT_ADDR_33 | ||
| 586 | 0x4B48 US_ALU_EXT_ADDR_34 | ||
| 587 | 0x4B4C US_ALU_EXT_ADDR_35 | ||
| 588 | 0x4B50 US_ALU_EXT_ADDR_36 | ||
| 589 | 0x4B54 US_ALU_EXT_ADDR_37 | ||
| 590 | 0x4B58 US_ALU_EXT_ADDR_38 | ||
| 591 | 0x4B5C US_ALU_EXT_ADDR_39 | ||
| 592 | 0x4B60 US_ALU_EXT_ADDR_40 | ||
| 593 | 0x4B64 US_ALU_EXT_ADDR_41 | ||
| 594 | 0x4B68 US_ALU_EXT_ADDR_42 | ||
| 595 | 0x4B6C US_ALU_EXT_ADDR_43 | ||
| 596 | 0x4B70 US_ALU_EXT_ADDR_44 | ||
| 597 | 0x4B74 US_ALU_EXT_ADDR_45 | ||
| 598 | 0x4B78 US_ALU_EXT_ADDR_46 | ||
| 599 | 0x4B7C US_ALU_EXT_ADDR_47 | ||
| 600 | 0x4B80 US_ALU_EXT_ADDR_48 | ||
| 601 | 0x4B84 US_ALU_EXT_ADDR_49 | ||
| 602 | 0x4B88 US_ALU_EXT_ADDR_50 | ||
| 603 | 0x4B8C US_ALU_EXT_ADDR_51 | ||
| 604 | 0x4B90 US_ALU_EXT_ADDR_52 | ||
| 605 | 0x4B94 US_ALU_EXT_ADDR_53 | ||
| 606 | 0x4B98 US_ALU_EXT_ADDR_54 | ||
| 607 | 0x4B9C US_ALU_EXT_ADDR_55 | ||
| 608 | 0x4BA0 US_ALU_EXT_ADDR_56 | ||
| 609 | 0x4BA4 US_ALU_EXT_ADDR_57 | ||
| 610 | 0x4BA8 US_ALU_EXT_ADDR_58 | ||
| 611 | 0x4BAC US_ALU_EXT_ADDR_59 | ||
| 612 | 0x4BB0 US_ALU_EXT_ADDR_60 | ||
| 613 | 0x4BB4 US_ALU_EXT_ADDR_61 | ||
| 614 | 0x4BB8 US_ALU_EXT_ADDR_62 | ||
| 615 | 0x4BBC US_ALU_EXT_ADDR_63 | ||
| 616 | 0x4BC0 FG_FOG_BLEND | ||
| 617 | 0x4BC4 FG_FOG_FACTOR | ||
| 618 | 0x4BC8 FG_FOG_COLOR_R | ||
| 619 | 0x4BCC FG_FOG_COLOR_G | ||
| 620 | 0x4BD0 FG_FOG_COLOR_B | ||
| 621 | 0x4BD4 FG_ALPHA_FUNC | ||
| 622 | 0x4BD8 FG_DEPTH_SRC | ||
| 623 | 0x4C00 US_ALU_CONST_R_0 | ||
| 624 | 0x4C04 US_ALU_CONST_G_0 | ||
| 625 | 0x4C08 US_ALU_CONST_B_0 | ||
| 626 | 0x4C0C US_ALU_CONST_A_0 | ||
| 627 | 0x4C10 US_ALU_CONST_R_1 | ||
| 628 | 0x4C14 US_ALU_CONST_G_1 | ||
| 629 | 0x4C18 US_ALU_CONST_B_1 | ||
| 630 | 0x4C1C US_ALU_CONST_A_1 | ||
| 631 | 0x4C20 US_ALU_CONST_R_2 | ||
| 632 | 0x4C24 US_ALU_CONST_G_2 | ||
| 633 | 0x4C28 US_ALU_CONST_B_2 | ||
| 634 | 0x4C2C US_ALU_CONST_A_2 | ||
| 635 | 0x4C30 US_ALU_CONST_R_3 | ||
| 636 | 0x4C34 US_ALU_CONST_G_3 | ||
| 637 | 0x4C38 US_ALU_CONST_B_3 | ||
| 638 | 0x4C3C US_ALU_CONST_A_3 | ||
| 639 | 0x4C40 US_ALU_CONST_R_4 | ||
| 640 | 0x4C44 US_ALU_CONST_G_4 | ||
| 641 | 0x4C48 US_ALU_CONST_B_4 | ||
| 642 | 0x4C4C US_ALU_CONST_A_4 | ||
| 643 | 0x4C50 US_ALU_CONST_R_5 | ||
| 644 | 0x4C54 US_ALU_CONST_G_5 | ||
| 645 | 0x4C58 US_ALU_CONST_B_5 | ||
| 646 | 0x4C5C US_ALU_CONST_A_5 | ||
| 647 | 0x4C60 US_ALU_CONST_R_6 | ||
| 648 | 0x4C64 US_ALU_CONST_G_6 | ||
| 649 | 0x4C68 US_ALU_CONST_B_6 | ||
| 650 | 0x4C6C US_ALU_CONST_A_6 | ||
| 651 | 0x4C70 US_ALU_CONST_R_7 | ||
| 652 | 0x4C74 US_ALU_CONST_G_7 | ||
| 653 | 0x4C78 US_ALU_CONST_B_7 | ||
| 654 | 0x4C7C US_ALU_CONST_A_7 | ||
| 655 | 0x4C80 US_ALU_CONST_R_8 | ||
| 656 | 0x4C84 US_ALU_CONST_G_8 | ||
| 657 | 0x4C88 US_ALU_CONST_B_8 | ||
| 658 | 0x4C8C US_ALU_CONST_A_8 | ||
| 659 | 0x4C90 US_ALU_CONST_R_9 | ||
| 660 | 0x4C94 US_ALU_CONST_G_9 | ||
| 661 | 0x4C98 US_ALU_CONST_B_9 | ||
| 662 | 0x4C9C US_ALU_CONST_A_9 | ||
| 663 | 0x4CA0 US_ALU_CONST_R_10 | ||
| 664 | 0x4CA4 US_ALU_CONST_G_10 | ||
| 665 | 0x4CA8 US_ALU_CONST_B_10 | ||
| 666 | 0x4CAC US_ALU_CONST_A_10 | ||
| 667 | 0x4CB0 US_ALU_CONST_R_11 | ||
| 668 | 0x4CB4 US_ALU_CONST_G_11 | ||
| 669 | 0x4CB8 US_ALU_CONST_B_11 | ||
| 670 | 0x4CBC US_ALU_CONST_A_11 | ||
| 671 | 0x4CC0 US_ALU_CONST_R_12 | ||
| 672 | 0x4CC4 US_ALU_CONST_G_12 | ||
| 673 | 0x4CC8 US_ALU_CONST_B_12 | ||
| 674 | 0x4CCC US_ALU_CONST_A_12 | ||
| 675 | 0x4CD0 US_ALU_CONST_R_13 | ||
| 676 | 0x4CD4 US_ALU_CONST_G_13 | ||
| 677 | 0x4CD8 US_ALU_CONST_B_13 | ||
| 678 | 0x4CDC US_ALU_CONST_A_13 | ||
| 679 | 0x4CE0 US_ALU_CONST_R_14 | ||
| 680 | 0x4CE4 US_ALU_CONST_G_14 | ||
| 681 | 0x4CE8 US_ALU_CONST_B_14 | ||
| 682 | 0x4CEC US_ALU_CONST_A_14 | ||
| 683 | 0x4CF0 US_ALU_CONST_R_15 | ||
| 684 | 0x4CF4 US_ALU_CONST_G_15 | ||
| 685 | 0x4CF8 US_ALU_CONST_B_15 | ||
| 686 | 0x4CFC US_ALU_CONST_A_15 | ||
| 687 | 0x4D00 US_ALU_CONST_R_16 | ||
| 688 | 0x4D04 US_ALU_CONST_G_16 | ||
| 689 | 0x4D08 US_ALU_CONST_B_16 | ||
| 690 | 0x4D0C US_ALU_CONST_A_16 | ||
| 691 | 0x4D10 US_ALU_CONST_R_17 | ||
| 692 | 0x4D14 US_ALU_CONST_G_17 | ||
| 693 | 0x4D18 US_ALU_CONST_B_17 | ||
| 694 | 0x4D1C US_ALU_CONST_A_17 | ||
| 695 | 0x4D20 US_ALU_CONST_R_18 | ||
| 696 | 0x4D24 US_ALU_CONST_G_18 | ||
| 697 | 0x4D28 US_ALU_CONST_B_18 | ||
| 698 | 0x4D2C US_ALU_CONST_A_18 | ||
| 699 | 0x4D30 US_ALU_CONST_R_19 | ||
| 700 | 0x4D34 US_ALU_CONST_G_19 | ||
| 701 | 0x4D38 US_ALU_CONST_B_19 | ||
| 702 | 0x4D3C US_ALU_CONST_A_19 | ||
| 703 | 0x4D40 US_ALU_CONST_R_20 | ||
| 704 | 0x4D44 US_ALU_CONST_G_20 | ||
| 705 | 0x4D48 US_ALU_CONST_B_20 | ||
| 706 | 0x4D4C US_ALU_CONST_A_20 | ||
| 707 | 0x4D50 US_ALU_CONST_R_21 | ||
| 708 | 0x4D54 US_ALU_CONST_G_21 | ||
| 709 | 0x4D58 US_ALU_CONST_B_21 | ||
| 710 | 0x4D5C US_ALU_CONST_A_21 | ||
| 711 | 0x4D60 US_ALU_CONST_R_22 | ||
| 712 | 0x4D64 US_ALU_CONST_G_22 | ||
| 713 | 0x4D68 US_ALU_CONST_B_22 | ||
| 714 | 0x4D6C US_ALU_CONST_A_22 | ||
| 715 | 0x4D70 US_ALU_CONST_R_23 | ||
| 716 | 0x4D74 US_ALU_CONST_G_23 | ||
| 717 | 0x4D78 US_ALU_CONST_B_23 | ||
| 718 | 0x4D7C US_ALU_CONST_A_23 | ||
| 719 | 0x4D80 US_ALU_CONST_R_24 | ||
| 720 | 0x4D84 US_ALU_CONST_G_24 | ||
| 721 | 0x4D88 US_ALU_CONST_B_24 | ||
| 722 | 0x4D8C US_ALU_CONST_A_24 | ||
| 723 | 0x4D90 US_ALU_CONST_R_25 | ||
| 724 | 0x4D94 US_ALU_CONST_G_25 | ||
| 725 | 0x4D98 US_ALU_CONST_B_25 | ||
| 726 | 0x4D9C US_ALU_CONST_A_25 | ||
| 727 | 0x4DA0 US_ALU_CONST_R_26 | ||
| 728 | 0x4DA4 US_ALU_CONST_G_26 | ||
| 729 | 0x4DA8 US_ALU_CONST_B_26 | ||
| 730 | 0x4DAC US_ALU_CONST_A_26 | ||
| 731 | 0x4DB0 US_ALU_CONST_R_27 | ||
| 732 | 0x4DB4 US_ALU_CONST_G_27 | ||
| 733 | 0x4DB8 US_ALU_CONST_B_27 | ||
| 734 | 0x4DBC US_ALU_CONST_A_27 | ||
| 735 | 0x4DC0 US_ALU_CONST_R_28 | ||
| 736 | 0x4DC4 US_ALU_CONST_G_28 | ||
| 737 | 0x4DC8 US_ALU_CONST_B_28 | ||
| 738 | 0x4DCC US_ALU_CONST_A_28 | ||
| 739 | 0x4DD0 US_ALU_CONST_R_29 | ||
| 740 | 0x4DD4 US_ALU_CONST_G_29 | ||
| 741 | 0x4DD8 US_ALU_CONST_B_29 | ||
| 742 | 0x4DDC US_ALU_CONST_A_29 | ||
| 743 | 0x4DE0 US_ALU_CONST_R_30 | ||
| 744 | 0x4DE4 US_ALU_CONST_G_30 | ||
| 745 | 0x4DE8 US_ALU_CONST_B_30 | ||
| 746 | 0x4DEC US_ALU_CONST_A_30 | ||
| 747 | 0x4DF0 US_ALU_CONST_R_31 | ||
| 748 | 0x4DF4 US_ALU_CONST_G_31 | ||
| 749 | 0x4DF8 US_ALU_CONST_B_31 | ||
| 750 | 0x4DFC US_ALU_CONST_A_31 | ||
| 751 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
| 752 | 0x4E08 RB3D_ABLENDCNTL_R3 | ||
| 753 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
| 754 | 0x4E10 RB3D_CONSTANT_COLOR | ||
| 755 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | ||
| 756 | 0x4E18 RB3D_ROPCNTL_R3 | ||
| 757 | 0x4E1C RB3D_CLRCMP_FLIPE_R3 | ||
| 758 | 0x4E20 RB3D_CLRCMP_CLR_R3 | ||
| 759 | 0x4E24 RB3D_CLRCMP_MSK_R3 | ||
| 760 | 0x4E48 RB3D_DEBUG_CTL | ||
| 761 | 0x4E4C RB3D_DSTCACHE_CTLSTAT_R3 | ||
| 762 | 0x4E50 RB3D_DITHER_CTL | ||
| 763 | 0x4E54 RB3D_CMASK_OFFSET0 | ||
| 764 | 0x4E58 RB3D_CMASK_OFFSET1 | ||
| 765 | 0x4E5C RB3D_CMASK_OFFSET2 | ||
| 766 | 0x4E60 RB3D_CMASK_OFFSET3 | ||
| 767 | 0x4E64 RB3D_CMASK_PITCH0 | ||
| 768 | 0x4E68 RB3D_CMASK_PITCH1 | ||
| 769 | 0x4E6C RB3D_CMASK_PITCH2 | ||
| 770 | 0x4E70 RB3D_CMASK_PITCH3 | ||
| 771 | 0x4E74 RB3D_CMASK_WRINDEX | ||
| 772 | 0x4E78 RB3D_CMASK_DWORD | ||
| 773 | 0x4E7C RB3D_CMASK_RDINDEX | ||
| 774 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
| 775 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
| 776 | 0x4E88 RB3D_AARESOLVE_CTL | ||
| 777 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | ||
| 778 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | ||
| 779 | 0x4F04 ZB_ZSTENCILCNTL | ||
| 780 | 0x4F08 ZB_STENCILREFMASK | ||
| 781 | 0x4F14 ZB_ZTOP | ||
| 782 | 0x4F18 ZB_ZCACHE_CTLSTAT | ||
| 783 | 0x4F1C ZB_BW_CNTL | ||
| 784 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
| 785 | 0x4F30 ZB_ZMASK_OFFSET | ||
| 786 | 0x4F34 ZB_ZMASK_PITCH | ||
| 787 | 0x4F38 ZB_ZMASK_WRINDEX | ||
| 788 | 0x4F3C ZB_ZMASK_DWORD | ||
| 789 | 0x4F40 ZB_ZMASK_RDINDEX | ||
| 790 | 0x4F44 ZB_HIZ_OFFSET | ||
| 791 | 0x4F48 ZB_HIZ_WRINDEX | ||
| 792 | 0x4F4C ZB_HIZ_DWORD | ||
| 793 | 0x4F50 ZB_HIZ_RDINDEX | ||
| 794 | 0x4F54 ZB_HIZ_PITCH | ||
| 795 | 0x4F58 ZB_ZPASS_DATA | ||
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 8e3c0b807add..6801b865d1c4 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
| @@ -153,7 +153,7 @@ rs600 0x6d40 | |||
| 153 | 0x42A4 SU_POLY_OFFSET_FRONT_SCALE | 153 | 0x42A4 SU_POLY_OFFSET_FRONT_SCALE |
| 154 | 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET | 154 | 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET |
| 155 | 0x42AC SU_POLY_OFFSET_BACK_SCALE | 155 | 0x42AC SU_POLY_OFFSET_BACK_SCALE |
| 156 | 0x42B0 SU_POLY_OFFSET_BACK_OFFSET | 156 | 0x42B0 SU_POLY_OFFSET_BACK_OFFSET |
| 157 | 0x42B4 SU_POLY_OFFSET_ENABLE | 157 | 0x42B4 SU_POLY_OFFSET_ENABLE |
| 158 | 0x42B8 SU_CULL_MODE | 158 | 0x42B8 SU_CULL_MODE |
| 159 | 0x42C0 SU_DEPTH_SCALE | 159 | 0x42C0 SU_DEPTH_SCALE |
| @@ -291,6 +291,8 @@ rs600 0x6d40 | |||
| 291 | 0x46AC US_OUT_FMT_2 | 291 | 0x46AC US_OUT_FMT_2 |
| 292 | 0x46B0 US_OUT_FMT_3 | 292 | 0x46B0 US_OUT_FMT_3 |
| 293 | 0x46B4 US_W_FMT | 293 | 0x46B4 US_W_FMT |
| 294 | 0x46B8 US_CODE_BANK | ||
| 295 | 0x46BC US_CODE_EXT | ||
| 294 | 0x46C0 US_ALU_RGB_ADDR_0 | 296 | 0x46C0 US_ALU_RGB_ADDR_0 |
| 295 | 0x46C4 US_ALU_RGB_ADDR_1 | 297 | 0x46C4 US_ALU_RGB_ADDR_1 |
| 296 | 0x46C8 US_ALU_RGB_ADDR_2 | 298 | 0x46C8 US_ALU_RGB_ADDR_2 |
| @@ -547,6 +549,70 @@ rs600 0x6d40 | |||
| 547 | 0x4AB4 US_ALU_ALPHA_INST_61 | 549 | 0x4AB4 US_ALU_ALPHA_INST_61 |
| 548 | 0x4AB8 US_ALU_ALPHA_INST_62 | 550 | 0x4AB8 US_ALU_ALPHA_INST_62 |
| 549 | 0x4ABC US_ALU_ALPHA_INST_63 | 551 | 0x4ABC US_ALU_ALPHA_INST_63 |
| 552 | 0x4AC0 US_ALU_EXT_ADDR_0 | ||
| 553 | 0x4AC4 US_ALU_EXT_ADDR_1 | ||
| 554 | 0x4AC8 US_ALU_EXT_ADDR_2 | ||
| 555 | 0x4ACC US_ALU_EXT_ADDR_3 | ||
| 556 | 0x4AD0 US_ALU_EXT_ADDR_4 | ||
| 557 | 0x4AD4 US_ALU_EXT_ADDR_5 | ||
| 558 | 0x4AD8 US_ALU_EXT_ADDR_6 | ||
| 559 | 0x4ADC US_ALU_EXT_ADDR_7 | ||
| 560 | 0x4AE0 US_ALU_EXT_ADDR_8 | ||
| 561 | 0x4AE4 US_ALU_EXT_ADDR_9 | ||
| 562 | 0x4AE8 US_ALU_EXT_ADDR_10 | ||
| 563 | 0x4AEC US_ALU_EXT_ADDR_11 | ||
| 564 | 0x4AF0 US_ALU_EXT_ADDR_12 | ||
| 565 | 0x4AF4 US_ALU_EXT_ADDR_13 | ||
| 566 | 0x4AF8 US_ALU_EXT_ADDR_14 | ||
| 567 | 0x4AFC US_ALU_EXT_ADDR_15 | ||
| 568 | 0x4B00 US_ALU_EXT_ADDR_16 | ||
| 569 | 0x4B04 US_ALU_EXT_ADDR_17 | ||
| 570 | 0x4B08 US_ALU_EXT_ADDR_18 | ||
| 571 | 0x4B0C US_ALU_EXT_ADDR_19 | ||
| 572 | 0x4B10 US_ALU_EXT_ADDR_20 | ||
| 573 | 0x4B14 US_ALU_EXT_ADDR_21 | ||
| 574 | 0x4B18 US_ALU_EXT_ADDR_22 | ||
| 575 | 0x4B1C US_ALU_EXT_ADDR_23 | ||
| 576 | 0x4B20 US_ALU_EXT_ADDR_24 | ||
| 577 | 0x4B24 US_ALU_EXT_ADDR_25 | ||
| 578 | 0x4B28 US_ALU_EXT_ADDR_26 | ||
| 579 | 0x4B2C US_ALU_EXT_ADDR_27 | ||
| 580 | 0x4B30 US_ALU_EXT_ADDR_28 | ||
| 581 | 0x4B34 US_ALU_EXT_ADDR_29 | ||
| 582 | 0x4B38 US_ALU_EXT_ADDR_30 | ||
| 583 | 0x4B3C US_ALU_EXT_ADDR_31 | ||
| 584 | 0x4B40 US_ALU_EXT_ADDR_32 | ||
| 585 | 0x4B44 US_ALU_EXT_ADDR_33 | ||
| 586 | 0x4B48 US_ALU_EXT_ADDR_34 | ||
| 587 | 0x4B4C US_ALU_EXT_ADDR_35 | ||
| 588 | 0x4B50 US_ALU_EXT_ADDR_36 | ||
| 589 | 0x4B54 US_ALU_EXT_ADDR_37 | ||
| 590 | 0x4B58 US_ALU_EXT_ADDR_38 | ||
| 591 | 0x4B5C US_ALU_EXT_ADDR_39 | ||
| 592 | 0x4B60 US_ALU_EXT_ADDR_40 | ||
| 593 | 0x4B64 US_ALU_EXT_ADDR_41 | ||
| 594 | 0x4B68 US_ALU_EXT_ADDR_42 | ||
| 595 | 0x4B6C US_ALU_EXT_ADDR_43 | ||
| 596 | 0x4B70 US_ALU_EXT_ADDR_44 | ||
| 597 | 0x4B74 US_ALU_EXT_ADDR_45 | ||
| 598 | 0x4B78 US_ALU_EXT_ADDR_46 | ||
| 599 | 0x4B7C US_ALU_EXT_ADDR_47 | ||
| 600 | 0x4B80 US_ALU_EXT_ADDR_48 | ||
| 601 | 0x4B84 US_ALU_EXT_ADDR_49 | ||
| 602 | 0x4B88 US_ALU_EXT_ADDR_50 | ||
| 603 | 0x4B8C US_ALU_EXT_ADDR_51 | ||
| 604 | 0x4B90 US_ALU_EXT_ADDR_52 | ||
| 605 | 0x4B94 US_ALU_EXT_ADDR_53 | ||
| 606 | 0x4B98 US_ALU_EXT_ADDR_54 | ||
| 607 | 0x4B9C US_ALU_EXT_ADDR_55 | ||
| 608 | 0x4BA0 US_ALU_EXT_ADDR_56 | ||
| 609 | 0x4BA4 US_ALU_EXT_ADDR_57 | ||
| 610 | 0x4BA8 US_ALU_EXT_ADDR_58 | ||
| 611 | 0x4BAC US_ALU_EXT_ADDR_59 | ||
| 612 | 0x4BB0 US_ALU_EXT_ADDR_60 | ||
| 613 | 0x4BB4 US_ALU_EXT_ADDR_61 | ||
| 614 | 0x4BB8 US_ALU_EXT_ADDR_62 | ||
| 615 | 0x4BBC US_ALU_EXT_ADDR_63 | ||
| 550 | 0x4BC0 FG_FOG_BLEND | 616 | 0x4BC0 FG_FOG_BLEND |
| 551 | 0x4BC4 FG_FOG_FACTOR | 617 | 0x4BC4 FG_FOG_FACTOR |
| 552 | 0x4BC8 FG_FOG_COLOR_R | 618 | 0x4BC8 FG_FOG_COLOR_R |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 0102a0d5735c..38abf63bf2cd 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
| @@ -161,7 +161,12 @@ rv515 0x6d40 | |||
| 161 | 0x401C GB_SELECT | 161 | 0x401C GB_SELECT |
| 162 | 0x4020 GB_AA_CONFIG | 162 | 0x4020 GB_AA_CONFIG |
| 163 | 0x4024 GB_FIFO_SIZE | 163 | 0x4024 GB_FIFO_SIZE |
| 164 | 0x4028 GB_Z_PEQ_CONFIG | ||
| 164 | 0x4100 TX_INVALTAGS | 165 | 0x4100 TX_INVALTAGS |
| 166 | 0x4114 SU_TEX_WRAP_PS3 | ||
| 167 | 0x4118 PS3_ENABLE | ||
| 168 | 0x411c PS3_VTX_FMT | ||
| 169 | 0x4120 PS3_TEX_SOURCE | ||
| 165 | 0x4200 GA_POINT_S0 | 170 | 0x4200 GA_POINT_S0 |
| 166 | 0x4204 GA_POINT_T0 | 171 | 0x4204 GA_POINT_T0 |
| 167 | 0x4208 GA_POINT_S1 | 172 | 0x4208 GA_POINT_S1 |
| @@ -171,6 +176,7 @@ rv515 0x6d40 | |||
| 171 | 0x4230 GA_POINT_MINMAX | 176 | 0x4230 GA_POINT_MINMAX |
| 172 | 0x4234 GA_LINE_CNTL | 177 | 0x4234 GA_LINE_CNTL |
| 173 | 0x4238 GA_LINE_STIPPLE_CONFIG | 178 | 0x4238 GA_LINE_STIPPLE_CONFIG |
| 179 | 0x4258 GA_COLOR_CONTROL_PS3 | ||
| 174 | 0x4260 GA_LINE_STIPPLE_VALUE | 180 | 0x4260 GA_LINE_STIPPLE_VALUE |
| 175 | 0x4264 GA_LINE_S0 | 181 | 0x4264 GA_LINE_S0 |
| 176 | 0x4268 GA_LINE_S1 | 182 | 0x4268 GA_LINE_S1 |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 368415df5f3a..9f5418983e2a 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -356,6 +356,7 @@ static int rs400_mc_init(struct radeon_device *rdev) | |||
| 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; | 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; |
| 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
| 358 | r = radeon_mc_setup(rdev); | 358 | r = radeon_mc_setup(rdev); |
| 359 | rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev); | ||
| 359 | if (r) | 360 | if (r) |
| 360 | return r; | 361 | return r; |
| 361 | return 0; | 362 | return 0; |
| @@ -395,6 +396,7 @@ static int rs400_startup(struct radeon_device *rdev) | |||
| 395 | return r; | 396 | return r; |
| 396 | /* Enable IRQ */ | 397 | /* Enable IRQ */ |
| 397 | r100_irq_set(rdev); | 398 | r100_irq_set(rdev); |
| 399 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 398 | /* 1M ring buffer */ | 400 | /* 1M ring buffer */ |
| 399 | r = r100_cp_init(rdev, 1024 * 1024); | 401 | r = r100_cp_init(rdev, 1024 * 1024); |
| 400 | if (r) { | 402 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4f8ea4260572..d5255751e7b3 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev) | |||
| 56 | rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; | 56 | rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; |
| 57 | rdev->mc.gtt_location = 0xffffffffUL; | 57 | rdev->mc.gtt_location = 0xffffffffUL; |
| 58 | r = radeon_mc_setup(rdev); | 58 | r = radeon_mc_setup(rdev); |
| 59 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | ||
| 59 | if (r) | 60 | if (r) |
| 60 | return r; | 61 | return r; |
| 61 | return 0; | 62 | return 0; |
| @@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev) | |||
| 134 | break; | 135 | break; |
| 135 | } | 136 | } |
| 136 | } | 137 | } |
| 137 | rs600_irq_set(rdev); | 138 | if (rdev->irq.installed) |
| 139 | rs600_irq_set(rdev); | ||
| 138 | } | 140 | } |
| 139 | 141 | ||
| 140 | void rs600_hpd_fini(struct radeon_device *rdev) | 142 | void rs600_hpd_fini(struct radeon_device *rdev) |
| @@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
| 315 | u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & | 317 | u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & |
| 316 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 318 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); |
| 317 | 319 | ||
| 320 | if (!rdev->irq.installed) { | ||
| 321 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | ||
| 322 | WREG32(R_000040_GEN_INT_CNTL, 0); | ||
| 323 | return -EINVAL; | ||
| 324 | } | ||
| 318 | if (rdev->irq.sw_int) { | 325 | if (rdev->irq.sw_int) { |
| 319 | tmp |= S_000040_SW_INT_EN(1); | 326 | tmp |= S_000040_SW_INT_EN(1); |
| 320 | } | 327 | } |
| @@ -396,7 +403,7 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
| 396 | } | 403 | } |
| 397 | while (status || r500_disp_int) { | 404 | while (status || r500_disp_int) { |
| 398 | /* SW interrupt */ | 405 | /* SW interrupt */ |
| 399 | if (G_000040_SW_INT_EN(status)) | 406 | if (G_000044_SW_INT(status)) |
| 400 | radeon_fence_process(rdev); | 407 | radeon_fence_process(rdev); |
| 401 | /* Vertical blank interrupts */ | 408 | /* Vertical blank interrupts */ |
| 402 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) | 409 | if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) |
| @@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev) | |||
| 553 | return r; | 560 | return r; |
| 554 | /* Enable IRQ */ | 561 | /* Enable IRQ */ |
| 555 | rs600_irq_set(rdev); | 562 | rs600_irq_set(rdev); |
| 563 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 556 | /* 1M ring buffer */ | 564 | /* 1M ring buffer */ |
| 557 | r = r100_cp_init(rdev, 1024 * 1024); | 565 | r = r100_cp_init(rdev, 1024 * 1024); |
| 558 | if (r) { | 566 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 1e22f52d6039..cd31da913771 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev) | |||
| 172 | rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; | 172 | rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; |
| 173 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 173 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
| 174 | r = radeon_mc_setup(rdev); | 174 | r = radeon_mc_setup(rdev); |
| 175 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | ||
| 175 | if (r) | 176 | if (r) |
| 176 | return r; | 177 | return r; |
| 177 | return 0; | 178 | return 0; |
| @@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev) | |||
| 625 | return r; | 626 | return r; |
| 626 | /* Enable IRQ */ | 627 | /* Enable IRQ */ |
| 627 | rs600_irq_set(rdev); | 628 | rs600_irq_set(rdev); |
| 629 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 628 | /* 1M ring buffer */ | 630 | /* 1M ring buffer */ |
| 629 | r = r100_cp_init(rdev, 1024 * 1024); | 631 | r = r100_cp_init(rdev, 1024 * 1024); |
| 630 | if (r) { | 632 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 59632a506b46..62756717b044 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev) | |||
| 479 | } | 479 | } |
| 480 | /* Enable IRQ */ | 480 | /* Enable IRQ */ |
| 481 | rs600_irq_set(rdev); | 481 | rs600_irq_set(rdev); |
| 482 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 482 | /* 1M ring buffer */ | 483 | /* 1M ring buffer */ |
| 483 | r = r100_cp_init(rdev, 1024 * 1024); | 484 | r = r100_cp_init(rdev, 1024 * 1024); |
| 484 | if (r) { | 485 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 3bcb66e52786..59c71245fb91 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -1096,8 +1096,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1096 | radeon_gem_fini(rdev); | 1096 | radeon_gem_fini(rdev); |
| 1097 | radeon_fence_driver_fini(rdev); | 1097 | radeon_fence_driver_fini(rdev); |
| 1098 | radeon_clocks_fini(rdev); | 1098 | radeon_clocks_fini(rdev); |
| 1099 | if (rdev->flags & RADEON_IS_AGP) | 1099 | radeon_agp_fini(rdev); |
| 1100 | radeon_agp_fini(rdev); | ||
| 1101 | radeon_bo_fini(rdev); | 1100 | radeon_bo_fini(rdev); |
| 1102 | radeon_atombios_fini(rdev); | 1101 | radeon_atombios_fini(rdev); |
| 1103 | kfree(rdev->bios); | 1102 | kfree(rdev->bios); |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 46c3c566307e..68cf87749a42 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -392,7 +392,7 @@ config SENSORS_GL520SM | |||
| 392 | 392 | ||
| 393 | config SENSORS_CORETEMP | 393 | config SENSORS_CORETEMP |
| 394 | tristate "Intel Core/Core2/Atom temperature sensor" | 394 | tristate "Intel Core/Core2/Atom temperature sensor" |
| 395 | depends on X86 && EXPERIMENTAL | 395 | depends on X86 && PCI && EXPERIMENTAL |
| 396 | help | 396 | help |
| 397 | If you say yes here you get support for the temperature | 397 | If you say yes here you get support for the temperature |
| 398 | sensor inside your CPU. Most of the family 6 CPUs | 398 | sensor inside your CPU. Most of the family 6 CPUs |
| @@ -792,6 +792,16 @@ config SENSORS_ADS7828 | |||
| 792 | This driver can also be built as a module. If so, the module | 792 | This driver can also be built as a module. If so, the module |
| 793 | will be called ads7828. | 793 | will be called ads7828. |
| 794 | 794 | ||
| 795 | config SENSORS_AMC6821 | ||
| 796 | tristate "Texas Instruments AMC6821" | ||
| 797 | depends on I2C && EXPERIMENTAL | ||
| 798 | help | ||
| 799 | If you say yes here you get support for the Texas Instruments | ||
| 800 | AMC6821 hardware monitoring chips. | ||
| 801 | |||
| 802 | This driver can also be built as a module. If so, the module | ||
| 803 | will be called amc6821. | ||
| 804 | |||
| 795 | config SENSORS_THMC50 | 805 | config SENSORS_THMC50 |
| 796 | tristate "Texas Instruments THMC50 / Analog Devices ADM1022" | 806 | tristate "Texas Instruments THMC50 / Analog Devices ADM1022" |
| 797 | depends on I2C && EXPERIMENTAL | 807 | depends on I2C && EXPERIMENTAL |
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 450c8e894277..4bc215c0953f 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile | |||
| @@ -86,6 +86,7 @@ obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o | |||
| 86 | obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o | 86 | obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o |
| 87 | obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o | 87 | obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o |
| 88 | obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o | 88 | obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o |
| 89 | obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o | ||
| 89 | obj-$(CONFIG_SENSORS_THMC50) += thmc50.o | 90 | obj-$(CONFIG_SENSORS_THMC50) += thmc50.o |
| 90 | obj-$(CONFIG_SENSORS_TMP401) += tmp401.o | 91 | obj-$(CONFIG_SENSORS_TMP401) += tmp401.o |
| 91 | obj-$(CONFIG_SENSORS_TMP421) += tmp421.o | 92 | obj-$(CONFIG_SENSORS_TMP421) += tmp421.o |
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index a1a7ef14b519..a31e77c776ae 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c | |||
| @@ -94,7 +94,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END }; | |||
| 94 | #define ADT7462_PIN24_SHIFT 6 | 94 | #define ADT7462_PIN24_SHIFT 6 |
| 95 | #define ADT7462_PIN26_VOLT_INPUT 0x08 | 95 | #define ADT7462_PIN26_VOLT_INPUT 0x08 |
| 96 | #define ADT7462_PIN25_VOLT_INPUT 0x20 | 96 | #define ADT7462_PIN25_VOLT_INPUT 0x20 |
| 97 | #define ADT7462_PIN28_SHIFT 6 /* cfg3 */ | 97 | #define ADT7462_PIN28_SHIFT 4 /* cfg3 */ |
| 98 | #define ADT7462_PIN28_VOLT 0x5 | 98 | #define ADT7462_PIN28_VOLT 0x5 |
| 99 | 99 | ||
| 100 | #define ADT7462_REG_ALARM1 0xB8 | 100 | #define ADT7462_REG_ALARM1 0xB8 |
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c new file mode 100644 index 000000000000..1c89d922d619 --- /dev/null +++ b/drivers/hwmon/amc6821.c | |||
| @@ -0,0 +1,1116 @@ | |||
| 1 | /* | ||
| 2 | amc6821.c - Part of lm_sensors, Linux kernel modules for hardware | ||
| 3 | monitoring | ||
| 4 | Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> | ||
| 5 | |||
| 6 | Based on max6650.c: | ||
| 7 | Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de> | ||
| 8 | |||
| 9 | This program is free software; you can redistribute it and/or modify | ||
| 10 | it under the terms of the GNU General Public License as published by | ||
| 11 | the Free Software Foundation; either version 2 of the License, or | ||
| 12 | (at your option) any later version. | ||
| 13 | |||
| 14 | This program is distributed in the hope that it will be useful, | ||
| 15 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | GNU General Public License for more details. | ||
| 18 | |||
| 19 | You should have received a copy of the GNU General Public License | ||
| 20 | along with this program; if not, write to the Free Software | ||
| 21 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 22 | */ | ||
| 23 | |||
| 24 | |||
| 25 | #include <linux/kernel.h> /* Needed for KERN_INFO */ | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/init.h> | ||
| 28 | #include <linux/slab.h> | ||
| 29 | #include <linux/jiffies.h> | ||
| 30 | #include <linux/i2c.h> | ||
| 31 | #include <linux/hwmon.h> | ||
| 32 | #include <linux/hwmon-sysfs.h> | ||
| 33 | #include <linux/err.h> | ||
| 34 | #include <linux/mutex.h> | ||
| 35 | |||
| 36 | |||
| 37 | /* | ||
| 38 | * Addresses to scan. | ||
| 39 | */ | ||
| 40 | |||
| 41 | static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e, | ||
| 42 | 0x4c, 0x4d, 0x4e, I2C_CLIENT_END}; | ||
| 43 | |||
| 44 | |||
| 45 | |||
| 46 | /* | ||
| 47 | * Insmod parameters | ||
| 48 | */ | ||
| 49 | |||
| 50 | static int pwminv = 0; /*Inverted PWM output. */ | ||
| 51 | module_param(pwminv, int, S_IRUGO); | ||
| 52 | |||
| 53 | static int init = 1; /*Power-on initialization.*/ | ||
| 54 | module_param(init, int, S_IRUGO); | ||
| 55 | |||
| 56 | |||
| 57 | enum chips { amc6821 }; | ||
| 58 | |||
| 59 | #define AMC6821_REG_DEV_ID 0x3D | ||
| 60 | #define AMC6821_REG_COMP_ID 0x3E | ||
| 61 | #define AMC6821_REG_CONF1 0x00 | ||
| 62 | #define AMC6821_REG_CONF2 0x01 | ||
| 63 | #define AMC6821_REG_CONF3 0x3F | ||
| 64 | #define AMC6821_REG_CONF4 0x04 | ||
| 65 | #define AMC6821_REG_STAT1 0x02 | ||
| 66 | #define AMC6821_REG_STAT2 0x03 | ||
| 67 | #define AMC6821_REG_TDATA_LOW 0x08 | ||
| 68 | #define AMC6821_REG_TDATA_HI 0x09 | ||
| 69 | #define AMC6821_REG_LTEMP_HI 0x0A | ||
| 70 | #define AMC6821_REG_RTEMP_HI 0x0B | ||
| 71 | #define AMC6821_REG_LTEMP_LIMIT_MIN 0x15 | ||
| 72 | #define AMC6821_REG_LTEMP_LIMIT_MAX 0x14 | ||
| 73 | #define AMC6821_REG_RTEMP_LIMIT_MIN 0x19 | ||
| 74 | #define AMC6821_REG_RTEMP_LIMIT_MAX 0x18 | ||
| 75 | #define AMC6821_REG_LTEMP_CRIT 0x1B | ||
| 76 | #define AMC6821_REG_RTEMP_CRIT 0x1D | ||
| 77 | #define AMC6821_REG_PSV_TEMP 0x1C | ||
| 78 | #define AMC6821_REG_DCY 0x22 | ||
| 79 | #define AMC6821_REG_LTEMP_FAN_CTRL 0x24 | ||
| 80 | #define AMC6821_REG_RTEMP_FAN_CTRL 0x25 | ||
| 81 | #define AMC6821_REG_DCY_LOW_TEMP 0x21 | ||
| 82 | |||
| 83 | #define AMC6821_REG_TACH_LLIMITL 0x10 | ||
| 84 | #define AMC6821_REG_TACH_LLIMITH 0x11 | ||
| 85 | #define AMC6821_REG_TACH_HLIMITL 0x12 | ||
| 86 | #define AMC6821_REG_TACH_HLIMITH 0x13 | ||
| 87 | |||
| 88 | #define AMC6821_CONF1_START 0x01 | ||
| 89 | #define AMC6821_CONF1_FAN_INT_EN 0x02 | ||
| 90 | #define AMC6821_CONF1_FANIE 0x04 | ||
| 91 | #define AMC6821_CONF1_PWMINV 0x08 | ||
| 92 | #define AMC6821_CONF1_FAN_FAULT_EN 0x10 | ||
| 93 | #define AMC6821_CONF1_FDRC0 0x20 | ||
| 94 | #define AMC6821_CONF1_FDRC1 0x40 | ||
| 95 | #define AMC6821_CONF1_THERMOVIE 0x80 | ||
| 96 | |||
| 97 | #define AMC6821_CONF2_PWM_EN 0x01 | ||
| 98 | #define AMC6821_CONF2_TACH_MODE 0x02 | ||
| 99 | #define AMC6821_CONF2_TACH_EN 0x04 | ||
| 100 | #define AMC6821_CONF2_RTFIE 0x08 | ||
| 101 | #define AMC6821_CONF2_LTOIE 0x10 | ||
| 102 | #define AMC6821_CONF2_RTOIE 0x20 | ||
| 103 | #define AMC6821_CONF2_PSVIE 0x40 | ||
| 104 | #define AMC6821_CONF2_RST 0x80 | ||
| 105 | |||
| 106 | #define AMC6821_CONF3_THERM_FAN_EN 0x80 | ||
| 107 | #define AMC6821_CONF3_REV_MASK 0x0F | ||
| 108 | |||
| 109 | #define AMC6821_CONF4_OVREN 0x10 | ||
| 110 | #define AMC6821_CONF4_TACH_FAST 0x20 | ||
| 111 | #define AMC6821_CONF4_PSPR 0x40 | ||
| 112 | #define AMC6821_CONF4_MODE 0x80 | ||
| 113 | |||
| 114 | #define AMC6821_STAT1_RPM_ALARM 0x01 | ||
| 115 | #define AMC6821_STAT1_FANS 0x02 | ||
| 116 | #define AMC6821_STAT1_RTH 0x04 | ||
| 117 | #define AMC6821_STAT1_RTL 0x08 | ||
| 118 | #define AMC6821_STAT1_R_THERM 0x10 | ||
| 119 | #define AMC6821_STAT1_RTF 0x20 | ||
| 120 | #define AMC6821_STAT1_LTH 0x40 | ||
| 121 | #define AMC6821_STAT1_LTL 0x80 | ||
| 122 | |||
| 123 | #define AMC6821_STAT2_RTC 0x08 | ||
| 124 | #define AMC6821_STAT2_LTC 0x10 | ||
| 125 | #define AMC6821_STAT2_LPSV 0x20 | ||
| 126 | #define AMC6821_STAT2_L_THERM 0x40 | ||
| 127 | #define AMC6821_STAT2_THERM_IN 0x80 | ||
| 128 | |||
| 129 | enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX, | ||
| 130 | IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN, | ||
| 131 | IDX_TEMP2_MAX, IDX_TEMP2_CRIT, | ||
| 132 | TEMP_IDX_LEN, }; | ||
| 133 | |||
| 134 | static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI, | ||
| 135 | AMC6821_REG_LTEMP_LIMIT_MIN, | ||
| 136 | AMC6821_REG_LTEMP_LIMIT_MAX, | ||
| 137 | AMC6821_REG_LTEMP_CRIT, | ||
| 138 | AMC6821_REG_RTEMP_HI, | ||
| 139 | AMC6821_REG_RTEMP_LIMIT_MIN, | ||
| 140 | AMC6821_REG_RTEMP_LIMIT_MAX, | ||
| 141 | AMC6821_REG_RTEMP_CRIT, }; | ||
| 142 | |||
| 143 | enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX, | ||
| 144 | FAN1_IDX_LEN, }; | ||
| 145 | |||
| 146 | static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW, | ||
| 147 | AMC6821_REG_TACH_LLIMITL, | ||
| 148 | AMC6821_REG_TACH_HLIMITL, }; | ||
| 149 | |||
| 150 | |||
| 151 | static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI, | ||
| 152 | AMC6821_REG_TACH_LLIMITH, | ||
| 153 | AMC6821_REG_TACH_HLIMITH, }; | ||
| 154 | |||
| 155 | static int amc6821_probe( | ||
| 156 | struct i2c_client *client, | ||
| 157 | const struct i2c_device_id *id); | ||
| 158 | static int amc6821_detect( | ||
| 159 | struct i2c_client *client, | ||
| 160 | struct i2c_board_info *info); | ||
| 161 | static int amc6821_init_client(struct i2c_client *client); | ||
| 162 | static int amc6821_remove(struct i2c_client *client); | ||
| 163 | static struct amc6821_data *amc6821_update_device(struct device *dev); | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Driver data (common to all clients) | ||
| 167 | */ | ||
| 168 | |||
| 169 | static const struct i2c_device_id amc6821_id[] = { | ||
| 170 | { "amc6821", amc6821 }, | ||
| 171 | { } | ||
| 172 | }; | ||
| 173 | |||
| 174 | MODULE_DEVICE_TABLE(i2c, amc6821_id); | ||
| 175 | |||
| 176 | static struct i2c_driver amc6821_driver = { | ||
| 177 | .class = I2C_CLASS_HWMON, | ||
| 178 | .driver = { | ||
| 179 | .name = "amc6821", | ||
| 180 | }, | ||
| 181 | .probe = amc6821_probe, | ||
| 182 | .remove = amc6821_remove, | ||
| 183 | .id_table = amc6821_id, | ||
| 184 | .detect = amc6821_detect, | ||
| 185 | .address_list = normal_i2c, | ||
| 186 | }; | ||
| 187 | |||
| 188 | |||
| 189 | /* | ||
| 190 | * Client data (each client gets its own) | ||
| 191 | */ | ||
| 192 | |||
| 193 | struct amc6821_data { | ||
| 194 | struct device *hwmon_dev; | ||
| 195 | struct mutex update_lock; | ||
| 196 | char valid; /* zero until following fields are valid */ | ||
| 197 | unsigned long last_updated; /* in jiffies */ | ||
| 198 | |||
| 199 | /* register values */ | ||
| 200 | int temp[TEMP_IDX_LEN]; | ||
| 201 | |||
| 202 | u16 fan[FAN1_IDX_LEN]; | ||
| 203 | u8 fan1_div; | ||
| 204 | |||
| 205 | u8 pwm1; | ||
| 206 | u8 temp1_auto_point_temp[3]; | ||
| 207 | u8 temp2_auto_point_temp[3]; | ||
| 208 | u8 pwm1_auto_point_pwm[3]; | ||
| 209 | u8 pwm1_enable; | ||
| 210 | u8 pwm1_auto_channels_temp; | ||
| 211 | |||
| 212 | u8 stat1; | ||
| 213 | u8 stat2; | ||
| 214 | }; | ||
| 215 | |||
| 216 | |||
| 217 | static ssize_t get_temp( | ||
| 218 | struct device *dev, | ||
| 219 | struct device_attribute *devattr, | ||
| 220 | char *buf) | ||
| 221 | { | ||
| 222 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 223 | int ix = to_sensor_dev_attr(devattr)->index; | ||
| 224 | |||
| 225 | return sprintf(buf, "%d\n", data->temp[ix] * 1000); | ||
| 226 | } | ||
| 227 | |||
| 228 | |||
| 229 | |||
| 230 | static ssize_t set_temp( | ||
| 231 | struct device *dev, | ||
| 232 | struct device_attribute *attr, | ||
| 233 | const char *buf, | ||
| 234 | size_t count) | ||
| 235 | { | ||
| 236 | struct i2c_client *client = to_i2c_client(dev); | ||
| 237 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 238 | int ix = to_sensor_dev_attr(attr)->index; | ||
| 239 | long val; | ||
| 240 | |||
| 241 | int ret = strict_strtol(buf, 10, &val); | ||
| 242 | if (ret) | ||
| 243 | return ret; | ||
| 244 | val = SENSORS_LIMIT(val / 1000, -128, 127); | ||
| 245 | |||
| 246 | mutex_lock(&data->update_lock); | ||
| 247 | data->temp[ix] = val; | ||
| 248 | if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) { | ||
| 249 | dev_err(&client->dev, "Register write error, aborting.\n"); | ||
| 250 | count = -EIO; | ||
| 251 | } | ||
| 252 | mutex_unlock(&data->update_lock); | ||
| 253 | return count; | ||
| 254 | } | ||
| 255 | |||
| 256 | |||
| 257 | |||
| 258 | |||
| 259 | static ssize_t get_temp_alarm( | ||
| 260 | struct device *dev, | ||
| 261 | struct device_attribute *devattr, | ||
| 262 | char *buf) | ||
| 263 | { | ||
| 264 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 265 | int ix = to_sensor_dev_attr(devattr)->index; | ||
| 266 | u8 flag; | ||
| 267 | |||
| 268 | switch (ix) { | ||
| 269 | case IDX_TEMP1_MIN: | ||
| 270 | flag = data->stat1 & AMC6821_STAT1_LTL; | ||
| 271 | break; | ||
| 272 | case IDX_TEMP1_MAX: | ||
| 273 | flag = data->stat1 & AMC6821_STAT1_LTH; | ||
| 274 | break; | ||
| 275 | case IDX_TEMP1_CRIT: | ||
| 276 | flag = data->stat2 & AMC6821_STAT2_LTC; | ||
| 277 | break; | ||
| 278 | case IDX_TEMP2_MIN: | ||
| 279 | flag = data->stat1 & AMC6821_STAT1_RTL; | ||
| 280 | break; | ||
| 281 | case IDX_TEMP2_MAX: | ||
| 282 | flag = data->stat1 & AMC6821_STAT1_RTH; | ||
| 283 | break; | ||
| 284 | case IDX_TEMP2_CRIT: | ||
| 285 | flag = data->stat2 & AMC6821_STAT2_RTC; | ||
| 286 | break; | ||
| 287 | default: | ||
| 288 | dev_dbg(dev, "Unknown attr->index (%d).\n", ix); | ||
| 289 | return -EINVAL; | ||
| 290 | } | ||
| 291 | if (flag) | ||
| 292 | return sprintf(buf, "1\n"); | ||
| 293 | else | ||
| 294 | return sprintf(buf, "0\n"); | ||
| 295 | } | ||
| 296 | |||
| 297 | |||
| 298 | |||
| 299 | |||
| 300 | static ssize_t get_temp2_fault( | ||
| 301 | struct device *dev, | ||
| 302 | struct device_attribute *devattr, | ||
| 303 | char *buf) | ||
| 304 | { | ||
| 305 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 306 | if (data->stat1 & AMC6821_STAT1_RTF) | ||
| 307 | return sprintf(buf, "1\n"); | ||
| 308 | else | ||
| 309 | return sprintf(buf, "0\n"); | ||
| 310 | } | ||
| 311 | |||
| 312 | static ssize_t get_pwm1( | ||
| 313 | struct device *dev, | ||
| 314 | struct device_attribute *devattr, | ||
| 315 | char *buf) | ||
| 316 | { | ||
| 317 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 318 | return sprintf(buf, "%d\n", data->pwm1); | ||
| 319 | } | ||
| 320 | |||
| 321 | static ssize_t set_pwm1( | ||
| 322 | struct device *dev, | ||
| 323 | struct device_attribute *devattr, | ||
| 324 | const char *buf, | ||
| 325 | size_t count) | ||
| 326 | { | ||
| 327 | struct i2c_client *client = to_i2c_client(dev); | ||
| 328 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 329 | long val; | ||
| 330 | int ret = strict_strtol(buf, 10, &val); | ||
| 331 | if (ret) | ||
| 332 | return ret; | ||
| 333 | |||
| 334 | mutex_lock(&data->update_lock); | ||
| 335 | data->pwm1 = SENSORS_LIMIT(val, 0, 255); | ||
| 336 | i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1); | ||
| 337 | mutex_unlock(&data->update_lock); | ||
| 338 | return count; | ||
| 339 | } | ||
| 340 | |||
| 341 | static ssize_t get_pwm1_enable( | ||
| 342 | struct device *dev, | ||
| 343 | struct device_attribute *devattr, | ||
| 344 | char *buf) | ||
| 345 | { | ||
| 346 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 347 | return sprintf(buf, "%d\n", data->pwm1_enable); | ||
| 348 | } | ||
| 349 | |||
| 350 | static ssize_t set_pwm1_enable( | ||
| 351 | struct device *dev, | ||
| 352 | struct device_attribute *attr, | ||
| 353 | const char *buf, | ||
| 354 | size_t count) | ||
| 355 | { | ||
| 356 | struct i2c_client *client = to_i2c_client(dev); | ||
| 357 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 358 | long val; | ||
| 359 | int config = strict_strtol(buf, 10, &val); | ||
| 360 | if (config) | ||
| 361 | return config; | ||
| 362 | |||
| 363 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); | ||
| 364 | if (config < 0) { | ||
| 365 | dev_err(&client->dev, | ||
| 366 | "Error reading configuration register, aborting.\n"); | ||
| 367 | return -EIO; | ||
| 368 | } | ||
| 369 | |||
| 370 | switch (val) { | ||
| 371 | case 1: | ||
| 372 | config &= ~AMC6821_CONF1_FDRC0; | ||
| 373 | config &= ~AMC6821_CONF1_FDRC1; | ||
| 374 | break; | ||
| 375 | case 2: | ||
| 376 | config &= ~AMC6821_CONF1_FDRC0; | ||
| 377 | config |= AMC6821_CONF1_FDRC1; | ||
| 378 | break; | ||
| 379 | case 3: | ||
| 380 | config |= AMC6821_CONF1_FDRC0; | ||
| 381 | config |= AMC6821_CONF1_FDRC1; | ||
| 382 | break; | ||
| 383 | default: | ||
| 384 | return -EINVAL; | ||
| 385 | } | ||
| 386 | mutex_lock(&data->update_lock); | ||
| 387 | if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) { | ||
| 388 | dev_err(&client->dev, | ||
| 389 | "Configuration register write error, aborting.\n"); | ||
| 390 | count = -EIO; | ||
| 391 | } | ||
| 392 | mutex_unlock(&data->update_lock); | ||
| 393 | return count; | ||
| 394 | } | ||
| 395 | |||
| 396 | |||
| 397 | static ssize_t get_pwm1_auto_channels_temp( | ||
| 398 | struct device *dev, | ||
| 399 | struct device_attribute *devattr, | ||
| 400 | char *buf) | ||
| 401 | { | ||
| 402 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 403 | return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp); | ||
| 404 | } | ||
| 405 | |||
| 406 | |||
| 407 | static ssize_t get_temp_auto_point_temp( | ||
| 408 | struct device *dev, | ||
| 409 | struct device_attribute *devattr, | ||
| 410 | char *buf) | ||
| 411 | { | ||
| 412 | int ix = to_sensor_dev_attr_2(devattr)->index; | ||
| 413 | int nr = to_sensor_dev_attr_2(devattr)->nr; | ||
| 414 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 415 | switch (nr) { | ||
| 416 | case 1: | ||
| 417 | return sprintf(buf, "%d\n", | ||
| 418 | data->temp1_auto_point_temp[ix] * 1000); | ||
| 419 | break; | ||
| 420 | case 2: | ||
| 421 | return sprintf(buf, "%d\n", | ||
| 422 | data->temp2_auto_point_temp[ix] * 1000); | ||
| 423 | break; | ||
| 424 | default: | ||
| 425 | dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); | ||
| 426 | return -EINVAL; | ||
| 427 | } | ||
| 428 | } | ||
| 429 | |||
| 430 | |||
| 431 | static ssize_t get_pwm1_auto_point_pwm( | ||
| 432 | struct device *dev, | ||
| 433 | struct device_attribute *devattr, | ||
| 434 | char *buf) | ||
| 435 | { | ||
| 436 | int ix = to_sensor_dev_attr(devattr)->index; | ||
| 437 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 438 | return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]); | ||
| 439 | } | ||
| 440 | |||
| 441 | |||
| 442 | static inline ssize_t set_slope_register(struct i2c_client *client, | ||
| 443 | u8 reg, | ||
| 444 | u8 dpwm, | ||
| 445 | u8 *ptemp) | ||
| 446 | { | ||
| 447 | int dt; | ||
| 448 | u8 tmp; | ||
| 449 | |||
| 450 | dt = ptemp[2]-ptemp[1]; | ||
| 451 | for (tmp = 4; tmp > 0; tmp--) { | ||
| 452 | if (dt * (0x20 >> tmp) >= dpwm) | ||
| 453 | break; | ||
| 454 | } | ||
| 455 | tmp |= (ptemp[1] & 0x7C) << 1; | ||
| 456 | if (i2c_smbus_write_byte_data(client, | ||
| 457 | reg, tmp)) { | ||
| 458 | dev_err(&client->dev, "Register write error, aborting.\n"); | ||
| 459 | return -EIO; | ||
| 460 | } | ||
| 461 | return 0; | ||
| 462 | } | ||
| 463 | |||
| 464 | |||
| 465 | |||
| 466 | static ssize_t set_temp_auto_point_temp( | ||
| 467 | struct device *dev, | ||
| 468 | struct device_attribute *attr, | ||
| 469 | const char *buf, | ||
| 470 | size_t count) | ||
| 471 | { | ||
| 472 | struct i2c_client *client = to_i2c_client(dev); | ||
| 473 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 474 | int ix = to_sensor_dev_attr_2(attr)->index; | ||
| 475 | int nr = to_sensor_dev_attr_2(attr)->nr; | ||
| 476 | u8 *ptemp; | ||
| 477 | u8 reg; | ||
| 478 | int dpwm; | ||
| 479 | long val; | ||
| 480 | int ret = strict_strtol(buf, 10, &val); | ||
| 481 | if (ret) | ||
| 482 | return ret; | ||
| 483 | |||
| 484 | switch (nr) { | ||
| 485 | case 1: | ||
| 486 | ptemp = data->temp1_auto_point_temp; | ||
| 487 | reg = AMC6821_REG_LTEMP_FAN_CTRL; | ||
| 488 | break; | ||
| 489 | case 2: | ||
| 490 | ptemp = data->temp2_auto_point_temp; | ||
| 491 | reg = AMC6821_REG_RTEMP_FAN_CTRL; | ||
| 492 | break; | ||
| 493 | default: | ||
| 494 | dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); | ||
| 495 | return -EINVAL; | ||
| 496 | } | ||
| 497 | |||
| 498 | data->valid = 0; | ||
| 499 | mutex_lock(&data->update_lock); | ||
| 500 | switch (ix) { | ||
| 501 | case 0: | ||
| 502 | ptemp[0] = SENSORS_LIMIT(val / 1000, 0, | ||
| 503 | data->temp1_auto_point_temp[1]); | ||
| 504 | ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, | ||
| 505 | data->temp2_auto_point_temp[1]); | ||
| 506 | ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63); | ||
| 507 | if (i2c_smbus_write_byte_data( | ||
| 508 | client, | ||
| 509 | AMC6821_REG_PSV_TEMP, | ||
| 510 | ptemp[0])) { | ||
| 511 | dev_err(&client->dev, | ||
| 512 | "Register write error, aborting.\n"); | ||
| 513 | count = -EIO; | ||
| 514 | } | ||
| 515 | goto EXIT; | ||
| 516 | break; | ||
| 517 | case 1: | ||
| 518 | ptemp[1] = SENSORS_LIMIT( | ||
| 519 | val / 1000, | ||
| 520 | (ptemp[0] & 0x7C) + 4, | ||
| 521 | 124); | ||
| 522 | ptemp[1] &= 0x7C; | ||
| 523 | ptemp[2] = SENSORS_LIMIT( | ||
| 524 | ptemp[2], ptemp[1] + 1, | ||
| 525 | 255); | ||
| 526 | break; | ||
| 527 | case 2: | ||
| 528 | ptemp[2] = SENSORS_LIMIT( | ||
| 529 | val / 1000, | ||
| 530 | ptemp[1]+1, | ||
| 531 | 255); | ||
| 532 | break; | ||
| 533 | default: | ||
| 534 | dev_dbg(dev, "Unknown attr->index (%d).\n", ix); | ||
| 535 | count = -EINVAL; | ||
| 536 | goto EXIT; | ||
| 537 | } | ||
| 538 | dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]; | ||
| 539 | if (set_slope_register(client, reg, dpwm, ptemp)) | ||
| 540 | count = -EIO; | ||
| 541 | |||
| 542 | EXIT: | ||
| 543 | mutex_unlock(&data->update_lock); | ||
| 544 | return count; | ||
| 545 | } | ||
| 546 | |||
| 547 | |||
| 548 | |||
| 549 | static ssize_t set_pwm1_auto_point_pwm( | ||
| 550 | struct device *dev, | ||
| 551 | struct device_attribute *attr, | ||
| 552 | const char *buf, | ||
| 553 | size_t count) | ||
| 554 | { | ||
| 555 | struct i2c_client *client = to_i2c_client(dev); | ||
| 556 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 557 | int dpwm; | ||
| 558 | long val; | ||
| 559 | int ret = strict_strtol(buf, 10, &val); | ||
| 560 | if (ret) | ||
| 561 | return ret; | ||
| 562 | |||
| 563 | mutex_lock(&data->update_lock); | ||
| 564 | data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254); | ||
| 565 | if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP, | ||
| 566 | data->pwm1_auto_point_pwm[1])) { | ||
| 567 | dev_err(&client->dev, "Register write error, aborting.\n"); | ||
| 568 | count = -EIO; | ||
| 569 | goto EXIT; | ||
| 570 | } | ||
| 571 | dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1]; | ||
| 572 | if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm, | ||
| 573 | data->temp1_auto_point_temp)) { | ||
| 574 | count = -EIO; | ||
| 575 | goto EXIT; | ||
| 576 | } | ||
| 577 | if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm, | ||
| 578 | data->temp2_auto_point_temp)) { | ||
| 579 | count = -EIO; | ||
| 580 | goto EXIT; | ||
| 581 | } | ||
| 582 | |||
| 583 | EXIT: | ||
| 584 | data->valid = 0; | ||
| 585 | mutex_unlock(&data->update_lock); | ||
| 586 | return count; | ||
| 587 | } | ||
| 588 | |||
| 589 | static ssize_t get_fan( | ||
| 590 | struct device *dev, | ||
| 591 | struct device_attribute *devattr, | ||
| 592 | char *buf) | ||
| 593 | { | ||
| 594 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 595 | int ix = to_sensor_dev_attr(devattr)->index; | ||
| 596 | if (0 == data->fan[ix]) | ||
| 597 | return sprintf(buf, "0\n"); | ||
| 598 | return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix])); | ||
| 599 | } | ||
| 600 | |||
| 601 | |||
| 602 | |||
| 603 | static ssize_t get_fan1_fault( | ||
| 604 | struct device *dev, | ||
| 605 | struct device_attribute *devattr, | ||
| 606 | char *buf) | ||
| 607 | { | ||
| 608 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 609 | if (data->stat1 & AMC6821_STAT1_FANS) | ||
| 610 | return sprintf(buf, "1\n"); | ||
| 611 | else | ||
| 612 | return sprintf(buf, "0\n"); | ||
| 613 | } | ||
| 614 | |||
| 615 | |||
| 616 | |||
| 617 | static ssize_t set_fan( | ||
| 618 | struct device *dev, | ||
| 619 | struct device_attribute *attr, | ||
| 620 | const char *buf, size_t count) | ||
| 621 | { | ||
| 622 | struct i2c_client *client = to_i2c_client(dev); | ||
| 623 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 624 | long val; | ||
| 625 | int ix = to_sensor_dev_attr(attr)->index; | ||
| 626 | int ret = strict_strtol(buf, 10, &val); | ||
| 627 | if (ret) | ||
| 628 | return ret; | ||
| 629 | val = 1 > val ? 0xFFFF : 6000000/val; | ||
| 630 | |||
| 631 | mutex_lock(&data->update_lock); | ||
| 632 | data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF); | ||
| 633 | if (i2c_smbus_write_byte_data(client, fan_reg_low[ix], | ||
| 634 | data->fan[ix] & 0xFF)) { | ||
| 635 | dev_err(&client->dev, "Register write error, aborting.\n"); | ||
| 636 | count = -EIO; | ||
| 637 | goto EXIT; | ||
| 638 | } | ||
| 639 | if (i2c_smbus_write_byte_data(client, | ||
| 640 | fan_reg_hi[ix], data->fan[ix] >> 8)) { | ||
| 641 | dev_err(&client->dev, "Register write error, aborting.\n"); | ||
| 642 | count = -EIO; | ||
| 643 | } | ||
| 644 | EXIT: | ||
| 645 | mutex_unlock(&data->update_lock); | ||
| 646 | return count; | ||
| 647 | } | ||
| 648 | |||
| 649 | |||
| 650 | |||
| 651 | static ssize_t get_fan1_div( | ||
| 652 | struct device *dev, | ||
| 653 | struct device_attribute *devattr, | ||
| 654 | char *buf) | ||
| 655 | { | ||
| 656 | struct amc6821_data *data = amc6821_update_device(dev); | ||
| 657 | return sprintf(buf, "%d\n", data->fan1_div); | ||
| 658 | } | ||
| 659 | |||
| 660 | static ssize_t set_fan1_div( | ||
| 661 | struct device *dev, | ||
| 662 | struct device_attribute *attr, | ||
| 663 | const char *buf, size_t count) | ||
| 664 | { | ||
| 665 | struct i2c_client *client = to_i2c_client(dev); | ||
| 666 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 667 | long val; | ||
| 668 | int config = strict_strtol(buf, 10, &val); | ||
| 669 | if (config) | ||
| 670 | return config; | ||
| 671 | |||
| 672 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); | ||
| 673 | if (config < 0) { | ||
| 674 | dev_err(&client->dev, | ||
| 675 | "Error reading configuration register, aborting.\n"); | ||
| 676 | return -EIO; | ||
| 677 | } | ||
| 678 | mutex_lock(&data->update_lock); | ||
| 679 | switch (val) { | ||
| 680 | case 2: | ||
| 681 | config &= ~AMC6821_CONF4_PSPR; | ||
| 682 | data->fan1_div = 2; | ||
| 683 | break; | ||
| 684 | case 4: | ||
| 685 | config |= AMC6821_CONF4_PSPR; | ||
| 686 | data->fan1_div = 4; | ||
| 687 | break; | ||
| 688 | default: | ||
| 689 | mutex_unlock(&data->update_lock); | ||
| 690 | count = -EINVAL; | ||
| 691 | goto EXIT; | ||
| 692 | } | ||
| 693 | if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) { | ||
| 694 | dev_err(&client->dev, | ||
| 695 | "Configuration register write error, aborting.\n"); | ||
| 696 | count = -EIO; | ||
| 697 | } | ||
| 698 | EXIT: | ||
| 699 | mutex_unlock(&data->update_lock); | ||
| 700 | return count; | ||
| 701 | } | ||
| 702 | |||
| 703 | |||
| 704 | |||
| 705 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, | ||
| 706 | get_temp, NULL, IDX_TEMP1_INPUT); | ||
| 707 | static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, get_temp, | ||
| 708 | set_temp, IDX_TEMP1_MIN); | ||
| 709 | static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, get_temp, | ||
| 710 | set_temp, IDX_TEMP1_MAX); | ||
| 711 | static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, get_temp, | ||
| 712 | set_temp, IDX_TEMP1_CRIT); | ||
| 713 | static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, | ||
| 714 | get_temp_alarm, NULL, IDX_TEMP1_MIN); | ||
| 715 | static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, | ||
| 716 | get_temp_alarm, NULL, IDX_TEMP1_MAX); | ||
| 717 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, | ||
| 718 | get_temp_alarm, NULL, IDX_TEMP1_CRIT); | ||
| 719 | static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, | ||
| 720 | get_temp, NULL, IDX_TEMP2_INPUT); | ||
| 721 | static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, | ||
| 722 | set_temp, IDX_TEMP2_MIN); | ||
| 723 | static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, get_temp, | ||
| 724 | set_temp, IDX_TEMP2_MAX); | ||
| 725 | static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, get_temp, | ||
| 726 | set_temp, IDX_TEMP2_CRIT); | ||
| 727 | static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, | ||
| 728 | get_temp2_fault, NULL, 0); | ||
| 729 | static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, | ||
| 730 | get_temp_alarm, NULL, IDX_TEMP2_MIN); | ||
| 731 | static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, | ||
| 732 | get_temp_alarm, NULL, IDX_TEMP2_MAX); | ||
| 733 | static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, | ||
| 734 | get_temp_alarm, NULL, IDX_TEMP2_CRIT); | ||
| 735 | static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, IDX_FAN1_INPUT); | ||
| 736 | static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR, | ||
| 737 | get_fan, set_fan, IDX_FAN1_MIN); | ||
| 738 | static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO | S_IWUSR, | ||
| 739 | get_fan, set_fan, IDX_FAN1_MAX); | ||
| 740 | static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan1_fault, NULL, 0); | ||
| 741 | static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, | ||
| 742 | get_fan1_div, set_fan1_div, 0); | ||
| 743 | |||
| 744 | static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm1, set_pwm1, 0); | ||
| 745 | static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, | ||
| 746 | get_pwm1_enable, set_pwm1_enable, 0); | ||
| 747 | static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO, | ||
| 748 | get_pwm1_auto_point_pwm, NULL, 0); | ||
| 749 | static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO, | ||
| 750 | get_pwm1_auto_point_pwm, set_pwm1_auto_point_pwm, 1); | ||
| 751 | static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO, | ||
| 752 | get_pwm1_auto_point_pwm, NULL, 2); | ||
| 753 | static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO, | ||
| 754 | get_pwm1_auto_channels_temp, NULL, 0); | ||
| 755 | static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO, | ||
| 756 | get_temp_auto_point_temp, NULL, 1, 0); | ||
| 757 | static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IWUSR | S_IRUGO, | ||
| 758 | get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 1); | ||
| 759 | static SENSOR_DEVICE_ATTR_2(temp1_auto_point3_temp, S_IWUSR | S_IRUGO, | ||
| 760 | get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 2); | ||
| 761 | |||
| 762 | static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IWUSR | S_IRUGO, | ||
| 763 | get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 0); | ||
| 764 | static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IWUSR | S_IRUGO, | ||
| 765 | get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 1); | ||
| 766 | static SENSOR_DEVICE_ATTR_2(temp2_auto_point3_temp, S_IWUSR | S_IRUGO, | ||
| 767 | get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 2); | ||
| 768 | |||
| 769 | |||
| 770 | |||
| 771 | static struct attribute *amc6821_attrs[] = { | ||
| 772 | &sensor_dev_attr_temp1_input.dev_attr.attr, | ||
| 773 | &sensor_dev_attr_temp1_min.dev_attr.attr, | ||
| 774 | &sensor_dev_attr_temp1_max.dev_attr.attr, | ||
| 775 | &sensor_dev_attr_temp1_crit.dev_attr.attr, | ||
| 776 | &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, | ||
| 777 | &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, | ||
| 778 | &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, | ||
| 779 | &sensor_dev_attr_temp2_input.dev_attr.attr, | ||
| 780 | &sensor_dev_attr_temp2_min.dev_attr.attr, | ||
| 781 | &sensor_dev_attr_temp2_max.dev_attr.attr, | ||
| 782 | &sensor_dev_attr_temp2_crit.dev_attr.attr, | ||
| 783 | &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, | ||
| 784 | &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, | ||
| 785 | &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, | ||
| 786 | &sensor_dev_attr_temp2_fault.dev_attr.attr, | ||
| 787 | &sensor_dev_attr_fan1_input.dev_attr.attr, | ||
| 788 | &sensor_dev_attr_fan1_min.dev_attr.attr, | ||
| 789 | &sensor_dev_attr_fan1_max.dev_attr.attr, | ||
| 790 | &sensor_dev_attr_fan1_fault.dev_attr.attr, | ||
| 791 | &sensor_dev_attr_fan1_div.dev_attr.attr, | ||
| 792 | &sensor_dev_attr_pwm1.dev_attr.attr, | ||
| 793 | &sensor_dev_attr_pwm1_enable.dev_attr.attr, | ||
| 794 | &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr, | ||
| 795 | &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, | ||
| 796 | &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, | ||
| 797 | &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr, | ||
| 798 | &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, | ||
| 799 | &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr, | ||
| 800 | &sensor_dev_attr_temp1_auto_point3_temp.dev_attr.attr, | ||
| 801 | &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr, | ||
| 802 | &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr, | ||
| 803 | &sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr, | ||
| 804 | NULL | ||
| 805 | }; | ||
| 806 | |||
| 807 | static struct attribute_group amc6821_attr_grp = { | ||
| 808 | .attrs = amc6821_attrs, | ||
| 809 | }; | ||
| 810 | |||
| 811 | |||
| 812 | |||
| 813 | /* Return 0 if detection is successful, -ENODEV otherwise */ | ||
| 814 | static int amc6821_detect( | ||
| 815 | struct i2c_client *client, | ||
| 816 | struct i2c_board_info *info) | ||
| 817 | { | ||
| 818 | struct i2c_adapter *adapter = client->adapter; | ||
| 819 | int address = client->addr; | ||
| 820 | int dev_id, comp_id; | ||
| 821 | |||
| 822 | dev_dbg(&adapter->dev, "amc6821_detect called.\n"); | ||
| 823 | |||
| 824 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { | ||
| 825 | dev_dbg(&adapter->dev, | ||
| 826 | "amc6821: I2C bus doesn't support byte mode, " | ||
| 827 | "skipping.\n"); | ||
| 828 | return -ENODEV; | ||
| 829 | } | ||
| 830 | |||
| 831 | dev_id = i2c_smbus_read_byte_data(client, AMC6821_REG_DEV_ID); | ||
| 832 | comp_id = i2c_smbus_read_byte_data(client, AMC6821_REG_COMP_ID); | ||
| 833 | if (dev_id != 0x21 || comp_id != 0x49) { | ||
| 834 | dev_dbg(&adapter->dev, | ||
| 835 | "amc6821: detection failed at 0x%02x.\n", | ||
| 836 | address); | ||
| 837 | return -ENODEV; | ||
| 838 | } | ||
| 839 | |||
| 840 | /* Bit 7 of the address register is ignored, so we can check the | ||
| 841 | ID registers again */ | ||
| 842 | dev_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_DEV_ID); | ||
| 843 | comp_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_COMP_ID); | ||
| 844 | if (dev_id != 0x21 || comp_id != 0x49) { | ||
| 845 | dev_dbg(&adapter->dev, | ||
| 846 | "amc6821: detection failed at 0x%02x.\n", | ||
| 847 | address); | ||
| 848 | return -ENODEV; | ||
| 849 | } | ||
| 850 | |||
| 851 | dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address); | ||
| 852 | strlcpy(info->type, "amc6821", I2C_NAME_SIZE); | ||
| 853 | |||
| 854 | return 0; | ||
| 855 | } | ||
| 856 | |||
| 857 | static int amc6821_probe( | ||
| 858 | struct i2c_client *client, | ||
| 859 | const struct i2c_device_id *id) | ||
| 860 | { | ||
| 861 | struct amc6821_data *data; | ||
| 862 | int err; | ||
| 863 | |||
| 864 | data = kzalloc(sizeof(struct amc6821_data), GFP_KERNEL); | ||
| 865 | if (!data) { | ||
| 866 | dev_err(&client->dev, "out of memory.\n"); | ||
| 867 | return -ENOMEM; | ||
| 868 | } | ||
| 869 | |||
| 870 | |||
| 871 | i2c_set_clientdata(client, data); | ||
| 872 | mutex_init(&data->update_lock); | ||
| 873 | |||
| 874 | /* | ||
| 875 | * Initialize the amc6821 chip | ||
| 876 | */ | ||
| 877 | err = amc6821_init_client(client); | ||
| 878 | if (err) | ||
| 879 | goto err_free; | ||
| 880 | |||
| 881 | err = sysfs_create_group(&client->dev.kobj, &amc6821_attr_grp); | ||
| 882 | if (err) | ||
| 883 | goto err_free; | ||
| 884 | |||
| 885 | data->hwmon_dev = hwmon_device_register(&client->dev); | ||
| 886 | if (!IS_ERR(data->hwmon_dev)) | ||
| 887 | return 0; | ||
| 888 | |||
| 889 | err = PTR_ERR(data->hwmon_dev); | ||
| 890 | dev_err(&client->dev, "error registering hwmon device.\n"); | ||
| 891 | sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp); | ||
| 892 | err_free: | ||
| 893 | kfree(data); | ||
| 894 | return err; | ||
| 895 | } | ||
| 896 | |||
| 897 | static int amc6821_remove(struct i2c_client *client) | ||
| 898 | { | ||
| 899 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 900 | |||
| 901 | hwmon_device_unregister(data->hwmon_dev); | ||
| 902 | sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp); | ||
| 903 | |||
| 904 | kfree(data); | ||
| 905 | |||
| 906 | return 0; | ||
| 907 | } | ||
| 908 | |||
| 909 | |||
| 910 | static int amc6821_init_client(struct i2c_client *client) | ||
| 911 | { | ||
| 912 | int config; | ||
| 913 | int err = -EIO; | ||
| 914 | |||
| 915 | if (init) { | ||
| 916 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); | ||
| 917 | |||
| 918 | if (config < 0) { | ||
| 919 | dev_err(&client->dev, | ||
| 920 | "Error reading configuration register, aborting.\n"); | ||
| 921 | return err; | ||
| 922 | } | ||
| 923 | |||
| 924 | config |= AMC6821_CONF4_MODE; | ||
| 925 | |||
| 926 | if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, | ||
| 927 | config)) { | ||
| 928 | dev_err(&client->dev, | ||
| 929 | "Configuration register write error, aborting.\n"); | ||
| 930 | return err; | ||
| 931 | } | ||
| 932 | |||
| 933 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3); | ||
| 934 | |||
| 935 | if (config < 0) { | ||
| 936 | dev_err(&client->dev, | ||
| 937 | "Error reading configuration register, aborting.\n"); | ||
| 938 | return err; | ||
| 939 | } | ||
| 940 | |||
| 941 | dev_info(&client->dev, "Revision %d\n", config & 0x0f); | ||
| 942 | |||
| 943 | config &= ~AMC6821_CONF3_THERM_FAN_EN; | ||
| 944 | |||
| 945 | if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3, | ||
| 946 | config)) { | ||
| 947 | dev_err(&client->dev, | ||
| 948 | "Configuration register write error, aborting.\n"); | ||
| 949 | return err; | ||
| 950 | } | ||
| 951 | |||
| 952 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2); | ||
| 953 | |||
| 954 | if (config < 0) { | ||
| 955 | dev_err(&client->dev, | ||
| 956 | "Error reading configuration register, aborting.\n"); | ||
| 957 | return err; | ||
| 958 | } | ||
| 959 | |||
| 960 | config &= ~AMC6821_CONF2_RTFIE; | ||
| 961 | config &= ~AMC6821_CONF2_LTOIE; | ||
| 962 | config &= ~AMC6821_CONF2_RTOIE; | ||
| 963 | if (i2c_smbus_write_byte_data(client, | ||
| 964 | AMC6821_REG_CONF2, config)) { | ||
| 965 | dev_err(&client->dev, | ||
| 966 | "Configuration register write error, aborting.\n"); | ||
| 967 | return err; | ||
| 968 | } | ||
| 969 | |||
| 970 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); | ||
| 971 | |||
| 972 | if (config < 0) { | ||
| 973 | dev_err(&client->dev, | ||
| 974 | "Error reading configuration register, aborting.\n"); | ||
| 975 | return err; | ||
| 976 | } | ||
| 977 | |||
| 978 | config &= ~AMC6821_CONF1_THERMOVIE; | ||
| 979 | config &= ~AMC6821_CONF1_FANIE; | ||
| 980 | config |= AMC6821_CONF1_START; | ||
| 981 | if (pwminv) | ||
| 982 | config |= AMC6821_CONF1_PWMINV; | ||
| 983 | else | ||
| 984 | config &= ~AMC6821_CONF1_PWMINV; | ||
| 985 | |||
| 986 | if (i2c_smbus_write_byte_data( | ||
| 987 | client, AMC6821_REG_CONF1, config)) { | ||
| 988 | dev_err(&client->dev, | ||
| 989 | "Configuration register write error, aborting.\n"); | ||
| 990 | return err; | ||
| 991 | } | ||
| 992 | } | ||
| 993 | return 0; | ||
| 994 | } | ||
| 995 | |||
| 996 | |||
| 997 | static struct amc6821_data *amc6821_update_device(struct device *dev) | ||
| 998 | { | ||
| 999 | struct i2c_client *client = to_i2c_client(dev); | ||
| 1000 | struct amc6821_data *data = i2c_get_clientdata(client); | ||
| 1001 | int timeout = HZ; | ||
| 1002 | u8 reg; | ||
| 1003 | int i; | ||
| 1004 | |||
| 1005 | mutex_lock(&data->update_lock); | ||
| 1006 | |||
| 1007 | if (time_after(jiffies, data->last_updated + timeout) || | ||
| 1008 | !data->valid) { | ||
| 1009 | |||
| 1010 | for (i = 0; i < TEMP_IDX_LEN; i++) | ||
| 1011 | data->temp[i] = i2c_smbus_read_byte_data(client, | ||
| 1012 | temp_reg[i]); | ||
| 1013 | |||
| 1014 | data->stat1 = i2c_smbus_read_byte_data(client, | ||
| 1015 | AMC6821_REG_STAT1); | ||
| 1016 | data->stat2 = i2c_smbus_read_byte_data(client, | ||
| 1017 | AMC6821_REG_STAT2); | ||
| 1018 | |||
| 1019 | data->pwm1 = i2c_smbus_read_byte_data(client, | ||
| 1020 | AMC6821_REG_DCY); | ||
| 1021 | for (i = 0; i < FAN1_IDX_LEN; i++) { | ||
| 1022 | data->fan[i] = i2c_smbus_read_byte_data( | ||
| 1023 | client, | ||
| 1024 | fan_reg_low[i]); | ||
| 1025 | data->fan[i] += i2c_smbus_read_byte_data( | ||
| 1026 | client, | ||
| 1027 | fan_reg_hi[i]) << 8; | ||
| 1028 | } | ||
| 1029 | data->fan1_div = i2c_smbus_read_byte_data(client, | ||
| 1030 | AMC6821_REG_CONF4); | ||
| 1031 | data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 4 : 2; | ||
| 1032 | |||
| 1033 | data->pwm1_auto_point_pwm[0] = 0; | ||
| 1034 | data->pwm1_auto_point_pwm[2] = 255; | ||
| 1035 | data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client, | ||
| 1036 | AMC6821_REG_DCY_LOW_TEMP); | ||
| 1037 | |||
| 1038 | data->temp1_auto_point_temp[0] = | ||
| 1039 | i2c_smbus_read_byte_data(client, | ||
| 1040 | AMC6821_REG_PSV_TEMP); | ||
| 1041 | data->temp2_auto_point_temp[0] = | ||
| 1042 | data->temp1_auto_point_temp[0]; | ||
| 1043 | reg = i2c_smbus_read_byte_data(client, | ||
| 1044 | AMC6821_REG_LTEMP_FAN_CTRL); | ||
| 1045 | data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1; | ||
| 1046 | reg &= 0x07; | ||
| 1047 | reg = 0x20 >> reg; | ||
| 1048 | if (reg > 0) | ||
| 1049 | data->temp1_auto_point_temp[2] = | ||
| 1050 | data->temp1_auto_point_temp[1] + | ||
| 1051 | (data->pwm1_auto_point_pwm[2] - | ||
| 1052 | data->pwm1_auto_point_pwm[1]) / reg; | ||
| 1053 | else | ||
| 1054 | data->temp1_auto_point_temp[2] = 255; | ||
| 1055 | |||
| 1056 | reg = i2c_smbus_read_byte_data(client, | ||
| 1057 | AMC6821_REG_RTEMP_FAN_CTRL); | ||
| 1058 | data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1; | ||
| 1059 | reg &= 0x07; | ||
| 1060 | reg = 0x20 >> reg; | ||
| 1061 | if (reg > 0) | ||
| 1062 | data->temp2_auto_point_temp[2] = | ||
| 1063 | data->temp2_auto_point_temp[1] + | ||
| 1064 | (data->pwm1_auto_point_pwm[2] - | ||
| 1065 | data->pwm1_auto_point_pwm[1]) / reg; | ||
| 1066 | else | ||
| 1067 | data->temp2_auto_point_temp[2] = 255; | ||
| 1068 | |||
| 1069 | reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); | ||
| 1070 | reg = (reg >> 5) & 0x3; | ||
| 1071 | switch (reg) { | ||
| 1072 | case 0: /*open loop: software sets pwm1*/ | ||
| 1073 | data->pwm1_auto_channels_temp = 0; | ||
| 1074 | data->pwm1_enable = 1; | ||
| 1075 | break; | ||
| 1076 | case 2: /*closed loop: remote T (temp2)*/ | ||
| 1077 | data->pwm1_auto_channels_temp = 2; | ||
| 1078 | data->pwm1_enable = 2; | ||
| 1079 | break; | ||
| 1080 | case 3: /*closed loop: local and remote T (temp2)*/ | ||
| 1081 | data->pwm1_auto_channels_temp = 3; | ||
| 1082 | data->pwm1_enable = 3; | ||
| 1083 | break; | ||
| 1084 | case 1: /*semi-open loop: software sets rpm, chip controls pwm1, | ||
| 1085 | *currently not implemented | ||
| 1086 | */ | ||
| 1087 | data->pwm1_auto_channels_temp = 0; | ||
| 1088 | data->pwm1_enable = 0; | ||
| 1089 | break; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | data->last_updated = jiffies; | ||
| 1093 | data->valid = 1; | ||
| 1094 | } | ||
| 1095 | mutex_unlock(&data->update_lock); | ||
| 1096 | return data; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | |||
| 1100 | static int __init amc6821_init(void) | ||
| 1101 | { | ||
| 1102 | return i2c_add_driver(&amc6821_driver); | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | static void __exit amc6821_exit(void) | ||
| 1106 | { | ||
| 1107 | i2c_del_driver(&amc6821_driver); | ||
| 1108 | } | ||
| 1109 | |||
| 1110 | module_init(amc6821_init); | ||
| 1111 | module_exit(amc6821_exit); | ||
| 1112 | |||
| 1113 | |||
| 1114 | MODULE_LICENSE("GPL"); | ||
| 1115 | MODULE_AUTHOR("T. Mertelj <tomaz.mertelj@guest.arnes.si>"); | ||
| 1116 | MODULE_DESCRIPTION("Texas Instruments amc6821 hwmon driver"); | ||
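For reference, the auto-point decoding performed in amc6821_update_device() above can be read as a small standalone helper. This is only a sketch that mirrors the driver's arithmetic; the helper name is informal and not part of the patch:

/* Sketch: decode an AMC6821_REG_LTEMP_FAN_CTRL / RTEMP_FAN_CTRL byte
 * exactly the way the update path above does.
 */
static void amc6821_decode_fan_ctrl(u8 reg, int *start_temp, int *pwm_per_degree)
{
	*start_temp = (reg & 0xF8) >> 1;	/* becomes tempN_auto_point_temp[1] */
	*pwm_per_degree = 0x20 >> (reg & 0x07);	/* slope; 0 pins tempN_auto_point_temp[2] at 255 */
}

With the slope known, the third auto point follows as temp[2] = temp[1] + (pwm[2] - pwm[1]) / slope, as computed in the update path above.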
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 5a3ee00c0e7d..6811346c1c62 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | * See COPYING in the top level directory of the kernel tree. | 5 | * See COPYING in the top level directory of the kernel tree. |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <linux/debugfs.h> | ||
| 8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 9 | #include <linux/hwmon.h> | 10 | #include <linux/hwmon.h> |
| 10 | #include <linux/list.h> | 11 | #include <linux/list.h> |
| @@ -101,6 +102,11 @@ struct atk_data { | |||
| 101 | int temperature_count; | 102 | int temperature_count; |
| 102 | int fan_count; | 103 | int fan_count; |
| 103 | struct list_head sensor_list; | 104 | struct list_head sensor_list; |
| 105 | |||
| 106 | struct { | ||
| 107 | struct dentry *root; | ||
| 108 | u32 id; | ||
| 109 | } debugfs; | ||
| 104 | }; | 110 | }; |
| 105 | 111 | ||
| 106 | 112 | ||
| @@ -624,6 +630,187 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value) | |||
| 624 | return err; | 630 | return err; |
| 625 | } | 631 | } |
| 626 | 632 | ||
| 633 | #ifdef CONFIG_DEBUG_FS | ||
| 634 | static int atk_debugfs_gitm_get(void *p, u64 *val) | ||
| 635 | { | ||
| 636 | struct atk_data *data = p; | ||
| 637 | union acpi_object *ret; | ||
| 638 | struct atk_acpi_ret_buffer *buf; | ||
| 639 | int err = 0; | ||
| 640 | |||
| 641 | if (!data->read_handle) | ||
| 642 | return -ENODEV; | ||
| 643 | |||
| 644 | if (!data->debugfs.id) | ||
| 645 | return -EINVAL; | ||
| 646 | |||
| 647 | ret = atk_gitm(data, data->debugfs.id); | ||
| 648 | if (IS_ERR(ret)) | ||
| 649 | return PTR_ERR(ret); | ||
| 650 | |||
| 651 | buf = (struct atk_acpi_ret_buffer *)ret->buffer.pointer; | ||
| 652 | if (buf->flags) | ||
| 653 | *val = buf->value; | ||
| 654 | else | ||
| 655 | err = -EIO; | ||
| 656 | |||
| 657 | return err; | ||
| 658 | } | ||
| 659 | |||
| 660 | DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm, | ||
| 661 | atk_debugfs_gitm_get, | ||
| 662 | NULL, | ||
| 663 | "0x%08llx\n") | ||
| 664 | |||
| 665 | static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj) | ||
| 666 | { | ||
| 667 | int ret = 0; | ||
| 668 | |||
| 669 | switch (obj->type) { | ||
| 670 | case ACPI_TYPE_INTEGER: | ||
| 671 | ret = snprintf(buf, sz, "0x%08llx\n", obj->integer.value); | ||
| 672 | break; | ||
| 673 | case ACPI_TYPE_STRING: | ||
| 674 | ret = snprintf(buf, sz, "%s\n", obj->string.pointer); | ||
| 675 | break; | ||
| 676 | } | ||
| 677 | |||
| 678 | return ret; | ||
| 679 | } | ||
| 680 | |||
| 681 | static void atk_pack_print(char *buf, size_t sz, union acpi_object *pack) | ||
| 682 | { | ||
| 683 | int ret; | ||
| 684 | int i; | ||
| 685 | |||
| 686 | for (i = 0; i < pack->package.count; i++) { | ||
| 687 | union acpi_object *obj = &pack->package.elements[i]; | ||
| 688 | |||
| 689 | ret = atk_acpi_print(buf, sz, obj); | ||
| 690 | if (ret >= sz) | ||
| 691 | break; | ||
| 692 | buf += ret; | ||
| 693 | sz -= ret; | ||
| 694 | } | ||
| 695 | } | ||
| 696 | |||
| 697 | static int atk_debugfs_ggrp_open(struct inode *inode, struct file *file) | ||
| 698 | { | ||
| 699 | struct atk_data *data = inode->i_private; | ||
| 700 | char *buf = NULL; | ||
| 701 | union acpi_object *ret; | ||
| 702 | u8 cls; | ||
| 703 | int i; | ||
| 704 | |||
| 705 | if (!data->enumerate_handle) | ||
| 706 | return -ENODEV; | ||
| 707 | if (!data->debugfs.id) | ||
| 708 | return -EINVAL; | ||
| 709 | |||
| 710 | cls = (data->debugfs.id & 0xff000000) >> 24; | ||
| 711 | ret = atk_ggrp(data, cls); | ||
| 712 | if (IS_ERR(ret)) | ||
| 713 | return PTR_ERR(ret); | ||
| 714 | |||
| 715 | for (i = 0; i < ret->package.count; i++) { | ||
| 716 | union acpi_object *pack = &ret->package.elements[i]; | ||
| 717 | union acpi_object *id; | ||
| 718 | |||
| 719 | if (pack->type != ACPI_TYPE_PACKAGE) | ||
| 720 | continue; | ||
| 721 | if (!pack->package.count) | ||
| 722 | continue; | ||
| 723 | id = &pack->package.elements[0]; | ||
| 724 | if (id->integer.value == data->debugfs.id) { | ||
| 725 | /* Print the package */ | ||
| 726 | buf = kzalloc(512, GFP_KERNEL); | ||
| 727 | if (!buf) { | ||
| 728 | ACPI_FREE(ret); | ||
| 729 | return -ENOMEM; | ||
| 730 | } | ||
| 731 | atk_pack_print(buf, 512, pack); | ||
| 732 | break; | ||
| 733 | } | ||
| 734 | } | ||
| 735 | ACPI_FREE(ret); | ||
| 736 | |||
| 737 | if (!buf) | ||
| 738 | return -EINVAL; | ||
| 739 | |||
| 740 | file->private_data = buf; | ||
| 741 | |||
| 742 | return nonseekable_open(inode, file); | ||
| 743 | } | ||
| 744 | |||
| 745 | static ssize_t atk_debugfs_ggrp_read(struct file *file, char __user *buf, | ||
| 746 | size_t count, loff_t *pos) | ||
| 747 | { | ||
| 748 | char *str = file->private_data; | ||
| 749 | size_t len = strlen(str); | ||
| 750 | |||
| 751 | return simple_read_from_buffer(buf, count, pos, str, len); | ||
| 752 | } | ||
| 753 | |||
| 754 | static int atk_debugfs_ggrp_release(struct inode *inode, struct file *file) | ||
| 755 | { | ||
| 756 | kfree(file->private_data); | ||
| 757 | return 0; | ||
| 758 | } | ||
| 759 | |||
| 760 | static const struct file_operations atk_debugfs_ggrp_fops = { | ||
| 761 | .read = atk_debugfs_ggrp_read, | ||
| 762 | .open = atk_debugfs_ggrp_open, | ||
| 763 | .release = atk_debugfs_ggrp_release, | ||
| 764 | }; | ||
| 765 | |||
| 766 | static void atk_debugfs_init(struct atk_data *data) | ||
| 767 | { | ||
| 768 | struct dentry *d; | ||
| 769 | struct dentry *f; | ||
| 770 | |||
| 771 | data->debugfs.id = 0; | ||
| 772 | |||
| 773 | d = debugfs_create_dir("asus_atk0110", NULL); | ||
| 774 | if (!d || IS_ERR(d)) | ||
| 775 | return; | ||
| 776 | |||
| 777 | f = debugfs_create_x32("id", S_IRUSR | S_IWUSR, d, &data->debugfs.id); | ||
| 778 | if (!f || IS_ERR(f)) | ||
| 779 | goto cleanup; | ||
| 780 | |||
| 781 | f = debugfs_create_file("gitm", S_IRUSR, d, data, | ||
| 782 | &atk_debugfs_gitm); | ||
| 783 | if (!f || IS_ERR(f)) | ||
| 784 | goto cleanup; | ||
| 785 | |||
| 786 | f = debugfs_create_file("ggrp", S_IRUSR, d, data, | ||
| 787 | &atk_debugfs_ggrp_fops); | ||
| 788 | if (!f || IS_ERR(f)) | ||
| 789 | goto cleanup; | ||
| 790 | |||
| 791 | data->debugfs.root = d; | ||
| 792 | |||
| 793 | return; | ||
| 794 | cleanup: | ||
| 795 | debugfs_remove_recursive(d); | ||
| 796 | } | ||
| 797 | |||
| 798 | static void atk_debugfs_cleanup(struct atk_data *data) | ||
| 799 | { | ||
| 800 | debugfs_remove_recursive(data->debugfs.root); | ||
| 801 | } | ||
| 802 | |||
| 803 | #else /* CONFIG_DEBUG_FS */ | ||
| 804 | |||
| 805 | static void atk_debugfs_init(struct atk_data *data) | ||
| 806 | { | ||
| 807 | } | ||
| 808 | |||
| 809 | static void atk_debugfs_cleanup(struct atk_data *data) | ||
| 810 | { | ||
| 811 | } | ||
| 812 | #endif | ||
| 813 | |||
| 627 | static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) | 814 | static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) |
| 628 | { | 815 | { |
| 629 | struct device *dev = &data->acpi_dev->dev; | 816 | struct device *dev = &data->acpi_dev->dev; |
| @@ -1047,76 +1234,75 @@ remove: | |||
| 1047 | return err; | 1234 | return err; |
| 1048 | } | 1235 | } |
| 1049 | 1236 | ||
| 1050 | static int atk_check_old_if(struct atk_data *data) | 1237 | static int atk_probe_if(struct atk_data *data) |
| 1051 | { | 1238 | { |
| 1052 | struct device *dev = &data->acpi_dev->dev; | 1239 | struct device *dev = &data->acpi_dev->dev; |
| 1053 | acpi_handle ret; | 1240 | acpi_handle ret; |
| 1054 | acpi_status status; | 1241 | acpi_status status; |
| 1242 | int err = 0; | ||
| 1055 | 1243 | ||
| 1056 | /* RTMP: read temperature */ | 1244 | /* RTMP: read temperature */ |
| 1057 | status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_TMP, &ret); | 1245 | status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_TMP, &ret); |
| 1058 | if (status != AE_OK) { | 1246 | if (ACPI_SUCCESS(status)) |
| 1247 | data->rtmp_handle = ret; | ||
| 1248 | else | ||
| 1059 | dev_dbg(dev, "method " METHOD_OLD_READ_TMP " not found: %s\n", | 1249 | dev_dbg(dev, "method " METHOD_OLD_READ_TMP " not found: %s\n", |
| 1060 | acpi_format_exception(status)); | 1250 | acpi_format_exception(status)); |
| 1061 | return -ENODEV; | ||
| 1062 | } | ||
| 1063 | data->rtmp_handle = ret; | ||
| 1064 | 1251 | ||
| 1065 | /* RVLT: read voltage */ | 1252 | /* RVLT: read voltage */ |
| 1066 | status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_VLT, &ret); | 1253 | status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_VLT, &ret); |
| 1067 | if (status != AE_OK) { | 1254 | if (ACPI_SUCCESS(status)) |
| 1255 | data->rvlt_handle = ret; | ||
| 1256 | else | ||
| 1068 | dev_dbg(dev, "method " METHOD_OLD_READ_VLT " not found: %s\n", | 1257 | dev_dbg(dev, "method " METHOD_OLD_READ_VLT " not found: %s\n", |
| 1069 | acpi_format_exception(status)); | 1258 | acpi_format_exception(status)); |
| 1070 | return -ENODEV; | ||
| 1071 | } | ||
| 1072 | data->rvlt_handle = ret; | ||
| 1073 | 1259 | ||
| 1074 | /* RFAN: read fan status */ | 1260 | /* RFAN: read fan status */ |
| 1075 | status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_FAN, &ret); | 1261 | status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_FAN, &ret); |
| 1076 | if (status != AE_OK) { | 1262 | if (ACPI_SUCCESS(status)) |
| 1263 | data->rfan_handle = ret; | ||
| 1264 | else | ||
| 1077 | dev_dbg(dev, "method " METHOD_OLD_READ_FAN " not found: %s\n", | 1265 | dev_dbg(dev, "method " METHOD_OLD_READ_FAN " not found: %s\n", |
| 1078 | acpi_format_exception(status)); | 1266 | acpi_format_exception(status)); |
| 1079 | return -ENODEV; | ||
| 1080 | } | ||
| 1081 | data->rfan_handle = ret; | ||
| 1082 | |||
| 1083 | return 0; | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | static int atk_check_new_if(struct atk_data *data) | ||
| 1087 | { | ||
| 1088 | struct device *dev = &data->acpi_dev->dev; | ||
| 1089 | acpi_handle ret; | ||
| 1090 | acpi_status status; | ||
| 1091 | 1267 | ||
| 1092 | /* Enumeration */ | 1268 | /* Enumeration */ |
| 1093 | status = acpi_get_handle(data->atk_handle, METHOD_ENUMERATE, &ret); | 1269 | status = acpi_get_handle(data->atk_handle, METHOD_ENUMERATE, &ret); |
| 1094 | if (status != AE_OK) { | 1270 | if (ACPI_SUCCESS(status)) |
| 1271 | data->enumerate_handle = ret; | ||
| 1272 | else | ||
| 1095 | dev_dbg(dev, "method " METHOD_ENUMERATE " not found: %s\n", | 1273 | dev_dbg(dev, "method " METHOD_ENUMERATE " not found: %s\n", |
| 1096 | acpi_format_exception(status)); | 1274 | acpi_format_exception(status)); |
| 1097 | return -ENODEV; | ||
| 1098 | } | ||
| 1099 | data->enumerate_handle = ret; | ||
| 1100 | 1275 | ||
| 1101 | /* De-multiplexer (read) */ | 1276 | /* De-multiplexer (read) */ |
| 1102 | status = acpi_get_handle(data->atk_handle, METHOD_READ, &ret); | 1277 | status = acpi_get_handle(data->atk_handle, METHOD_READ, &ret); |
| 1103 | if (status != AE_OK) { | 1278 | if (ACPI_SUCCESS(status)) |
| 1279 | data->read_handle = ret; | ||
| 1280 | else | ||
| 1104 | dev_dbg(dev, "method " METHOD_READ " not found: %s\n", | 1281 | dev_dbg(dev, "method " METHOD_READ " not found: %s\n", |
| 1105 | acpi_format_exception(status)); | 1282 | acpi_format_exception(status)); |
| 1106 | return -ENODEV; | ||
| 1107 | } | ||
| 1108 | data->read_handle = ret; | ||
| 1109 | 1283 | ||
| 1110 | /* De-multiplexer (write) */ | 1284 | /* De-multiplexer (write) */ |
| 1111 | status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret); | 1285 | status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret); |
| 1112 | if (status != AE_OK) { | 1286 | if (ACPI_SUCCESS(status)) |
| 1113 | dev_dbg(dev, "method " METHOD_READ " not found: %s\n", | 1287 | data->write_handle = ret; |
| 1288 | else | ||
| 1289 | dev_dbg(dev, "method " METHOD_WRITE " not found: %s\n", | ||
| 1114 | acpi_format_exception(status)); | 1290 | acpi_format_exception(status)); |
| 1115 | return -ENODEV; | ||
| 1116 | } | ||
| 1117 | data->write_handle = ret; | ||
| 1118 | 1291 | ||
| 1119 | return 0; | 1292 | /* Check for hwmon methods: first check "old" style methods; note that |
| 1293 | * both may be present: in this case we stick to the old interface; | ||
| 1294 | * analysis of multiple DSDTs indicates that when both interfaces | ||
| 1295 | * are present the new one (GGRP/GITM) is not functional. | ||
| 1296 | */ | ||
| 1297 | if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle) | ||
| 1298 | data->old_interface = true; | ||
| 1299 | else if (data->enumerate_handle && data->read_handle && | ||
| 1300 | data->write_handle) | ||
| 1301 | data->old_interface = false; | ||
| 1302 | else | ||
| 1303 | err = -ENODEV; | ||
| 1304 | |||
| 1305 | return err; | ||
| 1120 | } | 1306 | } |
| 1121 | 1307 | ||
| 1122 | static int atk_add(struct acpi_device *device) | 1308 | static int atk_add(struct acpi_device *device) |
| @@ -1155,28 +1341,19 @@ static int atk_add(struct acpi_device *device) | |||
| 1155 | } | 1341 | } |
| 1156 | ACPI_FREE(buf.pointer); | 1342 | ACPI_FREE(buf.pointer); |
| 1157 | 1343 | ||
| 1158 | /* Check for hwmon methods: first check "old" style methods; note that | 1344 | err = atk_probe_if(data); |
| 1159 | * both may be present: in this case we stick to the old interface; | 1345 | if (err) { |
| 1160 | * analysis of multiple DSDTs indicates that when both interfaces | 1346 | dev_err(&device->dev, "No usable hwmon interface detected\n"); |
| 1161 | * are present the new one (GGRP/GITM) is not functional. | 1347 | goto out; |
| 1162 | */ | ||
| 1163 | err = atk_check_old_if(data); | ||
| 1164 | if (!err) { | ||
| 1165 | dev_dbg(&device->dev, "Using old hwmon interface\n"); | ||
| 1166 | data->old_interface = true; | ||
| 1167 | } else { | ||
| 1168 | err = atk_check_new_if(data); | ||
| 1169 | if (err) | ||
| 1170 | goto out; | ||
| 1171 | |||
| 1172 | dev_dbg(&device->dev, "Using new hwmon interface\n"); | ||
| 1173 | data->old_interface = false; | ||
| 1174 | } | 1348 | } |
| 1175 | 1349 | ||
| 1176 | if (data->old_interface) | 1350 | if (data->old_interface) { |
| 1351 | dev_dbg(&device->dev, "Using old hwmon interface\n"); | ||
| 1177 | err = atk_enumerate_old_hwmon(data); | 1352 | err = atk_enumerate_old_hwmon(data); |
| 1178 | else | 1353 | } else { |
| 1354 | dev_dbg(&device->dev, "Using new hwmon interface\n"); | ||
| 1179 | err = atk_enumerate_new_hwmon(data); | 1355 | err = atk_enumerate_new_hwmon(data); |
| 1356 | } | ||
| 1180 | if (err < 0) | 1357 | if (err < 0) |
| 1181 | goto out; | 1358 | goto out; |
| 1182 | if (err == 0) { | 1359 | if (err == 0) { |
| @@ -1190,6 +1367,8 @@ static int atk_add(struct acpi_device *device) | |||
| 1190 | if (err) | 1367 | if (err) |
| 1191 | goto cleanup; | 1368 | goto cleanup; |
| 1192 | 1369 | ||
| 1370 | atk_debugfs_init(data); | ||
| 1371 | |||
| 1193 | device->driver_data = data; | 1372 | device->driver_data = data; |
| 1194 | return 0; | 1373 | return 0; |
| 1195 | cleanup: | 1374 | cleanup: |
| @@ -1208,6 +1387,8 @@ static int atk_remove(struct acpi_device *device, int type) | |||
| 1208 | 1387 | ||
| 1209 | device->driver_data = NULL; | 1388 | device->driver_data = NULL; |
| 1210 | 1389 | ||
| 1390 | atk_debugfs_cleanup(data); | ||
| 1391 | |||
| 1211 | atk_remove_files(data); | 1392 | atk_remove_files(data); |
| 1212 | atk_free_sensors(data); | 1393 | atk_free_sensors(data); |
| 1213 | hwmon_device_unregister(data->hwmon_dev); | 1394 | hwmon_device_unregister(data->hwmon_dev); |
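The debugfs files added here (id, gitm and ggrp under the asus_atk0110 directory) are meant for interactive poking at the ACPI methods. A minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug and using a purely illustrative sensor id:

/* Sketch: select a sensor id, then read its raw GITM value. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/asus_atk0110/id", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "0x11000000", 10);	/* placeholder id, not a real sensor */
	close(fd);

	fd = open("/sys/kernel/debug/asus_atk0110/gitm", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);	/* formatted as "0x%08llx\n" above */
	}
	close(fd);
	return 0;
}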
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index caef39cda8c8..2d7bceeed0bc 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
| 34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
| 35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
| 36 | #include <linux/pci.h> | ||
| 36 | #include <asm/msr.h> | 37 | #include <asm/msr.h> |
| 37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
| 38 | 39 | ||
| @@ -161,6 +162,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * | |||
| 161 | int usemsr_ee = 1; | 162 | int usemsr_ee = 1; |
| 162 | int err; | 163 | int err; |
| 163 | u32 eax, edx; | 164 | u32 eax, edx; |
| 165 | struct pci_dev *host_bridge; | ||
| 164 | 166 | ||
| 165 | /* Early chips have no MSR for TjMax */ | 167 | /* Early chips have no MSR for TjMax */ |
| 166 | 168 | ||
| @@ -168,11 +170,21 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * | |||
| 168 | usemsr_ee = 0; | 170 | usemsr_ee = 0; |
| 169 | } | 171 | } |
| 170 | 172 | ||
| 171 | /* Atoms seems to have TjMax at 90C */ | 173 | /* Atom CPUs */ |
| 172 | 174 | ||
| 173 | if (c->x86_model == 0x1c) { | 175 | if (c->x86_model == 0x1c) { |
| 174 | usemsr_ee = 0; | 176 | usemsr_ee = 0; |
| 175 | tjmax = 90000; | 177 | |
| 178 | host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | ||
| 179 | |||
| 180 | if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL | ||
| 181 | && (host_bridge->device == 0xa000 /* NM10 based nettop */ | ||
| 182 | || host_bridge->device == 0xa010)) /* NM10 based netbook */ | ||
| 183 | tjmax = 100000; | ||
| 184 | else | ||
| 185 | tjmax = 90000; | ||
| 186 | |||
| 187 | pci_dev_put(host_bridge); | ||
| 176 | } | 188 | } |
| 177 | 189 | ||
| 178 | if ((c->x86_model > 0xe) && (usemsr_ee)) { | 190 | if ((c->x86_model > 0xe) && (usemsr_ee)) { |
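The TjMax heuristic above keys off the PCI device ID of the host bridge at bus 0, slot 0. The same check can be approximated from user space; a sketch, assuming PCI domain 0000 and sysfs (illustrative only, the driver itself uses pci_get_bus_and_slot()):

/* Sketch: guess Atom TjMax the way the hunk above does. */
#include <stdio.h>

int main(void)
{
	unsigned int dev = 0;
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/device", "r");

	if (f && fscanf(f, "0x%x", &dev) == 1)
		printf("TjMax guess: %u millidegrees C\n",
		       (dev == 0xa000 || dev == 0xa010) ? 100000 : 90000);
	if (f)
		fclose(f);
	return 0;
}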
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index d8a26d16d948..099a2138cdf6 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
| @@ -33,6 +33,16 @@ static bool force; | |||
| 33 | module_param(force, bool, 0444); | 33 | module_param(force, bool, 0444); |
| 34 | MODULE_PARM_DESC(force, "force loading on processors with erratum 319"); | 34 | MODULE_PARM_DESC(force, "force loading on processors with erratum 319"); |
| 35 | 35 | ||
| 36 | /* CPUID function 0x80000001, ebx */ | ||
| 37 | #define CPUID_PKGTYPE_MASK 0xf0000000 | ||
| 38 | #define CPUID_PKGTYPE_F 0x00000000 | ||
| 39 | #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 | ||
| 40 | |||
| 41 | /* DRAM controller (PCI function 2) */ | ||
| 42 | #define REG_DCT0_CONFIG_HIGH 0x094 | ||
| 43 | #define DDR3_MODE 0x00000100 | ||
| 44 | |||
| 45 | /* miscellaneous (PCI function 3) */ | ||
| 36 | #define REG_HARDWARE_THERMAL_CONTROL 0x64 | 46 | #define REG_HARDWARE_THERMAL_CONTROL 0x64 |
| 37 | #define HTC_ENABLE 0x00000001 | 47 | #define HTC_ENABLE 0x00000001 |
| 38 | 48 | ||
| @@ -85,13 +95,28 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0); | |||
| 85 | static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1); | 95 | static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1); |
| 86 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | 96 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); |
| 87 | 97 | ||
| 88 | static bool __devinit has_erratum_319(void) | 98 | static bool __devinit has_erratum_319(struct pci_dev *pdev) |
| 89 | { | 99 | { |
| 100 | u32 pkg_type, reg_dram_cfg; | ||
| 101 | |||
| 102 | if (boot_cpu_data.x86 != 0x10) | ||
| 103 | return false; | ||
| 104 | |||
| 90 | /* | 105 | /* |
| 91 | * Erratum 319: The thermal sensor of older Family 10h processors | 106 | * Erratum 319: The thermal sensor of Socket F/AM2+ processors |
| 92 | * (B steppings) may be unreliable. | 107 | * may be unreliable. |
| 93 | */ | 108 | */ |
| 94 | return boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model <= 2; | 109 | pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK; |
| 110 | if (pkg_type == CPUID_PKGTYPE_F) | ||
| 111 | return true; | ||
| 112 | if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3) | ||
| 113 | return false; | ||
| 114 | |||
| 115 | /* Differentiate between AM2+ (bad) and AM3 (good) */ | ||
| 116 | pci_bus_read_config_dword(pdev->bus, | ||
| 117 | PCI_DEVFN(PCI_SLOT(pdev->devfn), 2), | ||
| 118 | REG_DCT0_CONFIG_HIGH, ®_dram_cfg); | ||
| 119 | return !(reg_dram_cfg & DDR3_MODE); | ||
| 95 | } | 120 | } |
| 96 | 121 | ||
| 97 | static int __devinit k10temp_probe(struct pci_dev *pdev, | 122 | static int __devinit k10temp_probe(struct pci_dev *pdev, |
| @@ -99,9 +124,10 @@ static int __devinit k10temp_probe(struct pci_dev *pdev, | |||
| 99 | { | 124 | { |
| 100 | struct device *hwmon_dev; | 125 | struct device *hwmon_dev; |
| 101 | u32 reg_caps, reg_htc; | 126 | u32 reg_caps, reg_htc; |
| 127 | int unreliable = has_erratum_319(pdev); | ||
| 102 | int err; | 128 | int err; |
| 103 | 129 | ||
| 104 | if (has_erratum_319() && !force) { | 130 | if (unreliable && !force) { |
| 105 | dev_err(&pdev->dev, | 131 | dev_err(&pdev->dev, |
| 106 | "unreliable CPU thermal sensor; monitoring disabled\n"); | 132 | "unreliable CPU thermal sensor; monitoring disabled\n"); |
| 107 | err = -ENODEV; | 133 | err = -ENODEV; |
| @@ -139,7 +165,7 @@ static int __devinit k10temp_probe(struct pci_dev *pdev, | |||
| 139 | } | 165 | } |
| 140 | dev_set_drvdata(&pdev->dev, hwmon_dev); | 166 | dev_set_drvdata(&pdev->dev, hwmon_dev); |
| 141 | 167 | ||
| 142 | if (has_erratum_319() && force) | 168 | if (unreliable && force) |
| 143 | dev_warn(&pdev->dev, | 169 | dev_warn(&pdev->dev, |
| 144 | "unreliable CPU thermal sensor; check erratum 319\n"); | 170 | "unreliable CPU thermal sensor; check erratum 319\n"); |
| 145 | return 0; | 171 | return 0; |
| @@ -169,7 +195,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) | |||
| 169 | dev_set_drvdata(&pdev->dev, NULL); | 195 | dev_set_drvdata(&pdev->dev, NULL); |
| 170 | } | 196 | } |
| 171 | 197 | ||
| 172 | static struct pci_device_id k10temp_id_table[] = { | 198 | static const struct pci_device_id k10temp_id_table[] = { |
| 173 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 199 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
| 174 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, | 200 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, |
| 175 | {} | 201 | {} |
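has_erratum_319() now distinguishes packages via CPUID function 0x80000001, ebx[31:28]. A user-space sketch of the same package-type read (constants copied from the hunk; the family 10h test and the AM2+/AM3 DDR3 disambiguation via PCI function 2 are left out):

/* Sketch: classify the CPU package the way has_erratum_319() starts to. */
#include <cpuid.h>
#include <stdio.h>

#define CPUID_PKGTYPE_MASK	0xf0000000
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return 1;

	switch (ebx & CPUID_PKGTYPE_MASK) {
	case CPUID_PKGTYPE_F:
		printf("Socket F: erratum 319 applies\n");
		break;
	case CPUID_PKGTYPE_AM2R2_AM3:
		printf("AM2r2/AM3: needs the DCT0 DDR3 check\n");
		break;
	default:
		printf("other package type\n");
	}
	return 0;
}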
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 1fe995111841..0ceb6d6200a3 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c | |||
| @@ -136,7 +136,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0); | |||
| 136 | static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1); | 136 | static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1); |
| 137 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); | 137 | static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); |
| 138 | 138 | ||
| 139 | static struct pci_device_id k8temp_ids[] = { | 139 | static const struct pci_device_id k8temp_ids[] = { |
| 140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | 140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, |
| 141 | { 0 }, | 141 | { 0 }, |
| 142 | }; | 142 | }; |
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 12f2e7086560..79c2931e3008 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c | |||
| @@ -697,7 +697,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev) | |||
| 697 | return data; | 697 | return data; |
| 698 | } | 698 | } |
| 699 | 699 | ||
| 700 | static struct pci_device_id sis5595_pci_ids[] = { | 700 | static const struct pci_device_id sis5595_pci_ids[] = { |
| 701 | { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, | 701 | { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, |
| 702 | { 0, } | 702 | { 0, } |
| 703 | }; | 703 | }; |
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index 39e82a492f26..f397ce7ad598 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c | |||
| @@ -767,7 +767,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) | |||
| 767 | return data; | 767 | return data; |
| 768 | } | 768 | } |
| 769 | 769 | ||
| 770 | static struct pci_device_id via686a_pci_ids[] = { | 770 | static const struct pci_device_id via686a_pci_ids[] = { |
| 771 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, | 771 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, |
| 772 | { 0, } | 772 | { 0, } |
| 773 | }; | 773 | }; |
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index 470a1226ba2b..d47b4c9949c2 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c | |||
| @@ -697,7 +697,7 @@ static struct platform_driver vt8231_driver = { | |||
| 697 | .remove = __devexit_p(vt8231_remove), | 697 | .remove = __devexit_p(vt8231_remove), |
| 698 | }; | 698 | }; |
| 699 | 699 | ||
| 700 | static struct pci_device_id vt8231_pci_ids[] = { | 700 | static const struct pci_device_id vt8231_pci_ids[] = { |
| 701 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, | 701 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, |
| 702 | { 0, } | 702 | { 0, } |
| 703 | }; | 703 | }; |
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig index f102fcc7e52a..e02096cf7d95 100644 --- a/drivers/ieee1394/Kconfig +++ b/drivers/ieee1394/Kconfig | |||
| @@ -1,8 +1,3 @@ | |||
| 1 | menu "IEEE 1394 (FireWire) support" | ||
| 2 | depends on PCI || BROKEN | ||
| 3 | |||
| 4 | source "drivers/firewire/Kconfig" | ||
| 5 | |||
| 6 | config IEEE1394 | 1 | config IEEE1394 |
| 7 | tristate "Legacy alternative FireWire driver stack" | 2 | tristate "Legacy alternative FireWire driver stack" |
| 8 | depends on PCI || BROKEN | 3 | depends on PCI || BROKEN |
| @@ -16,8 +11,13 @@ config IEEE1394 | |||
| 16 | is the core support only, you will also need to select a driver for | 11 | is the core support only, you will also need to select a driver for |
| 17 | your IEEE 1394 adapter. | 12 | your IEEE 1394 adapter. |
| 18 | 13 | ||
| 19 | To compile this driver as a module, say M here: the | 14 | To compile this driver as a module, say M here: the module will be |
| 20 | module will be called ieee1394. | 15 | called ieee1394. |
| 16 | |||
| 17 | NOTE: | ||
| 18 | ieee1394 is superseded by the newer firewire-core driver. See | ||
| 19 | http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for | ||
| 20 | further information on how to switch to the new FireWire drivers. | ||
| 21 | 21 | ||
| 22 | config IEEE1394_OHCI1394 | 22 | config IEEE1394_OHCI1394 |
| 23 | tristate "OHCI-1394 controllers" | 23 | tristate "OHCI-1394 controllers" |
| @@ -29,19 +29,23 @@ config IEEE1394_OHCI1394 | |||
| 29 | use one of these chipsets. It should work with any OHCI-1394 | 29 | use one of these chipsets. It should work with any OHCI-1394 |
| 30 | compliant card, however. | 30 | compliant card, however. |
| 31 | 31 | ||
| 32 | To compile this driver as a module, say M here: the | 32 | To compile this driver as a module, say M here: the module will be |
| 33 | module will be called ohci1394. | 33 | called ohci1394. |
| 34 | 34 | ||
| 35 | NOTE: | 35 | NOTE: |
| 36 | ohci1394 is superseded by the newer firewire-ohci driver. See | ||
| 37 | http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for | ||
| 38 | further information on how to switch to the new FireWire drivers. | ||
| 39 | |||
| 36 | If you want to install firewire-ohci and ohci1394 together, you | 40 | If you want to install firewire-ohci and ohci1394 together, you |
| 37 | should configure them only as modules and blacklist the driver(s) | 41 | should configure them only as modules and blacklist the driver(s) |
| 38 | which you don't want to have auto-loaded. Add either | 42 | which you don't want to have auto-loaded. Add either |
| 39 | 43 | ||
| 40 | blacklist firewire-ohci | ||
| 41 | or | ||
| 42 | blacklist ohci1394 | 44 | blacklist ohci1394 |
| 43 | blacklist video1394 | 45 | blacklist video1394 |
| 44 | blacklist dv1394 | 46 | blacklist dv1394 |
| 47 | or | ||
| 48 | blacklist firewire-ohci | ||
| 45 | 49 | ||
| 46 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf | 50 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf |
| 47 | depending on your distribution. | 51 | depending on your distribution. |
| @@ -58,8 +62,8 @@ config IEEE1394_PCILYNX | |||
| 58 | Instruments PCILynx chip. Note: this driver is written for revision | 62 | Instruments PCILynx chip. Note: this driver is written for revision |
| 59 | 2 of this chip and may not work with revision 0. | 63 | 2 of this chip and may not work with revision 0. |
| 60 | 64 | ||
| 61 | To compile this driver as a module, say M here: the | 65 | To compile this driver as a module, say M here: the module will be |
| 62 | module will be called pcilynx. | 66 | called pcilynx. |
| 63 | 67 | ||
| 64 | Only some old and now very rare PCI and CardBus cards and | 68 | Only some old and now very rare PCI and CardBus cards and |
| 65 | PowerMacs G3 B&W contain the PCILynx controller. Therefore | 69 | PowerMacs G3 B&W contain the PCILynx controller. Therefore |
| @@ -79,6 +83,14 @@ config IEEE1394_SBP2 | |||
| 79 | You should also enable support for disks, CD-ROMs, etc. in the SCSI | 83 | You should also enable support for disks, CD-ROMs, etc. in the SCSI |
| 80 | configuration section. | 84 | configuration section. |
| 81 | 85 | ||
| 86 | To compile this driver as a module, say M here: the module will be | ||
| 87 | called sbp2. | ||
| 88 | |||
| 89 | NOTE: | ||
| 90 | sbp2 is superseded by the newer firewire-sbp2 driver. See | ||
| 91 | http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for | ||
| 92 | further information on how to switch to the new FireWire drivers. | ||
| 93 | |||
| 82 | config IEEE1394_SBP2_PHYS_DMA | 94 | config IEEE1394_SBP2_PHYS_DMA |
| 83 | bool "Enable replacement for physical DMA in SBP2" | 95 | bool "Enable replacement for physical DMA in SBP2" |
| 84 | depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL | 96 | depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL |
| @@ -111,6 +123,11 @@ config IEEE1394_ETH1394 | |||
| 111 | 123 | ||
| 112 | The module is called eth1394 although it does not emulate Ethernet. | 124 | The module is called eth1394 although it does not emulate Ethernet. |
| 113 | 125 | ||
| 126 | NOTE: | ||
| 127 | eth1394 is superseded by the newer firewire-net driver. See | ||
| 128 | http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for | ||
| 129 | further information on how to switch to the new FireWire drivers. | ||
| 130 | |||
| 114 | config IEEE1394_RAWIO | 131 | config IEEE1394_RAWIO |
| 115 | tristate "raw1394 userspace interface" | 132 | tristate "raw1394 userspace interface" |
| 116 | depends on IEEE1394 | 133 | depends on IEEE1394 |
| @@ -123,6 +140,11 @@ config IEEE1394_RAWIO | |||
| 123 | To compile this driver as a module, say M here: the module will be | 140 | To compile this driver as a module, say M here: the module will be |
| 124 | called raw1394. | 141 | called raw1394. |
| 125 | 142 | ||
| 143 | NOTE: | ||
| 144 | raw1394 is superseded by the newer firewire-core driver. See | ||
| 145 | http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for | ||
| 146 | further information on how to switch to the new FireWire drivers. | ||
| 147 | |||
| 126 | config IEEE1394_VIDEO1394 | 148 | config IEEE1394_VIDEO1394 |
| 127 | tristate "video1394 userspace interface" | 149 | tristate "video1394 userspace interface" |
| 128 | depends on IEEE1394 && IEEE1394_OHCI1394 | 150 | depends on IEEE1394 && IEEE1394_OHCI1394 |
| @@ -136,13 +158,18 @@ config IEEE1394_VIDEO1394 | |||
| 136 | To compile this driver as a module, say M here: the module will be | 158 | To compile this driver as a module, say M here: the module will be |
| 137 | called video1394. | 159 | called video1394. |
| 138 | 160 | ||
| 161 | NOTE: | ||
| 162 | video1394 is superseded by the newer firewire-core driver. See | ||
| 163 | http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for | ||
| 164 | further information on how to switch to the new FireWire drivers. | ||
| 165 | |||
| 139 | config IEEE1394_DV1394 | 166 | config IEEE1394_DV1394 |
| 140 | tristate "dv1394 userspace interface (deprecated)" | 167 | tristate "dv1394 userspace interface (deprecated)" |
| 141 | depends on IEEE1394 && IEEE1394_OHCI1394 | 168 | depends on IEEE1394 && IEEE1394_OHCI1394 |
| 142 | help | 169 | help |
| 143 | The dv1394 driver is unsupported and may be removed from Linux in a | 170 | The dv1394 driver is unsupported and may be removed from Linux in a |
| 144 | future release. Its functionality is now provided by raw1394 together | 171 | future release. Its functionality is now provided by either |
| 145 | with libraries such as libiec61883. | 172 | raw1394 or firewire-core together with libraries such as libiec61883. |
| 146 | 173 | ||
| 147 | config IEEE1394_VERBOSEDEBUG | 174 | config IEEE1394_VERBOSEDEBUG |
| 148 | bool "Excessive debugging output" | 175 | bool "Excessive debugging output" |
| @@ -153,5 +180,3 @@ config IEEE1394_VERBOSEDEBUG | |||
| 153 | will quickly result in large amounts of data sent to the system log. | 180 | will quickly result in large amounts of data sent to the system log. |
| 154 | 181 | ||
| 155 | Say Y if you really need the debugging output. Everyone else says N. | 182 | Say Y if you really need the debugging output. Everyone else says N. |
| 156 | |||
| 157 | endmenu | ||
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index fbdd73106000..cc9b5940fa97 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -2083,7 +2083,7 @@ static int cma_get_port(struct rdma_id_private *id_priv) | |||
| 2083 | static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, | 2083 | static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, |
| 2084 | struct sockaddr *addr) | 2084 | struct sockaddr *addr) |
| 2085 | { | 2085 | { |
| 2086 | #if defined(CONFIG_IPv6) || defined(CONFIG_IPV6_MODULE) | 2086 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
| 2087 | struct sockaddr_in6 *sin6; | 2087 | struct sockaddr_in6 *sin6; |
| 2088 | 2088 | ||
| 2089 | if (addr->sa_family != AF_INET6) | 2089 | if (addr->sa_family != AF_INET6) |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 989555cee883..2a97c964b9ef 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -1752,7 +1752,7 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 1752 | ind = qp->rq.head & (qp->rq.wqe_cnt - 1); | 1752 | ind = qp->rq.head & (qp->rq.wqe_cnt - 1); |
| 1753 | 1753 | ||
| 1754 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1754 | for (nreq = 0; wr; ++nreq, wr = wr->next) { |
| 1755 | if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) { | 1755 | if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
| 1756 | err = -ENOMEM; | 1756 | err = -ENOMEM; |
| 1757 | *bad_wr = wr; | 1757 | *bad_wr = wr; |
| 1758 | goto out; | 1758 | goto out; |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index d42565258fb7..cf8085bcbd6d 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
| @@ -74,6 +74,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
| 74 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | 74 | struct mlx4_ib_dev *dev = to_mdev(pd->device); |
| 75 | struct mlx4_ib_srq *srq; | 75 | struct mlx4_ib_srq *srq; |
| 76 | struct mlx4_wqe_srq_next_seg *next; | 76 | struct mlx4_wqe_srq_next_seg *next; |
| 77 | struct mlx4_wqe_data_seg *scatter; | ||
| 77 | int desc_size; | 78 | int desc_size; |
| 78 | int buf_size; | 79 | int buf_size; |
| 79 | int err; | 80 | int err; |
| @@ -149,6 +150,11 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
| 149 | next = get_wqe(srq, i); | 150 | next = get_wqe(srq, i); |
| 150 | next->next_wqe_index = | 151 | next->next_wqe_index = |
| 151 | cpu_to_be16((i + 1) & (srq->msrq.max - 1)); | 152 | cpu_to_be16((i + 1) & (srq->msrq.max - 1)); |
| 153 | |||
| 154 | for (scatter = (void *) (next + 1); | ||
| 155 | (void *) scatter < (void *) next + desc_size; | ||
| 156 | ++scatter) | ||
| 157 | scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY); | ||
| 152 | } | 158 | } |
| 153 | 159 | ||
| 154 | err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, | 160 | err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, |
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c index b483b2995fa9..f967008f332e 100644 --- a/drivers/input/ff-memless.c +++ b/drivers/input/ff-memless.c | |||
| @@ -221,11 +221,27 @@ static int get_compatible_type(struct ff_device *ff, int effect_type) | |||
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | /* | 223 | /* |
| 224 | * Only left/right direction should be used (under/over 0x8000) for | ||
| 225 | * forward/reverse motor direction (to keep calculation fast & simple). | ||
| 226 | */ | ||
| 227 | static u16 ml_calculate_direction(u16 direction, u16 force, | ||
| 228 | u16 new_direction, u16 new_force) | ||
| 229 | { | ||
| 230 | if (!force) | ||
| 231 | return new_direction; | ||
| 232 | if (!new_force) | ||
| 233 | return direction; | ||
| 234 | return (((u32)(direction >> 1) * force + | ||
| 235 | (new_direction >> 1) * new_force) / | ||
| 236 | (force + new_force)) << 1; | ||
| 237 | } | ||
| 238 | |||
| 239 | /* | ||
| 224 | * Combine two effects and apply gain. | 240 | * Combine two effects and apply gain. |
| 225 | */ | 241 | */ |
| 226 | static void ml_combine_effects(struct ff_effect *effect, | 242 | static void ml_combine_effects(struct ff_effect *effect, |
| 227 | struct ml_effect_state *state, | 243 | struct ml_effect_state *state, |
| 228 | unsigned int gain) | 244 | int gain) |
| 229 | { | 245 | { |
| 230 | struct ff_effect *new = state->effect; | 246 | struct ff_effect *new = state->effect; |
| 231 | unsigned int strong, weak, i; | 247 | unsigned int strong, weak, i; |
| @@ -252,8 +268,21 @@ static void ml_combine_effects(struct ff_effect *effect, | |||
| 252 | break; | 268 | break; |
| 253 | 269 | ||
| 254 | case FF_RUMBLE: | 270 | case FF_RUMBLE: |
| 255 | strong = new->u.rumble.strong_magnitude * gain / 0xffff; | 271 | strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff; |
| 256 | weak = new->u.rumble.weak_magnitude * gain / 0xffff; | 272 | weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff; |
| 273 | |||
| 274 | if (effect->u.rumble.strong_magnitude + strong) | ||
| 275 | effect->direction = ml_calculate_direction( | ||
| 276 | effect->direction, | ||
| 277 | effect->u.rumble.strong_magnitude, | ||
| 278 | new->direction, strong); | ||
| 279 | else if (effect->u.rumble.weak_magnitude + weak) | ||
| 280 | effect->direction = ml_calculate_direction( | ||
| 281 | effect->direction, | ||
| 282 | effect->u.rumble.weak_magnitude, | ||
| 283 | new->direction, weak); | ||
| 284 | else | ||
| 285 | effect->direction = 0; | ||
| 257 | effect->u.rumble.strong_magnitude = | 286 | effect->u.rumble.strong_magnitude = |
| 258 | min(strong + effect->u.rumble.strong_magnitude, | 287 | min(strong + effect->u.rumble.strong_magnitude, |
| 259 | 0xffffU); | 288 | 0xffffU); |
| @@ -268,6 +297,13 @@ static void ml_combine_effects(struct ff_effect *effect, | |||
| 268 | /* here we also scale it 0x7fff => 0xffff */ | 297 | /* here we also scale it 0x7fff => 0xffff */ |
| 269 | i = i * gain / 0x7fff; | 298 | i = i * gain / 0x7fff; |
| 270 | 299 | ||
| 300 | if (effect->u.rumble.strong_magnitude + i) | ||
| 301 | effect->direction = ml_calculate_direction( | ||
| 302 | effect->direction, | ||
| 303 | effect->u.rumble.strong_magnitude, | ||
| 304 | new->direction, i); | ||
| 305 | else | ||
| 306 | effect->direction = 0; | ||
| 271 | effect->u.rumble.strong_magnitude = | 307 | effect->u.rumble.strong_magnitude = |
| 272 | min(i + effect->u.rumble.strong_magnitude, 0xffffU); | 308 | min(i + effect->u.rumble.strong_magnitude, 0xffffU); |
| 273 | effect->u.rumble.weak_magnitude = | 309 | effect->u.rumble.weak_magnitude = |
| @@ -411,8 +447,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) | |||
| 411 | msecs_to_jiffies(state->effect->replay.length); | 447 | msecs_to_jiffies(state->effect->replay.length); |
| 412 | state->adj_at = state->play_at; | 448 | state->adj_at = state->play_at; |
| 413 | 449 | ||
| 414 | ml_schedule_timer(ml); | ||
| 415 | |||
| 416 | } else { | 450 | } else { |
| 417 | debug("initiated stop"); | 451 | debug("initiated stop"); |
| 418 | 452 | ||
| @@ -420,10 +454,10 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) | |||
| 420 | __set_bit(FF_EFFECT_ABORTING, &state->flags); | 454 | __set_bit(FF_EFFECT_ABORTING, &state->flags); |
| 421 | else | 455 | else |
| 422 | __clear_bit(FF_EFFECT_STARTED, &state->flags); | 456 | __clear_bit(FF_EFFECT_STARTED, &state->flags); |
| 423 | |||
| 424 | ml_play_effects(ml); | ||
| 425 | } | 457 | } |
| 426 | 458 | ||
| 459 | ml_play_effects(ml); | ||
| 460 | |||
| 427 | return 0; | 461 | return 0; |
| 428 | } | 462 | } |
| 429 | 463 | ||
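The new direction handling is a force-weighted average over the upper 15 bits of the direction word. A standalone sketch with worked cases (values chosen only to show the math):

/* Sketch: same arithmetic as ml_calculate_direction() above. */
#include <assert.h>
#include <stdint.h>

static uint16_t calc_dir(uint16_t dir, uint16_t force,
			 uint16_t new_dir, uint16_t new_force)
{
	if (!force)
		return new_dir;
	if (!new_force)
		return dir;
	return (((uint32_t)(dir >> 1) * force +
		 (new_dir >> 1) * new_force) /
		(force + new_force)) << 1;
}

int main(void)
{
	/* Two equally strong effects at 0x4000 and 0xc000 average to 0x8000. */
	assert(calc_dir(0x4000, 0x8000, 0xc000, 0x8000) == 0x8000);
	/* A zero-force effect leaves the other direction untouched. */
	assert(calc_dir(0x1234, 0, 0xc000, 0x8000) == 0xc000);
	return 0;
}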
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index f6c688cae334..b1edd778639c 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c | |||
| @@ -210,7 +210,7 @@ static int iforce_open(struct input_dev *dev) | |||
| 210 | return 0; | 210 | return 0; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | static void iforce_release(struct input_dev *dev) | 213 | static void iforce_close(struct input_dev *dev) |
| 214 | { | 214 | { |
| 215 | struct iforce *iforce = input_get_drvdata(dev); | 215 | struct iforce *iforce = input_get_drvdata(dev); |
| 216 | int i; | 216 | int i; |
| @@ -228,30 +228,17 @@ static void iforce_release(struct input_dev *dev) | |||
| 228 | 228 | ||
| 229 | /* Disable force feedback playback */ | 229 | /* Disable force feedback playback */ |
| 230 | iforce_send_packet(iforce, FF_CMD_ENABLE, "\001"); | 230 | iforce_send_packet(iforce, FF_CMD_ENABLE, "\001"); |
| 231 | /* Wait for the command to complete */ | ||
| 232 | wait_event_interruptible(iforce->wait, | ||
| 233 | !test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)); | ||
| 231 | } | 234 | } |
| 232 | 235 | ||
| 233 | switch (iforce->bus) { | 236 | switch (iforce->bus) { |
| 234 | #ifdef CONFIG_JOYSTICK_IFORCE_USB | 237 | #ifdef CONFIG_JOYSTICK_IFORCE_USB |
| 235 | case IFORCE_USB: | ||
| 236 | usb_kill_urb(iforce->irq); | ||
| 237 | |||
| 238 | /* The device was unplugged before the file | ||
| 239 | * was released */ | ||
| 240 | if (iforce->usbdev == NULL) { | ||
| 241 | iforce_delete_device(iforce); | ||
| 242 | kfree(iforce); | ||
| 243 | } | ||
| 244 | break; | ||
| 245 | #endif | ||
| 246 | } | ||
| 247 | } | ||
| 248 | |||
| 249 | void iforce_delete_device(struct iforce *iforce) | ||
| 250 | { | ||
| 251 | switch (iforce->bus) { | ||
| 252 | #ifdef CONFIG_JOYSTICK_IFORCE_USB | ||
| 253 | case IFORCE_USB: | 238 | case IFORCE_USB: |
| 254 | iforce_usb_delete(iforce); | 239 | usb_kill_urb(iforce->irq); |
| 240 | usb_kill_urb(iforce->out); | ||
| 241 | usb_kill_urb(iforce->ctrl); | ||
| 255 | break; | 242 | break; |
| 256 | #endif | 243 | #endif |
| 257 | #ifdef CONFIG_JOYSTICK_IFORCE_232 | 244 | #ifdef CONFIG_JOYSTICK_IFORCE_232 |
| @@ -303,7 +290,7 @@ int iforce_init_device(struct iforce *iforce) | |||
| 303 | 290 | ||
| 304 | input_dev->name = "Unknown I-Force device"; | 291 | input_dev->name = "Unknown I-Force device"; |
| 305 | input_dev->open = iforce_open; | 292 | input_dev->open = iforce_open; |
| 306 | input_dev->close = iforce_release; | 293 | input_dev->close = iforce_close; |
| 307 | 294 | ||
| 308 | /* | 295 | /* |
| 309 | * On-device memory allocation. | 296 | * On-device memory allocation. |
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index 9f289d8f52c6..b41303d3ec54 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c | |||
| @@ -109,6 +109,7 @@ static void iforce_usb_out(struct urb *urb) | |||
| 109 | struct iforce *iforce = urb->context; | 109 | struct iforce *iforce = urb->context; |
| 110 | 110 | ||
| 111 | if (urb->status) { | 111 | if (urb->status) { |
| 112 | clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); | ||
| 112 | dbg("urb->status %d, exiting", urb->status); | 113 | dbg("urb->status %d, exiting", urb->status); |
| 113 | return; | 114 | return; |
| 114 | } | 115 | } |
| @@ -186,33 +187,19 @@ fail: | |||
| 186 | return err; | 187 | return err; |
| 187 | } | 188 | } |
| 188 | 189 | ||
| 189 | /* Called by iforce_delete() */ | ||
| 190 | void iforce_usb_delete(struct iforce* iforce) | ||
| 191 | { | ||
| 192 | usb_kill_urb(iforce->irq); | ||
| 193 | usb_kill_urb(iforce->out); | ||
| 194 | usb_kill_urb(iforce->ctrl); | ||
| 195 | |||
| 196 | usb_free_urb(iforce->irq); | ||
| 197 | usb_free_urb(iforce->out); | ||
| 198 | usb_free_urb(iforce->ctrl); | ||
| 199 | } | ||
| 200 | |||
| 201 | static void iforce_usb_disconnect(struct usb_interface *intf) | 190 | static void iforce_usb_disconnect(struct usb_interface *intf) |
| 202 | { | 191 | { |
| 203 | struct iforce *iforce = usb_get_intfdata(intf); | 192 | struct iforce *iforce = usb_get_intfdata(intf); |
| 204 | int open = 0; /* FIXME! iforce->dev.handle->open; */ | ||
| 205 | 193 | ||
| 206 | usb_set_intfdata(intf, NULL); | 194 | usb_set_intfdata(intf, NULL); |
| 207 | if (iforce) { | ||
| 208 | iforce->usbdev = NULL; | ||
| 209 | input_unregister_device(iforce->dev); | ||
| 210 | 195 | ||
| 211 | if (!open) { | 196 | input_unregister_device(iforce->dev); |
| 212 | iforce_delete_device(iforce); | 197 | |
| 213 | kfree(iforce); | 198 | usb_free_urb(iforce->irq); |
| 214 | } | 199 | usb_free_urb(iforce->out); |
| 215 | } | 200 | usb_free_urb(iforce->ctrl); |
| 201 | |||
| 202 | kfree(iforce); | ||
| 216 | } | 203 | } |
| 217 | 204 | ||
| 218 | static struct usb_device_id iforce_usb_ids [] = { | 205 | static struct usb_device_id iforce_usb_ids [] = { |
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h index f2d91f4028ca..9f494b75848a 100644 --- a/drivers/input/joystick/iforce/iforce.h +++ b/drivers/input/joystick/iforce/iforce.h | |||
| @@ -150,11 +150,9 @@ void iforce_serial_xmit(struct iforce *iforce); | |||
| 150 | 150 | ||
| 151 | /* iforce-usb.c */ | 151 | /* iforce-usb.c */ |
| 152 | void iforce_usb_xmit(struct iforce *iforce); | 152 | void iforce_usb_xmit(struct iforce *iforce); |
| 153 | void iforce_usb_delete(struct iforce *iforce); | ||
| 154 | 153 | ||
| 155 | /* iforce-main.c */ | 154 | /* iforce-main.c */ |
| 156 | int iforce_init_device(struct iforce *iforce); | 155 | int iforce_init_device(struct iforce *iforce); |
| 157 | void iforce_delete_device(struct iforce *iforce); | ||
| 158 | 156 | ||
| 159 | /* iforce-packets.c */ | 157 | /* iforce-packets.c */ |
| 160 | int iforce_control_playback(struct iforce*, u16 id, unsigned int); | 158 | int iforce_control_playback(struct iforce*, u16 id, unsigned int); |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index a3573570c52f..1f5e2ce327d6 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
| @@ -134,7 +134,8 @@ static const unsigned short atkbd_unxlate_table[128] = { | |||
| 134 | #define ATKBD_CMD_GETID 0x02f2 | 134 | #define ATKBD_CMD_GETID 0x02f2 |
| 135 | #define ATKBD_CMD_SETREP 0x10f3 | 135 | #define ATKBD_CMD_SETREP 0x10f3 |
| 136 | #define ATKBD_CMD_ENABLE 0x00f4 | 136 | #define ATKBD_CMD_ENABLE 0x00f4 |
| 137 | #define ATKBD_CMD_RESET_DIS 0x00f5 | 137 | #define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ |
| 138 | #define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ | ||
| 138 | #define ATKBD_CMD_SETALL_MBR 0x00fa | 139 | #define ATKBD_CMD_SETALL_MBR 0x00fa |
| 139 | #define ATKBD_CMD_RESET_BAT 0x02ff | 140 | #define ATKBD_CMD_RESET_BAT 0x02ff |
| 140 | #define ATKBD_CMD_RESEND 0x00fe | 141 | #define ATKBD_CMD_RESEND 0x00fe |
| @@ -836,7 +837,7 @@ static void atkbd_cleanup(struct serio *serio) | |||
| 836 | struct atkbd *atkbd = serio_get_drvdata(serio); | 837 | struct atkbd *atkbd = serio_get_drvdata(serio); |
| 837 | 838 | ||
| 838 | atkbd_disable(atkbd); | 839 | atkbd_disable(atkbd); |
| 839 | ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_BAT); | 840 | ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_DEF); |
| 840 | } | 841 | } |
| 841 | 842 | ||
| 842 | 843 | ||
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 34f4a29d4973..d3c8b61a941d 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
| @@ -29,11 +29,13 @@ struct matrix_keypad { | |||
| 29 | unsigned short *keycodes; | 29 | unsigned short *keycodes; |
| 30 | unsigned int row_shift; | 30 | unsigned int row_shift; |
| 31 | 31 | ||
| 32 | DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS); | ||
| 33 | |||
| 32 | uint32_t last_key_state[MATRIX_MAX_COLS]; | 34 | uint32_t last_key_state[MATRIX_MAX_COLS]; |
| 33 | struct delayed_work work; | 35 | struct delayed_work work; |
| 36 | spinlock_t lock; | ||
| 34 | bool scan_pending; | 37 | bool scan_pending; |
| 35 | bool stopped; | 38 | bool stopped; |
| 36 | spinlock_t lock; | ||
| 37 | }; | 39 | }; |
| 38 | 40 | ||
| 39 | /* | 41 | /* |
| @@ -222,9 +224,16 @@ static int matrix_keypad_suspend(struct device *dev) | |||
| 222 | 224 | ||
| 223 | matrix_keypad_stop(keypad->input_dev); | 225 | matrix_keypad_stop(keypad->input_dev); |
| 224 | 226 | ||
| 225 | if (device_may_wakeup(&pdev->dev)) | 227 | if (device_may_wakeup(&pdev->dev)) { |
| 226 | for (i = 0; i < pdata->num_row_gpios; i++) | 228 | for (i = 0; i < pdata->num_row_gpios; i++) { |
| 227 | enable_irq_wake(gpio_to_irq(pdata->row_gpios[i])); | 229 | if (!test_bit(i, keypad->disabled_gpios)) { |
| 230 | unsigned int gpio = pdata->row_gpios[i]; | ||
| 231 | |||
| 232 | if (enable_irq_wake(gpio_to_irq(gpio)) == 0) | ||
| 233 | __set_bit(i, keypad->disabled_gpios); | ||
| 234 | } | ||
| 235 | } | ||
| 236 | } | ||
| 228 | 237 | ||
| 229 | return 0; | 238 | return 0; |
| 230 | } | 239 | } |
| @@ -236,9 +245,15 @@ static int matrix_keypad_resume(struct device *dev) | |||
| 236 | const struct matrix_keypad_platform_data *pdata = keypad->pdata; | 245 | const struct matrix_keypad_platform_data *pdata = keypad->pdata; |
| 237 | int i; | 246 | int i; |
| 238 | 247 | ||
| 239 | if (device_may_wakeup(&pdev->dev)) | 248 | if (device_may_wakeup(&pdev->dev)) { |
| 240 | for (i = 0; i < pdata->num_row_gpios; i++) | 249 | for (i = 0; i < pdata->num_row_gpios; i++) { |
| 241 | disable_irq_wake(gpio_to_irq(pdata->row_gpios[i])); | 250 | if (test_and_clear_bit(i, keypad->disabled_gpios)) { |
| 251 | unsigned int gpio = pdata->row_gpios[i]; | ||
| 252 | |||
| 253 | disable_irq_wake(gpio_to_irq(gpio)); | ||
| 254 | } | ||
| 255 | } | ||
| 256 | } | ||
| 242 | 257 | ||
| 243 | matrix_keypad_start(keypad->input_dev); | 258 | matrix_keypad_start(keypad->input_dev); |
| 244 | 259 | ||
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index eeaa7acb9cfc..21d6184efa96 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c | |||
| @@ -253,14 +253,6 @@ static irqreturn_t do_kp_irq(int irq, void *_kp) | |||
| 253 | u8 reg; | 253 | u8 reg; |
| 254 | int ret; | 254 | int ret; |
| 255 | 255 | ||
| 256 | #ifdef CONFIG_LOCKDEP | ||
| 257 | /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which | ||
| 258 | * we don't want and can't tolerate. Although it might be | ||
| 259 | * friendlier not to borrow this thread context... | ||
| 260 | */ | ||
| 261 | local_irq_enable(); | ||
| 262 | #endif | ||
| 263 | |||
| 264 | /* Read & Clear TWL4030 pending interrupt */ | 256 | /* Read & Clear TWL4030 pending interrupt */ |
| 265 | ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1); | 257 | ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1); |
| 266 | 258 | ||
| @@ -403,7 +395,8 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev) | |||
| 403 | * | 395 | * |
| 404 | * NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ... | 396 | * NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ... |
| 405 | */ | 397 | */ |
| 406 | error = request_irq(kp->irq, do_kp_irq, 0, pdev->name, kp); | 398 | error = request_threaded_irq(kp->irq, NULL, do_kp_irq, |
| 399 | 0, pdev->name, kp); | ||
| 407 | if (error) { | 400 | if (error) { |
| 408 | dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n", | 401 | dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n", |
| 409 | kp->irq); | 402 | kp->irq); |
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index bdde5c889035..e9069b87fde2 100644 --- a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c | |||
| @@ -39,18 +39,8 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr) | |||
| 39 | int err; | 39 | int err; |
| 40 | u8 value; | 40 | u8 value; |
| 41 | 41 | ||
| 42 | #ifdef CONFIG_LOCKDEP | ||
| 43 | /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which | ||
| 44 | * we don't want and can't tolerate since this is a threaded | ||
| 45 | * IRQ and can sleep due to the i2c reads it has to issue. | ||
| 46 | * Although it might be friendlier not to borrow this thread | ||
| 47 | * context... | ||
| 48 | */ | ||
| 49 | local_irq_enable(); | ||
| 50 | #endif | ||
| 51 | |||
| 52 | err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value, | 42 | err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value, |
| 53 | STS_HW_CONDITIONS); | 43 | STS_HW_CONDITIONS); |
| 54 | if (!err) { | 44 | if (!err) { |
| 55 | input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ); | 45 | input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ); |
| 56 | input_sync(pwr); | 46 | input_sync(pwr); |
| @@ -80,7 +70,7 @@ static int __devinit twl4030_pwrbutton_probe(struct platform_device *pdev) | |||
| 80 | pwr->phys = "twl4030_pwrbutton/input0"; | 70 | pwr->phys = "twl4030_pwrbutton/input0"; |
| 81 | pwr->dev.parent = &pdev->dev; | 71 | pwr->dev.parent = &pdev->dev; |
| 82 | 72 | ||
| 83 | err = request_irq(irq, powerbutton_irq, | 73 | err = request_threaded_irq(irq, NULL, powerbutton_irq, |
| 84 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | 74 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, |
| 85 | "twl4030_pwrbutton", pwr); | 75 | "twl4030_pwrbutton", pwr); |
| 86 | if (err < 0) { | 76 | if (err < 0) { |
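Both TWL4030 hunks above drop the CONFIG_LOCKDEP local_irq_enable() workaround by registering the sleeping handler as the thread function of request_threaded_irq() with a NULL primary handler; the IRQ core then runs it in process context, where its i2c reads are legal. A minimal sketch of the conversion, with example_handler and example_dev standing in for the drivers' own names:

#include <linux/interrupt.h>

/* Runs in a kernel thread, so it may sleep (e.g. for i2c transfers). */
static irqreturn_t example_handler(int irq, void *dev_id)
{
    /* ... read and clear the interrupt source, report input events ... */
    return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *example_dev)
{
    /* NULL primary handler: the core just wakes the handler thread. */
    return request_threaded_irq(irq, NULL, example_handler,
                IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
                "example", example_dev);
}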
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index 38da6ab04384..c0afb71a3a6d 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c | |||
| @@ -1328,7 +1328,7 @@ static struct platform_driver wistron_driver = { | |||
| 1328 | .driver = { | 1328 | .driver = { |
| 1329 | .name = "wistron-bios", | 1329 | .name = "wistron-bios", |
| 1330 | .owner = THIS_MODULE, | 1330 | .owner = THIS_MODULE, |
| 1331 | #if CONFIG_PM | 1331 | #ifdef CONFIG_PM |
| 1332 | .pm = &wistron_pm_ops, | 1332 | .pm = &wistron_pm_ops, |
| 1333 | #endif | 1333 | #endif |
| 1334 | }, | 1334 | }, |
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index 3feeb3af8abd..c714ca2407f8 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig | |||
| @@ -70,7 +70,7 @@ config MOUSE_PS2_SYNAPTICS | |||
| 70 | config MOUSE_PS2_LIFEBOOK | 70 | config MOUSE_PS2_LIFEBOOK |
| 71 | bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED | 71 | bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED |
| 72 | default y | 72 | default y |
| 73 | depends on MOUSE_PS2 && X86 | 73 | depends on MOUSE_PS2 && X86 && DMI |
| 74 | help | 74 | help |
| 75 | Say Y here if you have a Fujitsu B-series Lifebook PS/2 | 75 | Say Y here if you have a Fujitsu B-series Lifebook PS/2 |
| 76 | TouchScreen connected to your system. | 76 | TouchScreen connected to your system. |
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index b146237266d8..90be30e93556 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c | |||
| @@ -427,7 +427,6 @@ static void hgpk_recalib_work(struct work_struct *work) | |||
| 427 | 427 | ||
| 428 | static int hgpk_register(struct psmouse *psmouse) | 428 | static int hgpk_register(struct psmouse *psmouse) |
| 429 | { | 429 | { |
| 430 | struct input_dev *dev = psmouse->dev; | ||
| 431 | int err; | 430 | int err; |
| 432 | 431 | ||
| 433 | /* register handlers */ | 432 | /* register handlers */ |
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c index 2e6bdfea0165..6d7aa10d10f0 100644 --- a/drivers/input/mouse/lifebook.c +++ b/drivers/input/mouse/lifebook.c | |||
| @@ -44,7 +44,6 @@ static int lifebook_set_6byte_proto(const struct dmi_system_id *d) | |||
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | static const struct dmi_system_id __initconst lifebook_dmi_table[] = { | 46 | static const struct dmi_system_id __initconst lifebook_dmi_table[] = { |
| 47 | #if defined(CONFIG_DMI) && defined(CONFIG_X86) | ||
| 48 | { | 47 | { |
| 49 | /* FLORA-ie 55mi */ | 48 | /* FLORA-ie 55mi */ |
| 50 | .matches = { | 49 | .matches = { |
| @@ -118,7 +117,6 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = { | |||
| 118 | }, | 117 | }, |
| 119 | }, | 118 | }, |
| 120 | { } | 119 | { } |
| 121 | #endif | ||
| 122 | }; | 120 | }; |
| 123 | 121 | ||
| 124 | void __init lifebook_module_init(void) | 122 | void __init lifebook_module_init(void) |
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index fd0bc094616a..401ac6b6edd4 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
| @@ -1137,7 +1137,10 @@ static void psmouse_cleanup(struct serio *serio) | |||
| 1137 | if (psmouse->cleanup) | 1137 | if (psmouse->cleanup) |
| 1138 | psmouse->cleanup(psmouse); | 1138 | psmouse->cleanup(psmouse); |
| 1139 | 1139 | ||
| 1140 | psmouse_reset(psmouse); | 1140 | /* |
| 1141 | * Reset the mouse to defaults (bare PS/2 protocol). | ||
| 1142 | */ | ||
| 1143 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS); | ||
| 1141 | 1144 | ||
| 1142 | /* | 1145 | /* |
| 1143 | * Some boxes, such as HP nx7400, get terribly confused if mouse | 1146 | * Some boxes, such as HP nx7400, get terribly confused if mouse |
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 0236f0d5fd91..e0f30186d513 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
| @@ -284,13 +284,7 @@ static void serio_handle_event(void) | |||
| 284 | 284 | ||
| 285 | mutex_lock(&serio_mutex); | 285 | mutex_lock(&serio_mutex); |
| 286 | 286 | ||
| 287 | /* | 287 | while ((event = serio_get_event())) { |
| 288 | * Note that we handle only one event here to give swsusp | ||
| 289 | * a chance to freeze kseriod thread. Serio events should | ||
| 290 | * be pretty rare so we are not concerned about taking | ||
| 291 | * performance hit. | ||
| 292 | */ | ||
| 293 | if ((event = serio_get_event())) { | ||
| 294 | 288 | ||
| 295 | switch (event->type) { | 289 | switch (event->type) { |
| 296 | case SERIO_REGISTER_PORT: | 290 | case SERIO_REGISTER_PORT: |
| @@ -380,10 +374,9 @@ static struct serio *serio_get_pending_child(struct serio *parent) | |||
| 380 | 374 | ||
| 381 | static int serio_thread(void *nothing) | 375 | static int serio_thread(void *nothing) |
| 382 | { | 376 | { |
| 383 | set_freezable(); | ||
| 384 | do { | 377 | do { |
| 385 | serio_handle_event(); | 378 | serio_handle_event(); |
| 386 | wait_event_freezable(serio_wait, | 379 | wait_event_interruptible(serio_wait, |
| 387 | kthread_should_stop() || !list_empty(&serio_event_list)); | 380 | kthread_should_stop() || !list_empty(&serio_event_list)); |
| 388 | } while (!kthread_should_stop()); | 381 | } while (!kthread_should_stop()); |
| 389 | 382 | ||
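With kseriod no longer freezable, serio_handle_event() drains the whole backlog per wake-up instead of handling a single event per loop iteration. A minimal sketch of the drain loop; example_dispatch_event() and the kfree() of the entry are placeholders for the per-type switch and bookkeeping in serio.c:

static void example_handle_events(void)
{
    struct serio_event *event;

    mutex_lock(&serio_mutex);
    /* Handle every queued event before going back to sleep. */
    while ((event = serio_get_event())) {
        example_dispatch_event(event);  /* hypothetical: the per-type switch */
        kfree(event);                   /* assumed: entry was kmalloc'ed */
    }
    mutex_unlock(&serio_mutex);
}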
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index a6624ad252c5..1a1420d7a828 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c | |||
| @@ -3152,7 +3152,7 @@ static void | |||
| 3152 | hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx, | 3152 | hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx, |
| 3153 | int slot_rx, int bank_rx) | 3153 | int slot_rx, int bank_rx) |
| 3154 | { | 3154 | { |
| 3155 | if (slot_rx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) { | 3155 | if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) { |
| 3156 | /* disable PCM */ | 3156 | /* disable PCM */ |
| 3157 | mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0); | 3157 | mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0); |
| 3158 | return; | 3158 | return; |
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index 951c57b0a7e0..ede46581351a 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c | |||
| @@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) | |||
| 179 | * We assume the Guest has the same number of GDT entries as the | 179 | * We assume the Guest has the same number of GDT entries as the |
| 180 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. | 180 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. |
| 181 | */ | 181 | */ |
| 182 | if (num >= ARRAY_SIZE(cpu->arch.gdt)) | 182 | if (num >= ARRAY_SIZE(cpu->arch.gdt)) { |
| 183 | kill_guest(cpu, "too many gdt entries %i", num); | 183 | kill_guest(cpu, "too many gdt entries %i", num); |
| 184 | return; | ||
| 185 | } | ||
| 184 | 186 | ||
| 185 | /* Set it up, then fix it. */ | 187 | /* Set it up, then fix it. */ |
| 186 | cpu->arch.gdt[num].a = lo; | 188 | cpu->arch.gdt[num].a = lo; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index f4f5f82f9f53..dd3dfe42d5a9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -386,7 +386,9 @@ static void mddev_put(mddev_t *mddev) | |||
| 386 | if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) | 386 | if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) |
| 387 | return; | 387 | return; |
| 388 | if (!mddev->raid_disks && list_empty(&mddev->disks) && | 388 | if (!mddev->raid_disks && list_empty(&mddev->disks) && |
| 389 | !mddev->hold_active) { | 389 | mddev->ctime == 0 && !mddev->hold_active) { |
| 390 | /* Array is not configured at all, and not held active, | ||
| 391 | * so destroy it */ | ||
| 390 | list_del(&mddev->all_mddevs); | 392 | list_del(&mddev->all_mddevs); |
| 391 | if (mddev->gendisk) { | 393 | if (mddev->gendisk) { |
| 392 | /* we did a probe so need to clean up. | 394 | /* we did a probe so need to clean up. |
| @@ -4355,7 +4357,7 @@ static int do_md_run(mddev_t * mddev) | |||
| 4355 | mddev->barriers_work = 1; | 4357 | mddev->barriers_work = 1; |
| 4356 | mddev->ok_start_degraded = start_dirty_degraded; | 4358 | mddev->ok_start_degraded = start_dirty_degraded; |
| 4357 | 4359 | ||
| 4358 | if (start_readonly) | 4360 | if (start_readonly && mddev->ro == 0) |
| 4359 | mddev->ro = 2; /* read-only, but switch on first write */ | 4361 | mddev->ro = 2; /* read-only, but switch on first write */ |
| 4360 | 4362 | ||
| 4361 | err = mddev->pers->run(mddev); | 4363 | err = mddev->pers->run(mddev); |
| @@ -4419,33 +4421,6 @@ static int do_md_run(mddev_t * mddev) | |||
| 4419 | 4421 | ||
| 4420 | set_capacity(disk, mddev->array_sectors); | 4422 | set_capacity(disk, mddev->array_sectors); |
| 4421 | 4423 | ||
| 4422 | /* If there is a partially-recovered drive we need to | ||
| 4423 | * start recovery here. If we leave it to md_check_recovery, | ||
| 4424 | * it will remove the drives and not do the right thing | ||
| 4425 | */ | ||
| 4426 | if (mddev->degraded && !mddev->sync_thread) { | ||
| 4427 | int spares = 0; | ||
| 4428 | list_for_each_entry(rdev, &mddev->disks, same_set) | ||
| 4429 | if (rdev->raid_disk >= 0 && | ||
| 4430 | !test_bit(In_sync, &rdev->flags) && | ||
| 4431 | !test_bit(Faulty, &rdev->flags)) | ||
| 4432 | /* complete an interrupted recovery */ | ||
| 4433 | spares++; | ||
| 4434 | if (spares && mddev->pers->sync_request) { | ||
| 4435 | mddev->recovery = 0; | ||
| 4436 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | ||
| 4437 | mddev->sync_thread = md_register_thread(md_do_sync, | ||
| 4438 | mddev, | ||
| 4439 | "resync"); | ||
| 4440 | if (!mddev->sync_thread) { | ||
| 4441 | printk(KERN_ERR "%s: could not start resync" | ||
| 4442 | " thread...\n", | ||
| 4443 | mdname(mddev)); | ||
| 4444 | /* leave the spares where they are, it shouldn't hurt */ | ||
| 4445 | mddev->recovery = 0; | ||
| 4446 | } | ||
| 4447 | } | ||
| 4448 | } | ||
| 4449 | md_wakeup_thread(mddev->thread); | 4424 | md_wakeup_thread(mddev->thread); |
| 4450 | md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ | 4425 | md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ |
| 4451 | 4426 | ||
| @@ -5262,6 +5237,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) | |||
| 5262 | mddev->minor_version = info->minor_version; | 5237 | mddev->minor_version = info->minor_version; |
| 5263 | mddev->patch_version = info->patch_version; | 5238 | mddev->patch_version = info->patch_version; |
| 5264 | mddev->persistent = !info->not_persistent; | 5239 | mddev->persistent = !info->not_persistent; |
| 5240 | /* ensure mddev_put doesn't delete this now that there | ||
| 5241 | * is some minimal configuration. | ||
| 5242 | */ | ||
| 5243 | mddev->ctime = get_seconds(); | ||
| 5265 | return 0; | 5244 | return 0; |
| 5266 | } | 5245 | } |
| 5267 | mddev->major_version = MD_MAJOR_VERSION; | 5246 | mddev->major_version = MD_MAJOR_VERSION; |
| @@ -6494,10 +6473,11 @@ void md_do_sync(mddev_t *mddev) | |||
| 6494 | mddev->curr_resync = 2; | 6473 | mddev->curr_resync = 2; |
| 6495 | 6474 | ||
| 6496 | try_again: | 6475 | try_again: |
| 6497 | if (kthread_should_stop()) { | 6476 | if (kthread_should_stop()) |
| 6498 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 6477 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
| 6478 | |||
| 6479 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | ||
| 6499 | goto skip; | 6480 | goto skip; |
| 6500 | } | ||
| 6501 | for_each_mddev(mddev2, tmp) { | 6481 | for_each_mddev(mddev2, tmp) { |
| 6502 | if (mddev2 == mddev) | 6482 | if (mddev2 == mddev) |
| 6503 | continue; | 6483 | continue; |
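The md changes above let an array that has only seen set_array_info() survive mddev_put(): set_array_info() stamps mddev->ctime, and the destroy path additionally requires ctime == 0. A minimal sketch of the guard, assuming the mddev fields shown in the hunk:

/* Only throw the mddev away if it carries no configuration at all. */
static int example_mddev_unconfigured(mddev_t *mddev)
{
    return !mddev->raid_disks &&
           list_empty(&mddev->disks) &&
           mddev->ctime == 0 &&
           !mddev->hold_active;
}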
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c index fe44789ab037..6223bf01efe9 100644 --- a/drivers/media/dvb/firewire/firedtv-fw.c +++ b/drivers/media/dvb/firewire/firedtv-fw.c | |||
| @@ -202,14 +202,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request, | |||
| 202 | unsigned long flags; | 202 | unsigned long flags; |
| 203 | int su; | 203 | int su; |
| 204 | 204 | ||
| 205 | if ((tcode != TCODE_WRITE_QUADLET_REQUEST && | 205 | if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0) |
| 206 | tcode != TCODE_WRITE_BLOCK_REQUEST) || | ||
| 207 | offset != CSR_REGISTER_BASE + CSR_FCP_RESPONSE || | ||
| 208 | length == 0 || | ||
| 209 | (((u8 *)payload)[0] & 0xf0) != 0) { | ||
| 210 | fw_send_response(card, request, RCODE_TYPE_ERROR); | ||
| 211 | return; | 206 | return; |
| 212 | } | ||
| 213 | 207 | ||
| 214 | su = ((u8 *)payload)[1] & 0x7; | 208 | su = ((u8 *)payload)[1] & 0x7; |
| 215 | 209 | ||
| @@ -230,10 +224,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request, | |||
| 230 | } | 224 | } |
| 231 | spin_unlock_irqrestore(&node_list_lock, flags); | 225 | spin_unlock_irqrestore(&node_list_lock, flags); |
| 232 | 226 | ||
| 233 | if (fdtv) { | 227 | if (fdtv) |
| 234 | avc_recv(fdtv, payload, length); | 228 | avc_recv(fdtv, payload, length); |
| 235 | fw_send_response(card, request, RCODE_COMPLETE); | ||
| 236 | } | ||
| 237 | } | 229 | } |
| 238 | 230 | ||
| 239 | static struct fw_address_handler fcp_handler = { | 231 | static struct fw_address_handler fcp_handler = { |
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index efba7021948a..3d5f40cd69df 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c | |||
| @@ -40,8 +40,7 @@ | |||
| 40 | 40 | ||
| 41 | #define SG_TABLESIZE 30 | 41 | #define SG_TABLESIZE 30 |
| 42 | 42 | ||
| 43 | static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int, | 43 | static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long); |
| 44 | unsigned long); | ||
| 45 | 44 | ||
| 46 | static spinlock_t i2o_config_lock; | 45 | static spinlock_t i2o_config_lock; |
| 47 | 46 | ||
| @@ -751,7 +750,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, | |||
| 751 | lock_kernel(); | 750 | lock_kernel(); |
| 752 | switch (cmd) { | 751 | switch (cmd) { |
| 753 | case I2OGETIOPS: | 752 | case I2OGETIOPS: |
| 754 | ret = i2o_cfg_ioctl(NULL, file, cmd, arg); | 753 | ret = i2o_cfg_ioctl(file, cmd, arg); |
| 755 | break; | 754 | break; |
| 756 | case I2OPASSTHRU32: | 755 | case I2OPASSTHRU32: |
| 757 | ret = i2o_cfg_passthru32(file, cmd, arg); | 756 | ret = i2o_cfg_passthru32(file, cmd, arg); |
| @@ -984,11 +983,11 @@ out: | |||
| 984 | /* | 983 | /* |
| 985 | * IOCTL Handler | 984 | * IOCTL Handler |
| 986 | */ | 985 | */ |
| 987 | static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, | 986 | static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) |
| 988 | unsigned long arg) | ||
| 989 | { | 987 | { |
| 990 | int ret; | 988 | int ret; |
| 991 | 989 | ||
| 990 | lock_kernel(); | ||
| 992 | switch (cmd) { | 991 | switch (cmd) { |
| 993 | case I2OGETIOPS: | 992 | case I2OGETIOPS: |
| 994 | ret = i2o_cfg_getiops(arg); | 993 | ret = i2o_cfg_getiops(arg); |
| @@ -1044,7 +1043,7 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, | |||
| 1044 | osm_debug("unknown ioctl called!\n"); | 1043 | osm_debug("unknown ioctl called!\n"); |
| 1045 | ret = -EINVAL; | 1044 | ret = -EINVAL; |
| 1046 | } | 1045 | } |
| 1047 | 1046 | unlock_kernel(); | |
| 1048 | return ret; | 1047 | return ret; |
| 1049 | } | 1048 | } |
| 1050 | 1049 | ||
| @@ -1118,7 +1117,7 @@ static int cfg_release(struct inode *inode, struct file *file) | |||
| 1118 | static const struct file_operations config_fops = { | 1117 | static const struct file_operations config_fops = { |
| 1119 | .owner = THIS_MODULE, | 1118 | .owner = THIS_MODULE, |
| 1120 | .llseek = no_llseek, | 1119 | .llseek = no_llseek, |
| 1121 | .ioctl = i2o_cfg_ioctl, | 1120 | .unlocked_ioctl = i2o_cfg_ioctl, |
| 1122 | #ifdef CONFIG_COMPAT | 1121 | #ifdef CONFIG_COMPAT |
| 1123 | .compat_ioctl = i2o_cfg_compat_ioctl, | 1122 | .compat_ioctl = i2o_cfg_compat_ioctl, |
| 1124 | #endif | 1123 | #endif |
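The i2o_config hunks above move the handler from .ioctl to .unlocked_ioctl, so the VFS no longer takes the Big Kernel Lock around it; the old serialization is preserved by taking the BKL inside the handler itself. A minimal sketch of that pattern for this kernel generation (lock_kernel()/unlock_kernel() were later removed entirely):

#include <linux/fs.h>
#include <linux/smp_lock.h>

static long example_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
    long ret;

    lock_kernel();          /* keep the serialization the old .ioctl had */
    switch (cmd) {
    /* ... per-command handlers ... */
    default:
        ret = -EINVAL;
    }
    unlock_kernel();
    return ret;
}

static const struct file_operations example_fops = {
    .owner          = THIS_MODULE,
    .unlocked_ioctl = example_cfg_ioctl,
};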
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 20d29bafc9f5..9df9a5ad38f9 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c | |||
| @@ -568,12 +568,12 @@ static void twl4030_sih_do_edge(struct work_struct *work) | |||
| 568 | 568 | ||
| 569 | bytes[byte] &= ~(0x03 << off); | 569 | bytes[byte] &= ~(0x03 << off); |
| 570 | 570 | ||
| 571 | spin_lock_irq(&d->lock); | 571 | raw_spin_lock_irq(&d->lock); |
| 572 | if (d->status & IRQ_TYPE_EDGE_RISING) | 572 | if (d->status & IRQ_TYPE_EDGE_RISING) |
| 573 | bytes[byte] |= BIT(off + 1); | 573 | bytes[byte] |= BIT(off + 1); |
| 574 | if (d->status & IRQ_TYPE_EDGE_FALLING) | 574 | if (d->status & IRQ_TYPE_EDGE_FALLING) |
| 575 | bytes[byte] |= BIT(off + 0); | 575 | bytes[byte] |= BIT(off + 0); |
| 576 | spin_unlock_irq(&d->lock); | 576 | raw_spin_unlock_irq(&d->lock); |
| 577 | 577 | ||
| 578 | edge_change &= ~BIT(i); | 578 | edge_change &= ~BIT(i); |
| 579 | } | 579 | } |
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 85f0e8cd875b..1f552c6e7579 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
| @@ -85,7 +85,14 @@ static void mmc_blk_put(struct mmc_blk_data *md) | |||
| 85 | mutex_lock(&open_lock); | 85 | mutex_lock(&open_lock); |
| 86 | md->usage--; | 86 | md->usage--; |
| 87 | if (md->usage == 0) { | 87 | if (md->usage == 0) { |
| 88 | int devmaj = MAJOR(disk_devt(md->disk)); | ||
| 88 | int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; | 89 | int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; |
| 90 | |||
| 91 | if (!devmaj) | ||
| 92 | devidx = md->disk->first_minor >> MMC_SHIFT; | ||
| 93 | |||
| 94 | blk_cleanup_queue(md->queue.queue); | ||
| 95 | |||
| 89 | __clear_bit(devidx, dev_use); | 96 | __clear_bit(devidx, dev_use); |
| 90 | 97 | ||
| 91 | put_disk(md->disk); | 98 | put_disk(md->disk); |
| @@ -613,6 +620,7 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
| 613 | return 0; | 620 | return 0; |
| 614 | 621 | ||
| 615 | out: | 622 | out: |
| 623 | mmc_cleanup_queue(&md->queue); | ||
| 616 | mmc_blk_put(md); | 624 | mmc_blk_put(md); |
| 617 | 625 | ||
| 618 | return err; | 626 | return err; |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 49e582356c65..c5a7a855f4b1 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
| @@ -90,9 +90,10 @@ static void mmc_request(struct request_queue *q) | |||
| 90 | struct request *req; | 90 | struct request *req; |
| 91 | 91 | ||
| 92 | if (!mq) { | 92 | if (!mq) { |
| 93 | printk(KERN_ERR "MMC: killing requests for dead queue\n"); | 93 | while ((req = blk_fetch_request(q)) != NULL) { |
| 94 | while ((req = blk_fetch_request(q)) != NULL) | 94 | req->cmd_flags |= REQ_QUIET; |
| 95 | __blk_end_request_all(req, -EIO); | 95 | __blk_end_request_all(req, -EIO); |
| 96 | } | ||
| 96 | return; | 97 | return; |
| 97 | } | 98 | } |
| 98 | 99 | ||
| @@ -223,17 +224,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | |||
| 223 | struct request_queue *q = mq->queue; | 224 | struct request_queue *q = mq->queue; |
| 224 | unsigned long flags; | 225 | unsigned long flags; |
| 225 | 226 | ||
| 226 | /* Mark that we should start throwing out stragglers */ | ||
| 227 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 228 | q->queuedata = NULL; | ||
| 229 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 230 | |||
| 231 | /* Make sure the queue isn't suspended, as that will deadlock */ | 227 | /* Make sure the queue isn't suspended, as that will deadlock */ |
| 232 | mmc_queue_resume(mq); | 228 | mmc_queue_resume(mq); |
| 233 | 229 | ||
| 234 | /* Then terminate our worker thread */ | 230 | /* Then terminate our worker thread */ |
| 235 | kthread_stop(mq->thread); | 231 | kthread_stop(mq->thread); |
| 236 | 232 | ||
| 233 | /* Empty the queue */ | ||
| 234 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 235 | q->queuedata = NULL; | ||
| 236 | blk_start_queue(q); | ||
| 237 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 238 | |||
| 237 | if (mq->bounce_sg) | 239 | if (mq->bounce_sg) |
| 238 | kfree(mq->bounce_sg); | 240 | kfree(mq->bounce_sg); |
| 239 | mq->bounce_sg = NULL; | 241 | mq->bounce_sg = NULL; |
| @@ -245,8 +247,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | |||
| 245 | kfree(mq->bounce_buf); | 247 | kfree(mq->bounce_buf); |
| 246 | mq->bounce_buf = NULL; | 248 | mq->bounce_buf = NULL; |
| 247 | 249 | ||
| 248 | blk_cleanup_queue(mq->queue); | ||
| 249 | |||
| 250 | mq->card = NULL; | 250 | mq->card = NULL; |
| 251 | } | 251 | } |
| 252 | EXPORT_SYMBOL(mmc_cleanup_queue); | 252 | EXPORT_SYMBOL(mmc_cleanup_queue); |
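With blk_cleanup_queue() moved into mmc_blk_put(), mmc_cleanup_queue() now only detaches the queue: resume it, stop the worker thread, and only then clear queuedata and restart the queue so straggling requests are failed quietly by mmc_request(). A minimal sketch of the ordering, assuming the mmc_queue fields from the hunks above:

static void example_cleanup_queue(struct mmc_queue *mq)
{
    struct request_queue *q = mq->queue;
    unsigned long flags;

    mmc_queue_resume(mq);         /* a suspended queue would deadlock */
    kthread_stop(mq->thread);     /* worker finishes its current request */

    spin_lock_irqsave(q->queue_lock, flags);
    q->queuedata = NULL;          /* mmc_request() now errors new requests */
    blk_start_queue(q);           /* flush whatever is still queued */
    spin_unlock_irqrestore(q->queue_lock, flags);
}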
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c11189446a1f..0eac6c814904 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -207,7 +207,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; | 209 | card->ext_csd.rev = ext_csd[EXT_CSD_REV]; |
| 210 | if (card->ext_csd.rev > 3) { | 210 | if (card->ext_csd.rev > 5) { |
| 211 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " | 211 | printk(KERN_ERR "%s: unrecognised EXT_CSD structure " |
| 212 | "version %d\n", mmc_hostname(card->host), | 212 | "version %d\n", mmc_hostname(card->host), |
| 213 | card->ext_csd.rev); | 213 | card->ext_csd.rev); |
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index fbc231153e55..77cf0901a441 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c | |||
| @@ -56,6 +56,7 @@ static const char version[] = | |||
| 56 | #include <linux/errno.h> | 56 | #include <linux/errno.h> |
| 57 | #include <linux/netdevice.h> | 57 | #include <linux/netdevice.h> |
| 58 | #include <linux/etherdevice.h> | 58 | #include <linux/etherdevice.h> |
| 59 | #include <linux/if_ether.h> | ||
| 59 | #include <linux/skbuff.h> | 60 | #include <linux/skbuff.h> |
| 60 | #include <linux/slab.h> | 61 | #include <linux/slab.h> |
| 61 | #include <linux/init.h> | 62 | #include <linux/init.h> |
| @@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev) | |||
| 734 | memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); | 735 | memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); |
| 735 | 736 | ||
| 736 | /* Fill in the station address. */ | 737 | /* Fill in the station address. */ |
| 737 | memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, | 738 | memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN); |
| 738 | sizeof(dev->dev_addr)); | ||
| 739 | 739 | ||
| 740 | /* The Tx-block list is written as needed. We just set up the values. */ | 740 | /* The Tx-block list is written as needed. We just set up the values. */ |
| 741 | lp->tx_cmd_link = IDLELOOP + 4; | 741 | lp->tx_cmd_link = IDLELOOP + 4; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e58a65391ad2..dd9a09c72dff 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -2346,6 +2346,7 @@ config GELIC_NET | |||
| 2346 | 2346 | ||
| 2347 | config GELIC_WIRELESS | 2347 | config GELIC_WIRELESS |
| 2348 | bool "PS3 Wireless support" | 2348 | bool "PS3 Wireless support" |
| 2349 | depends on WLAN | ||
| 2349 | depends on GELIC_NET | 2350 | depends on GELIC_NET |
| 2350 | select WIRELESS_EXT | 2351 | select WIRELESS_EXT |
| 2351 | help | 2352 | help |
| @@ -2358,6 +2359,7 @@ config GELIC_WIRELESS | |||
| 2358 | config GELIC_WIRELESS_OLD_PSK_INTERFACE | 2359 | config GELIC_WIRELESS_OLD_PSK_INTERFACE |
| 2359 | bool "PS3 Wireless private PSK interface (OBSOLETE)" | 2360 | bool "PS3 Wireless private PSK interface (OBSOLETE)" |
| 2360 | depends on GELIC_WIRELESS | 2361 | depends on GELIC_WIRELESS |
| 2362 | select WEXT_PRIV | ||
| 2361 | help | 2363 | help |
| 2362 | This option retains the obsolete private interface to pass | 2364 | This option retains the obsolete private interface to pass |
| 2363 | the PSK from user space programs to the driver. The PSK | 2365 | the PSK from user space programs to the driver. The PSK |
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index c37ee9e6b67b..39e1c0d39476 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig | |||
| @@ -68,6 +68,7 @@ config W90P910_ETH | |||
| 68 | tristate "Nuvoton w90p910 Ethernet support" | 68 | tristate "Nuvoton w90p910 Ethernet support" |
| 69 | depends on ARM && ARCH_W90X900 | 69 | depends on ARM && ARCH_W90X900 |
| 70 | select PHYLIB | 70 | select PHYLIB |
| 71 | select MII | ||
| 71 | help | 72 | help |
| 72 | Say Y here if you want to use built-in Ethernet ports | 73 | Say Y here if you want to use built-in Ethernet ports |
| 73 | on w90p910 processor. | 74 | on w90p910 processor. |
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index c5721cb38265..cc9ed8643910 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c | |||
| @@ -663,7 +663,7 @@ static int lance_open( struct net_device *dev ) | |||
| 663 | while (--i > 0) | 663 | while (--i > 0) |
| 664 | if (DREG & CSR0_IDON) | 664 | if (DREG & CSR0_IDON) |
| 665 | break; | 665 | break; |
| 666 | if (i < 0 || (DREG & CSR0_ERR)) { | 666 | if (i <= 0 || (DREG & CSR0_ERR)) { |
| 667 | DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", | 667 | DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", |
| 668 | dev->name, i, DREG )); | 668 | dev->name, i, DREG )); |
| 669 | DREG = CSR0_STOP; | 669 | DREG = CSR0_STOP; |
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c index c0451d75cdcf..ec52529394ad 100644 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c | |||
| @@ -1959,12 +1959,15 @@ static int atl2_get_eeprom(struct net_device *netdev, | |||
| 1959 | return -ENOMEM; | 1959 | return -ENOMEM; |
| 1960 | 1960 | ||
| 1961 | for (i = first_dword; i < last_dword; i++) { | 1961 | for (i = first_dword; i < last_dword; i++) { |
| 1962 | if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) | 1962 | if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { |
| 1963 | return -EIO; | 1963 | ret_val = -EIO; |
| 1964 | goto free; | ||
| 1965 | } | ||
| 1964 | } | 1966 | } |
| 1965 | 1967 | ||
| 1966 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), | 1968 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), |
| 1967 | eeprom->len); | 1969 | eeprom->len); |
| 1970 | free: | ||
| 1968 | kfree(eeprom_buff); | 1971 | kfree(eeprom_buff); |
| 1969 | 1972 | ||
| 1970 | return ret_val; | 1973 | return ret_val; |
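The atl2 hunk above fixes a memory leak: a failed EEPROM read used to return -EIO directly, leaking eeprom_buff, whereas now every exit runs through a single free label. A minimal sketch of the single-exit pattern; example_read_word() and struct example_hw are placeholders for the driver's own helpers:

#include <linux/slab.h>
#include <linux/string.h>

static int example_read_eeprom(struct example_hw *hw, u32 first, u32 last, u8 *out)
{
    u32 *buf;
    u32 i;
    int ret = 0;

    buf = kmalloc((last - first) * sizeof(u32), GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    for (i = first; i < last; i++) {
        if (!example_read_word(hw, i * 4, &buf[i - first])) {
            ret = -EIO;
            goto free;      /* single exit: the buffer is always freed */
        }
    }
    memcpy(out, buf, (last - first) * sizeof(u32));
free:
    kfree(buf);
    return ret;
}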
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 9e56014d27ed..9fd8e5ecd5d7 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
| @@ -275,6 +275,7 @@ struct be_adapter { | |||
| 275 | u32 tx_fc; /* Tx flow control */ | 275 | u32 tx_fc; /* Tx flow control */ |
| 276 | int link_speed; | 276 | int link_speed; |
| 277 | u8 port_type; | 277 | u8 port_type; |
| 278 | u8 transceiver; | ||
| 278 | }; | 279 | }; |
| 279 | 280 | ||
| 280 | extern const struct ethtool_ops be_ethtool_ops; | 281 | extern const struct ethtool_ops be_ethtool_ops; |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 1b68bd98dc0c..102ade134165 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
| @@ -1479,6 +1479,41 @@ err: | |||
| 1479 | return status; | 1479 | return status; |
| 1480 | } | 1480 | } |
| 1481 | 1481 | ||
| 1482 | int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, | ||
| 1483 | u8 loopback_type, u8 enable) | ||
| 1484 | { | ||
| 1485 | struct be_mcc_wrb *wrb; | ||
| 1486 | struct be_cmd_req_set_lmode *req; | ||
| 1487 | int status; | ||
| 1488 | |||
| 1489 | spin_lock_bh(&adapter->mcc_lock); | ||
| 1490 | |||
| 1491 | wrb = wrb_from_mccq(adapter); | ||
| 1492 | if (!wrb) { | ||
| 1493 | status = -EBUSY; | ||
| 1494 | goto err; | ||
| 1495 | } | ||
| 1496 | |||
| 1497 | req = embedded_payload(wrb); | ||
| 1498 | |||
| 1499 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, | ||
| 1500 | OPCODE_LOWLEVEL_SET_LOOPBACK_MODE); | ||
| 1501 | |||
| 1502 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | ||
| 1503 | OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, | ||
| 1504 | sizeof(*req)); | ||
| 1505 | |||
| 1506 | req->src_port = port_num; | ||
| 1507 | req->dest_port = port_num; | ||
| 1508 | req->loopback_type = loopback_type; | ||
| 1509 | req->loopback_state = enable; | ||
| 1510 | |||
| 1511 | status = be_mcc_notify_wait(adapter); | ||
| 1512 | err: | ||
| 1513 | spin_unlock_bh(&adapter->mcc_lock); | ||
| 1514 | return status; | ||
| 1515 | } | ||
| 1516 | |||
| 1482 | int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | 1517 | int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, |
| 1483 | u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) | 1518 | u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) |
| 1484 | { | 1519 | { |
| @@ -1501,6 +1536,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
| 1501 | 1536 | ||
| 1502 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, | 1537 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, |
| 1503 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); | 1538 | OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); |
| 1539 | req->hdr.timeout = 4; | ||
| 1504 | 1540 | ||
| 1505 | req->pattern = cpu_to_le64(pattern); | 1541 | req->pattern = cpu_to_le64(pattern); |
| 1506 | req->src_port = cpu_to_le32(port_num); | 1542 | req->src_port = cpu_to_le32(port_num); |
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 92b87ef156ed..c002b8391b4d 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h | |||
| @@ -155,6 +155,7 @@ struct be_mcc_mailbox { | |||
| 155 | 155 | ||
| 156 | #define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 | 156 | #define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 |
| 157 | #define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 | 157 | #define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 |
| 158 | #define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19 | ||
| 158 | 159 | ||
| 159 | struct be_cmd_req_hdr { | 160 | struct be_cmd_req_hdr { |
| 160 | u8 opcode; /* dword 0 */ | 161 | u8 opcode; /* dword 0 */ |
| @@ -821,6 +822,19 @@ struct be_cmd_resp_loopback_test { | |||
| 821 | u32 ticks_compl; | 822 | u32 ticks_compl; |
| 822 | }; | 823 | }; |
| 823 | 824 | ||
| 825 | struct be_cmd_req_set_lmode { | ||
| 826 | struct be_cmd_req_hdr hdr; | ||
| 827 | u8 src_port; | ||
| 828 | u8 dest_port; | ||
| 829 | u8 loopback_type; | ||
| 830 | u8 loopback_state; | ||
| 831 | }; | ||
| 832 | |||
| 833 | struct be_cmd_resp_set_lmode { | ||
| 834 | struct be_cmd_resp_hdr resp_hdr; | ||
| 835 | u8 rsvd0[4]; | ||
| 836 | }; | ||
| 837 | |||
| 824 | /********************** DDR DMA test *********************/ | 838 | /********************** DDR DMA test *********************/ |
| 825 | struct be_cmd_req_ddrdma_test { | 839 | struct be_cmd_req_ddrdma_test { |
| 826 | struct be_cmd_req_hdr hdr; | 840 | struct be_cmd_req_hdr hdr; |
| @@ -912,3 +926,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, | |||
| 912 | u32 num_pkts, u64 pattern); | 926 | u32 num_pkts, u64 pattern); |
| 913 | extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, | 927 | extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, |
| 914 | u32 byte_cnt, struct be_dma_mem *cmd); | 928 | u32 byte_cnt, struct be_dma_mem *cmd); |
| 929 | extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, | ||
| 930 | u8 loopback_type, u8 enable); | ||
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 298b92cbd689..5d001c4deac1 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c | |||
| @@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = { | |||
| 118 | #define BE_MAC_LOOPBACK 0x0 | 118 | #define BE_MAC_LOOPBACK 0x0 |
| 119 | #define BE_PHY_LOOPBACK 0x1 | 119 | #define BE_PHY_LOOPBACK 0x1 |
| 120 | #define BE_ONE_PORT_EXT_LOOPBACK 0x2 | 120 | #define BE_ONE_PORT_EXT_LOOPBACK 0x2 |
| 121 | #define BE_NO_LOOPBACK 0xff | ||
| 121 | 122 | ||
| 122 | static void | 123 | static void |
| 123 | be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | 124 | be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) |
| @@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 339 | 340 | ||
| 340 | status = be_cmd_read_port_type(adapter, adapter->port_num, | 341 | status = be_cmd_read_port_type(adapter, adapter->port_num, |
| 341 | &connector); | 342 | &connector); |
| 342 | switch (connector) { | 343 | if (!status) { |
| 343 | case 7: | 344 | switch (connector) { |
| 344 | ecmd->port = PORT_FIBRE; | 345 | case 7: |
| 345 | break; | 346 | ecmd->port = PORT_FIBRE; |
| 346 | default: | 347 | ecmd->transceiver = XCVR_EXTERNAL; |
| 347 | ecmd->port = PORT_TP; | 348 | break; |
| 348 | break; | 349 | case 0: |
| 350 | ecmd->port = PORT_TP; | ||
| 351 | ecmd->transceiver = XCVR_EXTERNAL; | ||
| 352 | break; | ||
| 353 | default: | ||
| 354 | ecmd->port = PORT_TP; | ||
| 355 | ecmd->transceiver = XCVR_INTERNAL; | ||
| 356 | break; | ||
| 357 | } | ||
| 358 | } else { | ||
| 359 | ecmd->port = PORT_AUI; | ||
| 360 | ecmd->transceiver = XCVR_INTERNAL; | ||
| 349 | } | 361 | } |
| 350 | 362 | ||
| 351 | /* Save for future use */ | 363 | /* Save for future use */ |
| 352 | adapter->link_speed = ecmd->speed; | 364 | adapter->link_speed = ecmd->speed; |
| 353 | adapter->port_type = ecmd->port; | 365 | adapter->port_type = ecmd->port; |
| 366 | adapter->transceiver = ecmd->transceiver; | ||
| 354 | } else { | 367 | } else { |
| 355 | ecmd->speed = adapter->link_speed; | 368 | ecmd->speed = adapter->link_speed; |
| 356 | ecmd->port = adapter->port_type; | 369 | ecmd->port = adapter->port_type; |
| 370 | ecmd->transceiver = adapter->transceiver; | ||
| 357 | } | 371 | } |
| 358 | 372 | ||
| 359 | ecmd->duplex = DUPLEX_FULL; | 373 | ecmd->duplex = DUPLEX_FULL; |
| 360 | ecmd->autoneg = AUTONEG_DISABLE; | 374 | ecmd->autoneg = AUTONEG_DISABLE; |
| 361 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); | ||
| 362 | ecmd->phy_address = adapter->port_num; | 375 | ecmd->phy_address = adapter->port_num; |
| 363 | ecmd->transceiver = XCVR_INTERNAL; | 376 | switch (ecmd->port) { |
| 377 | case PORT_FIBRE: | ||
| 378 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); | ||
| 379 | break; | ||
| 380 | case PORT_TP: | ||
| 381 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); | ||
| 382 | break; | ||
| 383 | case PORT_AUI: | ||
| 384 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI); | ||
| 385 | break; | ||
| 386 | } | ||
| 364 | 387 | ||
| 365 | return 0; | 388 | return 0; |
| 366 | } | 389 | } |
| @@ -489,6 +512,19 @@ err: | |||
| 489 | return ret; | 512 | return ret; |
| 490 | } | 513 | } |
| 491 | 514 | ||
| 515 | static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, | ||
| 516 | u64 *status) | ||
| 517 | { | ||
| 518 | be_cmd_set_loopback(adapter, adapter->port_num, | ||
| 519 | loopback_type, 1); | ||
| 520 | *status = be_cmd_loopback_test(adapter, adapter->port_num, | ||
| 521 | loopback_type, 1500, | ||
| 522 | 2, 0xabc); | ||
| 523 | be_cmd_set_loopback(adapter, adapter->port_num, | ||
| 524 | BE_NO_LOOPBACK, 1); | ||
| 525 | return *status; | ||
| 526 | } | ||
| 527 | |||
| 492 | static void | 528 | static void |
| 493 | be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) | 529 | be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) |
| 494 | { | 530 | { |
| @@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) | |||
| 497 | memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); | 533 | memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); |
| 498 | 534 | ||
| 499 | if (test->flags & ETH_TEST_FL_OFFLINE) { | 535 | if (test->flags & ETH_TEST_FL_OFFLINE) { |
| 500 | data[0] = be_cmd_loopback_test(adapter, adapter->port_num, | 536 | if (be_loopback_test(adapter, BE_MAC_LOOPBACK, |
| 501 | BE_MAC_LOOPBACK, 1500, | 537 | &data[0]) != 0) { |
| 502 | 2, 0xabc); | ||
| 503 | if (data[0] != 0) | ||
| 504 | test->flags |= ETH_TEST_FL_FAILED; | 538 | test->flags |= ETH_TEST_FL_FAILED; |
| 505 | 539 | } | |
| 506 | data[1] = be_cmd_loopback_test(adapter, adapter->port_num, | 540 | if (be_loopback_test(adapter, BE_PHY_LOOPBACK, |
| 507 | BE_PHY_LOOPBACK, 1500, | 541 | &data[1]) != 0) { |
| 508 | 2, 0xabc); | ||
| 509 | if (data[1] != 0) | ||
| 510 | test->flags |= ETH_TEST_FL_FAILED; | 542 | test->flags |= ETH_TEST_FL_FAILED; |
| 511 | 543 | } | |
| 512 | data[2] = be_cmd_loopback_test(adapter, adapter->port_num, | 544 | if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, |
| 513 | BE_ONE_PORT_EXT_LOOPBACK, | 545 | &data[2]) != 0) { |
| 514 | 1500, 2, 0xabc); | ||
| 515 | if (data[2] != 0) | ||
| 516 | test->flags |= ETH_TEST_FL_FAILED; | 546 | test->flags |= ETH_TEST_FL_FAILED; |
| 547 | } | ||
| 517 | 548 | ||
| 518 | data[3] = be_test_ddr_dma(adapter); | 549 | data[3] = be_test_ddr_dma(adapter); |
| 519 | if (data[3] != 0) | 550 | if (data[3] != 0) |
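The be_ethtool hunks above route every offline self-test through one helper that enables the requested loopback mode in firmware, runs the packet test, and always restores BE_NO_LOOPBACK before reporting the result. A minimal sketch of the wrapper, using the command helpers declared in be_cmds.h above:

static u64 example_loopback_test(struct be_adapter *adapter, u8 loopback_type,
                 u64 *status)
{
    be_cmd_set_loopback(adapter, adapter->port_num, loopback_type, 1);
    *status = be_cmd_loopback_test(adapter, adapter->port_num,
                       loopback_type, 1500, 2, 0xabc);
    /* Always leave the port out of loopback, pass or fail. */
    be_cmd_set_loopback(adapter, adapter->port_num, BE_NO_LOOPBACK, 1);
    return *status;
}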
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 77ba13520d87..306c2b8165e2 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
| @@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
| 7593 | if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { | 7593 | if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { |
| 7594 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | 7594 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); |
| 7595 | bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; | 7595 | bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; |
| 7596 | bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, | ||
| 7597 | CNIC_SB_ID(bp)); | ||
| 7596 | } | 7598 | } |
| 7597 | mutex_unlock(&bp->cnic_mutex); | 7599 | mutex_unlock(&bp->cnic_mutex); |
| 7598 | #endif | 7600 | #endif |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 0fb7a4964e75..822f586d72af 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
| @@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg) | |||
| 1580 | // check if any partner replys | 1580 | // check if any partner replys |
| 1581 | if (best->is_individual) { | 1581 | if (best->is_individual) { |
| 1582 | pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", | 1582 | pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", |
| 1583 | best->slave->dev->master->name); | 1583 | best->slave ? best->slave->dev->master->name : "NULL"); |
| 1584 | } | 1584 | } |
| 1585 | 1585 | ||
| 1586 | best->is_active = 1; | 1586 | best->is_active = 1; |
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 9c5a1537939c..1a72ca066a17 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
| @@ -990,7 +990,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi) | |||
| 990 | goto error_tx_buf; | 990 | goto error_tx_buf; |
| 991 | } | 991 | } |
| 992 | priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL); | 992 | priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL); |
| 993 | if (!priv->spi_tx_buf) { | 993 | if (!priv->spi_rx_buf) { |
| 994 | ret = -ENOMEM; | 994 | ret = -ENOMEM; |
| 995 | goto error_rx_buf; | 995 | goto error_rx_buf; |
| 996 | } | 996 | } |
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index af9321617ce4..0e79cef95c0a 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c | |||
| @@ -1325,8 +1325,7 @@ net_open(struct net_device *dev) | |||
| 1325 | write_irq(dev, lp->chip_type, dev->irq); | 1325 | write_irq(dev, lp->chip_type, dev->irq); |
| 1326 | ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); | 1326 | ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); |
| 1327 | if (ret) { | 1327 | if (ret) { |
| 1328 | if (net_debug) | 1328 | printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq); |
| 1329 | printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq); | ||
| 1330 | goto bad_out; | 1329 | goto bad_out; |
| 1331 | } | 1330 | } |
| 1332 | } | 1331 | } |
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 34e03104c3c1..33c4fe26178c 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
| @@ -2711,6 +2711,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
| 2711 | SET_ETHTOOL_OPS(ndev, &ethtool_ops); | 2711 | SET_ETHTOOL_OPS(ndev, &ethtool_ops); |
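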
| 2712 | netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); | 2712 | netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); |
| 2713 | 2713 | ||
| 2714 | clk_enable(emac_clk); | ||
| 2715 | |||
| 2714 | /* register the network device */ | 2716 | /* register the network device */ |
| 2715 | SET_NETDEV_DEV(ndev, &pdev->dev); | 2717 | SET_NETDEV_DEV(ndev, &pdev->dev); |
| 2716 | rc = register_netdev(ndev); | 2718 | rc = register_netdev(ndev); |
| @@ -2720,7 +2722,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
| 2720 | goto netdev_reg_err; | 2722 | goto netdev_reg_err; |
| 2721 | } | 2723 | } |
| 2722 | 2724 | ||
| 2723 | clk_enable(emac_clk); | ||
| 2724 | 2725 | ||
| 2725 | /* MII/Phy intialisation, mdio bus registration */ | 2726 | /* MII/Phy intialisation, mdio bus registration */ |
| 2726 | emac_mii = mdiobus_alloc(); | 2727 | emac_mii = mdiobus_alloc(); |
| @@ -2760,6 +2761,7 @@ mdiobus_quit: | |||
| 2760 | 2761 | ||
| 2761 | netdev_reg_err: | 2762 | netdev_reg_err: |
| 2762 | mdio_alloc_err: | 2763 | mdio_alloc_err: |
| 2764 | clk_disable(emac_clk); | ||
| 2763 | no_irq_res: | 2765 | no_irq_res: |
| 2764 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2766 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 2765 | release_mem_region(res->start, res->end - res->start + 1); | 2767 | release_mem_region(res->start, res->end - res->start + 1); |
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index b979464091bb..02d67d047d96 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
| @@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |||
| 237 | /* Set if manageability features are enabled. */ | 237 | /* Set if manageability features are enabled. */ |
| 238 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) | 238 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) |
| 239 | ? true : false; | 239 | ? true : false; |
| 240 | /* Adaptive IFS supported */ | ||
| 241 | mac->adaptive_ifs = true; | ||
| 240 | 242 | ||
| 241 | /* check for link */ | 243 | /* check for link */ |
| 242 | switch (hw->phy.media_type) { | 244 | switch (hw->phy.media_type) { |
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index 3028f23da891..e2aa3b788564 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
| @@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |||
| 224 | /* Set if manageability features are enabled. */ | 224 | /* Set if manageability features are enabled. */ |
| 225 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) | 225 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) |
| 226 | ? true : false; | 226 | ? true : false; |
| 227 | /* Adaptive IFS not supported */ | ||
| 228 | mac->adaptive_ifs = false; | ||
| 227 | 229 | ||
| 228 | /* check for link */ | 230 | /* check for link */ |
| 229 | switch (hw->phy.media_type) { | 231 | switch (hw->phy.media_type) { |
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 2784cf44a6f3..eccf29b75c41 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
| @@ -818,6 +818,7 @@ struct e1000_mac_info { | |||
| 818 | 818 | ||
| 819 | u8 forced_speed_duplex; | 819 | u8 forced_speed_duplex; |
| 820 | 820 | ||
| 821 | bool adaptive_ifs; | ||
| 821 | bool arc_subsystem_valid; | 822 | bool arc_subsystem_valid; |
| 822 | bool autoneg; | 823 | bool autoneg; |
| 823 | bool autoneg_failed; | 824 | bool autoneg_failed; |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 9b09246af064..ad08cf3f40c0 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
| @@ -454,6 +454,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
| 454 | mac->rar_entry_count--; | 454 | mac->rar_entry_count--; |
| 455 | /* Set if manageability features are enabled. */ | 455 | /* Set if manageability features are enabled. */ |
| 456 | mac->arc_subsystem_valid = true; | 456 | mac->arc_subsystem_valid = true; |
| 457 | /* Adaptive IFS supported */ | ||
| 458 | mac->adaptive_ifs = true; | ||
| 457 | 459 | ||
| 458 | /* LED operations */ | 460 | /* LED operations */ |
| 459 | switch (mac->type) { | 461 | switch (mac->type) { |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index a86c17548c1e..2fa9b36a2c5a 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
| @@ -125,6 +125,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) | |||
| 125 | void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | 125 | void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) |
| 126 | { | 126 | { |
| 127 | u32 i; | 127 | u32 i; |
| 128 | u8 mac_addr[ETH_ALEN] = {0}; | ||
| 128 | 129 | ||
| 129 | /* Setup the receive address */ | 130 | /* Setup the receive address */ |
| 130 | e_dbg("Programming MAC Address into RAR[0]\n"); | 131 | e_dbg("Programming MAC Address into RAR[0]\n"); |
| @@ -133,12 +134,8 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) | |||
| 133 | 134 | ||
| 134 | /* Zero out the other (rar_entry_count - 1) receive addresses */ | 135 | /* Zero out the other (rar_entry_count - 1) receive addresses */ |
| 135 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); | 136 | e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
| 136 | for (i = 1; i < rar_count; i++) { | 137 | for (i = 1; i < rar_count; i++) |
| 137 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); | 138 | e1000e_rar_set(hw, mac_addr, i); |
| 138 | e1e_flush(); | ||
| 139 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); | ||
| 140 | e1e_flush(); | ||
| 141 | } | ||
| 142 | } | 139 | } |
| 143 | 140 | ||
| 144 | /** | 141 | /** |
| @@ -164,10 +161,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
| 164 | 161 | ||
| 165 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); | 162 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); |
| 166 | 163 | ||
| 167 | rar_high |= E1000_RAH_AV; | 164 | /* If MAC address zero, no need to set the AV bit */ |
| 165 | if (rar_low || rar_high) | ||
| 166 | rar_high |= E1000_RAH_AV; | ||
| 168 | 167 | ||
| 169 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); | 168 | /* |
| 170 | E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); | 169 | * Some bridges will combine consecutive 32-bit writes into |
| 170 | * a single burst write, which will malfunction on some parts. | ||
| 171 | * The flushes avoid this. | ||
| 172 | */ | ||
| 173 | ew32(RAL(index), rar_low); | ||
| 174 | e1e_flush(); | ||
| 175 | ew32(RAH(index), rar_high); | ||
| 176 | e1e_flush(); | ||
| 171 | } | 177 | } |
| 172 | 178 | ||
| 173 | /** | 179 | /** |
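The e1000e_rar_set() hunk above replaces the array writes with two separate register writes, each followed by e1e_flush(), because some PCI bridges merge back-to-back 32-bit writes into a single burst that some MACs mishandle; the AV bit is also left clear for an all-zero address so cleared RAR slots stay invalid. A minimal sketch, assuming the ew32()/e1e_flush() accessors and register macros from e1000e:

static void example_rar_set(struct e1000_hw *hw, const u8 *addr, u32 index)
{
    u32 rar_low  = (u32)addr[0] | ((u32)addr[1] << 8) |
               ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
    u32 rar_high = (u32)addr[4] | ((u32)addr[5] << 8);

    if (rar_low || rar_high)
        rar_high |= E1000_RAH_AV;   /* mark valid only for a non-zero MAC */

    ew32(RAL(index), rar_low);
    e1e_flush();                    /* keep the two writes from being fused */
    ew32(RAH(index), rar_high);
    e1e_flush();
}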
| @@ -1609,6 +1615,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) | |||
| 1609 | { | 1615 | { |
| 1610 | struct e1000_mac_info *mac = &hw->mac; | 1616 | struct e1000_mac_info *mac = &hw->mac; |
| 1611 | 1617 | ||
| 1618 | if (!mac->adaptive_ifs) { | ||
| 1619 | e_dbg("Not in Adaptive IFS mode!\n"); | ||
| 1620 | goto out; | ||
| 1621 | } | ||
| 1622 | |||
| 1612 | mac->current_ifs_val = 0; | 1623 | mac->current_ifs_val = 0; |
| 1613 | mac->ifs_min_val = IFS_MIN; | 1624 | mac->ifs_min_val = IFS_MIN; |
| 1614 | mac->ifs_max_val = IFS_MAX; | 1625 | mac->ifs_max_val = IFS_MAX; |
| @@ -1617,6 +1628,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) | |||
| 1617 | 1628 | ||
| 1618 | mac->in_ifs_mode = false; | 1629 | mac->in_ifs_mode = false; |
| 1619 | ew32(AIT, 0); | 1630 | ew32(AIT, 0); |
| 1631 | out: | ||
| 1632 | return; | ||
| 1620 | } | 1633 | } |
| 1621 | 1634 | ||
| 1622 | /** | 1635 | /** |
| @@ -1630,6 +1643,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
| 1630 | { | 1643 | { |
| 1631 | struct e1000_mac_info *mac = &hw->mac; | 1644 | struct e1000_mac_info *mac = &hw->mac; |
| 1632 | 1645 | ||
| 1646 | if (!mac->adaptive_ifs) { | ||
| 1647 | e_dbg("Not in Adaptive IFS mode!\n"); | ||
| 1648 | goto out; | ||
| 1649 | } | ||
| 1650 | |||
| 1633 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { | 1651 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { |
| 1634 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { | 1652 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { |
| 1635 | mac->in_ifs_mode = true; | 1653 | mac->in_ifs_mode = true; |
| @@ -1650,6 +1668,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
| 1650 | ew32(AIT, 0); | 1668 | ew32(AIT, 0); |
| 1651 | } | 1669 | } |
| 1652 | } | 1670 | } |
| 1671 | out: | ||
| 1672 | return; | ||
| 1653 | } | 1673 | } |
| 1654 | 1674 | ||
| 1655 | /** | 1675 | /** |
| @@ -2287,10 +2307,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
| 2287 | s32 ret_val, hdr_csum, csum; | 2307 | s32 ret_val, hdr_csum, csum; |
| 2288 | u8 i, len; | 2308 | u8 i, len; |
| 2289 | 2309 | ||
| 2310 | hw->mac.tx_pkt_filtering = true; | ||
| 2311 | |||
| 2290 | /* No manageability, no filtering */ | 2312 | /* No manageability, no filtering */ |
| 2291 | if (!e1000e_check_mng_mode(hw)) { | 2313 | if (!e1000e_check_mng_mode(hw)) { |
| 2292 | hw->mac.tx_pkt_filtering = false; | 2314 | hw->mac.tx_pkt_filtering = false; |
| 2293 | return 0; | 2315 | goto out; |
| 2294 | } | 2316 | } |
| 2295 | 2317 | ||
| 2296 | /* | 2318 | /* |
| @@ -2298,9 +2320,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
| 2298 | * reason, disable filtering. | 2320 | * reason, disable filtering. |
| 2299 | */ | 2321 | */ |
| 2300 | ret_val = e1000_mng_enable_host_if(hw); | 2322 | ret_val = e1000_mng_enable_host_if(hw); |
| 2301 | if (ret_val != 0) { | 2323 | if (ret_val) { |
| 2302 | hw->mac.tx_pkt_filtering = false; | 2324 | hw->mac.tx_pkt_filtering = false; |
| 2303 | return ret_val; | 2325 | goto out; |
| 2304 | } | 2326 | } |
| 2305 | 2327 | ||
| 2306 | /* Read in the header. Length and offset are in dwords. */ | 2328 | /* Read in the header. Length and offset are in dwords. */ |
| @@ -2319,17 +2341,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
| 2319 | */ | 2341 | */ |
| 2320 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { | 2342 | if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { |
| 2321 | hw->mac.tx_pkt_filtering = true; | 2343 | hw->mac.tx_pkt_filtering = true; |
| 2322 | return 1; | 2344 | goto out; |
| 2323 | } | 2345 | } |
| 2324 | 2346 | ||
| 2325 | /* Cookie area is valid, make the final check for filtering. */ | 2347 | /* Cookie area is valid, make the final check for filtering. */ |
| 2326 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { | 2348 | if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { |
| 2327 | hw->mac.tx_pkt_filtering = false; | 2349 | hw->mac.tx_pkt_filtering = false; |
| 2328 | return 0; | 2350 | goto out; |
| 2329 | } | 2351 | } |
| 2330 | 2352 | ||
| 2331 | hw->mac.tx_pkt_filtering = true; | 2353 | out: |
| 2332 | return 1; | 2354 | return hw->mac.tx_pkt_filtering; |
| 2333 | } | 2355 | } |
| 2334 | 2356 | ||
| 2335 | /** | 2357 | /** |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 762b697ce731..c45965a256b6 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
| @@ -3315,24 +3315,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
| 3315 | if ((hw->phy.type == e1000_phy_82578) || | 3315 | if ((hw->phy.type == e1000_phy_82578) || |
| 3316 | (hw->phy.type == e1000_phy_82577)) { | 3316 | (hw->phy.type == e1000_phy_82577)) { |
| 3317 | e1e_rphy(hw, HV_SCC_UPPER, &phy_data); | 3317 | e1e_rphy(hw, HV_SCC_UPPER, &phy_data); |
| 3318 | e1e_rphy(hw, HV_SCC_LOWER, &phy_data); | 3318 | if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data)) |
| 3319 | adapter->stats.scc += phy_data; | 3319 | adapter->stats.scc += phy_data; |
| 3320 | 3320 | ||
| 3321 | e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); | 3321 | e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); |
| 3322 | e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); | 3322 | if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data)) |
| 3323 | adapter->stats.ecol += phy_data; | 3323 | adapter->stats.ecol += phy_data; |
| 3324 | 3324 | ||
| 3325 | e1e_rphy(hw, HV_MCC_UPPER, &phy_data); | 3325 | e1e_rphy(hw, HV_MCC_UPPER, &phy_data); |
| 3326 | e1e_rphy(hw, HV_MCC_LOWER, &phy_data); | 3326 | if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data)) |
| 3327 | adapter->stats.mcc += phy_data; | 3327 | adapter->stats.mcc += phy_data; |
| 3328 | 3328 | ||
| 3329 | e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); | 3329 | e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); |
| 3330 | e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); | 3330 | if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data)) |
| 3331 | adapter->stats.latecol += phy_data; | 3331 | adapter->stats.latecol += phy_data; |
| 3332 | 3332 | ||
| 3333 | e1e_rphy(hw, HV_DC_UPPER, &phy_data); | 3333 | e1e_rphy(hw, HV_DC_UPPER, &phy_data); |
| 3334 | e1e_rphy(hw, HV_DC_LOWER, &phy_data); | 3334 | if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data)) |
| 3335 | adapter->stats.dc += phy_data; | 3335 | adapter->stats.dc += phy_data; |
| 3336 | } else { | 3336 | } else { |
| 3337 | adapter->stats.scc += er32(SCC); | 3337 | adapter->stats.scc += er32(SCC); |
| 3338 | adapter->stats.ecol += er32(ECOL); | 3338 | adapter->stats.ecol += er32(ECOL); |
| @@ -3360,8 +3360,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
| 3360 | if ((hw->phy.type == e1000_phy_82578) || | 3360 | if ((hw->phy.type == e1000_phy_82578) || |
| 3361 | (hw->phy.type == e1000_phy_82577)) { | 3361 | (hw->phy.type == e1000_phy_82577)) { |
| 3362 | e1e_rphy(hw, HV_COLC_UPPER, &phy_data); | 3362 | e1e_rphy(hw, HV_COLC_UPPER, &phy_data); |
| 3363 | e1e_rphy(hw, HV_COLC_LOWER, &phy_data); | 3363 | if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data)) |
| 3364 | hw->mac.collision_delta = phy_data; | 3364 | hw->mac.collision_delta = phy_data; |
| 3365 | } else { | 3365 | } else { |
| 3366 | hw->mac.collision_delta = er32(COLC); | 3366 | hw->mac.collision_delta = er32(COLC); |
| 3367 | } | 3367 | } |
| @@ -3372,8 +3372,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
| 3372 | if ((hw->phy.type == e1000_phy_82578) || | 3372 | if ((hw->phy.type == e1000_phy_82578) || |
| 3373 | (hw->phy.type == e1000_phy_82577)) { | 3373 | (hw->phy.type == e1000_phy_82577)) { |
| 3374 | e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); | 3374 | e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); |
| 3375 | e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data); | 3375 | if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data)) |
| 3376 | adapter->stats.tncrs += phy_data; | 3376 | adapter->stats.tncrs += phy_data; |
| 3377 | } else { | 3377 | } else { |
| 3378 | if ((hw->mac.type != e1000_82574) && | 3378 | if ((hw->mac.type != e1000_82574) && |
| 3379 | (hw->mac.type != e1000_82583)) | 3379 | (hw->mac.type != e1000_82583)) |
| @@ -4674,6 +4674,7 @@ static int e1000_resume(struct pci_dev *pdev) | |||
| 4674 | 4674 | ||
| 4675 | pci_set_power_state(pdev, PCI_D0); | 4675 | pci_set_power_state(pdev, PCI_D0); |
| 4676 | pci_restore_state(pdev); | 4676 | pci_restore_state(pdev); |
| 4677 | pci_save_state(pdev); | ||
| 4677 | e1000e_disable_l1aspm(pdev); | 4678 | e1000e_disable_l1aspm(pdev); |
| 4678 | 4679 | ||
| 4679 | err = pci_enable_device_mem(pdev); | 4680 | err = pci_enable_device_mem(pdev); |
| @@ -4825,6 +4826,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
| 4825 | } else { | 4826 | } else { |
| 4826 | pci_set_master(pdev); | 4827 | pci_set_master(pdev); |
| 4827 | pci_restore_state(pdev); | 4828 | pci_restore_state(pdev); |
| 4829 | pci_save_state(pdev); | ||
| 4828 | 4830 | ||
| 4829 | pci_enable_wake(pdev, PCI_D3hot, 0); | 4831 | pci_enable_wake(pdev, PCI_D3hot, 0); |
| 4830 | pci_enable_wake(pdev, PCI_D3cold, 0); | 4832 | pci_enable_wake(pdev, PCI_D3cold, 0); |
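Two independent fixes land in netdev.c above: the PHY-based statistics for 82577/82578 are now accumulated only when the read of the LOWER half register succeeds, and the resume/slot-reset paths call pci_save_state() right after pci_restore_state() (the ixgbe hunk further down spells out the reason: pci_restore_state() clears dev->state_saved). A small sketch of the guarded-accumulation idea; read_reg16() is a made-up stand-in for the driver's PHY read helper and returns 0 on success:

#include <stdint.h>
#include <stdio.h>

/* Stand-in 16-bit register read: 0 on success, value stored through *val. */
static int read_reg16(unsigned int reg, uint16_t *val)
{
        *val = (uint16_t)(reg * 3);           /* fake data, illustrative only */
        return 0;
}

/* Add the counter to the running total only when the read that delivers
 * it succeeds, so an I/O error cannot add a stale or garbage value. */
static void update_counter(unsigned int upper, unsigned int lower, uint64_t *total)
{
        uint16_t val = 0;

        read_reg16(upper, &val);              /* upper half still read; result not checked */
        if (!read_reg16(lower, &val))         /* trust val only if this read worked */
                *total += val;
}

int main(void)
{
        uint64_t scc = 0;

        update_counter(0x10, 0x11, &scc);
        printf("scc=%llu\n", (unsigned long long)scc);
        return 0;
}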
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index 25fabb3eedc5..d5160edf2fcf 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c | |||
| @@ -46,6 +46,11 @@ | |||
| 46 | #include "gianfar.h" | 46 | #include "gianfar.h" |
| 47 | #include "fsl_pq_mdio.h" | 47 | #include "fsl_pq_mdio.h" |
| 48 | 48 | ||
| 49 | struct fsl_pq_mdio_priv { | ||
| 50 | void __iomem *map; | ||
| 51 | struct fsl_pq_mdio __iomem *regs; | ||
| 52 | }; | ||
| 53 | |||
| 49 | /* | 54 | /* |
| 50 | * Write value to the PHY at mii_id at register regnum, | 55 | * Write value to the PHY at mii_id at register regnum, |
| 51 | * on the bus attached to the local interface, which may be different from the | 56 | * on the bus attached to the local interface, which may be different from the |
| @@ -105,7 +110,9 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, | |||
| 105 | 110 | ||
| 106 | static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) | 111 | static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) |
| 107 | { | 112 | { |
| 108 | return (void __iomem __force *)bus->priv; | 113 | struct fsl_pq_mdio_priv *priv = bus->priv; |
| 114 | |||
| 115 | return priv->regs; | ||
| 109 | } | 116 | } |
| 110 | 117 | ||
| 111 | /* | 118 | /* |
| @@ -266,6 +273,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
| 266 | { | 273 | { |
| 267 | struct device_node *np = ofdev->node; | 274 | struct device_node *np = ofdev->node; |
| 268 | struct device_node *tbi; | 275 | struct device_node *tbi; |
| 276 | struct fsl_pq_mdio_priv *priv; | ||
| 269 | struct fsl_pq_mdio __iomem *regs = NULL; | 277 | struct fsl_pq_mdio __iomem *regs = NULL; |
| 270 | void __iomem *map; | 278 | void __iomem *map; |
| 271 | u32 __iomem *tbipa; | 279 | u32 __iomem *tbipa; |
| @@ -274,14 +282,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
| 274 | u64 addr = 0, size = 0; | 282 | u64 addr = 0, size = 0; |
| 275 | int err = 0; | 283 | int err = 0; |
| 276 | 284 | ||
| 285 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
| 286 | if (!priv) | ||
| 287 | return -ENOMEM; | ||
| 288 | |||
| 277 | new_bus = mdiobus_alloc(); | 289 | new_bus = mdiobus_alloc(); |
| 278 | if (NULL == new_bus) | 290 | if (NULL == new_bus) |
| 279 | return -ENOMEM; | 291 | goto err_free_priv; |
| 280 | 292 | ||
| 281 | new_bus->name = "Freescale PowerQUICC MII Bus", | 293 | new_bus->name = "Freescale PowerQUICC MII Bus", |
| 282 | new_bus->read = &fsl_pq_mdio_read, | 294 | new_bus->read = &fsl_pq_mdio_read, |
| 283 | new_bus->write = &fsl_pq_mdio_write, | 295 | new_bus->write = &fsl_pq_mdio_write, |
| 284 | new_bus->reset = &fsl_pq_mdio_reset, | 296 | new_bus->reset = &fsl_pq_mdio_reset, |
| 297 | new_bus->priv = priv; | ||
| 285 | fsl_pq_mdio_bus_name(new_bus->id, np); | 298 | fsl_pq_mdio_bus_name(new_bus->id, np); |
| 286 | 299 | ||
| 287 | /* Set the PHY base address */ | 300 | /* Set the PHY base address */ |
| @@ -291,6 +304,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
| 291 | err = -ENOMEM; | 304 | err = -ENOMEM; |
| 292 | goto err_free_bus; | 305 | goto err_free_bus; |
| 293 | } | 306 | } |
| 307 | priv->map = map; | ||
| 294 | 308 | ||
| 295 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || | 309 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || |
| 296 | of_device_is_compatible(np, "fsl,gianfar-tbi") || | 310 | of_device_is_compatible(np, "fsl,gianfar-tbi") || |
| @@ -298,8 +312,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
| 298 | of_device_is_compatible(np, "ucc_geth_phy")) | 312 | of_device_is_compatible(np, "ucc_geth_phy")) |
| 299 | map -= offsetof(struct fsl_pq_mdio, miimcfg); | 313 | map -= offsetof(struct fsl_pq_mdio, miimcfg); |
| 300 | regs = map; | 314 | regs = map; |
| 301 | 315 | priv->regs = regs; | |
| 302 | new_bus->priv = (void __force *)regs; | ||
| 303 | 316 | ||
| 304 | new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); | 317 | new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); |
| 305 | 318 | ||
| @@ -392,10 +405,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev, | |||
| 392 | err_free_irqs: | 405 | err_free_irqs: |
| 393 | kfree(new_bus->irq); | 406 | kfree(new_bus->irq); |
| 394 | err_unmap_regs: | 407 | err_unmap_regs: |
| 395 | iounmap(regs); | 408 | iounmap(priv->map); |
| 396 | err_free_bus: | 409 | err_free_bus: |
| 397 | kfree(new_bus); | 410 | kfree(new_bus); |
| 398 | 411 | err_free_priv: | |
| 412 | kfree(priv); | ||
| 399 | return err; | 413 | return err; |
| 400 | } | 414 | } |
| 401 | 415 | ||
| @@ -404,14 +418,16 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev) | |||
| 404 | { | 418 | { |
| 405 | struct device *device = &ofdev->dev; | 419 | struct device *device = &ofdev->dev; |
| 406 | struct mii_bus *bus = dev_get_drvdata(device); | 420 | struct mii_bus *bus = dev_get_drvdata(device); |
| 421 | struct fsl_pq_mdio_priv *priv = bus->priv; | ||
| 407 | 422 | ||
| 408 | mdiobus_unregister(bus); | 423 | mdiobus_unregister(bus); |
| 409 | 424 | ||
| 410 | dev_set_drvdata(device, NULL); | 425 | dev_set_drvdata(device, NULL); |
| 411 | 426 | ||
| 412 | iounmap(fsl_pq_mdio_get_regs(bus)); | 427 | iounmap(priv->map); |
| 413 | bus->priv = NULL; | 428 | bus->priv = NULL; |
| 414 | mdiobus_free(bus); | 429 | mdiobus_free(bus); |
| 430 | kfree(priv); | ||
| 415 | 431 | ||
| 416 | return 0; | 432 | return 0; |
| 417 | } | 433 | } |
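The fsl_pq_mdio change replaces the old trick of stuffing the (possibly offset-adjusted) register pointer straight into bus->priv with a small private structure that remembers both the original ioremap() cookie (map) and the adjusted view (regs), so iounmap() in the error and remove paths always gets the pointer that was actually mapped. A userspace sketch of the same idea, with malloc()/free() standing in for ioremap()/iounmap():

#include <stdlib.h>

/* Keep both the original mapping and the adjusted register pointer, so
 * teardown releases exactly what was allocated. */
struct bus_priv {
        void *map;    /* what the mapping call actually returned */
        void *regs;   /* offset-adjusted view used at run time */
};

struct bus_priv *bus_priv_create(size_t size, size_t offset)
{
        struct bus_priv *priv = calloc(1, sizeof(*priv));

        if (!priv)
                return NULL;

        priv->map = malloc(size);                  /* stand-in for ioremap() */
        if (!priv->map) {
                free(priv);
                return NULL;
        }
        priv->regs = (char *)priv->map + offset;   /* adjusted view, never freed directly */
        return priv;
}

void bus_priv_destroy(struct bus_priv *priv)
{
        free(priv->map);   /* always the original cookie, not priv->regs */
        free(priv);
}

In the driver the adjustment can even move the pointer backwards (map -= offsetof(struct fsl_pq_mdio, miimcfg)), which is exactly why unmapping the adjusted regs pointer would be wrong.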
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index e0620d084644..8bd3c9f17532 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
| @@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev); | |||
| 143 | static void gfar_clear_exact_match(struct net_device *dev); | 143 | static void gfar_clear_exact_match(struct net_device *dev); |
| 144 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | 144 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); |
| 145 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 145 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
| 146 | u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb); | ||
| 147 | 146 | ||
| 148 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | 147 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
| 149 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | 148 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); |
| @@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = { | |||
| 455 | .ndo_set_multicast_list = gfar_set_multi, | 454 | .ndo_set_multicast_list = gfar_set_multi, |
| 456 | .ndo_tx_timeout = gfar_timeout, | 455 | .ndo_tx_timeout = gfar_timeout, |
| 457 | .ndo_do_ioctl = gfar_ioctl, | 456 | .ndo_do_ioctl = gfar_ioctl, |
| 458 | .ndo_select_queue = gfar_select_queue, | ||
| 459 | .ndo_get_stats = gfar_get_stats, | 457 | .ndo_get_stats = gfar_get_stats, |
| 460 | .ndo_vlan_rx_register = gfar_vlan_rx_register, | 458 | .ndo_vlan_rx_register = gfar_vlan_rx_register, |
| 461 | .ndo_set_mac_address = eth_mac_addr, | 459 | .ndo_set_mac_address = eth_mac_addr, |
| @@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv) | |||
| 506 | return priv->vlgrp || priv->rx_csum_enable; | 504 | return priv->vlgrp || priv->rx_csum_enable; |
| 507 | } | 505 | } |
| 508 | 506 | ||
| 509 | u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
| 510 | { | ||
| 511 | return skb_get_queue_mapping(skb); | ||
| 512 | } | ||
| 513 | static void free_tx_pointers(struct gfar_private *priv) | 507 | static void free_tx_pointers(struct gfar_private *priv) |
| 514 | { | 508 | { |
| 515 | int i = 0; | 509 | int i = 0; |
| @@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
| 2470 | fcb = (struct rxfcb *)skb->data; | 2464 | fcb = (struct rxfcb *)skb->data; |
| 2471 | 2465 | ||
| 2472 | /* Remove the FCB from the skb */ | 2466 | /* Remove the FCB from the skb */ |
| 2473 | skb_set_queue_mapping(skb, fcb->rq); | ||
| 2474 | /* Remove the padded bytes, if there are any */ | 2467 | /* Remove the padded bytes, if there are any */ |
| 2475 | if (amount_pull) | 2468 | if (amount_pull) { |
| 2469 | skb_record_rx_queue(skb, fcb->rq); | ||
| 2476 | skb_pull(skb, amount_pull); | 2470 | skb_pull(skb, amount_pull); |
| 2471 | } | ||
| 2477 | 2472 | ||
| 2478 | if (priv->rx_csum_enable) | 2473 | if (priv->rx_csum_enable) |
| 2479 | gfar_rx_checksum(skb, fcb); | 2474 | gfar_rx_checksum(skb, fcb); |
| @@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
| 2554 | /* Remove the FCS from the packet length */ | 2549 | /* Remove the FCS from the packet length */ |
| 2555 | skb_put(skb, pkt_len); | 2550 | skb_put(skb, pkt_len); |
| 2556 | rx_queue->stats.rx_bytes += pkt_len; | 2551 | rx_queue->stats.rx_bytes += pkt_len; |
| 2557 | 2552 | skb_record_rx_queue(skb, rx_queue->qindex); | |
| 2558 | gfar_process_frame(dev, skb, amount_pull); | 2553 | gfar_process_frame(dev, skb, amount_pull); |
| 2559 | 2554 | ||
| 2560 | } else { | 2555 | } else { |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index ae5f11c8fc13..bdadf3e23c94 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
| @@ -248,6 +248,7 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 248 | { | 248 | { |
| 249 | unsigned char *ptr; | 249 | unsigned char *ptr; |
| 250 | struct bpqdev *bpq; | 250 | struct bpqdev *bpq; |
| 251 | struct net_device *orig_dev; | ||
| 251 | int size; | 252 | int size; |
| 252 | 253 | ||
| 253 | /* | 254 | /* |
| @@ -282,8 +283,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 282 | 283 | ||
| 283 | bpq = netdev_priv(dev); | 284 | bpq = netdev_priv(dev); |
| 284 | 285 | ||
| 286 | orig_dev = dev; | ||
| 285 | if ((dev = bpq_get_ether_dev(dev)) == NULL) { | 287 | if ((dev = bpq_get_ether_dev(dev)) == NULL) { |
| 286 | dev->stats.tx_dropped++; | 288 | orig_dev->stats.tx_dropped++; |
| 287 | kfree_skb(skb); | 289 | kfree_skb(skb); |
| 288 | return NETDEV_TX_OK; | 290 | return NETDEV_TX_OK; |
| 289 | } | 291 | } |
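In bpq_xmit() the lookup dev = bpq_get_ether_dev(dev) overwrites dev, so the old error path bumped dev->stats.tx_dropped through a NULL pointer. The fix stashes the incoming device in orig_dev first and accounts the drop there. A compact sketch of the bug-and-fix shape; the struct and lookup names are invented for illustration:

#include <stddef.h>
#include <stdio.h>

struct netdev { long tx_dropped; };

/* Stand-in lookup that can fail; here it always fails to exercise the path. */
static struct netdev *lookup_lower_dev(struct netdev *dev)
{
        (void)dev;
        return NULL;
}

static int xmit(struct netdev *dev)
{
        struct netdev *orig_dev = dev;        /* remember the device we were called on */

        dev = lookup_lower_dev(dev);          /* may overwrite dev with NULL */
        if (dev == NULL) {
                orig_dev->tx_dropped++;       /* account on the original device */
                return 0;                     /* drop the packet, no NULL dereference */
        }
        /* ... hand the packet to dev here ... */
        return 0;
}

int main(void)
{
        struct netdev d = { 0 };

        xmit(&d);
        printf("tx_dropped=%ld\n", d.tx_dropped);
        return 0;
}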
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index 090a6d3af112..052c74091d91 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c | |||
| @@ -87,6 +87,7 @@ History: | |||
| 87 | #include <linux/module.h> | 87 | #include <linux/module.h> |
| 88 | #include <linux/netdevice.h> | 88 | #include <linux/netdevice.h> |
| 89 | #include <linux/etherdevice.h> | 89 | #include <linux/etherdevice.h> |
| 90 | #include <linux/if_ether.h> | ||
| 90 | #include <linux/skbuff.h> | 91 | #include <linux/skbuff.h> |
| 91 | #include <linux/bitops.h> | 92 | #include <linux/bitops.h> |
| 92 | 93 | ||
| @@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev) | |||
| 988 | 989 | ||
| 989 | /* copy out MAC address */ | 990 | /* copy out MAC address */ |
| 990 | 991 | ||
| 991 | for (z = 0; z < sizeof(dev->dev_addr); z++) | 992 | for (z = 0; z < ETH_ALEN; z++) |
| 992 | dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z); | 993 | dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z); |
| 993 | 994 | ||
| 994 | /* print config */ | 995 | /* print config */ |
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index e8e9e9194a88..c505b50d1fa3 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c | |||
| @@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) | |||
| 1096 | hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); | 1096 | hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); |
| 1097 | } else { | 1097 | } else { |
| 1098 | /* Set PCS register for forced link */ | 1098 | /* Set PCS register for forced link */ |
| 1099 | reg |= E1000_PCS_LCTL_FSD | /* Force Speed */ | 1099 | reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ |
| 1100 | E1000_PCS_LCTL_FORCE_LINK | /* Force Link */ | ||
| 1101 | E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */ | ||
| 1102 | 1100 | ||
| 1103 | hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); | 1101 | hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); |
| 1104 | } | 1102 | } |
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c index 5c9d73e9bb8d..3670a66401b8 100644 --- a/drivers/net/igb/e1000_phy.c +++ b/drivers/net/igb/e1000_phy.c | |||
| @@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw) | |||
| 457 | phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; | 457 | phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; |
| 458 | 458 | ||
| 459 | ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); | 459 | ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); |
| 460 | if (ret_val) | ||
| 461 | goto out; | ||
| 462 | |||
| 463 | /* Set number of link attempts before downshift */ | ||
| 464 | ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data); | ||
| 465 | if (ret_val) | ||
| 466 | goto out; | ||
| 467 | phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK; | ||
| 468 | ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data); | ||
| 469 | 460 | ||
| 470 | out: | 461 | out: |
| 471 | return ret_val; | 462 | return ret_val; |
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index ac9d5272650d..f771a6c08777 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
| @@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
| 1795 | /* dual port cards only support WoL on port A from now on | 1795 | /* dual port cards only support WoL on port A from now on |
| 1796 | * unless it was enabled in the eeprom for port B | 1796 | * unless it was enabled in the eeprom for port B |
| 1797 | * so exclude FUNC_1 ports from having WoL enabled */ | 1797 | * so exclude FUNC_1 ports from having WoL enabled */ |
| 1798 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && | 1798 | if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && |
| 1799 | !adapter->eeprom_wol) { | 1799 | !adapter->eeprom_wol) { |
| 1800 | wol->supported = 0; | 1800 | wol->supported = 0; |
| 1801 | break; | 1801 | break; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 78963a0e128d..933c64ff2465 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
| @@ -1306,13 +1306,8 @@ void igb_reset(struct igb_adapter *adapter) | |||
| 1306 | hwm = min(((pba << 10) * 9 / 10), | 1306 | hwm = min(((pba << 10) * 9 / 10), |
| 1307 | ((pba << 10) - 2 * adapter->max_frame_size)); | 1307 | ((pba << 10) - 2 * adapter->max_frame_size)); |
| 1308 | 1308 | ||
| 1309 | if (mac->type < e1000_82576) { | 1309 | fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ |
| 1310 | fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ | 1310 | fc->low_water = fc->high_water - 16; |
| 1311 | fc->low_water = fc->high_water - 8; | ||
| 1312 | } else { | ||
| 1313 | fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ | ||
| 1314 | fc->low_water = fc->high_water - 16; | ||
| 1315 | } | ||
| 1316 | fc->pause_time = 0xFFFF; | 1311 | fc->pause_time = 0xFFFF; |
| 1317 | fc->send_xon = 1; | 1312 | fc->send_xon = 1; |
| 1318 | fc->current_mode = fc->requested_mode; | 1313 | fc->current_mode = fc->requested_mode; |
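The igb_reset() hunk drops the pre-82576 branch so every supported MAC now programs flow control with 16-byte granularity: the computed watermark is rounded down with a mask and the low-water mark sits one granule below it. A tiny arithmetic example (the hwm value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hwm = 0x5c7a;                   /* example value, not from the driver */
        uint32_t high_water = hwm & 0xFFF0;      /* round down to a 16-byte boundary */
        uint32_t low_water  = high_water - 16;   /* one granule below the high mark */

        printf("high=0x%04x low=0x%04x\n", high_water, low_water);  /* high=0x5c70 low=0x5c60 */
        return 0;
}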
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index e9dd95f136aa..0dbd0320023a 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
| @@ -2763,7 +2763,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, | |||
| 2763 | err = hw->mac.ops.reset_hw(hw); | 2763 | err = hw->mac.ops.reset_hw(hw); |
| 2764 | if (err) { | 2764 | if (err) { |
| 2765 | dev_info(&pdev->dev, | 2765 | dev_info(&pdev->dev, |
| 2766 | "PF still in reset state, assigning new address\n"); | 2766 | "PF still in reset state, assigning new address." |
| 2767 | " Is the PF interface up?\n"); | ||
| 2767 | random_ether_addr(hw->mac.addr); | 2768 | random_ether_addr(hw->mac.addr); |
| 2768 | } else { | 2769 | } else { |
| 2769 | err = hw->mac.ops.read_mac_addr(hw); | 2770 | err = hw->mac.ops.read_mac_addr(hw); |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index bd64387563f0..2ad754c864cf 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
| @@ -262,10 +262,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter, | |||
| 262 | int reg_idx = tx_ring->reg_idx; | 262 | int reg_idx = tx_ring->reg_idx; |
| 263 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | 263 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; |
| 264 | 264 | ||
| 265 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 265 | switch (adapter->hw.mac.type) { |
| 266 | case ixgbe_mac_82598EB: | ||
| 266 | tc = reg_idx >> 2; | 267 | tc = reg_idx >> 2; |
| 267 | txoff = IXGBE_TFCS_TXOFF0; | 268 | txoff = IXGBE_TFCS_TXOFF0; |
| 268 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 269 | break; |
| 270 | case ixgbe_mac_82599EB: | ||
| 269 | tc = 0; | 271 | tc = 0; |
| 270 | txoff = IXGBE_TFCS_TXOFF; | 272 | txoff = IXGBE_TFCS_TXOFF; |
| 271 | if (dcb_i == 8) { | 273 | if (dcb_i == 8) { |
| @@ -284,6 +286,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter, | |||
| 284 | tc += (reg_idx - 96) >> 4; | 286 | tc += (reg_idx - 96) >> 4; |
| 285 | } | 287 | } |
| 286 | } | 288 | } |
| 289 | break; | ||
| 290 | default: | ||
| 291 | tc = 0; | ||
| 287 | } | 292 | } |
| 288 | txoff <<= tc; | 293 | txoff <<= tc; |
| 289 | } | 294 | } |
| @@ -4373,6 +4378,11 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
| 4373 | 4378 | ||
| 4374 | pci_set_power_state(pdev, PCI_D0); | 4379 | pci_set_power_state(pdev, PCI_D0); |
| 4375 | pci_restore_state(pdev); | 4380 | pci_restore_state(pdev); |
| 4381 | /* | ||
| 4382 | * pci_restore_state clears dev->state_saved so call | ||
| 4383 | * pci_save_state to restore it. | ||
| 4384 | */ | ||
| 4385 | pci_save_state(pdev); | ||
| 4376 | 4386 | ||
| 4377 | err = pci_enable_device_mem(pdev); | 4387 | err = pci_enable_device_mem(pdev); |
| 4378 | if (err) { | 4388 | if (err) { |
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index 336e7c7a9275..a8522bd73ae7 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c | |||
| @@ -134,7 +134,7 @@ static int temac_dma_bd_init(struct net_device *ndev) | |||
| 134 | struct sk_buff *skb; | 134 | struct sk_buff *skb; |
| 135 | int i; | 135 | int i; |
| 136 | 136 | ||
| 137 | lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL); | 137 | lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); |
| 138 | /* allocate the tx and rx ring buffer descriptors. */ | 138 | /* allocate the tx and rx ring buffer descriptors. */ |
| 139 | /* returns a virtual addres and a physical address. */ | 139 | /* returns a virtual addres and a physical address. */ |
| 140 | lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, | 140 | lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 291a505fd4fc..3cf56d90d859 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
| @@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1174 | return 0; | 1174 | return 0; |
| 1175 | 1175 | ||
| 1176 | err_port: | 1176 | err_port: |
| 1177 | for (port = 1; port <= dev->caps.num_ports; port++) | 1177 | for (--port; port >= 1; --port) |
| 1178 | mlx4_cleanup_port_info(&priv->port[port]); | 1178 | mlx4_cleanup_port_info(&priv->port[port]); |
| 1179 | 1179 | ||
| 1180 | mlx4_cleanup_mcg_table(dev); | 1180 | mlx4_cleanup_mcg_table(dev); |
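The mlx4 error path previously re-ran for (port = 1; port <= dev->caps.num_ports; port++) under err_port, tearing down ports that had never been initialized, including the one whose setup just failed. Walking backwards from the failing port unwinds only what was actually brought up. A self-contained sketch of the pattern (the failing port is picked arbitrarily):

#include <stdio.h>

#define NUM_PORTS 4

static int init_port(int port)
{
        return (port == 3) ? -1 : 0;         /* pretend port 3 fails, for illustration */
}

static void cleanup_port(int port)
{
        printf("cleanup port %d\n", port);
}

static int init_all_ports(void)
{
        int port, err = 0;

        for (port = 1; port <= NUM_PORTS; port++) {
                err = init_port(port);
                if (err)
                        goto err_port;
        }
        return 0;

err_port:
        /* Unwind in reverse order, skipping the port that failed. */
        for (--port; port >= 1; --port)
                cleanup_port(port);
        return err;
}

int main(void)
{
        return init_all_ports() ? 1 : 0;     /* prints "cleanup port 2" then "cleanup port 1" */
}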
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 1405a170bb43..af67af55efe7 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
| @@ -656,6 +656,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) | |||
| 656 | struct sk_buff *skb; | 656 | struct sk_buff *skb; |
| 657 | int rx; | 657 | int rx; |
| 658 | struct rx_desc *rx_desc; | 658 | struct rx_desc *rx_desc; |
| 659 | int size; | ||
| 659 | 660 | ||
| 660 | skb = __skb_dequeue(&mp->rx_recycle); | 661 | skb = __skb_dequeue(&mp->rx_recycle); |
| 661 | if (skb == NULL) | 662 | if (skb == NULL) |
| @@ -678,10 +679,11 @@ static int rxq_refill(struct rx_queue *rxq, int budget) | |||
| 678 | 679 | ||
| 679 | rx_desc = rxq->rx_desc_area + rx; | 680 | rx_desc = rxq->rx_desc_area + rx; |
| 680 | 681 | ||
| 682 | size = skb->end - skb->data; | ||
| 681 | rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, | 683 | rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, |
| 682 | skb->data, mp->skb_size, | 684 | skb->data, size, |
| 683 | DMA_FROM_DEVICE); | 685 | DMA_FROM_DEVICE); |
| 684 | rx_desc->buf_size = mp->skb_size; | 686 | rx_desc->buf_size = size; |
| 685 | rxq->rx_skb[rx] = skb; | 687 | rxq->rx_skb[rx] = skb; |
| 686 | wmb(); | 688 | wmb(); |
| 687 | rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; | 689 | rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 76cd1f3e9fc8..9bc5bd1d538a 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
| @@ -53,8 +53,8 @@ | |||
| 53 | 53 | ||
| 54 | #define _NETXEN_NIC_LINUX_MAJOR 4 | 54 | #define _NETXEN_NIC_LINUX_MAJOR 4 |
| 55 | #define _NETXEN_NIC_LINUX_MINOR 0 | 55 | #define _NETXEN_NIC_LINUX_MINOR 0 |
| 56 | #define _NETXEN_NIC_LINUX_SUBVERSION 65 | 56 | #define _NETXEN_NIC_LINUX_SUBVERSION 72 |
| 57 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.65" | 57 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.72" |
| 58 | 58 | ||
| 59 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) | 59 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) |
| 60 | #define _major(v) (((v) >> 24) & 0xff) | 60 | #define _major(v) (((v) >> 24) & 0xff) |
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index ddd704ae0188..542f408333ff 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
| @@ -66,7 +66,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { | |||
| 66 | 66 | ||
| 67 | #define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) | 67 | #define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) |
| 68 | 68 | ||
| 69 | #define NETXEN_NIC_REGS_COUNT 42 | 69 | #define NETXEN_NIC_REGS_COUNT 30 |
| 70 | #define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) | 70 | #define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) |
| 71 | #define NETXEN_MAX_EEPROM_LEN 1024 | 71 | #define NETXEN_MAX_EEPROM_LEN 1024 |
| 72 | 72 | ||
| @@ -312,150 +312,91 @@ static int netxen_nic_get_regs_len(struct net_device *dev) | |||
| 312 | return NETXEN_NIC_REGS_LEN; | 312 | return NETXEN_NIC_REGS_LEN; |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | struct netxen_niu_regs { | ||
| 316 | __u32 reg[NETXEN_NIC_REGS_COUNT]; | ||
| 317 | }; | ||
| 318 | |||
| 319 | static struct netxen_niu_regs niu_registers[] = { | ||
| 320 | { | ||
| 321 | /* GB Mode */ | ||
| 322 | { | ||
| 323 | NETXEN_NIU_GB_SERDES_RESET, | ||
| 324 | NETXEN_NIU_GB0_MII_MODE, | ||
| 325 | NETXEN_NIU_GB1_MII_MODE, | ||
| 326 | NETXEN_NIU_GB2_MII_MODE, | ||
| 327 | NETXEN_NIU_GB3_MII_MODE, | ||
| 328 | NETXEN_NIU_GB0_GMII_MODE, | ||
| 329 | NETXEN_NIU_GB1_GMII_MODE, | ||
| 330 | NETXEN_NIU_GB2_GMII_MODE, | ||
| 331 | NETXEN_NIU_GB3_GMII_MODE, | ||
| 332 | NETXEN_NIU_REMOTE_LOOPBACK, | ||
| 333 | NETXEN_NIU_GB0_HALF_DUPLEX, | ||
| 334 | NETXEN_NIU_GB1_HALF_DUPLEX, | ||
| 335 | NETXEN_NIU_RESET_SYS_FIFOS, | ||
| 336 | NETXEN_NIU_GB_CRC_DROP, | ||
| 337 | NETXEN_NIU_GB_DROP_WRONGADDR, | ||
| 338 | NETXEN_NIU_TEST_MUX_CTL, | ||
| 339 | |||
| 340 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | ||
| 341 | NETXEN_NIU_GB_MAC_CONFIG_1(0), | ||
| 342 | NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0), | ||
| 343 | NETXEN_NIU_GB_MAX_FRAME_SIZE(0), | ||
| 344 | NETXEN_NIU_GB_TEST_REG(0), | ||
| 345 | NETXEN_NIU_GB_MII_MGMT_CONFIG(0), | ||
| 346 | NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | ||
| 347 | NETXEN_NIU_GB_MII_MGMT_ADDR(0), | ||
| 348 | NETXEN_NIU_GB_MII_MGMT_CTRL(0), | ||
| 349 | NETXEN_NIU_GB_MII_MGMT_STATUS(0), | ||
| 350 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), | ||
| 351 | NETXEN_NIU_GB_INTERFACE_CTRL(0), | ||
| 352 | NETXEN_NIU_GB_INTERFACE_STATUS(0), | ||
| 353 | NETXEN_NIU_GB_STATION_ADDR_0(0), | ||
| 354 | NETXEN_NIU_GB_STATION_ADDR_1(0), | ||
| 355 | -1, | ||
| 356 | } | ||
| 357 | }, | ||
| 358 | { | ||
| 359 | /* XG Mode */ | ||
| 360 | { | ||
| 361 | NETXEN_NIU_XG_SINGLE_TERM, | ||
| 362 | NETXEN_NIU_XG_DRIVE_HI, | ||
| 363 | NETXEN_NIU_XG_DRIVE_LO, | ||
| 364 | NETXEN_NIU_XG_DTX, | ||
| 365 | NETXEN_NIU_XG_DEQ, | ||
| 366 | NETXEN_NIU_XG_WORD_ALIGN, | ||
| 367 | NETXEN_NIU_XG_RESET, | ||
| 368 | NETXEN_NIU_XG_POWER_DOWN, | ||
| 369 | NETXEN_NIU_XG_RESET_PLL, | ||
| 370 | NETXEN_NIU_XG_SERDES_LOOPBACK, | ||
| 371 | NETXEN_NIU_XG_DO_BYTE_ALIGN, | ||
| 372 | NETXEN_NIU_XG_TX_ENABLE, | ||
| 373 | NETXEN_NIU_XG_RX_ENABLE, | ||
| 374 | NETXEN_NIU_XG_STATUS, | ||
| 375 | NETXEN_NIU_XG_PAUSE_THRESHOLD, | ||
| 376 | NETXEN_NIU_XGE_CONFIG_0, | ||
| 377 | NETXEN_NIU_XGE_CONFIG_1, | ||
| 378 | NETXEN_NIU_XGE_IPG, | ||
| 379 | NETXEN_NIU_XGE_STATION_ADDR_0_HI, | ||
| 380 | NETXEN_NIU_XGE_STATION_ADDR_0_1, | ||
| 381 | NETXEN_NIU_XGE_STATION_ADDR_1_LO, | ||
| 382 | NETXEN_NIU_XGE_STATUS, | ||
| 383 | NETXEN_NIU_XGE_MAX_FRAME_SIZE, | ||
| 384 | NETXEN_NIU_XGE_PAUSE_FRAME_VALUE, | ||
| 385 | NETXEN_NIU_XGE_TX_BYTE_CNT, | ||
| 386 | NETXEN_NIU_XGE_TX_FRAME_CNT, | ||
| 387 | NETXEN_NIU_XGE_RX_BYTE_CNT, | ||
| 388 | NETXEN_NIU_XGE_RX_FRAME_CNT, | ||
| 389 | NETXEN_NIU_XGE_AGGR_ERROR_CNT, | ||
| 390 | NETXEN_NIU_XGE_MULTICAST_FRAME_CNT, | ||
| 391 | NETXEN_NIU_XGE_UNICAST_FRAME_CNT, | ||
| 392 | NETXEN_NIU_XGE_CRC_ERROR_CNT, | ||
| 393 | NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR, | ||
| 394 | NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR, | ||
| 395 | NETXEN_NIU_XGE_LOCAL_ERROR_CNT, | ||
| 396 | NETXEN_NIU_XGE_REMOTE_ERROR_CNT, | ||
| 397 | NETXEN_NIU_XGE_CONTROL_CHAR_CNT, | ||
| 398 | NETXEN_NIU_XGE_PAUSE_FRAME_CNT, | ||
| 399 | -1, | ||
| 400 | } | ||
| 401 | } | ||
| 402 | }; | ||
| 403 | |||
| 404 | static void | 315 | static void |
| 405 | netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) | 316 | netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) |
| 406 | { | 317 | { |
| 407 | struct netxen_adapter *adapter = netdev_priv(dev); | 318 | struct netxen_adapter *adapter = netdev_priv(dev); |
| 408 | __u32 mode, *regs_buff = p; | 319 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; |
| 409 | int i, window; | 320 | struct nx_host_sds_ring *sds_ring; |
| 321 | u32 *regs_buff = p; | ||
| 322 | int ring, i = 0; | ||
| 323 | int port = adapter->physical_port; | ||
| 410 | 324 | ||
| 411 | memset(p, 0, NETXEN_NIC_REGS_LEN); | 325 | memset(p, 0, NETXEN_NIC_REGS_LEN); |
| 326 | |||
| 412 | regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | | 327 | regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | |
| 413 | (adapter->pdev)->device; | 328 | (adapter->pdev)->device; |
| 414 | /* which mode */ | ||
| 415 | regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE); | ||
| 416 | mode = regs_buff[0]; | ||
| 417 | |||
| 418 | /* Common registers to all the modes */ | ||
| 419 | regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER); | ||
| 420 | /* GB/XGB Mode */ | ||
| 421 | mode = (mode / 2) - 1; | ||
| 422 | window = 0; | ||
| 423 | if (mode <= 1) { | ||
| 424 | for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) { | ||
| 425 | /* GB: port specific registers */ | ||
| 426 | if (mode == 0 && i >= 19) | ||
| 427 | window = adapter->physical_port * | ||
| 428 | NETXEN_NIC_PORT_WINDOW; | ||
| 429 | |||
| 430 | regs_buff[i] = NXRD32(adapter, | ||
| 431 | niu_registers[mode].reg[i - 3] + window); | ||
| 432 | } | ||
| 433 | 329 | ||
| 330 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) | ||
| 331 | return; | ||
| 332 | |||
| 333 | regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); | ||
| 334 | regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); | ||
| 335 | regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); | ||
| 336 | regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); | ||
| 337 | regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); | ||
| 338 | regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); | ||
| 339 | regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); | ||
| 340 | regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); | ||
| 341 | regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); | ||
| 342 | |||
| 343 | regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); | ||
| 344 | regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); | ||
| 345 | regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); | ||
| 346 | regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); | ||
| 347 | |||
| 348 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
| 349 | |||
| 350 | regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); | ||
| 351 | i += 2; | ||
| 352 | |||
| 353 | regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); | ||
| 354 | regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); | ||
| 355 | |||
| 356 | } else { | ||
| 357 | i++; | ||
| 358 | |||
| 359 | regs_buff[i++] = NXRD32(adapter, | ||
| 360 | NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); | ||
| 361 | regs_buff[i++] = NXRD32(adapter, | ||
| 362 | NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); | ||
| 363 | |||
| 364 | regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); | ||
| 365 | regs_buff[i++] = NXRDIO(adapter, | ||
| 366 | adapter->tx_ring->crb_cmd_consumer); | ||
| 367 | } | ||
| 368 | |||
| 369 | regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); | ||
| 370 | |||
| 371 | regs_buff[i++] = NXRDIO(adapter, | ||
| 372 | recv_ctx->rds_rings[0].crb_rcv_producer); | ||
| 373 | regs_buff[i++] = NXRDIO(adapter, | ||
| 374 | recv_ctx->rds_rings[1].crb_rcv_producer); | ||
| 375 | |||
| 376 | regs_buff[i++] = adapter->max_sds_rings; | ||
| 377 | |||
| 378 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
| 379 | sds_ring = &(recv_ctx->sds_rings[ring]); | ||
| 380 | regs_buff[i++] = NXRDIO(adapter, | ||
| 381 | sds_ring->crb_sts_consumer); | ||
| 434 | } | 382 | } |
| 435 | } | 383 | } |
| 436 | 384 | ||
| 437 | static u32 netxen_nic_test_link(struct net_device *dev) | 385 | static u32 netxen_nic_test_link(struct net_device *dev) |
| 438 | { | 386 | { |
| 439 | struct netxen_adapter *adapter = netdev_priv(dev); | 387 | struct netxen_adapter *adapter = netdev_priv(dev); |
| 440 | __u32 status; | 388 | u32 val, port; |
| 441 | int val; | ||
| 442 | 389 | ||
| 443 | /* read which mode */ | 390 | port = adapter->physical_port; |
| 444 | if (adapter->ahw.port_type == NETXEN_NIC_GBE) { | 391 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { |
| 445 | if (adapter->phy_read && | 392 | val = NXRD32(adapter, CRB_XG_STATE_P3); |
| 446 | adapter->phy_read(adapter, | 393 | val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); |
| 447 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | 394 | return (val == XG_LINK_UP_P3) ? 0 : 1; |
| 448 | &status) != 0) | 395 | } else { |
| 449 | return -EIO; | ||
| 450 | else { | ||
| 451 | val = netxen_get_phy_link(status); | ||
| 452 | return !val; | ||
| 453 | } | ||
| 454 | } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { | ||
| 455 | val = NXRD32(adapter, CRB_XG_STATE); | 396 | val = NXRD32(adapter, CRB_XG_STATE); |
| 397 | val = (val >> port*8) & 0xff; | ||
| 456 | return (val == XG_LINK_UP) ? 0 : 1; | 398 | return (val == XG_LINK_UP) ? 0 : 1; |
| 457 | } | 399 | } |
| 458 | return -EIO; | ||
| 459 | } | 400 | } |
| 460 | 401 | ||
| 461 | static int | 402 | static int |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 2e364fee3cbb..85e28e60ecf1 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
| @@ -345,8 +345,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) | |||
| 345 | void | 345 | void |
| 346 | netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) | 346 | netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) |
| 347 | { | 347 | { |
| 348 | int val; | 348 | NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); |
| 349 | val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); | ||
| 350 | } | 349 | } |
| 351 | 350 | ||
| 352 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) | 351 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) |
| @@ -691,6 +690,9 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) | |||
| 691 | struct list_head *head; | 690 | struct list_head *head; |
| 692 | nx_mac_list_t *cur; | 691 | nx_mac_list_t *cur; |
| 693 | 692 | ||
| 693 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) | ||
| 694 | return; | ||
| 695 | |||
| 694 | list_splice_tail_init(&adapter->mac_list, &del_list); | 696 | list_splice_tail_init(&adapter->mac_list, &del_list); |
| 695 | 697 | ||
| 696 | nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); | 698 | nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 02f8d4b4db63..64cff68d372c 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
| @@ -184,6 +184,8 @@ skip_rds: | |||
| 184 | 184 | ||
| 185 | tx_ring = adapter->tx_ring; | 185 | tx_ring = adapter->tx_ring; |
| 186 | vfree(tx_ring->cmd_buf_arr); | 186 | vfree(tx_ring->cmd_buf_arr); |
| 187 | kfree(tx_ring); | ||
| 188 | adapter->tx_ring = NULL; | ||
| 187 | } | 189 | } |
| 188 | 190 | ||
| 189 | int netxen_alloc_sw_resources(struct netxen_adapter *adapter) | 191 | int netxen_alloc_sw_resources(struct netxen_adapter *adapter) |
| @@ -782,7 +784,7 @@ netxen_need_fw_reset(struct netxen_adapter *adapter) | |||
| 782 | if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) | 784 | if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) |
| 783 | return 1; | 785 | return 1; |
| 784 | 786 | ||
| 785 | old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); | 787 | old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); |
| 786 | 788 | ||
| 787 | for (i = 0; i < 10; i++) { | 789 | for (i = 0; i < 10; i++) { |
| 788 | 790 | ||
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 6cae26a5bd67..9f9d6081959b 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
| @@ -340,7 +340,7 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) | |||
| 340 | if (!(first_boot & 0x4)) { | 340 | if (!(first_boot & 0x4)) { |
| 341 | first_boot |= 0x4; | 341 | first_boot |= 0x4; |
| 342 | NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); | 342 | NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); |
| 343 | first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); | 343 | NXRD32(adapter, NETXEN_PCIE_REG(0x4)); |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | /* This is the first boot after power up */ | 346 | /* This is the first boot after power up */ |
| @@ -1898,12 +1898,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) | |||
| 1898 | linkup = (val == XG_LINK_UP_P3); | 1898 | linkup = (val == XG_LINK_UP_P3); |
| 1899 | } else { | 1899 | } else { |
| 1900 | val = NXRD32(adapter, CRB_XG_STATE); | 1900 | val = NXRD32(adapter, CRB_XG_STATE); |
| 1901 | if (adapter->ahw.port_type == NETXEN_NIC_GBE) | 1901 | val = (val >> port*8) & 0xff; |
| 1902 | linkup = (val >> port) & 1; | 1902 | linkup = (val == XG_LINK_UP); |
| 1903 | else { | ||
| 1904 | val = (val >> port*8) & 0xff; | ||
| 1905 | linkup = (val == XG_LINK_UP); | ||
| 1906 | } | ||
| 1907 | } | 1903 | } |
| 1908 | 1904 | ||
| 1909 | netxen_advert_link_change(adapter, linkup); | 1905 | netxen_advert_link_change(adapter, linkup); |
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 8ce58c4c7dd3..2aed2b382c40 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
| @@ -2844,7 +2844,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit) | |||
| 2844 | break; | 2844 | break; |
| 2845 | udelay(1); | 2845 | udelay(1); |
| 2846 | } | 2846 | } |
| 2847 | if (limit < 0) | 2847 | if (limit <= 0) |
| 2848 | return -ENODEV; | 2848 | return -ENODEV; |
| 2849 | 2849 | ||
| 2850 | return 0; | 2850 | return 0; |
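tcam_wait_bit() polls for a hardware bit with a countdown; assuming the usual while (--limit > 0) form, the loop can exhaust with limit == 0 while the bit never appeared, so a test for limit < 0 could never report the timeout. The one-character change makes the exhausted case detectable. A hedged sketch of the pattern, where bit_set() is a stand-in that never succeeds so the timeout path runs:

#include <stdbool.h>
#include <stdio.h>

static bool bit_set(void)
{
        return false;                        /* stand-in: the bit never comes up */
}

static int wait_for_bit(void)
{
        int limit = 1000;

        while (--limit > 0) {
                if (bit_set())
                        break;
                /* a short delay (udelay) would go here */
        }

        /* On exhaustion the loop leaves limit == 0, so the timeout test
         * must be <= 0; a plain < 0 would silently report success. */
        if (limit <= 0)
                return -1;

        return 0;
}

int main(void)
{
        printf("wait_for_bit() = %d\n", wait_for_bit());
        return 0;
}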
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 8a5ae3b182ed..12e3233868e9 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c | |||
| @@ -1402,7 +1402,6 @@ static void BuildLAF(int *ladrf, int *adr) | |||
| 1402 | for (i = 0; i < 8; i++) | 1402 | for (i = 0; i < 8; i++) |
| 1403 | printk(KERN_CONT " %02X", ladrf[i]); | 1403 | printk(KERN_CONT " %02X", ladrf[i]); |
| 1404 | printk(KERN_CONT "\n"); | 1404 | printk(KERN_CONT "\n"); |
| 1405 | } | ||
| 1406 | #endif | 1405 | #endif |
| 1407 | } /* BuildLAF */ | 1406 | } /* BuildLAF */ |
| 1408 | 1407 | ||
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 92ed3fbf89a5..776cad2f5715 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
| @@ -1741,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
| 1741 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), | 1741 | PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), |
| 1742 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), | 1742 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), |
| 1743 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), | 1743 | PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), |
| 1744 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), | 1744 | PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), |
| 1745 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), | 1745 | PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), |
| 1746 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), | 1746 | PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), |
| 1747 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), | 1747 | PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), |
| @@ -1754,7 +1754,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); | |||
| 1754 | MODULE_FIRMWARE("cis/PCMLM28.cis"); | 1754 | MODULE_FIRMWARE("cis/PCMLM28.cis"); |
| 1755 | MODULE_FIRMWARE("cis/DP83903.cis"); | 1755 | MODULE_FIRMWARE("cis/DP83903.cis"); |
| 1756 | MODULE_FIRMWARE("cis/LA-PCM.cis"); | 1756 | MODULE_FIRMWARE("cis/LA-PCM.cis"); |
| 1757 | MODULE_FIRMWARE("PE520.cis"); | 1757 | MODULE_FIRMWARE("cis/PE520.cis"); |
| 1758 | MODULE_FIRMWARE("cis/NE2K.cis"); | 1758 | MODULE_FIRMWARE("cis/NE2K.cis"); |
| 1759 | MODULE_FIRMWARE("cis/PE-200.cis"); | 1759 | MODULE_FIRMWARE("cis/PE-200.cis"); |
| 1760 | MODULE_FIRMWARE("cis/tamarack.cis"); | 1760 | MODULE_FIRMWARE("cis/tamarack.cis"); |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index dcc67a35e8f2..e154677ff706 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
| @@ -45,6 +45,7 @@ static const char *const version = | |||
| 45 | #include <linux/crc32.h> | 45 | #include <linux/crc32.h> |
| 46 | #include <linux/netdevice.h> | 46 | #include <linux/netdevice.h> |
| 47 | #include <linux/etherdevice.h> | 47 | #include <linux/etherdevice.h> |
| 48 | #include <linux/if_ether.h> | ||
| 48 | #include <linux/skbuff.h> | 49 | #include <linux/skbuff.h> |
| 49 | #include <linux/spinlock.h> | 50 | #include <linux/spinlock.h> |
| 50 | #include <linux/moduleparam.h> | 51 | #include <linux/moduleparam.h> |
| @@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
| 1765 | 1766 | ||
| 1766 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ | 1767 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ |
| 1767 | if (!is_valid_ether_addr(dev->perm_addr)) | 1768 | if (!is_valid_ether_addr(dev->perm_addr)) |
| 1768 | memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); | 1769 | memset(dev->dev_addr, 0, ETH_ALEN); |
| 1769 | 1770 | ||
| 1770 | if (pcnet32_debug & NETIF_MSG_PROBE) { | 1771 | if (pcnet32_debug & NETIF_MSG_PROBE) { |
| 1771 | printk(" %pM", dev->dev_addr); | 1772 | printk(" %pM", dev->dev_addr); |
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index c13cf64095b6..33c4b12a63ba 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
| @@ -331,8 +331,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev) | |||
| 331 | bool clk125en = true; | 331 | bool clk125en = true; |
| 332 | 332 | ||
| 333 | /* Abort if we are using an untested phy. */ | 333 | /* Abort if we are using an untested phy. */ |
| 334 | if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 || | 334 | if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 && |
| 335 | BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 || | 335 | BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 && |
| 336 | BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M) | 336 | BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M) |
| 337 | return; | 337 | return; |
| 338 | 338 | ||
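The bcm54xx_adjust_rxrefclk() guard is meant to bail out on any PHY model that has not been tested, but with || the test model != A || model != B || model != C is true for every model, so the function always returned early, even on the supported parts. Switching to && rejects exactly the unlisted models. A small sketch of the corrected logic (the enum is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum model { MODEL_A, MODEL_B, MODEL_C, MODEL_OTHER };

static bool is_supported(enum model m)
{
        /* Reject only models that match none of the tested ones. */
        if (m != MODEL_A && m != MODEL_B && m != MODEL_C)
                return false;
        return true;
}

int main(void)
{
        printf("A:%d other:%d\n", is_supported(MODEL_A), is_supported(MODEL_OTHER));
        return 0;   /* prints "A:1 other:0"; with ||, both would have been rejected */
}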
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index bd4e8d72dc08..e17b70291bbc 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
| @@ -264,6 +264,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) | |||
| 264 | (phydev->phy_id & phydrv->phy_id_mask)); | 264 | (phydev->phy_id & phydrv->phy_id_mask)); |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | #ifdef CONFIG_PM | ||
| 268 | |||
| 267 | static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) | 269 | static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) |
| 268 | { | 270 | { |
| 269 | struct device_driver *drv = phydev->dev.driver; | 271 | struct device_driver *drv = phydev->dev.driver; |
| @@ -295,34 +297,88 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) | |||
| 295 | return true; | 297 | return true; |
| 296 | } | 298 | } |
| 297 | 299 | ||
| 298 | /* Suspend and resume. Copied from platform_suspend and | 300 | static int mdio_bus_suspend(struct device *dev) |
| 299 | * platform_resume | ||
| 300 | */ | ||
| 301 | static int mdio_bus_suspend(struct device * dev, pm_message_t state) | ||
| 302 | { | 301 | { |
| 303 | struct phy_driver *phydrv = to_phy_driver(dev->driver); | 302 | struct phy_driver *phydrv = to_phy_driver(dev->driver); |
| 304 | struct phy_device *phydev = to_phy_device(dev); | 303 | struct phy_device *phydev = to_phy_device(dev); |
| 305 | 304 | ||
| 305 | /* | ||
| 306 | * We must stop the state machine manually, otherwise it stops out of | ||
| 307 | * control, possibly with the phydev->lock held. Upon resume, netdev | ||
| 308 | * may call phy routines that try to grab the same lock, and that may | ||
| 309 | * lead to a deadlock. | ||
| 310 | */ | ||
| 311 | if (phydev->attached_dev) | ||
| 312 | phy_stop_machine(phydev); | ||
| 313 | |||
| 306 | if (!mdio_bus_phy_may_suspend(phydev)) | 314 | if (!mdio_bus_phy_may_suspend(phydev)) |
| 307 | return 0; | 315 | return 0; |
| 316 | |||
| 308 | return phydrv->suspend(phydev); | 317 | return phydrv->suspend(phydev); |
| 309 | } | 318 | } |
| 310 | 319 | ||
| 311 | static int mdio_bus_resume(struct device * dev) | 320 | static int mdio_bus_resume(struct device *dev) |
| 312 | { | 321 | { |
| 313 | struct phy_driver *phydrv = to_phy_driver(dev->driver); | 322 | struct phy_driver *phydrv = to_phy_driver(dev->driver); |
| 314 | struct phy_device *phydev = to_phy_device(dev); | 323 | struct phy_device *phydev = to_phy_device(dev); |
| 324 | int ret; | ||
| 315 | 325 | ||
| 316 | if (!mdio_bus_phy_may_suspend(phydev)) | 326 | if (!mdio_bus_phy_may_suspend(phydev)) |
| 327 | goto no_resume; | ||
| 328 | |||
| 329 | ret = phydrv->resume(phydev); | ||
| 330 | if (ret < 0) | ||
| 331 | return ret; | ||
| 332 | |||
| 333 | no_resume: | ||
| 334 | if (phydev->attached_dev) | ||
| 335 | phy_start_machine(phydev, NULL); | ||
| 336 | |||
| 337 | return 0; | ||
| 338 | } | ||
| 339 | |||
| 340 | static int mdio_bus_restore(struct device *dev) | ||
| 341 | { | ||
| 342 | struct phy_device *phydev = to_phy_device(dev); | ||
| 343 | struct net_device *netdev = phydev->attached_dev; | ||
| 344 | int ret; | ||
| 345 | |||
| 346 | if (!netdev) | ||
| 317 | return 0; | 347 | return 0; |
| 318 | return phydrv->resume(phydev); | 348 | |
| 349 | ret = phy_init_hw(phydev); | ||
| 350 | if (ret < 0) | ||
| 351 | return ret; | ||
| 352 | |||
| 353 | /* The PHY needs to renegotiate. */ | ||
| 354 | phydev->link = 0; | ||
| 355 | phydev->state = PHY_UP; | ||
| 356 | |||
| 357 | phy_start_machine(phydev, NULL); | ||
| 358 | |||
| 359 | return 0; | ||
| 319 | } | 360 | } |
| 320 | 361 | ||
| 362 | static struct dev_pm_ops mdio_bus_pm_ops = { | ||
| 363 | .suspend = mdio_bus_suspend, | ||
| 364 | .resume = mdio_bus_resume, | ||
| 365 | .freeze = mdio_bus_suspend, | ||
| 366 | .thaw = mdio_bus_resume, | ||
| 367 | .restore = mdio_bus_restore, | ||
| 368 | }; | ||
| 369 | |||
| 370 | #define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops) | ||
| 371 | |||
| 372 | #else | ||
| 373 | |||
| 374 | #define MDIO_BUS_PM_OPS NULL | ||
| 375 | |||
| 376 | #endif /* CONFIG_PM */ | ||
| 377 | |||
| 321 | struct bus_type mdio_bus_type = { | 378 | struct bus_type mdio_bus_type = { |
| 322 | .name = "mdio_bus", | 379 | .name = "mdio_bus", |
| 323 | .match = mdio_bus_match, | 380 | .match = mdio_bus_match, |
| 324 | .suspend = mdio_bus_suspend, | 381 | .pm = MDIO_BUS_PM_OPS, |
| 325 | .resume = mdio_bus_resume, | ||
| 326 | }; | 382 | }; |
| 327 | EXPORT_SYMBOL(mdio_bus_type); | 383 | EXPORT_SYMBOL(mdio_bus_type); |
| 328 | 384 | ||
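The mdio_bus conversion replaces the legacy bus-level suspend/resume hooks with a dev_pm_ops table, stops and restarts the PHY state machine around suspend to avoid a resume-time deadlock on phydev->lock, and adds a restore handler that reprograms the PHY after hibernation. The whole block is compiled only under CONFIG_PM, with the bus falling back to a NULL .pm pointer otherwise. A stripped-down in-kernel sketch of that CONFIG_PM pattern, using an invented "my_bus" rather than the real MDIO bus:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM

static int my_bus_suspend(struct device *dev)
{
        /* quiesce the device hanging off 'dev' here */
        return 0;
}

static int my_bus_resume(struct device *dev)
{
        /* reprogram the device hanging off 'dev' here */
        return 0;
}

static struct dev_pm_ops my_bus_pm_ops = {
        .suspend = my_bus_suspend,
        .resume  = my_bus_resume,
        .freeze  = my_bus_suspend,
        .thaw    = my_bus_resume,
        .restore = my_bus_resume,
};

#define MY_BUS_PM_OPS (&my_bus_pm_ops)

#else

#define MY_BUS_PM_OPS NULL

#endif /* CONFIG_PM */

struct bus_type my_bus_type = {
        .name = "my_bus",
        .pm   = MY_BUS_PM_OPS,
};

The NULL fallback keeps the registration code identical whether or not power management is built in.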
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b10fedd82143..8212b2b93422 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -378,6 +378,20 @@ void phy_disconnect(struct phy_device *phydev) | |||
| 378 | } | 378 | } |
| 379 | EXPORT_SYMBOL(phy_disconnect); | 379 | EXPORT_SYMBOL(phy_disconnect); |
| 380 | 380 | ||
| 381 | int phy_init_hw(struct phy_device *phydev) | ||
| 382 | { | ||
| 383 | int ret; | ||
| 384 | |||
| 385 | if (!phydev->drv || !phydev->drv->config_init) | ||
| 386 | return 0; | ||
| 387 | |||
| 388 | ret = phy_scan_fixups(phydev); | ||
| 389 | if (ret < 0) | ||
| 390 | return ret; | ||
| 391 | |||
| 392 | return phydev->drv->config_init(phydev); | ||
| 393 | } | ||
| 394 | |||
| 381 | /** | 395 | /** |
| 382 | * phy_attach_direct - attach a network device to a given PHY device pointer | 396 | * phy_attach_direct - attach a network device to a given PHY device pointer |
| 383 | * @dev: network device to attach | 397 | * @dev: network device to attach |
| @@ -425,21 +439,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
| 425 | /* Do initial configuration here, now that | 439 | /* Do initial configuration here, now that |
| 426 | * we have certain key parameters | 440 | * we have certain key parameters |
| 427 | * (dev_flags and interface) */ | 441 | * (dev_flags and interface) */ |
| 428 | if (phydev->drv->config_init) { | 442 | return phy_init_hw(phydev); |
| 429 | int err; | ||
| 430 | |||
| 431 | err = phy_scan_fixups(phydev); | ||
| 432 | |||
| 433 | if (err < 0) | ||
| 434 | return err; | ||
| 435 | |||
| 436 | err = phydev->drv->config_init(phydev); | ||
| 437 | |||
| 438 | if (err < 0) | ||
| 439 | return err; | ||
| 440 | } | ||
| 441 | |||
| 442 | return 0; | ||
| 443 | } | 443 | } |
| 444 | EXPORT_SYMBOL(phy_attach_direct); | 444 | EXPORT_SYMBOL(phy_attach_direct); |
| 445 | 445 | ||
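phy_device.c factors the fixup-plus-config_init sequence out of phy_attach_direct() into a new phy_init_hw() helper, which the mdio_bus restore path above can then reuse to reprogram the PHY after hibernation. A minimal sketch of that extract-a-shared-helper refactor, with stand-in functions instead of the real PHY driver hooks:

#include <stdio.h>

static int apply_fixups(void) { return 0; }   /* stand-in for phy_scan_fixups() */
static int config_init(void)  { return 0; }   /* stand-in for drv->config_init() */

/* One helper runs the full hardware init sequence... */
static int init_hw(void)
{
        int ret = apply_fixups();

        if (ret < 0)
                return ret;
        return config_init();
}

/* ...so both the attach path and the restore path share it. */
static int attach(void)
{
        /* set up key parameters first, then program the hardware */
        return init_hw();
}

static int restore_after_hibernation(void)
{
        int ret = init_hw();

        if (ret < 0)
                return ret;
        /* force a renegotiation here, since the old link state is stale */
        return 0;
}

int main(void)
{
        printf("%d %d\n", attach(), restore_after_hibernation());
        return 0;
}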
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 20a71749154a..1c257098d0a6 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c | |||
| @@ -1293,7 +1293,7 @@ static void rr_dump(struct net_device *dev) | |||
| 1293 | 1293 | ||
| 1294 | printk("Error code 0x%x\n", readl(®s->Fail1)); | 1294 | printk("Error code 0x%x\n", readl(®s->Fail1)); |
| 1295 | 1295 | ||
| 1296 | index = (((readl(®s->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES; | 1296 | index = (((readl(®s->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES; |
| 1297 | cons = rrpriv->dirty_tx; | 1297 | cons = rrpriv->dirty_tx; |
| 1298 | printk("TX ring index %i, TX consumer %i\n", | 1298 | printk("TX ring index %i, TX consumer %i\n", |
| 1299 | index, cons); | 1299 | index, cons); |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index f983e3b507cc..103e8b0e2a0d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
| @@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx) | |||
| 741 | 741 | ||
| 742 | EFX_LOG(efx, "create port\n"); | 742 | EFX_LOG(efx, "create port\n"); |
| 743 | 743 | ||
| 744 | if (phy_flash_cfg) | ||
| 745 | efx->phy_mode = PHY_MODE_SPECIAL; | ||
| 746 | |||
| 744 | /* Connect up MAC/PHY operations table */ | 747 | /* Connect up MAC/PHY operations table */ |
| 745 | rc = efx->type->probe_port(efx); | 748 | rc = efx->type->probe_port(efx); |
| 746 | if (rc) | 749 | if (rc) |
| 747 | goto err; | 750 | goto err; |
| 748 | 751 | ||
| 749 | if (phy_flash_cfg) | ||
| 750 | efx->phy_mode = PHY_MODE_SPECIAL; | ||
| 751 | |||
| 752 | /* Sanity check MAC address */ | 752 | /* Sanity check MAC address */ |
| 753 | if (is_valid_ether_addr(efx->mac_address)) { | 753 | if (is_valid_ether_addr(efx->mac_address)) { |
| 754 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); | 754 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 17afcd26e870..9d009c46e962 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
| @@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx) | |||
| 925 | 925 | ||
| 926 | static void falcon_remove_port(struct efx_nic *efx) | 926 | static void falcon_remove_port(struct efx_nic *efx) |
| 927 | { | 927 | { |
| 928 | efx->phy_op->remove(efx); | ||
| 928 | efx_nic_free_buffer(efx, &efx->stats_buffer); | 929 | efx_nic_free_buffer(efx, &efx->stats_buffer); |
| 929 | } | 930 | } |
| 930 | 931 | ||
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index 3da933f8f079..8ccab2c67a20 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
| @@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable) | |||
| 111 | efx_writeo(efx, ®, FR_AB_XM_MGT_INT_MASK); | 111 | efx_writeo(efx, ®, FR_AB_XM_MGT_INT_MASK); |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | /* Get status of XAUI link */ | 114 | static bool falcon_xgxs_link_ok(struct efx_nic *efx) |
| 115 | static bool falcon_xaui_link_ok(struct efx_nic *efx) | ||
| 116 | { | 115 | { |
| 117 | efx_oword_t reg; | 116 | efx_oword_t reg; |
| 118 | bool align_done, link_ok = false; | 117 | bool align_done, link_ok = false; |
| 119 | int sync_status; | 118 | int sync_status; |
| 120 | 119 | ||
| 121 | if (LOOPBACK_INTERNAL(efx)) | ||
| 122 | return true; | ||
| 123 | |||
| 124 | /* Read link status */ | 120 | /* Read link status */ |
| 125 | efx_reado(efx, ®, FR_AB_XX_CORE_STAT); | 121 | efx_reado(efx, ®, FR_AB_XX_CORE_STAT); |
| 126 | 122 | ||
| @@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx) | |||
| 135 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); | 131 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); |
| 136 | efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); | 132 | efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); |
| 137 | 133 | ||
| 138 | /* If the link is up, then check the phy side of the xaui link */ | ||
| 139 | if (efx->link_state.up && link_ok) | ||
| 140 | if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) | ||
| 141 | link_ok = efx_mdio_phyxgxs_lane_sync(efx); | ||
| 142 | |||
| 143 | return link_ok; | 134 | return link_ok; |
| 144 | } | 135 | } |
| 145 | 136 | ||
| 137 | static bool falcon_xmac_link_ok(struct efx_nic *efx) | ||
| 138 | { | ||
| 139 | /* | ||
| 140 | * Check MAC's XGXS link status except when using XGMII loopback | ||
| 141 | * which bypasses the XGXS block. | ||
| 142 | * If possible, check PHY's XGXS link status except when using | ||
| 143 | * MAC loopback. | ||
| 144 | */ | ||
| 145 | return (efx->loopback_mode == LOOPBACK_XGMII || | ||
| 146 | falcon_xgxs_link_ok(efx)) && | ||
| 147 | (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || | ||
| 148 | LOOPBACK_INTERNAL(efx) || | ||
| 149 | efx_mdio_phyxgxs_lane_sync(efx)); | ||
| 150 | } | ||
| 151 | |||
| 146 | void falcon_reconfigure_xmac_core(struct efx_nic *efx) | 152 | void falcon_reconfigure_xmac_core(struct efx_nic *efx) |
| 147 | { | 153 | { |
| 148 | unsigned int max_frame_len; | 154 | unsigned int max_frame_len; |
| @@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | |||
| 245 | 251 | ||
| 246 | 252 | ||
| 247 | /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ | 253 | /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ |
| 248 | static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries) | 254 | static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) |
| 249 | { | 255 | { |
| 250 | bool mac_up = falcon_xaui_link_ok(efx); | 256 | bool mac_up = falcon_xmac_link_ok(efx); |
| 251 | 257 | ||
| 252 | if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || | 258 | if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || |
| 253 | efx_phy_mode_disabled(efx->phy_mode)) | 259 | efx_phy_mode_disabled(efx->phy_mode)) |
| @@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries) | |||
| 261 | falcon_reset_xaui(efx); | 267 | falcon_reset_xaui(efx); |
| 262 | udelay(200); | 268 | udelay(200); |
| 263 | 269 | ||
| 264 | mac_up = falcon_xaui_link_ok(efx); | 270 | mac_up = falcon_xmac_link_ok(efx); |
| 265 | --tries; | 271 | --tries; |
| 266 | } | 272 | } |
| 267 | 273 | ||
| @@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries) | |||
| 272 | 278 | ||
| 273 | static bool falcon_xmac_check_fault(struct efx_nic *efx) | 279 | static bool falcon_xmac_check_fault(struct efx_nic *efx) |
| 274 | { | 280 | { |
| 275 | return !falcon_check_xaui_link_up(efx, 5); | 281 | return !falcon_xmac_link_ok_retry(efx, 5); |
| 276 | } | 282 | } |
| 277 | 283 | ||
| 278 | static int falcon_reconfigure_xmac(struct efx_nic *efx) | 284 | static int falcon_reconfigure_xmac(struct efx_nic *efx) |
| @@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx) | |||
| 284 | 290 | ||
| 285 | falcon_reconfigure_mac_wrapper(efx); | 291 | falcon_reconfigure_mac_wrapper(efx); |
| 286 | 292 | ||
| 287 | efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5); | 293 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); |
| 288 | falcon_mask_status_intr(efx, true); | 294 | falcon_mask_status_intr(efx, true); |
| 289 | 295 | ||
| 290 | return 0; | 296 | return 0; |
| @@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx) | |||
| 357 | return; | 363 | return; |
| 358 | 364 | ||
| 359 | falcon_mask_status_intr(efx, false); | 365 | falcon_mask_status_intr(efx, false); |
| 360 | efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1); | 366 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); |
| 361 | falcon_mask_status_intr(efx, true); | 367 | falcon_mask_status_intr(efx, true); |
| 362 | } | 368 | } |
| 363 | 369 | ||
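Editor's note on the falcon_xmac.c hunks above: falcon_xmac_link_ok_retry() (renamed from falcon_check_xaui_link_up()) keeps the existing retry structure — if the first check fails it resets the XAUI block and re-tests until the link comes up or the retry budget runs out. A simplified sketch of that loop, with the loopback/PHY-mode special cases elided (names and structure taken from the hunks, not a full implementation):

	/* Simplified sketch of the retry loop; the real function also skips
	 * retries for external/WS loopbacks and disabled PHY modes. */
	static bool link_ok_retry_sketch(struct efx_nic *efx, int tries)
	{
		bool mac_up = falcon_xmac_link_ok(efx);

		while (!mac_up && tries) {
			falcon_reset_xaui(efx);		/* kick the XAUI block */
			udelay(200);
			mac_up = falcon_xmac_link_ok(efx);
			--tries;
		}
		return mac_up;
	}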
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c index 0e1bcc5a0d52..eb694af7a473 100644 --- a/drivers/net/sfc/mcdi_phy.c +++ b/drivers/net/sfc/mcdi_phy.c | |||
| @@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media) | |||
| 304 | 304 | ||
| 305 | static int efx_mcdi_phy_probe(struct efx_nic *efx) | 305 | static int efx_mcdi_phy_probe(struct efx_nic *efx) |
| 306 | { | 306 | { |
| 307 | struct efx_mcdi_phy_cfg *phy_cfg; | 307 | struct efx_mcdi_phy_cfg *phy_data; |
| 308 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
| 309 | u32 caps; | ||
| 308 | int rc; | 310 | int rc; |
| 309 | 311 | ||
| 310 | /* TODO: Move phy_data initialisation to | 312 | /* Initialise and populate phy_data */ |
| 311 | * phy_op->probe/remove, rather than init/fini */ | 313 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
| 312 | phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL); | 314 | if (phy_data == NULL) |
| 313 | if (phy_cfg == NULL) { | 315 | return -ENOMEM; |
| 314 | rc = -ENOMEM; | 316 | |
| 315 | goto fail_alloc; | 317 | rc = efx_mcdi_get_phy_cfg(efx, phy_data); |
| 316 | } | ||
| 317 | rc = efx_mcdi_get_phy_cfg(efx, phy_cfg); | ||
| 318 | if (rc != 0) | 318 | if (rc != 0) |
| 319 | goto fail; | 319 | goto fail; |
| 320 | 320 | ||
| 321 | efx->phy_type = phy_cfg->type; | 321 | /* Read initial link advertisement */ |
| 322 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
| 323 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
| 324 | outbuf, sizeof(outbuf), NULL); | ||
| 325 | if (rc) | ||
| 326 | goto fail; | ||
| 327 | |||
| 328 | /* Fill out nic state */ | ||
| 329 | efx->phy_data = phy_data; | ||
| 330 | efx->phy_type = phy_data->type; | ||
| 322 | 331 | ||
| 323 | efx->mdio_bus = phy_cfg->channel; | 332 | efx->mdio_bus = phy_data->channel; |
| 324 | efx->mdio.prtad = phy_cfg->port; | 333 | efx->mdio.prtad = phy_data->port; |
| 325 | efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); | 334 | efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); |
| 326 | efx->mdio.mode_support = 0; | 335 | efx->mdio.mode_support = 0; |
| 327 | if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) | 336 | if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) |
| 328 | efx->mdio.mode_support |= MDIO_SUPPORTS_C22; | 337 | efx->mdio.mode_support |= MDIO_SUPPORTS_C22; |
| 329 | if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) | 338 | if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) |
| 330 | efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | 339 | efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; |
| 331 | 340 | ||
| 341 | caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); | ||
| 342 | if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
| 343 | efx->link_advertising = | ||
| 344 | mcdi_to_ethtool_cap(phy_data->media, caps); | ||
| 345 | else | ||
| 346 | phy_data->forced_cap = caps; | ||
| 347 | |||
| 332 | /* Assert that we can map efx -> mcdi loopback modes */ | 348 | /* Assert that we can map efx -> mcdi loopback modes */ |
| 333 | BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); | 349 | BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); |
| 334 | BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); | 350 | BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); |
| @@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) | |||
| 365 | * but by convention we don't */ | 381 | * but by convention we don't */ |
| 366 | efx->loopback_modes &= ~(1 << LOOPBACK_NONE); | 382 | efx->loopback_modes &= ~(1 << LOOPBACK_NONE); |
| 367 | 383 | ||
| 368 | kfree(phy_cfg); | ||
| 369 | |||
| 370 | return 0; | ||
| 371 | |||
| 372 | fail: | ||
| 373 | kfree(phy_cfg); | ||
| 374 | fail_alloc: | ||
| 375 | return rc; | ||
| 376 | } | ||
| 377 | |||
| 378 | static int efx_mcdi_phy_init(struct efx_nic *efx) | ||
| 379 | { | ||
| 380 | struct efx_mcdi_phy_cfg *phy_data; | ||
| 381 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | ||
| 382 | u32 caps; | ||
| 383 | int rc; | ||
| 384 | |||
| 385 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
| 386 | if (phy_data == NULL) | ||
| 387 | return -ENOMEM; | ||
| 388 | |||
| 389 | rc = efx_mcdi_get_phy_cfg(efx, phy_data); | ||
| 390 | if (rc != 0) | ||
| 391 | goto fail; | ||
| 392 | |||
| 393 | efx->phy_data = phy_data; | ||
| 394 | |||
| 395 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | ||
| 396 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | ||
| 397 | outbuf, sizeof(outbuf), NULL); | ||
| 398 | if (rc) | ||
| 399 | goto fail; | ||
| 400 | |||
| 401 | caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); | ||
| 402 | if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) | ||
| 403 | efx->link_advertising = | ||
| 404 | mcdi_to_ethtool_cap(phy_data->media, caps); | ||
| 405 | else | ||
| 406 | phy_data->forced_cap = caps; | ||
| 407 | |||
| 408 | return 0; | 384 | return 0; |
| 409 | 385 | ||
| 410 | fail: | 386 | fail: |
| @@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx) | |||
| 504 | return !efx_link_state_equal(&efx->link_state, &old_state); | 480 | return !efx_link_state_equal(&efx->link_state, &old_state); |
| 505 | } | 481 | } |
| 506 | 482 | ||
| 507 | static void efx_mcdi_phy_fini(struct efx_nic *efx) | 483 | static void efx_mcdi_phy_remove(struct efx_nic *efx) |
| 508 | { | 484 | { |
| 509 | struct efx_mcdi_phy_data *phy_data = efx->phy_data; | 485 | struct efx_mcdi_phy_data *phy_data = efx->phy_data; |
| 510 | 486 | ||
| @@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec | |||
| 586 | 562 | ||
| 587 | struct efx_phy_operations efx_mcdi_phy_ops = { | 563 | struct efx_phy_operations efx_mcdi_phy_ops = { |
| 588 | .probe = efx_mcdi_phy_probe, | 564 | .probe = efx_mcdi_phy_probe, |
| 589 | .init = efx_mcdi_phy_init, | 565 | .init = efx_port_dummy_op_int, |
| 590 | .reconfigure = efx_mcdi_phy_reconfigure, | 566 | .reconfigure = efx_mcdi_phy_reconfigure, |
| 591 | .poll = efx_mcdi_phy_poll, | 567 | .poll = efx_mcdi_phy_poll, |
| 592 | .fini = efx_mcdi_phy_fini, | 568 | .fini = efx_port_dummy_op_void, |
| 569 | .remove = efx_mcdi_phy_remove, | ||
| 593 | .get_settings = efx_mcdi_phy_get_settings, | 570 | .get_settings = efx_mcdi_phy_get_settings, |
| 594 | .set_settings = efx_mcdi_phy_set_settings, | 571 | .set_settings = efx_mcdi_phy_set_settings, |
| 595 | .run_tests = NULL, | 572 | .run_tests = NULL, |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 34c381f009b7..d5aab5b3fa06 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
| @@ -524,6 +524,7 @@ struct efx_phy_operations { | |||
| 524 | int (*probe) (struct efx_nic *efx); | 524 | int (*probe) (struct efx_nic *efx); |
| 525 | int (*init) (struct efx_nic *efx); | 525 | int (*init) (struct efx_nic *efx); |
| 526 | void (*fini) (struct efx_nic *efx); | 526 | void (*fini) (struct efx_nic *efx); |
| 527 | void (*remove) (struct efx_nic *efx); | ||
| 527 | int (*reconfigure) (struct efx_nic *efx); | 528 | int (*reconfigure) (struct efx_nic *efx); |
| 528 | bool (*poll) (struct efx_nic *efx); | 529 | bool (*poll) (struct efx_nic *efx); |
| 529 | void (*get_settings) (struct efx_nic *efx, | 530 | void (*get_settings) (struct efx_nic *efx, |
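Editor's note: the new remove hook completes a probe/remove pair alongside the existing init/fini pair, which is why the sfc PHY drivers in this series move their private-data allocation to probe and their freeing to remove, leaving init/fini for runtime bring-up only. A minimal sketch of the intended wiring, assuming the efx_port_dummy_op_* helpers keep their existing no-op signatures (the example_phy_* names are hypothetical):

	/* Sketch only: allocation in probe, freeing in remove; stages with
	 * nothing to do point at the existing dummy ops. */
	static int example_phy_probe(struct efx_nic *efx)
	{
		struct example_phy_data *phy_data;

		phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
		if (!phy_data)
			return -ENOMEM;
		efx->phy_data = phy_data;
		return 0;
	}

	static void example_phy_remove(struct efx_nic *efx)
	{
		kfree(efx->phy_data);
		efx->phy_data = NULL;
	}

	struct efx_phy_operations example_phy_ops = {
		.probe	= example_phy_probe,
		.init	= efx_port_dummy_op_int,	/* nothing to do at init */
		.fini	= efx_port_dummy_op_void,	/* nothing to do at fini */
		.remove	= example_phy_remove,
	};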
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index a577be227862..db44224ed2ca 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
| @@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx) | |||
| 1576 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | 1576 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
| 1577 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 1577 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
| 1578 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | 1578 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); |
| 1579 | /* Disable hardware watchdog which can misfire */ | ||
| 1580 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | ||
| 1579 | /* Squash TX of packets of 16 bytes or less */ | 1581 | /* Squash TX of packets of 16 bytes or less */ |
| 1580 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1582 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
| 1581 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | 1583 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index 3800fc791b2f..ff8f0a417fa3 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c | |||
| @@ -33,6 +33,9 @@ | |||
| 33 | #define PCS_FW_HEARTBEAT_REG 0xd7ee | 33 | #define PCS_FW_HEARTBEAT_REG 0xd7ee |
| 34 | #define PCS_FW_HEARTB_LBN 0 | 34 | #define PCS_FW_HEARTB_LBN 0 |
| 35 | #define PCS_FW_HEARTB_WIDTH 8 | 35 | #define PCS_FW_HEARTB_WIDTH 8 |
| 36 | #define PCS_FW_PRODUCT_CODE_1 0xd7f0 | ||
| 37 | #define PCS_FW_VERSION_1 0xd7f3 | ||
| 38 | #define PCS_FW_BUILD_1 0xd7f6 | ||
| 36 | #define PCS_UC8051_STATUS_REG 0xd7fd | 39 | #define PCS_UC8051_STATUS_REG 0xd7fd |
| 37 | #define PCS_UC_STATUS_LBN 0 | 40 | #define PCS_UC_STATUS_LBN 0 |
| 38 | #define PCS_UC_STATUS_WIDTH 8 | 41 | #define PCS_UC_STATUS_WIDTH 8 |
| @@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode) | |||
| 52 | 55 | ||
| 53 | struct qt202x_phy_data { | 56 | struct qt202x_phy_data { |
| 54 | enum efx_phy_mode phy_mode; | 57 | enum efx_phy_mode phy_mode; |
| 58 | bool bug17190_in_bad_state; | ||
| 59 | unsigned long bug17190_timer; | ||
| 60 | u32 firmware_ver; | ||
| 55 | }; | 61 | }; |
| 56 | 62 | ||
| 57 | #define QT2022C2_MAX_RESET_TIME 500 | 63 | #define QT2022C2_MAX_RESET_TIME 500 |
| 58 | #define QT2022C2_RESET_WAIT 10 | 64 | #define QT2022C2_RESET_WAIT 10 |
| 59 | 65 | ||
| 60 | static int qt2025c_wait_reset(struct efx_nic *efx) | 66 | #define QT2025C_MAX_HEARTB_TIME (5 * HZ) |
| 67 | #define QT2025C_HEARTB_WAIT 100 | ||
| 68 | #define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10) | ||
| 69 | #define QT2025C_FWSTART_WAIT 100 | ||
| 70 | |||
| 71 | #define BUG17190_INTERVAL (2 * HZ) | ||
| 72 | |||
| 73 | static int qt2025c_wait_heartbeat(struct efx_nic *efx) | ||
| 61 | { | 74 | { |
| 62 | unsigned long timeout = jiffies + 10 * HZ; | 75 | unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME; |
| 63 | int reg, old_counter = 0; | 76 | int reg, old_counter = 0; |
| 64 | 77 | ||
| 65 | /* Wait for firmware heartbeat to start */ | 78 | /* Wait for firmware heartbeat to start */ |
| @@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx) | |||
| 74 | old_counter = counter; | 87 | old_counter = counter; |
| 75 | else if (counter != old_counter) | 88 | else if (counter != old_counter) |
| 76 | break; | 89 | break; |
| 77 | if (time_after(jiffies, timeout)) | 90 | if (time_after(jiffies, timeout)) { |
| 91 | /* Some cables have EEPROMs that conflict with the | ||
| 92 | * PHY's on-board EEPROM so it cannot load firmware */ | ||
| 93 | EFX_ERR(efx, "If an SFP+ direct attach cable is" | ||
| 94 | " connected, please check that it complies" | ||
| 95 | " with the SFP+ specification\n"); | ||
| 78 | return -ETIMEDOUT; | 96 | return -ETIMEDOUT; |
| 79 | msleep(10); | 97 | } |
| 98 | msleep(QT2025C_HEARTB_WAIT); | ||
| 80 | } | 99 | } |
| 81 | 100 | ||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static int qt2025c_wait_fw_status_good(struct efx_nic *efx) | ||
| 105 | { | ||
| 106 | unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME; | ||
| 107 | int reg; | ||
| 108 | |||
| 82 | /* Wait for firmware status to look good */ | 109 | /* Wait for firmware status to look good */ |
| 83 | for (;;) { | 110 | for (;;) { |
| 84 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); | 111 | reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); |
| @@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx) | |||
| 90 | break; | 117 | break; |
| 91 | if (time_after(jiffies, timeout)) | 118 | if (time_after(jiffies, timeout)) |
| 92 | return -ETIMEDOUT; | 119 | return -ETIMEDOUT; |
| 120 | msleep(QT2025C_FWSTART_WAIT); | ||
| 121 | } | ||
| 122 | |||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | static void qt2025c_restart_firmware(struct efx_nic *efx) | ||
| 127 | { | ||
| 128 | /* Restart microcontroller execution of firmware from RAM */ | ||
| 129 | efx_mdio_write(efx, 3, 0xe854, 0x00c0); | ||
| 130 | efx_mdio_write(efx, 3, 0xe854, 0x0040); | ||
| 131 | msleep(50); | ||
| 132 | } | ||
| 133 | |||
| 134 | static int qt2025c_wait_reset(struct efx_nic *efx) | ||
| 135 | { | ||
| 136 | int rc; | ||
| 137 | |||
| 138 | rc = qt2025c_wait_heartbeat(efx); | ||
| 139 | if (rc != 0) | ||
| 140 | return rc; | ||
| 141 | |||
| 142 | rc = qt2025c_wait_fw_status_good(efx); | ||
| 143 | if (rc == -ETIMEDOUT) { | ||
| 144 | /* Bug 17689: occasionally heartbeat starts but firmware status | ||
| 145 | * code never progresses beyond 0x00. Try again, once, after | ||
| 146 | * restarting execution of the firmware image. */ | ||
| 147 | EFX_LOG(efx, "bashing QT2025C microcontroller\n"); | ||
| 148 | qt2025c_restart_firmware(efx); | ||
| 149 | rc = qt2025c_wait_heartbeat(efx); | ||
| 150 | if (rc != 0) | ||
| 151 | return rc; | ||
| 152 | rc = qt2025c_wait_fw_status_good(efx); | ||
| 153 | } | ||
| 154 | |||
| 155 | return rc; | ||
| 156 | } | ||
| 157 | |||
| 158 | static void qt2025c_firmware_id(struct efx_nic *efx) | ||
| 159 | { | ||
| 160 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
| 161 | u8 firmware_id[9]; | ||
| 162 | size_t i; | ||
| 163 | |||
| 164 | for (i = 0; i < sizeof(firmware_id); i++) | ||
| 165 | firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, | ||
| 166 | PCS_FW_PRODUCT_CODE_1 + i); | ||
| 167 | EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", | ||
| 168 | (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], | ||
| 169 | firmware_id[3] >> 4, firmware_id[3] & 0xf, | ||
| 170 | firmware_id[4], firmware_id[5], | ||
| 171 | firmware_id[6], firmware_id[7], firmware_id[8]); | ||
| 172 | phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | | ||
| 173 | ((firmware_id[3] & 0x0f) << 16) | | ||
| 174 | (firmware_id[4] << 8) | firmware_id[5]; | ||
| 175 | } | ||
| 176 | |||
| 177 | static void qt2025c_bug17190_workaround(struct efx_nic *efx) | ||
| 178 | { | ||
| 179 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
| 180 | |||
| 181 | /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD | ||
| 182 | * layers up, but PCS down (no block_lock). If we notice this state | ||
| 183 | * persisting for a couple of seconds, we switch PMA/PMD loopback | ||
| 184 | * briefly on and then off again, which is normally sufficient to | ||
| 185 | * recover it. | ||
| 186 | */ | ||
| 187 | if (efx->link_state.up || | ||
| 188 | !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) { | ||
| 189 | phy_data->bug17190_in_bad_state = false; | ||
| 190 | return; | ||
| 191 | } | ||
| 192 | |||
| 193 | if (!phy_data->bug17190_in_bad_state) { | ||
| 194 | phy_data->bug17190_in_bad_state = true; | ||
| 195 | phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; | ||
| 196 | return; | ||
| 197 | } | ||
| 198 | |||
| 199 | if (time_after_eq(jiffies, phy_data->bug17190_timer)) { | ||
| 200 | EFX_LOG(efx, "bashing QT2025C PMA/PMD\n"); | ||
| 201 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, | ||
| 202 | MDIO_PMA_CTRL1_LOOPBACK, true); | ||
| 93 | msleep(100); | 203 | msleep(100); |
| 204 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, | ||
| 205 | MDIO_PMA_CTRL1_LOOPBACK, false); | ||
| 206 | phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; | ||
| 207 | } | ||
| 208 | } | ||
| 209 | |||
| 210 | static int qt2025c_select_phy_mode(struct efx_nic *efx) | ||
| 211 | { | ||
| 212 | struct qt202x_phy_data *phy_data = efx->phy_data; | ||
| 213 | struct falcon_board *board = falcon_board(efx); | ||
| 214 | int reg, rc, i; | ||
| 215 | uint16_t phy_op_mode; | ||
| 216 | |||
| 217 | /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+ | ||
| 218 | * Self-Configure mode. Don't attempt any switching if we encounter | ||
| 219 | * older firmware. */ | ||
| 220 | if (phy_data->firmware_ver < 0x02000100) | ||
| 221 | return 0; | ||
| 222 | |||
| 223 | /* In general we will get optimal behaviour in "SFP+ Self-Configure" | ||
| 224 | * mode; however, that powers down most of the PHY when no module is | ||
| 225 | * present, so we must use a different mode (any fixed mode will do) | ||
| 226 | * to be sure that loopbacks will work. */ | ||
| 227 | phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020; | ||
| 228 | |||
| 229 | /* Only change mode if really necessary */ | ||
| 230 | reg = efx_mdio_read(efx, 1, 0xc319); | ||
| 231 | if ((reg & 0x0038) == phy_op_mode) | ||
| 232 | return 0; | ||
| 233 | EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode); | ||
| 234 | |||
| 235 | /* This sequence replicates the register writes configured in the boot | ||
| 236 | * EEPROM (including the differences between board revisions), except | ||
| 237 | * that the operating mode is changed, and the PHY is prevented from | ||
| 238 | * unnecessarily reloading the main firmware image again. */ | ||
| 239 | efx_mdio_write(efx, 1, 0xc300, 0x0000); | ||
| 240 | /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9 | ||
| 241 | * STOPs onto the firmware/module I2C bus to reset it, varies across | ||
| 242 | * board revisions, as the bus is connected to different GPIO/LED | ||
| 243 | * outputs on the PHY.) */ | ||
| 244 | if (board->major == 0 && board->minor < 2) { | ||
| 245 | efx_mdio_write(efx, 1, 0xc303, 0x4498); | ||
| 246 | for (i = 0; i < 9; i++) { | ||
| 247 | efx_mdio_write(efx, 1, 0xc303, 0x4488); | ||
| 248 | efx_mdio_write(efx, 1, 0xc303, 0x4480); | ||
| 249 | efx_mdio_write(efx, 1, 0xc303, 0x4490); | ||
| 250 | efx_mdio_write(efx, 1, 0xc303, 0x4498); | ||
| 251 | } | ||
| 252 | } else { | ||
| 253 | efx_mdio_write(efx, 1, 0xc303, 0x0920); | ||
| 254 | efx_mdio_write(efx, 1, 0xd008, 0x0004); | ||
| 255 | for (i = 0; i < 9; i++) { | ||
| 256 | efx_mdio_write(efx, 1, 0xc303, 0x0900); | ||
| 257 | efx_mdio_write(efx, 1, 0xd008, 0x0005); | ||
| 258 | efx_mdio_write(efx, 1, 0xc303, 0x0920); | ||
| 259 | efx_mdio_write(efx, 1, 0xd008, 0x0004); | ||
| 260 | } | ||
| 261 | efx_mdio_write(efx, 1, 0xc303, 0x4900); | ||
| 262 | } | ||
| 263 | efx_mdio_write(efx, 1, 0xc303, 0x4900); | ||
| 264 | efx_mdio_write(efx, 1, 0xc302, 0x0004); | ||
| 265 | efx_mdio_write(efx, 1, 0xc316, 0x0013); | ||
| 266 | efx_mdio_write(efx, 1, 0xc318, 0x0054); | ||
| 267 | efx_mdio_write(efx, 1, 0xc319, phy_op_mode); | ||
| 268 | efx_mdio_write(efx, 1, 0xc31a, 0x0098); | ||
| 269 | efx_mdio_write(efx, 3, 0x0026, 0x0e00); | ||
| 270 | efx_mdio_write(efx, 3, 0x0027, 0x0013); | ||
| 271 | efx_mdio_write(efx, 3, 0x0028, 0xa528); | ||
| 272 | efx_mdio_write(efx, 1, 0xd006, 0x000a); | ||
| 273 | efx_mdio_write(efx, 1, 0xd007, 0x0009); | ||
| 274 | efx_mdio_write(efx, 1, 0xd008, 0x0004); | ||
| 275 | /* This additional write is not present in the boot EEPROM. It | ||
| 276 | * prevents the PHY's internal boot ROM doing another pointless (and | ||
| 277 | * slow) reload of the firmware image (the microcontroller's code | ||
| 278 | * memory is not affected by the microcontroller reset). */ | ||
| 279 | efx_mdio_write(efx, 1, 0xc317, 0x00ff); | ||
| 280 | efx_mdio_write(efx, 1, 0xc300, 0x0002); | ||
| 281 | msleep(20); | ||
| 282 | |||
| 283 | /* Restart microcontroller execution of firmware from RAM */ | ||
| 284 | qt2025c_restart_firmware(efx); | ||
| 285 | |||
| 286 | /* Wait for the microcontroller to be ready again */ | ||
| 287 | rc = qt2025c_wait_reset(efx); | ||
| 288 | if (rc < 0) { | ||
| 289 | EFX_ERR(efx, "PHY microcontroller reset during mode switch " | ||
| 290 | "timed out\n"); | ||
| 291 | return rc; | ||
| 94 | } | 292 | } |
| 95 | 293 | ||
| 96 | return 0; | 294 | return 0; |
| @@ -137,6 +335,16 @@ static int qt202x_reset_phy(struct efx_nic *efx) | |||
| 137 | 335 | ||
| 138 | static int qt202x_phy_probe(struct efx_nic *efx) | 336 | static int qt202x_phy_probe(struct efx_nic *efx) |
| 139 | { | 337 | { |
| 338 | struct qt202x_phy_data *phy_data; | ||
| 339 | |||
| 340 | phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); | ||
| 341 | if (!phy_data) | ||
| 342 | return -ENOMEM; | ||
| 343 | efx->phy_data = phy_data; | ||
| 344 | phy_data->phy_mode = efx->phy_mode; | ||
| 345 | phy_data->bug17190_in_bad_state = false; | ||
| 346 | phy_data->bug17190_timer = 0; | ||
| 347 | |||
| 140 | efx->mdio.mmds = QT202X_REQUIRED_DEVS; | 348 | efx->mdio.mmds = QT202X_REQUIRED_DEVS; |
| 141 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | 349 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; |
| 142 | efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | 350 | efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; |
| @@ -145,7 +353,6 @@ static int qt202x_phy_probe(struct efx_nic *efx) | |||
| 145 | 353 | ||
| 146 | static int qt202x_phy_init(struct efx_nic *efx) | 354 | static int qt202x_phy_init(struct efx_nic *efx) |
| 147 | { | 355 | { |
| 148 | struct qt202x_phy_data *phy_data; | ||
| 149 | u32 devid; | 356 | u32 devid; |
| 150 | int rc; | 357 | int rc; |
| 151 | 358 | ||
| @@ -155,17 +362,14 @@ static int qt202x_phy_init(struct efx_nic *efx) | |||
| 155 | return rc; | 362 | return rc; |
| 156 | } | 363 | } |
| 157 | 364 | ||
| 158 | phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); | ||
| 159 | if (!phy_data) | ||
| 160 | return -ENOMEM; | ||
| 161 | efx->phy_data = phy_data; | ||
| 162 | |||
| 163 | devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); | 365 | devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); |
| 164 | EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", | 366 | EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", |
| 165 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), | 367 | devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), |
| 166 | efx_mdio_id_rev(devid)); | 368 | efx_mdio_id_rev(devid)); |
| 167 | 369 | ||
| 168 | phy_data->phy_mode = efx->phy_mode; | 370 | if (efx->phy_type == PHY_TYPE_QT2025C) |
| 371 | qt2025c_firmware_id(efx); | ||
| 372 | |||
| 169 | return 0; | 373 | return 0; |
| 170 | } | 374 | } |
| 171 | 375 | ||
| @@ -183,6 +387,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx) | |||
| 183 | efx->link_state.fd = true; | 387 | efx->link_state.fd = true; |
| 184 | efx->link_state.fc = efx->wanted_fc; | 388 | efx->link_state.fc = efx->wanted_fc; |
| 185 | 389 | ||
| 390 | if (efx->phy_type == PHY_TYPE_QT2025C) | ||
| 391 | qt2025c_bug17190_workaround(efx); | ||
| 392 | |||
| 186 | return efx->link_state.up != was_up; | 393 | return efx->link_state.up != was_up; |
| 187 | } | 394 | } |
| 188 | 395 | ||
| @@ -191,6 +398,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx) | |||
| 191 | struct qt202x_phy_data *phy_data = efx->phy_data; | 398 | struct qt202x_phy_data *phy_data = efx->phy_data; |
| 192 | 399 | ||
| 193 | if (efx->phy_type == PHY_TYPE_QT2025C) { | 400 | if (efx->phy_type == PHY_TYPE_QT2025C) { |
| 401 | int rc = qt2025c_select_phy_mode(efx); | ||
| 402 | if (rc) | ||
| 403 | return rc; | ||
| 404 | |||
| 194 | /* There are several different register bits which can | 405 | /* There are several different register bits which can |
| 195 | * disable TX (and save power) on direct-attach cables | 406 | * disable TX (and save power) on direct-attach cables |
| 196 | * or optical transceivers, varying somewhat between | 407 | * or optical transceivers, varying somewhat between |
| @@ -224,7 +435,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm | |||
| 224 | mdio45_ethtool_gset(&efx->mdio, ecmd); | 435 | mdio45_ethtool_gset(&efx->mdio, ecmd); |
| 225 | } | 436 | } |
| 226 | 437 | ||
| 227 | static void qt202x_phy_fini(struct efx_nic *efx) | 438 | static void qt202x_phy_remove(struct efx_nic *efx) |
| 228 | { | 439 | { |
| 229 | /* Free the context block */ | 440 | /* Free the context block */ |
| 230 | kfree(efx->phy_data); | 441 | kfree(efx->phy_data); |
| @@ -236,7 +447,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = { | |||
| 236 | .init = qt202x_phy_init, | 447 | .init = qt202x_phy_init, |
| 237 | .reconfigure = qt202x_phy_reconfigure, | 448 | .reconfigure = qt202x_phy_reconfigure, |
| 238 | .poll = qt202x_phy_poll, | 449 | .poll = qt202x_phy_poll, |
| 239 | .fini = qt202x_phy_fini, | 450 | .fini = efx_port_dummy_op_void, |
| 451 | .remove = qt202x_phy_remove, | ||
| 240 | .get_settings = qt202x_phy_get_settings, | 452 | .get_settings = qt202x_phy_get_settings, |
| 241 | .set_settings = efx_mdio_set_settings, | 453 | .set_settings = efx_mdio_set_settings, |
| 242 | }; | 454 | }; |
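Editor's note: the two new QT2025C wait helpers above follow the usual jiffies-based poll-with-timeout idiom, and the bug 17190 workaround uses the same clock to rate-limit its PMA/PMD loopback "bash". A generic sketch of that polling pattern, with the condition and interval left as placeholders rather than the driver's real register reads:

	/* Generic poll-until-timeout sketch: test a condition, sleep between
	 * polls, and give up once the jiffies deadline passes. */
	static int wait_for_condition(struct efx_nic *efx,
				      bool (*cond)(struct efx_nic *efx),
				      unsigned long max_wait,
				      unsigned int interval_ms)
	{
		unsigned long timeout = jiffies + max_wait;

		for (;;) {
			if (cond(efx))
				return 0;
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			msleep(interval_ms);
		}
	}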
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index de07a4f031b2..f8c6771e66d8 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
| @@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx) | |||
| 133 | 133 | ||
| 134 | void siena_remove_port(struct efx_nic *efx) | 134 | void siena_remove_port(struct efx_nic *efx) |
| 135 | { | 135 | { |
| 136 | efx->phy_op->remove(efx); | ||
| 136 | efx_nic_free_buffer(efx, &efx->stats_buffer); | 137 | efx_nic_free_buffer(efx, &efx->stats_buffer); |
| 137 | } | 138 | } |
| 138 | 139 | ||
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index ca11572a49a9..3009c297c135 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
| @@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev, | |||
| 202 | int rc; | 202 | int rc; |
| 203 | 203 | ||
| 204 | rtnl_lock(); | 204 | rtnl_lock(); |
| 205 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, | 205 | if (efx->state != STATE_RUNNING) { |
| 206 | MDIO_PMA_10GBT_TXPWR_SHORT, | 206 | rc = -EBUSY; |
| 207 | count != 0 && *buf != '0'); | 207 | } else { |
| 208 | rc = efx_reconfigure_port(efx); | 208 | efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, |
| 209 | MDIO_PMA_10GBT_TXPWR_SHORT, | ||
| 210 | count != 0 && *buf != '0'); | ||
| 211 | rc = efx_reconfigure_port(efx); | ||
| 212 | } | ||
| 209 | rtnl_unlock(); | 213 | rtnl_unlock(); |
| 210 | 214 | ||
| 211 | return rc < 0 ? rc : (ssize_t)count; | 215 | return rc < 0 ? rc : (ssize_t)count; |
| @@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx) | |||
| 298 | return 0; | 302 | return 0; |
| 299 | } | 303 | } |
| 300 | 304 | ||
| 301 | static int sfx7101_phy_probe(struct efx_nic *efx) | 305 | static int tenxpress_phy_probe(struct efx_nic *efx) |
| 302 | { | 306 | { |
| 303 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | 307 | struct tenxpress_phy_data *phy_data; |
| 304 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | 308 | int rc; |
| 305 | efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | 309 | |
| 306 | return 0; | 310 | /* Allocate phy private storage */ |
| 307 | } | 311 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
| 312 | if (!phy_data) | ||
| 313 | return -ENOMEM; | ||
| 314 | efx->phy_data = phy_data; | ||
| 315 | phy_data->phy_mode = efx->phy_mode; | ||
| 316 | |||
| 317 | /* Create any special files */ | ||
| 318 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | ||
| 319 | rc = device_create_file(&efx->pci_dev->dev, | ||
| 320 | &dev_attr_phy_short_reach); | ||
| 321 | if (rc) | ||
| 322 | goto fail; | ||
| 323 | } | ||
| 324 | |||
| 325 | if (efx->phy_type == PHY_TYPE_SFX7101) { | ||
| 326 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | ||
| 327 | efx->mdio.mode_support = MDIO_SUPPORTS_C45; | ||
| 328 | |||
| 329 | efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; | ||
| 330 | |||
| 331 | efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | | ||
| 332 | ADVERTISED_10000baseT_Full); | ||
| 333 | } else { | ||
| 334 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | ||
| 335 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
| 336 | |||
| 337 | efx->loopback_modes = (SFT9001_LOOPBACKS | | ||
| 338 | FALCON_XMAC_LOOPBACKS | | ||
| 339 | FALCON_GMAC_LOOPBACKS); | ||
| 340 | |||
| 341 | efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | | ||
| 342 | ADVERTISED_10000baseT_Full | | ||
| 343 | ADVERTISED_1000baseT_Full | | ||
| 344 | ADVERTISED_100baseT_Full); | ||
| 345 | } | ||
| 308 | 346 | ||
| 309 | static int sft9001_phy_probe(struct efx_nic *efx) | ||
| 310 | { | ||
| 311 | efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; | ||
| 312 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
| 313 | efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS | | ||
| 314 | FALCON_GMAC_LOOPBACKS); | ||
| 315 | return 0; | 347 | return 0; |
| 348 | |||
| 349 | fail: | ||
| 350 | kfree(efx->phy_data); | ||
| 351 | efx->phy_data = NULL; | ||
| 352 | return rc; | ||
| 316 | } | 353 | } |
| 317 | 354 | ||
| 318 | static int tenxpress_phy_init(struct efx_nic *efx) | 355 | static int tenxpress_phy_init(struct efx_nic *efx) |
| 319 | { | 356 | { |
| 320 | struct tenxpress_phy_data *phy_data; | 357 | int rc; |
| 321 | int rc = 0; | ||
| 322 | 358 | ||
| 323 | falcon_board(efx)->type->init_phy(efx); | 359 | falcon_board(efx)->type->init_phy(efx); |
| 324 | 360 | ||
| 325 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
| 326 | if (!phy_data) | ||
| 327 | return -ENOMEM; | ||
| 328 | efx->phy_data = phy_data; | ||
| 329 | phy_data->phy_mode = efx->phy_mode; | ||
| 330 | |||
| 331 | if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { | 361 | if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { |
| 332 | if (efx->phy_type == PHY_TYPE_SFT9001A) { | 362 | if (efx->phy_type == PHY_TYPE_SFT9001A) { |
| 333 | int reg; | 363 | int reg; |
| @@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
| 341 | 371 | ||
| 342 | rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); | 372 | rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); |
| 343 | if (rc < 0) | 373 | if (rc < 0) |
| 344 | goto fail; | 374 | return rc; |
| 345 | 375 | ||
| 346 | rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); | 376 | rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); |
| 347 | if (rc < 0) | 377 | if (rc < 0) |
| 348 | goto fail; | 378 | return rc; |
| 349 | } | 379 | } |
| 350 | 380 | ||
| 351 | rc = tenxpress_init(efx); | 381 | rc = tenxpress_init(efx); |
| 352 | if (rc < 0) | 382 | if (rc < 0) |
| 353 | goto fail; | 383 | return rc; |
| 354 | 384 | ||
| 355 | /* Initialise advertising flags */ | 385 | /* Reinitialise flow control settings */ |
| 356 | efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | | ||
| 357 | ADVERTISED_10000baseT_Full); | ||
| 358 | if (efx->phy_type != PHY_TYPE_SFX7101) | ||
| 359 | efx->link_advertising |= (ADVERTISED_1000baseT_Full | | ||
| 360 | ADVERTISED_100baseT_Full); | ||
| 361 | efx_link_set_wanted_fc(efx, efx->wanted_fc); | 386 | efx_link_set_wanted_fc(efx, efx->wanted_fc); |
| 362 | efx_mdio_an_reconfigure(efx); | 387 | efx_mdio_an_reconfigure(efx); |
| 363 | 388 | ||
| 364 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | ||
| 365 | rc = device_create_file(&efx->pci_dev->dev, | ||
| 366 | &dev_attr_phy_short_reach); | ||
| 367 | if (rc) | ||
| 368 | goto fail; | ||
| 369 | } | ||
| 370 | |||
| 371 | schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ | 389 | schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ |
| 372 | 390 | ||
| 373 | /* Let XGXS and SerDes out of reset */ | 391 | /* Let XGXS and SerDes out of reset */ |
| 374 | falcon_reset_xaui(efx); | 392 | falcon_reset_xaui(efx); |
| 375 | 393 | ||
| 376 | return 0; | 394 | return 0; |
| 377 | |||
| 378 | fail: | ||
| 379 | kfree(efx->phy_data); | ||
| 380 | efx->phy_data = NULL; | ||
| 381 | return rc; | ||
| 382 | } | 395 | } |
| 383 | 396 | ||
| 384 | /* Perform a "special software reset" on the PHY. The caller is | 397 | /* Perform a "special software reset" on the PHY. The caller is |
| @@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx) | |||
| 589 | return !efx_link_state_equal(&efx->link_state, &old_state); | 602 | return !efx_link_state_equal(&efx->link_state, &old_state); |
| 590 | } | 603 | } |
| 591 | 604 | ||
| 592 | static void tenxpress_phy_fini(struct efx_nic *efx) | 605 | static void sfx7101_phy_fini(struct efx_nic *efx) |
| 593 | { | 606 | { |
| 594 | int reg; | 607 | int reg; |
| 595 | 608 | ||
| 609 | /* Power down the LNPGA */ | ||
| 610 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); | ||
| 611 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); | ||
| 612 | |||
| 613 | /* Waiting here ensures that the board fini, which can turn | ||
| 614 | * off the power to the PHY, won't get run until the LNPGA | ||
| 615 | * powerdown has been given long enough to complete. */ | ||
| 616 | schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ | ||
| 617 | } | ||
| 618 | |||
| 619 | static void tenxpress_phy_remove(struct efx_nic *efx) | ||
| 620 | { | ||
| 596 | if (efx->phy_type == PHY_TYPE_SFT9001B) | 621 | if (efx->phy_type == PHY_TYPE_SFT9001B) |
| 597 | device_remove_file(&efx->pci_dev->dev, | 622 | device_remove_file(&efx->pci_dev->dev, |
| 598 | &dev_attr_phy_short_reach); | 623 | &dev_attr_phy_short_reach); |
| 599 | 624 | ||
| 600 | if (efx->phy_type == PHY_TYPE_SFX7101) { | ||
| 601 | /* Power down the LNPGA */ | ||
| 602 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); | ||
| 603 | efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); | ||
| 604 | |||
| 605 | /* Waiting here ensures that the board fini, which can turn | ||
| 606 | * off the power to the PHY, won't get run until the LNPGA | ||
| 607 | * powerdown has been given long enough to complete. */ | ||
| 608 | schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ | ||
| 609 | } | ||
| 610 | |||
| 611 | kfree(efx->phy_data); | 625 | kfree(efx->phy_data); |
| 612 | efx->phy_data = NULL; | 626 | efx->phy_data = NULL; |
| 613 | } | 627 | } |
| @@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising) | |||
| 819 | } | 833 | } |
| 820 | 834 | ||
| 821 | struct efx_phy_operations falcon_sfx7101_phy_ops = { | 835 | struct efx_phy_operations falcon_sfx7101_phy_ops = { |
| 822 | .probe = sfx7101_phy_probe, | 836 | .probe = tenxpress_phy_probe, |
| 823 | .init = tenxpress_phy_init, | 837 | .init = tenxpress_phy_init, |
| 824 | .reconfigure = tenxpress_phy_reconfigure, | 838 | .reconfigure = tenxpress_phy_reconfigure, |
| 825 | .poll = tenxpress_phy_poll, | 839 | .poll = tenxpress_phy_poll, |
| 826 | .fini = tenxpress_phy_fini, | 840 | .fini = sfx7101_phy_fini, |
| 841 | .remove = tenxpress_phy_remove, | ||
| 827 | .get_settings = tenxpress_get_settings, | 842 | .get_settings = tenxpress_get_settings, |
| 828 | .set_settings = tenxpress_set_settings, | 843 | .set_settings = tenxpress_set_settings, |
| 829 | .set_npage_adv = sfx7101_set_npage_adv, | 844 | .set_npage_adv = sfx7101_set_npage_adv, |
| @@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = { | |||
| 832 | }; | 847 | }; |
| 833 | 848 | ||
| 834 | struct efx_phy_operations falcon_sft9001_phy_ops = { | 849 | struct efx_phy_operations falcon_sft9001_phy_ops = { |
| 835 | .probe = sft9001_phy_probe, | 850 | .probe = tenxpress_phy_probe, |
| 836 | .init = tenxpress_phy_init, | 851 | .init = tenxpress_phy_init, |
| 837 | .reconfigure = tenxpress_phy_reconfigure, | 852 | .reconfigure = tenxpress_phy_reconfigure, |
| 838 | .poll = tenxpress_phy_poll, | 853 | .poll = tenxpress_phy_poll, |
| 839 | .fini = tenxpress_phy_fini, | 854 | .fini = efx_port_dummy_op_void, |
| 855 | .remove = tenxpress_phy_remove, | ||
| 840 | .get_settings = tenxpress_get_settings, | 856 | .get_settings = tenxpress_get_settings, |
| 841 | .set_settings = tenxpress_set_settings, | 857 | .set_settings = tenxpress_set_settings, |
| 842 | .set_npage_adv = sft9001_set_npage_adv, | 858 | .set_npage_adv = sft9001_set_npage_adv, |
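Editor's note: with the phy_short_reach attribute now created in tenxpress_phy_probe() and removed in tenxpress_phy_remove(), it presumably outlives init/fini and can be written while the port is not up — hence the new STATE_RUNNING guard in set_phy_short_reach(). The guard pattern in isolation, as an excerpt rather than a complete handler:

	/* Reject sysfs writes while the interface is not running, instead of
	 * touching MDIO registers that may race with init/fini. */
	rtnl_lock();
	if (efx->state != STATE_RUNNING) {
		rc = -EBUSY;
	} else {
		/* ... apply the MDIO change ... */
		rc = efx_reconfigure_port(efx);
	}
	rtnl_unlock();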
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index e669f94e821b..a8b70ef6d817 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
| @@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | |||
| 821 | EFX_TXQ_MASK]; | 821 | EFX_TXQ_MASK]; |
| 822 | efx_tsoh_free(tx_queue, buffer); | 822 | efx_tsoh_free(tx_queue, buffer); |
| 823 | EFX_BUG_ON_PARANOID(buffer->skb); | 823 | EFX_BUG_ON_PARANOID(buffer->skb); |
| 824 | buffer->len = 0; | ||
| 825 | buffer->continuation = true; | ||
| 826 | if (buffer->unmap_len) { | 824 | if (buffer->unmap_len) { |
| 827 | unmap_addr = (buffer->dma_addr + buffer->len - | 825 | unmap_addr = (buffer->dma_addr + buffer->len - |
| 828 | buffer->unmap_len); | 826 | buffer->unmap_len); |
| @@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | |||
| 836 | PCI_DMA_TODEVICE); | 834 | PCI_DMA_TODEVICE); |
| 837 | buffer->unmap_len = 0; | 835 | buffer->unmap_len = 0; |
| 838 | } | 836 | } |
| 837 | buffer->len = 0; | ||
| 838 | buffer->continuation = true; | ||
| 839 | } | 839 | } |
| 840 | } | 840 | } |
| 841 | 841 | ||
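Editor's note: the reordering in efx_enqueue_unwind() matters because the unmap address is derived from buffer->len; clearing len (and continuation) before the unmap meant a stale offset could be passed to the PCI unmap call. A condensed sketch of the corrected ordering, with the unmap call abbreviated:

	/* Corrected ordering: compute the unmap address from buffer->len
	 * first, then reset the descriptor fields for reuse. */
	if (buffer->unmap_len) {
		unmap_addr = buffer->dma_addr + buffer->len - buffer->unmap_len;
		/* pci_unmap_single()/pci_unmap_page() on unmap_addr ... */
		buffer->unmap_len = 0;
	}
	buffer->len = 0;
	buffer->continuation = true;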
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index ca6285016dfd..7402b858cab7 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
| @@ -110,7 +110,7 @@ static void sh_eth_reset(struct net_device *ndev) | |||
| 110 | mdelay(1); | 110 | mdelay(1); |
| 111 | cnt--; | 111 | cnt--; |
| 112 | } | 112 | } |
| 113 | if (cnt < 0) | 113 | if (cnt == 0) |
| 114 | printk(KERN_ERR "Device reset fail\n"); | 114 | printk(KERN_ERR "Device reset fail\n"); |
| 115 | 115 | ||
| 116 | /* Table Init */ | 116 | /* Table Init */ |
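Editor's note: the sh_eth_reset() change fixes an unreachable test — the surrounding loop (not fully shown in this hunk) evidently decrements cnt only while it is still positive, so cnt can reach zero but never go negative, and the old "cnt < 0" message could never fire. Schematically, with the hardware condition left as a placeholder:

	/* cnt counts down inside the loop and stops at 0, so test == 0 to
	 * detect the timeout; reset_still_asserted() is a placeholder. */
	int cnt = 100;				/* illustrative retry budget */

	while (reset_still_asserted() && cnt > 0) {
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset fail\n");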
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 1c01b96c9611..37f486b65f63 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
| @@ -1844,7 +1844,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
| 1844 | sky2->tx_cons = idx; | 1844 | sky2->tx_cons = idx; |
| 1845 | smp_mb(); | 1845 | smp_mb(); |
| 1846 | 1846 | ||
| 1847 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) | 1847 | /* Wake unless it's detached, and called e.g. from sky2_down() */ |
| 1848 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev)) | ||
| 1848 | netif_wake_queue(dev); | 1849 | netif_wake_queue(dev); |
| 1849 | } | 1850 | } |
| 1850 | 1851 | ||
| @@ -4684,6 +4685,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
| 4684 | INIT_WORK(&hw->restart_work, sky2_restart); | 4685 | INIT_WORK(&hw->restart_work, sky2_restart); |
| 4685 | 4686 | ||
| 4686 | pci_set_drvdata(pdev, hw); | 4687 | pci_set_drvdata(pdev, hw); |
| 4688 | pdev->d3_delay = 150; | ||
| 4687 | 4689 | ||
| 4688 | return 0; | 4690 | return 0; |
| 4689 | 4691 | ||
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig index 1cc8cf4425d1..516713fa0a05 100644 --- a/drivers/net/tulip/Kconfig +++ b/drivers/net/tulip/Kconfig | |||
| @@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION | |||
| 101 | 101 | ||
| 102 | If in doubt, say Y. | 102 | If in doubt, say Y. |
| 103 | 103 | ||
| 104 | config TULIP_DM910X | ||
| 105 | def_bool y | ||
| 106 | depends on TULIP && SPARC | ||
| 107 | |||
| 104 | config DE4X5 | 108 | config DE4X5 |
| 105 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" | 109 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" |
| 106 | depends on PCI || EISA | 110 | depends on PCI || EISA |
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index ad63621913c3..6f44ebf58910 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
| @@ -92,6 +92,10 @@ | |||
| 92 | #include <asm/uaccess.h> | 92 | #include <asm/uaccess.h> |
| 93 | #include <asm/irq.h> | 93 | #include <asm/irq.h> |
| 94 | 94 | ||
| 95 | #ifdef CONFIG_TULIP_DM910X | ||
| 96 | #include <linux/of.h> | ||
| 97 | #endif | ||
| 98 | |||
| 95 | 99 | ||
| 96 | /* Board/System/Debug information/definition ---------------- */ | 100 | /* Board/System/Debug information/definition ---------------- */ |
| 97 | #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ | 101 | #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ |
| @@ -377,6 +381,23 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
| 377 | if (!printed_version++) | 381 | if (!printed_version++) |
| 378 | printk(version); | 382 | printk(version); |
| 379 | 383 | ||
| 384 | /* | ||
| 385 | * SPARC on-board DM910x chips should be handled by the main | ||
| 386 | * tulip driver, except for early DM9100s. | ||
| 387 | */ | ||
| 388 | #ifdef CONFIG_TULIP_DM910X | ||
| 389 | if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) || | ||
| 390 | ent->driver_data == PCI_DM9102_ID) { | ||
| 391 | struct device_node *dp = pci_device_to_OF_node(pdev); | ||
| 392 | |||
| 393 | if (dp && of_get_property(dp, "local-mac-address", NULL)) { | ||
| 394 | printk(KERN_INFO DRV_NAME | ||
| 395 | ": skipping on-board DM910x (use tulip)\n"); | ||
| 396 | return -ENODEV; | ||
| 397 | } | ||
| 398 | } | ||
| 399 | #endif | ||
| 400 | |||
| 380 | /* Init network device */ | 401 | /* Init network device */ |
| 381 | dev = alloc_etherdev(sizeof(*db)); | 402 | dev = alloc_etherdev(sizeof(*db)); |
| 382 | if (dev == NULL) | 403 | if (dev == NULL) |
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 0fa3140d65bf..595777dcadb1 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
| @@ -196,9 +196,13 @@ struct tulip_chip_table tulip_tbl[] = { | |||
| 196 | | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, | 196 | | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, |
| 197 | 197 | ||
| 198 | /* DM910X */ | 198 | /* DM910X */ |
| 199 | #ifdef CONFIG_TULIP_DM910X | ||
| 199 | { "Davicom DM9102/DM9102A", 128, 0x0001ebef, | 200 | { "Davicom DM9102/DM9102A", 128, 0x0001ebef, |
| 200 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, | 201 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, |
| 201 | tulip_timer, tulip_media_task }, | 202 | tulip_timer, tulip_media_task }, |
| 203 | #else | ||
| 204 | { NULL }, | ||
| 205 | #endif | ||
| 202 | 206 | ||
| 203 | /* RS7112 */ | 207 | /* RS7112 */ |
| 204 | { "Conexant LANfinity", 256, 0x0001ebef, | 208 | { "Conexant LANfinity", 256, 0x0001ebef, |
| @@ -228,8 +232,10 @@ static struct pci_device_id tulip_pci_tbl[] = { | |||
| 228 | { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 232 | { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
| 229 | { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 }, | 233 | { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 }, |
| 230 | { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 }, | 234 | { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 }, |
| 235 | #ifdef CONFIG_TULIP_DM910X | ||
| 231 | { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, | 236 | { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, |
| 232 | { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, | 237 | { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, |
| 238 | #endif | ||
| 233 | { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 239 | { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
| 234 | { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, | 240 | { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, |
| 235 | { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 241 | { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
| @@ -1299,18 +1305,30 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
| 1299 | } | 1305 | } |
| 1300 | 1306 | ||
| 1301 | /* | 1307 | /* |
| 1302 | * Early DM9100's need software CRC and the DMFE driver | 1308 | * DM910x chips should be handled by the dmfe driver, except |
| 1309 | * on-board chips on SPARC systems. Also, early DM9100s need | ||
| 1310 | * software CRC which only the dmfe driver supports. | ||
| 1303 | */ | 1311 | */ |
| 1304 | 1312 | ||
| 1305 | if (pdev->vendor == 0x1282 && pdev->device == 0x9100) | 1313 | #ifdef CONFIG_TULIP_DM910X |
| 1306 | { | 1314 | if (chip_idx == DM910X) { |
| 1307 | /* Read Chip revision */ | 1315 | struct device_node *dp; |
| 1308 | if (pdev->revision < 0x30) | 1316 | |
| 1309 | { | 1317 | if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && |
| 1310 | printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); | 1318 | pdev->revision < 0x30) { |
| 1319 | printk(KERN_INFO PFX | ||
| 1320 | "skipping early DM9100 with Crc bug (use dmfe)\n"); | ||
| 1321 | return -ENODEV; | ||
| 1322 | } | ||
| 1323 | |||
| 1324 | dp = pci_device_to_OF_node(pdev); | ||
| 1325 | if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { | ||
| 1326 | printk(KERN_INFO PFX | ||
| 1327 | "skipping DM910x expansion card (use dmfe)\n"); | ||
| 1311 | return -ENODEV; | 1328 | return -ENODEV; |
| 1312 | } | 1329 | } |
| 1313 | } | 1330 | } |
| 1331 | #endif | ||
| 1314 | 1332 | ||
| 1315 | /* | 1333 | /* |
| 1316 | * Looks for early PCI chipsets where people report hangs | 1334 | * Looks for early PCI chipsets where people report hangs |
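Editor's note: taken together, the dmfe.c and tulip_core.c hunks split DM910x ownership on SPARC by the same OpenFirmware test — on-board chips carry a "local-mac-address" property and are claimed by tulip, while expansion cards (and early DM9100s with the CRC bug) are left to dmfe. The hand-off predicate both probe routines effectively apply is roughly:

	/* Sketch of the shared test: an on-board DM910x is identified by its
	 * OF "local-mac-address" property. */
	static bool dm910x_is_onboard(struct pci_dev *pdev)
	{
		struct device_node *dp = pci_device_to_OF_node(pdev);

		return dp && of_get_property(dp, "local-mac-address", NULL);
	}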
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 01e99f22210e..2834a01bae24 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk) | |||
| 849 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 849 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
| 850 | wake_up_interruptible_sync(sk->sk_sleep); | 850 | wake_up_interruptible_sync(sk->sk_sleep); |
| 851 | 851 | ||
| 852 | tun = container_of(sk, struct tun_sock, sk)->tun; | 852 | tun = tun_sk(sk)->tun; |
| 853 | kill_fasync(&tun->fasync, SIGIO, POLL_OUT); | 853 | kill_fasync(&tun->fasync, SIGIO, POLL_OUT); |
| 854 | } | 854 | } |
| 855 | 855 | ||
| 856 | static void tun_sock_destruct(struct sock *sk) | 856 | static void tun_sock_destruct(struct sock *sk) |
| 857 | { | 857 | { |
| 858 | free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev); | 858 | free_netdev(tun_sk(sk)->tun->dev); |
| 859 | } | 859 | } |
| 860 | 860 | ||
| 861 | static struct proto tun_proto = { | 861 | static struct proto tun_proto = { |
| @@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 990 | sk->sk_write_space = tun_sock_write_space; | 990 | sk->sk_write_space = tun_sock_write_space; |
| 991 | sk->sk_sndbuf = INT_MAX; | 991 | sk->sk_sndbuf = INT_MAX; |
| 992 | 992 | ||
| 993 | container_of(sk, struct tun_sock, sk)->tun = tun; | 993 | tun_sk(sk)->tun = tun; |
| 994 | 994 | ||
| 995 | security_tun_dev_post_create(sk); | 995 | security_tun_dev_post_create(sk); |
| 996 | 996 | ||
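Editor's note: the tun.c hunks replace open-coded container_of() chains with a tun_sk() accessor. The helper itself is defined outside the visible context; presumably it is a thin static inline along these lines:

	/* Assumed shape of the accessor the hunks now use; the real
	 * definition is not part of this diff. */
	static inline struct tun_sock *tun_sk(struct sock *sk)
	{
		return container_of(sk, struct tun_sock, sk);
	}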
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index afaf088b72ea..96bdc0b43889 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
| @@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) | |||
| 1563 | 1563 | ||
| 1564 | static void ugeth_quiesce(struct ucc_geth_private *ugeth) | 1564 | static void ugeth_quiesce(struct ucc_geth_private *ugeth) |
| 1565 | { | 1565 | { |
| 1566 | /* Wait for and prevent any further xmits. */ | 1566 | /* Prevent any further xmits, plus detach the device. */ |
| 1567 | netif_device_detach(ugeth->ndev); | ||
| 1568 | |||
| 1569 | /* Wait for any current xmits to finish. */ | ||
| 1567 | netif_tx_disable(ugeth->ndev); | 1570 | netif_tx_disable(ugeth->ndev); |
| 1568 | 1571 | ||
| 1569 | /* Disable the interrupt to avoid NAPI rescheduling. */ | 1572 | /* Disable the interrupt to avoid NAPI rescheduling. */ |
| @@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) | |||
| 1577 | { | 1580 | { |
| 1578 | napi_enable(&ugeth->napi); | 1581 | napi_enable(&ugeth->napi); |
| 1579 | enable_irq(ugeth->ug_info->uf_info.irq); | 1582 | enable_irq(ugeth->ug_info->uf_info.irq); |
| 1580 | netif_tx_wake_all_queues(ugeth->ndev); | 1583 | netif_device_attach(ugeth->ndev); |
| 1581 | } | 1584 | } |
| 1582 | 1585 | ||
| 1583 | /* Called every time the controller might need to be made | 1586 | /* Called every time the controller might need to be made |
| @@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev) | |||
| 1648 | ugeth->oldspeed = phydev->speed; | 1651 | ugeth->oldspeed = phydev->speed; |
| 1649 | } | 1652 | } |
| 1650 | 1653 | ||
| 1651 | /* | ||
| 1652 | * To change the MAC configuration we need to disable the | ||
| 1653 | * controller. To do so, we have to either grab ugeth->lock, | ||
| 1654 | * which is a bad idea since 'graceful stop' commands might | ||
| 1655 | * take quite a while, or we can quiesce driver's activity. | ||
| 1656 | */ | ||
| 1657 | ugeth_quiesce(ugeth); | ||
| 1658 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | ||
| 1659 | |||
| 1660 | out_be32(&ug_regs->maccfg2, tempval); | ||
| 1661 | out_be32(&uf_regs->upsmr, upsmr); | ||
| 1662 | |||
| 1663 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | ||
| 1664 | ugeth_activate(ugeth); | ||
| 1665 | |||
| 1666 | if (!ugeth->oldlink) { | 1654 | if (!ugeth->oldlink) { |
| 1667 | new_state = 1; | 1655 | new_state = 1; |
| 1668 | ugeth->oldlink = 1; | 1656 | ugeth->oldlink = 1; |
| 1669 | } | 1657 | } |
| 1658 | |||
| 1659 | if (new_state) { | ||
| 1660 | /* | ||
| 1661 | * To change the MAC configuration we need to disable | ||
| 1662 | * the controller. To do so, we have to either grab | ||
| 1663 | * ugeth->lock, which is a bad idea since 'graceful | ||
| 1664 | * stop' commands might take quite a while, or we can | ||
| 1665 | * quiesce driver's activity. | ||
| 1666 | */ | ||
| 1667 | ugeth_quiesce(ugeth); | ||
| 1668 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | ||
| 1669 | |||
| 1670 | out_be32(&ug_regs->maccfg2, tempval); | ||
| 1671 | out_be32(&uf_regs->upsmr, upsmr); | ||
| 1672 | |||
| 1673 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | ||
| 1674 | ugeth_activate(ugeth); | ||
| 1675 | } | ||
| 1670 | } else if (ugeth->oldlink) { | 1676 | } else if (ugeth->oldlink) { |
| 1671 | new_state = 1; | 1677 | new_state = 1; |
| 1672 | ugeth->oldlink = 0; | 1678 | ugeth->oldlink = 0; |
| @@ -3273,7 +3279,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
| 3273 | /* Handle the transmitted buffer and release */ | 3279 | /* Handle the transmitted buffer and release */ |
| 3274 | /* the BD to be used with the current frame */ | 3280 | /* the BD to be used with the current frame */ |
| 3275 | 3281 | ||
| 3276 | if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) | 3282 | if (bd == ugeth->txBd[txQ]) /* queue empty? */ |
| 3277 | break; | 3283 | break; |
| 3278 | 3284 | ||
| 3279 | dev->stats.tx_packets++; | 3285 | dev->stats.tx_packets++; |
| @@ -3601,6 +3607,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state) | |||
| 3601 | if (!netif_running(ndev)) | 3607 | if (!netif_running(ndev)) |
| 3602 | return 0; | 3608 | return 0; |
| 3603 | 3609 | ||
| 3610 | netif_device_detach(ndev); | ||
| 3604 | napi_disable(&ugeth->napi); | 3611 | napi_disable(&ugeth->napi); |
| 3605 | 3612 | ||
| 3606 | /* | 3613 | /* |
| @@ -3659,7 +3666,7 @@ static int ucc_geth_resume(struct of_device *ofdev) | |||
| 3659 | phy_start(ugeth->phydev); | 3666 | phy_start(ugeth->phydev); |
| 3660 | 3667 | ||
| 3661 | napi_enable(&ugeth->napi); | 3668 | napi_enable(&ugeth->napi); |
| 3662 | netif_start_queue(ndev); | 3669 | netif_device_attach(ndev); |
| 3663 | 3670 | ||
| 3664 | return 0; | 3671 | return 0; |
| 3665 | } | 3672 | } |
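Editor's note: the ucc_geth changes switch quiesce/activate (and suspend/resume) from stopping and waking TX queues to detaching and re-attaching the netdev, which pairs with the sky2 change above — a detached device is not woken by the TX-completion path and its watchdog stays quiet. Reduced to its core, the pattern is:

	/* Quiesce: mark the device absent so nothing restarts the TX queues,
	 * then wait out any transmit already in flight. */
	netif_device_detach(ndev);
	netif_tx_disable(ndev);
	/* ... reprogram the MAC, or suspend the device ... */

	/* Activate/resume: re-attach only once the hardware is ready. */
	netif_device_attach(ndev);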
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index a007e2acf651..ef1fbeb11c6e 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h | |||
| @@ -838,13 +838,13 @@ struct ucc_geth_hardware_statistics { | |||
| 838 | using the maximum is | 838 | using the maximum is |
| 839 | easier */ | 839 | easier */ |
| 840 | #define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 | 840 | #define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 |
| 841 | #define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ | 841 | #define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */ |
| 842 | #define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ | 842 | #define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ |
| 843 | #define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ | 843 | #define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ |
| 844 | #define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64 | 844 | #define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64 |
| 845 | #define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ | 845 | #define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ |
| 846 | #define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ | 846 | #define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ |
| 847 | #define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This | 847 | #define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This |
| 848 | is a | 848 | is a |
| 849 | guess | 849 | guess |
| 850 | */ | 850 | */ |
| @@ -899,16 +899,17 @@ struct ucc_geth_hardware_statistics { | |||
| 899 | #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size | 899 | #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size |
| 900 | */ | 900 | */ |
| 901 | #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ | 901 | #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ |
| 902 | #define UCC_GETH_UTFTT_INIT 128 | 902 | #define UCC_GETH_UTFTT_INIT 512 |
| 903 | /* Gigabit Ethernet (1000 Mbps) */ | 903 | /* Gigabit Ethernet (1000 Mbps) */ |
| 904 | #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual | 904 | #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual |
| 905 | FIFO size */ | 905 | FIFO size */ |
| 906 | #define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ | 906 | #define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ |
| 907 | #define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ | 907 | #define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ |
| 908 | #define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual | 908 | #define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual |
| 909 | FIFO size */ | ||
| 910 | #define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */ | ||
| 911 | #define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual | ||
| 909 | FIFO size */ | 912 | FIFO size */ |
| 910 | #define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */ | ||
| 911 | #define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */ | ||
| 912 | 913 | ||
| 913 | #define UCC_GETH_REMODER_INIT 0 /* bits that must be | 914 | #define UCC_GETH_REMODER_INIT 0 /* bits that must be |
| 914 | set */ | 915 | set */ |
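The comments in this header express most thresholds as simple fractions of the virtual FIFO size ("1/2 utfs", "3/4 urfs"). Purely as an illustration of those relationships, using the gigabit RX values from this hunk (not a proposed change to the header):

    /* Illustration only: thresholds as fractions of the FIFO size. */
    #define URFS   4096            /* Rx virtual FIFO size (gigabit init)  */
    #define URFET  (URFS / 2)      /* 2048, the "1/2 urfs" threshold       */
    #define URFSET (URFS * 3 / 4)  /* 3072, the "3/4 urfs" threshold       */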
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index f78f0903b073..6895f1531238 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
| @@ -286,6 +286,7 @@ struct hso_device { | |||
| 286 | u8 usb_gone; | 286 | u8 usb_gone; |
| 287 | struct work_struct async_get_intf; | 287 | struct work_struct async_get_intf; |
| 288 | struct work_struct async_put_intf; | 288 | struct work_struct async_put_intf; |
| 289 | struct work_struct reset_device; | ||
| 289 | 290 | ||
| 290 | struct usb_device *usb; | 291 | struct usb_device *usb; |
| 291 | struct usb_interface *interface; | 292 | struct usb_interface *interface; |
| @@ -332,7 +333,8 @@ static void hso_kick_transmit(struct hso_serial *serial); | |||
| 332 | /* Helper functions */ | 333 | /* Helper functions */ |
| 333 | static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, | 334 | static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, |
| 334 | struct usb_device *usb, gfp_t gfp); | 335 | struct usb_device *usb, gfp_t gfp); |
| 335 | static void log_usb_status(int status, const char *function); | 336 | static void handle_usb_error(int status, const char *function, |
| 337 | struct hso_device *hso_dev); | ||
| 336 | static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, | 338 | static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, |
| 337 | int type, int dir); | 339 | int type, int dir); |
| 338 | static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports); | 340 | static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports); |
| @@ -350,6 +352,7 @@ static void async_put_intf(struct work_struct *data); | |||
| 350 | static int hso_put_activity(struct hso_device *hso_dev); | 352 | static int hso_put_activity(struct hso_device *hso_dev); |
| 351 | static int hso_get_activity(struct hso_device *hso_dev); | 353 | static int hso_get_activity(struct hso_device *hso_dev); |
| 352 | static void tiocmget_intr_callback(struct urb *urb); | 354 | static void tiocmget_intr_callback(struct urb *urb); |
| 355 | static void reset_device(struct work_struct *data); | ||
| 353 | /*****************************************************************************/ | 356 | /*****************************************************************************/ |
| 354 | /* Helping functions */ | 357 | /* Helping functions */ |
| 355 | /*****************************************************************************/ | 358 | /*****************************************************************************/ |
| @@ -461,10 +464,17 @@ static const struct usb_device_id hso_ids[] = { | |||
| 461 | {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ | 464 | {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ |
| 462 | {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ | 465 | {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ |
| 463 | {USB_DEVICE(0x0af0, 0x7701)}, | 466 | {USB_DEVICE(0x0af0, 0x7701)}, |
| 467 | {USB_DEVICE(0x0af0, 0x7706)}, | ||
| 464 | {USB_DEVICE(0x0af0, 0x7801)}, | 468 | {USB_DEVICE(0x0af0, 0x7801)}, |
| 465 | {USB_DEVICE(0x0af0, 0x7901)}, | 469 | {USB_DEVICE(0x0af0, 0x7901)}, |
| 470 | {USB_DEVICE(0x0af0, 0x7A01)}, | ||
| 471 | {USB_DEVICE(0x0af0, 0x7A05)}, | ||
| 466 | {USB_DEVICE(0x0af0, 0x8200)}, | 472 | {USB_DEVICE(0x0af0, 0x8200)}, |
| 467 | {USB_DEVICE(0x0af0, 0x8201)}, | 473 | {USB_DEVICE(0x0af0, 0x8201)}, |
| 474 | {USB_DEVICE(0x0af0, 0x8300)}, | ||
| 475 | {USB_DEVICE(0x0af0, 0x8302)}, | ||
| 476 | {USB_DEVICE(0x0af0, 0x8304)}, | ||
| 477 | {USB_DEVICE(0x0af0, 0x8400)}, | ||
| 468 | {USB_DEVICE(0x0af0, 0xd035)}, | 478 | {USB_DEVICE(0x0af0, 0xd035)}, |
| 469 | {USB_DEVICE(0x0af0, 0xd055)}, | 479 | {USB_DEVICE(0x0af0, 0xd055)}, |
| 470 | {USB_DEVICE(0x0af0, 0xd155)}, | 480 | {USB_DEVICE(0x0af0, 0xd155)}, |
| @@ -473,6 +483,8 @@ static const struct usb_device_id hso_ids[] = { | |||
| 473 | {USB_DEVICE(0x0af0, 0xd157)}, | 483 | {USB_DEVICE(0x0af0, 0xd157)}, |
| 474 | {USB_DEVICE(0x0af0, 0xd257)}, | 484 | {USB_DEVICE(0x0af0, 0xd257)}, |
| 475 | {USB_DEVICE(0x0af0, 0xd357)}, | 485 | {USB_DEVICE(0x0af0, 0xd357)}, |
| 486 | {USB_DEVICE(0x0af0, 0xd058)}, | ||
| 487 | {USB_DEVICE(0x0af0, 0xc100)}, | ||
| 476 | {} | 488 | {} |
| 477 | }; | 489 | }; |
| 478 | MODULE_DEVICE_TABLE(usb, hso_ids); | 490 | MODULE_DEVICE_TABLE(usb, hso_ids); |
| @@ -655,8 +667,8 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial) | |||
| 655 | spin_unlock_irqrestore(&serial_table_lock, flags); | 667 | spin_unlock_irqrestore(&serial_table_lock, flags); |
| 656 | } | 668 | } |
| 657 | 669 | ||
| 658 | /* log a meaningful explanation of an USB status */ | 670 | static void handle_usb_error(int status, const char *function, |
| 659 | static void log_usb_status(int status, const char *function) | 671 | struct hso_device *hso_dev) |
| 660 | { | 672 | { |
| 661 | char *explanation; | 673 | char *explanation; |
| 662 | 674 | ||
| @@ -685,10 +697,20 @@ static void log_usb_status(int status, const char *function) | |||
| 685 | case -EMSGSIZE: | 697 | case -EMSGSIZE: |
| 686 | explanation = "internal error"; | 698 | explanation = "internal error"; |
| 687 | break; | 699 | break; |
| 700 | case -EILSEQ: | ||
| 701 | case -EPROTO: | ||
| 702 | case -ETIME: | ||
| 703 | case -ETIMEDOUT: | ||
| 704 | explanation = "protocol error"; | ||
| 705 | if (hso_dev) | ||
| 706 | schedule_work(&hso_dev->reset_device); | ||
| 707 | break; | ||
| 688 | default: | 708 | default: |
| 689 | explanation = "unknown status"; | 709 | explanation = "unknown status"; |
| 690 | break; | 710 | break; |
| 691 | } | 711 | } |
| 712 | |||
| 713 | /* log a meaningful explanation of an USB status */ | ||
| 692 | D1("%s: received USB status - %s (%d)", function, explanation, status); | 714 | D1("%s: received USB status - %s (%d)", function, explanation, status); |
| 693 | } | 715 | } |
| 694 | 716 | ||
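handle_usb_error() replaces the old log_usb_status() logger: it keeps the textual explanation but additionally treats -EILSEQ, -EPROTO, -ETIME and -ETIMEDOUT as fatal protocol errors and queues the device's reset work item. A reduced sketch of that classify-then-defer idea, assuming a device structure that embeds a work_struct (my_device and reset_work are illustrative names):

    /* Sketch, not the hso code: map fatal URB statuses to a deferred reset. */
    static void my_handle_usb_error(int status, struct my_device *dev)
    {
            switch (status) {
            case -EILSEQ:
            case -EPROTO:
            case -ETIME:
            case -ETIMEDOUT:
                    /* Too broken to fix in completion context; let a
                     * workqueue reset the device from process context. */
                    if (dev)
                            schedule_work(&dev->reset_work);
                    break;
            default:
                    break;
            }
            pr_debug("USB status %d\n", status);
    }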
| @@ -762,7 +784,7 @@ static void write_bulk_callback(struct urb *urb) | |||
| 762 | /* log status, but don't act on it, we don't need to resubmit anything | 784 | /* log status, but don't act on it, we don't need to resubmit anything |
| 763 | * anyhow */ | 785 | * anyhow */ |
| 764 | if (status) | 786 | if (status) |
| 765 | log_usb_status(status, __func__); | 787 | handle_usb_error(status, __func__, odev->parent); |
| 766 | 788 | ||
| 767 | hso_put_activity(odev->parent); | 789 | hso_put_activity(odev->parent); |
| 768 | 790 | ||
| @@ -806,7 +828,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb, | |||
| 806 | result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); | 828 | result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); |
| 807 | if (result) { | 829 | if (result) { |
| 808 | dev_warn(&odev->parent->interface->dev, | 830 | dev_warn(&odev->parent->interface->dev, |
| 809 | "failed mux_bulk_tx_urb %d", result); | 831 | "failed mux_bulk_tx_urb %d\n", result); |
| 810 | net->stats.tx_errors++; | 832 | net->stats.tx_errors++; |
| 811 | netif_start_queue(net); | 833 | netif_start_queue(net); |
| 812 | } else { | 834 | } else { |
| @@ -998,7 +1020,7 @@ static void read_bulk_callback(struct urb *urb) | |||
| 998 | 1020 | ||
| 999 | /* is al ok? (Filip: Who's Al ?) */ | 1021 | /* is al ok? (Filip: Who's Al ?) */ |
| 1000 | if (status) { | 1022 | if (status) { |
| 1001 | log_usb_status(status, __func__); | 1023 | handle_usb_error(status, __func__, odev->parent); |
| 1002 | return; | 1024 | return; |
| 1003 | } | 1025 | } |
| 1004 | 1026 | ||
| @@ -1019,7 +1041,8 @@ static void read_bulk_callback(struct urb *urb) | |||
| 1019 | if (odev->parent->port_spec & HSO_INFO_CRC_BUG) { | 1041 | if (odev->parent->port_spec & HSO_INFO_CRC_BUG) { |
| 1020 | u32 rest; | 1042 | u32 rest; |
| 1021 | u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; | 1043 | u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; |
| 1022 | rest = urb->actual_length % odev->in_endp->wMaxPacketSize; | 1044 | rest = urb->actual_length % |
| 1045 | le16_to_cpu(odev->in_endp->wMaxPacketSize); | ||
| 1023 | if (((rest == 5) || (rest == 6)) && | 1046 | if (((rest == 5) || (rest == 6)) && |
| 1024 | !memcmp(((u8 *) urb->transfer_buffer) + | 1047 | !memcmp(((u8 *) urb->transfer_buffer) + |
| 1025 | urb->actual_length - 4, crc_check, 4)) { | 1048 | urb->actual_length - 4, crc_check, 4)) { |
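This hunk (and the matching one in hso_std_serial_read_bulk_callback() below) wraps wMaxPacketSize in le16_to_cpu(): USB descriptor fields are stored little-endian (__le16), so using the raw value in the modulo would compute the wrong remainder on big-endian hosts. The conversion in isolation (in_endp is assumed to point at a struct usb_endpoint_descriptor):

    /* Descriptor fields are wire order (little endian); convert before arithmetic. */
    u16 maxp = le16_to_cpu(in_endp->wMaxPacketSize);
    u32 rest = urb->actual_length % maxp;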
| @@ -1053,7 +1076,7 @@ static void read_bulk_callback(struct urb *urb) | |||
| 1053 | result = usb_submit_urb(urb, GFP_ATOMIC); | 1076 | result = usb_submit_urb(urb, GFP_ATOMIC); |
| 1054 | if (result) | 1077 | if (result) |
| 1055 | dev_warn(&odev->parent->interface->dev, | 1078 | dev_warn(&odev->parent->interface->dev, |
| 1056 | "%s failed submit mux_bulk_rx_urb %d", __func__, | 1079 | "%s failed submit mux_bulk_rx_urb %d\n", __func__, |
| 1057 | result); | 1080 | result); |
| 1058 | } | 1081 | } |
| 1059 | 1082 | ||
| @@ -1207,7 +1230,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) | |||
| 1207 | D1("serial == NULL"); | 1230 | D1("serial == NULL"); |
| 1208 | return; | 1231 | return; |
| 1209 | } else if (status) { | 1232 | } else if (status) { |
| 1210 | log_usb_status(status, __func__); | 1233 | handle_usb_error(status, __func__, serial->parent); |
| 1211 | return; | 1234 | return; |
| 1212 | } | 1235 | } |
| 1213 | 1236 | ||
| @@ -1225,7 +1248,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) | |||
| 1225 | u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; | 1248 | u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; |
| 1226 | rest = | 1249 | rest = |
| 1227 | urb->actual_length % | 1250 | urb->actual_length % |
| 1228 | serial->in_endp->wMaxPacketSize; | 1251 | le16_to_cpu(serial->in_endp->wMaxPacketSize); |
| 1229 | if (((rest == 5) || (rest == 6)) && | 1252 | if (((rest == 5) || (rest == 6)) && |
| 1230 | !memcmp(((u8 *) urb->transfer_buffer) + | 1253 | !memcmp(((u8 *) urb->transfer_buffer) + |
| 1231 | urb->actual_length - 4, crc_check, 4)) { | 1254 | urb->actual_length - 4, crc_check, 4)) { |
| @@ -1513,7 +1536,7 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
| 1513 | if (!serial) | 1536 | if (!serial) |
| 1514 | return; | 1537 | return; |
| 1515 | if (status) { | 1538 | if (status) { |
| 1516 | log_usb_status(status, __func__); | 1539 | handle_usb_error(status, __func__, serial->parent); |
| 1517 | return; | 1540 | return; |
| 1518 | } | 1541 | } |
| 1519 | tiocmget = serial->tiocmget; | 1542 | tiocmget = serial->tiocmget; |
| @@ -1700,6 +1723,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1700 | D1("no tty structures"); | 1723 | D1("no tty structures"); |
| 1701 | return -EINVAL; | 1724 | return -EINVAL; |
| 1702 | } | 1725 | } |
| 1726 | |||
| 1727 | if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) | ||
| 1728 | return -EINVAL; | ||
| 1729 | |||
| 1703 | if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; | 1730 | if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; |
| 1704 | 1731 | ||
| 1705 | spin_lock_irqsave(&serial->serial_lock, flags); | 1732 | spin_lock_irqsave(&serial->serial_lock, flags); |
| @@ -1838,7 +1865,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port, | |||
| 1838 | result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); | 1865 | result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); |
| 1839 | if (result) { | 1866 | if (result) { |
| 1840 | dev_err(&ctrl_urb->dev->dev, | 1867 | dev_err(&ctrl_urb->dev->dev, |
| 1841 | "%s failed submit ctrl_urb %d type %d", __func__, | 1868 | "%s failed submit ctrl_urb %d type %d\n", __func__, |
| 1842 | result, type); | 1869 | result, type); |
| 1843 | return result; | 1870 | return result; |
| 1844 | } | 1871 | } |
| @@ -1888,7 +1915,7 @@ static void intr_callback(struct urb *urb) | |||
| 1888 | 1915 | ||
| 1889 | /* status check */ | 1916 | /* status check */ |
| 1890 | if (status) { | 1917 | if (status) { |
| 1891 | log_usb_status(status, __func__); | 1918 | handle_usb_error(status, __func__, NULL); |
| 1892 | return; | 1919 | return; |
| 1893 | } | 1920 | } |
| 1894 | D4("\n--- Got intr callback 0x%02X ---", status); | 1921 | D4("\n--- Got intr callback 0x%02X ---", status); |
| @@ -1905,18 +1932,18 @@ static void intr_callback(struct urb *urb) | |||
| 1905 | if (serial != NULL) { | 1932 | if (serial != NULL) { |
| 1906 | D1("Pending read interrupt on port %d\n", i); | 1933 | D1("Pending read interrupt on port %d\n", i); |
| 1907 | spin_lock(&serial->serial_lock); | 1934 | spin_lock(&serial->serial_lock); |
| 1908 | if (serial->rx_state == RX_IDLE) { | 1935 | if (serial->rx_state == RX_IDLE && |
| 1936 | serial->open_count > 0) { | ||
| 1909 | /* Setup and send a ctrl req read on | 1937 | /* Setup and send a ctrl req read on |
| 1910 | * port i */ | 1938 | * port i */ |
| 1911 | if (!serial->rx_urb_filled[0]) { | 1939 | if (!serial->rx_urb_filled[0]) { |
| 1912 | serial->rx_state = RX_SENT; | 1940 | serial->rx_state = RX_SENT; |
| 1913 | hso_mux_serial_read(serial); | 1941 | hso_mux_serial_read(serial); |
| 1914 | } else | 1942 | } else |
| 1915 | serial->rx_state = RX_PENDING; | 1943 | serial->rx_state = RX_PENDING; |
| 1916 | |||
| 1917 | } else { | 1944 | } else { |
| 1918 | D1("Already pending a read on " | 1945 | D1("Already a read pending on " |
| 1919 | "port %d\n", i); | 1946 | "port %d or port not open\n", i); |
| 1920 | } | 1947 | } |
| 1921 | spin_unlock(&serial->serial_lock); | 1948 | spin_unlock(&serial->serial_lock); |
| 1922 | } | 1949 | } |
| @@ -1958,7 +1985,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) | |||
| 1958 | tty = tty_kref_get(serial->tty); | 1985 | tty = tty_kref_get(serial->tty); |
| 1959 | spin_unlock(&serial->serial_lock); | 1986 | spin_unlock(&serial->serial_lock); |
| 1960 | if (status) { | 1987 | if (status) { |
| 1961 | log_usb_status(status, __func__); | 1988 | handle_usb_error(status, __func__, serial->parent); |
| 1962 | tty_kref_put(tty); | 1989 | tty_kref_put(tty); |
| 1963 | return; | 1990 | return; |
| 1964 | } | 1991 | } |
| @@ -2014,7 +2041,7 @@ static void ctrl_callback(struct urb *urb) | |||
| 2014 | tty = tty_kref_get(serial->tty); | 2041 | tty = tty_kref_get(serial->tty); |
| 2015 | spin_unlock(&serial->serial_lock); | 2042 | spin_unlock(&serial->serial_lock); |
| 2016 | if (status) { | 2043 | if (status) { |
| 2017 | log_usb_status(status, __func__); | 2044 | handle_usb_error(status, __func__, serial->parent); |
| 2018 | tty_kref_put(tty); | 2045 | tty_kref_put(tty); |
| 2019 | return; | 2046 | return; |
| 2020 | } | 2047 | } |
| @@ -2358,12 +2385,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, | |||
| 2358 | serial->tx_data_length = tx_size; | 2385 | serial->tx_data_length = tx_size; |
| 2359 | serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); | 2386 | serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); |
| 2360 | if (!serial->tx_data) { | 2387 | if (!serial->tx_data) { |
| 2361 | dev_err(dev, "%s - Out of memory", __func__); | 2388 | dev_err(dev, "%s - Out of memory\n", __func__); |
| 2362 | goto exit; | 2389 | goto exit; |
| 2363 | } | 2390 | } |
| 2364 | serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); | 2391 | serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); |
| 2365 | if (!serial->tx_buffer) { | 2392 | if (!serial->tx_buffer) { |
| 2366 | dev_err(dev, "%s - Out of memory", __func__); | 2393 | dev_err(dev, "%s - Out of memory\n", __func__); |
| 2367 | goto exit; | 2394 | goto exit; |
| 2368 | } | 2395 | } |
| 2369 | 2396 | ||
| @@ -2391,6 +2418,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf, | |||
| 2391 | 2418 | ||
| 2392 | INIT_WORK(&hso_dev->async_get_intf, async_get_intf); | 2419 | INIT_WORK(&hso_dev->async_get_intf, async_get_intf); |
| 2393 | INIT_WORK(&hso_dev->async_put_intf, async_put_intf); | 2420 | INIT_WORK(&hso_dev->async_put_intf, async_put_intf); |
| 2421 | INIT_WORK(&hso_dev->reset_device, reset_device); | ||
| 2394 | 2422 | ||
| 2395 | return hso_dev; | 2423 | return hso_dev; |
| 2396 | } | 2424 | } |
| @@ -2831,13 +2859,14 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface) | |||
| 2831 | 2859 | ||
| 2832 | mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); | 2860 | mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); |
| 2833 | if (!mux->shared_intr_urb) { | 2861 | if (!mux->shared_intr_urb) { |
| 2834 | dev_err(&interface->dev, "Could not allocate intr urb?"); | 2862 | dev_err(&interface->dev, "Could not allocate intr urb?\n"); |
| 2835 | goto exit; | 2863 | goto exit; |
| 2836 | } | 2864 | } |
| 2837 | mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize, | 2865 | mux->shared_intr_buf = |
| 2838 | GFP_KERNEL); | 2866 | kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), |
| 2867 | GFP_KERNEL); | ||
| 2839 | if (!mux->shared_intr_buf) { | 2868 | if (!mux->shared_intr_buf) { |
| 2840 | dev_err(&interface->dev, "Could not allocate intr buf?"); | 2869 | dev_err(&interface->dev, "Could not allocate intr buf?\n"); |
| 2841 | goto exit; | 2870 | goto exit; |
| 2842 | } | 2871 | } |
| 2843 | 2872 | ||
| @@ -3132,6 +3161,26 @@ out: | |||
| 3132 | return result; | 3161 | return result; |
| 3133 | } | 3162 | } |
| 3134 | 3163 | ||
| 3164 | static void reset_device(struct work_struct *data) | ||
| 3165 | { | ||
| 3166 | struct hso_device *hso_dev = | ||
| 3167 | container_of(data, struct hso_device, reset_device); | ||
| 3168 | struct usb_device *usb = hso_dev->usb; | ||
| 3169 | int result; | ||
| 3170 | |||
| 3171 | if (hso_dev->usb_gone) { | ||
| 3172 | D1("No reset during disconnect\n"); | ||
| 3173 | } else { | ||
| 3174 | result = usb_lock_device_for_reset(usb, hso_dev->interface); | ||
| 3175 | if (result < 0) | ||
| 3176 | D1("unable to lock device for reset: %d\n", result); | ||
| 3177 | else { | ||
| 3178 | usb_reset_device(usb); | ||
| 3179 | usb_unlock_device(usb); | ||
| 3180 | } | ||
| 3181 | } | ||
| 3182 | } | ||
| 3183 | |||
| 3135 | static void hso_serial_ref_free(struct kref *ref) | 3184 | static void hso_serial_ref_free(struct kref *ref) |
| 3136 | { | 3185 | { |
| 3137 | struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); | 3186 | struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); |
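reset_device() is the worker that the protocol-error path schedules: it runs in process context, skips the reset if the device is already disconnecting, and otherwise takes the USB device lock before resetting. The skeleton below restates that flow with made-up structure and field names; only the usb_* calls are the real USB core API.

    /* Sketch of a deferred USB reset worker (my_device, reset_work, gone are illustrative). */
    static void my_reset_work(struct work_struct *work)
    {
            struct my_device *dev = container_of(work, struct my_device, reset_work);
            int ret;

            if (dev->gone)
                    return;                    /* disconnect already in progress */

            ret = usb_lock_device_for_reset(dev->udev, dev->intf);
            if (ret < 0)
                    return;                    /* could not safely acquire the device */

            usb_reset_device(dev->udev);       /* may cause drivers to be re-bound */
            usb_unlock_device(dev->udev);
    }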
| @@ -3232,13 +3281,13 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int, | |||
| 3232 | usb_rcvintpipe(usb, | 3281 | usb_rcvintpipe(usb, |
| 3233 | shared_int->intr_endp->bEndpointAddress & 0x7F), | 3282 | shared_int->intr_endp->bEndpointAddress & 0x7F), |
| 3234 | shared_int->shared_intr_buf, | 3283 | shared_int->shared_intr_buf, |
| 3235 | shared_int->intr_endp->wMaxPacketSize, | 3284 | 1, |
| 3236 | intr_callback, shared_int, | 3285 | intr_callback, shared_int, |
| 3237 | shared_int->intr_endp->bInterval); | 3286 | shared_int->intr_endp->bInterval); |
| 3238 | 3287 | ||
| 3239 | result = usb_submit_urb(shared_int->shared_intr_urb, gfp); | 3288 | result = usb_submit_urb(shared_int->shared_intr_urb, gfp); |
| 3240 | if (result) | 3289 | if (result) |
| 3241 | dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__, | 3290 | dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__, |
| 3242 | result); | 3291 | result); |
| 3243 | 3292 | ||
| 3244 | return result; | 3293 | return result; |
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index f14d225404da..fd19db0d2504 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c | |||
| @@ -270,7 +270,7 @@ static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) | |||
| 270 | get_registers(dev, PHYCNT, 1, data); | 270 | get_registers(dev, PHYCNT, 1, data); |
| 271 | } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); | 271 | } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); |
| 272 | 272 | ||
| 273 | if (i < MII_TIMEOUT) { | 273 | if (i <= MII_TIMEOUT) { |
| 274 | get_registers(dev, PHYDAT, 2, data); | 274 | get_registers(dev, PHYDAT, 2, data); |
| 275 | *reg = data[0] | (data[1] << 8); | 275 | *reg = data[0] | (data[1] << 8); |
| 276 | return 0; | 276 | return 0; |
| @@ -295,7 +295,7 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) | |||
| 295 | get_registers(dev, PHYCNT, 1, data); | 295 | get_registers(dev, PHYCNT, 1, data); |
| 296 | } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); | 296 | } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); |
| 297 | 297 | ||
| 298 | if (i < MII_TIMEOUT) | 298 | if (i <= MII_TIMEOUT) |
| 299 | return 0; | 299 | return 0; |
| 300 | else | 300 | else |
| 301 | return 1; | 301 | return 1; |
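The rtl8150 change fixes an off-by-one in the MII polling loops: the loop condition post-increments i, so when the PHY finishes on the last permitted pass the counter has already reached MII_TIMEOUT and the old test (i < MII_TIMEOUT) misreported success as a timeout; a real timeout leaves i at MII_TIMEOUT + 1, so (i <= MII_TIMEOUT) distinguishes the two correctly. A reduced model of the same loop, with phy_busy() and the timeout value standing in for the driver's register poll:

    /* Reduced model of the polling loop; phy_busy() stands in for the PHY_GO check. */
    #define MY_MII_TIMEOUT 50              /* illustrative value */

    static int my_poll_phy(void)
    {
            int i = 0;

            do {
                    udelay(1);
            } while (phy_busy() && (i++ < MY_MII_TIMEOUT));

            if (i <= MY_MII_TIMEOUT)       /* i == MY_MII_TIMEOUT can still be success */
                    return 0;              /* PHY completed in time */
            return 1;                      /* genuine timeout: i == MY_MII_TIMEOUT + 1 */
    }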
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 593e01f64e9b..611b80435955 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
| @@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32; | |||
| 102 | #include <linux/ethtool.h> | 102 | #include <linux/ethtool.h> |
| 103 | #include <linux/crc32.h> | 103 | #include <linux/crc32.h> |
| 104 | #include <linux/bitops.h> | 104 | #include <linux/bitops.h> |
| 105 | #include <linux/workqueue.h> | ||
| 105 | #include <asm/processor.h> /* Processor type for cache alignment. */ | 106 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
| 106 | #include <asm/io.h> | 107 | #include <asm/io.h> |
| 107 | #include <asm/irq.h> | 108 | #include <asm/irq.h> |
| @@ -389,6 +390,7 @@ struct rhine_private { | |||
| 389 | struct net_device *dev; | 390 | struct net_device *dev; |
| 390 | struct napi_struct napi; | 391 | struct napi_struct napi; |
| 391 | spinlock_t lock; | 392 | spinlock_t lock; |
| 393 | struct work_struct reset_task; | ||
| 392 | 394 | ||
| 393 | /* Frequently used values: keep some adjacent for cache effect. */ | 395 | /* Frequently used values: keep some adjacent for cache effect. */ |
| 394 | u32 quirks; | 396 | u32 quirks; |
| @@ -407,6 +409,7 @@ struct rhine_private { | |||
| 407 | static int mdio_read(struct net_device *dev, int phy_id, int location); | 409 | static int mdio_read(struct net_device *dev, int phy_id, int location); |
| 408 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | 410 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); |
| 409 | static int rhine_open(struct net_device *dev); | 411 | static int rhine_open(struct net_device *dev); |
| 412 | static void rhine_reset_task(struct work_struct *work); | ||
| 410 | static void rhine_tx_timeout(struct net_device *dev); | 413 | static void rhine_tx_timeout(struct net_device *dev); |
| 411 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | 414 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
| 412 | struct net_device *dev); | 415 | struct net_device *dev); |
| @@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
| 775 | dev->irq = pdev->irq; | 778 | dev->irq = pdev->irq; |
| 776 | 779 | ||
| 777 | spin_lock_init(&rp->lock); | 780 | spin_lock_init(&rp->lock); |
| 781 | INIT_WORK(&rp->reset_task, rhine_reset_task); | ||
| 782 | |||
| 778 | rp->mii_if.dev = dev; | 783 | rp->mii_if.dev = dev; |
| 779 | rp->mii_if.mdio_read = mdio_read; | 784 | rp->mii_if.mdio_read = mdio_read; |
| 780 | rp->mii_if.mdio_write = mdio_write; | 785 | rp->mii_if.mdio_write = mdio_write; |
| @@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev) | |||
| 1179 | return 0; | 1184 | return 0; |
| 1180 | } | 1185 | } |
| 1181 | 1186 | ||
| 1182 | static void rhine_tx_timeout(struct net_device *dev) | 1187 | static void rhine_reset_task(struct work_struct *work) |
| 1183 | { | 1188 | { |
| 1184 | struct rhine_private *rp = netdev_priv(dev); | 1189 | struct rhine_private *rp = container_of(work, struct rhine_private, |
| 1185 | void __iomem *ioaddr = rp->base; | 1190 | reset_task); |
| 1186 | 1191 | struct net_device *dev = rp->dev; | |
| 1187 | printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " | ||
| 1188 | "%4.4x, resetting...\n", | ||
| 1189 | dev->name, ioread16(ioaddr + IntrStatus), | ||
| 1190 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | ||
| 1191 | 1192 | ||
| 1192 | /* protect against concurrent rx interrupts */ | 1193 | /* protect against concurrent rx interrupts */ |
| 1193 | disable_irq(rp->pdev->irq); | 1194 | disable_irq(rp->pdev->irq); |
| 1194 | 1195 | ||
| 1195 | napi_disable(&rp->napi); | 1196 | napi_disable(&rp->napi); |
| 1196 | 1197 | ||
| 1197 | spin_lock(&rp->lock); | 1198 | spin_lock_bh(&rp->lock); |
| 1198 | 1199 | ||
| 1199 | /* clear all descriptors */ | 1200 | /* clear all descriptors */ |
| 1200 | free_tbufs(dev); | 1201 | free_tbufs(dev); |
| @@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev) | |||
| 1206 | rhine_chip_reset(dev); | 1207 | rhine_chip_reset(dev); |
| 1207 | init_registers(dev); | 1208 | init_registers(dev); |
| 1208 | 1209 | ||
| 1209 | spin_unlock(&rp->lock); | 1210 | spin_unlock_bh(&rp->lock); |
| 1210 | enable_irq(rp->pdev->irq); | 1211 | enable_irq(rp->pdev->irq); |
| 1211 | 1212 | ||
| 1212 | dev->trans_start = jiffies; | 1213 | dev->trans_start = jiffies; |
| @@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev) | |||
| 1214 | netif_wake_queue(dev); | 1215 | netif_wake_queue(dev); |
| 1215 | } | 1216 | } |
| 1216 | 1217 | ||
| 1218 | static void rhine_tx_timeout(struct net_device *dev) | ||
| 1219 | { | ||
| 1220 | struct rhine_private *rp = netdev_priv(dev); | ||
| 1221 | void __iomem *ioaddr = rp->base; | ||
| 1222 | |||
| 1223 | printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " | ||
| 1224 | "%4.4x, resetting...\n", | ||
| 1225 | dev->name, ioread16(ioaddr + IntrStatus), | ||
| 1226 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); | ||
| 1227 | |||
| 1228 | schedule_work(&rp->reset_task); | ||
| 1229 | } | ||
| 1230 | |||
| 1217 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | 1231 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
| 1218 | struct net_device *dev) | 1232 | struct net_device *dev) |
| 1219 | { | 1233 | { |
| @@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev) | |||
| 1830 | struct rhine_private *rp = netdev_priv(dev); | 1844 | struct rhine_private *rp = netdev_priv(dev); |
| 1831 | void __iomem *ioaddr = rp->base; | 1845 | void __iomem *ioaddr = rp->base; |
| 1832 | 1846 | ||
| 1833 | spin_lock_irq(&rp->lock); | ||
| 1834 | |||
| 1835 | netif_stop_queue(dev); | ||
| 1836 | napi_disable(&rp->napi); | 1847 | napi_disable(&rp->napi); |
| 1848 | cancel_work_sync(&rp->reset_task); | ||
| 1849 | netif_stop_queue(dev); | ||
| 1850 | |||
| 1851 | spin_lock_irq(&rp->lock); | ||
| 1837 | 1852 | ||
| 1838 | if (debug > 1) | 1853 | if (debug > 1) |
| 1839 | printk(KERN_DEBUG "%s: Shutting down ethercard, " | 1854 | printk(KERN_DEBUG "%s: Shutting down ethercard, " |
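via-rhine stops doing the full chip reset inside rhine_tx_timeout(), which the TX watchdog calls in atomic context, and defers it to rhine_reset_task() on a workqueue; that is why the locking changes to spin_lock_bh() and why rhine_close() now does napi_disable() and cancel_work_sync() before stopping the queue. A condensed sketch of the split (the my_* names and the exact reinit steps are placeholders):

    /* Sketch of the "watchdog schedules, workqueue resets" split. */
    static void my_tx_timeout(struct net_device *dev)
    {
            struct my_priv *priv = netdev_priv(dev);

            /* Atomic context: log if desired, then defer the heavy work. */
            schedule_work(&priv->reset_task);
    }

    static void my_reset_task(struct work_struct *work)
    {
            struct my_priv *priv = container_of(work, struct my_priv, reset_task);
            struct net_device *dev = priv->dev;

            disable_irq(priv->irq);           /* allowed here: process context */
            napi_disable(&priv->napi);

            spin_lock_bh(&priv->lock);
            my_reinit_hardware(dev);          /* placeholder for reset + re-init */
            spin_unlock_bh(&priv->lock);

            napi_enable(&priv->napi);
            enable_irq(priv->irq);
            netif_wake_queue(dev);
    }

    /* On close: cancel_work_sync(&priv->reset_task) before tearing the device down. */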
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 4ceb441f2687..c93f58f5c6f2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
| @@ -2237,8 +2237,6 @@ static int velocity_open(struct net_device *dev) | |||
| 2237 | /* Ensure chip is running */ | 2237 | /* Ensure chip is running */ |
| 2238 | pci_set_power_state(vptr->pdev, PCI_D0); | 2238 | pci_set_power_state(vptr->pdev, PCI_D0); |
| 2239 | 2239 | ||
| 2240 | velocity_give_many_rx_descs(vptr); | ||
| 2241 | |||
| 2242 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); | 2240 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); |
| 2243 | 2241 | ||
| 2244 | ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, | 2242 | ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, |
| @@ -2250,6 +2248,8 @@ static int velocity_open(struct net_device *dev) | |||
| 2250 | goto out; | 2248 | goto out; |
| 2251 | } | 2249 | } |
| 2252 | 2250 | ||
| 2251 | velocity_give_many_rx_descs(vptr); | ||
| 2252 | |||
| 2253 | mac_enable_int(vptr->mac_regs); | 2253 | mac_enable_int(vptr->mac_regs); |
| 2254 | netif_start_queue(dev); | 2254 | netif_start_queue(dev); |
| 2255 | napi_enable(&vptr->napi); | 2255 | napi_enable(&vptr->napi); |
| @@ -2339,10 +2339,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |||
| 2339 | 2339 | ||
| 2340 | dev->mtu = new_mtu; | 2340 | dev->mtu = new_mtu; |
| 2341 | 2341 | ||
| 2342 | velocity_give_many_rx_descs(vptr); | ||
| 2343 | |||
| 2344 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); | 2342 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); |
| 2345 | 2343 | ||
| 2344 | velocity_give_many_rx_descs(vptr); | ||
| 2345 | |||
| 2346 | mac_enable_int(vptr->mac_regs); | 2346 | mac_enable_int(vptr->mac_regs); |
| 2347 | netif_start_queue(dev); | 2347 | netif_start_queue(dev); |
| 2348 | 2348 | ||
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index f1c4b2a1e867..b9685e82f7b6 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
| @@ -310,7 +310,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) | |||
| 310 | dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, | 310 | dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, |
| 311 | rx_priv->data_size, PCI_DMA_FROMDEVICE); | 311 | rx_priv->data_size, PCI_DMA_FROMDEVICE); |
| 312 | 312 | ||
| 313 | if (dma_addr == 0) { | 313 | if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { |
| 314 | ring->stats.pci_map_fail++; | 314 | ring->stats.pci_map_fail++; |
| 315 | return -EIO; | 315 | return -EIO; |
| 316 | } | 316 | } |
| @@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 4087 | goto _exit0; | 4087 | goto _exit0; |
| 4088 | } | 4088 | } |
| 4089 | 4089 | ||
| 4090 | if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { | 4090 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
| 4091 | vxge_debug_ll_config(VXGE_TRACE, | 4091 | vxge_debug_ll_config(VXGE_TRACE, |
| 4092 | "%s : using 64bit DMA", __func__); | 4092 | "%s : using 64bit DMA", __func__); |
| 4093 | 4093 | ||
| 4094 | high_dma = 1; | 4094 | high_dma = 1; |
| 4095 | 4095 | ||
| 4096 | if (pci_set_consistent_dma_mask(pdev, | 4096 | if (pci_set_consistent_dma_mask(pdev, |
| 4097 | 0xffffffffffffffffULL)) { | 4097 | DMA_BIT_MASK(64))) { |
| 4098 | vxge_debug_init(VXGE_ERR, | 4098 | vxge_debug_init(VXGE_ERR, |
| 4099 | "%s : unable to obtain 64bit DMA for " | 4099 | "%s : unable to obtain 64bit DMA for " |
| 4100 | "consistent allocations", __func__); | 4100 | "consistent allocations", __func__); |
| 4101 | ret = -ENOMEM; | 4101 | ret = -ENOMEM; |
| 4102 | goto _exit1; | 4102 | goto _exit1; |
| 4103 | } | 4103 | } |
| 4104 | } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) { | 4104 | } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { |
| 4105 | vxge_debug_ll_config(VXGE_TRACE, | 4105 | vxge_debug_ll_config(VXGE_TRACE, |
| 4106 | "%s : using 32bit DMA", __func__); | 4106 | "%s : using 32bit DMA", __func__); |
| 4107 | } else { | 4107 | } else { |
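Two independent fixes in vxge: a returned bus address of 0 is not a reliable failure indicator, so the mapping is now checked with pci_dma_mapping_error(), and the open-coded 0xffffffffffffffffULL / 0xffffffffUL masks become DMA_BIT_MASK(64) / DMA_BIT_MASK(32). A hedged sketch of both idioms (buf, len and the high_dma flag are illustrative):

    /* Checking a streaming mapping and setting DMA masks with the old PCI DMA API. */
    dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
    if (unlikely(pci_dma_mapping_error(pdev, dma)))
            return -EIO;                     /* do not test "dma == 0" */

    if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
            high_dma = 1;                    /* 64-bit addressing available */
    else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
            return -ENOMEM;                  /* not even 32-bit DMA possible */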
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index a4c086f069b1..e63b7c40d0ee 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
| @@ -1903,17 +1903,6 @@ accept: | |||
| 1903 | rxs->noise = sc->ah->ah_noise_floor; | 1903 | rxs->noise = sc->ah->ah_noise_floor; |
| 1904 | rxs->signal = rxs->noise + rs.rs_rssi; | 1904 | rxs->signal = rxs->noise + rs.rs_rssi; |
| 1905 | 1905 | ||
| 1906 | /* An rssi of 35 indicates you should be able use | ||
| 1907 | * 54 Mbps reliably. A more elaborate scheme can be used | ||
| 1908 | * here but it requires a map of SNR/throughput for each | ||
| 1909 | * possible mode used */ | ||
| 1910 | rxs->qual = rs.rs_rssi * 100 / 35; | ||
| 1911 | |||
| 1912 | /* rssi can be more than 35 though, anything above that | ||
| 1913 | * should be considered at 100% */ | ||
| 1914 | if (rxs->qual > 100) | ||
| 1915 | rxs->qual = 100; | ||
| 1916 | |||
| 1917 | rxs->antenna = rs.rs_antenna; | 1906 | rxs->antenna = rs.rs_antenna; |
| 1918 | rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); | 1907 | rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); |
| 1919 | rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); | 1908 | rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); |
| @@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc) | |||
| 2381 | */ | 2370 | */ |
| 2382 | ath5k_stop_locked(sc); | 2371 | ath5k_stop_locked(sc); |
| 2383 | 2372 | ||
| 2373 | /* Set PHY calibration interval */ | ||
| 2374 | ah->ah_cal_intval = ath5k_calinterval; | ||
| 2375 | |||
| 2384 | /* | 2376 | /* |
| 2385 | * The basic interface to setting the hardware in a good | 2377 | * The basic interface to setting the hardware in a good |
| 2386 | * state is ``reset''. On return the hardware is known to | 2378 | * state is ``reset''. On return the hardware is known to |
| @@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc) | |||
| 2408 | 2400 | ||
| 2409 | /* Set ack to be sent at low bit-rates */ | 2401 | /* Set ack to be sent at low bit-rates */ |
| 2410 | ath5k_hw_set_ack_bitrate_high(ah, false); | 2402 | ath5k_hw_set_ack_bitrate_high(ah, false); |
| 2411 | |||
| 2412 | /* Set PHY calibration inteval */ | ||
| 2413 | ah->ah_cal_intval = ath5k_calinterval; | ||
| 2414 | |||
| 2415 | ret = 0; | 2403 | ret = 0; |
| 2416 | done: | 2404 | done: |
| 2417 | mmiowb(); | 2405 | mmiowb(); |
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 71b84d91dcff..efc420cd42bf 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c | |||
| @@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) | |||
| 186 | wait = wait_time; | 186 | wait = wait_time; |
| 187 | while (ath9k_hw_numtxpending(ah, q)) { | 187 | while (ath9k_hw_numtxpending(ah, q)) { |
| 188 | if ((--wait) == 0) { | 188 | if ((--wait) == 0) { |
| 189 | ath_print(common, ATH_DBG_QUEUE, | 189 | ath_print(common, ATH_DBG_FATAL, |
| 190 | "Failed to stop TX DMA in 100 " | 190 | "Failed to stop TX DMA in 100 " |
| 191 | "msec after killing last frame\n"); | 191 | "msec after killing last frame\n"); |
| 192 | break; | 192 | break; |
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 0c87771383f0..e185479e295e 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h | |||
| @@ -77,6 +77,9 @@ | |||
| 77 | #define ATH9K_TXERR_XTXOP 0x08 | 77 | #define ATH9K_TXERR_XTXOP 0x08 |
| 78 | #define ATH9K_TXERR_TIMER_EXPIRED 0x10 | 78 | #define ATH9K_TXERR_TIMER_EXPIRED 0x10 |
| 79 | #define ATH9K_TX_ACKED 0x20 | 79 | #define ATH9K_TX_ACKED 0x20 |
| 80 | #define ATH9K_TXERR_MASK \ | ||
| 81 | (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \ | ||
| 82 | ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED) | ||
| 80 | 83 | ||
| 81 | #define ATH9K_TX_BA 0x01 | 84 | #define ATH9K_TX_BA 0x01 |
| 82 | #define ATH9K_TX_PWRMGMT 0x02 | 85 | #define ATH9K_TX_PWRMGMT 0x02 |
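mac.h gains ATH9K_TXERR_MASK, the OR of all TX error status bits, so completion code (see the ath_tx_processq() hunk in xmit.c below) can ask "did this frame fail for any reason" in one test instead of checking a single flag such as ATH9K_TXERR_FILT:

    /* Illustration of the mask test used later in ath_tx_processq(). */
    bool txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);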
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index c48743452515..996eb90263cc 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) | |||
| 1973 | struct ieee80211_hw *hw = sc->hw; | 1973 | struct ieee80211_hw *hw = sc->hw; |
| 1974 | int r; | 1974 | int r; |
| 1975 | 1975 | ||
| 1976 | /* Stop ANI */ | ||
| 1977 | del_timer_sync(&common->ani.timer); | ||
| 1978 | |||
| 1976 | ath9k_hw_set_interrupts(ah, 0); | 1979 | ath9k_hw_set_interrupts(ah, 0); |
| 1977 | ath_drain_all_txq(sc, retry_tx); | 1980 | ath_drain_all_txq(sc, retry_tx); |
| 1978 | ath_stoprecv(sc); | 1981 | ath_stoprecv(sc); |
| @@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) | |||
| 2014 | } | 2017 | } |
| 2015 | } | 2018 | } |
| 2016 | 2019 | ||
| 2020 | /* Start ANI */ | ||
| 2021 | ath_start_ani(common); | ||
| 2022 | |||
| 2017 | return r; | 2023 | return r; |
| 2018 | } | 2024 | } |
| 2019 | 2025 | ||
| @@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
| 2508 | return; /* another wiphy still in use */ | 2514 | return; /* another wiphy still in use */ |
| 2509 | } | 2515 | } |
| 2510 | 2516 | ||
| 2517 | /* Ensure HW is awake when we try to shut it down. */ | ||
| 2518 | ath9k_ps_wakeup(sc); | ||
| 2519 | |||
| 2511 | if (ah->btcoex_hw.enabled) { | 2520 | if (ah->btcoex_hw.enabled) { |
| 2512 | ath9k_hw_btcoex_disable(ah); | 2521 | ath9k_hw_btcoex_disable(ah); |
| 2513 | if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) | 2522 | if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) |
| @@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
| 2528 | /* disable HAL and put h/w to sleep */ | 2537 | /* disable HAL and put h/w to sleep */ |
| 2529 | ath9k_hw_disable(ah); | 2538 | ath9k_hw_disable(ah); |
| 2530 | ath9k_hw_configpcipowersave(ah, 1, 1); | 2539 | ath9k_hw_configpcipowersave(ah, 1, 1); |
| 2540 | ath9k_ps_restore(sc); | ||
| 2541 | |||
| 2542 | /* Finally, put the chip in FULL SLEEP mode */ | ||
| 2531 | ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); | 2543 | ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); |
| 2532 | 2544 | ||
| 2533 | sc->sc_flags |= SC_OP_INVALID; | 2545 | sc->sc_flags |= SC_OP_INVALID; |
| @@ -2641,8 +2653,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, | |||
| 2641 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || | 2653 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || |
| 2642 | (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || | 2654 | (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || |
| 2643 | (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { | 2655 | (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { |
| 2656 | ath9k_ps_wakeup(sc); | ||
| 2644 | ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); | 2657 | ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); |
| 2645 | ath_beacon_return(sc, avp); | 2658 | ath_beacon_return(sc, avp); |
| 2659 | ath9k_ps_restore(sc); | ||
| 2646 | } | 2660 | } |
| 2647 | 2661 | ||
| 2648 | sc->sc_flags &= ~SC_OP_BEACONS; | 2662 | sc->sc_flags &= ~SC_OP_BEACONS; |
| @@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, | |||
| 3091 | case IEEE80211_AMPDU_RX_STOP: | 3105 | case IEEE80211_AMPDU_RX_STOP: |
| 3092 | break; | 3106 | break; |
| 3093 | case IEEE80211_AMPDU_TX_START: | 3107 | case IEEE80211_AMPDU_TX_START: |
| 3108 | ath9k_ps_wakeup(sc); | ||
| 3094 | ath_tx_aggr_start(sc, sta, tid, ssn); | 3109 | ath_tx_aggr_start(sc, sta, tid, ssn); |
| 3095 | ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); | 3110 | ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
| 3111 | ath9k_ps_restore(sc); | ||
| 3096 | break; | 3112 | break; |
| 3097 | case IEEE80211_AMPDU_TX_STOP: | 3113 | case IEEE80211_AMPDU_TX_STOP: |
| 3114 | ath9k_ps_wakeup(sc); | ||
| 3098 | ath_tx_aggr_stop(sc, sta, tid); | 3115 | ath_tx_aggr_stop(sc, sta, tid); |
| 3099 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); | 3116 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
| 3117 | ath9k_ps_restore(sc); | ||
| 3100 | break; | 3118 | break; |
| 3101 | case IEEE80211_AMPDU_TX_OPERATIONAL: | 3119 | case IEEE80211_AMPDU_TX_OPERATIONAL: |
| 3120 | ath9k_ps_wakeup(sc); | ||
| 3102 | ath_tx_aggr_resume(sc, sta, tid); | 3121 | ath_tx_aggr_resume(sc, sta, tid); |
| 3122 | ath9k_ps_restore(sc); | ||
| 3103 | break; | 3123 | break; |
| 3104 | default: | 3124 | default: |
| 3105 | ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, | 3125 | ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, |
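The main.c hunks all enforce the same rule: any path that touches the hardware directly (stopping beacon DMA, starting or stopping TX aggregation, shutting the chip down, resetting while ANI is paused) is bracketed with ath9k_ps_wakeup()/ath9k_ps_restore(), so the chip is guaranteed to be awake for the register accesses and the power-save reference count is rebalanced afterwards. The shape of the bracketing, with do_hw_work() as a stand-in for the real operation:

    /* Sketch: keep the chip awake across direct hardware access. */
    ath9k_ps_wakeup(sc);          /* bump the PS refcount, force the chip awake */
    do_hw_work(sc);               /* placeholder for stoptxdma / aggregation calls */
    ath9k_ps_restore(sc);         /* drop the refcount; the chip may sleep again */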
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 5321f735e5a0..f7af5ea54753 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c | |||
| @@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common) | |||
| 96 | pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm); | 96 | pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | const static struct ath_bus_ops ath_pci_bus_ops = { | 99 | static const struct ath_bus_ops ath_pci_bus_ops = { |
| 100 | .read_cachesize = ath_pci_read_cachesize, | 100 | .read_cachesize = ath_pci_read_cachesize, |
| 101 | .cleanup = ath_pci_cleanup, | 101 | .cleanup = ath_pci_cleanup, |
| 102 | .eeprom_read = ath_pci_eeprom_read, | 102 | .eeprom_read = ath_pci_eeprom_read, |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 2a11cc57ceea..fa12b9060b0b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) | |||
| 1108 | if (npend) { | 1108 | if (npend) { |
| 1109 | int r; | 1109 | int r; |
| 1110 | 1110 | ||
| 1111 | ath_print(common, ATH_DBG_XMIT, | 1111 | ath_print(common, ATH_DBG_FATAL, |
| 1112 | "Unable to stop TxDMA. Reset HAL!\n"); | 1112 | "Unable to stop TxDMA. Reset HAL!\n"); |
| 1113 | 1113 | ||
| 1114 | spin_lock_bh(&sc->sc_resetlock); | 1114 | spin_lock_bh(&sc->sc_resetlock); |
| 1115 | r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); | 1115 | r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); |
| 1116 | if (r) | 1116 | if (r) |
| 1117 | ath_print(common, ATH_DBG_FATAL, | 1117 | ath_print(common, ATH_DBG_FATAL, |
| 1118 | "Unable to reset hardware; reset status %d\n", | 1118 | "Unable to reset hardware; reset status %d\n", |
| @@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb, | |||
| 1414 | * For HT capable stations, we save tidno for later use. | 1414 | * For HT capable stations, we save tidno for later use. |
| 1415 | * We also override seqno set by upper layer with the one | 1415 | * We also override seqno set by upper layer with the one |
| 1416 | * in tx aggregation state. | 1416 | * in tx aggregation state. |
| 1417 | * | ||
| 1418 | * If fragmentation is on, the sequence number is | ||
| 1419 | * not overridden, since it has been | ||
| 1420 | * incremented by the fragmentation routine. | ||
| 1421 | * | ||
| 1422 | * FIXME: check if the fragmentation threshold exceeds | ||
| 1423 | * IEEE80211 max. | ||
| 1424 | */ | 1417 | */ |
| 1425 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | 1418 | tid = ATH_AN_2_TID(an, bf->bf_tidno); |
| 1426 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << | 1419 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); |
| 1427 | IEEE80211_SEQ_SEQ_SHIFT); | ||
| 1428 | bf->bf_seqno = tid->seq_next; | 1420 | bf->bf_seqno = tid->seq_next; |
| 1429 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); | 1421 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); |
| 1430 | } | 1422 | } |
| @@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, | |||
| 1636 | bf->bf_keyix = ATH9K_TXKEYIX_INVALID; | 1628 | bf->bf_keyix = ATH9K_TXKEYIX_INVALID; |
| 1637 | } | 1629 | } |
| 1638 | 1630 | ||
| 1639 | if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR)) | 1631 | if (ieee80211_is_data_qos(fc) && bf_isht(bf) && |
| 1632 | (sc->sc_flags & SC_OP_TXAGGR)) | ||
| 1640 | assign_aggr_tid_seqno(skb, bf); | 1633 | assign_aggr_tid_seqno(skb, bf); |
| 1641 | 1634 | ||
| 1642 | bf->bf_mpdu = skb; | 1635 | bf->bf_mpdu = skb; |
| @@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 1780 | struct ath_wiphy *aphy = hw->priv; | 1773 | struct ath_wiphy *aphy = hw->priv; |
| 1781 | struct ath_softc *sc = aphy->sc; | 1774 | struct ath_softc *sc = aphy->sc; |
| 1782 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 1775 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
| 1783 | int hdrlen, padsize; | 1776 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
| 1777 | int padpos, padsize; | ||
| 1784 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1778 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 1785 | struct ath_tx_control txctl; | 1779 | struct ath_tx_control txctl; |
| 1786 | 1780 | ||
| @@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 1792 | * BSSes. | 1786 | * BSSes. |
| 1793 | */ | 1787 | */ |
| 1794 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | 1788 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { |
| 1795 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
| 1796 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | 1789 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) |
| 1797 | sc->tx.seq_no += 0x10; | 1790 | sc->tx.seq_no += 0x10; |
| 1798 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | 1791 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); |
| @@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 1800 | } | 1793 | } |
| 1801 | 1794 | ||
| 1802 | /* Add the padding after the header if this is not already done */ | 1795 | /* Add the padding after the header if this is not already done */ |
| 1803 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 1796 | padpos = ath9k_cmn_padpos(hdr->frame_control); |
| 1804 | if (hdrlen & 3) { | 1797 | padsize = padpos & 3; |
| 1805 | padsize = hdrlen % 4; | 1798 | if (padsize && skb->len>padpos) { |
| 1806 | if (skb_headroom(skb) < padsize) { | 1799 | if (skb_headroom(skb) < padsize) { |
| 1807 | ath_print(common, ATH_DBG_XMIT, | 1800 | ath_print(common, ATH_DBG_XMIT, |
| 1808 | "TX CABQ padding failed\n"); | 1801 | "TX CABQ padding failed\n"); |
| @@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 1810 | return; | 1803 | return; |
| 1811 | } | 1804 | } |
| 1812 | skb_push(skb, padsize); | 1805 | skb_push(skb, padsize); |
| 1813 | memmove(skb->data, skb->data + padsize, hdrlen); | 1806 | memmove(skb->data, skb->data + padsize, padpos); |
| 1814 | } | 1807 | } |
| 1815 | 1808 | ||
| 1816 | txctl.txq = sc->beacon.cabq; | 1809 | txctl.txq = sc->beacon.cabq; |
| @@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
| 1838 | struct ieee80211_hw *hw = sc->hw; | 1831 | struct ieee80211_hw *hw = sc->hw; |
| 1839 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 1832 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
| 1840 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 1833 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
| 1841 | int hdrlen, padsize; | 1834 | struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; |
| 1835 | int padpos, padsize; | ||
| 1842 | 1836 | ||
| 1843 | ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); | 1837 | ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); |
| 1844 | 1838 | ||
| @@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
| 1853 | tx_info->flags |= IEEE80211_TX_STAT_ACK; | 1847 | tx_info->flags |= IEEE80211_TX_STAT_ACK; |
| 1854 | } | 1848 | } |
| 1855 | 1849 | ||
| 1856 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 1850 | padpos = ath9k_cmn_padpos(hdr->frame_control); |
| 1857 | padsize = hdrlen & 3; | 1851 | padsize = padpos & 3; |
| 1858 | if (padsize && hdrlen >= 24) { | 1852 | if (padsize && skb->len>padpos+padsize) { |
| 1859 | /* | 1853 | /* |
| 1860 | * Remove MAC header padding before giving the frame back to | 1854 | * Remove MAC header padding before giving the frame back to |
| 1861 | * mac80211. | 1855 | * mac80211. |
| 1862 | */ | 1856 | */ |
| 1863 | memmove(skb->data + padsize, skb->data, hdrlen); | 1857 | memmove(skb->data + padsize, skb->data, padpos); |
| 1864 | skb_pull(skb, padsize); | 1858 | skb_pull(skb, padsize); |
| 1865 | } | 1859 | } |
| 1866 | 1860 | ||
| @@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) | |||
| 2078 | &txq->axq_q, lastbf->list.prev); | 2072 | &txq->axq_q, lastbf->list.prev); |
| 2079 | 2073 | ||
| 2080 | txq->axq_depth--; | 2074 | txq->axq_depth--; |
| 2081 | txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT); | 2075 | txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK); |
| 2082 | txq->axq_tx_inprogress = false; | 2076 | txq->axq_tx_inprogress = false; |
| 2083 | spin_unlock_bh(&txq->axq_lock); | 2077 | spin_unlock_bh(&txq->axq_lock); |
| 2084 | 2078 | ||
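The padding hunks in xmit.c derive the pad point from ath9k_cmn_padpos(hdr->frame_control) rather than the full header length: the hardware wants the frame body 4-byte aligned, so padsize = padpos & 3 bytes are inserted after the first padpos bytes on transmit and stripped again in ath_tx_complete() before the skb goes back to mac80211. The two memmove() calls mirror each other; a reduced sketch of the pair:

    /* Sketch of the pad/unpad pair; padpos comes from ath9k_cmn_padpos(). */
    int padpos  = ath9k_cmn_padpos(hdr->frame_control);
    int padsize = padpos & 3;

    /* TX path: open a padsize gap after the first padpos header bytes. */
    if (padsize && skb->len > padpos && skb_headroom(skb) >= padsize) {
            skb_push(skb, padsize);
            memmove(skb->data, skb->data + padsize, padpos);
    }

    /* Completion path: close the gap before returning the skb to mac80211. */
    if (padsize && skb->len > padpos + padsize) {
            memmove(skb->data + padsize, skb->data, padpos);
            skb_pull(skb, padsize);
    }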
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 027be275e035..88d1fd02d40a 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
| @@ -383,160 +383,44 @@ static inline | |||
| 383 | } | 383 | } |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | /* Check if a DMA region fits the device constraints. | ||
| 387 | * Returns true, if the region is OK for usage with this device. */ | ||
| 388 | static inline bool b43_dma_address_ok(struct b43_dmaring *ring, | ||
| 389 | dma_addr_t addr, size_t size) | ||
| 390 | { | ||
| 391 | switch (ring->type) { | ||
| 392 | case B43_DMA_30BIT: | ||
| 393 | if ((u64)addr + size > (1ULL << 30)) | ||
| 394 | return 0; | ||
| 395 | break; | ||
| 396 | case B43_DMA_32BIT: | ||
| 397 | if ((u64)addr + size > (1ULL << 32)) | ||
| 398 | return 0; | ||
| 399 | break; | ||
| 400 | case B43_DMA_64BIT: | ||
| 401 | /* Currently we can't have addresses beyond | ||
| 402 | * 64bit in the kernel. */ | ||
| 403 | break; | ||
| 404 | } | ||
| 405 | return 1; | ||
| 406 | } | ||
| 407 | |||
| 408 | #define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0) | ||
| 409 | #define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0) | ||
| 410 | |||
| 411 | static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base, | ||
| 412 | dma_addr_t dmaaddr, size_t size) | ||
| 413 | { | ||
| 414 | ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE); | ||
| 415 | free_pages((unsigned long)base, get_order(size)); | ||
| 416 | } | ||
| 417 | |||
| 418 | static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring, | ||
| 419 | dma_addr_t *dmaaddr, size_t size, | ||
| 420 | gfp_t gfp_flags) | ||
| 421 | { | ||
| 422 | void *base; | ||
| 423 | |||
| 424 | base = (void *)__get_free_pages(gfp_flags, get_order(size)); | ||
| 425 | if (!base) | ||
| 426 | return NULL; | ||
| 427 | memset(base, 0, size); | ||
| 428 | *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size, | ||
| 429 | DMA_TO_DEVICE); | ||
| 430 | if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) { | ||
| 431 | free_pages((unsigned long)base, get_order(size)); | ||
| 432 | return NULL; | ||
| 433 | } | ||
| 434 | |||
| 435 | return base; | ||
| 436 | } | ||
| 437 | |||
| 438 | static void * b43_get_and_map_ringmem(struct b43_dmaring *ring, | ||
| 439 | dma_addr_t *dmaaddr, size_t size) | ||
| 440 | { | ||
| 441 | void *base; | ||
| 442 | |||
| 443 | base = __b43_get_and_map_ringmem(ring, dmaaddr, size, | ||
| 444 | GFP_KERNEL); | ||
| 445 | if (!base) { | ||
| 446 | b43err(ring->dev->wl, "Failed to allocate or map pages " | ||
| 447 | "for DMA ringmemory\n"); | ||
| 448 | return NULL; | ||
| 449 | } | ||
| 450 | if (!b43_dma_address_ok(ring, *dmaaddr, size)) { | ||
| 451 | /* The memory does not fit our device constraints. | ||
| 452 | * Retry with GFP_DMA set to get lower memory. */ | ||
| 453 | b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size); | ||
| 454 | base = __b43_get_and_map_ringmem(ring, dmaaddr, size, | ||
| 455 | GFP_KERNEL | GFP_DMA); | ||
| 456 | if (!base) { | ||
| 457 | b43err(ring->dev->wl, "Failed to allocate or map pages " | ||
| 458 | "in the GFP_DMA region for DMA ringmemory\n"); | ||
| 459 | return NULL; | ||
| 460 | } | ||
| 461 | if (!b43_dma_address_ok(ring, *dmaaddr, size)) { | ||
| 462 | b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size); | ||
| 463 | b43err(ring->dev->wl, "Failed to allocate DMA " | ||
| 464 | "ringmemory that fits device constraints\n"); | ||
| 465 | return NULL; | ||
| 466 | } | ||
| 467 | } | ||
| 468 | /* We expect the memory to be 4k aligned, at least. */ | ||
| 469 | if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) { | ||
| 470 | b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size); | ||
| 471 | return NULL; | ||
| 472 | } | ||
| 473 | |||
| 474 | return base; | ||
| 475 | } | ||
| 476 | |||
| 477 | static int alloc_ringmemory(struct b43_dmaring *ring) | 386 | static int alloc_ringmemory(struct b43_dmaring *ring) |
| 478 | { | 387 | { |
| 479 | unsigned int required; | 388 | gfp_t flags = GFP_KERNEL; |
| 480 | void *base; | 389 | |
| 481 | dma_addr_t dmaaddr; | 390 | /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K |
| 482 | 391 | * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing | |
| 483 | /* There are several requirements to the descriptor ring memory: | 392 | * has shown that 4K is sufficient for the latter as long as the buffer |
| 484 | * - The memory region needs to fit the address constraints for the | 393 | * does not cross an 8K boundary. |
| 485 | * device (same as for frame buffers). | 394 | * |
| 486 | * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned. | 395 | * For unknown reasons - possibly a hardware error - the BCM4311 rev |
| 487 | * - For 64bit DMA devices, the descriptor ring must be 8k aligned. | 396 | * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, |
| 397 | * which accounts for the GFP_DMA flag below. | ||
| 398 | * | ||
| 399 | * The flags here must match the flags in free_ringmemory below! | ||
| 488 | */ | 400 | */ |
| 489 | |||
| 490 | if (ring->type == B43_DMA_64BIT) | 401 | if (ring->type == B43_DMA_64BIT) |
| 491 | required = ring->nr_slots * sizeof(struct b43_dmadesc64); | 402 | flags |= GFP_DMA; |
| 492 | else | 403 | ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, |
| 493 | required = ring->nr_slots * sizeof(struct b43_dmadesc32); | 404 | B43_DMA_RINGMEMSIZE, |
| 494 | if (B43_WARN_ON(required > 0x1000)) | 405 | &(ring->dmabase), flags); |
| 406 | if (!ring->descbase) { | ||
| 407 | b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); | ||
| 495 | return -ENOMEM; | 408 | return -ENOMEM; |
| 496 | |||
| 497 | ring->alloc_descsize = 0x1000; | ||
| 498 | base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize); | ||
| 499 | if (!base) | ||
| 500 | return -ENOMEM; | ||
| 501 | ring->alloc_descbase = base; | ||
| 502 | ring->alloc_dmabase = dmaaddr; | ||
| 503 | |||
| 504 | if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) { | ||
| 505 | /* We're on <=32bit DMA, or we already got 8k aligned memory. | ||
| 506 | * That's all we need, so we're fine. */ | ||
| 507 | ring->descbase = base; | ||
| 508 | ring->dmabase = dmaaddr; | ||
| 509 | return 0; | ||
| 510 | } | ||
| 511 | b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize); | ||
| 512 | |||
| 513 | /* Ok, we failed at the 8k alignment requirement. | ||
| 514 | * Try to force-align the memory region now. */ | ||
| 515 | ring->alloc_descsize = 0x2000; | ||
| 516 | base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize); | ||
| 517 | if (!base) | ||
| 518 | return -ENOMEM; | ||
| 519 | ring->alloc_descbase = base; | ||
| 520 | ring->alloc_dmabase = dmaaddr; | ||
| 521 | |||
| 522 | if (is_8k_aligned(dmaaddr)) { | ||
| 523 | /* We're already 8k aligned. That Ok, too. */ | ||
| 524 | ring->descbase = base; | ||
| 525 | ring->dmabase = dmaaddr; | ||
| 526 | return 0; | ||
| 527 | } | 409 | } |
| 528 | /* Force-align it to 8k */ | 410 | memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); |
| 529 | ring->descbase = (void *)((u8 *)base + 0x1000); | ||
| 530 | ring->dmabase = dmaaddr + 0x1000; | ||
| 531 | B43_WARN_ON(!is_8k_aligned(ring->dmabase)); | ||
| 532 | 411 | ||
| 533 | return 0; | 412 | return 0; |
| 534 | } | 413 | } |
| 535 | 414 | ||
| 536 | static void free_ringmemory(struct b43_dmaring *ring) | 415 | static void free_ringmemory(struct b43_dmaring *ring) |
| 537 | { | 416 | { |
| 538 | b43_unmap_and_free_ringmem(ring, ring->alloc_descbase, | 417 | gfp_t flags = GFP_KERNEL; |
| 539 | ring->alloc_dmabase, ring->alloc_descsize); | 418 | |
| 419 | if (ring->type == B43_DMA_64BIT) | ||
| 420 | flags |= GFP_DMA; | ||
| 421 | |||
| 422 | ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE, | ||
| 423 | ring->descbase, ring->dmabase, flags); | ||
| 540 | } | 424 | } |
| 541 | 425 | ||
| 542 | /* Reset the RX DMA channel */ | 426 | /* Reset the RX DMA channel */ |
| @@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring, | |||
| 646 | if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) | 530 | if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) |
| 647 | return 1; | 531 | return 1; |
| 648 | 532 | ||
| 649 | if (!b43_dma_address_ok(ring, addr, buffersize)) { | 533 | switch (ring->type) { |
| 650 | /* We can't support this address. Unmap it again. */ | 534 | case B43_DMA_30BIT: |
| 651 | unmap_descbuffer(ring, addr, buffersize, dma_to_device); | 535 | if ((u64)addr + buffersize > (1ULL << 30)) |
| 652 | return 1; | 536 | goto address_error; |
| 537 | break; | ||
| 538 | case B43_DMA_32BIT: | ||
| 539 | if ((u64)addr + buffersize > (1ULL << 32)) | ||
| 540 | goto address_error; | ||
| 541 | break; | ||
| 542 | case B43_DMA_64BIT: | ||
| 543 | /* Currently we can't have addresses beyond | ||
| 544 | * 64bit in the kernel. */ | ||
| 545 | break; | ||
| 653 | } | 546 | } |
| 654 | 547 | ||
| 655 | /* The address is OK. */ | 548 | /* The address is OK. */ |
| 656 | return 0; | 549 | return 0; |
| 550 | |||
| 551 | address_error: | ||
| 552 | /* We can't support this address. Unmap it again. */ | ||
| 553 | unmap_descbuffer(ring, addr, buffersize, dma_to_device); | ||
| 554 | |||
| 555 | return 1; | ||
| 657 | } | 556 | } |
| 658 | 557 | ||
| 659 | static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) | 558 | static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) |
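The switch introduced above bounds-checks a freshly mapped buffer against the DMA engine's address window. A standalone sketch of the same test, with the helper name chosen here for illustration (the 30/32/64-bit widths come from the hunk):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: a buffer is usable only if it ends inside the
 * addressable window; a full 64-bit engine accepts any kernel address. */
static bool dma_addr_in_range(uint64_t addr, uint64_t buffersize,
			      unsigned int addr_bits)
{
	if (addr_bits >= 64)
		return true;
	return addr + buffersize <= (1ULL << addr_bits);
}

For the 30- and 32-bit rings this mirrors the two goto address_error branches; the 64-bit case matches the comment that addresses beyond 64 bits cannot occur.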
| @@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, | |||
| 715 | meta->dmaaddr = dmaaddr; | 614 | meta->dmaaddr = dmaaddr; |
| 716 | ring->ops->fill_descriptor(ring, desc, dmaaddr, | 615 | ring->ops->fill_descriptor(ring, desc, dmaaddr, |
| 717 | ring->rx_buffersize, 0, 0, 0); | 616 | ring->rx_buffersize, 0, 0, 0); |
| 718 | ssb_dma_sync_single_for_device(ring->dev->dev, | ||
| 719 | ring->alloc_dmabase, | ||
| 720 | ring->alloc_descsize, DMA_TO_DEVICE); | ||
| 721 | 617 | ||
| 722 | return 0; | 618 | return 0; |
| 723 | } | 619 | } |
| @@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring, | |||
| 1354 | } | 1250 | } |
| 1355 | /* Now transfer the whole frame. */ | 1251 | /* Now transfer the whole frame. */ |
| 1356 | wmb(); | 1252 | wmb(); |
| 1357 | ssb_dma_sync_single_for_device(ring->dev->dev, | ||
| 1358 | ring->alloc_dmabase, | ||
| 1359 | ring->alloc_descsize, DMA_TO_DEVICE); | ||
| 1360 | ops->poke_tx(ring, next_slot(ring, slot)); | 1253 | ops->poke_tx(ring, next_slot(ring, slot)); |
| 1361 | return 0; | 1254 | return 0; |
| 1362 | 1255 | ||
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index e607b392314c..f7ab37c4cdbc 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h | |||
| @@ -157,6 +157,7 @@ struct b43_dmadesc_generic { | |||
| 157 | } __attribute__ ((__packed__)); | 157 | } __attribute__ ((__packed__)); |
| 158 | 158 | ||
| 159 | /* Misc DMA constants */ | 159 | /* Misc DMA constants */ |
| 160 | #define B43_DMA_RINGMEMSIZE PAGE_SIZE | ||
| 160 | #define B43_DMA0_RX_FRAMEOFFSET 30 | 161 | #define B43_DMA0_RX_FRAMEOFFSET 30 |
| 161 | 162 | ||
| 162 | /* DMA engine tuning knobs */ | 163 | /* DMA engine tuning knobs */ |
| @@ -246,12 +247,6 @@ struct b43_dmaring { | |||
| 246 | /* The QOS priority assigned to this ring. Only used for TX rings. | 247 | /* The QOS priority assigned to this ring. Only used for TX rings. |
| 247 | * This is the mac80211 "queue" value. */ | 248 | * This is the mac80211 "queue" value. */ |
| 248 | u8 queue_prio; | 249 | u8 queue_prio; |
| 249 | /* Pointers and size of the originally allocated and mapped memory | ||
| 250 | * region for the descriptor ring. */ | ||
| 251 | void *alloc_descbase; | ||
| 252 | dma_addr_t alloc_dmabase; | ||
| 253 | unsigned int alloc_descsize; | ||
| 254 | /* Pointer to our wireless device. */ | ||
| 255 | struct b43_wldev *dev; | 250 | struct b43_wldev *dev; |
| 256 | #ifdef CONFIG_B43_DEBUG | 251 | #ifdef CONFIG_B43_DEBUG |
| 257 | /* Maximum number of used slots. */ | 252 | /* Maximum number of used slots. */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 7da1dab933d9..234891d8cc10 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
| @@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, | |||
| 681 | snr = rx_stats_sig_avg / rx_stats_noise_diff; | 681 | snr = rx_stats_sig_avg / rx_stats_noise_diff; |
| 682 | rx_status.noise = rx_status.signal - | 682 | rx_status.noise = rx_status.signal - |
| 683 | iwl3945_calc_db_from_ratio(snr); | 683 | iwl3945_calc_db_from_ratio(snr); |
| 684 | rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, | ||
| 685 | rx_status.noise); | ||
| 686 | |||
| 687 | /* If noise info not available, calculate signal quality indicator (%) | ||
| 688 | * using just the dBm signal level. */ | ||
| 689 | } else { | 684 | } else { |
| 690 | rx_status.noise = priv->last_rx_noise; | 685 | rx_status.noise = priv->last_rx_noise; |
| 691 | rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0); | ||
| 692 | } | 686 | } |
| 693 | 687 | ||
| 694 | 688 | ||
| 695 | IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", | 689 | IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n", |
| 696 | rx_status.signal, rx_status.noise, rx_status.qual, | 690 | rx_status.signal, rx_status.noise, |
| 697 | rx_stats_sig_avg, rx_stats_noise_diff); | 691 | rx_stats_sig_avg, rx_stats_noise_diff); |
| 698 | 692 | ||
| 699 | header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); | 693 | header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); |
| @@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) | |||
| 1835 | rc = -EIO; | 1829 | rc = -EIO; |
| 1836 | } | 1830 | } |
| 1837 | 1831 | ||
| 1838 | priv->alloc_rxb_page--; | 1832 | iwl_free_pages(priv, cmd.reply_page); |
| 1839 | free_pages(cmd.reply_page, priv->hw_params.rx_page_order); | ||
| 1840 | 1833 | ||
| 1841 | return rc; | 1834 | return rc; |
| 1842 | } | 1835 | } |
| @@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { | |||
| 2836 | .use_isr_legacy = true, | 2829 | .use_isr_legacy = true, |
| 2837 | .ht_greenfield_support = false, | 2830 | .ht_greenfield_support = false, |
| 2838 | .led_compensation = 64, | 2831 | .led_compensation = 64, |
| 2832 | .broken_powersave = true, | ||
| 2839 | }; | 2833 | }; |
| 2840 | 2834 | ||
| 2841 | static struct iwl_cfg iwl3945_abg_cfg = { | 2835 | static struct iwl_cfg iwl3945_abg_cfg = { |
| @@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = { | |||
| 2852 | .use_isr_legacy = true, | 2846 | .use_isr_legacy = true, |
| 2853 | .ht_greenfield_support = false, | 2847 | .ht_greenfield_support = false, |
| 2854 | .led_compensation = 64, | 2848 | .led_compensation = 64, |
| 2849 | .broken_powersave = true, | ||
| 2855 | }; | 2850 | }; |
| 2856 | 2851 | ||
| 2857 | struct pci_device_id iwl3945_hw_card_ids[] = { | 2852 | struct pci_device_id iwl3945_hw_card_ids[] = { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h index ecc23ec1f6a4..531fa125f5a6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h | |||
| @@ -222,7 +222,6 @@ struct iwl3945_ibss_seq { | |||
| 222 | * | 222 | * |
| 223 | *****************************************************************************/ | 223 | *****************************************************************************/ |
| 224 | extern int iwl3945_calc_db_from_ratio(int sig_ratio); | 224 | extern int iwl3945_calc_db_from_ratio(int sig_ratio); |
| 225 | extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm); | ||
| 226 | extern void iwl3945_rx_replenish(void *data); | 225 | extern void iwl3945_rx_replenish(void *data); |
| 227 | extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | 226 | extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); |
| 228 | extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, | 227 | extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 386513b601f5..484c5fdf7c2a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
| @@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, | |||
| 1204 | iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); | 1204 | iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); |
| 1205 | 1205 | ||
| 1206 | /* calculate tx gain adjustment based on power supply voltage */ | 1206 | /* calculate tx gain adjustment based on power supply voltage */ |
| 1207 | voltage = priv->calib_info->voltage; | 1207 | voltage = le16_to_cpu(priv->calib_info->voltage); |
| 1208 | init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); | 1208 | init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); |
| 1209 | voltage_compensation = | 1209 | voltage_compensation = |
| 1210 | iwl4965_get_voltage_compensation(voltage, init_voltage); | 1210 | iwl4965_get_voltage_compensation(voltage, init_voltage); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h index 4ef6804a455a..bc056e9ab85f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h | |||
| @@ -92,11 +92,15 @@ | |||
| 92 | 92 | ||
| 93 | static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) | 93 | static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) |
| 94 | { | 94 | { |
| 95 | u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv, | 95 | u16 temperature, voltage; |
| 96 | EEPROM_5000_TEMPERATURE); | 96 | __le16 *temp_calib = |
| 97 | /* offset = temperature - voltage / coef */ | 97 | (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE); |
| 98 | s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); | 98 | |
| 99 | return offset; | 99 | temperature = le16_to_cpu(temp_calib[0]); |
| 100 | voltage = le16_to_cpu(temp_calib[1]); | ||
| 101 | |||
| 102 | /* offset = temp - volt / coeff */ | ||
| 103 | return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); | ||
| 100 | } | 104 | } |
| 101 | 105 | ||
| 102 | /* Fixed (non-configurable) rx data from phy */ | 106 | /* Fixed (non-configurable) rx data from phy */ |
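The corrected helper converts both 16-bit calibration words out of the card's little-endian layout before doing arithmetic on them. A userspace sketch of the same calculation, with the coefficient value assumed rather than quoted from the header:

#include <stdint.h>

#define VOLTAGE_TO_TEMPERATURE_COEFF (-5)	/* placeholder, not the driver's value */

/* Decode a little-endian 16-bit word regardless of host byte order. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* offset = temperature - voltage / coefficient, both read as LE words. */
static int32_t temp_calib_to_offset(const uint8_t calib[4])
{
	int temperature = get_le16(&calib[0]);
	int voltage = get_le16(&calib[2]);

	return temperature - voltage / VOLTAGE_TO_TEMPERATURE_COEFF;
}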
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index e2f8615c8c9b..33a5866538e7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
| @@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv) | |||
| 333 | static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) | 333 | static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) |
| 334 | { | 334 | { |
| 335 | struct iwl_calib_xtal_freq_cmd cmd; | 335 | struct iwl_calib_xtal_freq_cmd cmd; |
| 336 | u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | 336 | __le16 *xtal_calib = |
| 337 | (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | ||
| 337 | 338 | ||
| 338 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; | 339 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; |
| 339 | cmd.hdr.first_group = 0; | 340 | cmd.hdr.first_group = 0; |
| 340 | cmd.hdr.groups_num = 1; | 341 | cmd.hdr.groups_num = 1; |
| 341 | cmd.hdr.data_valid = 1; | 342 | cmd.hdr.data_valid = 1; |
| 342 | cmd.cap_pin1 = (u8)xtal_calib[0]; | 343 | cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); |
| 343 | cmd.cap_pin2 = (u8)xtal_calib[1]; | 344 | cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); |
| 344 | return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], | 345 | return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], |
| 345 | (u8 *)&cmd, sizeof(cmd)); | 346 | (u8 *)&cmd, sizeof(cmd)); |
| 346 | } | 347 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index fe511cbf012e..b93e49158196 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | |||
| @@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { | |||
| 150 | }; | 150 | }; |
| 151 | 151 | ||
| 152 | /* mbps, mcs */ | 152 | /* mbps, mcs */ |
| 153 | const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { | 153 | static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { |
| 154 | { "1", "BPSK DSSS"}, | 154 | { "1", "BPSK DSSS"}, |
| 155 | { "2", "QPSK DSSS"}, | 155 | { "2", "QPSK DSSS"}, |
| 156 | {"5.5", "BPSK CCK"}, | 156 | {"5.5", "BPSK CCK"}, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index b8377efb3ba7..1c9866daf815 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
| @@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log) | |||
| 1842 | } | 1842 | } |
| 1843 | 1843 | ||
| 1844 | #ifdef CONFIG_IWLWIFI_DEBUG | 1844 | #ifdef CONFIG_IWLWIFI_DEBUG |
| 1845 | if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)) | 1845 | if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) |
| 1846 | size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) | 1846 | size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) |
| 1847 | ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; | 1847 | ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; |
| 1848 | #else | 1848 | #else |
| @@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv) | |||
| 3173 | 3173 | ||
| 3174 | priv->ibss_beacon = NULL; | 3174 | priv->ibss_beacon = NULL; |
| 3175 | 3175 | ||
| 3176 | spin_lock_init(&priv->lock); | ||
| 3177 | spin_lock_init(&priv->sta_lock); | 3176 | spin_lock_init(&priv->sta_lock); |
| 3178 | spin_lock_init(&priv->hcmd_lock); | 3177 | spin_lock_init(&priv->hcmd_lock); |
| 3179 | 3178 | ||
| @@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3361 | (unsigned long long) pci_resource_len(pdev, 0)); | 3360 | (unsigned long long) pci_resource_len(pdev, 0)); |
| 3362 | IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); | 3361 | IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); |
| 3363 | 3362 | ||
| 3364 | /* this spin lock will be used in apm_ops.init and EEPROM access | 3363 | /* these spin locks will be used in apm_ops.init and EEPROM access |
| 3365 | * we should init now | 3364 | * we should init now |
| 3366 | */ | 3365 | */ |
| 3367 | spin_lock_init(&priv->reg_lock); | 3366 | spin_lock_init(&priv->reg_lock); |
| 3367 | spin_lock_init(&priv->lock); | ||
| 3368 | iwl_hw_detect(priv); | 3368 | iwl_hw_detect(priv); |
| 3369 | IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", | 3369 | IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", |
| 3370 | priv->cfg->name, priv->hw_rev); | 3370 | priv->cfg->name, priv->hw_rev); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index a7bfae01f19b..1ec8cb4d5eae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h | |||
| @@ -77,8 +77,7 @@ | |||
| 77 | * The MAC (uCode processor, etc.) does not need to be powered up for accessing | 77 | * The MAC (uCode processor, etc.) does not need to be powered up for accessing |
| 78 | * the CSR registers. | 78 | * the CSR registers. |
| 79 | * | 79 | * |
| 80 | * NOTE: Newer devices using one-time-programmable (OTP) memory | 80 | * NOTE: Device does need to be awake in order to read this memory |
| 81 | * require device to be awake in order to read this memory | ||
| 82 | * via CSR_EEPROM and CSR_OTP registers | 81 | * via CSR_EEPROM and CSR_OTP registers |
| 83 | */ | 82 | */ |
| 84 | #define CSR_BASE (0x000) | 83 | #define CSR_BASE (0x000) |
| @@ -111,9 +110,8 @@ | |||
| 111 | /* | 110 | /* |
| 112 | * EEPROM and OTP (one-time-programmable) memory reads | 111 | * EEPROM and OTP (one-time-programmable) memory reads |
| 113 | * | 112 | * |
| 114 | * NOTE: For (newer) devices using OTP, device must be awake, initialized via | 113 | * NOTE: Device must be awake, initialized via apm_ops.init(), |
| 115 | * apm_ops.init() in order to read. Older devices (3945/4965/5000) | 114 | * in order to read. |
| 116 | * use EEPROM and do not require this. | ||
| 117 | */ | 115 | */ |
| 118 | #define CSR_EEPROM_REG (CSR_BASE+0x02c) | 116 | #define CSR_EEPROM_REG (CSR_BASE+0x02c) |
| 119 | #define CSR_EEPROM_GP (CSR_BASE+0x030) | 117 | #define CSR_EEPROM_GP (CSR_BASE+0x030) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 2673e9a4db92..165d1f6e2dd9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
| @@ -1168,7 +1168,7 @@ struct iwl_priv { | |||
| 1168 | u32 last_beacon_time; | 1168 | u32 last_beacon_time; |
| 1169 | u64 last_tsf; | 1169 | u64 last_tsf; |
| 1170 | 1170 | ||
| 1171 | /* eeprom */ | 1171 | /* eeprom -- this is in the card's little endian byte order */ |
| 1172 | u8 *eeprom; | 1172 | u8 *eeprom; |
| 1173 | int nvm_device_type; | 1173 | int nvm_device_type; |
| 1174 | struct iwl_eeprom_calib_info *calib_info; | 1174 | struct iwl_eeprom_calib_info *calib_info; |
| @@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch) | |||
| 1353 | return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; | 1353 | return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; |
| 1354 | } | 1354 | } |
| 1355 | 1355 | ||
| 1356 | static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page) | ||
| 1357 | { | ||
| 1358 | __free_pages(page, priv->hw_params.rx_page_order); | ||
| 1359 | priv->alloc_rxb_page--; | ||
| 1360 | } | ||
| 1361 | |||
| 1362 | static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page) | ||
| 1363 | { | ||
| 1364 | free_pages(page, priv->hw_params.rx_page_order); | ||
| 1365 | priv->alloc_rxb_page--; | ||
| 1366 | } | ||
| 1356 | #endif /* __iwl_dev_h__ */ | 1367 | #endif /* __iwl_dev_h__ */ |
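The two new inline helpers bundle the page free with the alloc_rxb_page bookkeeping so call sites cannot forget the decrement. A userspace analogue of that pattern (structure and names hypothetical):

#include <stdlib.h>

/* Hypothetical userspace analogue of the bookkeeping the helpers centralize. */
struct rx_accounting {
	long alloc_rxb_page;	/* pages currently outstanding */
};

/* Freeing and accounting always travel together; every caller that used to
 * open-code both steps now goes through a single helper. */
static void rx_free_page(struct rx_accounting *acct, void *page)
{
	free(page);
	acct->alloc_rxb_page--;
}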
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 3946e5c03f81..4a30969689ff 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | |||
| @@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv) | |||
| 370 | return ret; | 370 | return ret; |
| 371 | } | 371 | } |
| 372 | 372 | ||
| 373 | static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) | 373 | static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data) |
| 374 | { | 374 | { |
| 375 | int ret = 0; | 375 | int ret = 0; |
| 376 | u32 r; | 376 | u32 r; |
| @@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) | |||
| 404 | CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); | 404 | CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); |
| 405 | IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); | 405 | IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); |
| 406 | } | 406 | } |
| 407 | *eeprom_data = le16_to_cpu((__force __le16)(r >> 16)); | 407 | *eeprom_data = cpu_to_le16(r >> 16); |
| 408 | return 0; | 408 | return 0; |
| 409 | } | 409 | } |
| 410 | 410 | ||
| @@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) | |||
| 413 | */ | 413 | */ |
| 414 | static bool iwl_is_otp_empty(struct iwl_priv *priv) | 414 | static bool iwl_is_otp_empty(struct iwl_priv *priv) |
| 415 | { | 415 | { |
| 416 | u16 next_link_addr = 0, link_value; | 416 | u16 next_link_addr = 0; |
| 417 | __le16 link_value; | ||
| 417 | bool is_empty = false; | 418 | bool is_empty = false; |
| 418 | 419 | ||
| 419 | /* locate the beginning of OTP link list */ | 420 | /* locate the beginning of OTP link list */ |
| @@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv) | |||
| 443 | static int iwl_find_otp_image(struct iwl_priv *priv, | 444 | static int iwl_find_otp_image(struct iwl_priv *priv, |
| 444 | u16 *validblockaddr) | 445 | u16 *validblockaddr) |
| 445 | { | 446 | { |
| 446 | u16 next_link_addr = 0, link_value = 0, valid_addr; | 447 | u16 next_link_addr = 0, valid_addr; |
| 448 | __le16 link_value = 0; | ||
| 447 | int usedblocks = 0; | 449 | int usedblocks = 0; |
| 448 | 450 | ||
| 449 | /* set addressing mode to absolute to traverse the link list */ | 451 | /* set addressing mode to absolute to traverse the link list */ |
| @@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv, | |||
| 463 | * check for more block on the link list | 465 | * check for more block on the link list |
| 464 | */ | 466 | */ |
| 465 | valid_addr = next_link_addr; | 467 | valid_addr = next_link_addr; |
| 466 | next_link_addr = link_value * sizeof(u16); | 468 | next_link_addr = le16_to_cpu(link_value) * sizeof(u16); |
| 467 | IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", | 469 | IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", |
| 468 | usedblocks, next_link_addr); | 470 | usedblocks, next_link_addr); |
| 469 | if (iwl_read_otp_word(priv, next_link_addr, &link_value)) | 471 | if (iwl_read_otp_word(priv, next_link_addr, &link_value)) |
| @@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv, | |||
| 497 | */ | 499 | */ |
| 498 | int iwl_eeprom_init(struct iwl_priv *priv) | 500 | int iwl_eeprom_init(struct iwl_priv *priv) |
| 499 | { | 501 | { |
| 500 | u16 *e; | 502 | __le16 *e; |
| 501 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); | 503 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); |
| 502 | int sz; | 504 | int sz; |
| 503 | int ret; | 505 | int ret; |
| @@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv) | |||
| 516 | ret = -ENOMEM; | 518 | ret = -ENOMEM; |
| 517 | goto alloc_err; | 519 | goto alloc_err; |
| 518 | } | 520 | } |
| 519 | e = (u16 *)priv->eeprom; | 521 | e = (__le16 *)priv->eeprom; |
| 520 | 522 | ||
| 521 | if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { | 523 | priv->cfg->ops->lib->apm_ops.init(priv); |
| 522 | /* OTP reads require powered-up chip */ | ||
| 523 | priv->cfg->ops->lib->apm_ops.init(priv); | ||
| 524 | } | ||
| 525 | 524 | ||
| 526 | ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); | 525 | ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); |
| 527 | if (ret < 0) { | 526 | if (ret < 0) { |
| @@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | |||
| 562 | } | 561 | } |
| 563 | for (addr = validblockaddr; addr < validblockaddr + sz; | 562 | for (addr = validblockaddr; addr < validblockaddr + sz; |
| 564 | addr += sizeof(u16)) { | 563 | addr += sizeof(u16)) { |
| 565 | u16 eeprom_data; | 564 | __le16 eeprom_data; |
| 566 | 565 | ||
| 567 | ret = iwl_read_otp_word(priv, addr, &eeprom_data); | 566 | ret = iwl_read_otp_word(priv, addr, &eeprom_data); |
| 568 | if (ret) | 567 | if (ret) |
| @@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv) | |||
| 570 | e[cache_addr / 2] = eeprom_data; | 569 | e[cache_addr / 2] = eeprom_data; |
| 571 | cache_addr += sizeof(u16); | 570 | cache_addr += sizeof(u16); |
| 572 | } | 571 | } |
| 573 | |||
| 574 | /* | ||
| 575 | * Now that OTP reads are complete, reset chip to save | ||
| 576 | * power until we load uCode during "up". | ||
| 577 | */ | ||
| 578 | priv->cfg->ops->lib->apm_ops.stop(priv); | ||
| 579 | |||
| 580 | } else { | 572 | } else { |
| 581 | /* eeprom is an array of 16bit values */ | 573 | /* eeprom is an array of 16bit values */ |
| 582 | for (addr = 0; addr < sz; addr += sizeof(u16)) { | 574 | for (addr = 0; addr < sz; addr += sizeof(u16)) { |
| @@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | |||
| 594 | goto done; | 586 | goto done; |
| 595 | } | 587 | } |
| 596 | r = _iwl_read_direct32(priv, CSR_EEPROM_REG); | 588 | r = _iwl_read_direct32(priv, CSR_EEPROM_REG); |
| 597 | e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); | 589 | e[addr / 2] = cpu_to_le16(r >> 16); |
| 598 | } | 590 | } |
| 599 | } | 591 | } |
| 600 | ret = 0; | 592 | ret = 0; |
| @@ -603,6 +595,8 @@ done: | |||
| 603 | err: | 595 | err: |
| 604 | if (ret) | 596 | if (ret) |
| 605 | iwl_eeprom_free(priv); | 597 | iwl_eeprom_free(priv); |
| 598 | /* Reset chip to save power until we load uCode during "up". */ | ||
| 599 | priv->cfg->ops->lib->apm_ops.stop(priv); | ||
| 606 | alloc_err: | 600 | alloc_err: |
| 607 | return ret; | 601 | return ret; |
| 608 | } | 602 | } |
| @@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv, | |||
| 755 | ch_info->ht40_eeprom = *eeprom_ch; | 749 | ch_info->ht40_eeprom = *eeprom_ch; |
| 756 | ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; | 750 | ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; |
| 757 | ch_info->ht40_flags = eeprom_ch->flags; | 751 | ch_info->ht40_flags = eeprom_ch->flags; |
| 758 | ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; | 752 | if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) |
| 753 | ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; | ||
| 759 | 754 | ||
| 760 | return 0; | 755 | return 0; |
| 761 | } | 756 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 5cd2b66bbe45..0cd9c02ee044 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
| @@ -137,7 +137,7 @@ struct iwl_eeprom_channel { | |||
| 137 | * | 137 | * |
| 138 | */ | 138 | */ |
| 139 | struct iwl_eeprom_enhanced_txpwr { | 139 | struct iwl_eeprom_enhanced_txpwr { |
| 140 | u16 common; | 140 | __le16 common; |
| 141 | s8 chain_a_max; | 141 | s8 chain_a_max; |
| 142 | s8 chain_b_max; | 142 | s8 chain_b_max; |
| 143 | s8 chain_c_max; | 143 | s8 chain_c_max; |
| @@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info { | |||
| 360 | struct iwl_eeprom_calib_info { | 360 | struct iwl_eeprom_calib_info { |
| 361 | u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ | 361 | u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ |
| 362 | u8 saturation_power52; /* half-dBm */ | 362 | u8 saturation_power52; /* half-dBm */ |
| 363 | s16 voltage; /* signed */ | 363 | __le16 voltage; /* signed */ |
| 364 | struct iwl_eeprom_calib_subband_info | 364 | struct iwl_eeprom_calib_subband_info |
| 365 | band_info[EEPROM_TX_POWER_BANDS]; | 365 | band_info[EEPROM_TX_POWER_BANDS]; |
| 366 | } __attribute__ ((packed)); | 366 | } __attribute__ ((packed)); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c index a23165948202..30e9ea6d54ec 100644 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c | |||
| @@ -234,7 +234,7 @@ cancel: | |||
| 234 | } | 234 | } |
| 235 | fail: | 235 | fail: |
| 236 | if (cmd->reply_page) { | 236 | if (cmd->reply_page) { |
| 237 | free_pages(cmd->reply_page, priv->hw_params.rx_page_order); | 237 | iwl_free_pages(priv, cmd->reply_page); |
| 238 | cmd->reply_page = 0; | 238 | cmd->reply_page = 0; |
| 239 | } | 239 | } |
| 240 | out: | 240 | out: |
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 6090bc15a6d5..6f36b6e79f5e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c | |||
| @@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | |||
| 345 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | 345 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, |
| 346 | PAGE_SIZE << priv->hw_params.rx_page_order, | 346 | PAGE_SIZE << priv->hw_params.rx_page_order, |
| 347 | PCI_DMA_FROMDEVICE); | 347 | PCI_DMA_FROMDEVICE); |
| 348 | __free_pages(rxq->pool[i].page, | 348 | __iwl_free_pages(priv, rxq->pool[i].page); |
| 349 | priv->hw_params.rx_page_order); | ||
| 350 | rxq->pool[i].page = NULL; | 349 | rxq->pool[i].page = NULL; |
| 351 | priv->alloc_rxb_page--; | ||
| 352 | } | 350 | } |
| 353 | } | 351 | } |
| 354 | 352 | ||
| @@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | |||
| 416 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | 414 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, |
| 417 | PAGE_SIZE << priv->hw_params.rx_page_order, | 415 | PAGE_SIZE << priv->hw_params.rx_page_order, |
| 418 | PCI_DMA_FROMDEVICE); | 416 | PCI_DMA_FROMDEVICE); |
| 419 | priv->alloc_rxb_page--; | 417 | __iwl_free_pages(priv, rxq->pool[i].page); |
| 420 | __free_pages(rxq->pool[i].page, | ||
| 421 | priv->hw_params.rx_page_order); | ||
| 422 | rxq->pool[i].page = NULL; | 418 | rxq->pool[i].page = NULL; |
| 423 | } | 419 | } |
| 424 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | 420 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); |
| @@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv, | |||
| 654 | } | 650 | } |
| 655 | EXPORT_SYMBOL(iwl_reply_statistics); | 651 | EXPORT_SYMBOL(iwl_reply_statistics); |
| 656 | 652 | ||
| 657 | #define PERFECT_RSSI (-20) /* dBm */ | ||
| 658 | #define WORST_RSSI (-95) /* dBm */ | ||
| 659 | #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) | ||
| 660 | |||
| 661 | /* Calculate an indication of rx signal quality (a percentage, not dBm!). | ||
| 662 | * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info | ||
| 663 | * about formulas used below. */ | ||
| 664 | static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) | ||
| 665 | { | ||
| 666 | int sig_qual; | ||
| 667 | int degradation = PERFECT_RSSI - rssi_dbm; | ||
| 668 | |||
| 669 | /* If we get a noise measurement, use signal-to-noise ratio (SNR) | ||
| 670 | * as indicator; formula is (signal dbm - noise dbm). | ||
| 671 | * SNR at or above 40 is a great signal (100%). | ||
| 672 | * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. | ||
| 673 | * Weakest usable signal is usually 10 - 15 dB SNR. */ | ||
| 674 | if (noise_dbm) { | ||
| 675 | if (rssi_dbm - noise_dbm >= 40) | ||
| 676 | return 100; | ||
| 677 | else if (rssi_dbm < noise_dbm) | ||
| 678 | return 0; | ||
| 679 | sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; | ||
| 680 | |||
| 681 | /* Else use just the signal level. | ||
| 682 | * This formula is a least squares fit of data points collected and | ||
| 683 | * compared with a reference system that had a percentage (%) display | ||
| 684 | * for signal quality. */ | ||
| 685 | } else | ||
| 686 | sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * | ||
| 687 | (15 * RSSI_RANGE + 62 * degradation)) / | ||
| 688 | (RSSI_RANGE * RSSI_RANGE); | ||
| 689 | |||
| 690 | if (sig_qual > 100) | ||
| 691 | sig_qual = 100; | ||
| 692 | else if (sig_qual < 1) | ||
| 693 | sig_qual = 0; | ||
| 694 | |||
| 695 | return sig_qual; | ||
| 696 | } | ||
| 697 | |||
| 698 | /* Calc max signal level (dBm) among 3 possible receivers */ | 653 | /* Calc max signal level (dBm) among 3 possible receivers */ |
| 699 | static inline int iwl_calc_rssi(struct iwl_priv *priv, | 654 | static inline int iwl_calc_rssi(struct iwl_priv *priv, |
| 700 | struct iwl_rx_phy_res *rx_resp) | 655 | struct iwl_rx_phy_res *rx_resp) |
| @@ -1105,11 +1060,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv, | |||
| 1105 | if (iwl_is_associated(priv) && | 1060 | if (iwl_is_associated(priv) && |
| 1106 | !test_bit(STATUS_SCANNING, &priv->status)) { | 1061 | !test_bit(STATUS_SCANNING, &priv->status)) { |
| 1107 | rx_status.noise = priv->last_rx_noise; | 1062 | rx_status.noise = priv->last_rx_noise; |
| 1108 | rx_status.qual = iwl_calc_sig_qual(rx_status.signal, | ||
| 1109 | rx_status.noise); | ||
| 1110 | } else { | 1063 | } else { |
| 1111 | rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 1064 | rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; |
| 1112 | rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0); | ||
| 1113 | } | 1065 | } |
| 1114 | 1066 | ||
| 1115 | /* Reset beacon noise level if not associated. */ | 1067 | /* Reset beacon noise level if not associated. */ |
| @@ -1122,8 +1074,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv, | |||
| 1122 | iwl_dbg_report_frame(priv, phy_res, len, header, 1); | 1074 | iwl_dbg_report_frame(priv, phy_res, len, header, 1); |
| 1123 | #endif | 1075 | #endif |
| 1124 | iwl_dbg_log_rx_data_frame(priv, len, header); | 1076 | iwl_dbg_log_rx_data_frame(priv, len, header); |
| 1125 | IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n", | 1077 | IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n", |
| 1126 | rx_status.signal, rx_status.noise, rx_status.qual, | 1078 | rx_status.signal, rx_status.noise, |
| 1127 | (unsigned long long)rx_status.mactime); | 1079 | (unsigned long long)rx_status.mactime); |
| 1128 | 1080 | ||
| 1129 | /* | 1081 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index a2b2b8315ff9..fa1c89ba6459 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
| @@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv) | |||
| 144 | clear_bit(STATUS_SCAN_HW, &priv->status); | 144 | clear_bit(STATUS_SCAN_HW, &priv->status); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | priv->alloc_rxb_page--; | 147 | iwl_free_pages(priv, cmd.reply_page); |
| 148 | free_pages(cmd.reply_page, priv->hw_params.rx_page_order); | ||
| 149 | 148 | ||
| 150 | return ret; | 149 | return ret; |
| 151 | } | 150 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index cd6a6901216e..cde09a890b73 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
| @@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, | |||
| 164 | break; | 164 | break; |
| 165 | } | 165 | } |
| 166 | } | 166 | } |
| 167 | 167 | iwl_free_pages(priv, cmd.reply_page); | |
| 168 | priv->alloc_rxb_page--; | ||
| 169 | free_pages(cmd.reply_page, priv->hw_params.rx_page_order); | ||
| 170 | 168 | ||
| 171 | return ret; | 169 | return ret; |
| 172 | } | 170 | } |
| @@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, | |||
| 391 | break; | 389 | break; |
| 392 | } | 390 | } |
| 393 | } | 391 | } |
| 394 | 392 | iwl_free_pages(priv, cmd.reply_page); | |
| 395 | priv->alloc_rxb_page--; | ||
| 396 | free_pages(cmd.reply_page, priv->hw_params.rx_page_order); | ||
| 397 | 393 | ||
| 398 | return ret; | 394 | return ret; |
| 399 | } | 395 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 00da5e152d46..87ce2bd292c7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
| @@ -407,13 +407,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |||
| 407 | int txq_id; | 407 | int txq_id; |
| 408 | 408 | ||
| 409 | /* Tx queues */ | 409 | /* Tx queues */ |
| 410 | if (priv->txq) | 410 | if (priv->txq) { |
| 411 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; | 411 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; |
| 412 | txq_id++) | 412 | txq_id++) |
| 413 | if (txq_id == IWL_CMD_QUEUE_NUM) | 413 | if (txq_id == IWL_CMD_QUEUE_NUM) |
| 414 | iwl_cmd_queue_free(priv); | 414 | iwl_cmd_queue_free(priv); |
| 415 | else | 415 | else |
| 416 | iwl_tx_queue_free(priv, txq_id); | 416 | iwl_tx_queue_free(priv, txq_id); |
| 417 | } | ||
| 417 | iwl_free_dma_ptr(priv, &priv->kw); | 418 | iwl_free_dma_ptr(priv, &priv->kw); |
| 418 | 419 | ||
| 419 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | 420 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 2a28a1f8b1fe..f8e4e4b18d02 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
| @@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
| 548 | txq = &priv->txq[txq_id]; | 548 | txq = &priv->txq[txq_id]; |
| 549 | q = &txq->q; | 549 | q = &txq->q; |
| 550 | 550 | ||
| 551 | if ((iwl_queue_space(q) < q->high_mark)) | ||
| 552 | goto drop; | ||
| 553 | |||
| 551 | spin_lock_irqsave(&priv->lock, flags); | 554 | spin_lock_irqsave(&priv->lock, flags); |
| 552 | 555 | ||
| 553 | idx = get_cmd_index(q, q->write_ptr, 0); | 556 | idx = get_cmd_index(q, q->write_ptr, 0); |
| @@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, | |||
| 812 | break; | 815 | break; |
| 813 | } | 816 | } |
| 814 | 817 | ||
| 815 | free_pages(cmd.reply_page, priv->hw_params.rx_page_order); | 818 | iwl_free_pages(priv, cmd.reply_page); |
| 816 | 819 | ||
| 817 | return rc; | 820 | return rc; |
| 818 | } | 821 | } |
| @@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | |||
| 1198 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | 1201 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, |
| 1199 | PAGE_SIZE << priv->hw_params.rx_page_order, | 1202 | PAGE_SIZE << priv->hw_params.rx_page_order, |
| 1200 | PCI_DMA_FROMDEVICE); | 1203 | PCI_DMA_FROMDEVICE); |
| 1201 | priv->alloc_rxb_page--; | 1204 | __iwl_free_pages(priv, rxq->pool[i].page); |
| 1202 | __free_pages(rxq->pool[i].page, | ||
| 1203 | priv->hw_params.rx_page_order); | ||
| 1204 | rxq->pool[i].page = NULL; | 1205 | rxq->pool[i].page = NULL; |
| 1205 | } | 1206 | } |
| 1206 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | 1207 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); |
| @@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx | |||
| 1247 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | 1248 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, |
| 1248 | PAGE_SIZE << priv->hw_params.rx_page_order, | 1249 | PAGE_SIZE << priv->hw_params.rx_page_order, |
| 1249 | PCI_DMA_FROMDEVICE); | 1250 | PCI_DMA_FROMDEVICE); |
| 1250 | __free_pages(rxq->pool[i].page, | 1251 | __iwl_free_pages(priv, rxq->pool[i].page); |
| 1251 | priv->hw_params.rx_page_order); | ||
| 1252 | rxq->pool[i].page = NULL; | 1252 | rxq->pool[i].page = NULL; |
| 1253 | priv->alloc_rxb_page--; | ||
| 1254 | } | 1253 | } |
| 1255 | } | 1254 | } |
| 1256 | 1255 | ||
| @@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio) | |||
| 1300 | return (int)ratio2dB[sig_ratio]; | 1299 | return (int)ratio2dB[sig_ratio]; |
| 1301 | } | 1300 | } |
| 1302 | 1301 | ||
| 1303 | #define PERFECT_RSSI (-20) /* dBm */ | ||
| 1304 | #define WORST_RSSI (-95) /* dBm */ | ||
| 1305 | #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) | ||
| 1306 | |||
| 1307 | /* Calculate an indication of rx signal quality (a percentage, not dBm!). | ||
| 1308 | * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info | ||
| 1309 | * about formulas used below. */ | ||
| 1310 | int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm) | ||
| 1311 | { | ||
| 1312 | int sig_qual; | ||
| 1313 | int degradation = PERFECT_RSSI - rssi_dbm; | ||
| 1314 | |||
| 1315 | /* If we get a noise measurement, use signal-to-noise ratio (SNR) | ||
| 1316 | * as indicator; formula is (signal dbm - noise dbm). | ||
| 1317 | * SNR at or above 40 is a great signal (100%). | ||
| 1318 | * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. | ||
| 1319 | * Weakest usable signal is usually 10 - 15 dB SNR. */ | ||
| 1320 | if (noise_dbm) { | ||
| 1321 | if (rssi_dbm - noise_dbm >= 40) | ||
| 1322 | return 100; | ||
| 1323 | else if (rssi_dbm < noise_dbm) | ||
| 1324 | return 0; | ||
| 1325 | sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; | ||
| 1326 | |||
| 1327 | /* Else use just the signal level. | ||
| 1328 | * This formula is a least squares fit of data points collected and | ||
| 1329 | * compared with a reference system that had a percentage (%) display | ||
| 1330 | * for signal quality. */ | ||
| 1331 | } else | ||
| 1332 | sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * | ||
| 1333 | (15 * RSSI_RANGE + 62 * degradation)) / | ||
| 1334 | (RSSI_RANGE * RSSI_RANGE); | ||
| 1335 | |||
| 1336 | if (sig_qual > 100) | ||
| 1337 | sig_qual = 100; | ||
| 1338 | else if (sig_qual < 1) | ||
| 1339 | sig_qual = 0; | ||
| 1340 | |||
| 1341 | return sig_qual; | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | /** | 1302 | /** |
| 1345 | * iwl3945_rx_handle - Main entry function for receiving responses from uCode | 1303 | * iwl3945_rx_handle - Main entry function for receiving responses from uCode |
| 1346 | * | 1304 | * |
| @@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log) | |||
| 1688 | } | 1646 | } |
| 1689 | 1647 | ||
| 1690 | #ifdef CONFIG_IWLWIFI_DEBUG | 1648 | #ifdef CONFIG_IWLWIFI_DEBUG |
| 1691 | if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)) | 1649 | if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) |
| 1692 | size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) | 1650 | size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) |
| 1693 | ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; | 1651 | ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; |
| 1694 | #else | 1652 | #else |
| @@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) | |||
| 3867 | priv->retry_rate = 1; | 3825 | priv->retry_rate = 1; |
| 3868 | priv->ibss_beacon = NULL; | 3826 | priv->ibss_beacon = NULL; |
| 3869 | 3827 | ||
| 3870 | spin_lock_init(&priv->lock); | ||
| 3871 | spin_lock_init(&priv->sta_lock); | 3828 | spin_lock_init(&priv->sta_lock); |
| 3872 | spin_lock_init(&priv->hcmd_lock); | 3829 | spin_lock_init(&priv->hcmd_lock); |
| 3873 | 3830 | ||
| @@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) | |||
| 3936 | /* Tell mac80211 our characteristics */ | 3893 | /* Tell mac80211 our characteristics */ |
| 3937 | hw->flags = IEEE80211_HW_SIGNAL_DBM | | 3894 | hw->flags = IEEE80211_HW_SIGNAL_DBM | |
| 3938 | IEEE80211_HW_NOISE_DBM | | 3895 | IEEE80211_HW_NOISE_DBM | |
| 3939 | IEEE80211_HW_SPECTRUM_MGMT | | 3896 | IEEE80211_HW_SPECTRUM_MGMT; |
| 3940 | IEEE80211_HW_SUPPORTS_PS | | 3897 | |
| 3941 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | 3898 | if (!priv->cfg->broken_powersave) |
| 3899 | hw->flags |= IEEE80211_HW_SUPPORTS_PS | | ||
| 3900 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | ||
| 3942 | 3901 | ||
| 3943 | hw->wiphy->interface_modes = | 3902 | hw->wiphy->interface_modes = |
| 3944 | BIT(NL80211_IFTYPE_STATION) | | 3903 | BIT(NL80211_IFTYPE_STATION) | |
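With broken_powersave now set in both 3945 configs, the powersave capabilities are only advertised to mac80211 when the hardware is not flagged as broken. A small sketch of that gating, with flag values invented for illustration:

#include <stdbool.h>
#include <stdint.h>

#define HW_SIGNAL_DBM		(1u << 0)	/* illustrative values only */
#define HW_NOISE_DBM		(1u << 1)
#define HW_SPECTRUM_MGMT	(1u << 2)
#define HW_SUPPORTS_PS		(1u << 3)
#define HW_SUPPORTS_DYNAMIC_PS	(1u << 4)

/* Base capabilities are unconditional; powersave is opted out per config. */
static uint32_t build_hw_flags(bool broken_powersave)
{
	uint32_t flags = HW_SIGNAL_DBM | HW_NOISE_DBM | HW_SPECTRUM_MGMT;

	if (!broken_powersave)
		flags |= HW_SUPPORTS_PS | HW_SUPPORTS_DYNAMIC_PS;
	return flags;
}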
| @@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
| 4057 | * PCI Tx retries from interfering with C3 CPU state */ | 4016 | * PCI Tx retries from interfering with C3 CPU state */ |
| 4058 | pci_write_config_byte(pdev, 0x41, 0x00); | 4017 | pci_write_config_byte(pdev, 0x41, 0x00); |
| 4059 | 4018 | ||
| 4060 | /* this spin lock will be used in apm_ops.init and EEPROM access | 4019 | /* these spin locks will be used in apm_ops.init and EEPROM access |
| 4061 | * we should init now | 4020 | * we should init now |
| 4062 | */ | 4021 | */ |
| 4063 | spin_lock_init(&priv->reg_lock); | 4022 | spin_lock_init(&priv->reg_lock); |
| 4023 | spin_lock_init(&priv->lock); | ||
| 4064 | 4024 | ||
| 4065 | /*********************** | 4025 | /*********************** |
| 4066 | * 4. Read EEPROM | 4026 | * 4. Read EEPROM |
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h index 5a26bb05a33a..842811142bef 100644 --- a/drivers/net/wireless/iwmc3200wifi/iwm.h +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h | |||
| @@ -268,7 +268,7 @@ struct iwm_priv { | |||
| 268 | 268 | ||
| 269 | struct sk_buff_head rx_list; | 269 | struct sk_buff_head rx_list; |
| 270 | struct list_head rx_tickets; | 270 | struct list_head rx_tickets; |
| 271 | struct list_head rx_packets[IWM_RX_ID_HASH]; | 271 | struct list_head rx_packets[IWM_RX_ID_HASH + 1]; |
| 272 | struct workqueue_struct *rx_wq; | 272 | struct workqueue_struct *rx_wq; |
| 273 | struct work_struct rx_worker; | 273 | struct work_struct rx_worker; |
| 274 | 274 | ||
| @@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm); | |||
| 349 | int iwm_down(struct iwm_priv *iwm); | 349 | int iwm_down(struct iwm_priv *iwm); |
| 350 | 350 | ||
| 351 | /* TX API */ | 351 | /* TX API */ |
| 352 | u16 iwm_tid_to_queue(u16 tid); | 352 | int iwm_tid_to_queue(u16 tid); |
| 353 | void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages); | 353 | void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages); |
| 354 | void iwm_tx_worker(struct work_struct *work); | 354 | void iwm_tx_worker(struct work_struct *work); |
| 355 | int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 355 | int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index e4f0f8705f65..c4c0d23c63ec 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c | |||
| @@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev) | |||
| 76 | */ | 76 | */ |
| 77 | static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; | 77 | static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; |
| 78 | 78 | ||
| 79 | u16 iwm_tid_to_queue(u16 tid) | 79 | int iwm_tid_to_queue(u16 tid) |
| 80 | { | 80 | { |
| 81 | if (tid > IWM_UMAC_TID_NR - 2) | 81 | if (tid > IWM_UMAC_TID_NR - 2) |
| 82 | return -EINVAL; | 82 | return -EINVAL; |
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 1c57c1f72cba..6d6ed7485175 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c | |||
| @@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf, | |||
| 1126 | 1126 | ||
| 1127 | if (!stop) { | 1127 | if (!stop) { |
| 1128 | struct iwm_tx_queue *txq; | 1128 | struct iwm_tx_queue *txq; |
| 1129 | u16 queue = iwm_tid_to_queue(bit); | 1129 | int queue = iwm_tid_to_queue(bit); |
| 1130 | 1130 | ||
| 1131 | if (queue < 0) | 1131 | if (queue < 0) |
| 1132 | continue; | 1132 | continue; |
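Changing iwm_tid_to_queue() to return int matters because a negative error code stored in a u16 wraps to a large positive value, so the caller's queue < 0 test can never fire. A compilable illustration, with the mapping table taken from the hunk and the bound and error value simplified:

#include <stdint.h>
#include <stdio.h>

static const uint16_t tid_to_queue_map[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };

static int tid_to_queue(uint16_t tid)
{
	if (tid >= 8)		/* simplified bound, stands in for the driver's check */
		return -1;	/* must stay negative for the caller's test */
	return tid_to_queue_map[tid];
}

int main(void)
{
	uint16_t as_u16 = (uint16_t)tid_to_queue(9);	/* wraps to 65535 */
	int as_int = tid_to_queue(9);			/* stays -1 */

	printf("u16 result: %u, int result: %d\n", as_u16, as_int);
	return 0;
}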
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c index 2f91c9b808af..92b7a357a5e4 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/libertas/mesh.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/delay.h> | 2 | #include <linux/delay.h> |
| 3 | #include <linux/etherdevice.h> | 3 | #include <linux/etherdevice.h> |
| 4 | #include <linux/netdevice.h> | 4 | #include <linux/netdevice.h> |
| 5 | #include <linux/if_ether.h> | ||
| 5 | #include <linux/if_arp.h> | 6 | #include <linux/if_arp.h> |
| 6 | #include <linux/kthread.h> | 7 | #include <linux/kthread.h> |
| 7 | #include <linux/kfifo.h> | 8 | #include <linux/kfifo.h> |
| @@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv) | |||
| 351 | 352 | ||
| 352 | mesh_dev->netdev_ops = &mesh_netdev_ops; | 353 | mesh_dev->netdev_ops = &mesh_netdev_ops; |
| 353 | mesh_dev->ethtool_ops = &lbs_ethtool_ops; | 354 | mesh_dev->ethtool_ops = &lbs_ethtool_ops; |
| 354 | memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, | 355 | memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN); |
| 355 | sizeof(priv->dev->dev_addr)); | ||
| 356 | 356 | ||
| 357 | SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); | 357 | SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); |
| 358 | 358 | ||
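The ETH_ALEN change above fixes a classic sizeof mistake: taking sizeof() of an address reached through a pointer measures the pointer, not the 6-byte MAC address it points at. A small demonstration:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char *dev_addr = mac;	/* address reached through a pointer, as in the driver */
	unsigned char copy[ETH_ALEN];

	/* On a 64-bit build this prints 8, so the old code copied the wrong length. */
	printf("sizeof(dev_addr) = %zu, ETH_ALEN = %d\n", sizeof(dev_addr), ETH_ALEN);

	memcpy(copy, dev_addr, ETH_ALEN);	/* always exactly one MAC address */
	return 0;
}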
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index c6a6c042b82f..b0b1c7841500 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c | |||
| @@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan) | |||
| 567 | chan_count = lbs_scan_create_channel_list(priv, chan_list); | 567 | chan_count = lbs_scan_create_channel_list(priv, chan_list); |
| 568 | 568 | ||
| 569 | netif_stop_queue(priv->dev); | 569 | netif_stop_queue(priv->dev); |
| 570 | netif_carrier_off(priv->dev); | 570 | if (priv->mesh_dev) |
| 571 | if (priv->mesh_dev) { | ||
| 572 | netif_stop_queue(priv->mesh_dev); | 571 | netif_stop_queue(priv->mesh_dev); |
| 573 | netif_carrier_off(priv->mesh_dev); | ||
| 574 | } | ||
| 575 | 572 | ||
| 576 | /* Prepare to continue an interrupted scan */ | 573 | /* Prepare to continue an interrupted scan */ |
| 577 | lbs_deb_scan("chan_count %d, scan_channel %d\n", | 574 | lbs_deb_scan("chan_count %d, scan_channel %d\n", |
| @@ -635,16 +632,13 @@ out2: | |||
| 635 | priv->scan_channel = 0; | 632 | priv->scan_channel = 0; |
| 636 | 633 | ||
| 637 | out: | 634 | out: |
| 638 | if (priv->connect_status == LBS_CONNECTED) { | 635 | if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) |
| 639 | netif_carrier_on(priv->dev); | 636 | netif_wake_queue(priv->dev); |
| 640 | if (!priv->tx_pending_len) | 637 | |
| 641 | netif_wake_queue(priv->dev); | 638 | if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) && |
| 642 | } | 639 | !priv->tx_pending_len) |
| 643 | if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) { | 640 | netif_wake_queue(priv->mesh_dev); |
| 644 | netif_carrier_on(priv->mesh_dev); | 641 | |
| 645 | if (!priv->tx_pending_len) | ||
| 646 | netif_wake_queue(priv->mesh_dev); | ||
| 647 | } | ||
| 648 | kfree(chan_list); | 642 | kfree(chan_list); |
| 649 | 643 | ||
| 650 | lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); | 644 | lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); |
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index a8eb9e1fcf36..4b1aab593a84 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c | |||
| @@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, | |||
| 2025 | if (priv->connect_status == LBS_CONNECTED) { | 2025 | if (priv->connect_status == LBS_CONNECTED) { |
| 2026 | memcpy(extra, priv->curbssparams.ssid, | 2026 | memcpy(extra, priv->curbssparams.ssid, |
| 2027 | priv->curbssparams.ssid_len); | 2027 | priv->curbssparams.ssid_len); |
| 2028 | extra[priv->curbssparams.ssid_len] = '\0'; | ||
| 2029 | } else { | 2028 | } else { |
| 2030 | memset(extra, 0, 32); | 2029 | memset(extra, 0, 32); |
| 2031 | extra[priv->curbssparams.ssid_len] = '\0'; | ||
| 2032 | } | 2030 | } |
| 2033 | /* | 2031 | /* |
| 2034 | * If none, we may want to get the one that was set | 2032 | * If none, we may want to get the one that was set |
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index 019431d2f8a9..26a1abd5bb03 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c | |||
| @@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) | |||
| 495 | stats.band = IEEE80211_BAND_2GHZ; | 495 | stats.band = IEEE80211_BAND_2GHZ; |
| 496 | stats.signal = prxpd->snr; | 496 | stats.signal = prxpd->snr; |
| 497 | stats.noise = prxpd->nf; | 497 | stats.noise = prxpd->nf; |
| 498 | stats.qual = prxpd->snr - prxpd->nf; | ||
| 499 | /* Marvell rate index has a hole at value 4 */ | 498 | /* Marvell rate index has a hole at value 4 */ |
| 500 | if (prxpd->rx_rate > 4) | 499 | if (prxpd->rx_rate > 4) |
| 501 | --prxpd->rx_rate; | 500 | --prxpd->rx_rate; |
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 7698fdd6a3a2..31ca241f7753 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #define MAX_RID_LEN 1024 | 23 | #define MAX_RID_LEN 1024 |
| 24 | 24 | ||
| 25 | /* Helper routine to record keys | 25 | /* Helper routine to record keys |
| 26 | * Do not call from interrupt context */ | 26 | * It is called under orinoco_lock so it may not sleep */ |
| 27 | static int orinoco_set_key(struct orinoco_private *priv, int index, | 27 | static int orinoco_set_key(struct orinoco_private *priv, int index, |
| 28 | enum orinoco_alg alg, const u8 *key, int key_len, | 28 | enum orinoco_alg alg, const u8 *key, int key_len, |
| 29 | const u8 *seq, int seq_len) | 29 | const u8 *seq, int seq_len) |
| @@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index, | |||
| 32 | kzfree(priv->keys[index].seq); | 32 | kzfree(priv->keys[index].seq); |
| 33 | 33 | ||
| 34 | if (key_len) { | 34 | if (key_len) { |
| 35 | priv->keys[index].key = kzalloc(key_len, GFP_KERNEL); | 35 | priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC); |
| 36 | if (!priv->keys[index].key) | 36 | if (!priv->keys[index].key) |
| 37 | goto nomem; | 37 | goto nomem; |
| 38 | } else | 38 | } else |
| 39 | priv->keys[index].key = NULL; | 39 | priv->keys[index].key = NULL; |
| 40 | 40 | ||
| 41 | if (seq_len) { | 41 | if (seq_len) { |
| 42 | priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL); | 42 | priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC); |
| 43 | if (!priv->keys[index].seq) | 43 | if (!priv->keys[index].seq) |
| 44 | goto free_key; | 44 | goto free_key; |
| 45 | } else | 45 | } else |
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index c5fe867665e6..1a7eae357fef 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h | |||
| @@ -1323,7 +1323,7 @@ | |||
| 1323 | #define PAIRWISE_KEY_ENTRY(__idx) \ | 1323 | #define PAIRWISE_KEY_ENTRY(__idx) \ |
| 1324 | ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) | 1324 | ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) |
| 1325 | #define MAC_IVEIV_ENTRY(__idx) \ | 1325 | #define MAC_IVEIV_ENTRY(__idx) \ |
| 1326 | ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) ) | 1326 | ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) ) |
| 1327 | #define MAC_WCID_ATTR_ENTRY(__idx) \ | 1327 | #define MAC_WCID_ATTR_ENTRY(__idx) \ |
| 1328 | ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) | 1328 | ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) |
| 1329 | #define SHARED_KEY_ENTRY(__idx) \ | 1329 | #define SHARED_KEY_ENTRY(__idx) \ |
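The one-character fix in MAC_IVEIV_ENTRY() replaces a bitwise AND with the multiplication every other *_ENTRY macro already uses, so the offset scales with the index instead of being masked by the structure size. A sketch of the intended arithmetic; the base address and entry layout here are assumptions for illustration:

#include <stdint.h>

struct mac_iveiv_entry {
	uint8_t iv[8];		/* layout assumed for illustration */
};

#define MAC_IVEIV_TABLE_BASE 0x6000u	/* illustrative base, not quoted from the header */

/* Consecutive entries are laid out back to back, so entry idx starts at
 * base + idx * sizeof(entry); the old '&' reduced that to base + (idx & size). */
static inline uint32_t mac_iveiv_entry(unsigned int idx)
{
	return MAC_IVEIV_TABLE_BASE + idx * (uint32_t)sizeof(struct mac_iveiv_entry);
}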
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index eb1e1d00bec3..27bf887f1453 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
| 38 | 38 | ||
| 39 | #include "rt2x00.h" | 39 | #include "rt2x00.h" |
| 40 | #ifdef CONFIG_RT2800USB | 40 | #if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) |
| 41 | #include "rt2x00usb.h" | 41 | #include "rt2x00usb.h" |
| 42 | #endif | 42 | #endif |
| 43 | #include "rt2800lib.h" | 43 | #include "rt2800lib.h" |
| @@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) | |||
| 1121 | 1121 | ||
| 1122 | if (rt2x00_intf_is_usb(rt2x00dev)) { | 1122 | if (rt2x00_intf_is_usb(rt2x00dev)) { |
| 1123 | rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); | 1123 | rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); |
| 1124 | #ifdef CONFIG_RT2800USB | 1124 | #if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) |
| 1125 | rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, | 1125 | rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, |
| 1126 | USB_MODE_RESET, REGISTER_TIMEOUT); | 1126 | USB_MODE_RESET, REGISTER_TIMEOUT); |
| 1127 | #endif | 1127 | #endif |
| @@ -2022,6 +2022,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
| 2022 | u16 eeprom; | 2022 | u16 eeprom; |
| 2023 | 2023 | ||
| 2024 | /* | 2024 | /* |
| 2025 | * Disable powersaving as default on PCI devices. | ||
| 2026 | */ | ||
| 2027 | if (rt2x00_intf_is_pci(rt2x00dev)) | ||
| 2028 | rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | ||
| 2029 | |||
| 2030 | /* | ||
| 2025 | * Initialize all hw fields. | 2031 | * Initialize all hw fields. |
| 2026 | */ | 2032 | */ |
| 2027 | rt2x00dev->hw->flags = | 2033 | rt2x00dev->hw->flags = |
| @@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
| 2074 | IEEE80211_HT_CAP_SGI_20 | | 2080 | IEEE80211_HT_CAP_SGI_20 | |
| 2075 | IEEE80211_HT_CAP_SGI_40 | | 2081 | IEEE80211_HT_CAP_SGI_40 | |
| 2076 | IEEE80211_HT_CAP_TX_STBC | | 2082 | IEEE80211_HT_CAP_TX_STBC | |
| 2077 | IEEE80211_HT_CAP_RX_STBC | | 2083 | IEEE80211_HT_CAP_RX_STBC; |
| 2078 | IEEE80211_HT_CAP_PSMP_SUPPORT; | ||
| 2079 | spec->ht.ampdu_factor = 3; | 2084 | spec->ht.ampdu_factor = 3; |
| 2080 | spec->ht.ampdu_density = 4; | 2085 | spec->ht.ampdu_density = 4; |
| 2081 | spec->ht.mcs.tx_params = | 2086 | spec->ht.mcs.tx_params = |
| @@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, | |||
| 2140 | rt2800_register_multiread(rt2x00dev, offset, | 2145 | rt2800_register_multiread(rt2x00dev, offset, |
| 2141 | &iveiv_entry, sizeof(iveiv_entry)); | 2146 | &iveiv_entry, sizeof(iveiv_entry)); |
| 2142 | 2147 | ||
| 2143 | memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16)); | 2148 | memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16)); |
| 2144 | memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32)); | 2149 | memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32)); |
| 2145 | } | 2150 | } |
| 2146 | 2151 | ||
| 2147 | static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) | 2152 | static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) |
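The rt2800_get_tkip_seq() hunk just above fixes two bugs at once: the memcpy() source and destination were swapped (a getter should fill the caller's iv16/iv32 from the entry read back out of the hardware, not overwrite a local copy with caller data), and sizeof(iv16)/sizeof(iv32) measured the pointer itself rather than the value it points to. A compact sketch of the corrected shape, with plain stand-ins for the driver types; the 8-byte scratch entry and its contents are illustrative only:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void get_seq(uint16_t *iv16, uint32_t *iv32)
{
    uint8_t hw_entry[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

    /* Buggy shape: memcpy(&hw_entry[0], iv16, sizeof(iv16)) copies the wrong
     * way round and uses the size of a pointer (8 bytes on 64-bit targets). */

    /* Fixed shape: destination first, size of the pointed-to object. */
    memcpy(iv16, &hw_entry[0], sizeof(*iv16));
    memcpy(iv32, &hw_entry[4], sizeof(*iv32));
}

int main(void)
{
    uint16_t iv16 = 0;
    uint32_t iv32 = 0;

    printf("sizeof(uint16_t *) = %zu, sizeof(uint16_t) = %zu\n",
           sizeof(uint16_t *), sizeof(uint16_t));
    get_seq(&iv16, &iv32);
    printf("iv16 = 0x%04x, iv32 = 0x%08x\n", iv16, iv32);
    return 0;
}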
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index af85d18cdbe7..ab95346cf6a3 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
| @@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
| 922 | { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, | 922 | { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, |
| 923 | { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, | 923 | { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, |
| 924 | { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, | 924 | { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, |
| 925 | { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, | ||
| 925 | /* Logitec */ | 926 | /* Logitec */ |
| 926 | { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, | 927 | { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, |
| 927 | { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, | 928 | { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, |
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 687e17dc2e9f..0ca589306d71 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c | |||
| @@ -2539,6 +2539,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
| 2539 | unsigned int i; | 2539 | unsigned int i; |
| 2540 | 2540 | ||
| 2541 | /* | 2541 | /* |
| 2542 | * Disable powersaving as default. | ||
| 2543 | */ | ||
| 2544 | rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | ||
| 2545 | |||
| 2546 | /* | ||
| 2542 | * Initialize all hw fields. | 2547 | * Initialize all hw fields. |
| 2543 | */ | 2548 | */ |
| 2544 | rt2x00dev->hw->flags = | 2549 | rt2x00dev->hw->flags = |
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index a1a3dd15c664..8a40a1439984 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c | |||
| @@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) | |||
| 132 | 132 | ||
| 133 | rx_status.antenna = (flags2 >> 15) & 1; | 133 | rx_status.antenna = (flags2 >> 15) & 1; |
| 134 | /* TODO: improve signal/rssi reporting */ | 134 | /* TODO: improve signal/rssi reporting */ |
| 135 | rx_status.qual = flags2 & 0xFF; | ||
| 136 | rx_status.signal = (flags2 >> 8) & 0x7F; | 135 | rx_status.signal = (flags2 >> 8) & 0x7F; |
| 137 | /* XXX: is this correct? */ | 136 | /* XXX: is this correct? */ |
| 138 | rx_status.rate_idx = (flags >> 20) & 0xF; | 137 | rx_status.rate_idx = (flags >> 20) & 0xF; |
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c index 2e733e7bdfd4..28a808674080 100644 --- a/drivers/net/wireless/wl12xx/wl1251_boot.c +++ b/drivers/net/wireless/wl12xx/wl1251_boot.c | |||
| @@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl) | |||
| 256 | } | 256 | } |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | if (loop >= INIT_LOOP) { | 259 | if (loop > INIT_LOOP) { |
| 260 | wl1251_error("timeout waiting for the hardware to " | 260 | wl1251_error("timeout waiting for the hardware to " |
| 261 | "complete initialization"); | 261 | "complete initialization"); |
| 262 | return -EIO; | 262 | return -EIO; |
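The wl1251 change from loop >= INIT_LOOP to loop > INIT_LOOP removes an off-by-one in the timeout test: with the usual while (loop++ < INIT_LOOP) polling pattern (the loop body itself is outside this hunk, so that shape is an assumption), a break on the very last permitted poll leaves loop equal to INIT_LOOP, and the old >= comparison reported a timeout even though the hardware had just signalled completion. A small model of that boundary case:

#include <stdio.h>

#define INIT_LOOP 20

/* ready_on: 1-based poll at which the (simulated) hardware reports ready,
 * or 0 for "never".  Returns 0 on success, -1 on timeout. */
static int poll_ready(int ready_on)
{
    int loop = 0;

    while (loop++ < INIT_LOOP) {
        if (loop == ready_on)
            break;                  /* init complete */
    }

    if (loop > INIT_LOOP)           /* the old '>=' also fired when the   */
        return -1;                  /* final poll succeeded (loop == 20)  */
    return 0;
}

int main(void)
{
    printf("ready on last poll: %s\n",
           poll_ready(INIT_LOOP) ? "timeout" : "ok");
    printf("never ready:        %s\n",
           poll_ready(0) ? "timeout" : "ok");
    return 0;
}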
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c index 886a9bc39cc1..c3385b3d246c 100644 --- a/drivers/net/wireless/wl12xx/wl1271_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c | |||
| @@ -777,7 +777,7 @@ out: | |||
| 777 | return ret; | 777 | return ret; |
| 778 | } | 778 | } |
| 779 | 779 | ||
| 780 | static int wl1271_build_basic_rates(char *rates, u8 band) | 780 | static int wl1271_build_basic_rates(u8 *rates, u8 band) |
| 781 | { | 781 | { |
| 782 | u8 index = 0; | 782 | u8 index = 0; |
| 783 | 783 | ||
| @@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band) | |||
| 804 | return index; | 804 | return index; |
| 805 | } | 805 | } |
| 806 | 806 | ||
| 807 | static int wl1271_build_extended_rates(char *rates, u8 band) | 807 | static int wl1271_build_extended_rates(u8 *rates, u8 band) |
| 808 | { | 808 | { |
| 809 | u8 index = 0; | 809 | u8 index = 0; |
| 810 | 810 | ||
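Switching the wl1271 rate-array parameters from char * to u8 * is more than cosmetic: plain char has implementation-defined signedness, and 802.11 rate bytes routinely have the top bit set (the 0x80 basic-rate flag), so on signed-char targets those values sign-extend to negative ints whenever they are promoted or compared. A tiny illustration of the promotion difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    char    c = (char)0x82;     /* e.g. 1 Mbit/s tagged as a basic rate */
    uint8_t u = 0x82;

    /* Where plain char is signed, c promotes to a negative int. */
    printf("char    promotes to %d\n", c);
    printf("uint8_t promotes to %d\n", u);
    printf("(c == 0x82) is %s\n", (c == 0x82) ? "true" : "false");
    return 0;
}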
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index dfa1b9bc22c8..7ca95c414fa8 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c | |||
| @@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) | |||
| 1325 | return r; | 1325 | return r; |
| 1326 | } | 1326 | } |
| 1327 | 1327 | ||
| 1328 | static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size) | ||
| 1329 | { | ||
| 1330 | static const u16 constants[] = { | ||
| 1331 | 715, 655, 585, 540, 470, 410, 360, 315, | ||
| 1332 | 270, 235, 205, 175, 150, 125, 105, 85, | ||
| 1333 | 65, 50, 40, 25, 15 | ||
| 1334 | }; | ||
| 1335 | |||
| 1336 | int i; | ||
| 1337 | u32 x; | ||
| 1338 | |||
| 1339 | /* It seems that their quality parameter is somehow per signal | ||
| 1340 | * and is now transferred per bit. | ||
| 1341 | */ | ||
| 1342 | switch (zd_rate) { | ||
| 1343 | case ZD_OFDM_RATE_6M: | ||
| 1344 | case ZD_OFDM_RATE_12M: | ||
| 1345 | case ZD_OFDM_RATE_24M: | ||
| 1346 | size *= 2; | ||
| 1347 | break; | ||
| 1348 | case ZD_OFDM_RATE_9M: | ||
| 1349 | case ZD_OFDM_RATE_18M: | ||
| 1350 | case ZD_OFDM_RATE_36M: | ||
| 1351 | case ZD_OFDM_RATE_54M: | ||
| 1352 | size *= 4; | ||
| 1353 | size /= 3; | ||
| 1354 | break; | ||
| 1355 | case ZD_OFDM_RATE_48M: | ||
| 1356 | size *= 3; | ||
| 1357 | size /= 2; | ||
| 1358 | break; | ||
| 1359 | default: | ||
| 1360 | return -EINVAL; | ||
| 1361 | } | ||
| 1362 | |||
| 1363 | x = (10000 * status_quality)/size; | ||
| 1364 | for (i = 0; i < ARRAY_SIZE(constants); i++) { | ||
| 1365 | if (x > constants[i]) | ||
| 1366 | break; | ||
| 1367 | } | ||
| 1368 | |||
| 1369 | switch (zd_rate) { | ||
| 1370 | case ZD_OFDM_RATE_6M: | ||
| 1371 | case ZD_OFDM_RATE_9M: | ||
| 1372 | i += 3; | ||
| 1373 | break; | ||
| 1374 | case ZD_OFDM_RATE_12M: | ||
| 1375 | case ZD_OFDM_RATE_18M: | ||
| 1376 | i += 5; | ||
| 1377 | break; | ||
| 1378 | case ZD_OFDM_RATE_24M: | ||
| 1379 | case ZD_OFDM_RATE_36M: | ||
| 1380 | i += 9; | ||
| 1381 | break; | ||
| 1382 | case ZD_OFDM_RATE_48M: | ||
| 1383 | case ZD_OFDM_RATE_54M: | ||
| 1384 | i += 15; | ||
| 1385 | break; | ||
| 1386 | default: | ||
| 1387 | return -EINVAL; | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | return i; | ||
| 1391 | } | ||
| 1392 | |||
| 1393 | static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size) | ||
| 1394 | { | ||
| 1395 | int r; | ||
| 1396 | |||
| 1397 | r = ofdm_qual_db(status_quality, zd_rate, size); | ||
| 1398 | ZD_ASSERT(r >= 0); | ||
| 1399 | if (r < 0) | ||
| 1400 | r = 0; | ||
| 1401 | |||
| 1402 | r = (r * 100)/29; | ||
| 1403 | return r <= 100 ? r : 100; | ||
| 1404 | } | ||
| 1405 | |||
| 1406 | static unsigned int log10times100(unsigned int x) | ||
| 1407 | { | ||
| 1408 | static const u8 log10[] = { | ||
| 1409 | 0, | ||
| 1410 | 0, 30, 47, 60, 69, 77, 84, 90, 95, 100, | ||
| 1411 | 104, 107, 111, 114, 117, 120, 123, 125, 127, 130, | ||
| 1412 | 132, 134, 136, 138, 139, 141, 143, 144, 146, 147, | ||
| 1413 | 149, 150, 151, 153, 154, 155, 156, 157, 159, 160, | ||
| 1414 | 161, 162, 163, 164, 165, 166, 167, 168, 169, 169, | ||
| 1415 | 170, 171, 172, 173, 174, 174, 175, 176, 177, 177, | ||
| 1416 | 178, 179, 179, 180, 181, 181, 182, 183, 183, 184, | ||
| 1417 | 185, 185, 186, 186, 187, 188, 188, 189, 189, 190, | ||
| 1418 | 190, 191, 191, 192, 192, 193, 193, 194, 194, 195, | ||
| 1419 | 195, 196, 196, 197, 197, 198, 198, 199, 199, 200, | ||
| 1420 | 200, 200, 201, 201, 202, 202, 202, 203, 203, 204, | ||
| 1421 | 204, 204, 205, 205, 206, 206, 206, 207, 207, 207, | ||
| 1422 | 208, 208, 208, 209, 209, 210, 210, 210, 211, 211, | ||
| 1423 | 211, 212, 212, 212, 213, 213, 213, 213, 214, 214, | ||
| 1424 | 214, 215, 215, 215, 216, 216, 216, 217, 217, 217, | ||
| 1425 | 217, 218, 218, 218, 219, 219, 219, 219, 220, 220, | ||
| 1426 | 220, 220, 221, 221, 221, 222, 222, 222, 222, 223, | ||
| 1427 | 223, 223, 223, 224, 224, 224, 224, | ||
| 1428 | }; | ||
| 1429 | |||
| 1430 | return x < ARRAY_SIZE(log10) ? log10[x] : 225; | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | enum { | ||
| 1434 | MAX_CCK_EVM_DB = 45, | ||
| 1435 | }; | ||
| 1436 | |||
| 1437 | static int cck_evm_db(u8 status_quality) | ||
| 1438 | { | ||
| 1439 | return (20 * log10times100(status_quality)) / 100; | ||
| 1440 | } | ||
| 1441 | |||
| 1442 | static int cck_snr_db(u8 status_quality) | ||
| 1443 | { | ||
| 1444 | int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality); | ||
| 1445 | ZD_ASSERT(r >= 0); | ||
| 1446 | return r; | ||
| 1447 | } | ||
| 1448 | |||
| 1449 | static int cck_qual_percent(u8 status_quality) | ||
| 1450 | { | ||
| 1451 | int r; | ||
| 1452 | |||
| 1453 | r = cck_snr_db(status_quality); | ||
| 1454 | r = (100*r)/17; | ||
| 1455 | return r <= 100 ? r : 100; | ||
| 1456 | } | ||
| 1457 | |||
| 1458 | static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) | 1328 | static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) |
| 1459 | { | 1329 | { |
| 1460 | return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); | 1330 | return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); |
| 1461 | } | 1331 | } |
| 1462 | 1332 | ||
| 1463 | u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, | ||
| 1464 | const struct rx_status *status) | ||
| 1465 | { | ||
| 1466 | return (status->frame_status&ZD_RX_OFDM) ? | ||
| 1467 | ofdm_qual_percent(status->signal_quality_ofdm, | ||
| 1468 | zd_rate_from_ofdm_plcp_header(rx_frame), | ||
| 1469 | size) : | ||
| 1470 | cck_qual_percent(status->signal_quality_cck); | ||
| 1471 | } | ||
| 1472 | |||
| 1473 | /** | 1333 | /** |
| 1474 | * zd_rx_rate - report zd-rate | 1334 | * zd_rx_rate - report zd-rate |
| 1475 | * @rx_frame - received frame | 1335 | * @rx_frame - received frame |
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index 9fd8f3508d66..f8bbf7d302ae 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h | |||
| @@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval) | |||
| 929 | 929 | ||
| 930 | struct rx_status; | 930 | struct rx_status; |
| 931 | 931 | ||
| 932 | u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, | ||
| 933 | const struct rx_status *status); | ||
| 934 | |||
| 935 | u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status); | 932 | u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status); |
| 936 | 933 | ||
| 937 | struct zd_mc_hash { | 934 | struct zd_mc_hash { |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index cf51e8f8174b..8ebf5c33955d 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
| @@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) | |||
| 828 | stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; | 828 | stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; |
| 829 | stats.band = IEEE80211_BAND_2GHZ; | 829 | stats.band = IEEE80211_BAND_2GHZ; |
| 830 | stats.signal = status->signal_strength; | 830 | stats.signal = status->signal_strength; |
| 831 | stats.qual = zd_rx_qual_percent(buffer, | ||
| 832 | length - sizeof(struct rx_status), | ||
| 833 | status); | ||
| 834 | 831 | ||
| 835 | rate = zd_rx_rate(buffer, status); | 832 | rate = zd_rx_rate(buffer, status); |
| 836 | 833 | ||
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index bd588eb8e922..8e210cd76e55 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
| @@ -121,7 +121,7 @@ struct controller { | |||
| 121 | #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 | 121 | #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 |
| 122 | #define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 | 122 | #define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 |
| 123 | 123 | ||
| 124 | /* AMD PCIX bridge registers */ | 124 | /* AMD PCI-X bridge registers */ |
| 125 | #define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C | 125 | #define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C |
| 126 | #define PCIX_MISCII_OFFSET 0x48 | 126 | #define PCIX_MISCII_OFFSET 0x48 |
| 127 | #define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80 | 127 | #define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80 |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index e56f9bed6f2b..417312528ddf 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
| @@ -305,7 +305,7 @@ struct device_domain_info { | |||
| 305 | int segment; /* PCI domain */ | 305 | int segment; /* PCI domain */ |
| 306 | u8 bus; /* PCI bus number */ | 306 | u8 bus; /* PCI bus number */ |
| 307 | u8 devfn; /* PCI devfn number */ | 307 | u8 devfn; /* PCI devfn number */ |
| 308 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ | 308 | struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */ |
| 309 | struct intel_iommu *iommu; /* IOMMU used by this device */ | 309 | struct intel_iommu *iommu; /* IOMMU used by this device */ |
| 310 | struct dmar_domain *domain; /* pointer to domain */ | 310 | struct dmar_domain *domain; /* pointer to domain */ |
| 311 | }; | 311 | }; |
| @@ -1604,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, | |||
| 1604 | return ret; | 1604 | return ret; |
| 1605 | parent = parent->bus->self; | 1605 | parent = parent->bus->self; |
| 1606 | } | 1606 | } |
| 1607 | if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */ | 1607 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ |
| 1608 | return domain_context_mapping_one(domain, | 1608 | return domain_context_mapping_one(domain, |
| 1609 | pci_domain_nr(tmp->subordinate), | 1609 | pci_domain_nr(tmp->subordinate), |
| 1610 | tmp->subordinate->number, 0, | 1610 | tmp->subordinate->number, 0, |
| @@ -3325,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, | |||
| 3325 | parent->devfn); | 3325 | parent->devfn); |
| 3326 | parent = parent->bus->self; | 3326 | parent = parent->bus->self; |
| 3327 | } | 3327 | } |
| 3328 | if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */ | 3328 | if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ |
| 3329 | iommu_detach_dev(iommu, | 3329 | iommu_detach_dev(iommu, |
| 3330 | tmp->subordinate->number, 0); | 3330 | tmp->subordinate->number, 0); |
| 3331 | else /* this is a legacy PCI bridge */ | 3331 | else /* this is a legacy PCI bridge */ |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 8b65a489581b..95b849130ad4 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
| @@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
| 528 | 528 | ||
| 529 | bridge = pci_find_upstream_pcie_bridge(dev); | 529 | bridge = pci_find_upstream_pcie_bridge(dev); |
| 530 | if (bridge) { | 530 | if (bridge) { |
| 531 | if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */ | 531 | if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ |
| 532 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, | 532 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, |
| 533 | (bridge->bus->number << 8) | dev->bus->number); | 533 | (bridge->bus->number << 8) | dev->bus->number); |
| 534 | else /* this is a legacy PCI bridge */ | 534 | else /* this is a legacy PCI bridge */ |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index cc617ddd33d0..7e2829538a4c 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev) | |||
| 112 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) | 112 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) |
| 113 | { | 113 | { |
| 114 | while (bus->parent) { | 114 | while (bus->parent) { |
| 115 | struct pci_dev *bridge = bus->self; | 115 | if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) |
| 116 | int ret; | ||
| 117 | |||
| 118 | ret = acpi_pm_device_sleep_wake(&bridge->dev, enable); | ||
| 119 | if (!ret || pci_is_pcie(bridge)) | ||
| 120 | return; | 116 | return; |
| 121 | bus = bus->parent; | 117 | bus = bus->parent; |
| 122 | } | 118 | } |
| @@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | |||
| 131 | if (acpi_pci_can_wakeup(dev)) | 127 | if (acpi_pci_can_wakeup(dev)) |
| 132 | return acpi_pm_device_sleep_wake(&dev->dev, enable); | 128 | return acpi_pm_device_sleep_wake(&dev->dev, enable); |
| 133 | 129 | ||
| 134 | if (!pci_is_pcie(dev)) | 130 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); |
| 135 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); | ||
| 136 | |||
| 137 | return 0; | 131 | return 0; |
| 138 | } | 132 | } |
| 139 | 133 | ||
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index c5df94e86678..807224ec8351 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev, | |||
| 75 | int len; | 75 | int len; |
| 76 | 76 | ||
| 77 | #ifdef CONFIG_NUMA | 77 | #ifdef CONFIG_NUMA |
| 78 | mask = cpumask_of_node(dev_to_node(dev)); | 78 | mask = (dev_to_node(dev) == -1) ? cpu_online_mask : |
| 79 | cpumask_of_node(dev_to_node(dev)); | ||
| 79 | #else | 80 | #else |
| 80 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); | 81 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); |
| 81 | #endif | 82 | #endif |
| @@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev, | |||
| 93 | int len; | 94 | int len; |
| 94 | 95 | ||
| 95 | #ifdef CONFIG_NUMA | 96 | #ifdef CONFIG_NUMA |
| 96 | mask = cpumask_of_node(dev_to_node(dev)); | 97 | mask = (dev_to_node(dev) == -1) ? cpu_online_mask : |
| 98 | cpumask_of_node(dev_to_node(dev)); | ||
| 97 | #else | 99 | #else |
| 98 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); | 100 | mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); |
| 99 | #endif | 101 | #endif |
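The pci-sysfs hunk guards against dev_to_node() returning -1 (no NUMA affinity): feeding -1 straight into cpumask_of_node() can index outside the per-node cpumask array on some configurations, so the code now falls back to cpu_online_mask for node-less devices. A minimal sketch of the same sentinel guard, using stand-in names rather than the kernel API:

#include <stdio.h>

#define NR_NODES 4
#define NO_NODE  (-1)

static const char *node_mask[NR_NODES] = { "cpus@node0", "cpus@node1",
                                           "cpus@node2", "cpus@node3" };
static const char *online_mask = "all-online-cpus";

/* Fall back to the online mask when the device reports no NUMA node,
 * instead of indexing the array with -1. */
static const char *local_cpus(int node)
{
    return (node == NO_NODE) ? online_mask : node_mask[node];
}

int main(void)
{
    printf("node 2 : %s\n", local_cpus(2));
    printf("no node: %s\n", local_cpus(NO_NODE));
    return 0;
}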
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0bc27e059019..315fea47e784 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -29,7 +29,17 @@ const char *pci_power_names[] = { | |||
| 29 | }; | 29 | }; |
| 30 | EXPORT_SYMBOL_GPL(pci_power_names); | 30 | EXPORT_SYMBOL_GPL(pci_power_names); |
| 31 | 31 | ||
| 32 | unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; | 32 | unsigned int pci_pm_d3_delay; |
| 33 | |||
| 34 | static void pci_dev_d3_sleep(struct pci_dev *dev) | ||
| 35 | { | ||
| 36 | unsigned int delay = dev->d3_delay; | ||
| 37 | |||
| 38 | if (delay < pci_pm_d3_delay) | ||
| 39 | delay = pci_pm_d3_delay; | ||
| 40 | |||
| 41 | msleep(delay); | ||
| 42 | } | ||
| 33 | 43 | ||
| 34 | #ifdef CONFIG_PCI_DOMAINS | 44 | #ifdef CONFIG_PCI_DOMAINS |
| 35 | int pci_domains_supported = 1; | 45 | int pci_domains_supported = 1; |
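The new pci_dev_d3_sleep() helper above makes the D3 transition delay per-device (dev->d3_delay, initialised to PCI_PM_D3_WAIT in pci_pm_init() later in this patch) while still enforcing the global pci_pm_d3_delay as a floor: the larger of the two wins. A trimmed-down model of that max-of-two-delays pattern; the structure and millisecond values are illustrative, not the kernel's:

#include <stdio.h>

#define PCI_PM_D3_WAIT 10                 /* ms; illustrative default */

static unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;

struct fake_pci_dev {
    unsigned int d3_delay;                /* per-device value, set at probe
                                           * time or raised by a quirk */
};

static unsigned int d3_sleep_ms(const struct fake_pci_dev *dev)
{
    unsigned int delay = dev->d3_delay;

    if (delay < pci_pm_d3_delay)
        delay = pci_pm_d3_delay;
    return delay;                         /* the kernel msleep()s this long */
}

int main(void)
{
    struct fake_pci_dev normal = { .d3_delay = PCI_PM_D3_WAIT };
    struct fake_pci_dev quirky = { .d3_delay = 120 };

    printf("normal device waits %u ms\n", d3_sleep_ms(&normal));
    printf("quirky device waits %u ms\n", d3_sleep_ms(&quirky));
    return 0;
}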
| @@ -522,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 522 | /* Mandatory power management transition delays */ | 532 | /* Mandatory power management transition delays */ |
| 523 | /* see PCI PM 1.1 5.6.1 table 18 */ | 533 | /* see PCI PM 1.1 5.6.1 table 18 */ |
| 524 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) | 534 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) |
| 525 | msleep(pci_pm_d3_delay); | 535 | pci_dev_d3_sleep(dev); |
| 526 | else if (state == PCI_D2 || dev->current_state == PCI_D2) | 536 | else if (state == PCI_D2 || dev->current_state == PCI_D2) |
| 527 | udelay(PCI_PM_D2_DELAY); | 537 | udelay(PCI_PM_D2_DELAY); |
| 528 | 538 | ||
| @@ -1153,11 +1163,11 @@ pci_disable_device(struct pci_dev *dev) | |||
| 1153 | 1163 | ||
| 1154 | /** | 1164 | /** |
| 1155 | * pcibios_set_pcie_reset_state - set reset state for device dev | 1165 | * pcibios_set_pcie_reset_state - set reset state for device dev |
| 1156 | * @dev: the PCI-E device reset | 1166 | * @dev: the PCIe device reset |
| 1157 | * @state: Reset state to enter into | 1167 | * @state: Reset state to enter into |
| 1158 | * | 1168 | * |
| 1159 | * | 1169 | * |
| 1160 | * Sets the PCI-E reset state for the device. This is the default | 1170 | * Sets the PCIe reset state for the device. This is the default |
| 1161 | * implementation. Architecture implementations can override this. | 1171 | * implementation. Architecture implementations can override this. |
| 1162 | */ | 1172 | */ |
| 1163 | int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, | 1173 | int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, |
| @@ -1168,7 +1178,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, | |||
| 1168 | 1178 | ||
| 1169 | /** | 1179 | /** |
| 1170 | * pci_set_pcie_reset_state - set reset state for device dev | 1180 | * pci_set_pcie_reset_state - set reset state for device dev |
| 1171 | * @dev: the PCI-E device reset | 1181 | * @dev: the PCIe device reset |
| 1172 | * @state: Reset state to enter into | 1182 | * @state: Reset state to enter into |
| 1173 | * | 1183 | * |
| 1174 | * | 1184 | * |
| @@ -1409,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
| 1409 | } | 1419 | } |
| 1410 | 1420 | ||
| 1411 | dev->pm_cap = pm; | 1421 | dev->pm_cap = pm; |
| 1422 | dev->d3_delay = PCI_PM_D3_WAIT; | ||
| 1412 | 1423 | ||
| 1413 | dev->d1_support = false; | 1424 | dev->d1_support = false; |
| 1414 | dev->d2_support = false; | 1425 | dev->d2_support = false; |
| @@ -2247,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) | |||
| 2247 | csr &= ~PCI_PM_CTRL_STATE_MASK; | 2258 | csr &= ~PCI_PM_CTRL_STATE_MASK; |
| 2248 | csr |= PCI_D3hot; | 2259 | csr |= PCI_D3hot; |
| 2249 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); | 2260 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); |
| 2250 | msleep(pci_pm_d3_delay); | 2261 | pci_dev_d3_sleep(dev); |
| 2251 | 2262 | ||
| 2252 | csr &= ~PCI_PM_CTRL_STATE_MASK; | 2263 | csr &= ~PCI_PM_CTRL_STATE_MASK; |
| 2253 | csr |= PCI_D0; | 2264 | csr |= PCI_D0; |
| 2254 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); | 2265 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); |
| 2255 | msleep(pci_pm_d3_delay); | 2266 | pci_dev_d3_sleep(dev); |
| 2256 | 2267 | ||
| 2257 | return 0; | 2268 | return 0; |
| 2258 | } | 2269 | } |
| @@ -2296,6 +2307,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
| 2296 | down(&dev->dev.sem); | 2307 | down(&dev->dev.sem); |
| 2297 | } | 2308 | } |
| 2298 | 2309 | ||
| 2310 | rc = pci_dev_specific_reset(dev, probe); | ||
| 2311 | if (rc != -ENOTTY) | ||
| 2312 | goto done; | ||
| 2313 | |||
| 2299 | rc = pcie_flr(dev, probe); | 2314 | rc = pcie_flr(dev, probe); |
| 2300 | if (rc != -ENOTTY) | 2315 | if (rc != -ENOTTY) |
| 2301 | goto done; | 2316 | goto done; |
| @@ -2779,6 +2794,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) | |||
| 2779 | return 1; | 2794 | return 1; |
| 2780 | } | 2795 | } |
| 2781 | 2796 | ||
| 2797 | void __weak pci_fixup_cardbus(struct pci_bus *bus) | ||
| 2798 | { | ||
| 2799 | } | ||
| 2800 | EXPORT_SYMBOL(pci_fixup_cardbus); | ||
| 2801 | |||
| 2782 | static int __init pci_setup(char *str) | 2802 | static int __init pci_setup(char *str) |
| 2783 | { | 2803 | { |
| 2784 | while (str) { | 2804 | while (str) { |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 33ed8e0aba1e..fbd0e3adbca3 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -313,4 +313,12 @@ static inline int pci_resource_alignment(struct pci_dev *dev, | |||
| 313 | 313 | ||
| 314 | extern void pci_enable_acs(struct pci_dev *dev); | 314 | extern void pci_enable_acs(struct pci_dev *dev); |
| 315 | 315 | ||
| 316 | struct pci_dev_reset_methods { | ||
| 317 | u16 vendor; | ||
| 318 | u16 device; | ||
| 319 | int (*reset)(struct pci_dev *dev, int probe); | ||
| 320 | }; | ||
| 321 | |||
| 322 | extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); | ||
| 323 | |||
| 316 | #endif /* DRIVERS_PCI_H */ | 324 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug index b8c925c1f6aa..9142949734f5 100644 --- a/drivers/pci/pcie/aer/Kconfig.debug +++ b/drivers/pci/pcie/aer/Kconfig.debug | |||
| @@ -3,14 +3,14 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config PCIEAER_INJECT | 5 | config PCIEAER_INJECT |
| 6 | tristate "PCIE AER error injector support" | 6 | tristate "PCIe AER error injector support" |
| 7 | depends on PCIEAER | 7 | depends on PCIEAER |
| 8 | default n | 8 | default n |
| 9 | help | 9 | help |
| 10 | This enables PCI Express Root Port Advanced Error Reporting | 10 | This enables PCI Express Root Port Advanced Error Reporting |
| 11 | (AER) software error injector. | 11 | (AER) software error injector. |
| 12 | 12 | ||
| 13 | Debuging PCIE AER code is quite difficult because it is hard | 13 | Debugging PCIe AER code is quite difficult because it is hard |
| 14 | to trigger various real hardware errors. Software based | 14 | to trigger various real hardware errors. Software based |
| 15 | error injection can fake almost all kinds of errors with the | 15 | error injection can fake almost all kinds of errors with the |
| 16 | help of a user space helper tool aer-inject, which can be | 16 | help of a user space helper tool aer-inject, which can be |
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 7fcd5331b14c..8c30a9544d61 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * PCIE AER software error injection support. | 2 | * PCIe AER software error injection support. |
| 3 | * | 3 | * |
| 4 | * Debuging PCIE AER code is quite difficult because it is hard to | 4 | * Debuging PCIe AER code is quite difficult because it is hard to |
| 5 | * trigger various real hardware errors. Software based error | 5 | * trigger various real hardware errors. Software based error |
| 6 | * injection can fake almost all kinds of errors with the help of a | 6 | * injection can fake almost all kinds of errors with the help of a |
| 7 | * user space helper tool aer-inject, which can be gotten from: | 7 | * user space helper tool aer-inject, which can be gotten from: |
| @@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 321 | unsigned long flags; | 321 | unsigned long flags; |
| 322 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); | 322 | unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); |
| 323 | int pos_cap_err, rp_pos_cap_err; | 323 | int pos_cap_err, rp_pos_cap_err; |
| 324 | u32 sever; | 324 | u32 sever, mask; |
| 325 | int ret = 0; | 325 | int ret = 0; |
| 326 | 326 | ||
| 327 | dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); | 327 | dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); |
| @@ -374,6 +374,24 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 374 | err->header_log2 = einj->header_log2; | 374 | err->header_log2 = einj->header_log2; |
| 375 | err->header_log3 = einj->header_log3; | 375 | err->header_log3 = einj->header_log3; |
| 376 | 376 | ||
| 377 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask); | ||
| 378 | if (einj->cor_status && !(einj->cor_status & ~mask)) { | ||
| 379 | ret = -EINVAL; | ||
| 380 | printk(KERN_WARNING "The correctable error(s) is masked " | ||

| 381 | "by device\n"); | ||
| 382 | spin_unlock_irqrestore(&inject_lock, flags); | ||
| 383 | goto out_put; | ||
| 384 | } | ||
| 385 | |||
| 386 | pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask); | ||
| 387 | if (einj->uncor_status && !(einj->uncor_status & ~mask)) { | ||
| 388 | ret = -EINVAL; | ||
| 389 | printk(KERN_WARNING "The uncorrectable error(s) is masked " | ||
| 390 | "by device\n"); | ||
| 391 | spin_unlock_irqrestore(&inject_lock, flags); | ||
| 392 | goto out_put; | ||
| 393 | } | ||
| 394 | |||
| 377 | rperr = __find_aer_error_by_dev(rpdev); | 395 | rperr = __find_aer_error_by_dev(rpdev); |
| 378 | if (!rperr) { | 396 | if (!rperr) { |
| 379 | rperr = rperr_alloc; | 397 | rperr = rperr_alloc; |
| @@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 413 | if (ret) | 431 | if (ret) |
| 414 | goto out_put; | 432 | goto out_put; |
| 415 | 433 | ||
| 416 | if (find_aer_device(rpdev, &edev)) | 434 | if (find_aer_device(rpdev, &edev)) { |
| 435 | if (!get_service_data(edev)) { | ||
| 436 | printk(KERN_WARNING "AER service is not initialized\n"); | ||
| 437 | ret = -EINVAL; | ||
| 438 | goto out_put; | ||
| 439 | } | ||
| 417 | aer_irq(-1, edev); | 440 | aer_irq(-1, edev); |
| 441 | } | ||
| 418 | else | 442 | else |
| 419 | ret = -EINVAL; | 443 | ret = -EINVAL; |
| 420 | out_put: | 444 | out_put: |
| @@ -484,5 +508,5 @@ static void __exit aer_inject_exit(void) | |||
| 484 | module_init(aer_inject_init); | 508 | module_init(aer_inject_init); |
| 485 | module_exit(aer_inject_exit); | 509 | module_exit(aer_inject_exit); |
| 486 | 510 | ||
| 487 | MODULE_DESCRIPTION("PCIE AER software error injector"); | 511 | MODULE_DESCRIPTION("PCIe AER software error injector"); |
| 488 | MODULE_LICENSE("GPL"); | 512 | MODULE_LICENSE("GPL"); |
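The two new checks in aer_inject() refuse to inject an error whose requested status bits are all covered by the device's correctable or uncorrectable mask register, since a fully masked error would never be reported by the hardware anyway. The test status && !(status & ~mask) reads as "some bits were requested, and none of them fall outside the mask". A stand-alone sketch of that bit test; the mask value is an example, not a real register setting:

#include <stdio.h>
#include <stdint.h>

/* Return 1 if every requested error bit is masked off by the device. */
static int fully_masked(uint32_t status, uint32_t mask)
{
    return status && !(status & ~mask);
}

int main(void)
{
    uint32_t mask = 0x000000c0;        /* device masks bits 6 and 7 */

    printf("inject 0x40 -> %s\n", fully_masked(0x40, mask) ? "rejected" : "allowed");
    printf("inject 0x41 -> %s\n", fully_masked(0x41, mask) ? "rejected" : "allowed");
    printf("inject 0x00 -> %s\n", fully_masked(0x00, mask) ? "rejected" : "allowed");
    return 0;
}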
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 97a345927b55..21f215f4daa3 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
| @@ -155,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) | |||
| 155 | mutex_init(&rpc->rpc_mutex); | 155 | mutex_init(&rpc->rpc_mutex); |
| 156 | init_waitqueue_head(&rpc->wait_release); | 156 | init_waitqueue_head(&rpc->wait_release); |
| 157 | 157 | ||
| 158 | /* Use PCIE bus function to store rpc into PCIE device */ | 158 | /* Use PCIe bus function to store rpc into PCIe device */ |
| 159 | set_service_data(dev, rpc); | 159 | set_service_data(dev, rpc); |
| 160 | 160 | ||
| 161 | return rpc; | 161 | return rpc; |
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 8edb2f300e8f..04814087658d 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | * | 24 | * |
| 25 | * @return: Zero on success. Nonzero otherwise. | 25 | * @return: Zero on success. Nonzero otherwise. |
| 26 | * | 26 | * |
| 27 | * Invoked when PCIE bus loads AER service driver. To avoid conflict with | 27 | * Invoked when PCIe bus loads AER service driver. To avoid conflict with |
| 28 | * BIOS AER support requires BIOS to yield AER control to OS native driver. | 28 | * BIOS AER support requires BIOS to yield AER control to OS native driver. |
| 29 | **/ | 29 | **/ |
| 30 | int aer_osc_setup(struct pcie_device *pciedev) | 30 | int aer_osc_setup(struct pcie_device *pciedev) |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index ae672ca80333..c843a799814d 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
| @@ -587,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev, | |||
| 587 | * aer_enable_rootport - enable Root Port's interrupts when receiving messages | 587 | * aer_enable_rootport - enable Root Port's interrupts when receiving messages |
| 588 | * @rpc: pointer to a Root Port data structure | 588 | * @rpc: pointer to a Root Port data structure |
| 589 | * | 589 | * |
| 590 | * Invoked when PCIE bus loads AER service driver. | 590 | * Invoked when PCIe bus loads AER service driver. |
| 591 | */ | 591 | */ |
| 592 | void aer_enable_rootport(struct aer_rpc *rpc) | 592 | void aer_enable_rootport(struct aer_rpc *rpc) |
| 593 | { | 593 | { |
| @@ -597,7 +597,7 @@ void aer_enable_rootport(struct aer_rpc *rpc) | |||
| 597 | u32 reg32; | 597 | u32 reg32; |
| 598 | 598 | ||
| 599 | pos = pci_pcie_cap(pdev); | 599 | pos = pci_pcie_cap(pdev); |
| 600 | /* Clear PCIE Capability's Device Status */ | 600 | /* Clear PCIe Capability's Device Status */ |
| 601 | pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); | 601 | pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); |
| 602 | pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); | 602 | pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); |
| 603 | 603 | ||
| @@ -631,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc) | |||
| 631 | * disable_root_aer - disable Root Port's interrupts when receiving messages | 631 | * disable_root_aer - disable Root Port's interrupts when receiving messages |
| 632 | * @rpc: pointer to a Root Port data structure | 632 | * @rpc: pointer to a Root Port data structure |
| 633 | * | 633 | * |
| 634 | * Invoked when PCIE bus unloads AER service driver. | 634 | * Invoked when PCIe bus unloads AER service driver. |
| 635 | */ | 635 | */ |
| 636 | static void disable_root_aer(struct aer_rpc *rpc) | 636 | static void disable_root_aer(struct aer_rpc *rpc) |
| 637 | { | 637 | { |
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 44acde72294f..9d3e4c8d0184 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
| @@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
| 184 | 184 | ||
| 185 | if (info->status == 0) { | 185 | if (info->status == 0) { |
| 186 | AER_PR(info, dev, | 186 | AER_PR(info, dev, |
| 187 | "PCIE Bus Error: severity=%s, type=Unaccessible, " | 187 | "PCIe Bus Error: severity=%s, type=Unaccessible, " |
| 188 | "id=%04x(Unregistered Agent ID)\n", | 188 | "id=%04x(Unregistered Agent ID)\n", |
| 189 | aer_error_severity_string[info->severity], id); | 189 | aer_error_severity_string[info->severity], id); |
| 190 | } else { | 190 | } else { |
| @@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) | |||
| 194 | agent = AER_GET_AGENT(info->severity, info->status); | 194 | agent = AER_GET_AGENT(info->severity, info->status); |
| 195 | 195 | ||
| 196 | AER_PR(info, dev, | 196 | AER_PR(info, dev, |
| 197 | "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n", | 197 | "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", |
| 198 | aer_error_severity_string[info->severity], | 198 | aer_error_severity_string[info->severity], |
| 199 | aer_error_layer[layer], id, aer_agent_string[agent]); | 199 | aer_error_layer[layer], id, aer_agent_string[agent]); |
| 200 | 200 | ||
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 5a01fc7fbf05..be53d98fa384 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * File: drivers/pci/pcie/aspm.c | 2 | * File: drivers/pci/pcie/aspm.c |
| 3 | * Enabling PCIE link L0s/L1 state and Clock Power Management | 3 | * Enabling PCIe link L0s/L1 state and Clock Power Management |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2007 Intel | 5 | * Copyright (C) 2007 Intel |
| 6 | * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) | 6 | * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) |
| @@ -499,7 +499,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
| 499 | int pos; | 499 | int pos; |
| 500 | u32 reg32; | 500 | u32 reg32; |
| 501 | /* | 501 | /* |
| 502 | * Some functions in a slot might not all be PCIE functions, | 502 | * Some functions in a slot might not all be PCIe functions, |
| 503 | * very strange. Disable ASPM for the whole slot | 503 | * very strange. Disable ASPM for the whole slot |
| 504 | */ | 504 | */ |
| 505 | list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { | 505 | list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 413262eb95b7..b174188ac121 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | */ | 27 | */ |
| 28 | static void release_pcie_device(struct device *dev) | 28 | static void release_pcie_device(struct device *dev) |
| 29 | { | 29 | { |
| 30 | kfree(to_pcie_device(dev)); | 30 | kfree(to_pcie_device(dev)); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | /** | 33 | /** |
| @@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data) | |||
| 346 | { | 346 | { |
| 347 | struct pcie_port_service_driver *service_driver; | 347 | struct pcie_port_service_driver *service_driver; |
| 348 | 348 | ||
| 349 | if ((dev->bus == &pcie_port_bus_type) && | 349 | if ((dev->bus == &pcie_port_bus_type) && dev->driver) { |
| 350 | (dev->driver)) { | 350 | service_driver = to_service_driver(dev->driver); |
| 351 | service_driver = to_service_driver(dev->driver); | 351 | if (service_driver->suspend) |
| 352 | if (service_driver->suspend) | 352 | service_driver->suspend(to_pcie_device(dev)); |
| 353 | service_driver->suspend(to_pcie_device(dev)); | 353 | } |
| 354 | } | ||
| 355 | return 0; | 354 | return 0; |
| 356 | } | 355 | } |
| 357 | 356 | ||
| @@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) | |||
| 494 | 493 | ||
| 495 | return driver_register(&new->driver); | 494 | return driver_register(&new->driver); |
| 496 | } | 495 | } |
| 496 | EXPORT_SYMBOL(pcie_port_service_register); | ||
| 497 | 497 | ||
| 498 | /** | 498 | /** |
| 499 | * pcie_port_service_unregister - unregister PCI Express port service driver | 499 | * pcie_port_service_unregister - unregister PCI Express port service driver |
| @@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) | |||
| 503 | { | 503 | { |
| 504 | driver_unregister(&drv->driver); | 504 | driver_unregister(&drv->driver); |
| 505 | } | 505 | } |
| 506 | |||
| 507 | EXPORT_SYMBOL(pcie_port_service_register); | ||
| 508 | EXPORT_SYMBOL(pcie_port_service_unregister); | 506 | EXPORT_SYMBOL(pcie_port_service_unregister); |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index a49452e2aed9..13c8972886e6 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | */ | 24 | */ |
| 25 | #define DRIVER_VERSION "v1.0" | 25 | #define DRIVER_VERSION "v1.0" |
| 26 | #define DRIVER_AUTHOR "tom.l.nguyen@intel.com" | 26 | #define DRIVER_AUTHOR "tom.l.nguyen@intel.com" |
| 27 | #define DRIVER_DESC "PCIE Port Bus Driver" | 27 | #define DRIVER_DESC "PCIe Port Bus Driver" |
| 28 | MODULE_AUTHOR(DRIVER_AUTHOR); | 28 | MODULE_AUTHOR(DRIVER_AUTHOR); |
| 29 | MODULE_DESCRIPTION(DRIVER_DESC); | 29 | MODULE_DESCRIPTION(DRIVER_DESC); |
| 30 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
| @@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
| 63 | * pcie_portdrv_probe - Probe PCI-Express port devices | 63 | * pcie_portdrv_probe - Probe PCI-Express port devices |
| 64 | * @dev: PCI-Express port device being probed | 64 | * @dev: PCI-Express port device being probed |
| 65 | * | 65 | * |
| 66 | * If detected invokes the pcie_port_device_register() method for | 66 | * If detected invokes the pcie_port_device_register() method for |
| 67 | * this port device. | 67 | * this port device. |
| 68 | * | 68 | * |
| 69 | */ | 69 | */ |
| @@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, | |||
| 78 | (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) | 78 | (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) |
| 79 | return -ENODEV; | 79 | return -ENODEV; |
| 80 | 80 | ||
| 81 | if (!dev->irq && dev->pin) { | 81 | if (!dev->irq && dev->pin) { |
| 82 | dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " | 82 | dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " |
| 83 | "check vendor BIOS\n", dev->vendor, dev->device); | 83 | "check vendor BIOS\n", dev->vendor, dev->device); |
| 84 | } | 84 | } |
| @@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, | |||
| 91 | return 0; | 91 | return 0; |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | static void pcie_portdrv_remove (struct pci_dev *dev) | 94 | static void pcie_portdrv_remove(struct pci_dev *dev) |
| 95 | { | 95 | { |
| 96 | pcie_port_device_remove(dev); | 96 | pcie_port_device_remove(dev); |
| 97 | pci_disable_device(dev); | 97 | pci_disable_device(dev); |
| @@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data) | |||
| 129 | static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, | 129 | static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, |
| 130 | enum pci_channel_state error) | 130 | enum pci_channel_state error) |
| 131 | { | 131 | { |
| 132 | struct aer_broadcast_data result_data = | 132 | struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER}; |
| 133 | {error, PCI_ERS_RESULT_CAN_RECOVER}; | 133 | int ret; |
| 134 | int retval; | ||
| 135 | 134 | ||
| 136 | /* can not fail */ | 135 | /* can not fail */ |
| 137 | retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter); | 136 | ret = device_for_each_child(&dev->dev, &data, error_detected_iter); |
| 138 | 137 | ||
| 139 | return result_data.result; | 138 | return data.result; |
| 140 | } | 139 | } |
| 141 | 140 | ||
| 142 | static int mmio_enabled_iter(struct device *device, void *data) | 141 | static int mmio_enabled_iter(struct device *device, void *data) |
| @@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void) | |||
| 290 | return retval; | 289 | return retval; |
| 291 | } | 290 | } |
| 292 | 291 | ||
| 293 | static void __exit pcie_portdrv_exit(void) | 292 | static void __exit pcie_portdrv_exit(void) |
| 294 | { | 293 | { |
| 295 | pci_unregister_driver(&pcie_portdriver); | 294 | pci_unregister_driver(&pcie_portdriver); |
| 296 | pcie_port_bus_unregister(); | 295 | pcie_port_bus_unregister(); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 7cfa7c38d318..c74694345b6e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -2629,14 +2629,86 @@ static int __init pci_apply_final_quirks(void) | |||
| 2629 | if (!pci_cache_line_size) { | 2629 | if (!pci_cache_line_size) { |
| 2630 | printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", | 2630 | printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", |
| 2631 | cls << 2, pci_dfl_cache_line_size << 2); | 2631 | cls << 2, pci_dfl_cache_line_size << 2); |
| 2632 | pci_cache_line_size = cls; | 2632 | pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; |
| 2633 | } | 2633 | } |
| 2634 | 2634 | ||
| 2635 | return 0; | 2635 | return 0; |
| 2636 | } | 2636 | } |
| 2637 | 2637 | ||
| 2638 | fs_initcall_sync(pci_apply_final_quirks); | 2638 | fs_initcall_sync(pci_apply_final_quirks); |
| 2639 | |||
| 2640 | /* | ||
| 2641 | * The following are device-specific reset methods which can be used to | ||
| 2642 | * reset a single function if other methods (e.g. FLR, PM D0->D3) are | ||
| 2643 | * not available. | ||
| 2644 | */ | ||
| 2645 | static int reset_intel_generic_dev(struct pci_dev *dev, int probe) | ||
| 2646 | { | ||
| 2647 | int pos; | ||
| 2648 | |||
| 2649 | /* only implement PCI_CLASS_SERIAL_USB at present */ | ||
| 2650 | if (dev->class == PCI_CLASS_SERIAL_USB) { | ||
| 2651 | pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); | ||
| 2652 | if (!pos) | ||
| 2653 | return -ENOTTY; | ||
| 2654 | |||
| 2655 | if (probe) | ||
| 2656 | return 0; | ||
| 2657 | |||
| 2658 | pci_write_config_byte(dev, pos + 0x4, 1); | ||
| 2659 | msleep(100); | ||
| 2660 | |||
| 2661 | return 0; | ||
| 2662 | } else { | ||
| 2663 | return -ENOTTY; | ||
| 2664 | } | ||
| 2665 | } | ||
| 2666 | |||
| 2667 | static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) | ||
| 2668 | { | ||
| 2669 | int pos; | ||
| 2670 | |||
| 2671 | pos = pci_find_capability(dev, PCI_CAP_ID_EXP); | ||
| 2672 | if (!pos) | ||
| 2673 | return -ENOTTY; | ||
| 2674 | |||
| 2675 | if (probe) | ||
| 2676 | return 0; | ||
| 2677 | |||
| 2678 | pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, | ||
| 2679 | PCI_EXP_DEVCTL_BCR_FLR); | ||
| 2680 | msleep(100); | ||
| 2681 | |||
| 2682 | return 0; | ||
| 2683 | } | ||
| 2684 | |||
| 2685 | #define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed | ||
| 2686 | |||
| 2687 | static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { | ||
| 2688 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF, | ||
| 2689 | reset_intel_82599_sfp_virtfn }, | ||
| 2690 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, | ||
| 2691 | reset_intel_generic_dev }, | ||
| 2692 | { 0 } | ||
| 2693 | }; | ||
| 2694 | |||
| 2695 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) | ||
| 2696 | { | ||
| 2697 | const struct pci_dev_reset_methods *i; | ||
| 2698 | |||
| 2699 | for (i = pci_dev_reset_methods; i->reset; i++) { | ||
| 2700 | if ((i->vendor == dev->vendor || | ||
| 2701 | i->vendor == (u16)PCI_ANY_ID) && | ||
| 2702 | (i->device == dev->device || | ||
| 2703 | i->device == (u16)PCI_ANY_ID)) | ||
| 2704 | return i->reset(dev, probe); | ||
| 2705 | } | ||
| 2706 | |||
| 2707 | return -ENOTTY; | ||
| 2708 | } | ||
| 2709 | |||
| 2639 | #else | 2710 | #else |
| 2640 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} | 2711 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} |
| 2712 | int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; } | ||
| 2641 | #endif | 2713 | #endif |
| 2642 | EXPORT_SYMBOL(pci_fixup_device); | 2714 | EXPORT_SYMBOL(pci_fixup_device); |
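pci_dev_specific_reset() walks a table of (vendor, device, callback) entries in which PCI_ANY_ID acts as a wildcard, and returns -ENOTTY when nothing matches so that pci_dev_reset() falls through to FLR, the PM D3hot/D0 cycle and the other generic methods. A self-contained sketch of the same wildcard table lookup; the Intel IDs and the -ENOTTY convention mirror the hunk, while the device structure and reset action are illustrative:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define ANY_ID 0xffff

struct dev { uint16_t vendor, device; };

struct dev_reset_method {
    uint16_t vendor, device;
    int (*reset)(struct dev *d, int probe);
};

static int reset_widget(struct dev *d, int probe)
{
    if (!probe)
        printf("resetting %04x:%04x via the widget quirk\n",
               d->vendor, d->device);
    return 0;
}

static const struct dev_reset_method methods[] = {
    { 0x8086, 0x10ed, reset_widget },   /* exact match */
    { 0x8086, ANY_ID, reset_widget },   /* vendor-wide fallback */
    { 0 }
};

static int dev_specific_reset(struct dev *d, int probe)
{
    const struct dev_reset_method *i;

    for (i = methods; i->reset; i++)
        if ((i->vendor == d->vendor || i->vendor == ANY_ID) &&
            (i->device == d->device || i->device == ANY_ID))
            return i->reset(d, probe);

    return -ENOTTY;     /* let the caller try FLR, PM reset, ... */
}

int main(void)
{
    struct dev nic  = { 0x8086, 0x10ed };
    struct dev misc = { 0x1234, 0x0001 };

    printf("nic : %d\n", dev_specific_reset(&nic, 0));
    printf("misc: %d\n", dev_specific_reset(&misc, 0));
    return 0;
}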
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 6dae87143258..4a471dc4f4b9 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
| @@ -15,9 +15,9 @@ | |||
| 15 | 15 | ||
| 16 | DECLARE_RWSEM(pci_bus_sem); | 16 | DECLARE_RWSEM(pci_bus_sem); |
| 17 | /* | 17 | /* |
| 18 | * find the upstream PCIE-to-PCI bridge of a PCI device | 18 | * find the upstream PCIe-to-PCI bridge of a PCI device |
| 19 | * if the device is PCIE, return NULL | 19 | * if the device is PCIE, return NULL |
| 20 | * if the device isn't connected to a PCIE bridge (that is its parent is a | 20 | * if the device isn't connected to a PCIe bridge (that is its parent is a |
| 21 | * legacy PCI bridge and the bridge is directly connected to bus 0), return its | 21 | * legacy PCI bridge and the bridge is directly connected to bus 0), return its |
| 22 | * parent | 22 | * parent |
| 23 | */ | 23 | */ |
| @@ -37,7 +37,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev) | |||
| 37 | tmp = pdev; | 37 | tmp = pdev; |
| 38 | continue; | 38 | continue; |
| 39 | } | 39 | } |
| 40 | /* PCI device should connect to a PCIE bridge */ | 40 | /* PCI device should connect to a PCIe bridge */ |
| 41 | if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { | 41 | if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { |
| 42 | /* Busted hardware? */ | 42 | /* Busted hardware? */ |
| 43 | WARN_ON_ONCE(1); | 43 | WARN_ON_ONCE(1); |
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index cdf50f3bc2df..d99f846451a3 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c | |||
| @@ -222,7 +222,7 @@ int __ref cb_alloc(struct pcmcia_socket *s) | |||
| 222 | unsigned int max, pass; | 222 | unsigned int max, pass; |
| 223 | 223 | ||
| 224 | s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); | 224 | s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); |
| 225 | /* pcibios_fixup_bus(bus); */ | 225 | pci_fixup_cardbus(bus); |
| 226 | 226 | ||
| 227 | max = bus->secondary; | 227 | max = bus->secondary; |
| 228 | for (pass = 0; pass < 2; pass++) | 228 | for (pass = 0; pass < 2; pass++) |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ec4faffe6b05..db32c25e3605 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -231,8 +231,36 @@ config THINKPAD_ACPI | |||
| 231 | 231 | ||
| 232 | This driver was formerly known as ibm-acpi. | 232 | This driver was formerly known as ibm-acpi. |
| 233 | 233 | ||
| 234 | Extra functionality will be available if the rfkill (CONFIG_RFKILL) | ||
| 235 | and/or ALSA (CONFIG_SND) subsystems are available in the kernel. | ||
| 236 | Note that if you want ThinkPad-ACPI to be built-in instead of | ||
| 237 | modular, ALSA and rfkill will also have to be built-in. | ||
| 238 | |||
| 234 | If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. | 239 | If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. |
| 235 | 240 | ||
| 241 | config THINKPAD_ACPI_ALSA_SUPPORT | ||
| 242 | bool "Console audio control ALSA interface" | ||
| 243 | depends on THINKPAD_ACPI | ||
| 244 | depends on SND | ||
| 245 | depends on SND = y || THINKPAD_ACPI = SND | ||
| 246 | default y | ||
| 247 | ---help--- | ||
| 248 | Enables monitoring of the built-in console audio output control | ||
| 249 | (headphone and speakers), which is operated by the mute and (in | ||
| 250 | some ThinkPad models) volume hotkeys. | ||
| 251 | |||
| 252 | If this option is enabled, ThinkPad-ACPI will export an ALSA card | ||
| 253 | with a single read-only mixer control, which should be used for | ||
| 254 | on-screen-display feedback purposes by the Desktop Environment. | ||
| 255 | |||
| 256 | Optionally, the driver will also allow software control (the | ||
| 257 | ALSA mixer will be made read-write). Please refer to the driver | ||
| 258 | documentation for details. | ||
| 259 | |||
| 260 | All IBM models have both volume and mute control. Newer Lenovo | ||
| 261 | models only have mute control (the volume hotkeys are just normal | ||
| 262 | keys and volume control is done through the main HDA mixer). | ||
| 263 | |||
| 236 | config THINKPAD_ACPI_DEBUGFACILITIES | 264 | config THINKPAD_ACPI_DEBUGFACILITIES |
| 237 | bool "Maintainer debug facilities" | 265 | bool "Maintainer debug facilities" |
| 238 | depends on THINKPAD_ACPI | 266 | depends on THINKPAD_ACPI |
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 916ccb2b316c..1b1dddbd5744 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
| @@ -202,8 +202,13 @@ static void dell_wmi_notify(u32 value, void *context) | |||
| 202 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | 202 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 203 | static struct key_entry *key; | 203 | static struct key_entry *key; |
| 204 | union acpi_object *obj; | 204 | union acpi_object *obj; |
| 205 | acpi_status status; | ||
| 205 | 206 | ||
| 206 | wmi_get_event_data(value, &response); | 207 | status = wmi_get_event_data(value, &response); |
| 208 | if (status != AE_OK) { | ||
| 209 | printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status); | ||
| 210 | return; | ||
| 211 | } | ||
| 207 | 212 | ||
| 208 | obj = (union acpi_object *)response.pointer; | 213 | obj = (union acpi_object *)response.pointer; |
| 209 | 214 | ||
| @@ -323,8 +328,9 @@ static int __init dell_wmi_input_setup(void) | |||
| 323 | static int __init dell_wmi_init(void) | 328 | static int __init dell_wmi_init(void) |
| 324 | { | 329 | { |
| 325 | int err; | 330 | int err; |
| 331 | acpi_status status; | ||
| 326 | 332 | ||
| 327 | if (wmi_has_guid(DELL_EVENT_GUID)) { | 333 | if (!wmi_has_guid(DELL_EVENT_GUID)) { |
| 328 | printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n"); | 334 | printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n"); |
| 329 | return -ENODEV; | 335 | return -ENODEV; |
| 330 | } | 336 | } |
| @@ -336,14 +342,14 @@ static int __init dell_wmi_init(void) | |||
| 336 | if (err) | 342 | if (err) |
| 337 | return err; | 343 | return err; |
| 338 | 344 | ||
| 339 | err = wmi_install_notify_handler(DELL_EVENT_GUID, | 345 | status = wmi_install_notify_handler(DELL_EVENT_GUID, |
| 340 | dell_wmi_notify, NULL); | 346 | dell_wmi_notify, NULL); |
| 341 | if (err) { | 347 | if (ACPI_FAILURE(status)) { |
| 342 | input_unregister_device(dell_wmi_input_dev); | 348 | input_unregister_device(dell_wmi_input_dev); |
| 343 | printk(KERN_ERR | 349 | printk(KERN_ERR |
| 344 | "dell-wmi: Unable to register notify handler - %d\n", | 350 | "dell-wmi: Unable to register notify handler - %d\n", |
| 345 | err); | 351 | status); |
| 346 | return err; | 352 | return -ENODEV; |
| 347 | } | 353 | } |
| 348 | 354 | ||
| 349 | return 0; | 355 | return 0; |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 8781d8fa7a57..ad4c414dbfbc 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
| @@ -338,8 +338,13 @@ static void hp_wmi_notify(u32 value, void *context) | |||
| 338 | static struct key_entry *key; | 338 | static struct key_entry *key; |
| 339 | union acpi_object *obj; | 339 | union acpi_object *obj; |
| 340 | int eventcode; | 340 | int eventcode; |
| 341 | acpi_status status; | ||
| 341 | 342 | ||
| 342 | wmi_get_event_data(value, &response); | 343 | status = wmi_get_event_data(value, &response); |
| 344 | if (status != AE_OK) { | ||
| 345 | printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status); | ||
| 346 | return; | ||
| 347 | } | ||
| 343 | 348 | ||
| 344 | obj = (union acpi_object *)response.pointer; | 349 | obj = (union acpi_object *)response.pointer; |
| 345 | 350 | ||
| @@ -388,8 +393,6 @@ static void hp_wmi_notify(u32 value, void *context) | |||
| 388 | } else | 393 | } else |
| 389 | printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", | 394 | printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", |
| 390 | eventcode); | 395 | eventcode); |
| 391 | |||
| 392 | kfree(obj); | ||
| 393 | } | 396 | } |
| 394 | 397 | ||
| 395 | static int __init hp_wmi_input_setup(void) | 398 | static int __init hp_wmi_input_setup(void) |
| @@ -581,7 +584,7 @@ static int __init hp_wmi_init(void) | |||
| 581 | if (wmi_has_guid(HPWMI_EVENT_GUID)) { | 584 | if (wmi_has_guid(HPWMI_EVENT_GUID)) { |
| 582 | err = wmi_install_notify_handler(HPWMI_EVENT_GUID, | 585 | err = wmi_install_notify_handler(HPWMI_EVENT_GUID, |
| 583 | hp_wmi_notify, NULL); | 586 | hp_wmi_notify, NULL); |
| 584 | if (!err) | 587 | if (ACPI_SUCCESS(err)) |
| 585 | hp_wmi_input_setup(); | 588 | hp_wmi_input_setup(); |
| 586 | } | 589 | } |
| 587 | 590 | ||
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index 7f77f908bb01..f5f70d4c6913 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c | |||
| @@ -149,8 +149,13 @@ static void msi_wmi_notify(u32 value, void *context) | |||
| 149 | static struct key_entry *key; | 149 | static struct key_entry *key; |
| 150 | union acpi_object *obj; | 150 | union acpi_object *obj; |
| 151 | ktime_t cur; | 151 | ktime_t cur; |
| 152 | acpi_status status; | ||
| 152 | 153 | ||
| 153 | wmi_get_event_data(value, &response); | 154 | status = wmi_get_event_data(value, &response); |
| 155 | if (status != AE_OK) { | ||
| 156 | printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status); | ||
| 157 | return; | ||
| 158 | } | ||
| 154 | 159 | ||
| 155 | obj = (union acpi_object *)response.pointer; | 160 | obj = (union acpi_object *)response.pointer; |
| 156 | 161 | ||
| @@ -236,7 +241,7 @@ static int __init msi_wmi_init(void) | |||
| 236 | } | 241 | } |
| 237 | err = wmi_install_notify_handler(MSIWMI_EVENT_GUID, | 242 | err = wmi_install_notify_handler(MSIWMI_EVENT_GUID, |
| 238 | msi_wmi_notify, NULL); | 243 | msi_wmi_notify, NULL); |
| 239 | if (err) | 244 | if (ACPI_FAILURE(err)) |
| 240 | return -EINVAL; | 245 | return -EINVAL; |
| 241 | 246 | ||
| 242 | err = msi_wmi_input_setup(); | 247 | err = msi_wmi_input_setup(); |
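hp-wmi and msi-wmi gain the same guard: wmi_get_event_data() also returns an acpi_status, and the handler must not dereference response.pointer unless that call succeeded. A hedged sketch of the checked pattern, with the buffer setup shown explicitly and placeholder names:

	/*
	 * Sketch only: fetching WMI event data safely inside a notify
	 * handler.  Assumes the usual ACPI_ALLOCATE_BUFFER convention.
	 */
	static void example_notify(u32 value, void *context)
	{
		struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;
		acpi_status status;

		status = wmi_get_event_data(value, &response);
		if (status != AE_OK) {
			printk(KERN_INFO "example: bad event status 0x%x\n", status);
			return;
		}

		obj = (union acpi_object *)response.pointer;
		if (obj && obj->type == ACPI_TYPE_INTEGER)
			printk(KERN_INFO "example: event code 0x%llx\n",
			       (unsigned long long)obj->integer.value);

		kfree(response.pointer);	/* allocated by ACPICA on success */
	}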
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 448c8aeb166b..e67e4feb35cb 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -6384,11 +6384,13 @@ static struct ibm_struct brightness_driver_data = { | |||
| 6384 | * and we leave them unchanged. | 6384 | * and we leave them unchanged. |
| 6385 | */ | 6385 | */ |
| 6386 | 6386 | ||
| 6387 | #ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT | ||
| 6388 | |||
| 6387 | #define TPACPI_ALSA_DRVNAME "ThinkPad EC" | 6389 | #define TPACPI_ALSA_DRVNAME "ThinkPad EC" |
| 6388 | #define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control" | 6390 | #define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control" |
| 6389 | #define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME | 6391 | #define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME |
| 6390 | 6392 | ||
| 6391 | static int alsa_index = SNDRV_DEFAULT_IDX1; | 6393 | static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */ |
| 6392 | static char *alsa_id = "ThinkPadEC"; | 6394 | static char *alsa_id = "ThinkPadEC"; |
| 6393 | static int alsa_enable = SNDRV_DEFAULT_ENABLE1; | 6395 | static int alsa_enable = SNDRV_DEFAULT_ENABLE1; |
| 6394 | 6396 | ||
| @@ -6705,10 +6707,11 @@ static int __init volume_create_alsa_mixer(void) | |||
| 6705 | 6707 | ||
| 6706 | rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE, | 6708 | rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE, |
| 6707 | sizeof(struct tpacpi_alsa_data), &card); | 6709 | sizeof(struct tpacpi_alsa_data), &card); |
| 6708 | if (rc < 0) | 6710 | if (rc < 0 || !card) { |
| 6709 | return rc; | 6711 | printk(TPACPI_ERR |
| 6710 | if (!card) | 6712 | "Failed to create ALSA card structures: %d\n", rc); |
| 6711 | return -ENOMEM; | 6713 | return 1; |
| 6714 | } | ||
| 6712 | 6715 | ||
| 6713 | BUG_ON(!card->private_data); | 6716 | BUG_ON(!card->private_data); |
| 6714 | data = card->private_data; | 6717 | data = card->private_data; |
| @@ -6741,8 +6744,9 @@ static int __init volume_create_alsa_mixer(void) | |||
| 6741 | rc = snd_ctl_add(card, ctl_vol); | 6744 | rc = snd_ctl_add(card, ctl_vol); |
| 6742 | if (rc < 0) { | 6745 | if (rc < 0) { |
| 6743 | printk(TPACPI_ERR | 6746 | printk(TPACPI_ERR |
| 6744 | "Failed to create ALSA volume control\n"); | 6747 | "Failed to create ALSA volume control: %d\n", |
| 6745 | goto err_out; | 6748 | rc); |
| 6749 | goto err_exit; | ||
| 6746 | } | 6750 | } |
| 6747 | data->ctl_vol_id = &ctl_vol->id; | 6751 | data->ctl_vol_id = &ctl_vol->id; |
| 6748 | } | 6752 | } |
| @@ -6750,22 +6754,25 @@ static int __init volume_create_alsa_mixer(void) | |||
| 6750 | ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL); | 6754 | ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL); |
| 6751 | rc = snd_ctl_add(card, ctl_mute); | 6755 | rc = snd_ctl_add(card, ctl_mute); |
| 6752 | if (rc < 0) { | 6756 | if (rc < 0) { |
| 6753 | printk(TPACPI_ERR "Failed to create ALSA mute control\n"); | 6757 | printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n", |
| 6754 | goto err_out; | 6758 | rc); |
| 6759 | goto err_exit; | ||
| 6755 | } | 6760 | } |
| 6756 | data->ctl_mute_id = &ctl_mute->id; | 6761 | data->ctl_mute_id = &ctl_mute->id; |
| 6757 | 6762 | ||
| 6758 | snd_card_set_dev(card, &tpacpi_pdev->dev); | 6763 | snd_card_set_dev(card, &tpacpi_pdev->dev); |
| 6759 | rc = snd_card_register(card); | 6764 | rc = snd_card_register(card); |
| 6760 | |||
| 6761 | err_out: | ||
| 6762 | if (rc < 0) { | 6765 | if (rc < 0) { |
| 6763 | snd_card_free(card); | 6766 | printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc); |
| 6764 | card = NULL; | 6767 | goto err_exit; |
| 6765 | } | 6768 | } |
| 6766 | 6769 | ||
| 6767 | alsa_card = card; | 6770 | alsa_card = card; |
| 6768 | return rc; | 6771 | return 0; |
| 6772 | |||
| 6773 | err_exit: | ||
| 6774 | snd_card_free(card); | ||
| 6775 | return 1; | ||
| 6769 | } | 6776 | } |
| 6770 | 6777 | ||
| 6771 | #define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */ | 6778 | #define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */ |
| @@ -7016,6 +7023,28 @@ static struct ibm_struct volume_driver_data = { | |||
| 7016 | .shutdown = volume_shutdown, | 7023 | .shutdown = volume_shutdown, |
| 7017 | }; | 7024 | }; |
| 7018 | 7025 | ||
| 7026 | #else /* !CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */ | ||
| 7027 | |||
| 7028 | #define alsa_card NULL | ||
| 7029 | |||
| 7030 | static inline void volume_alsa_notify_change(void) | ||
| 7031 | { | ||
| 7032 | } | ||
| 7033 | |||
| 7034 | static int __init volume_init(struct ibm_init_struct *iibm) | ||
| 7035 | { | ||
| 7036 | printk(TPACPI_INFO | ||
| 7037 | "volume: disabled as there is no ALSA support in this kernel\n"); | ||
| 7038 | |||
| 7039 | return 1; | ||
| 7040 | } | ||
| 7041 | |||
| 7042 | static struct ibm_struct volume_driver_data = { | ||
| 7043 | .name = "volume", | ||
| 7044 | }; | ||
| 7045 | |||
| 7046 | #endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */ | ||
| 7047 | |||
| 7019 | /************************************************************************* | 7048 | /************************************************************************* |
| 7020 | * Fan subdriver | 7049 | * Fan subdriver |
| 7021 | */ | 7050 | */ |
| @@ -8738,6 +8767,7 @@ MODULE_PARM_DESC(hotkey_report_mode, | |||
| 8738 | "used for backwards compatibility with userspace, " | 8767 | "used for backwards compatibility with userspace, " |
| 8739 | "see documentation"); | 8768 | "see documentation"); |
| 8740 | 8769 | ||
| 8770 | #ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT | ||
| 8741 | module_param_named(volume_mode, volume_mode, uint, 0444); | 8771 | module_param_named(volume_mode, volume_mode, uint, 0444); |
| 8742 | MODULE_PARM_DESC(volume_mode, | 8772 | MODULE_PARM_DESC(volume_mode, |
| 8743 | "Selects volume control strategy: " | 8773 | "Selects volume control strategy: " |
| @@ -8760,6 +8790,7 @@ module_param_named(id, alsa_id, charp, 0444); | |||
| 8760 | MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer"); | 8790 | MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer"); |
| 8761 | module_param_named(enable, alsa_enable, bool, 0444); | 8791 | module_param_named(enable, alsa_enable, bool, 0444); |
| 8762 | MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer"); | 8792 | MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer"); |
| 8793 | #endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */ | ||
| 8763 | 8794 | ||
| 8764 | #define TPACPI_PARAM(feature) \ | 8795 | #define TPACPI_PARAM(feature) \ |
| 8765 | module_param_call(feature, set_ibm_param, NULL, NULL, 0); \ | 8796 | module_param_call(feature, set_ibm_param, NULL, NULL, 0); \ |
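Two points in the thinkpad_acpi hunks are worth spelling out. First, the whole EC mixer, its module parameters included, is now compiled only under CONFIG_THINKPAD_ACPI_ALSA_SUPPORT, with a stub volume_init() that reports the subdriver as disabled otherwise. Second, the default alsa_index changes from SNDRV_DEFAULT_IDX1 to a negative bitmask; as far as the ALSA core of this era goes (treat this as an assumption), a negative index passed to snd_card_create() is interpreted as a mask of permitted card slots, so the value confines the EC mixer to the last three slots:

	/*
	 * Sketch only: what the alsa_index default evaluates to.  The mask
	 * has only the top three of SNDRV_CARDS bits set; e.g. with
	 * SNDRV_CARDS == 32 it is 0xe0000000 (slots 29, 30 and 31).
	 */
	#define EXAMPLE_SNDRV_CARDS 32		/* assumed value for illustration */
	static const unsigned int example_last_three_slots =
		~((1u << (EXAMPLE_SNDRV_CARDS - 3)) - 1);	/* 0xe0000000 */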
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 9f93d6c0f510..b104302fea0a 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
| @@ -492,8 +492,7 @@ wmi_notify_handler handler, void *data) | |||
| 492 | if (!guid || !handler) | 492 | if (!guid || !handler) |
| 493 | return AE_BAD_PARAMETER; | 493 | return AE_BAD_PARAMETER; |
| 494 | 494 | ||
| 495 | find_guid(guid, &block); | 495 | if (!find_guid(guid, &block)) |
| 496 | if (!block) | ||
| 497 | return AE_NOT_EXIST; | 496 | return AE_NOT_EXIST; |
| 498 | 497 | ||
| 499 | if (block->handler) | 498 | if (block->handler) |
| @@ -521,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid) | |||
| 521 | if (!guid) | 520 | if (!guid) |
| 522 | return AE_BAD_PARAMETER; | 521 | return AE_BAD_PARAMETER; |
| 523 | 522 | ||
| 524 | find_guid(guid, &block); | 523 | if (!find_guid(guid, &block)) |
| 525 | if (!block) | ||
| 526 | return AE_NOT_EXIST; | 524 | return AE_NOT_EXIST; |
| 527 | 525 | ||
| 528 | if (!block->handler) | 526 | if (!block->handler) |
| @@ -716,6 +714,22 @@ static int wmi_class_init(void) | |||
| 716 | return ret; | 714 | return ret; |
| 717 | } | 715 | } |
| 718 | 716 | ||
| 717 | static bool guid_already_parsed(const char *guid_string) | ||
| 718 | { | ||
| 719 | struct guid_block *gblock; | ||
| 720 | struct wmi_block *wblock; | ||
| 721 | struct list_head *p; | ||
| 722 | |||
| 723 | list_for_each(p, &wmi_blocks.list) { | ||
| 724 | wblock = list_entry(p, struct wmi_block, list); | ||
| 725 | gblock = &wblock->gblock; | ||
| 726 | |||
| 727 | if (strncmp(gblock->guid, guid_string, 16) == 0) | ||
| 728 | return true; | ||
| 729 | } | ||
| 730 | return false; | ||
| 731 | } | ||
| 732 | |||
| 719 | /* | 733 | /* |
| 720 | * Parse the _WDG method for the GUID data blocks | 734 | * Parse the _WDG method for the GUID data blocks |
| 721 | */ | 735 | */ |
| @@ -725,6 +739,7 @@ static __init acpi_status parse_wdg(acpi_handle handle) | |||
| 725 | union acpi_object *obj; | 739 | union acpi_object *obj; |
| 726 | struct guid_block *gblock; | 740 | struct guid_block *gblock; |
| 727 | struct wmi_block *wblock; | 741 | struct wmi_block *wblock; |
| 742 | char guid_string[37]; | ||
| 728 | acpi_status status; | 743 | acpi_status status; |
| 729 | u32 i, total; | 744 | u32 i, total; |
| 730 | 745 | ||
| @@ -747,6 +762,19 @@ static __init acpi_status parse_wdg(acpi_handle handle) | |||
| 747 | memcpy(gblock, obj->buffer.pointer, obj->buffer.length); | 762 | memcpy(gblock, obj->buffer.pointer, obj->buffer.length); |
| 748 | 763 | ||
| 749 | for (i = 0; i < total; i++) { | 764 | for (i = 0; i < total; i++) { |
| 765 | /* | ||
| 766 | * Some WMI devices, like those for nVidia hooks, have a | ||
| 767 | * duplicate GUID. It's not clear what we should do in this | ||
| 768 | * case yet, so for now, we'll just ignore the duplicate. | ||
| 769 | * Anyone who wants to add support for that device can come | ||
| 770 | * up with a better workaround for the mess then. | ||
| 771 | */ | ||
| 772 | if (guid_already_parsed(gblock[i].guid) == true) { | ||
| 773 | wmi_gtoa(gblock[i].guid, guid_string); | ||
| 774 | printk(KERN_INFO PREFIX "Skipping duplicate GUID %s\n", | ||
| 775 | guid_string); | ||
| 776 | continue; | ||
| 777 | } | ||
| 750 | wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); | 778 | wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); |
| 751 | if (!wblock) | 779 | if (!wblock) |
| 752 | return AE_NO_MEMORY; | 780 | return AE_NO_MEMORY; |
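Two separate fixes land in wmi.c. The notify-handler install and remove paths now test find_guid()'s return value directly; the previous code ignored it and tested the output pointer instead, which find_guid() is not guaranteed to write when the GUID is absent (treat the exact old declaration as an assumption), so the NULL test could read an indeterminate pointer. And parse_wdg() now skips _WDG entries whose GUID has already been seen instead of creating a second wmi_block for them. A minimal sketch of the corrected lookup pattern:

	/*
	 * Sketch only: rely on find_guid()'s boolean-style return value,
	 * not on the output pointer, which is only meaningful on success.
	 */
	static acpi_status example_lookup(const char *guid)
	{
		struct wmi_block *block;	/* intentionally left uninitialized */

		if (!find_guid(guid, &block))
			return AE_NOT_EXIST;	/* GUID not described by _WDG */

		return block->handler ? AE_ALREADY_ACQUIRED : AE_OK;
	}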
diff --git a/drivers/power/pmu_battery.c b/drivers/power/pmu_battery.c index 9346a862f1f2..9c87ad564803 100644 --- a/drivers/power/pmu_battery.c +++ b/drivers/power/pmu_battery.c | |||
| @@ -89,6 +89,8 @@ static int pmu_bat_get_property(struct power_supply *psy, | |||
| 89 | case POWER_SUPPLY_PROP_STATUS: | 89 | case POWER_SUPPLY_PROP_STATUS: |
| 90 | if (pbi->flags & PMU_BATT_CHARGING) | 90 | if (pbi->flags & PMU_BATT_CHARGING) |
| 91 | val->intval = POWER_SUPPLY_STATUS_CHARGING; | 91 | val->intval = POWER_SUPPLY_STATUS_CHARGING; |
| 92 | else if (pmu_power_flags & PMU_PWR_AC_PRESENT) | ||
| 93 | val->intval = POWER_SUPPLY_STATUS_FULL; | ||
| 92 | else | 94 | else |
| 93 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; | 95 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; |
| 94 | break; | 96 | break; |
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index c8c12325e69b..e9aa814ddd23 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
| @@ -1096,9 +1096,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp) | |||
| 1096 | #define cmos_pnp_resume NULL | 1096 | #define cmos_pnp_resume NULL |
| 1097 | #endif | 1097 | #endif |
| 1098 | 1098 | ||
| 1099 | static void cmos_pnp_shutdown(struct device *pdev) | 1099 | static void cmos_pnp_shutdown(struct pnp_dev *pnp) |
| 1100 | { | 1100 | { |
| 1101 | if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev)) | 1101 | if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev)) |
| 1102 | return; | 1102 | return; |
| 1103 | 1103 | ||
| 1104 | cmos_do_shutdown(); | 1104 | cmos_do_shutdown(); |
| @@ -1117,15 +1117,12 @@ static struct pnp_driver cmos_pnp_driver = { | |||
| 1117 | .id_table = rtc_ids, | 1117 | .id_table = rtc_ids, |
| 1118 | .probe = cmos_pnp_probe, | 1118 | .probe = cmos_pnp_probe, |
| 1119 | .remove = __exit_p(cmos_pnp_remove), | 1119 | .remove = __exit_p(cmos_pnp_remove), |
| 1120 | .shutdown = cmos_pnp_shutdown, | ||
| 1120 | 1121 | ||
| 1121 | /* flag ensures resume() gets called, and stops syslog spam */ | 1122 | /* flag ensures resume() gets called, and stops syslog spam */ |
| 1122 | .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, | 1123 | .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, |
| 1123 | .suspend = cmos_pnp_suspend, | 1124 | .suspend = cmos_pnp_suspend, |
| 1124 | .resume = cmos_pnp_resume, | 1125 | .resume = cmos_pnp_resume, |
| 1125 | .driver = { | ||
| 1126 | .name = (char *)driver_name, | ||
| 1127 | .shutdown = cmos_pnp_shutdown, | ||
| 1128 | } | ||
| 1129 | }; | 1126 | }; |
| 1130 | 1127 | ||
| 1131 | #endif /* CONFIG_PNP */ | 1128 | #endif /* CONFIG_PNP */ |
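The rtc-cmos hunk moves the shutdown hook from the embedded struct device_driver up to the pnp_driver itself, so the callback now takes a struct pnp_dev * and reaches the generic device through &pnp->dev. A hedged sketch of a PNP driver wired up this way; the ID table entry and all names are placeholders:

	/*
	 * Sketch only: a pnp_driver with its shutdown hook at the PNP layer.
	 */
	#include <linux/pnp.h>

	static struct pnp_device_id example_pnp_ids[] = {
		{ .id = "PNP0b00" },		/* AT-compatible RTC, example only */
		{ }
	};

	static int example_pnp_probe(struct pnp_dev *pnp,
				     const struct pnp_device_id *id)
	{
		return 0;
	}

	static void example_pnp_shutdown(struct pnp_dev *pnp)
	{
		struct device *dev = &pnp->dev;	/* generic device, if needed */

		/* quiesce the hardware here */
		(void)dev;
	}

	static struct pnp_driver example_pnp_driver = {
		.name		= "example-rtc",
		.id_table	= example_pnp_ids,
		.probe		= example_pnp_probe,
		.shutdown	= example_pnp_shutdown,
	};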
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index d033414f7599..e1b700a19648 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile | |||
| @@ -10,5 +10,5 @@ obj-y += ccw_device.o cmf.o | |||
| 10 | obj-$(CONFIG_CHSC_SCH) += chsc_sch.o | 10 | obj-$(CONFIG_CHSC_SCH) += chsc_sch.o |
| 11 | obj-$(CONFIG_CCWGROUP) += ccwgroup.o | 11 | obj-$(CONFIG_CCWGROUP) += ccwgroup.o |
| 12 | 12 | ||
| 13 | qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o | 13 | qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o |
| 14 | obj-$(CONFIG_QDIO) += qdio.o | 14 | obj-$(CONFIG_QDIO) += qdio.o |
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index ff7748a9199d..44f2f6a97f33 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
| @@ -182,6 +182,34 @@ struct scssc_area { | |||
| 182 | u32:32; | 182 | u32:32; |
| 183 | } __attribute__ ((packed)); | 183 | } __attribute__ ((packed)); |
| 184 | 184 | ||
| 185 | struct qdio_dev_perf_stat { | ||
| 186 | unsigned int adapter_int; | ||
| 187 | unsigned int qdio_int; | ||
| 188 | unsigned int pci_request_int; | ||
| 189 | |||
| 190 | unsigned int tasklet_inbound; | ||
| 191 | unsigned int tasklet_inbound_resched; | ||
| 192 | unsigned int tasklet_inbound_resched2; | ||
| 193 | unsigned int tasklet_outbound; | ||
| 194 | |||
| 195 | unsigned int siga_read; | ||
| 196 | unsigned int siga_write; | ||
| 197 | unsigned int siga_sync; | ||
| 198 | |||
| 199 | unsigned int inbound_call; | ||
| 200 | unsigned int inbound_handler; | ||
| 201 | unsigned int stop_polling; | ||
| 202 | unsigned int inbound_queue_full; | ||
| 203 | unsigned int outbound_call; | ||
| 204 | unsigned int outbound_handler; | ||
| 205 | unsigned int fast_requeue; | ||
| 206 | unsigned int target_full; | ||
| 207 | unsigned int eqbs; | ||
| 208 | unsigned int eqbs_partial; | ||
| 209 | unsigned int sqbs; | ||
| 210 | unsigned int sqbs_partial; | ||
| 211 | }; | ||
| 212 | |||
| 185 | struct qdio_input_q { | 213 | struct qdio_input_q { |
| 186 | /* input buffer acknowledgement flag */ | 214 | /* input buffer acknowledgement flag */ |
| 187 | int polling; | 215 | int polling; |
| @@ -269,6 +297,7 @@ struct qdio_irq { | |||
| 269 | u32 *dsci; /* address of device state change indicator */ | 297 | u32 *dsci; /* address of device state change indicator */ |
| 270 | struct ccw_device *cdev; | 298 | struct ccw_device *cdev; |
| 271 | struct dentry *debugfs_dev; | 299 | struct dentry *debugfs_dev; |
| 300 | struct dentry *debugfs_perf; | ||
| 272 | 301 | ||
| 273 | unsigned long int_parm; | 302 | unsigned long int_parm; |
| 274 | struct subchannel_id schid; | 303 | struct subchannel_id schid; |
| @@ -286,9 +315,10 @@ struct qdio_irq { | |||
| 286 | struct ciw aqueue; | 315 | struct ciw aqueue; |
| 287 | 316 | ||
| 288 | struct qdio_ssqd_desc ssqd_desc; | 317 | struct qdio_ssqd_desc ssqd_desc; |
| 289 | |||
| 290 | void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); | 318 | void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); |
| 291 | 319 | ||
| 320 | struct qdio_dev_perf_stat perf_stat; | ||
| 321 | int perf_stat_enabled; | ||
| 292 | /* | 322 | /* |
| 293 | * Warning: Leave these members together at the end so they won't be | 323 | * Warning: Leave these members together at the end so they won't be |
| 294 | * cleared in qdio_setup_irq. | 324 | * cleared in qdio_setup_irq. |
| @@ -311,6 +341,10 @@ struct qdio_irq { | |||
| 311 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ | 341 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ |
| 312 | css_general_characteristics.aif_osa) | 342 | css_general_characteristics.aif_osa) |
| 313 | 343 | ||
| 344 | #define qperf(qdev,attr) qdev->perf_stat.attr | ||
| 345 | #define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \ | ||
| 346 | q->irq_ptr->perf_stat.attr++ | ||
| 347 | |||
| 314 | /* the highest iqdio queue is used for multicast */ | 348 | /* the highest iqdio queue is used for multicast */ |
| 315 | static inline int multicast_outbound(struct qdio_q *q) | 349 | static inline int multicast_outbound(struct qdio_q *q) |
| 316 | { | 350 | { |
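The qdio.h hunks replace the former global counters with a per-device struct qdio_dev_perf_stat hanging off struct qdio_irq, plus the qperf()/qperf_inc() helpers; counting is gated on the new perf_stat_enabled flag. Since qperf_inc() expands to a bare if statement, a call site that itself sits in an if/else should be braced. A small sketch of typical use and of what the macro amounts to:

	/*
	 * Sketch only: bumping a per-device counter in a hot path.  The
	 * second argument must name a member of struct qdio_dev_perf_stat.
	 */
	static void example_count_siga_sync(struct qdio_q *q)
	{
		qperf_inc(q, siga_sync);
		/*
		 * roughly equivalent to:
		 *	if (q->irq_ptr->perf_stat_enabled)
		 *		q->irq_ptr->perf_stat.siga_sync++;
		 */
	}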
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 76769978285f..f49761ff9a00 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
| @@ -55,13 +55,11 @@ static int qstat_show(struct seq_file *m, void *v) | |||
| 55 | if (!q) | 55 | if (!q) |
| 56 | return 0; | 56 | return 0; |
| 57 | 57 | ||
| 58 | seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); | 58 | seq_printf(m, "DSCI: %d nr_used: %d\n", |
| 59 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); | 59 | *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); |
| 60 | seq_printf(m, "ftc: %d\n", q->first_to_check); | 60 | seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); |
| 61 | seq_printf(m, "last_move: %d\n", q->last_move); | 61 | seq_printf(m, "polling: %d ack start: %d ack count: %d\n", |
| 62 | seq_printf(m, "polling: %d\n", q->u.in.polling); | 62 | q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); |
| 63 | seq_printf(m, "ack start: %d\n", q->u.in.ack_start); | ||
| 64 | seq_printf(m, "ack count: %d\n", q->u.in.ack_count); | ||
| 65 | seq_printf(m, "slsb buffer states:\n"); | 63 | seq_printf(m, "slsb buffer states:\n"); |
| 66 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | 64 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); |
| 67 | 65 | ||
| @@ -110,7 +108,6 @@ static ssize_t qstat_seq_write(struct file *file, const char __user *buf, | |||
| 110 | 108 | ||
| 111 | if (!q) | 109 | if (!q) |
| 112 | return 0; | 110 | return 0; |
| 113 | |||
| 114 | if (q->is_input_q) | 111 | if (q->is_input_q) |
| 115 | xchg(q->irq_ptr->dsci, 1); | 112 | xchg(q->irq_ptr->dsci, 1); |
| 116 | local_bh_disable(); | 113 | local_bh_disable(); |
| @@ -134,6 +131,98 @@ static const struct file_operations debugfs_fops = { | |||
| 134 | .release = single_release, | 131 | .release = single_release, |
| 135 | }; | 132 | }; |
| 136 | 133 | ||
| 134 | static char *qperf_names[] = { | ||
| 135 | "Assumed adapter interrupts", | ||
| 136 | "QDIO interrupts", | ||
| 137 | "Requested PCIs", | ||
| 138 | "Inbound tasklet runs", | ||
| 139 | "Inbound tasklet resched", | ||
| 140 | "Inbound tasklet resched2", | ||
| 141 | "Outbound tasklet runs", | ||
| 142 | "SIGA read", | ||
| 143 | "SIGA write", | ||
| 144 | "SIGA sync", | ||
| 145 | "Inbound calls", | ||
| 146 | "Inbound handler", | ||
| 147 | "Inbound stop_polling", | ||
| 148 | "Inbound queue full", | ||
| 149 | "Outbound calls", | ||
| 150 | "Outbound handler", | ||
| 151 | "Outbound fast_requeue", | ||
| 152 | "Outbound target_full", | ||
| 153 | "QEBSM eqbs", | ||
| 154 | "QEBSM eqbs partial", | ||
| 155 | "QEBSM sqbs", | ||
| 156 | "QEBSM sqbs partial" | ||
| 157 | }; | ||
| 158 | |||
| 159 | static int qperf_show(struct seq_file *m, void *v) | ||
| 160 | { | ||
| 161 | struct qdio_irq *irq_ptr = m->private; | ||
| 162 | unsigned int *stat; | ||
| 163 | int i; | ||
| 164 | |||
| 165 | if (!irq_ptr) | ||
| 166 | return 0; | ||
| 167 | if (!irq_ptr->perf_stat_enabled) { | ||
| 168 | seq_printf(m, "disabled\n"); | ||
| 169 | return 0; | ||
| 170 | } | ||
| 171 | stat = (unsigned int *)&irq_ptr->perf_stat; | ||
| 172 | |||
| 173 | for (i = 0; i < ARRAY_SIZE(qperf_names); i++) | ||
| 174 | seq_printf(m, "%26s:\t%u\n", | ||
| 175 | qperf_names[i], *(stat + i)); | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, | ||
| 180 | size_t count, loff_t *off) | ||
| 181 | { | ||
| 182 | struct seq_file *seq = file->private_data; | ||
| 183 | struct qdio_irq *irq_ptr = seq->private; | ||
| 184 | unsigned long val; | ||
| 185 | char buf[8]; | ||
| 186 | int ret; | ||
| 187 | |||
| 188 | if (!irq_ptr) | ||
| 189 | return 0; | ||
| 190 | if (count >= sizeof(buf)) | ||
| 191 | return -EINVAL; | ||
| 192 | if (copy_from_user(&buf, ubuf, count)) | ||
| 193 | return -EFAULT; | ||
| 194 | buf[count] = 0; | ||
| 195 | |||
| 196 | ret = strict_strtoul(buf, 10, &val); | ||
| 197 | if (ret < 0) | ||
| 198 | return ret; | ||
| 199 | |||
| 200 | switch (val) { | ||
| 201 | case 0: | ||
| 202 | irq_ptr->perf_stat_enabled = 0; | ||
| 203 | memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); | ||
| 204 | break; | ||
| 205 | case 1: | ||
| 206 | irq_ptr->perf_stat_enabled = 1; | ||
| 207 | break; | ||
| 208 | } | ||
| 209 | return count; | ||
| 210 | } | ||
| 211 | |||
| 212 | static int qperf_seq_open(struct inode *inode, struct file *filp) | ||
| 213 | { | ||
| 214 | return single_open(filp, qperf_show, | ||
| 215 | filp->f_path.dentry->d_inode->i_private); | ||
| 216 | } | ||
| 217 | |||
| 218 | static struct file_operations debugfs_perf_fops = { | ||
| 219 | .owner = THIS_MODULE, | ||
| 220 | .open = qperf_seq_open, | ||
| 221 | .read = seq_read, | ||
| 222 | .write = qperf_seq_write, | ||
| 223 | .llseek = seq_lseek, | ||
| 224 | .release = single_release, | ||
| 225 | }; | ||
| 137 | static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) | 226 | static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) |
| 138 | { | 227 | { |
| 139 | char name[QDIO_DEBUGFS_NAME_LEN]; | 228 | char name[QDIO_DEBUGFS_NAME_LEN]; |
| @@ -156,6 +245,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) | |||
| 156 | debugfs_root); | 245 | debugfs_root); |
| 157 | if (IS_ERR(irq_ptr->debugfs_dev)) | 246 | if (IS_ERR(irq_ptr->debugfs_dev)) |
| 158 | irq_ptr->debugfs_dev = NULL; | 247 | irq_ptr->debugfs_dev = NULL; |
| 248 | |||
| 249 | irq_ptr->debugfs_perf = debugfs_create_file("statistics", | ||
| 250 | S_IFREG | S_IRUGO | S_IWUSR, | ||
| 251 | irq_ptr->debugfs_dev, irq_ptr, | ||
| 252 | &debugfs_perf_fops); | ||
| 253 | if (IS_ERR(irq_ptr->debugfs_perf)) | ||
| 254 | irq_ptr->debugfs_perf = NULL; | ||
| 255 | |||
| 159 | for_each_input_queue(irq_ptr, q, i) | 256 | for_each_input_queue(irq_ptr, q, i) |
| 160 | setup_debugfs_entry(q, cdev); | 257 | setup_debugfs_entry(q, cdev); |
| 161 | for_each_output_queue(irq_ptr, q, i) | 258 | for_each_output_queue(irq_ptr, q, i) |
| @@ -171,6 +268,7 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd | |||
| 171 | debugfs_remove(q->debugfs_q); | 268 | debugfs_remove(q->debugfs_q); |
| 172 | for_each_output_queue(irq_ptr, q, i) | 269 | for_each_output_queue(irq_ptr, q, i) |
| 173 | debugfs_remove(q->debugfs_q); | 270 | debugfs_remove(q->debugfs_q); |
| 271 | debugfs_remove(irq_ptr->debugfs_perf); | ||
| 174 | debugfs_remove(irq_ptr->debugfs_dev); | 272 | debugfs_remove(irq_ptr->debugfs_dev); |
| 175 | } | 273 | } |
| 176 | 274 | ||
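The new per-device debugfs file "statistics", created beneath the existing qdio device directory, enables collection when 1 is written to it and disables and clears the counters when 0 is written; reads print one line per counter. Note that qperf_show() walks struct qdio_dev_perf_stat as a flat array of unsigned int and pairs each slot with the string at the same index in qperf_names[], so the two must stay in order and in step. A hedged sketch of a compile-time check that could enforce that (not part of the patch):

	/*
	 * Sketch only: keep qperf_names[] and struct qdio_dev_perf_stat in
	 * lockstep.  Shown as a possible safeguard, not as existing code.
	 */
	static int qperf_show_checked(struct seq_file *m, struct qdio_irq *irq_ptr)
	{
		unsigned int *stat = (unsigned int *)&irq_ptr->perf_stat;
		int i;

		BUILD_BUG_ON(ARRAY_SIZE(qperf_names) !=
			     sizeof(struct qdio_dev_perf_stat) / sizeof(unsigned int));

		for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
			seq_printf(m, "%26s:\t%u\n", qperf_names[i], stat[i]);
		return 0;
	}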
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index b2275c5000e7..999fe80c4051 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include "device.h" | 22 | #include "device.h" |
| 23 | #include "qdio.h" | 23 | #include "qdio.h" |
| 24 | #include "qdio_debug.h" | 24 | #include "qdio_debug.h" |
| 25 | #include "qdio_perf.h" | ||
| 26 | 25 | ||
| 27 | MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ | 26 | MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ |
| 28 | "Jan Glauber <jang@linux.vnet.ibm.com>"); | 27 | "Jan Glauber <jang@linux.vnet.ibm.com>"); |
| @@ -126,7 +125,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | |||
| 126 | int rc; | 125 | int rc; |
| 127 | 126 | ||
| 128 | BUG_ON(!q->irq_ptr->sch_token); | 127 | BUG_ON(!q->irq_ptr->sch_token); |
| 129 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); | 128 | qperf_inc(q, eqbs); |
| 130 | 129 | ||
| 131 | if (!q->is_input_q) | 130 | if (!q->is_input_q) |
| 132 | nr += q->irq_ptr->nr_input_qs; | 131 | nr += q->irq_ptr->nr_input_qs; |
| @@ -139,7 +138,7 @@ again: | |||
| 139 | * buffers later. | 138 | * buffers later. |
| 140 | */ | 139 | */ |
| 141 | if ((ccq == 96) && (count != tmp_count)) { | 140 | if ((ccq == 96) && (count != tmp_count)) { |
| 142 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); | 141 | qperf_inc(q, eqbs_partial); |
| 143 | return (count - tmp_count); | 142 | return (count - tmp_count); |
| 144 | } | 143 | } |
| 145 | 144 | ||
| @@ -182,7 +181,7 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, | |||
| 182 | return 0; | 181 | return 0; |
| 183 | 182 | ||
| 184 | BUG_ON(!q->irq_ptr->sch_token); | 183 | BUG_ON(!q->irq_ptr->sch_token); |
| 185 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); | 184 | qperf_inc(q, sqbs); |
| 186 | 185 | ||
| 187 | if (!q->is_input_q) | 186 | if (!q->is_input_q) |
| 188 | nr += q->irq_ptr->nr_input_qs; | 187 | nr += q->irq_ptr->nr_input_qs; |
| @@ -191,7 +190,7 @@ again: | |||
| 191 | rc = qdio_check_ccq(q, ccq); | 190 | rc = qdio_check_ccq(q, ccq); |
| 192 | if (rc == 1) { | 191 | if (rc == 1) { |
| 193 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); | 192 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); |
| 194 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); | 193 | qperf_inc(q, sqbs_partial); |
| 195 | goto again; | 194 | goto again; |
| 196 | } | 195 | } |
| 197 | if (rc < 0) { | 196 | if (rc < 0) { |
| @@ -285,7 +284,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, | |||
| 285 | return 0; | 284 | return 0; |
| 286 | 285 | ||
| 287 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); | 286 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); |
| 288 | qdio_perf_stat_inc(&perf_stats.siga_sync); | 287 | qperf_inc(q, siga_sync); |
| 289 | 288 | ||
| 290 | cc = do_siga_sync(q->irq_ptr->schid, output, input); | 289 | cc = do_siga_sync(q->irq_ptr->schid, output, input); |
| 291 | if (cc) | 290 | if (cc) |
| @@ -350,7 +349,7 @@ static inline int qdio_siga_input(struct qdio_q *q) | |||
| 350 | int cc; | 349 | int cc; |
| 351 | 350 | ||
| 352 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); | 351 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); |
| 353 | qdio_perf_stat_inc(&perf_stats.siga_in); | 352 | qperf_inc(q, siga_read); |
| 354 | 353 | ||
| 355 | cc = do_siga_input(q->irq_ptr->schid, q->mask); | 354 | cc = do_siga_input(q->irq_ptr->schid, q->mask); |
| 356 | if (cc) | 355 | if (cc) |
| @@ -382,7 +381,7 @@ static inline void qdio_stop_polling(struct qdio_q *q) | |||
| 382 | return; | 381 | return; |
| 383 | 382 | ||
| 384 | q->u.in.polling = 0; | 383 | q->u.in.polling = 0; |
| 385 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); | 384 | qperf_inc(q, stop_polling); |
| 386 | 385 | ||
| 387 | /* show the card that we are not polling anymore */ | 386 | /* show the card that we are not polling anymore */ |
| 388 | if (is_qebsm(q)) { | 387 | if (is_qebsm(q)) { |
| @@ -400,7 +399,7 @@ static void announce_buffer_error(struct qdio_q *q, int count) | |||
| 400 | /* special handling for no target buffer empty */ | 399 | /* special handling for no target buffer empty */ |
| 401 | if ((!q->is_input_q && | 400 | if ((!q->is_input_q && |
| 402 | (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { | 401 | (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { |
| 403 | qdio_perf_stat_inc(&perf_stats.outbound_target_full); | 402 | qperf_inc(q, target_full); |
| 404 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", | 403 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", |
| 405 | q->first_to_check); | 404 | q->first_to_check); |
| 406 | return; | 405 | return; |
| @@ -487,7 +486,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) | |||
| 487 | inbound_primed(q, count); | 486 | inbound_primed(q, count); |
| 488 | q->first_to_check = add_buf(q->first_to_check, count); | 487 | q->first_to_check = add_buf(q->first_to_check, count); |
| 489 | if (atomic_sub(count, &q->nr_buf_used) == 0) | 488 | if (atomic_sub(count, &q->nr_buf_used) == 0) |
| 490 | qdio_perf_stat_inc(&perf_stats.inbound_queue_full); | 489 | qperf_inc(q, inbound_queue_full); |
| 491 | break; | 490 | break; |
| 492 | case SLSB_P_INPUT_ERROR: | 491 | case SLSB_P_INPUT_ERROR: |
| 493 | announce_buffer_error(q, count); | 492 | announce_buffer_error(q, count); |
| @@ -567,9 +566,10 @@ static void qdio_kick_handler(struct qdio_q *q) | |||
| 567 | count = sub_buf(end, start); | 566 | count = sub_buf(end, start); |
| 568 | 567 | ||
| 569 | if (q->is_input_q) { | 568 | if (q->is_input_q) { |
| 570 | qdio_perf_stat_inc(&perf_stats.inbound_handler); | 569 | qperf_inc(q, inbound_handler); |
| 571 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); | 570 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); |
| 572 | } else | 571 | } else |
| 572 | qperf_inc(q, outbound_handler); | ||
| 573 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", | 573 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", |
| 574 | start, count); | 574 | start, count); |
| 575 | 575 | ||
| @@ -583,24 +583,28 @@ static void qdio_kick_handler(struct qdio_q *q) | |||
| 583 | 583 | ||
| 584 | static void __qdio_inbound_processing(struct qdio_q *q) | 584 | static void __qdio_inbound_processing(struct qdio_q *q) |
| 585 | { | 585 | { |
| 586 | qdio_perf_stat_inc(&perf_stats.tasklet_inbound); | 586 | qperf_inc(q, tasklet_inbound); |
| 587 | again: | 587 | again: |
| 588 | if (!qdio_inbound_q_moved(q)) | 588 | if (!qdio_inbound_q_moved(q)) |
| 589 | return; | 589 | return; |
| 590 | 590 | ||
| 591 | qdio_kick_handler(q); | 591 | qdio_kick_handler(q); |
| 592 | 592 | ||
| 593 | if (!qdio_inbound_q_done(q)) | 593 | if (!qdio_inbound_q_done(q)) { |
| 594 | /* means poll time is not yet over */ | 594 | /* means poll time is not yet over */ |
| 595 | qperf_inc(q, tasklet_inbound_resched); | ||
| 595 | goto again; | 596 | goto again; |
| 597 | } | ||
| 596 | 598 | ||
| 597 | qdio_stop_polling(q); | 599 | qdio_stop_polling(q); |
| 598 | /* | 600 | /* |
| 599 | * We need to check again to not lose initiative after | 601 | * We need to check again to not lose initiative after |
| 600 | * resetting the ACK state. | 602 | * resetting the ACK state. |
| 601 | */ | 603 | */ |
| 602 | if (!qdio_inbound_q_done(q)) | 604 | if (!qdio_inbound_q_done(q)) { |
| 605 | qperf_inc(q, tasklet_inbound_resched2); | ||
| 603 | goto again; | 606 | goto again; |
| 607 | } | ||
| 604 | } | 608 | } |
| 605 | 609 | ||
| 606 | void qdio_inbound_processing(unsigned long data) | 610 | void qdio_inbound_processing(unsigned long data) |
| @@ -688,7 +692,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q) | |||
| 688 | return 0; | 692 | return 0; |
| 689 | 693 | ||
| 690 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); | 694 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); |
| 691 | qdio_perf_stat_inc(&perf_stats.siga_out); | 695 | qperf_inc(q, siga_write); |
| 692 | 696 | ||
| 693 | cc = qdio_siga_output(q, &busy_bit); | 697 | cc = qdio_siga_output(q, &busy_bit); |
| 694 | switch (cc) { | 698 | switch (cc) { |
| @@ -711,7 +715,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q) | |||
| 711 | 715 | ||
| 712 | static void __qdio_outbound_processing(struct qdio_q *q) | 716 | static void __qdio_outbound_processing(struct qdio_q *q) |
| 713 | { | 717 | { |
| 714 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); | 718 | qperf_inc(q, tasklet_outbound); |
| 715 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); | 719 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); |
| 716 | 720 | ||
| 717 | if (qdio_outbound_q_moved(q)) | 721 | if (qdio_outbound_q_moved(q)) |
| @@ -739,12 +743,9 @@ static void __qdio_outbound_processing(struct qdio_q *q) | |||
| 739 | */ | 743 | */ |
| 740 | if (qdio_outbound_q_done(q)) | 744 | if (qdio_outbound_q_done(q)) |
| 741 | del_timer(&q->u.out.timer); | 745 | del_timer(&q->u.out.timer); |
| 742 | else { | 746 | else |
| 743 | if (!timer_pending(&q->u.out.timer)) { | 747 | if (!timer_pending(&q->u.out.timer)) |
| 744 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); | 748 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); |
| 745 | qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); | ||
| 746 | } | ||
| 747 | } | ||
| 748 | return; | 749 | return; |
| 749 | 750 | ||
| 750 | sched: | 751 | sched: |
| @@ -784,7 +785,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
| 784 | 785 | ||
| 785 | static void __tiqdio_inbound_processing(struct qdio_q *q) | 786 | static void __tiqdio_inbound_processing(struct qdio_q *q) |
| 786 | { | 787 | { |
| 787 | qdio_perf_stat_inc(&perf_stats.thinint_inbound); | 788 | qperf_inc(q, tasklet_inbound); |
| 788 | qdio_sync_after_thinint(q); | 789 | qdio_sync_after_thinint(q); |
| 789 | 790 | ||
| 790 | /* | 791 | /* |
| @@ -799,7 +800,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
| 799 | qdio_kick_handler(q); | 800 | qdio_kick_handler(q); |
| 800 | 801 | ||
| 801 | if (!qdio_inbound_q_done(q)) { | 802 | if (!qdio_inbound_q_done(q)) { |
| 802 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); | 803 | qperf_inc(q, tasklet_inbound_resched); |
| 803 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 804 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { |
| 804 | tasklet_schedule(&q->tasklet); | 805 | tasklet_schedule(&q->tasklet); |
| 805 | return; | 806 | return; |
| @@ -812,7 +813,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
| 812 | * resetting the ACK state. | 813 | * resetting the ACK state. |
| 813 | */ | 814 | */ |
| 814 | if (!qdio_inbound_q_done(q)) { | 815 | if (!qdio_inbound_q_done(q)) { |
| 815 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); | 816 | qperf_inc(q, tasklet_inbound_resched2); |
| 816 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 817 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) |
| 817 | tasklet_schedule(&q->tasklet); | 818 | tasklet_schedule(&q->tasklet); |
| 818 | } | 819 | } |
| @@ -851,8 +852,6 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
| 851 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 852 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) |
| 852 | return; | 853 | return; |
| 853 | 854 | ||
| 854 | qdio_perf_stat_inc(&perf_stats.pci_int); | ||
| 855 | |||
| 856 | for_each_input_queue(irq_ptr, q, i) | 855 | for_each_input_queue(irq_ptr, q, i) |
| 857 | tasklet_schedule(&q->tasklet); | 856 | tasklet_schedule(&q->tasklet); |
| 858 | 857 | ||
| @@ -923,8 +922,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 923 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 922 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
| 924 | int cstat, dstat; | 923 | int cstat, dstat; |
| 925 | 924 | ||
| 926 | qdio_perf_stat_inc(&perf_stats.qdio_int); | ||
| 927 | |||
| 928 | if (!intparm || !irq_ptr) { | 925 | if (!intparm || !irq_ptr) { |
| 929 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); | 926 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); |
| 930 | return; | 927 | return; |
| @@ -1383,6 +1380,8 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, | |||
| 1383 | { | 1380 | { |
| 1384 | int used, diff; | 1381 | int used, diff; |
| 1385 | 1382 | ||
| 1383 | qperf_inc(q, inbound_call); | ||
| 1384 | |||
| 1386 | if (!q->u.in.polling) | 1385 | if (!q->u.in.polling) |
| 1387 | goto set; | 1386 | goto set; |
| 1388 | 1387 | ||
| @@ -1438,14 +1437,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
| 1438 | unsigned char state; | 1437 | unsigned char state; |
| 1439 | int used, rc = 0; | 1438 | int used, rc = 0; |
| 1440 | 1439 | ||
| 1441 | qdio_perf_stat_inc(&perf_stats.outbound_handler); | 1440 | qperf_inc(q, outbound_call); |
| 1442 | 1441 | ||
| 1443 | count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); | 1442 | count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); |
| 1444 | used = atomic_add_return(count, &q->nr_buf_used); | 1443 | used = atomic_add_return(count, &q->nr_buf_used); |
| 1445 | BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); | 1444 | BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); |
| 1446 | 1445 | ||
| 1447 | if (callflags & QDIO_FLAG_PCI_OUT) | 1446 | if (callflags & QDIO_FLAG_PCI_OUT) { |
| 1448 | q->u.out.pci_out_enabled = 1; | 1447 | q->u.out.pci_out_enabled = 1; |
| 1448 | qperf_inc(q, pci_request_int); | ||
| 1449 | } | ||
| 1449 | else | 1450 | else |
| 1450 | q->u.out.pci_out_enabled = 0; | 1451 | q->u.out.pci_out_enabled = 0; |
| 1451 | 1452 | ||
| @@ -1484,7 +1485,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
| 1484 | if (state != SLSB_CU_OUTPUT_PRIMED) | 1485 | if (state != SLSB_CU_OUTPUT_PRIMED) |
| 1485 | rc = qdio_kick_outbound_q(q); | 1486 | rc = qdio_kick_outbound_q(q); |
| 1486 | else | 1487 | else |
| 1487 | qdio_perf_stat_inc(&perf_stats.fast_requeue); | 1488 | qperf_inc(q, fast_requeue); |
| 1488 | 1489 | ||
| 1489 | out: | 1490 | out: |
| 1490 | tasklet_schedule(&q->tasklet); | 1491 | tasklet_schedule(&q->tasklet); |
| @@ -1540,16 +1541,11 @@ static int __init init_QDIO(void) | |||
| 1540 | rc = qdio_debug_init(); | 1541 | rc = qdio_debug_init(); |
| 1541 | if (rc) | 1542 | if (rc) |
| 1542 | goto out_ti; | 1543 | goto out_ti; |
| 1543 | rc = qdio_setup_perf_stats(); | ||
| 1544 | if (rc) | ||
| 1545 | goto out_debug; | ||
| 1546 | rc = tiqdio_register_thinints(); | 1544 | rc = tiqdio_register_thinints(); |
| 1547 | if (rc) | 1545 | if (rc) |
| 1548 | goto out_perf; | 1546 | goto out_debug; |
| 1549 | return 0; | 1547 | return 0; |
| 1550 | 1548 | ||
| 1551 | out_perf: | ||
| 1552 | qdio_remove_perf_stats(); | ||
| 1553 | out_debug: | 1549 | out_debug: |
| 1554 | qdio_debug_exit(); | 1550 | qdio_debug_exit(); |
| 1555 | out_ti: | 1551 | out_ti: |
| @@ -1563,7 +1559,6 @@ static void __exit exit_QDIO(void) | |||
| 1563 | { | 1559 | { |
| 1564 | tiqdio_unregister_thinints(); | 1560 | tiqdio_unregister_thinints(); |
| 1565 | tiqdio_free_memory(); | 1561 | tiqdio_free_memory(); |
| 1566 | qdio_remove_perf_stats(); | ||
| 1567 | qdio_debug_exit(); | 1562 | qdio_debug_exit(); |
| 1568 | qdio_setup_exit(); | 1563 | qdio_setup_exit(); |
| 1569 | } | 1564 | } |
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c deleted file mode 100644 index 54f7c325a3e6..000000000000 --- a/drivers/s390/cio/qdio_perf.c +++ /dev/null | |||
| @@ -1,149 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * drivers/s390/cio/qdio_perf.c | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * | ||
| 6 | * Author: Jan Glauber (jang@linux.vnet.ibm.com) | ||
| 7 | */ | ||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/proc_fs.h> | ||
| 10 | #include <linux/seq_file.h> | ||
| 11 | #include <asm/ccwdev.h> | ||
| 12 | |||
| 13 | #include "cio.h" | ||
| 14 | #include "css.h" | ||
| 15 | #include "device.h" | ||
| 16 | #include "ioasm.h" | ||
| 17 | #include "chsc.h" | ||
| 18 | #include "qdio_debug.h" | ||
| 19 | #include "qdio_perf.h" | ||
| 20 | |||
| 21 | int qdio_performance_stats; | ||
| 22 | struct qdio_perf_stats perf_stats; | ||
| 23 | |||
| 24 | #ifdef CONFIG_PROC_FS | ||
| 25 | static struct proc_dir_entry *qdio_perf_pde; | ||
| 26 | #endif | ||
| 27 | |||
| 28 | /* | ||
| 29 | * procfs functions | ||
| 30 | */ | ||
| 31 | static int qdio_perf_proc_show(struct seq_file *m, void *v) | ||
| 32 | { | ||
| 33 | seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n", | ||
| 34 | (long)atomic_long_read(&perf_stats.qdio_int)); | ||
| 35 | seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n", | ||
| 36 | (long)atomic_long_read(&perf_stats.pci_int)); | ||
| 37 | seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n", | ||
| 38 | (long)atomic_long_read(&perf_stats.thin_int)); | ||
| 39 | seq_printf(m, "\n"); | ||
| 40 | seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n", | ||
| 41 | (long)atomic_long_read(&perf_stats.tasklet_inbound)); | ||
| 42 | seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n", | ||
| 43 | (long)atomic_long_read(&perf_stats.tasklet_outbound)); | ||
| 44 | seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n", | ||
| 45 | (long)atomic_long_read(&perf_stats.tasklet_thinint), | ||
| 46 | (long)atomic_long_read(&perf_stats.tasklet_thinint_loop)); | ||
| 47 | seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n", | ||
| 48 | (long)atomic_long_read(&perf_stats.thinint_inbound), | ||
| 49 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop)); | ||
| 50 | seq_printf(m, "\n"); | ||
| 51 | seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n", | ||
| 52 | (long)atomic_long_read(&perf_stats.siga_in)); | ||
| 53 | seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n", | ||
| 54 | (long)atomic_long_read(&perf_stats.siga_out)); | ||
| 55 | seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n", | ||
| 56 | (long)atomic_long_read(&perf_stats.siga_sync)); | ||
| 57 | seq_printf(m, "\n"); | ||
| 58 | seq_printf(m, "Number of inbound transfers\t\t\t: %li\n", | ||
| 59 | (long)atomic_long_read(&perf_stats.inbound_handler)); | ||
| 60 | seq_printf(m, "Number of outbound transfers\t\t\t: %li\n", | ||
| 61 | (long)atomic_long_read(&perf_stats.outbound_handler)); | ||
| 62 | seq_printf(m, "\n"); | ||
| 63 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", | ||
| 64 | (long)atomic_long_read(&perf_stats.fast_requeue)); | ||
| 65 | seq_printf(m, "Number of outbound target full condition\t: %li\n", | ||
| 66 | (long)atomic_long_read(&perf_stats.outbound_target_full)); | ||
| 67 | seq_printf(m, "Number of inbound queue full condition\t\t: %li\n", | ||
| 68 | (long)atomic_long_read(&perf_stats.inbound_queue_full)); | ||
| 69 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", | ||
| 70 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); | ||
| 71 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", | ||
| 72 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); | ||
| 73 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", | ||
| 74 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); | ||
| 75 | seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", | ||
| 76 | (long)atomic_long_read(&perf_stats.debug_eqbs_all), | ||
| 77 | (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); | ||
| 78 | seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", | ||
| 79 | (long)atomic_long_read(&perf_stats.debug_sqbs_all), | ||
| 80 | (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); | ||
| 81 | seq_printf(m, "\n"); | ||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | static int qdio_perf_seq_open(struct inode *inode, struct file *filp) | ||
| 85 | { | ||
| 86 | return single_open(filp, qdio_perf_proc_show, NULL); | ||
| 87 | } | ||
| 88 | |||
| 89 | static const struct file_operations qdio_perf_proc_fops = { | ||
| 90 | .owner = THIS_MODULE, | ||
| 91 | .open = qdio_perf_seq_open, | ||
| 92 | .read = seq_read, | ||
| 93 | .llseek = seq_lseek, | ||
| 94 | .release = single_release, | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* | ||
| 98 | * sysfs functions | ||
| 99 | */ | ||
| 100 | static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf) | ||
| 101 | { | ||
| 102 | return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0); | ||
| 103 | } | ||
| 104 | |||
| 105 | static ssize_t qdio_perf_stats_store(struct bus_type *bus, | ||
| 106 | const char *buf, size_t count) | ||
| 107 | { | ||
| 108 | unsigned long i; | ||
| 109 | |||
| 110 | if (strict_strtoul(buf, 16, &i) != 0) | ||
| 111 | return -EINVAL; | ||
| 112 | if ((i != 0) && (i != 1)) | ||
| 113 | return -EINVAL; | ||
| 114 | if (i == qdio_performance_stats) | ||
| 115 | return count; | ||
| 116 | |||
| 117 | qdio_performance_stats = i; | ||
| 118 | /* reset performance statistics */ | ||
| 119 | if (i == 0) | ||
| 120 | memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); | ||
| 121 | return count; | ||
| 122 | } | ||
| 123 | |||
| 124 | static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show, | ||
| 125 | qdio_perf_stats_store); | ||
| 126 | |||
| 127 | int __init qdio_setup_perf_stats(void) | ||
| 128 | { | ||
| 129 | int rc; | ||
| 130 | |||
| 131 | rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); | ||
| 132 | if (rc) | ||
| 133 | return rc; | ||
| 134 | |||
| 135 | #ifdef CONFIG_PROC_FS | ||
| 136 | memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); | ||
| 137 | qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO, | ||
| 138 | NULL, &qdio_perf_proc_fops); | ||
| 139 | #endif | ||
| 140 | return 0; | ||
| 141 | } | ||
| 142 | |||
| 143 | void qdio_remove_perf_stats(void) | ||
| 144 | { | ||
| 145 | #ifdef CONFIG_PROC_FS | ||
| 146 | remove_proc_entry("qdio_perf", NULL); | ||
| 147 | #endif | ||
| 148 | bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); | ||
| 149 | } | ||
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h deleted file mode 100644 index 12454231dc8b..000000000000 --- a/drivers/s390/cio/qdio_perf.h +++ /dev/null | |||
| @@ -1,62 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * drivers/s390/cio/qdio_perf.h | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * | ||
| 6 | * Author: Jan Glauber (jang@linux.vnet.ibm.com) | ||
| 7 | */ | ||
| 8 | #ifndef QDIO_PERF_H | ||
| 9 | #define QDIO_PERF_H | ||
| 10 | |||
| 11 | #include <linux/types.h> | ||
| 12 | #include <asm/atomic.h> | ||
| 13 | |||
| 14 | struct qdio_perf_stats { | ||
| 15 | /* interrupt handler calls */ | ||
| 16 | atomic_long_t qdio_int; | ||
| 17 | atomic_long_t pci_int; | ||
| 18 | atomic_long_t thin_int; | ||
| 19 | |||
| 20 | /* tasklet runs */ | ||
| 21 | atomic_long_t tasklet_inbound; | ||
| 22 | atomic_long_t tasklet_outbound; | ||
| 23 | atomic_long_t tasklet_thinint; | ||
| 24 | atomic_long_t tasklet_thinint_loop; | ||
| 25 | atomic_long_t thinint_inbound; | ||
| 26 | atomic_long_t thinint_inbound_loop; | ||
| 27 | atomic_long_t thinint_inbound_loop2; | ||
| 28 | |||
| 29 | /* signal adapter calls */ | ||
| 30 | atomic_long_t siga_out; | ||
| 31 | atomic_long_t siga_in; | ||
| 32 | atomic_long_t siga_sync; | ||
| 33 | |||
| 34 | /* misc */ | ||
| 35 | atomic_long_t inbound_handler; | ||
| 36 | atomic_long_t outbound_handler; | ||
| 37 | atomic_long_t fast_requeue; | ||
| 38 | atomic_long_t outbound_target_full; | ||
| 39 | atomic_long_t inbound_queue_full; | ||
| 40 | |||
| 41 | /* for debugging */ | ||
| 42 | atomic_long_t debug_tl_out_timer; | ||
| 43 | atomic_long_t debug_stop_polling; | ||
| 44 | atomic_long_t debug_eqbs_all; | ||
| 45 | atomic_long_t debug_eqbs_incomplete; | ||
| 46 | atomic_long_t debug_sqbs_all; | ||
| 47 | atomic_long_t debug_sqbs_incomplete; | ||
| 48 | }; | ||
| 49 | |||
| 50 | extern struct qdio_perf_stats perf_stats; | ||
| 51 | extern int qdio_performance_stats; | ||
| 52 | |||
| 53 | static inline void qdio_perf_stat_inc(atomic_long_t *count) | ||
| 54 | { | ||
| 55 | if (qdio_performance_stats) | ||
| 56 | atomic_long_inc(count); | ||
| 57 | } | ||
| 58 | |||
| 59 | int qdio_setup_perf_stats(void); | ||
| 60 | void qdio_remove_perf_stats(void); | ||
| 61 | |||
| 62 | #endif | ||
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 981a77ea7ee2..091d904d3182 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
| @@ -1,9 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/drivers/s390/cio/thinint_qdio.c | 2 | * linux/drivers/s390/cio/thinint_qdio.c |
| 3 | * | 3 | * |
| 4 | * thin interrupt support for qdio | 4 | * Copyright 2000,2009 IBM Corp. |
| 5 | * | ||
| 6 | * Copyright 2000-2008 IBM Corp. | ||
| 7 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com> | 5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com> |
| 8 | * Cornelia Huck <cornelia.huck@de.ibm.com> | 6 | * Cornelia Huck <cornelia.huck@de.ibm.com> |
| 9 | * Jan Glauber <jang@linux.vnet.ibm.com> | 7 | * Jan Glauber <jang@linux.vnet.ibm.com> |
| @@ -19,7 +17,6 @@ | |||
| 19 | #include "ioasm.h" | 17 | #include "ioasm.h" |
| 20 | #include "qdio.h" | 18 | #include "qdio.h" |
| 21 | #include "qdio_debug.h" | 19 | #include "qdio_debug.h" |
| 22 | #include "qdio_perf.h" | ||
| 23 | 20 | ||
| 24 | /* | 21 | /* |
| 25 | * Restriction: only 63 iqdio subchannels would have its own indicator, | 22 | * Restriction: only 63 iqdio subchannels would have its own indicator, |
| @@ -132,8 +129,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data) | |||
| 132 | { | 129 | { |
| 133 | struct qdio_q *q; | 130 | struct qdio_q *q; |
| 134 | 131 | ||
| 135 | qdio_perf_stat_inc(&perf_stats.thin_int); | ||
| 136 | |||
| 137 | /* | 132 | /* |
| 138 | * SVS only when needed: issue SVS to benefit from iqdio interrupt | 133 | * SVS only when needed: issue SVS to benefit from iqdio interrupt |
| 139 | * avoidance (SVS clears adapter interrupt suppression overwrite) | 134 | * avoidance (SVS clears adapter interrupt suppression overwrite) |
| @@ -154,6 +149,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data) | |||
| 154 | list_for_each_entry_rcu(q, &tiq_list, entry) | 149 | list_for_each_entry_rcu(q, &tiq_list, entry) |
| 155 | /* only process queues from changed sets */ | 150 | /* only process queues from changed sets */ |
| 156 | if (*q->irq_ptr->dsci) { | 151 | if (*q->irq_ptr->dsci) { |
| 152 | qperf_inc(q, adapter_int); | ||
| 157 | 153 | ||
| 158 | /* only clear it if the indicator is non-shared */ | 154 | /* only clear it if the indicator is non-shared */ |
| 159 | if (!shared_ind(q->irq_ptr)) | 155 | if (!shared_ind(q->irq_ptr)) |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 3c77bfe0764c..147bb1a69aba 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
| @@ -3398,7 +3398,7 @@ claw_init(void) | |||
| 3398 | goto out_err; | 3398 | goto out_err; |
| 3399 | } | 3399 | } |
| 3400 | CLAW_DBF_TEXT(2, setup, "init_mod"); | 3400 | CLAW_DBF_TEXT(2, setup, "init_mod"); |
| 3401 | claw_root_dev = root_device_register("qeth"); | 3401 | claw_root_dev = root_device_register("claw"); |
| 3402 | ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0; | 3402 | ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0; |
| 3403 | if (ret) | 3403 | if (ret) |
| 3404 | goto register_err; | 3404 | goto register_err; |
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index 7c815d3327f7..28d86f9df83c 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c | |||
| @@ -522,6 +522,40 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op, | |||
| 522 | set_fan_speeds(fp); | 522 | set_fan_speeds(fp); |
| 523 | } | 523 | } |
| 524 | 524 | ||
| 525 | static void destroy_one_temp(struct bbc_cpu_temperature *tp) | ||
| 526 | { | ||
| 527 | bbc_i2c_detach(tp->client); | ||
| 528 | kfree(tp); | ||
| 529 | } | ||
| 530 | |||
| 531 | static void destroy_all_temps(struct bbc_i2c_bus *bp) | ||
| 532 | { | ||
| 533 | struct bbc_cpu_temperature *tp, *tpos; | ||
| 534 | |||
| 535 | list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { | ||
| 536 | list_del(&tp->bp_list); | ||
| 537 | list_del(&tp->glob_list); | ||
| 538 | destroy_one_temp(tp); | ||
| 539 | } | ||
| 540 | } | ||
| 541 | |||
| 542 | static void destroy_one_fan(struct bbc_fan_control *fp) | ||
| 543 | { | ||
| 544 | bbc_i2c_detach(fp->client); | ||
| 545 | kfree(fp); | ||
| 546 | } | ||
| 547 | |||
| 548 | static void destroy_all_fans(struct bbc_i2c_bus *bp) | ||
| 549 | { | ||
| 550 | struct bbc_fan_control *fp, *fpos; | ||
| 551 | |||
| 552 | list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) { | ||
| 553 | list_del(&fp->bp_list); | ||
| 554 | list_del(&fp->glob_list); | ||
| 555 | destroy_one_fan(fp); | ||
| 556 | } | ||
| 557 | } | ||
| 558 | |||
| 525 | int bbc_envctrl_init(struct bbc_i2c_bus *bp) | 559 | int bbc_envctrl_init(struct bbc_i2c_bus *bp) |
| 526 | { | 560 | { |
| 527 | struct of_device *op; | 561 | struct of_device *op; |
| @@ -541,6 +575,8 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp) | |||
| 541 | int err = PTR_ERR(kenvctrld_task); | 575 | int err = PTR_ERR(kenvctrld_task); |
| 542 | 576 | ||
| 543 | kenvctrld_task = NULL; | 577 | kenvctrld_task = NULL; |
| 578 | destroy_all_temps(bp); | ||
| 579 | destroy_all_fans(bp); | ||
| 544 | return err; | 580 | return err; |
| 545 | } | 581 | } |
| 546 | } | 582 | } |
| @@ -548,35 +584,11 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp) | |||
| 548 | return 0; | 584 | return 0; |
| 549 | } | 585 | } |
| 550 | 586 | ||
| 551 | static void destroy_one_temp(struct bbc_cpu_temperature *tp) | ||
| 552 | { | ||
| 553 | bbc_i2c_detach(tp->client); | ||
| 554 | kfree(tp); | ||
| 555 | } | ||
| 556 | |||
| 557 | static void destroy_one_fan(struct bbc_fan_control *fp) | ||
| 558 | { | ||
| 559 | bbc_i2c_detach(fp->client); | ||
| 560 | kfree(fp); | ||
| 561 | } | ||
| 562 | |||
| 563 | void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) | 587 | void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) |
| 564 | { | 588 | { |
| 565 | struct bbc_cpu_temperature *tp, *tpos; | ||
| 566 | struct bbc_fan_control *fp, *fpos; | ||
| 567 | |||
| 568 | if (kenvctrld_task) | 589 | if (kenvctrld_task) |
| 569 | kthread_stop(kenvctrld_task); | 590 | kthread_stop(kenvctrld_task); |
| 570 | 591 | ||
| 571 | list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { | 592 | destroy_all_temps(bp); |
| 572 | list_del(&tp->bp_list); | 593 | destroy_all_fans(bp); |
| 573 | list_del(&tp->glob_list); | ||
| 574 | destroy_one_temp(tp); | ||
| 575 | } | ||
| 576 | |||
| 577 | list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) { | ||
| 578 | list_del(&fp->bp_list); | ||
| 579 | list_del(&fp->glob_list); | ||
| 580 | destroy_one_fan(fp); | ||
| 581 | } | ||
| 582 | } | 594 | } |
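In bbc_envctrl the destroy helpers are only moved above bbc_envctrl_init(), so the init path can now unwind the temperature and fan lists it has already populated when kthread_run() fails, instead of returning with half-attached I2C clients. The resulting shape of the error path, as a hedged sketch with names mirroring the driver above:

	/*
	 * Sketch only: tear down partially-built state when the monitoring
	 * thread cannot be started.
	 */
	kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld");
	if (IS_ERR(kenvctrld_task)) {
		int err = PTR_ERR(kenvctrld_task);

		kenvctrld_task = NULL;
		destroy_all_temps(bp);	/* undoes attach_one_temp() */
		destroy_all_fans(bp);	/* undoes attach_one_fan() */
		return err;
	}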
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index 26ffdcd5a437..15a00e8b7122 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c | |||
| @@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn) | |||
| 1440 | static int is_cxgb3_dev(struct net_device *dev) | 1440 | static int is_cxgb3_dev(struct net_device *dev) |
| 1441 | { | 1441 | { |
| 1442 | struct cxgb3i_sdev_data *cdata; | 1442 | struct cxgb3i_sdev_data *cdata; |
| 1443 | struct net_device *ndev = dev; | ||
| 1444 | |||
| 1445 | if (dev->priv_flags & IFF_802_1Q_VLAN) | ||
| 1446 | ndev = vlan_dev_real_dev(dev); | ||
| 1443 | 1447 | ||
| 1444 | write_lock(&cdata_rwlock); | 1448 | write_lock(&cdata_rwlock); |
| 1445 | list_for_each_entry(cdata, &cdata_list, list) { | 1449 | list_for_each_entry(cdata, &cdata_list, list) { |
| @@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev) | |||
| 1447 | int i; | 1451 | int i; |
| 1448 | 1452 | ||
| 1449 | for (i = 0; i < ports->nports; i++) | 1453 | for (i = 0; i < ports->nports; i++) |
| 1450 | if (dev == ports->lldevs[i]) { | 1454 | if (ndev == ports->lldevs[i]) { |
| 1451 | write_unlock(&cdata_rwlock); | 1455 | write_unlock(&cdata_rwlock); |
| 1452 | return 1; | 1456 | return 1; |
| 1453 | } | 1457 | } |
| @@ -1566,6 +1570,26 @@ out_err: | |||
| 1566 | return -EINVAL; | 1570 | return -EINVAL; |
| 1567 | } | 1571 | } |
| 1568 | 1572 | ||
| 1573 | /** | ||
| 1574 | * cxgb3i_find_dev - find the interface associated with the given address | ||
| 1575 | * @ipaddr: ip address | ||
| 1576 | */ | ||
| 1577 | static struct net_device * | ||
| 1578 | cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) | ||
| 1579 | { | ||
| 1580 | struct flowi fl; | ||
| 1581 | int err; | ||
| 1582 | struct rtable *rt; | ||
| 1583 | |||
| 1584 | memset(&fl, 0, sizeof(fl)); | ||
| 1585 | fl.nl_u.ip4_u.daddr = ipaddr; | ||
| 1586 | |||
| 1587 | err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl); | ||
| 1588 | if (!err) | ||
| 1589 | return (&rt->u.dst)->dev; | ||
| 1590 | |||
| 1591 | return NULL; | ||
| 1592 | } | ||
| 1569 | 1593 | ||
| 1570 | /** | 1594 | /** |
| 1571 | * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address | 1595 | * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address |
| @@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, | |||
| 1581 | struct cxgb3i_sdev_data *cdata; | 1605 | struct cxgb3i_sdev_data *cdata; |
| 1582 | struct t3cdev *cdev; | 1606 | struct t3cdev *cdev; |
| 1583 | __be32 sipv4; | 1607 | __be32 sipv4; |
| 1608 | struct net_device *dstdev; | ||
| 1584 | int err; | 1609 | int err; |
| 1585 | 1610 | ||
| 1586 | c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); | 1611 | c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); |
| @@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, | |||
| 1591 | c3cn->daddr.sin_port = usin->sin_port; | 1616 | c3cn->daddr.sin_port = usin->sin_port; |
| 1592 | c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; | 1617 | c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; |
| 1593 | 1618 | ||
| 1619 | dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr); | ||
| 1620 | if (!dstdev || !is_cxgb3_dev(dstdev)) | ||
| 1621 | return -ENETUNREACH; | ||
| 1622 | |||
| 1623 | if (dstdev->priv_flags & IFF_802_1Q_VLAN) | ||
| 1624 | dev = dstdev; | ||
| 1625 | |||
| 1594 | rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, | 1626 | rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, |
| 1595 | c3cn->daddr.sin_addr.s_addr, | 1627 | c3cn->daddr.sin_addr.s_addr, |
| 1596 | c3cn->saddr.sin_port, | 1628 | c3cn->saddr.sin_port, |
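Note on the cxgb3i_offload.c change above: the connect path is made VLAN-aware by first resolving the routed egress device and, if it is a VLAN interface, mapping it to its underlying real device before the cxgb3 ownership check. The fragment below is only an illustrative restatement of that pattern (kernel context assumed; the helper name resolve_real_dev is hypothetical and mirrors the patch rather than adding new driver logic):

    #include <linux/netdevice.h>
    #include <linux/if_vlan.h>

    /* Map a possibly-VLAN net_device to the physical device that the
     * cxgb3 driver registered, so ownership checks compare the right
     * pointer.
     */
    static struct net_device *resolve_real_dev(struct net_device *dev)
    {
            if (dev->priv_flags & IFF_802_1Q_VLAN)
                    return vlan_dev_real_dev(dev);
            return dev;
    }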
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index ce522702a6c1..2cc39684ce97 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
| @@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
| 4142 | spin_lock_irq(shost->host_lock); | 4142 | spin_lock_irq(shost->host_lock); |
| 4143 | if (vport->fc_rscn_flush) { | 4143 | if (vport->fc_rscn_flush) { |
| 4144 | /* Another thread is walking fc_rscn_id_list on this vport */ | 4144 | /* Another thread is walking fc_rscn_id_list on this vport */ |
| 4145 | spin_unlock_irq(shost->host_lock); | ||
| 4146 | vport->fc_flag |= FC_RSCN_DISCOVERY; | 4145 | vport->fc_flag |= FC_RSCN_DISCOVERY; |
| 4146 | spin_unlock_irq(shost->host_lock); | ||
| 4147 | /* Send back ACC */ | 4147 | /* Send back ACC */ |
| 4148 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | 4148 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); |
| 4149 | return 0; | 4149 | return 0; |
| @@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
| 5948 | lpfc_initial_fdisc(vport); | 5948 | lpfc_initial_fdisc(vport); |
| 5949 | break; | 5949 | break; |
| 5950 | } | 5950 | } |
| 5951 | |||
| 5952 | } else { | 5951 | } else { |
| 5952 | vport->vpi_state |= LPFC_VPI_REGISTERED; | ||
| 5953 | if (vport == phba->pport) | 5953 | if (vport == phba->pport) |
| 5954 | if (phba->sli_rev < LPFC_SLI_REV4) | 5954 | if (phba->sli_rev < LPFC_SLI_REV4) |
| 5955 | lpfc_issue_fabric_reglogin(vport); | 5955 | lpfc_issue_fabric_reglogin(vport); |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 3b9424427652..2445e399fd60 100755 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
| @@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba) | |||
| 747 | 747 | ||
| 748 | if (phba->link_state == LPFC_LINK_DOWN) | 748 | if (phba->link_state == LPFC_LINK_DOWN) |
| 749 | return 0; | 749 | return 0; |
| 750 | |||
| 751 | /* Block all SCSI stack I/Os */ | ||
| 752 | lpfc_scsi_dev_block(phba); | ||
| 753 | |||
| 750 | spin_lock_irq(&phba->hbalock); | 754 | spin_lock_irq(&phba->hbalock); |
| 751 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); | 755 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); |
| 752 | if (phba->link_state > LPFC_LINK_DOWN) { | 756 | if (phba->link_state > LPFC_LINK_DOWN) { |
| @@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 1555 | * to book keeping the FCFIs can be used. | 1559 | * to book keeping the FCFIs can be used. |
| 1556 | */ | 1560 | */ |
| 1557 | if (shdr_status || shdr_add_status) { | 1561 | if (shdr_status || shdr_add_status) { |
| 1558 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 1562 | if (shdr_status == STATUS_FCF_TABLE_EMPTY) { |
| 1559 | "2521 READ_FCF_RECORD mailbox failed " | 1563 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 1560 | "with status x%x add_status x%x, mbx\n", | 1564 | "2726 READ_FCF_RECORD Indicates empty " |
| 1561 | shdr_status, shdr_add_status); | 1565 | "FCF table.\n"); |
| 1566 | } else { | ||
| 1567 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 1568 | "2521 READ_FCF_RECORD mailbox failed " | ||
| 1569 | "with status x%x add_status x%x, mbx\n", | ||
| 1570 | shdr_status, shdr_add_status); | ||
| 1571 | } | ||
| 1562 | goto out; | 1572 | goto out; |
| 1563 | } | 1573 | } |
| 1564 | /* Interpreting the returned information of FCF records */ | 1574 | /* Interpreting the returned information of FCF records */ |
| @@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
| 1698 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 1708 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
| 1699 | return; | 1709 | return; |
| 1700 | } | 1710 | } |
| 1711 | spin_lock_irq(&phba->hbalock); | ||
| 1701 | vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; | 1712 | vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; |
| 1713 | spin_unlock_irq(&phba->hbalock); | ||
| 1702 | 1714 | ||
| 1703 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) | 1715 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) |
| 1704 | lpfc_initial_fdisc(vport); | 1716 | lpfc_initial_fdisc(vport); |
| @@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
| 2259 | mb->mbxStatus); | 2271 | mb->mbxStatus); |
| 2260 | break; | 2272 | break; |
| 2261 | } | 2273 | } |
| 2274 | spin_lock_irq(&phba->hbalock); | ||
| 2262 | vport->vpi_state &= ~LPFC_VPI_REGISTERED; | 2275 | vport->vpi_state &= ~LPFC_VPI_REGISTERED; |
| 2276 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | ||
| 2277 | spin_unlock_irq(&phba->hbalock); | ||
| 2263 | vport->unreg_vpi_cmpl = VPORT_OK; | 2278 | vport->unreg_vpi_cmpl = VPORT_OK; |
| 2264 | mempool_free(pmb, phba->mbox_mem_pool); | 2279 | mempool_free(pmb, phba->mbox_mem_pool); |
| 2265 | /* | 2280 | /* |
| @@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
| 4475 | (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) | 4490 | (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) |
| 4476 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 4491 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
| 4477 | lpfc_mbx_unreg_vpi(vports[i]); | 4492 | lpfc_mbx_unreg_vpi(vports[i]); |
| 4493 | spin_lock_irq(&phba->hbalock); | ||
| 4478 | vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; | 4494 | vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; |
| 4479 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; | 4495 | vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; |
| 4496 | spin_unlock_irq(&phba->hbalock); | ||
| 4480 | } | 4497 | } |
| 4481 | lpfc_destroy_vport_work_array(phba, vports); | 4498 | lpfc_destroy_vport_work_array(phba, vports); |
| 4482 | 4499 | ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1585148a17e5..8a2a1c5935c6 100644..100755 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
| @@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy { | |||
| 1013 | }; | 1013 | }; |
| 1014 | 1014 | ||
| 1015 | #define LPFC_HDR_BUF_SIZE 128 | 1015 | #define LPFC_HDR_BUF_SIZE 128 |
| 1016 | #define LPFC_DATA_BUF_SIZE 4096 | 1016 | #define LPFC_DATA_BUF_SIZE 2048 |
| 1017 | struct rq_context { | 1017 | struct rq_context { |
| 1018 | uint32_t word0; | 1018 | uint32_t word0; |
| 1019 | #define lpfc_rq_context_rq_size_SHIFT 16 | 1019 | #define lpfc_rq_context_rq_size_SHIFT 16 |
| @@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg { | |||
| 1371 | #define STATUS_ERROR_ACITMAIN 0x2a | 1371 | #define STATUS_ERROR_ACITMAIN 0x2a |
| 1372 | #define STATUS_REBOOT_REQUIRED 0x2c | 1372 | #define STATUS_REBOOT_REQUIRED 0x2c |
| 1373 | #define STATUS_FCF_IN_USE 0x3a | 1373 | #define STATUS_FCF_IN_USE 0x3a |
| 1374 | #define STATUS_FCF_TABLE_EMPTY 0x43 | ||
| 1374 | 1375 | ||
| 1375 | struct lpfc_mbx_sli4_config { | 1376 | struct lpfc_mbx_sli4_config { |
| 1376 | struct mbox_header header; | 1377 | struct mbox_header header; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d4da6bdd0e73..b8eb1b6e5e77 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
| @@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
| 3006 | struct lpfc_vport *vport; | 3006 | struct lpfc_vport *vport; |
| 3007 | struct lpfc_nodelist *ndlp; | 3007 | struct lpfc_nodelist *ndlp; |
| 3008 | struct Scsi_Host *shost; | 3008 | struct Scsi_Host *shost; |
| 3009 | uint32_t link_state; | ||
| 3009 | 3010 | ||
| 3010 | phba->fc_eventTag = acqe_fcoe->event_tag; | 3011 | phba->fc_eventTag = acqe_fcoe->event_tag; |
| 3011 | phba->fcoe_eventtag = acqe_fcoe->event_tag; | 3012 | phba->fcoe_eventtag = acqe_fcoe->event_tag; |
| @@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
| 3052 | break; | 3053 | break; |
| 3053 | /* | 3054 | /* |
| 3054 | * Currently, driver support only one FCF - so treat this as | 3055 | * Currently, driver support only one FCF - so treat this as |
| 3055 | * a link down. | 3056 | * a link down, but save the link state because we don't want |
| 3057 | * it to be changed to Link Down unless it is already down. | ||
| 3056 | */ | 3058 | */ |
| 3059 | link_state = phba->link_state; | ||
| 3057 | lpfc_linkdown(phba); | 3060 | lpfc_linkdown(phba); |
| 3061 | phba->link_state = link_state; | ||
| 3058 | /* Unregister FCF if no devices connected to it */ | 3062 | /* Unregister FCF if no devices connected to it */ |
| 3059 | lpfc_unregister_unused_fcf(phba); | 3063 | lpfc_unregister_unused_fcf(phba); |
| 3060 | break; | 3064 | break; |
| @@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) | |||
| 7226 | { | 7230 | { |
| 7227 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7231 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 7228 | "2711 PCI channel permanent disable for failure\n"); | 7232 | "2711 PCI channel permanent disable for failure\n"); |
| 7229 | /* Block all SCSI devices' I/Os on the host */ | ||
| 7230 | lpfc_scsi_dev_block(phba); | ||
| 7231 | /* Clean up all driver's outstanding SCSI I/Os */ | 7233 | /* Clean up all driver's outstanding SCSI I/Os */ |
| 7232 | lpfc_sli_flush_fcp_rings(phba); | 7234 | lpfc_sli_flush_fcp_rings(phba); |
| 7233 | } | 7235 | } |
| @@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) | |||
| 7256 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 7258 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
| 7257 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 7259 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; |
| 7258 | 7260 | ||
| 7261 | /* Block all SCSI devices' I/Os on the host */ | ||
| 7262 | lpfc_scsi_dev_block(phba); | ||
| 7263 | |||
| 7259 | switch (state) { | 7264 | switch (state) { |
| 7260 | case pci_channel_io_normal: | 7265 | case pci_channel_io_normal: |
| 7261 | /* Non-fatal error, prepare for recovery */ | 7266 | /* Non-fatal error, prepare for recovery */ |
| @@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 7507 | error = -ENODEV; | 7512 | error = -ENODEV; |
| 7508 | goto out_free_sysfs_attr; | 7513 | goto out_free_sysfs_attr; |
| 7509 | } | 7514 | } |
| 7515 | /* Default to single FCP EQ for non-MSI-X */ | ||
| 7516 | if (phba->intr_type != MSIX) | ||
| 7517 | phba->cfg_fcp_eq_count = 1; | ||
| 7510 | /* Set up SLI-4 HBA */ | 7518 | /* Set up SLI-4 HBA */ |
| 7511 | if (lpfc_sli4_hba_setup(phba)) { | 7519 | if (lpfc_sli4_hba_setup(phba)) { |
| 7512 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7520 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7935667b81a5..589549b2bf0e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, | |||
| 1383 | /* HBQ for ELS and CT traffic. */ | 1383 | /* HBQ for ELS and CT traffic. */ |
| 1384 | static struct lpfc_hbq_init lpfc_els_hbq = { | 1384 | static struct lpfc_hbq_init lpfc_els_hbq = { |
| 1385 | .rn = 1, | 1385 | .rn = 1, |
| 1386 | .entry_count = 200, | 1386 | .entry_count = 256, |
| 1387 | .mask_count = 0, | 1387 | .mask_count = 0, |
| 1388 | .profile = 0, | 1388 | .profile = 0, |
| 1389 | .ring_mask = (1 << LPFC_ELS_RING), | 1389 | .ring_mask = (1 << LPFC_ELS_RING), |
| @@ -1482,8 +1482,11 @@ err: | |||
| 1482 | int | 1482 | int |
| 1483 | lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) | 1483 | lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) |
| 1484 | { | 1484 | { |
| 1485 | return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, | 1485 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 1486 | lpfc_hbq_defs[qno]->add_count)); | 1486 | return 0; |
| 1487 | else | ||
| 1488 | return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, | ||
| 1489 | lpfc_hbq_defs[qno]->add_count); | ||
| 1487 | } | 1490 | } |
| 1488 | 1491 | ||
| 1489 | /** | 1492 | /** |
| @@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) | |||
| 1498 | static int | 1501 | static int |
| 1499 | lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) | 1502 | lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) |
| 1500 | { | 1503 | { |
| 1501 | return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, | 1504 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 1502 | lpfc_hbq_defs[qno]->init_count)); | 1505 | return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, |
| 1506 | lpfc_hbq_defs[qno]->entry_count); | ||
| 1507 | else | ||
| 1508 | return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, | ||
| 1509 | lpfc_hbq_defs[qno]->init_count); | ||
| 1503 | } | 1510 | } |
| 1504 | 1511 | ||
| 1505 | /** | 1512 | /** |
| @@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | |||
| 4110 | if (rc) { | 4117 | if (rc) { |
| 4111 | dma_free_coherent(&phba->pcidev->dev, dma_size, | 4118 | dma_free_coherent(&phba->pcidev->dev, dma_size, |
| 4112 | dmabuf->virt, dmabuf->phys); | 4119 | dmabuf->virt, dmabuf->phys); |
| 4120 | kfree(dmabuf); | ||
| 4113 | return -EIO; | 4121 | return -EIO; |
| 4114 | } | 4122 | } |
| 4115 | 4123 | ||
| @@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 5848 | iocbq->iocb.un.ulpWord[3]); | 5856 | iocbq->iocb.un.ulpWord[3]); |
| 5849 | wqe->generic.word3 = 0; | 5857 | wqe->generic.word3 = 0; |
| 5850 | bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); | 5858 | bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); |
| 5851 | bf_set(wqe_xc, &wqe->generic, 1); | ||
| 5852 | /* The entire sequence is transmitted for this IOCB */ | 5859 | /* The entire sequence is transmitted for this IOCB */ |
| 5853 | xmit_len = total_len; | 5860 | xmit_len = total_len; |
| 5854 | cmnd = CMD_XMIT_SEQUENCE64_CR; | 5861 | cmnd = CMD_XMIT_SEQUENCE64_CR; |
| @@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
| 10944 | return dmabuf; | 10951 | return dmabuf; |
| 10945 | } | 10952 | } |
| 10946 | temp_hdr = seq_dmabuf->hbuf.virt; | 10953 | temp_hdr = seq_dmabuf->hbuf.virt; |
| 10947 | if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { | 10954 | if (be16_to_cpu(new_hdr->fh_seq_cnt) < |
| 10955 | be16_to_cpu(temp_hdr->fh_seq_cnt)) { | ||
| 10948 | list_del_init(&seq_dmabuf->hbuf.list); | 10956 | list_del_init(&seq_dmabuf->hbuf.list); |
| 10949 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); | 10957 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); |
| 10950 | list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); | 10958 | list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); |
| @@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
| 10955 | list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); | 10963 | list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); |
| 10956 | seq_dmabuf->time_stamp = jiffies; | 10964 | seq_dmabuf->time_stamp = jiffies; |
| 10957 | lpfc_update_rcv_time_stamp(vport); | 10965 | lpfc_update_rcv_time_stamp(vport); |
| 10966 | if (list_empty(&seq_dmabuf->dbuf.list)) { | ||
| 10967 | temp_hdr = dmabuf->hbuf.virt; | ||
| 10968 | list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); | ||
| 10969 | return seq_dmabuf; | ||
| 10970 | } | ||
| 10958 | /* find the correct place in the sequence to insert this frame */ | 10971 | /* find the correct place in the sequence to insert this frame */ |
| 10959 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { | 10972 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { |
| 10960 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); | 10973 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| @@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
| 10963 | * If the frame's sequence count is greater than the frame on | 10976 | * If the frame's sequence count is greater than the frame on |
| 10964 | * the list then insert the frame right after this frame | 10977 | * the list then insert the frame right after this frame |
| 10965 | */ | 10978 | */ |
| 10966 | if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { | 10979 | if (be16_to_cpu(new_hdr->fh_seq_cnt) > |
| 10980 | be16_to_cpu(temp_hdr->fh_seq_cnt)) { | ||
| 10967 | list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); | 10981 | list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); |
| 10968 | return seq_dmabuf; | 10982 | return seq_dmabuf; |
| 10969 | } | 10983 | } |
| @@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf) | |||
| 11210 | seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); | 11224 | seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 11211 | hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | 11225 | hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
| 11212 | /* If there is a hole in the sequence count then fail. */ | 11226 | /* If there is a hole in the sequence count then fail. */ |
| 11213 | if (++seq_count != hdr->fh_seq_cnt) | 11227 | if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) |
| 11214 | return 0; | 11228 | return 0; |
| 11215 | fctl = (hdr->fh_f_ctl[0] << 16 | | 11229 | fctl = (hdr->fh_f_ctl[0] << 16 | |
| 11216 | hdr->fh_f_ctl[1] << 8 | | 11230 | hdr->fh_f_ctl[1] << 8 | |
| @@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 11242 | struct lpfc_iocbq *first_iocbq, *iocbq; | 11256 | struct lpfc_iocbq *first_iocbq, *iocbq; |
| 11243 | struct fc_frame_header *fc_hdr; | 11257 | struct fc_frame_header *fc_hdr; |
| 11244 | uint32_t sid; | 11258 | uint32_t sid; |
| 11259 | struct ulp_bde64 *pbde; | ||
| 11245 | 11260 | ||
| 11246 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | 11261 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
| 11247 | /* remove from receive buffer list */ | 11262 | /* remove from receive buffer list */ |
| @@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
| 11283 | if (!iocbq->context3) { | 11298 | if (!iocbq->context3) { |
| 11284 | iocbq->context3 = d_buf; | 11299 | iocbq->context3 = d_buf; |
| 11285 | iocbq->iocb.ulpBdeCount++; | 11300 | iocbq->iocb.ulpBdeCount++; |
| 11286 | iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = | 11301 | pbde = (struct ulp_bde64 *) |
| 11287 | LPFC_DATA_BUF_SIZE; | 11302 | &iocbq->iocb.unsli3.sli3Words[4]; |
| 11303 | pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; | ||
| 11288 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += | 11304 | first_iocbq->iocb.unsli3.rcvsli3.acc_len += |
| 11289 | bf_get(lpfc_rcqe_length, | 11305 | bf_get(lpfc_rcqe_length, |
| 11290 | &seq_dmabuf->cq_event.cqe.rcqe_cmpl); | 11306 | &seq_dmabuf->cq_event.cqe.rcqe_cmpl); |
| @@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, | |||
| 11401 | return; | 11417 | return; |
| 11402 | } | 11418 | } |
| 11403 | /* If not last frame in sequence continue processing frames. */ | 11419 | /* If not last frame in sequence continue processing frames. */ |
| 11404 | if (!lpfc_seq_complete(seq_dmabuf)) { | 11420 | if (!lpfc_seq_complete(seq_dmabuf)) |
| 11405 | /* | ||
| 11406 | * When saving off frames post a new one and mark this | ||
| 11407 | * frame to be freed when it is finished. | ||
| 11408 | **/ | ||
| 11409 | lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); | ||
| 11410 | dmabuf->tag = -1; | ||
| 11411 | return; | 11421 | return; |
| 11412 | } | 11422 | |
| 11413 | /* Send the complete sequence to the upper layer protocol */ | 11423 | /* Send the complete sequence to the upper layer protocol */ |
| 11414 | lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); | 11424 | lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); |
| 11415 | } | 11425 | } |
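Note on the lpfc_sli.c hunks above: the raw comparisons of fh_seq_cnt are replaced with be16_to_cpu() conversions because the Fibre Channel frame header carries SEQ_CNT in big-endian byte order, so comparing the raw 16-bit field on a little-endian host orders frames incorrectly. The sketch below is an illustrative user-space demonstration of that failure mode, not part of the patch; it uses be16toh()/htobe16() from endian.h as stand-ins for the kernel's be16_to_cpu()/cpu_to_be16():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Two consecutive sequence counts as they appear on the wire
             * (big-endian): 255 and 256.
             */
            uint16_t wire_a = htobe16(255);     /* bytes 0x00 0xff */
            uint16_t wire_b = htobe16(256);     /* bytes 0x01 0x00 */

            /* On a little-endian host the raw fields read back as 65280
             * and 1, so the frames sort in the wrong order.
             */
            printf("raw compare:       a %s b\n", wire_a < wire_b ? "<" : ">=");

            /* Converting to host order first gives the intended 255 < 256. */
            printf("converted compare: a %s b\n",
                   be16toh(wire_a) < be16toh(wire_b) ? "<" : ">=");
            return 0;
    }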
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 25d66d070cf8..44e5f574236b 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | /* Multi-queue arrangement for fast-path FCP work queues */ | 28 | /* Multi-queue arrangement for fast-path FCP work queues */ |
| 29 | #define LPFC_FN_EQN_MAX 8 | 29 | #define LPFC_FN_EQN_MAX 8 |
| 30 | #define LPFC_SP_EQN_DEF 1 | 30 | #define LPFC_SP_EQN_DEF 1 |
| 31 | #define LPFC_FP_EQN_DEF 1 | 31 | #define LPFC_FP_EQN_DEF 4 |
| 32 | #define LPFC_FP_EQN_MIN 1 | 32 | #define LPFC_FP_EQN_MIN 1 |
| 33 | #define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) | 33 | #define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) |
| 34 | 34 | ||
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c7f3aed2aab8..792f72263f1a 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | * included with this package. * | 18 | * included with this package. * |
| 19 | *******************************************************************/ | 19 | *******************************************************************/ |
| 20 | 20 | ||
| 21 | #define LPFC_DRIVER_VERSION "8.3.6" | 21 | #define LPFC_DRIVER_VERSION "8.3.7" |
| 22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
| 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
| 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7d6dd83d3592..e3c7fa642306 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
| @@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport) | |||
| 512 | return VPORT_OK; | 512 | return VPORT_OK; |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | spin_lock_irq(&phba->hbalock); | ||
| 515 | vport->load_flag |= FC_LOADING; | 516 | vport->load_flag |= FC_LOADING; |
| 516 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 517 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
| 518 | spin_unlock_irq(&phba->hbalock); | ||
| 517 | 519 | ||
| 518 | /* Use the Physical nodes Fabric NDLP to determine if the link is | 520 | /* Use the Physical nodes Fabric NDLP to determine if the link is |
| 519 | * up and ready to FDISC. | 521 | * up and ready to FDISC. |
| @@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
| 700 | } | 702 | } |
| 701 | spin_unlock_irq(&phba->ndlp_lock); | 703 | spin_unlock_irq(&phba->ndlp_lock); |
| 702 | } | 704 | } |
| 703 | if (vport->vpi_state != LPFC_VPI_REGISTERED) | 705 | if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) |
| 704 | goto skip_logo; | 706 | goto skip_logo; |
| 705 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 707 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
| 706 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 708 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 99ff99e45bee..708ea3157b60 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c | |||
| @@ -4046,7 +4046,7 @@ megasas_aen_polling(struct work_struct *work) | |||
| 4046 | } | 4046 | } |
| 4047 | 4047 | ||
| 4048 | 4048 | ||
| 4049 | static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, | 4049 | static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, |
| 4050 | megasas_sysfs_show_poll_mode_io, | 4050 | megasas_sysfs_show_poll_mode_io, |
| 4051 | megasas_sysfs_set_poll_mode_io); | 4051 | megasas_sysfs_set_poll_mode_io); |
| 4052 | 4052 | ||
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e7d2688fbeba..b6f1ef954af1 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
| @@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) | |||
| 2483 | sense_copied = 1; | 2483 | sense_copied = 1; |
| 2484 | } | 2484 | } |
| 2485 | 2485 | ||
| 2486 | if (RES_IS_GSCSI(res->cfg_entry)) { | 2486 | if (RES_IS_GSCSI(res->cfg_entry)) |
| 2487 | pmcraid_cancel_all(cmd, sense_copied); | 2487 | pmcraid_cancel_all(cmd, sense_copied); |
| 2488 | } else if (sense_copied) { | 2488 | else if (sense_copied) |
| 2489 | pmcraid_erp_done(cmd); | 2489 | pmcraid_erp_done(cmd); |
| 2490 | return 0; | 2490 | else |
| 2491 | } else { | ||
| 2492 | pmcraid_request_sense(cmd); | 2491 | pmcraid_request_sense(cmd); |
| 2493 | } | ||
| 2494 | 2492 | ||
| 2495 | return 1; | 2493 | return 1; |
| 2496 | 2494 | ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 21e2bc4d7401..3a9f5b288aee 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, | |||
| 232 | if (off) | 232 | if (off) |
| 233 | return 0; | 233 | return 0; |
| 234 | 234 | ||
| 235 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
| 236 | return 0; | ||
| 237 | |||
| 235 | if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) | 238 | if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) |
| 236 | return -EINVAL; | 239 | return -EINVAL; |
| 237 | if (start > ha->optrom_size) | 240 | if (start > ha->optrom_size) |
| @@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, | |||
| 379 | struct device, kobj))); | 382 | struct device, kobj))); |
| 380 | struct qla_hw_data *ha = vha->hw; | 383 | struct qla_hw_data *ha = vha->hw; |
| 381 | 384 | ||
| 385 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
| 386 | return 0; | ||
| 387 | |||
| 382 | if (!capable(CAP_SYS_ADMIN)) | 388 | if (!capable(CAP_SYS_ADMIN)) |
| 383 | return 0; | 389 | return 0; |
| 384 | 390 | ||
| @@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, | |||
| 398 | struct qla_hw_data *ha = vha->hw; | 404 | struct qla_hw_data *ha = vha->hw; |
| 399 | uint8_t *tmp_data; | 405 | uint8_t *tmp_data; |
| 400 | 406 | ||
| 407 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
| 408 | return 0; | ||
| 409 | |||
| 401 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || | 410 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || |
| 402 | !ha->isp_ops->write_nvram) | 411 | !ha->isp_ops->write_nvram) |
| 403 | return 0; | 412 | return 0; |
| @@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, | |||
| 1238 | char *buf) | 1247 | char *buf) |
| 1239 | { | 1248 | { |
| 1240 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1249 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
| 1241 | int rval; | 1250 | int rval = QLA_FUNCTION_FAILED; |
| 1242 | uint16_t state[5]; | 1251 | uint16_t state[5]; |
| 1243 | 1252 | ||
| 1244 | rval = qla2x00_get_firmware_state(vha, state); | 1253 | if (!vha->hw->flags.eeh_busy) |
| 1254 | rval = qla2x00_get_firmware_state(vha, state); | ||
| 1245 | if (rval != QLA_SUCCESS) | 1255 | if (rval != QLA_SUCCESS) |
| 1246 | memset(state, -1, sizeof(state)); | 1256 | memset(state, -1, sizeof(state)); |
| 1247 | 1257 | ||
| @@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
| 1452 | if (!fcport) | 1462 | if (!fcport) |
| 1453 | return; | 1463 | return; |
| 1454 | 1464 | ||
| 1455 | if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) | 1465 | if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) |
| 1466 | return; | ||
| 1467 | |||
| 1468 | if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { | ||
| 1456 | qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); | 1469 | qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); |
| 1457 | else | 1470 | return; |
| 1458 | qla2x00_abort_fcport_cmds(fcport); | 1471 | } |
| 1459 | 1472 | ||
| 1460 | /* | 1473 | /* |
| 1461 | * Transport has effectively 'deleted' the rport, clear | 1474 | * Transport has effectively 'deleted' the rport, clear |
| @@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) | |||
| 1475 | if (!fcport) | 1488 | if (!fcport) |
| 1476 | return; | 1489 | return; |
| 1477 | 1490 | ||
| 1491 | if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) | ||
| 1492 | return; | ||
| 1493 | |||
| 1478 | if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { | 1494 | if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { |
| 1479 | qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); | 1495 | qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); |
| 1480 | return; | 1496 | return; |
| @@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) | |||
| 1515 | pfc_host_stat = &ha->fc_host_stat; | 1531 | pfc_host_stat = &ha->fc_host_stat; |
| 1516 | memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); | 1532 | memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); |
| 1517 | 1533 | ||
| 1534 | if (test_bit(UNLOADING, &vha->dpc_flags)) | ||
| 1535 | goto done; | ||
| 1536 | |||
| 1537 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
| 1538 | goto done; | ||
| 1539 | |||
| 1518 | stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); | 1540 | stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); |
| 1519 | if (stats == NULL) { | 1541 | if (stats == NULL) { |
| 1520 | DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", | 1542 | DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index f660dd70b72e..d6d9c86cb058 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ | 26 | /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ |
| 27 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ | 27 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ |
| 28 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ | 28 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ |
| 29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ | 29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ |
| 30 | 30 | ||
| 31 | /* | 31 | /* |
| 32 | * Macros use for debugging the driver. | 32 | * Macros use for debugging the driver. |
| @@ -132,6 +132,13 @@ | |||
| 132 | #else | 132 | #else |
| 133 | #define DEBUG16(x) do {} while (0) | 133 | #define DEBUG16(x) do {} while (0) |
| 134 | #endif | 134 | #endif |
| 135 | |||
| 136 | #if defined(QL_DEBUG_LEVEL_17) | ||
| 137 | #define DEBUG17(x) do {x;} while (0) | ||
| 138 | #else | ||
| 139 | #define DEBUG17(x) do {} while (0) | ||
| 140 | #endif | ||
| 141 | |||
| 135 | /* | 142 | /* |
| 136 | * Firmware Dump structure definition | 143 | * Firmware Dump structure definition |
| 137 | */ | 144 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 384afda7dbe9..608e675f68c8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -2256,11 +2256,13 @@ struct qla_hw_data { | |||
| 2256 | uint32_t disable_serdes :1; | 2256 | uint32_t disable_serdes :1; |
| 2257 | uint32_t gpsc_supported :1; | 2257 | uint32_t gpsc_supported :1; |
| 2258 | uint32_t npiv_supported :1; | 2258 | uint32_t npiv_supported :1; |
| 2259 | uint32_t pci_channel_io_perm_failure :1; | ||
| 2259 | uint32_t fce_enabled :1; | 2260 | uint32_t fce_enabled :1; |
| 2260 | uint32_t fac_supported :1; | 2261 | uint32_t fac_supported :1; |
| 2261 | uint32_t chip_reset_done :1; | 2262 | uint32_t chip_reset_done :1; |
| 2262 | uint32_t port0 :1; | 2263 | uint32_t port0 :1; |
| 2263 | uint32_t running_gold_fw :1; | 2264 | uint32_t running_gold_fw :1; |
| 2265 | uint32_t eeh_busy :1; | ||
| 2264 | uint32_t cpu_affinity_enabled :1; | 2266 | uint32_t cpu_affinity_enabled :1; |
| 2265 | uint32_t disable_msix_handshake :1; | 2267 | uint32_t disable_msix_handshake :1; |
| 2266 | } flags; | 2268 | } flags; |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 0b6801fc6389..f61fb8d01330 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); | |||
| 324 | extern int | 324 | extern int |
| 325 | qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); | 325 | qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); |
| 326 | 326 | ||
| 327 | extern int qla2x00_get_data_rate(scsi_qla_host_t *); | ||
| 327 | /* | 328 | /* |
| 328 | * Global Function Prototypes in qla_isr.c source file. | 329 | * Global Function Prototypes in qla_isr.c source file. |
| 329 | */ | 330 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 73a793539d45..b4a0eac8f96d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
| 269 | vha->flags.online = 0; | 269 | vha->flags.online = 0; |
| 270 | ha->flags.chip_reset_done = 0; | 270 | ha->flags.chip_reset_done = 0; |
| 271 | vha->flags.reset_active = 0; | 271 | vha->flags.reset_active = 0; |
| 272 | ha->flags.pci_channel_io_perm_failure = 0; | ||
| 273 | ha->flags.eeh_busy = 0; | ||
| 272 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 274 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
| 273 | atomic_set(&vha->loop_state, LOOP_DOWN); | 275 | atomic_set(&vha->loop_state, LOOP_DOWN); |
| 274 | vha->device_flags = DFLG_NO_CABLE; | 276 | vha->device_flags = DFLG_NO_CABLE; |
| @@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) | |||
| 581 | uint32_t cnt; | 583 | uint32_t cnt; |
| 582 | uint16_t cmd; | 584 | uint16_t cmd; |
| 583 | 585 | ||
| 586 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
| 587 | return; | ||
| 588 | |||
| 584 | ha->isp_ops->disable_intrs(ha); | 589 | ha->isp_ops->disable_intrs(ha); |
| 585 | 590 | ||
| 586 | spin_lock_irqsave(&ha->hardware_lock, flags); | 591 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| @@ -786,6 +791,12 @@ void | |||
| 786 | qla24xx_reset_chip(scsi_qla_host_t *vha) | 791 | qla24xx_reset_chip(scsi_qla_host_t *vha) |
| 787 | { | 792 | { |
| 788 | struct qla_hw_data *ha = vha->hw; | 793 | struct qla_hw_data *ha = vha->hw; |
| 794 | |||
| 795 | if (pci_channel_offline(ha->pdev) && | ||
| 796 | ha->flags.pci_channel_io_perm_failure) { | ||
| 797 | return; | ||
| 798 | } | ||
| 799 | |||
| 789 | ha->isp_ops->disable_intrs(ha); | 800 | ha->isp_ops->disable_intrs(ha); |
| 790 | 801 | ||
| 791 | /* Perform RISC reset. */ | 802 | /* Perform RISC reset. */ |
| @@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
| 2266 | clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | 2277 | clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); |
| 2267 | clear_bit(RSCN_UPDATE, &vha->dpc_flags); | 2278 | clear_bit(RSCN_UPDATE, &vha->dpc_flags); |
| 2268 | 2279 | ||
| 2280 | qla2x00_get_data_rate(vha); | ||
| 2281 | |||
| 2269 | /* Determine what we need to do */ | 2282 | /* Determine what we need to do */ |
| 2270 | if (ha->current_topology == ISP_CFG_FL && | 2283 | if (ha->current_topology == ISP_CFG_FL && |
| 2271 | (test_bit(LOCAL_LOOP_UPDATE, &flags))) { | 2284 | (test_bit(LOCAL_LOOP_UPDATE, &flags))) { |
| @@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
| 3560 | /* Requeue all commands in outstanding command list. */ | 3573 | /* Requeue all commands in outstanding command list. */ |
| 3561 | qla2x00_abort_all_cmds(vha, DID_RESET << 16); | 3574 | qla2x00_abort_all_cmds(vha, DID_RESET << 16); |
| 3562 | 3575 | ||
| 3576 | if (unlikely(pci_channel_offline(ha->pdev) && | ||
| 3577 | ha->flags.pci_channel_io_perm_failure)) { | ||
| 3578 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | ||
| 3579 | status = 0; | ||
| 3580 | return status; | ||
| 3581 | } | ||
| 3582 | |||
| 3563 | ha->isp_ops->get_flash_version(vha, req->ring); | 3583 | ha->isp_ops->get_flash_version(vha, req->ring); |
| 3564 | 3584 | ||
| 3565 | ha->isp_ops->nvram_config(vha); | 3585 | ha->isp_ops->nvram_config(vha); |
| @@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) | |||
| 4458 | int ret, retries; | 4478 | int ret, retries; |
| 4459 | struct qla_hw_data *ha = vha->hw; | 4479 | struct qla_hw_data *ha = vha->hw; |
| 4460 | 4480 | ||
| 4481 | if (ha->flags.pci_channel_io_perm_failure) | ||
| 4482 | return; | ||
| 4461 | if (!IS_FWI2_CAPABLE(ha)) | 4483 | if (!IS_FWI2_CAPABLE(ha)) |
| 4462 | return; | 4484 | return; |
| 4463 | if (!ha->fw_major_version) | 4485 | if (!ha->fw_major_version) |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1692a883f4de..ffd0efdff40e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
| 152 | for (iter = 50; iter--; ) { | 152 | for (iter = 50; iter--; ) { |
| 153 | stat = RD_REG_DWORD(®->u.isp2300.host_status); | 153 | stat = RD_REG_DWORD(®->u.isp2300.host_status); |
| 154 | if (stat & HSR_RISC_PAUSED) { | 154 | if (stat & HSR_RISC_PAUSED) { |
| 155 | if (pci_channel_offline(ha->pdev)) | 155 | if (unlikely(pci_channel_offline(ha->pdev))) |
| 156 | break; | 156 | break; |
| 157 | 157 | ||
| 158 | hccr = RD_REG_WORD(®->hccr); | 158 | hccr = RD_REG_WORD(®->hccr); |
| @@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
| 1846 | reg = &ha->iobase->isp24; | 1846 | reg = &ha->iobase->isp24; |
| 1847 | status = 0; | 1847 | status = 0; |
| 1848 | 1848 | ||
| 1849 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
| 1850 | return IRQ_HANDLED; | ||
| 1851 | |||
| 1849 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1852 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 1850 | vha = pci_get_drvdata(ha->pdev); | 1853 | vha = pci_get_drvdata(ha->pdev); |
| 1851 | for (iter = 50; iter--; ) { | 1854 | for (iter = 50; iter--; ) { |
| 1852 | stat = RD_REG_DWORD(®->host_status); | 1855 | stat = RD_REG_DWORD(®->host_status); |
| 1853 | if (stat & HSRX_RISC_PAUSED) { | 1856 | if (stat & HSRX_RISC_PAUSED) { |
| 1854 | if (pci_channel_offline(ha->pdev)) | 1857 | if (unlikely(pci_channel_offline(ha->pdev))) |
| 1855 | break; | 1858 | break; |
| 1856 | 1859 | ||
| 1857 | hccr = RD_REG_DWORD(®->hccr); | 1860 | hccr = RD_REG_DWORD(®->hccr); |
| @@ -1992,7 +1995,7 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
| 1992 | do { | 1995 | do { |
| 1993 | stat = RD_REG_DWORD(®->host_status); | 1996 | stat = RD_REG_DWORD(®->host_status); |
| 1994 | if (stat & HSRX_RISC_PAUSED) { | 1997 | if (stat & HSRX_RISC_PAUSED) { |
| 1995 | if (pci_channel_offline(ha->pdev)) | 1998 | if (unlikely(pci_channel_offline(ha->pdev))) |
| 1996 | break; | 1999 | break; |
| 1997 | 2000 | ||
| 1998 | hccr = RD_REG_DWORD(®->hccr); | 2001 | hccr = RD_REG_DWORD(®->hccr); |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 05d595d9a7ef..056e4d4505f3 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
| 56 | 56 | ||
| 57 | DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); | 57 | DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); |
| 58 | 58 | ||
| 59 | if (ha->flags.pci_channel_io_perm_failure) { | ||
| 60 | DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " | ||
| 61 | "Exiting.\n", __func__, vha->host_no)); | ||
| 62 | return QLA_FUNCTION_TIMEOUT; | ||
| 63 | } | ||
| 64 | |||
| 59 | /* | 65 | /* |
| 60 | * Wait for active mailbox commands to finish by waiting at most tov | 66 | * Wait for active mailbox commands to finish by waiting at most tov |
| 61 | * seconds. This is to serialize actual issuing of mailbox cmds during | 67 | * seconds. This is to serialize actual issuing of mailbox cmds during |
| @@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
| 154 | /* Check for pending interrupts. */ | 160 | /* Check for pending interrupts. */ |
| 155 | qla2x00_poll(ha->rsp_q_map[0]); | 161 | qla2x00_poll(ha->rsp_q_map[0]); |
| 156 | 162 | ||
| 157 | if (command != MBC_LOAD_RISC_RAM_EXTENDED && | 163 | if (!ha->flags.mbox_int && |
| 158 | !ha->flags.mbox_int) | 164 | !(IS_QLA2200(ha) && |
| 165 | command == MBC_LOAD_RISC_RAM_EXTENDED)) | ||
| 159 | msleep(10); | 166 | msleep(10); |
| 160 | } /* while */ | 167 | } /* while */ |
| 168 | DEBUG17(qla_printk(KERN_WARNING, ha, | ||
| 169 | "Waited %d sec\n", | ||
| 170 | (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); | ||
| 161 | } | 171 | } |
| 162 | 172 | ||
| 163 | /* Check whether we timed out */ | 173 | /* Check whether we timed out */ |
| @@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
| 227 | 237 | ||
| 228 | if (rval == QLA_FUNCTION_TIMEOUT && | 238 | if (rval == QLA_FUNCTION_TIMEOUT && |
| 229 | mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { | 239 | mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { |
| 230 | if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { | 240 | if (!io_lock_on || (mcp->flags & IOCTL_CMD) || |
| 241 | ha->flags.eeh_busy) { | ||
| 231 | /* not in dpc. schedule it for dpc to take over. */ | 242 | /* not in dpc. schedule it for dpc to take over. */ |
| 232 | DEBUG(printk("%s(%ld): timeout schedule " | 243 | DEBUG(printk("%s(%ld): timeout schedule " |
| 233 | "isp_abort_needed.\n", __func__, | 244 | "isp_abort_needed.\n", __func__, |
| @@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
| 237 | base_vha->host_no)); | 248 | base_vha->host_no)); |
| 238 | qla_printk(KERN_WARNING, ha, | 249 | qla_printk(KERN_WARNING, ha, |
| 239 | "Mailbox command timeout occurred. Scheduling ISP " | 250 | "Mailbox command timeout occurred. Scheduling ISP " |
| 240 | "abort.\n"); | 251 | "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy); |
| 241 | set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); | 252 | set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); |
| 242 | qla2xxx_wake_dpc(vha); | 253 | qla2xxx_wake_dpc(vha); |
| 243 | } else if (!abort_active) { | 254 | } else if (!abort_active) { |
| @@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, | |||
| 2530 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2541 | if (!IS_FWI2_CAPABLE(vha->hw)) |
| 2531 | return QLA_FUNCTION_FAILED; | 2542 | return QLA_FUNCTION_FAILED; |
| 2532 | 2543 | ||
| 2544 | if (unlikely(pci_channel_offline(vha->hw->pdev))) | ||
| 2545 | return QLA_FUNCTION_FAILED; | ||
| 2546 | |||
| 2533 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | 2547 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); |
| 2534 | 2548 | ||
| 2535 | mcp->mb[0] = MBC_TRACE_CONTROL; | 2549 | mcp->mb[0] = MBC_TRACE_CONTROL; |
| @@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha) | |||
| 2565 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2579 | if (!IS_FWI2_CAPABLE(vha->hw)) |
| 2566 | return QLA_FUNCTION_FAILED; | 2580 | return QLA_FUNCTION_FAILED; |
| 2567 | 2581 | ||
| 2582 | if (unlikely(pci_channel_offline(vha->hw->pdev))) | ||
| 2583 | return QLA_FUNCTION_FAILED; | ||
| 2584 | |||
| 2568 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | 2585 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); |
| 2569 | 2586 | ||
| 2570 | mcp->mb[0] = MBC_TRACE_CONTROL; | 2587 | mcp->mb[0] = MBC_TRACE_CONTROL; |
| @@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, | |||
| 2595 | if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) | 2612 | if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) |
| 2596 | return QLA_FUNCTION_FAILED; | 2613 | return QLA_FUNCTION_FAILED; |
| 2597 | 2614 | ||
| 2615 | if (unlikely(pci_channel_offline(vha->hw->pdev))) | ||
| 2616 | return QLA_FUNCTION_FAILED; | ||
| 2617 | |||
| 2598 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | 2618 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); |
| 2599 | 2619 | ||
| 2600 | mcp->mb[0] = MBC_TRACE_CONTROL; | 2620 | mcp->mb[0] = MBC_TRACE_CONTROL; |
| @@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) | |||
| 2639 | if (!IS_FWI2_CAPABLE(vha->hw)) | 2659 | if (!IS_FWI2_CAPABLE(vha->hw)) |
| 2640 | return QLA_FUNCTION_FAILED; | 2660 | return QLA_FUNCTION_FAILED; |
| 2641 | 2661 | ||
| 2662 | if (unlikely(pci_channel_offline(vha->hw->pdev))) | ||
| 2663 | return QLA_FUNCTION_FAILED; | ||
| 2664 | |||
| 2642 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | 2665 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); |
| 2643 | 2666 | ||
| 2644 | mcp->mb[0] = MBC_TRACE_CONTROL; | 2667 | mcp->mb[0] = MBC_TRACE_CONTROL; |
| @@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) | |||
| 3643 | 3666 | ||
| 3644 | return rval; | 3667 | return rval; |
| 3645 | } | 3668 | } |
| 3669 | |||
| 3670 | int | ||
| 3671 | qla2x00_get_data_rate(scsi_qla_host_t *vha) | ||
| 3672 | { | ||
| 3673 | int rval; | ||
| 3674 | mbx_cmd_t mc; | ||
| 3675 | mbx_cmd_t *mcp = &mc; | ||
| 3676 | struct qla_hw_data *ha = vha->hw; | ||
| 3677 | |||
| 3678 | if (!IS_FWI2_CAPABLE(ha)) | ||
| 3679 | return QLA_FUNCTION_FAILED; | ||
| 3680 | |||
| 3681 | DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); | ||
| 3682 | |||
| 3683 | mcp->mb[0] = MBC_DATA_RATE; | ||
| 3684 | mcp->mb[1] = 0; | ||
| 3685 | mcp->out_mb = MBX_1|MBX_0; | ||
| 3686 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | ||
| 3687 | mcp->tov = MBX_TOV_SECONDS; | ||
| 3688 | mcp->flags = 0; | ||
| 3689 | rval = qla2x00_mailbox_command(vha, mcp); | ||
| 3690 | if (rval != QLA_SUCCESS) { | ||
| 3691 | DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", | ||
| 3692 | __func__, vha->host_no, rval, mcp->mb[0])); | ||
| 3693 | } else { | ||
| 3694 | DEBUG11(printk(KERN_INFO | ||
| 3695 | "%s(%ld): done.\n", __func__, vha->host_no)); | ||
| 3696 | if (mcp->mb[1] != 0x7) | ||
| 3697 | ha->link_data_rate = mcp->mb[1]; | ||
| 3698 | } | ||
| 3699 | |||
| 3700 | return rval; | ||
| 3701 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 2a4c7f4e7b69..b901aa267e7d 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -639,8 +639,10 @@ static void qla_do_work(struct work_struct *work) | |||
| 639 | struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); | 639 | struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); |
| 640 | struct scsi_qla_host *vha; | 640 | struct scsi_qla_host *vha; |
| 641 | 641 | ||
| 642 | spin_lock_irq(&rsp->hw->hardware_lock); | ||
| 642 | vha = qla25xx_get_host(rsp); | 643 | vha = qla25xx_get_host(rsp); |
| 643 | qla24xx_process_response_queue(vha, rsp); | 644 | qla24xx_process_response_queue(vha, rsp); |
| 645 | spin_unlock_irq(&rsp->hw->hardware_lock); | ||
| 644 | } | 646 | } |
| 645 | 647 | ||
| 646 | /* create response queue */ | 648 | /* create response queue */ |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2f873d237325..209f50e788a1 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
| 475 | srb_t *sp; | 475 | srb_t *sp; |
| 476 | int rval; | 476 | int rval; |
| 477 | 477 | ||
| 478 | if (unlikely(pci_channel_offline(ha->pdev))) { | 478 | if (ha->flags.eeh_busy) { |
| 479 | if (ha->pdev->error_state == pci_channel_io_frozen) | 479 | if (ha->flags.pci_channel_io_perm_failure) |
| 480 | cmd->result = DID_REQUEUE << 16; | ||
| 481 | else | ||
| 482 | cmd->result = DID_NO_CONNECT << 16; | 480 | cmd->result = DID_NO_CONNECT << 16; |
| 481 | else | ||
| 482 | cmd->result = DID_REQUEUE << 16; | ||
| 483 | goto qc24_fail_command; | 483 | goto qc24_fail_command; |
| 484 | } | 484 | } |
| 485 | 485 | ||
| @@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) | |||
| 552 | #define ABORT_POLLING_PERIOD 1000 | 552 | #define ABORT_POLLING_PERIOD 1000 |
| 553 | #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) | 553 | #define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) |
| 554 | unsigned long wait_iter = ABORT_WAIT_ITER; | 554 | unsigned long wait_iter = ABORT_WAIT_ITER; |
| 555 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | ||
| 556 | struct qla_hw_data *ha = vha->hw; | ||
| 555 | int ret = QLA_SUCCESS; | 557 | int ret = QLA_SUCCESS; |
| 556 | 558 | ||
| 559 | if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { | ||
| 560 | DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); | ||
| 561 | return ret; | ||
| 562 | } | ||
| 563 | |||
| 557 | while (CMD_SP(cmd) && wait_iter--) { | 564 | while (CMD_SP(cmd) && wait_iter--) { |
| 558 | msleep(ABORT_POLLING_PERIOD); | 565 | msleep(ABORT_POLLING_PERIOD); |
| 559 | } | 566 | } |
| @@ -1810,6 +1817,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1810 | 1817 | ||
| 1811 | /* Set ISP-type information. */ | 1818 | /* Set ISP-type information. */ |
| 1812 | qla2x00_set_isp_flags(ha); | 1819 | qla2x00_set_isp_flags(ha); |
| 1820 | |||
| 1821 | /* Set EEH reset type to fundamental if required by hba */ | ||
| 1822 | if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { | ||
| 1823 | pdev->needs_freset = 1; | ||
| 1824 | pci_save_state(pdev); | ||
| 1825 | } | ||
| 1826 | |||
| 1813 | /* Configure PCI I/O space */ | 1827 | /* Configure PCI I/O space */ |
| 1814 | ret = qla2x00_iospace_config(ha); | 1828 | ret = qla2x00_iospace_config(ha); |
| 1815 | if (ret) | 1829 | if (ret) |
| @@ -2174,6 +2188,24 @@ qla2x00_free_device(scsi_qla_host_t *vha) | |||
| 2174 | { | 2188 | { |
| 2175 | struct qla_hw_data *ha = vha->hw; | 2189 | struct qla_hw_data *ha = vha->hw; |
| 2176 | 2190 | ||
| 2191 | qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); | ||
| 2192 | |||
| 2193 | /* Disable timer */ | ||
| 2194 | if (vha->timer_active) | ||
| 2195 | qla2x00_stop_timer(vha); | ||
| 2196 | |||
| 2197 | /* Kill the kernel thread for this host */ | ||
| 2198 | if (ha->dpc_thread) { | ||
| 2199 | struct task_struct *t = ha->dpc_thread; | ||
| 2200 | |||
| 2201 | /* | ||
| 2202 | * qla2xxx_wake_dpc checks for ->dpc_thread | ||
| 2203 | * so we need to zero it out. | ||
| 2204 | */ | ||
| 2205 | ha->dpc_thread = NULL; | ||
| 2206 | kthread_stop(t); | ||
| 2207 | } | ||
| 2208 | |||
| 2177 | qla25xx_delete_queues(vha); | 2209 | qla25xx_delete_queues(vha); |
| 2178 | 2210 | ||
| 2179 | if (ha->flags.fce_enabled) | 2211 | if (ha->flags.fce_enabled) |
| @@ -2185,6 +2217,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) | |||
| 2185 | /* Stop currently executing firmware. */ | 2217 | /* Stop currently executing firmware. */ |
| 2186 | qla2x00_try_to_stop_firmware(vha); | 2218 | qla2x00_try_to_stop_firmware(vha); |
| 2187 | 2219 | ||
| 2220 | vha->flags.online = 0; | ||
| 2221 | |||
| 2188 | /* turn-off interrupts on the card */ | 2222 | /* turn-off interrupts on the card */ |
| 2189 | if (ha->interrupts_on) | 2223 | if (ha->interrupts_on) |
| 2190 | ha->isp_ops->disable_intrs(ha); | 2224 | ha->isp_ops->disable_intrs(ha); |
| @@ -2859,6 +2893,13 @@ qla2x00_do_dpc(void *data) | |||
| 2859 | if (!base_vha->flags.init_done) | 2893 | if (!base_vha->flags.init_done) |
| 2860 | continue; | 2894 | continue; |
| 2861 | 2895 | ||
| 2896 | if (ha->flags.eeh_busy) { | ||
| 2897 | DEBUG17(qla_printk(KERN_WARNING, ha, | ||
| 2898 | "qla2x00_do_dpc: dpc_flags: %lx\n", | ||
| 2899 | base_vha->dpc_flags)); | ||
| 2900 | continue; | ||
| 2901 | } | ||
| 2902 | |||
| 2862 | DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no)); | 2903 | DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no)); |
| 2863 | 2904 | ||
| 2864 | ha->dpc_active = 1; | 2905 | ha->dpc_active = 1; |
| @@ -3049,8 +3090,13 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
| 3049 | int index; | 3090 | int index; |
| 3050 | srb_t *sp; | 3091 | srb_t *sp; |
| 3051 | int t; | 3092 | int t; |
| 3093 | uint16_t w; | ||
| 3052 | struct qla_hw_data *ha = vha->hw; | 3094 | struct qla_hw_data *ha = vha->hw; |
| 3053 | struct req_que *req; | 3095 | struct req_que *req; |
| 3096 | |||
| 3097 | /* Hardware read to raise pending EEH errors during mailbox waits. */ | ||
| 3098 | if (!pci_channel_offline(ha->pdev)) | ||
| 3099 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); | ||
| 3054 | /* | 3100 | /* |
| 3055 | * Ports - Port down timer. | 3101 | * Ports - Port down timer. |
| 3056 | * | 3102 | * |
| @@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void) | |||
| 3252 | static pci_ers_result_t | 3298 | static pci_ers_result_t |
| 3253 | qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | 3299 | qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
| 3254 | { | 3300 | { |
| 3255 | scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); | 3301 | scsi_qla_host_t *vha = pci_get_drvdata(pdev); |
| 3302 | struct qla_hw_data *ha = vha->hw; | ||
| 3303 | |||
| 3304 | DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", | ||
| 3305 | state)); | ||
| 3256 | 3306 | ||
| 3257 | switch (state) { | 3307 | switch (state) { |
| 3258 | case pci_channel_io_normal: | 3308 | case pci_channel_io_normal: |
| 3309 | ha->flags.eeh_busy = 0; | ||
| 3259 | return PCI_ERS_RESULT_CAN_RECOVER; | 3310 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 3260 | case pci_channel_io_frozen: | 3311 | case pci_channel_io_frozen: |
| 3312 | ha->flags.eeh_busy = 1; | ||
| 3261 | pci_disable_device(pdev); | 3313 | pci_disable_device(pdev); |
| 3262 | return PCI_ERS_RESULT_NEED_RESET; | 3314 | return PCI_ERS_RESULT_NEED_RESET; |
| 3263 | case pci_channel_io_perm_failure: | 3315 | case pci_channel_io_perm_failure: |
| 3264 | qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); | 3316 | ha->flags.pci_channel_io_perm_failure = 1; |
| 3317 | qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); | ||
| 3265 | return PCI_ERS_RESULT_DISCONNECT; | 3318 | return PCI_ERS_RESULT_DISCONNECT; |
| 3266 | } | 3319 | } |
| 3267 | return PCI_ERS_RESULT_NEED_RESET; | 3320 | return PCI_ERS_RESULT_NEED_RESET; |
| @@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) | |||
| 3312 | struct qla_hw_data *ha = base_vha->hw; | 3365 | struct qla_hw_data *ha = base_vha->hw; |
| 3313 | int rc; | 3366 | int rc; |
| 3314 | 3367 | ||
| 3368 | DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); | ||
| 3369 | |||
| 3315 | if (ha->mem_only) | 3370 | if (ha->mem_only) |
| 3316 | rc = pci_enable_device_mem(pdev); | 3371 | rc = pci_enable_device_mem(pdev); |
| 3317 | else | 3372 | else |
| @@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) | |||
| 3320 | if (rc) { | 3375 | if (rc) { |
| 3321 | qla_printk(KERN_WARNING, ha, | 3376 | qla_printk(KERN_WARNING, ha, |
| 3322 | "Can't re-enable PCI device after reset.\n"); | 3377 | "Can't re-enable PCI device after reset.\n"); |
| 3323 | |||
| 3324 | return ret; | 3378 | return ret; |
| 3325 | } | 3379 | } |
| 3326 | pci_set_master(pdev); | ||
| 3327 | 3380 | ||
| 3328 | if (ha->isp_ops->pci_config(base_vha)) | 3381 | if (ha->isp_ops->pci_config(base_vha)) |
| 3329 | return ret; | 3382 | return ret; |
| 3330 | 3383 | ||
| 3384 | #ifdef QL_DEBUG_LEVEL_17 | ||
| 3385 | { | ||
| 3386 | uint8_t b; | ||
| 3387 | uint32_t i; | ||
| 3388 | |||
| 3389 | printk("slot_reset_1: "); | ||
| 3390 | for (i = 0; i < 256; i++) { | ||
| 3391 | pci_read_config_byte(ha->pdev, i, &b); | ||
| 3392 | printk("%s%02x", (i%16) ? " " : "\n", b); | ||
| 3393 | } | ||
| 3394 | printk("\n"); | ||
| 3395 | } | ||
| 3396 | #endif | ||
| 3331 | set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); | 3397 | set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); |
| 3332 | if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) | 3398 | if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) |
| 3333 | ret = PCI_ERS_RESULT_RECOVERED; | 3399 | ret = PCI_ERS_RESULT_RECOVERED; |
| 3334 | clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); | 3400 | clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); |
| 3335 | 3401 | ||
| 3402 | DEBUG17(qla_printk(KERN_WARNING, ha, | ||
| 3403 | "slot_reset-return:ret=%x\n", ret)); | ||
| 3404 | |||
| 3336 | return ret; | 3405 | return ret; |
| 3337 | } | 3406 | } |
| 3338 | 3407 | ||
| @@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev) | |||
| 3343 | struct qla_hw_data *ha = base_vha->hw; | 3412 | struct qla_hw_data *ha = base_vha->hw; |
| 3344 | int ret; | 3413 | int ret; |
| 3345 | 3414 | ||
| 3415 | DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); | ||
| 3416 | |||
| 3346 | ret = qla2x00_wait_for_hba_online(base_vha); | 3417 | ret = qla2x00_wait_for_hba_online(base_vha); |
| 3347 | if (ret != QLA_SUCCESS) { | 3418 | if (ret != QLA_SUCCESS) { |
| 3348 | qla_printk(KERN_ERR, ha, | 3419 | qla_printk(KERN_ERR, ha, |
| 3349 | "the device failed to resume I/O " | 3420 | "the device failed to resume I/O " |
| 3350 | "from slot/link_reset"); | 3421 | "from slot/link_reset"); |
| 3351 | } | 3422 | } |
| 3423 | |||
| 3424 | ha->flags.eeh_busy = 0; | ||
| 3425 | |||
| 3352 | pci_cleanup_aer_uncorrect_error_status(pdev); | 3426 | pci_cleanup_aer_uncorrect_error_status(pdev); |
| 3353 | } | 3427 | } |
| 3354 | 3428 | ||
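The qla2xxx hunks above wire PCI error recovery (EEH) into the driver: error_detected() latches ha->flags.eeh_busy on a frozen channel, the DPC loop skips its work while the flag is set, and pci_resume() clears it again. The timer additionally issues a harmless config-space read so that a pending frozen-channel condition is surfaced even while the driver is only waiting on mailbox commands. A minimal sketch of that timer-side check follows; my_dev_timer_check and the eeh_busy parameter are illustrative names, not qla2xxx structures.

    #include <linux/pci.h>

    /* Sketch: poke config space so the platform raises any pending EEH event,
     * then skip routine housekeeping while recovery is in progress. */
    static void my_dev_timer_check(struct pci_dev *pdev, bool *eeh_busy)
    {
            u16 vendor;

            if (!pci_channel_offline(pdev))
                    pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);

            if (*eeh_busy)
                    return;         /* resume after the pci_resume() callback clears the flag */

            /* ... normal per-tick work ... */
    }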
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index c482220f7eed..a65dd95507c6 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Driver version | 8 | * Driver version |
| 9 | */ | 9 | */ |
| 10 | #define QLA2XXX_VERSION "8.03.01-k8" | 10 | #define QLA2XXX_VERSION "8.03.01-k9" |
| 11 | 11 | ||
| 12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
| 13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 3058bb1aff95..fd7b15be7640 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
| @@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
| 623 | } | 623 | } |
| 624 | break; | 624 | break; |
| 625 | case INQUIRY: | 625 | case INQUIRY: |
| 626 | if (lun >= host->max_lun) { | ||
| 627 | cmd->result = DID_NO_CONNECT << 16; | ||
| 628 | done(cmd); | ||
| 629 | return 0; | ||
| 630 | } | ||
| 626 | if (id != host->max_id - 1) | 631 | if (id != host->max_id - 1) |
| 627 | break; | 632 | break; |
| 628 | if (!lun && !cmd->device->channel && | 633 | if (!lun && !cmd->device->channel && |
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c index 1e3d19397a59..8681f1345056 100644 --- a/drivers/serial/21285.c +++ b/drivers/serial/21285.c | |||
| @@ -58,7 +58,7 @@ static const char serial21285_name[] = "Footbridge UART"; | |||
| 58 | static void serial21285_stop_tx(struct uart_port *port) | 58 | static void serial21285_stop_tx(struct uart_port *port) |
| 59 | { | 59 | { |
| 60 | if (tx_enabled(port)) { | 60 | if (tx_enabled(port)) { |
| 61 | disable_irq(IRQ_CONTX); | 61 | disable_irq_nosync(IRQ_CONTX); |
| 62 | tx_enabled(port) = 0; | 62 | tx_enabled(port) = 0; |
| 63 | } | 63 | } |
| 64 | } | 64 | } |
| @@ -74,7 +74,7 @@ static void serial21285_start_tx(struct uart_port *port) | |||
| 74 | static void serial21285_stop_rx(struct uart_port *port) | 74 | static void serial21285_stop_rx(struct uart_port *port) |
| 75 | { | 75 | { |
| 76 | if (rx_enabled(port)) { | 76 | if (rx_enabled(port)) { |
| 77 | disable_irq(IRQ_CONRX); | 77 | disable_irq_nosync(IRQ_CONRX); |
| 78 | rx_enabled(port) = 0; | 78 | rx_enabled(port) = 0; |
| 79 | } | 79 | } |
| 80 | } | 80 | } |
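The 21285 change swaps disable_irq() for disable_irq_nosync() in the stop paths, presumably because these can be reached from the interrupt path with the port lock held: disable_irq() waits for any running handler of that IRQ to complete, which would deadlock there, while the _nosync variant only masks the line and returns. A generic illustration of the difference (not the Footbridge code):

    #include <linux/interrupt.h>

    static void stop_line(unsigned int irq, bool called_from_handler)
    {
            if (called_from_handler)
                    disable_irq_nosync(irq);   /* mask only; safe from within the handler */
            else
                    disable_irq(irq);          /* mask and wait for running handlers */
    }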
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index fc413f0f8dd2..0ee7239c5d69 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
| @@ -819,6 +819,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 819 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), | 819 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), |
| 820 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), | 820 | PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), |
| 821 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ | 821 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ |
| 822 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */ | ||
| 822 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ | 823 | PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ |
| 823 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ | 824 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ |
| 824 | PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ | 825 | PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ |
| @@ -827,7 +828,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 827 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), | 828 | PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), |
| 828 | PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), | 829 | PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), |
| 829 | PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), | 830 | PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), |
| 830 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), | 831 | PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"), |
| 831 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), | 832 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), |
| 832 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83), | 833 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83), |
| 833 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490), | 834 | PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490), |
| @@ -861,6 +862,18 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 861 | }; | 862 | }; |
| 862 | MODULE_DEVICE_TABLE(pcmcia, serial_ids); | 863 | MODULE_DEVICE_TABLE(pcmcia, serial_ids); |
| 863 | 864 | ||
| 865 | MODULE_FIRMWARE("cis/PCMLM28.cis"); | ||
| 866 | MODULE_FIRMWARE("cis/DP83903.cis"); | ||
| 867 | MODULE_FIRMWARE("cis/3CCFEM556.cis"); | ||
| 868 | MODULE_FIRMWARE("cis/3CXEM556.cis"); | ||
| 869 | MODULE_FIRMWARE("cis/SW_8xx_SER.cis"); | ||
| 870 | MODULE_FIRMWARE("cis/SW_7xx_SER.cis"); | ||
| 871 | MODULE_FIRMWARE("cis/SW_555_SER.cis"); | ||
| 872 | MODULE_FIRMWARE("cis/MT5634ZLX.cis"); | ||
| 873 | MODULE_FIRMWARE("cis/COMpad2.cis"); | ||
| 874 | MODULE_FIRMWARE("cis/COMpad4.cis"); | ||
| 875 | MODULE_FIRMWARE("cis/RS-COM-2P.cis"); | ||
| 876 | |||
| 864 | static struct pcmcia_driver serial_cs_driver = { | 877 | static struct pcmcia_driver serial_cs_driver = { |
| 865 | .owner = THIS_MODULE, | 878 | .owner = THIS_MODULE, |
| 866 | .drv = { | 879 | .drv = { |
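The serial_cs hunks add an AC860 entry, bring the GLOBETROTTER CIS path in line with the cis/ prefix used by the other entries, and declare every CIS override with MODULE_FIRMWARE() so that modinfo and initramfs tooling know which files to ship. The PCMCIA core fetches these blobs through the regular firmware loader; a hedged sketch of that mechanism, with a made-up file name:

    #include <linux/firmware.h>
    #include <linux/module.h>

    MODULE_FIRMWARE("cis/EXAMPLE.cis");     /* advertised to modinfo/initramfs tools */

    /* Illustrative only: how such a blob is pulled in at runtime. */
    static int load_cis_override(struct device *dev)
    {
            const struct firmware *fw;
            int ret = request_firmware(&fw, "cis/EXAMPLE.cis", dev);

            if (ret)
                    return ret;
            /* ... parse fw->data over fw->size bytes ... */
            release_firmware(fw);
            return 0;
    }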
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c index 409ca9643528..a3a7f8938175 100644 --- a/drivers/video/backlight/omap1_bl.c +++ b/drivers/video/backlight/omap1_bl.c | |||
| @@ -139,8 +139,6 @@ static int omapbl_probe(struct platform_device *pdev) | |||
| 139 | if (!pdata) | 139 | if (!pdata) |
| 140 | return -ENXIO; | 140 | return -ENXIO; |
| 141 | 141 | ||
| 142 | omapbl_ops.check_fb = pdata->check_fb; | ||
| 143 | |||
| 144 | bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL); | 142 | bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL); |
| 145 | if (unlikely(!bl)) | 143 | if (unlikely(!bl)) |
| 146 | return -ENOMEM; | 144 | return -ENOMEM; |
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index c7c6455f1fa8..e192b058a688 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c | |||
| @@ -189,11 +189,6 @@ static struct { | |||
| 189 | struct omapfb_color_key color_key; | 189 | struct omapfb_color_key color_key; |
| 190 | } dispc; | 190 | } dispc; |
| 191 | 191 | ||
| 192 | static struct platform_device omapdss_device = { | ||
| 193 | .name = "omapdss", | ||
| 194 | .id = -1, | ||
| 195 | }; | ||
| 196 | |||
| 197 | static void enable_lcd_clocks(int enable); | 192 | static void enable_lcd_clocks(int enable); |
| 198 | 193 | ||
| 199 | static void inline dispc_write_reg(int idx, u32 val) | 194 | static void inline dispc_write_reg(int idx, u32 val) |
| @@ -920,20 +915,20 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev) | |||
| 920 | 915 | ||
| 921 | static int get_dss_clocks(void) | 916 | static int get_dss_clocks(void) |
| 922 | { | 917 | { |
| 923 | dispc.dss_ick = clk_get(&omapdss_device.dev, "ick"); | 918 | dispc.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick"); |
| 924 | if (IS_ERR(dispc.dss_ick)) { | 919 | if (IS_ERR(dispc.dss_ick)) { |
| 925 | dev_err(dispc.fbdev->dev, "can't get ick\n"); | 920 | dev_err(dispc.fbdev->dev, "can't get ick\n"); |
| 926 | return PTR_ERR(dispc.dss_ick); | 921 | return PTR_ERR(dispc.dss_ick); |
| 927 | } | 922 | } |
| 928 | 923 | ||
| 929 | dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck"); | 924 | dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck"); |
| 930 | if (IS_ERR(dispc.dss1_fck)) { | 925 | if (IS_ERR(dispc.dss1_fck)) { |
| 931 | dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); | 926 | dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); |
| 932 | clk_put(dispc.dss_ick); | 927 | clk_put(dispc.dss_ick); |
| 933 | return PTR_ERR(dispc.dss1_fck); | 928 | return PTR_ERR(dispc.dss1_fck); |
| 934 | } | 929 | } |
| 935 | 930 | ||
| 936 | dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck"); | 931 | dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck"); |
| 937 | if (IS_ERR(dispc.dss_54m_fck)) { | 932 | if (IS_ERR(dispc.dss_54m_fck)) { |
| 938 | dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); | 933 | dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); |
| 939 | clk_put(dispc.dss_ick); | 934 | clk_put(dispc.dss_ick); |
| @@ -1385,12 +1380,6 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode, | |||
| 1385 | int skip_init = 0; | 1380 | int skip_init = 0; |
| 1386 | int i; | 1381 | int i; |
| 1387 | 1382 | ||
| 1388 | r = platform_device_register(&omapdss_device); | ||
| 1389 | if (r) { | ||
| 1390 | dev_err(fbdev->dev, "can't register omapdss device\n"); | ||
| 1391 | return r; | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | memset(&dispc, 0, sizeof(dispc)); | 1383 | memset(&dispc, 0, sizeof(dispc)); |
| 1395 | 1384 | ||
| 1396 | dispc.base = ioremap(DISPC_BASE, SZ_1K); | 1385 | dispc.base = ioremap(DISPC_BASE, SZ_1K); |
| @@ -1534,7 +1523,6 @@ static void omap_dispc_cleanup(void) | |||
| 1534 | free_irq(INT_24XX_DSS_IRQ, dispc.fbdev); | 1523 | free_irq(INT_24XX_DSS_IRQ, dispc.fbdev); |
| 1535 | put_dss_clocks(); | 1524 | put_dss_clocks(); |
| 1536 | iounmap(dispc.base); | 1525 | iounmap(dispc.base); |
| 1537 | platform_device_unregister(&omapdss_device); | ||
| 1538 | } | 1526 | } |
| 1539 | 1527 | ||
| 1540 | const struct lcd_ctrl omap2_int_ctrl = { | 1528 | const struct lcd_ctrl omap2_int_ctrl = { |
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c index a9007c5d1fad..4802419da83b 100644 --- a/drivers/video/omap/lcd_htcherald.c +++ b/drivers/video/omap/lcd_htcherald.c | |||
| @@ -115,12 +115,12 @@ struct platform_driver htcherald_panel_driver = { | |||
| 115 | }, | 115 | }, |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | static int htcherald_panel_drv_init(void) | 118 | static int __init htcherald_panel_drv_init(void) |
| 119 | { | 119 | { |
| 120 | return platform_driver_register(&htcherald_panel_driver); | 120 | return platform_driver_register(&htcherald_panel_driver); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static void htcherald_panel_drv_cleanup(void) | 123 | static void __exit htcherald_panel_drv_cleanup(void) |
| 124 | { | 124 | { |
| 125 | platform_driver_unregister(&htcherald_panel_driver); | 125 | platform_driver_unregister(&htcherald_panel_driver); |
| 126 | } | 126 | } |
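Marking the htcherald registration helpers __init/__exit lets the kernel free the init text once boot is done and drop the exit path entirely when the driver is built in; that is safe here because they are only referenced via module_init/module_exit. The usual pattern, with placeholder names:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_driver example_driver = {
            .driver = { .name = "example" },        /* illustrative driver */
    };

    static int __init example_init(void)            /* discarded after init */
    {
            return platform_driver_register(&example_driver);
    }

    static void __exit example_exit(void)           /* dropped when built in */
    {
            platform_driver_unregister(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);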
diff --git a/drivers/video/omap/omapfb.h b/drivers/video/omap/omapfb.h index 46e4714014e8..af3c9e571ec3 100644 --- a/drivers/video/omap/omapfb.h +++ b/drivers/video/omap/omapfb.h | |||
| @@ -203,6 +203,8 @@ struct omapfb_device { | |||
| 203 | 203 | ||
| 204 | struct omapfb_mem_desc mem_desc; | 204 | struct omapfb_mem_desc mem_desc; |
| 205 | struct fb_info *fb_info[OMAPFB_PLANE_NUM]; | 205 | struct fb_info *fb_info[OMAPFB_PLANE_NUM]; |
| 206 | |||
| 207 | struct platform_device *dssdev; /* dummy dev for clocks */ | ||
| 206 | }; | 208 | }; |
| 207 | 209 | ||
| 208 | #ifdef CONFIG_ARCH_OMAP1 | 210 | #ifdef CONFIG_ARCH_OMAP1 |
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index c7f59a5ccdbc..2c4f470fa086 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c | |||
| @@ -83,6 +83,19 @@ static struct caps_table_struct color_caps[] = { | |||
| 83 | { 1 << OMAPFB_COLOR_YUY422, "YUY422", }, | 83 | { 1 << OMAPFB_COLOR_YUY422, "YUY422", }, |
| 84 | }; | 84 | }; |
| 85 | 85 | ||
| 86 | static void omapdss_release(struct device *dev) | ||
| 87 | { | ||
| 88 | } | ||
| 89 | |||
| 90 | /* dummy device for clocks */ | ||
| 91 | static struct platform_device omapdss_device = { | ||
| 92 | .name = "omapdss", | ||
| 93 | .id = -1, | ||
| 94 | .dev = { | ||
| 95 | .release = omapdss_release, | ||
| 96 | }, | ||
| 97 | }; | ||
| 98 | |||
| 86 | /* | 99 | /* |
| 87 | * --------------------------------------------------------------------------- | 100 | * --------------------------------------------------------------------------- |
| 88 | * LCD panel | 101 | * LCD panel |
| @@ -1700,6 +1713,7 @@ static int omapfb_do_probe(struct platform_device *pdev, | |||
| 1700 | 1713 | ||
| 1701 | fbdev->dev = &pdev->dev; | 1714 | fbdev->dev = &pdev->dev; |
| 1702 | fbdev->panel = panel; | 1715 | fbdev->panel = panel; |
| 1716 | fbdev->dssdev = &omapdss_device; | ||
| 1703 | platform_set_drvdata(pdev, fbdev); | 1717 | platform_set_drvdata(pdev, fbdev); |
| 1704 | 1718 | ||
| 1705 | mutex_init(&fbdev->rqueue_mutex); | 1719 | mutex_init(&fbdev->rqueue_mutex); |
| @@ -1814,8 +1828,16 @@ cleanup: | |||
| 1814 | 1828 | ||
| 1815 | static int omapfb_probe(struct platform_device *pdev) | 1829 | static int omapfb_probe(struct platform_device *pdev) |
| 1816 | { | 1830 | { |
| 1831 | int r; | ||
| 1832 | |||
| 1817 | BUG_ON(fbdev_pdev != NULL); | 1833 | BUG_ON(fbdev_pdev != NULL); |
| 1818 | 1834 | ||
| 1835 | r = platform_device_register(&omapdss_device); | ||
| 1836 | if (r) { | ||
| 1837 | dev_err(&pdev->dev, "can't register omapdss device\n"); | ||
| 1838 | return r; | ||
| 1839 | } | ||
| 1840 | |||
| 1819 | /* Delay actual initialization until the LCD is registered */ | 1841 | /* Delay actual initialization until the LCD is registered */ |
| 1820 | fbdev_pdev = pdev; | 1842 | fbdev_pdev = pdev; |
| 1821 | if (fbdev_panel != NULL) | 1843 | if (fbdev_panel != NULL) |
| @@ -1843,6 +1865,9 @@ static int omapfb_remove(struct platform_device *pdev) | |||
| 1843 | fbdev->state = OMAPFB_DISABLED; | 1865 | fbdev->state = OMAPFB_DISABLED; |
| 1844 | omapfb_free_resources(fbdev, saved_state); | 1866 | omapfb_free_resources(fbdev, saved_state); |
| 1845 | 1867 | ||
| 1868 | platform_device_unregister(&omapdss_device); | ||
| 1869 | fbdev->dssdev = NULL; | ||
| 1870 | |||
| 1846 | return 0; | 1871 | return 0; |
| 1847 | } | 1872 | } |
| 1848 | 1873 | ||
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c index fed7b1bda19c..1162603c72e5 100644 --- a/drivers/video/omap/rfbi.c +++ b/drivers/video/omap/rfbi.c | |||
| @@ -83,13 +83,13 @@ static inline u32 rfbi_read_reg(int idx) | |||
| 83 | 83 | ||
| 84 | static int rfbi_get_clocks(void) | 84 | static int rfbi_get_clocks(void) |
| 85 | { | 85 | { |
| 86 | rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick"); | 86 | rfbi.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick"); |
| 87 | if (IS_ERR(rfbi.dss_ick)) { | 87 | if (IS_ERR(rfbi.dss_ick)) { |
| 88 | dev_err(rfbi.fbdev->dev, "can't get ick\n"); | 88 | dev_err(rfbi.fbdev->dev, "can't get ick\n"); |
| 89 | return PTR_ERR(rfbi.dss_ick); | 89 | return PTR_ERR(rfbi.dss_ick); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck"); | 92 | rfbi.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck"); |
| 93 | if (IS_ERR(rfbi.dss1_fck)) { | 93 | if (IS_ERR(rfbi.dss1_fck)) { |
| 94 | dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); | 94 | dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); |
| 95 | clk_put(rfbi.dss_ick); | 95 | clk_put(rfbi.dss_ick); |
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 71d8dec30635..c63ce767b277 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig | |||
| @@ -25,6 +25,13 @@ config OMAP2_DSS_DEBUG_SUPPORT | |||
| 25 | This enables debug messages. You need to enable printing | 25 | This enables debug messages. You need to enable printing |
| 26 | with 'debug' module parameter. | 26 | with 'debug' module parameter. |
| 27 | 27 | ||
| 28 | config OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 29 | bool "Collect DSS IRQ statistics" | ||
| 30 | depends on OMAP2_DSS_DEBUG_SUPPORT | ||
| 31 | default n | ||
| 32 | help | ||
| 33 | Collect DSS IRQ statistics, printable via debugfs | ||
| 34 | |||
| 28 | config OMAP2_DSS_RFBI | 35 | config OMAP2_DSS_RFBI |
| 29 | bool "RFBI support" | 36 | bool "RFBI support" |
| 30 | default n | 37 | default n |
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 29497a0c9a91..82918eec6d2e 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c | |||
| @@ -124,6 +124,7 @@ static void restore_all_ctx(void) | |||
| 124 | dss_clk_disable_all_no_ctx(); | 124 | dss_clk_disable_all_no_ctx(); |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) | ||
| 127 | /* CLOCKS */ | 128 | /* CLOCKS */ |
| 128 | static void core_dump_clocks(struct seq_file *s) | 129 | static void core_dump_clocks(struct seq_file *s) |
| 129 | { | 130 | { |
| @@ -149,6 +150,7 @@ static void core_dump_clocks(struct seq_file *s) | |||
| 149 | clocks[i]->usecount); | 150 | clocks[i]->usecount); |
| 150 | } | 151 | } |
| 151 | } | 152 | } |
| 153 | #endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */ | ||
| 152 | 154 | ||
| 153 | static int dss_get_clock(struct clk **clock, const char *clk_name) | 155 | static int dss_get_clock(struct clk **clock, const char *clk_name) |
| 154 | { | 156 | { |
| @@ -395,6 +397,14 @@ static int dss_initialize_debugfs(void) | |||
| 395 | debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, | 397 | debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, |
| 396 | &dss_debug_dump_clocks, &dss_debug_fops); | 398 | &dss_debug_dump_clocks, &dss_debug_fops); |
| 397 | 399 | ||
| 400 | debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir, | ||
| 401 | &dispc_dump_irqs, &dss_debug_fops); | ||
| 402 | |||
| 403 | #ifdef CONFIG_OMAP2_DSS_DSI | ||
| 404 | debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir, | ||
| 405 | &dsi_dump_irqs, &dss_debug_fops); | ||
| 406 | #endif | ||
| 407 | |||
| 398 | debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, | 408 | debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, |
| 399 | &dss_dump_regs, &dss_debug_fops); | 409 | &dss_dump_regs, &dss_debug_fops); |
| 400 | debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir, | 410 | debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir, |
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 6dabf4b2f005..de8bfbac9e26 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
| @@ -148,6 +148,12 @@ static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES, | |||
| 148 | DISPC_VID_ATTRIBUTES(0), | 148 | DISPC_VID_ATTRIBUTES(0), |
| 149 | DISPC_VID_ATTRIBUTES(1) }; | 149 | DISPC_VID_ATTRIBUTES(1) }; |
| 150 | 150 | ||
| 151 | struct dispc_irq_stats { | ||
| 152 | unsigned long last_reset; | ||
| 153 | unsigned irq_count; | ||
| 154 | unsigned irqs[32]; | ||
| 155 | }; | ||
| 156 | |||
| 151 | static struct { | 157 | static struct { |
| 152 | void __iomem *base; | 158 | void __iomem *base; |
| 153 | 159 | ||
| @@ -160,6 +166,11 @@ static struct { | |||
| 160 | struct work_struct error_work; | 166 | struct work_struct error_work; |
| 161 | 167 | ||
| 162 | u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; | 168 | u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; |
| 169 | |||
| 170 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 171 | spinlock_t irq_stats_lock; | ||
| 172 | struct dispc_irq_stats irq_stats; | ||
| 173 | #endif | ||
| 163 | } dispc; | 174 | } dispc; |
| 164 | 175 | ||
| 165 | static void _omap_dispc_set_irqs(void); | 176 | static void _omap_dispc_set_irqs(void); |
| @@ -1443,7 +1454,10 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height, | |||
| 1443 | do_div(tmp, 2 * out_height * ppl); | 1454 | do_div(tmp, 2 * out_height * ppl); |
| 1444 | fclk = tmp; | 1455 | fclk = tmp; |
| 1445 | 1456 | ||
| 1446 | if (height > 2 * out_height && ppl != out_width) { | 1457 | if (height > 2 * out_height) { |
| 1458 | if (ppl == out_width) | ||
| 1459 | return 0; | ||
| 1460 | |||
| 1447 | tmp = pclk * (height - 2 * out_height) * out_width; | 1461 | tmp = pclk * (height - 2 * out_height) * out_width; |
| 1448 | do_div(tmp, 2 * out_height * (ppl - out_width)); | 1462 | do_div(tmp, 2 * out_height * (ppl - out_width)); |
| 1449 | fclk = max(fclk, (u32) tmp); | 1463 | fclk = max(fclk, (u32) tmp); |
| @@ -1623,7 +1637,7 @@ static int _dispc_setup_plane(enum omap_plane plane, | |||
| 1623 | DSSDBG("required fclk rate = %lu Hz\n", fclk); | 1637 | DSSDBG("required fclk rate = %lu Hz\n", fclk); |
| 1624 | DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); | 1638 | DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); |
| 1625 | 1639 | ||
| 1626 | if (fclk > dispc_fclk_rate()) { | 1640 | if (!fclk || fclk > dispc_fclk_rate()) { |
| 1627 | DSSERR("failed to set up scaling, " | 1641 | DSSERR("failed to set up scaling, " |
| 1628 | "required fclk rate = %lu Hz, " | 1642 | "required fclk rate = %lu Hz, " |
| 1629 | "current fclk rate = %lu Hz\n", | 1643 | "current fclk rate = %lu Hz\n", |
| @@ -2247,6 +2261,50 @@ void dispc_dump_clocks(struct seq_file *s) | |||
| 2247 | enable_clocks(0); | 2261 | enable_clocks(0); |
| 2248 | } | 2262 | } |
| 2249 | 2263 | ||
| 2264 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 2265 | void dispc_dump_irqs(struct seq_file *s) | ||
| 2266 | { | ||
| 2267 | unsigned long flags; | ||
| 2268 | struct dispc_irq_stats stats; | ||
| 2269 | |||
| 2270 | spin_lock_irqsave(&dispc.irq_stats_lock, flags); | ||
| 2271 | |||
| 2272 | stats = dispc.irq_stats; | ||
| 2273 | memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats)); | ||
| 2274 | dispc.irq_stats.last_reset = jiffies; | ||
| 2275 | |||
| 2276 | spin_unlock_irqrestore(&dispc.irq_stats_lock, flags); | ||
| 2277 | |||
| 2278 | seq_printf(s, "period %u ms\n", | ||
| 2279 | jiffies_to_msecs(jiffies - stats.last_reset)); | ||
| 2280 | |||
| 2281 | seq_printf(s, "irqs %d\n", stats.irq_count); | ||
| 2282 | #define PIS(x) \ | ||
| 2283 | seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]); | ||
| 2284 | |||
| 2285 | PIS(FRAMEDONE); | ||
| 2286 | PIS(VSYNC); | ||
| 2287 | PIS(EVSYNC_EVEN); | ||
| 2288 | PIS(EVSYNC_ODD); | ||
| 2289 | PIS(ACBIAS_COUNT_STAT); | ||
| 2290 | PIS(PROG_LINE_NUM); | ||
| 2291 | PIS(GFX_FIFO_UNDERFLOW); | ||
| 2292 | PIS(GFX_END_WIN); | ||
| 2293 | PIS(PAL_GAMMA_MASK); | ||
| 2294 | PIS(OCP_ERR); | ||
| 2295 | PIS(VID1_FIFO_UNDERFLOW); | ||
| 2296 | PIS(VID1_END_WIN); | ||
| 2297 | PIS(VID2_FIFO_UNDERFLOW); | ||
| 2298 | PIS(VID2_END_WIN); | ||
| 2299 | PIS(SYNC_LOST); | ||
| 2300 | PIS(SYNC_LOST_DIGIT); | ||
| 2301 | PIS(WAKEUP); | ||
| 2302 | #undef PIS | ||
| 2303 | } | ||
| 2304 | #else | ||
| 2305 | void dispc_dump_irqs(struct seq_file *s) { } | ||
| 2306 | #endif | ||
| 2307 | |||
| 2250 | void dispc_dump_regs(struct seq_file *s) | 2308 | void dispc_dump_regs(struct seq_file *s) |
| 2251 | { | 2309 | { |
| 2252 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) | 2310 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) |
| @@ -2665,6 +2723,13 @@ void dispc_irq_handler(void) | |||
| 2665 | 2723 | ||
| 2666 | irqstatus = dispc_read_reg(DISPC_IRQSTATUS); | 2724 | irqstatus = dispc_read_reg(DISPC_IRQSTATUS); |
| 2667 | 2725 | ||
| 2726 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 2727 | spin_lock(&dispc.irq_stats_lock); | ||
| 2728 | dispc.irq_stats.irq_count++; | ||
| 2729 | dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs); | ||
| 2730 | spin_unlock(&dispc.irq_stats_lock); | ||
| 2731 | #endif | ||
| 2732 | |||
| 2668 | #ifdef DEBUG | 2733 | #ifdef DEBUG |
| 2669 | if (dss_debug) | 2734 | if (dss_debug) |
| 2670 | print_irq_status(irqstatus); | 2735 | print_irq_status(irqstatus); |
| @@ -3012,6 +3077,11 @@ int dispc_init(void) | |||
| 3012 | 3077 | ||
| 3013 | spin_lock_init(&dispc.irq_lock); | 3078 | spin_lock_init(&dispc.irq_lock); |
| 3014 | 3079 | ||
| 3080 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 3081 | spin_lock_init(&dispc.irq_stats_lock); | ||
| 3082 | dispc.irq_stats.last_reset = jiffies; | ||
| 3083 | #endif | ||
| 3084 | |||
| 3015 | INIT_WORK(&dispc.error_work, dispc_error_worker); | 3085 | INIT_WORK(&dispc.error_work, dispc_error_worker); |
| 3016 | 3086 | ||
| 3017 | dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS); | 3087 | dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS); |
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 5936487b5def..6122178f5f85 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c | |||
| @@ -204,6 +204,14 @@ struct dsi_update_region { | |||
| 204 | struct omap_dss_device *device; | 204 | struct omap_dss_device *device; |
| 205 | }; | 205 | }; |
| 206 | 206 | ||
| 207 | struct dsi_irq_stats { | ||
| 208 | unsigned long last_reset; | ||
| 209 | unsigned irq_count; | ||
| 210 | unsigned dsi_irqs[32]; | ||
| 211 | unsigned vc_irqs[4][32]; | ||
| 212 | unsigned cio_irqs[32]; | ||
| 213 | }; | ||
| 214 | |||
| 207 | static struct | 215 | static struct |
| 208 | { | 216 | { |
| 209 | void __iomem *base; | 217 | void __iomem *base; |
| @@ -258,6 +266,11 @@ static struct | |||
| 258 | #endif | 266 | #endif |
| 259 | int debug_read; | 267 | int debug_read; |
| 260 | int debug_write; | 268 | int debug_write; |
| 269 | |||
| 270 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 271 | spinlock_t irq_stats_lock; | ||
| 272 | struct dsi_irq_stats irq_stats; | ||
| 273 | #endif | ||
| 261 | } dsi; | 274 | } dsi; |
| 262 | 275 | ||
| 263 | #ifdef DEBUG | 276 | #ifdef DEBUG |
| @@ -528,6 +541,12 @@ void dsi_irq_handler(void) | |||
| 528 | 541 | ||
| 529 | irqstatus = dsi_read_reg(DSI_IRQSTATUS); | 542 | irqstatus = dsi_read_reg(DSI_IRQSTATUS); |
| 530 | 543 | ||
| 544 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 545 | spin_lock(&dsi.irq_stats_lock); | ||
| 546 | dsi.irq_stats.irq_count++; | ||
| 547 | dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs); | ||
| 548 | #endif | ||
| 549 | |||
| 531 | if (irqstatus & DSI_IRQ_ERROR_MASK) { | 550 | if (irqstatus & DSI_IRQ_ERROR_MASK) { |
| 532 | DSSERR("DSI error, irqstatus %x\n", irqstatus); | 551 | DSSERR("DSI error, irqstatus %x\n", irqstatus); |
| 533 | print_irq_status(irqstatus); | 552 | print_irq_status(irqstatus); |
| @@ -549,6 +568,10 @@ void dsi_irq_handler(void) | |||
| 549 | 568 | ||
| 550 | vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i)); | 569 | vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i)); |
| 551 | 570 | ||
| 571 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 572 | dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]); | ||
| 573 | #endif | ||
| 574 | |||
| 552 | if (vcstatus & DSI_VC_IRQ_BTA) | 575 | if (vcstatus & DSI_VC_IRQ_BTA) |
| 553 | complete(&dsi.bta_completion); | 576 | complete(&dsi.bta_completion); |
| 554 | 577 | ||
| @@ -568,6 +591,10 @@ void dsi_irq_handler(void) | |||
| 568 | if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { | 591 | if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { |
| 569 | ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); | 592 | ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); |
| 570 | 593 | ||
| 594 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 595 | dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs); | ||
| 596 | #endif | ||
| 597 | |||
| 571 | dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); | 598 | dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); |
| 572 | /* flush posted write */ | 599 | /* flush posted write */ |
| 573 | dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); | 600 | dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); |
| @@ -579,6 +606,10 @@ void dsi_irq_handler(void) | |||
| 579 | dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); | 606 | dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); |
| 580 | /* flush posted write */ | 607 | /* flush posted write */ |
| 581 | dsi_read_reg(DSI_IRQSTATUS); | 608 | dsi_read_reg(DSI_IRQSTATUS); |
| 609 | |||
| 610 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 611 | spin_unlock(&dsi.irq_stats_lock); | ||
| 612 | #endif | ||
| 582 | } | 613 | } |
| 583 | 614 | ||
| 584 | 615 | ||
| @@ -797,12 +828,12 @@ static int dsi_pll_power(enum dsi_pll_power_state state) | |||
| 797 | 828 | ||
| 798 | /* PLL_PWR_STATUS */ | 829 | /* PLL_PWR_STATUS */ |
| 799 | while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { | 830 | while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { |
| 800 | udelay(1); | 831 | if (++t > 1000) { |
| 801 | if (t++ > 1000) { | ||
| 802 | DSSERR("Failed to set DSI PLL power mode to %d\n", | 832 | DSSERR("Failed to set DSI PLL power mode to %d\n", |
| 803 | state); | 833 | state); |
| 804 | return -ENODEV; | 834 | return -ENODEV; |
| 805 | } | 835 | } |
| 836 | udelay(1); | ||
| 806 | } | 837 | } |
| 807 | 838 | ||
| 808 | return 0; | 839 | return 0; |
| @@ -1226,6 +1257,95 @@ void dsi_dump_clocks(struct seq_file *s) | |||
| 1226 | enable_clocks(0); | 1257 | enable_clocks(0); |
| 1227 | } | 1258 | } |
| 1228 | 1259 | ||
| 1260 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 1261 | void dsi_dump_irqs(struct seq_file *s) | ||
| 1262 | { | ||
| 1263 | unsigned long flags; | ||
| 1264 | struct dsi_irq_stats stats; | ||
| 1265 | |||
| 1266 | spin_lock_irqsave(&dsi.irq_stats_lock, flags); | ||
| 1267 | |||
| 1268 | stats = dsi.irq_stats; | ||
| 1269 | memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats)); | ||
| 1270 | dsi.irq_stats.last_reset = jiffies; | ||
| 1271 | |||
| 1272 | spin_unlock_irqrestore(&dsi.irq_stats_lock, flags); | ||
| 1273 | |||
| 1274 | seq_printf(s, "period %u ms\n", | ||
| 1275 | jiffies_to_msecs(jiffies - stats.last_reset)); | ||
| 1276 | |||
| 1277 | seq_printf(s, "irqs %d\n", stats.irq_count); | ||
| 1278 | #define PIS(x) \ | ||
| 1279 | seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); | ||
| 1280 | |||
| 1281 | seq_printf(s, "-- DSI interrupts --\n"); | ||
| 1282 | PIS(VC0); | ||
| 1283 | PIS(VC1); | ||
| 1284 | PIS(VC2); | ||
| 1285 | PIS(VC3); | ||
| 1286 | PIS(WAKEUP); | ||
| 1287 | PIS(RESYNC); | ||
| 1288 | PIS(PLL_LOCK); | ||
| 1289 | PIS(PLL_UNLOCK); | ||
| 1290 | PIS(PLL_RECALL); | ||
| 1291 | PIS(COMPLEXIO_ERR); | ||
| 1292 | PIS(HS_TX_TIMEOUT); | ||
| 1293 | PIS(LP_RX_TIMEOUT); | ||
| 1294 | PIS(TE_TRIGGER); | ||
| 1295 | PIS(ACK_TRIGGER); | ||
| 1296 | PIS(SYNC_LOST); | ||
| 1297 | PIS(LDO_POWER_GOOD); | ||
| 1298 | PIS(TA_TIMEOUT); | ||
| 1299 | #undef PIS | ||
| 1300 | |||
| 1301 | #define PIS(x) \ | ||
| 1302 | seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \ | ||
| 1303 | stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ | ||
| 1304 | stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ | ||
| 1305 | stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ | ||
| 1306 | stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); | ||
| 1307 | |||
| 1308 | seq_printf(s, "-- VC interrupts --\n"); | ||
| 1309 | PIS(CS); | ||
| 1310 | PIS(ECC_CORR); | ||
| 1311 | PIS(PACKET_SENT); | ||
| 1312 | PIS(FIFO_TX_OVF); | ||
| 1313 | PIS(FIFO_RX_OVF); | ||
| 1314 | PIS(BTA); | ||
| 1315 | PIS(ECC_NO_CORR); | ||
| 1316 | PIS(FIFO_TX_UDF); | ||
| 1317 | PIS(PP_BUSY_CHANGE); | ||
| 1318 | #undef PIS | ||
| 1319 | |||
| 1320 | #define PIS(x) \ | ||
| 1321 | seq_printf(s, "%-20s %10d\n", #x, \ | ||
| 1322 | stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); | ||
| 1323 | |||
| 1324 | seq_printf(s, "-- CIO interrupts --\n"); | ||
| 1325 | PIS(ERRSYNCESC1); | ||
| 1326 | PIS(ERRSYNCESC2); | ||
| 1327 | PIS(ERRSYNCESC3); | ||
| 1328 | PIS(ERRESC1); | ||
| 1329 | PIS(ERRESC2); | ||
| 1330 | PIS(ERRESC3); | ||
| 1331 | PIS(ERRCONTROL1); | ||
| 1332 | PIS(ERRCONTROL2); | ||
| 1333 | PIS(ERRCONTROL3); | ||
| 1334 | PIS(STATEULPS1); | ||
| 1335 | PIS(STATEULPS2); | ||
| 1336 | PIS(STATEULPS3); | ||
| 1337 | PIS(ERRCONTENTIONLP0_1); | ||
| 1338 | PIS(ERRCONTENTIONLP1_1); | ||
| 1339 | PIS(ERRCONTENTIONLP0_2); | ||
| 1340 | PIS(ERRCONTENTIONLP1_2); | ||
| 1341 | PIS(ERRCONTENTIONLP0_3); | ||
| 1342 | PIS(ERRCONTENTIONLP1_3); | ||
| 1343 | PIS(ULPSACTIVENOT_ALL0); | ||
| 1344 | PIS(ULPSACTIVENOT_ALL1); | ||
| 1345 | #undef PIS | ||
| 1346 | } | ||
| 1347 | #endif | ||
| 1348 | |||
| 1229 | void dsi_dump_regs(struct seq_file *s) | 1349 | void dsi_dump_regs(struct seq_file *s) |
| 1230 | { | 1350 | { |
| 1231 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) | 1351 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) |
| @@ -1321,12 +1441,12 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state) | |||
| 1321 | 1441 | ||
| 1322 | /* PWR_STATUS */ | 1442 | /* PWR_STATUS */ |
| 1323 | while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { | 1443 | while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { |
| 1324 | udelay(1); | 1444 | if (++t > 1000) { |
| 1325 | if (t++ > 1000) { | ||
| 1326 | DSSERR("failed to set complexio power state to " | 1445 | DSSERR("failed to set complexio power state to " |
| 1327 | "%d\n", state); | 1446 | "%d\n", state); |
| 1328 | return -ENODEV; | 1447 | return -ENODEV; |
| 1329 | } | 1448 | } |
| 1449 | udelay(1); | ||
| 1330 | } | 1450 | } |
| 1331 | 1451 | ||
| 1332 | return 0; | 1452 | return 0; |
| @@ -1526,10 +1646,10 @@ static void dsi_complexio_uninit(void) | |||
| 1526 | 1646 | ||
| 1527 | static int _dsi_wait_reset(void) | 1647 | static int _dsi_wait_reset(void) |
| 1528 | { | 1648 | { |
| 1529 | int i = 0; | 1649 | int t = 0; |
| 1530 | 1650 | ||
| 1531 | while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { | 1651 | while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { |
| 1532 | if (i++ > 5) { | 1652 | if (++t > 5) { |
| 1533 | DSSERR("soft reset failed\n"); | 1653 | DSSERR("soft reset failed\n"); |
| 1534 | return -ENODEV; | 1654 | return -ENODEV; |
| 1535 | } | 1655 | } |
| @@ -1999,7 +2119,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) | |||
| 1999 | return -EINVAL; | 2119 | return -EINVAL; |
| 2000 | } | 2120 | } |
| 2001 | 2121 | ||
| 2002 | data_id = data_type | channel << 6; | 2122 | data_id = data_type | dsi.vc[channel].dest_per << 6; |
| 2003 | 2123 | ||
| 2004 | r = (data_id << 0) | (data << 8) | (ecc << 24); | 2124 | r = (data_id << 0) | (data << 8) | (ecc << 24); |
| 2005 | 2125 | ||
| @@ -2011,7 +2131,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) | |||
| 2011 | int dsi_vc_send_null(int channel) | 2131 | int dsi_vc_send_null(int channel) |
| 2012 | { | 2132 | { |
| 2013 | u8 nullpkg[] = {0, 0, 0, 0}; | 2133 | u8 nullpkg[] = {0, 0, 0, 0}; |
| 2014 | return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0); | 2134 | return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0); |
| 2015 | } | 2135 | } |
| 2016 | EXPORT_SYMBOL(dsi_vc_send_null); | 2136 | EXPORT_SYMBOL(dsi_vc_send_null); |
| 2017 | 2137 | ||
| @@ -2058,7 +2178,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) | |||
| 2058 | int r; | 2178 | int r; |
| 2059 | 2179 | ||
| 2060 | if (dsi.debug_read) | 2180 | if (dsi.debug_read) |
| 2061 | DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd); | 2181 | DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); |
| 2062 | 2182 | ||
| 2063 | r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); | 2183 | r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); |
| 2064 | if (r) | 2184 | if (r) |
| @@ -2586,7 +2706,6 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | |||
| 2586 | /* using fifo not empty */ | 2706 | /* using fifo not empty */ |
| 2587 | /* TX_FIFO_NOT_EMPTY */ | 2707 | /* TX_FIFO_NOT_EMPTY */ |
| 2588 | while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { | 2708 | while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { |
| 2589 | udelay(1); | ||
| 2590 | fifo_stalls++; | 2709 | fifo_stalls++; |
| 2591 | if (fifo_stalls > 0xfffff) { | 2710 | if (fifo_stalls > 0xfffff) { |
| 2592 | DSSERR("fifo stalls overflow, pixels left %d\n", | 2711 | DSSERR("fifo stalls overflow, pixels left %d\n", |
| @@ -2594,6 +2713,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, | |||
| 2594 | dsi_if_enable(0); | 2713 | dsi_if_enable(0); |
| 2595 | return -EIO; | 2714 | return -EIO; |
| 2596 | } | 2715 | } |
| 2716 | udelay(1); | ||
| 2597 | } | 2717 | } |
| 2598 | #elif 1 | 2718 | #elif 1 |
| 2599 | /* using fifo emptiness */ | 2719 | /* using fifo emptiness */ |
| @@ -2812,11 +2932,15 @@ static int dsi_set_update_mode(struct omap_dss_device *dssdev, | |||
| 2812 | 2932 | ||
| 2813 | static int dsi_set_te(struct omap_dss_device *dssdev, bool enable) | 2933 | static int dsi_set_te(struct omap_dss_device *dssdev, bool enable) |
| 2814 | { | 2934 | { |
| 2815 | int r; | 2935 | int r = 0; |
| 2816 | r = dssdev->driver->enable_te(dssdev, enable); | 2936 | |
| 2817 | /* XXX for some reason, DSI TE breaks if we don't wait here. | 2937 | if (dssdev->driver->enable_te) { |
| 2818 | * Panel bug? Needs more studying */ | 2938 | r = dssdev->driver->enable_te(dssdev, enable); |
| 2819 | msleep(100); | 2939 | /* XXX for some reason, DSI TE breaks if we don't wait here. |
| 2940 | * Panel bug? Needs more studying */ | ||
| 2941 | msleep(100); | ||
| 2942 | } | ||
| 2943 | |||
| 2820 | return r; | 2944 | return r; |
| 2821 | } | 2945 | } |
| 2822 | 2946 | ||
| @@ -3637,6 +3761,11 @@ int dsi_init(struct platform_device *pdev) | |||
| 3637 | spin_lock_init(&dsi.errors_lock); | 3761 | spin_lock_init(&dsi.errors_lock); |
| 3638 | dsi.errors = 0; | 3762 | dsi.errors = 0; |
| 3639 | 3763 | ||
| 3764 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 3765 | spin_lock_init(&dsi.irq_stats_lock); | ||
| 3766 | dsi.irq_stats.last_reset = jiffies; | ||
| 3767 | #endif | ||
| 3768 | |||
| 3640 | init_completion(&dsi.bta_completion); | 3769 | init_completion(&dsi.bta_completion); |
| 3641 | init_completion(&dsi.update_completion); | 3770 | init_completion(&dsi.update_completion); |
| 3642 | 3771 | ||
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 9b05ee65a15d..0a26b7d84d41 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
| @@ -467,14 +467,14 @@ static irqreturn_t dss_irq_handler_omap3(int irq, void *arg) | |||
| 467 | 467 | ||
| 468 | static int _omap_dss_wait_reset(void) | 468 | static int _omap_dss_wait_reset(void) |
| 469 | { | 469 | { |
| 470 | unsigned timeout = 1000; | 470 | int t = 0; |
| 471 | 471 | ||
| 472 | while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) { | 472 | while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) { |
| 473 | udelay(1); | 473 | if (++t > 1000) { |
| 474 | if (!--timeout) { | ||
| 475 | DSSERR("soft reset failed\n"); | 474 | DSSERR("soft reset failed\n"); |
| 476 | return -ENODEV; | 475 | return -ENODEV; |
| 477 | } | 476 | } |
| 477 | udelay(1); | ||
| 478 | } | 478 | } |
| 479 | 479 | ||
| 480 | return 0; | 480 | return 0; |
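The same loop reshuffle shows up in _dsi_wait_reset(), dsi_pll_power(), dsi_complexio_power() and _omap_dss_wait_reset() above: the counter is pre-incremented and tested before the udelay(), so a timeout is reported without one last pointless stall and the iteration bound is exact. The generic pattern, as a sketch:

    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Poll a condition with a bounded number of 1 us waits (illustrative). */
    static int wait_for_condition(bool (*ready)(void), int max_tries)
    {
            int t = 0;

            while (!ready()) {
                    if (++t > max_tries)
                            return -ENODEV;         /* give up before delaying again */
                    udelay(1);
            }
            return 0;
    }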
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 8da5ac42151b..2bcb1245d6c2 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h | |||
| @@ -240,6 +240,7 @@ int dsi_init(struct platform_device *pdev); | |||
| 240 | void dsi_exit(void); | 240 | void dsi_exit(void); |
| 241 | 241 | ||
| 242 | void dsi_dump_clocks(struct seq_file *s); | 242 | void dsi_dump_clocks(struct seq_file *s); |
| 243 | void dsi_dump_irqs(struct seq_file *s); | ||
| 243 | void dsi_dump_regs(struct seq_file *s); | 244 | void dsi_dump_regs(struct seq_file *s); |
| 244 | 245 | ||
| 245 | void dsi_save_context(void); | 246 | void dsi_save_context(void); |
| @@ -268,6 +269,7 @@ int dpi_init_display(struct omap_dss_device *dssdev); | |||
| 268 | int dispc_init(void); | 269 | int dispc_init(void); |
| 269 | void dispc_exit(void); | 270 | void dispc_exit(void); |
| 270 | void dispc_dump_clocks(struct seq_file *s); | 271 | void dispc_dump_clocks(struct seq_file *s); |
| 272 | void dispc_dump_irqs(struct seq_file *s); | ||
| 271 | void dispc_dump_regs(struct seq_file *s); | 273 | void dispc_dump_regs(struct seq_file *s); |
| 272 | void dispc_irq_handler(void); | 274 | void dispc_irq_handler(void); |
| 273 | void dispc_fake_vsync_irq(void); | 275 | void dispc_fake_vsync_irq(void); |
| @@ -367,4 +369,16 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t); | |||
| 367 | unsigned long rfbi_get_max_tx_rate(void); | 369 | unsigned long rfbi_get_max_tx_rate(void); |
| 368 | int rfbi_init_display(struct omap_dss_device *display); | 370 | int rfbi_init_display(struct omap_dss_device *display); |
| 369 | 371 | ||
| 372 | |||
| 373 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | ||
| 374 | static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr) | ||
| 375 | { | ||
| 376 | int b; | ||
| 377 | for (b = 0; b < 32; ++b) { | ||
| 378 | if (irqstatus & (1 << b)) | ||
| 379 | irq_arr[b]++; | ||
| 380 | } | ||
| 381 | } | ||
| 382 | #endif | ||
| 383 | |||
| 370 | #endif | 384 | #endif |
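dss_collect_irq_stats() is a per-bit histogram: every bit set in the status word bumps the matching counter, and the dump functions read the counters back with stats.irqs[ffs(FLAG) - 1]. A standalone illustration of that indexing (the flag value is made up, not a real DISPC_IRQ_* constant):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    static void collect_irq_stats(unsigned int status, unsigned int *counts)
    {
            int b;

            for (b = 0; b < 32; b++)
                    if (status & (1u << b))
                            counts[b]++;
    }

    int main(void)
    {
            unsigned int counts[32] = { 0 };
            unsigned int vsync = 1u << 1;   /* stand-in for a DISPC_IRQ_* flag */

            collect_irq_stats(vsync | (1u << 5), counts);
            printf("vsync seen %u times\n", counts[ffs(vsync) - 1]);
            return 0;
    }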
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index d0b3006ad8a5..b936495c065d 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c | |||
| @@ -120,7 +120,7 @@ static struct { | |||
| 120 | 120 | ||
| 121 | struct omap_dss_device *dssdev[2]; | 121 | struct omap_dss_device *dssdev[2]; |
| 122 | 122 | ||
| 123 | struct kfifo *cmd_fifo; | 123 | struct kfifo cmd_fifo; |
| 124 | spinlock_t cmd_lock; | 124 | spinlock_t cmd_lock; |
| 125 | struct completion cmd_done; | 125 | struct completion cmd_done; |
| 126 | atomic_t cmd_fifo_full; | 126 | atomic_t cmd_fifo_full; |
| @@ -1011,20 +1011,20 @@ static void process_cmd_fifo(void) | |||
| 1011 | return; | 1011 | return; |
| 1012 | 1012 | ||
| 1013 | while (true) { | 1013 | while (true) { |
| 1014 | spin_lock_irqsave(rfbi.cmd_fifo->lock, flags); | 1014 | spin_lock_irqsave(&rfbi.cmd_lock, flags); |
| 1015 | 1015 | ||
| 1016 | len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p, | 1016 | len = kfifo_out(&rfbi.cmd_fifo, (unsigned char *)&p, |
| 1017 | sizeof(struct update_param)); | 1017 | sizeof(struct update_param)); |
| 1018 | if (len == 0) { | 1018 | if (len == 0) { |
| 1019 | DSSDBG("nothing more in fifo\n"); | 1019 | DSSDBG("nothing more in fifo\n"); |
| 1020 | atomic_set(&rfbi.cmd_pending, 0); | 1020 | atomic_set(&rfbi.cmd_pending, 0); |
| 1021 | spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); | 1021 | spin_unlock_irqrestore(&rfbi.cmd_lock, flags); |
| 1022 | break; | 1022 | break; |
| 1023 | } | 1023 | } |
| 1024 | 1024 | ||
| 1025 | /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/ | 1025 | /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/ |
| 1026 | 1026 | ||
| 1027 | spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); | 1027 | spin_unlock_irqrestore(&rfbi.cmd_lock, flags); |
| 1028 | 1028 | ||
| 1029 | BUG_ON(len != sizeof(struct update_param)); | 1029 | BUG_ON(len != sizeof(struct update_param)); |
| 1030 | BUG_ON(p.rfbi_module > 1); | 1030 | BUG_ON(p.rfbi_module > 1); |
| @@ -1052,25 +1052,25 @@ static void rfbi_push_cmd(struct update_param *p) | |||
| 1052 | unsigned long flags; | 1052 | unsigned long flags; |
| 1053 | int available; | 1053 | int available; |
| 1054 | 1054 | ||
| 1055 | spin_lock_irqsave(rfbi.cmd_fifo->lock, flags); | 1055 | spin_lock_irqsave(&rfbi.cmd_lock, flags); |
| 1056 | available = RFBI_CMD_FIFO_LEN_BYTES - | 1056 | available = RFBI_CMD_FIFO_LEN_BYTES - |
| 1057 | __kfifo_len(rfbi.cmd_fifo); | 1057 | kfifo_len(&rfbi.cmd_fifo); |
| 1058 | 1058 | ||
| 1059 | /* DSSDBG("%d bytes left in fifo\n", available); */ | 1059 | /* DSSDBG("%d bytes left in fifo\n", available); */ |
| 1060 | if (available < sizeof(struct update_param)) { | 1060 | if (available < sizeof(struct update_param)) { |
| 1061 | DSSDBG("Going to wait because FIFO FULL..\n"); | 1061 | DSSDBG("Going to wait because FIFO FULL..\n"); |
| 1062 | spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); | 1062 | spin_unlock_irqrestore(&rfbi.cmd_lock, flags); |
| 1063 | atomic_inc(&rfbi.cmd_fifo_full); | 1063 | atomic_inc(&rfbi.cmd_fifo_full); |
| 1064 | wait_for_completion(&rfbi.cmd_done); | 1064 | wait_for_completion(&rfbi.cmd_done); |
| 1065 | /*DSSDBG("Woke up because fifo not full anymore\n");*/ | 1065 | /*DSSDBG("Woke up because fifo not full anymore\n");*/ |
| 1066 | continue; | 1066 | continue; |
| 1067 | } | 1067 | } |
| 1068 | 1068 | ||
| 1069 | ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p, | 1069 | ret = kfifo_in(&rfbi.cmd_fifo, (unsigned char *)p, |
| 1070 | sizeof(struct update_param)); | 1070 | sizeof(struct update_param)); |
| 1071 | /* DSSDBG("pushed %d bytes\n", ret);*/ | 1071 | /* DSSDBG("pushed %d bytes\n", ret);*/ |
| 1072 | 1072 | ||
| 1073 | spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags); | 1073 | spin_unlock_irqrestore(&rfbi.cmd_lock, flags); |
| 1074 | 1074 | ||
| 1075 | BUG_ON(ret != sizeof(struct update_param)); | 1075 | BUG_ON(ret != sizeof(struct update_param)); |
| 1076 | 1076 | ||
| @@ -1155,12 +1155,12 @@ int rfbi_init(void) | |||
| 1155 | { | 1155 | { |
| 1156 | u32 rev; | 1156 | u32 rev; |
| 1157 | u32 l; | 1157 | u32 l; |
| 1158 | int r; | ||
| 1158 | 1159 | ||
| 1159 | spin_lock_init(&rfbi.cmd_lock); | 1160 | spin_lock_init(&rfbi.cmd_lock); |
| 1160 | rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL, | 1161 | r = kfifo_alloc(&rfbi.cmd_fifo, RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL); |
| 1161 | &rfbi.cmd_lock); | 1162 | if (r) |
| 1162 | if (IS_ERR(rfbi.cmd_fifo)) | 1163 | return r; |
| 1163 | return -ENOMEM; | ||
| 1164 | 1164 | ||
| 1165 | init_completion(&rfbi.cmd_done); | 1165 | init_completion(&rfbi.cmd_done); |
| 1166 | atomic_set(&rfbi.cmd_fifo_full, 0); | 1166 | atomic_set(&rfbi.cmd_fifo_full, 0); |
| @@ -1196,7 +1196,7 @@ void rfbi_exit(void) | |||
| 1196 | { | 1196 | { |
| 1197 | DSSDBG("rfbi_exit\n"); | 1197 | DSSDBG("rfbi_exit\n"); |
| 1198 | 1198 | ||
| 1199 | kfifo_free(rfbi.cmd_fifo); | 1199 | kfifo_free(&rfbi.cmd_fifo); |
| 1200 | 1200 | ||
| 1201 | iounmap(rfbi.base); | 1201 | iounmap(rfbi.base); |
| 1202 | } | 1202 | } |
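The rfbi.c hunks are a mechanical conversion to the reworked kfifo API: the fifo is embedded in the driver structure instead of being a pointer, kfifo_alloc() now returns an error code, locking is entirely the caller's business (the old allocator took a spinlock pointer and stored it in the fifo), and __kfifo_put/__kfifo_get become kfifo_in/kfifo_out. A minimal sketch of the new-style usage, with placeholder names:

    #include <linux/kfifo.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_queue {
            struct kfifo fifo;      /* embedded, not a pointer */
            spinlock_t lock;        /* caller-owned, taken around in/out */
    };

    static int my_queue_init(struct my_queue *q, unsigned int bytes)
    {
            spin_lock_init(&q->lock);
            return kfifo_alloc(&q->fifo, bytes, GFP_KERNEL);        /* 0 on success */
    }

    static unsigned int my_queue_pop(struct my_queue *q, void *buf, unsigned int len)
    {
            unsigned long flags;
            unsigned int n;

            spin_lock_irqsave(&q->lock, flags);
            n = kfifo_out(&q->fifo, buf, len);
            spin_unlock_irqrestore(&q->lock, flags);
            return n;
    }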
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index ef299839858a..d17caef6915a 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c | |||
| @@ -1311,6 +1311,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi) | |||
| 1311 | if (rg->vrfb.vaddr[0]) { | 1311 | if (rg->vrfb.vaddr[0]) { |
| 1312 | iounmap(rg->vrfb.vaddr[0]); | 1312 | iounmap(rg->vrfb.vaddr[0]); |
| 1313 | omap_vrfb_release_ctx(&rg->vrfb); | 1313 | omap_vrfb_release_ctx(&rg->vrfb); |
| 1314 | rg->vrfb.vaddr[0] = NULL; | ||
| 1314 | } | 1315 | } |
| 1315 | } | 1316 | } |
| 1316 | 1317 | ||
| @@ -2114,6 +2115,11 @@ static int omapfb_probe(struct platform_device *pdev) | |||
| 2114 | dssdev = NULL; | 2115 | dssdev = NULL; |
| 2115 | for_each_dss_dev(dssdev) { | 2116 | for_each_dss_dev(dssdev) { |
| 2116 | omap_dss_get_device(dssdev); | 2117 | omap_dss_get_device(dssdev); |
| 2118 | if (!dssdev->driver) { | ||
| 2119 | dev_err(&pdev->dev, "no driver for display\n"); | ||
| 2120 | r = -EINVAL; | ||
| 2121 | goto cleanup; | ||
| 2122 | } | ||
| 2117 | fbdev->displays[fbdev->num_displays++] = dssdev; | 2123 | fbdev->displays[fbdev->num_displays++] = dssdev; |
| 2118 | } | 2124 | } |
| 2119 | 2125 | ||
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 415858b421b3..825b665245bb 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
| @@ -1221,9 +1221,9 @@ static void setup_smart_timing(struct pxafb_info *fbi, | |||
| 1221 | static int pxafb_smart_thread(void *arg) | 1221 | static int pxafb_smart_thread(void *arg) |
| 1222 | { | 1222 | { |
| 1223 | struct pxafb_info *fbi = arg; | 1223 | struct pxafb_info *fbi = arg; |
| 1224 | struct pxafb_mach_info *inf; | 1224 | struct pxafb_mach_info *inf = fbi->dev->platform_data; |
| 1225 | 1225 | ||
| 1226 | if (!fbi || !fbi->dev->platform_data->smart_update) { | 1226 | if (!inf->smart_update) { |
| 1227 | pr_err("%s: not properly initialized, thread terminated\n", | 1227 | pr_err("%s: not properly initialized, thread terminated\n", |
| 1228 | __func__); | 1228 | __func__); |
| 1229 | return -EINVAL; | 1229 | return -EINVAL; |
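The pxafb change caches platform_data in a typed local before using it (and drops the !fbi test, since the thread is always started with a valid argument); dev->platform_data is a void * in struct device, so the old member dereference could not work as written. Illustrative shape, with a made-up callback name:

    #include <linux/device.h>
    #include <linux/errno.h>

    struct my_mach_info {
            int (*smart_update)(void *fbi);         /* made-up callback */
    };

    static int check_smart_update(struct device *dev)
    {
            struct my_mach_info *inf = dev->platform_data;  /* typed view of the void * */

            if (!inf || !inf->smart_update)
                    return -EINVAL;
            return 0;
    }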
