Diffstat (limited to 'drivers')
53 files changed, 617 insertions, 300 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index a7799a99f2d9..8a851d0f4384 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -254,13 +254,6 @@ config ACPI_PCI_SLOT
 	  help you correlate PCI bus addresses with the physical geography
 	  of your slots. If you are unsure, say N.
 
-config ACPI_SYSTEM
-	bool
-	default y
-	help
-	  This driver will enable your system to shut down using ACPI, and
-	  dump your ACPI DSDT table using /proc/acpi/dsdt.
-
 config X86_PM_TIMER
 	bool "Power Management Timer Support" if EMBEDDED
 	depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 65d90c720b5a..b130ea0d0759 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
 obj-$(CONFIG_ACPI_CONTAINER) += container.o
 obj-$(CONFIG_ACPI_THERMAL) += thermal.o
 obj-y += power.o
-obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
+obj-y += system.o event.o
 obj-$(CONFIG_ACPI_DEBUG) += debug.o
 obj-$(CONFIG_ACPI_NUMA) += numa.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 65132f920459..69cbc57c2d1c 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -138,6 +138,29 @@ static int acpi_battery_technology(struct acpi_battery *battery)
 
 static int acpi_battery_get_state(struct acpi_battery *battery);
 
+static int acpi_battery_is_charged(struct acpi_battery *battery)
+{
+	/* either charging or discharging */
+	if (battery->state != 0)
+		return 0;
+
+	/* battery not reporting charge */
+	if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
+	    battery->capacity_now == 0)
+		return 0;
+
+	/* good batteries update full_charge as the batteries degrade */
+	if (battery->full_charge_capacity == battery->capacity_now)
+		return 1;
+
+	/* fallback to using design values for broken batteries */
+	if (battery->design_capacity == battery->capacity_now)
+		return 1;
+
+	/* we don't do any sort of metric based on percentages */
+	return 0;
+}
+
 static int acpi_battery_get_property(struct power_supply *psy,
 				     enum power_supply_property psp,
 				     union power_supply_propval *val)
@@ -155,7 +178,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
 			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
 		else if (battery->state & 0x02)
 			val->intval = POWER_SUPPLY_STATUS_CHARGING;
-		else if (battery->state == 0)
+		else if (acpi_battery_is_charged(battery))
 			val->intval = POWER_SUPPLY_STATUS_FULL;
 		else
 			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5c2f5d343be6..2fe15060dcdc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -120,6 +120,8 @@ static struct acpi_ec {
 	spinlock_t curr_lock;
 } *boot_ec, *first_ec;
 
+static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+
 /* --------------------------------------------------------------------------
                              Transaction Management
    -------------------------------------------------------------------------- */
@@ -259,6 +261,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 		clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
 		acpi_disable_gpe(NULL, ec->gpe);
 	}
+	if (EC_FLAGS_MSI)
+		udelay(ACPI_EC_DELAY);
 	/* start transaction */
 	spin_lock_irqsave(&ec->curr_lock, tmp);
 	/* following two actions should be kept atomic */
@@ -967,6 +971,11 @@ int __init acpi_ec_ecdt_probe(void)
 	/*
 	 * Generate a boot ec context
 	 */
+	if (dmi_name_in_vendors("Micro-Star") ||
+	    dmi_name_in_vendors("Notebook")) {
+		pr_info(PREFIX "Enabling special treatment for EC from MSI.\n");
+		EC_FLAGS_MSI = 1;
+	}
 	status = acpi_get_table(ACPI_SIG_ECDT, 1,
 				(struct acpi_table_header **)&ecdt_ptr);
 	if (ACPI_SUCCESS(status)) {
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0a5f055dffba..9f50f1b545dc 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -88,8 +88,6 @@ extern void driver_detach(struct device_driver *drv);
 extern int driver_probe_device(struct device_driver *drv, struct device *dev);
 
 extern void sysdev_shutdown(void);
-extern int sysdev_suspend(pm_message_t state);
-extern int sysdev_resume(void);
 
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 315bed8d5e7f..135231239103 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -18,9 +18,11 @@
  */
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/wait.h>
+#include <linux/async.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -168,6 +170,21 @@ int driver_probe_done(void)
 }
 
 /**
+ * wait_for_device_probe
+ * Wait for device probing to be completed.
+ *
+ * Note: this function polls at 100 msec intervals.
+ */
+int wait_for_device_probe(void)
+{
+	/* wait for the known devices to complete their probing */
+	while (driver_probe_done() != 0)
+		msleep(100);
+	async_synchronize_full();
+	return 0;
+}
+
+/**
  * driver_probe_device - attempt to bind device & driver together
  * @drv: driver to bind a device to
  * @dev: device to try to bind to the driver
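
The new wait_for_device_probe() helper gives late-running code a way to block until outstanding driver probes, including asynchronous ones, have finished. As a rough illustration (the initcall below and its name are made up for the example; only wait_for_device_probe() itself comes from the hunk above), a caller that must not scan for devices before probing settles might use it like this:

	/* Hypothetical late-boot code: wait for all pending probes before
	 * scanning.  wait_for_device_probe() polls driver_probe_done() and
	 * then drains the async probe queue, so earlier-registered devices
	 * have either bound to a driver or failed by the time it returns.
	 */
	static int __init example_late_scan(void)
	{
		wait_for_device_probe();
		/* ... now safe to look for the devices we depend on ... */
		return 0;
	}
	late_initcall(example_late_scan);
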
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 670c9d6c1407..2d14f4ae6c01 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -333,7 +333,6 @@ static void dpm_power_up(pm_message_t state)
  */
 void device_power_up(pm_message_t state)
 {
-	sysdev_resume();
 	dpm_power_up(state);
 }
 EXPORT_SYMBOL_GPL(device_power_up);
@@ -577,8 +576,6 @@ int device_power_down(pm_message_t state)
 		}
 		dev->power.status = DPM_OFF_IRQ;
 	}
-	if (!error)
-		error = sysdev_suspend(state);
 	if (error)
 		dpm_power_up(resume_event(state));
 	return error;
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index c98c31ec2f75..b428c8c4bc64 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -303,7 +303,6 @@ void sysdev_unregister(struct sys_device * sysdev)
  * is guaranteed by virtue of the fact that child devices are registered
  * after their parents.
  */
-
 void sysdev_shutdown(void)
 {
 	struct sysdev_class * cls;
@@ -363,7 +362,6 @@ static void __sysdev_resume(struct sys_device *dev)
  * This is only called by the device PM core, so we let them handle
  * all synchronization.
  */
-
 int sysdev_suspend(pm_message_t state)
 {
 	struct sysdev_class * cls;
@@ -432,7 +430,7 @@ aux_driver:
 	}
 	return ret;
 }
-
+EXPORT_SYMBOL_GPL(sysdev_suspend);
 
 /**
  * sysdev_resume - Bring system devices back to life.
@@ -442,7 +440,6 @@ aux_driver:
  *
  * Note: Interrupts are disabled when called.
  */
-
 int sysdev_resume(void)
 {
 	struct sysdev_class * cls;
@@ -463,7 +460,7 @@ int sysdev_resume(void)
 	}
 	return 0;
 }
-
+EXPORT_SYMBOL_GPL(sysdev_resume);
 
 int __init system_bus_init(void)
 {
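
Read together with the drivers/base/power/main.c hunk above, sysdev_suspend() and sysdev_resume() are no longer called from device_power_down()/device_power_up(); exporting them suggests that the suspend path is now expected to call them explicitly, with interrupts disabled, between those two steps. The ordering below is a hedged sketch only: the function name is invented, and the exact call site is not part of this patch; the signatures are the ones visible in these hunks.

	/* Illustrative only: a possible ordering once the device PM core no
	 * longer invokes sysdev_suspend()/sysdev_resume() itself.
	 */
	static int example_enter_sleep(pm_message_t state)
	{
		int error;

		error = device_power_down(state);	/* late device suspend, IRQs off */
		if (error)
			return error;

		error = sysdev_suspend(state);		/* now an explicit call */
		if (!error) {
			/* ... platform-specific sleep entry would go here ... */
			sysdev_resume();
		}
		device_power_up(state);
		return error;
	}
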
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 69e1df7dfa14..4234c11c1e4c 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1730,7 +1730,7 @@ static int __init fd_test_drive_present( int drive )
 
 	timeout = jiffies + 2*HZ+HZ/2;
 	while (time_before(jiffies, timeout))
-		if (!(mfp.par_dt_reg & 0x20))
+		if (!(st_mfp.par_dt_reg & 0x20))
 			break;
 
 	status = FDC_READ( FDCREG_STATUS );
@@ -1747,7 +1747,7 @@ static int __init fd_test_drive_present( int drive )
 		/* dummy seek command to make WP bit accessible */
 		FDC_WRITE( FDCREG_DATA, 0 );
 		FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK );
-		while( mfp.par_dt_reg & 0x20 )
+		while( st_mfp.par_dt_reg & 0x20 )
 			;
 		status = FDC_READ( FDCREG_STATUS );
 	}
diff --git a/drivers/char/scc.h b/drivers/char/scc.h
index 93998f5baff5..341b1142bea8 100644
--- a/drivers/char/scc.h
+++ b/drivers/char/scc.h
@@ -387,7 +387,7 @@ struct scc_port {
 /* The SCC needs 3.5 PCLK cycles recovery time between to register
  * accesses. PCLK runs with 8 MHz on an Atari, so this delay is 3.5 *
  * 125 ns = 437.5 ns. This is too short for udelay().
- * 10/16/95: A tstb mfp.par_dt_reg takes 600ns (sure?) and thus should be
+ * 10/16/95: A tstb st_mfp.par_dt_reg takes 600ns (sure?) and thus should be
  * quite right
  */
 
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index f146e90404fa..518f2a25d91e 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -1746,9 +1746,10 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
 		sx_dprintk(SX_DEBUG_FIRMWARE, "returning type= %ld\n", rc);
 		break;
 	case SXIO_DO_RAMTEST:
-		if (sx_initialized)	/* Already initialized: better not ramtest the board. */
+		if (sx_initialized) {	/* Already initialized: better not ramtest the board. */
 			rc = -EPERM;
 			break;
+		}
 		if (IS_SX_BOARD(board)) {
 			rc = do_memtest(board, 0, 0x7000);
 			if (!rc)
@@ -1788,7 +1789,7 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
 					nbytes - i : SX_CHUNK_SIZE)) {
 				kfree(tmp);
 				rc = -EFAULT;
-				break;
+				goto out;
 			}
 			memcpy_toio(board->base2 + offset + i, tmp,
 				    (i + SX_CHUNK_SIZE > nbytes) ?
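
The first sx.c hunk fixes a classic dangling-statement bug: without braces, only "rc = -EPERM;" was guarded by the if, and the following "break;" ran unconditionally, so the RAM test path was dead code even when the board had not been initialized. The standalone program below (plain user-space C, not driver code, with invented names) reproduces the shape of the bug for clarity:

	#include <stdio.h>

	/* Without braces, only the first statement after the if is guarded;
	 * the break executes unconditionally and the "ramtest" is unreachable.
	 */
	static int do_ioctl_buggy(int initialized)
	{
		int rc = 0;

		switch (1) {
		case 1:
			if (initialized)	/* looks like it guards both lines... */
				rc = -1;
				break;		/* ...but this always runs */
			rc = 42;		/* unreachable "ramtest" result */
			break;
		}
		return rc;
	}

	int main(void)
	{
		/* Prints 0 even though initialized == 0; with braces, 42. */
		printf("%d\n", do_ioctl_buggy(0));
		return 0;
	}
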
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bfce0992fefb..94a768871734 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1741,9 +1741,8 @@ out:
  * RETURNS:
  * Zero on success, errno on failure.
  */
-void drm_fb_release(struct file *filp)
+void drm_fb_release(struct drm_file *priv)
 {
-	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_framebuffer *fb, *tfb;
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 964c5eb1fada..733028b4d45e 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -512,8 +512,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	if (drm_mode_equal(&saved_mode, &crtc->mode)) {
 		if (saved_x != crtc->x || saved_y != crtc->y ||
 		    depth_changed || bpp_changed) {
-			crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
-						  old_fb);
+			ret = !crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
+							 old_fb);
 			goto done;
 		}
 	}
@@ -552,7 +552,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	/* Set up the DPLL and any encoders state that needs to adjust or depend
 	 * on the DPLL.
 	 */
-	crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+	ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+	if (!ret)
+		goto done;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
@@ -752,6 +754,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
 						      old_fb)) {
+				DRM_ERROR("failed to set mode on crtc %p\n",
+					  set->crtc);
 				ret = -EINVAL;
 				goto fail_set_mode;
 			}
@@ -765,7 +769,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		old_fb = set->crtc->fb;
 		if (set->crtc->fb != set->fb)
 			set->crtc->fb = set->fb;
-		crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb);
+		ret = crtc_funcs->mode_set_base(set->crtc,
+						set->x, set->y, old_fb);
+		if (ret != 0)
+			goto fail_set_mode;
 	}
 
 	kfree(save_encoders);
@@ -775,8 +782,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 fail_set_mode:
 	set->crtc->enabled = save_enabled;
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder)
+			continue;
+
 		connector->encoder->crtc = save_crtcs[count++];
+	}
 fail_no_encoder:
 	kfree(save_crtcs);
 	count = 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b06a53715853..6c020fe5431c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -457,6 +457,9 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
+	if (dev->driver->driver_features & DRIVER_MODESET)
+		drm_fb_release(file_priv);
+
 	mutex_lock(&dev->ctxlist_mutex);
 	if (!list_empty(&dev->ctxlist)) {
 		struct drm_ctx_list *pos, *n;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6915fb82d0b0..88d3368ffddd 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -104,8 +104,8 @@ drm_gem_init(struct drm_device *dev)
 
 	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
 			DRM_FILE_PAGE_OFFSET_SIZE)) {
-		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
 		drm_ht_remove(&mm->offset_hash);
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
 		return -ENOMEM;
 	}
 
@@ -295,35 +295,37 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 
 again:
-	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
+	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	spin_lock(&dev->object_name_lock);
-	if (obj->name) {
-		args->name = obj->name;
+	if (!obj->name) {
+		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+					&obj->name);
+		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
-		return 0;
-	}
-	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-				&obj->name);
-	spin_unlock(&dev->object_name_lock);
-	if (ret == -EAGAIN)
-		goto again;
 
-	if (ret != 0) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+		if (ret == -EAGAIN)
+			goto again;
 
-	/*
-	 * Leave the reference from the lookup around as the
-	 * name table now holds one
-	 */
-	args->name = (uint64_t) obj->name;
+		if (ret != 0)
+			goto err;
 
-	return 0;
+		/* Allocate a reference for the name table. */
+		drm_gem_object_reference(obj);
+	} else {
+		args->name = (uint64_t) obj->name;
+		spin_unlock(&dev->object_name_lock);
+		ret = 0;
+	}
+
+err:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
 /**
@@ -448,6 +450,7 @@ drm_gem_object_handle_free(struct kref *kref)
 	spin_lock(&dev->object_name_lock);
 	if (obj->name) {
 		idr_remove(&dev->object_name_idr, obj->name);
+		obj->name = 0;
 		spin_unlock(&dev->object_name_lock);
 		/*
 		 * The object name held a reference to this object, drop
@@ -460,6 +463,26 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+
+	drm_gem_object_reference(obj);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+
 /**
  * drm_gem_mmap - memory map routine for GEM objects
  * @filp: DRM file pointer
@@ -521,6 +544,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 #endif
 	vma->vm_page_prot = __pgprot(prot);
 
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
 	drm_vm_open_locked(vma);
 
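
The new drm_gem_vm_open()/drm_gem_vm_close() helpers keep the GEM object referenced for the lifetime of every VMA that maps it, including VMAs created by mremap() or partial unmap, so a driver's fault handler can safely dereference vma->vm_private_data. Wiring them up is a one-liner in the driver's vm_operations_struct; the sketch below mirrors the i915_drv.c hunk later in this diff, with "mydrv_gem_fault" standing in for the driver's own fault handler:

	/* Driver-side hookup of the new GEM VMA helpers (illustrative). */
	static struct vm_operations_struct mydrv_gem_vm_ops = {
		.fault = mydrv_gem_fault,
		.open = drm_gem_vm_open,	/* takes a reference on the object */
		.close = drm_gem_vm_close,	/* drops it under struct_mutex */
	};
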
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 81f1cff56fd5..2d797ffe8137 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		i915_dma_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac12ee31a46..0692622ee2b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
  *
  */
 
+#include <linux/device.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -66,6 +67,12 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	i915_save_state(dev);
 
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) && i915_gem_idle(dev)) {
+		dev_err(&dev->pdev->dev, "GEM idle failed, aborting suspend\n");
+		return -EBUSY;
+	}
+
 	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +86,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
@@ -89,11 +99,24 @@ static int i915_resume(struct drm_device *dev)
 
 	intel_opregion_init(dev);
 
-	return 0;
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		ret = i915_gem_init_ringbuffer(dev);
+		if (ret != 0)
+			ret = -1;
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return ret;
 }
 
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7325363164f8..17fa40858d26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,8 @@ typedef struct drm_i915_private {
 	unsigned int lvds_dither:1;
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
 
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -616,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gem_idle(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 818576654092..25b337438ca7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -607,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
-	case -EBUSY:
-		DRM_ERROR("can't insert pfn?? fault or busy...\n");
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -684,6 +678,30 @@ out_free_list:
 	return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	if (list->map) {
+		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
+	obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -758,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	if (!obj_priv->mmap_offset) {
 		ret = i915_gem_create_mmap_offset(obj);
-		if (ret)
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	}
 
 	args->offset = obj_priv->mmap_offset;
@@ -1996,30 +2017,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  * drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
 
@@ -2029,15 +2048,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2046,9 +2067,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains). So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
@@ -2251,6 +2278,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.read_domains,
 				  reloc.write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 
@@ -2480,13 +2509,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	if (dev_priv->mm.wedged) {
 		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EIO;
+		ret = -EIO;
+		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
 		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
@@ -2554,9 +2585,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2575,6 +2604,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2632,15 +2667,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Copy the new buffer offsets back to the user's exec list. */
-	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-			   (uintptr_t) args->buffers_ptr,
-			   exec_list,
-			   sizeof(*exec_list) * args->buffer_count);
-	if (ret)
-		DRM_ERROR("failed to copy %d exec entries "
-			  "back to user (%d)\n",
-			  args->buffer_count, ret);
 err:
 	for (i = 0; i < pinned; i++)
 		i915_gem_object_unpin(object_list[i]);
@@ -2650,6 +2676,18 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret)
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+	}
+
 pre_mutex_err:
 	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
 		 DRM_MEM_DRIVER);
@@ -2753,6 +2791,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
+		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
@@ -2833,6 +2872,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
 	 * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2885,9 +2931,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2898,19 +2941,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
-	list = &obj->map_list;
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	map = list->map;
-	if (map) {
-		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-		list->map = NULL;
-	}
+	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2949,7 +2980,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3095,6 +3126,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
@@ -3107,6 +3139,31 @@ i915_gem_init_hws(struct drm_device *dev)
 	return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	if (dev_priv->hws_obj == NULL)
+		return;
+
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
+
+	kunmap(obj_priv->page_list[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	dev_priv->hws_obj = NULL;
+
+	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+	dev_priv->hw_status_page = NULL;
+
+	/* Write high address into HWS_PGA when disabling. */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3124,6 +3181,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	obj = drm_gem_object_alloc(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
+		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
 	obj_priv = obj->driver_private;
@@ -3131,6 +3189,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return ret;
 	}
 
@@ -3148,7 +3207,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return -EINVAL;
 	}
 	ring->ring_obj = obj;
@@ -3228,20 +3289,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	dev_priv->ring.ring_obj = NULL;
 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-	if (dev_priv->hws_obj != NULL) {
-		struct drm_gem_object *obj = dev_priv->hws_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-		kunmap(obj_priv->page_list[0]);
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		dev_priv->hws_obj = NULL;
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		dev_priv->hw_status_page = NULL;
-
-		/* Write high address into HWS_PGA when disabling. */
-		I915_WRITE(HWS_PGA, 0x1ffff000);
-	}
+	i915_gem_cleanup_hws(dev);
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa1685cba840..7fb4191ef934 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -299,9 +299,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	}
 	obj_priv->stride = args->stride;
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -340,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 		DRM_ERROR("unknown tiling mode\n");
 	}
 
-	mutex_unlock(&dev->struct_mutex);
-
 	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4ca82a025525..65be30dccc77 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -135,6 +135,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 	if (general) {
 		dev_priv->int_tv_support = general->int_tv_support;
 		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+
+		if (dev_priv->lvds_use_ssc) {
+			if (IS_I855(dev_priv->dev))
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+			else
+				dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bbdd72909a11..65b635ce28c8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,12 +90,12 @@ typedef struct {
 #define I9XX_DOT_MAX 400000
 #define I9XX_VCO_MIN 1400000
 #define I9XX_VCO_MAX 2800000
-#define I9XX_N_MIN 3
-#define I9XX_N_MAX 8
+#define I9XX_N_MIN 1
+#define I9XX_N_MAX 6
 #define I9XX_M_MIN 70
 #define I9XX_M_MAX 120
 #define I9XX_M1_MIN 10
-#define I9XX_M1_MAX 20
+#define I9XX_M1_MAX 22
 #define I9XX_M2_MIN 5
 #define I9XX_M2_MAX 9
 #define I9XX_P_SDVO_DAC_MIN 5
@@ -189,9 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
 	return limit;
 }
 
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-
-static void i8xx_clock(int refclk, intel_clock_t *clock)
+static void intel_clock(int refclk, intel_clock_t *clock)
 {
 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
 	clock->p = clock->p1 * clock->p2;
@@ -199,25 +197,6 @@ static void i8xx_clock(int refclk, intel_clock_t *clock)
 	clock->vco = refclk * clock->m / (clock->n + 2);
 	clock->dot = clock->vco / clock->p;
 }
-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-
-static void i9xx_clock(int refclk, intel_clock_t *clock)
-{
-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / (clock->n + 2);
-	clock->dot = clock->vco / clock->p;
-}
-
-static void intel_clock(struct drm_device *dev, int refclk,
-			intel_clock_t *clock)
-{
-	if (IS_I9XX(dev))
-		i9xx_clock (refclk, clock);
-	else
-		i8xx_clock (refclk, clock);
-}
-
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
@@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 	return false;
 }
 
-#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+#define INTELPllInvalid(s) do { DRM_DEBUG(s); return false; } while (0)
 /**
  * Returns whether the given set of divisors are valid for a given refclk with
  * the given connectors.
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
 		     clock.p1 <= limit->p1.max; clock.p1++) {
 			int this_err;
 
-			intel_clock(dev, refclk, &clock);
+			intel_clock(refclk, &clock);
 
 			if (!intel_PLL_is_valid(crtc, &clock))
 				continue;
@@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev)
 	udelay(20000);
 }
 
-static void
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
@@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
 	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr, alignment;
+	int ret;
 
 	/* no fb bound */
 	if (!crtc->fb) {
 		DRM_DEBUG("No FB bound\n");
-		return;
+		return 0;
+	}
+
+	switch (pipe) {
+	case 0:
+	case 1:
+		break;
+	default:
+		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+		return -EINVAL;
 	}
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
@@ -377,28 +366,30 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		alignment = 64 * 1024;
 		break;
 	case I915_TILING_X:
| 380 | if (IS_I9XX(dev)) | 369 | /* pin() will align the object as required by fence */ |
| 381 | alignment = 1024 * 1024; | 370 | alignment = 0; |
| 382 | else | ||
| 383 | alignment = 512 * 1024; | ||
| 384 | break; | 371 | break; |
| 385 | case I915_TILING_Y: | 372 | case I915_TILING_Y: |
| 386 | /* FIXME: Is this true? */ | 373 | /* FIXME: Is this true? */ |
| 387 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | 374 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); |
| 388 | return; | 375 | return -EINVAL; |
| 389 | default: | 376 | default: |
| 390 | BUG(); | 377 | BUG(); |
| 391 | } | 378 | } |
| 392 | 379 | ||
| 393 | if (i915_gem_object_pin(intel_fb->obj, alignment)) | 380 | mutex_lock(&dev->struct_mutex); |
| 394 | return; | 381 | ret = i915_gem_object_pin(intel_fb->obj, alignment); |
| 395 | 382 | if (ret != 0) { | |
| 396 | i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); | 383 | mutex_unlock(&dev->struct_mutex); |
| 397 | 384 | return ret; | |
| 398 | Start = obj_priv->gtt_offset; | 385 | } |
| 399 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
| 400 | 386 | ||
| 401 | I915_WRITE(dspstride, crtc->fb->pitch); | 387 | ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); |
| 388 | if (ret != 0) { | ||
| 389 | i915_gem_object_unpin(intel_fb->obj); | ||
| 390 | mutex_unlock(&dev->struct_mutex); | ||
| 391 | return ret; | ||
| 392 | } | ||
| 402 | 393 | ||
| 403 | dspcntr = I915_READ(dspcntr_reg); | 394 | dspcntr = I915_READ(dspcntr_reg); |
| 404 | /* Mask out pixel format bits in case we change it */ | 395 | /* Mask out pixel format bits in case we change it */ |
| @@ -419,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 419 | break; | 410 | break; |
| 420 | default: | 411 | default: |
| 421 | DRM_ERROR("Unknown color depth\n"); | 412 | DRM_ERROR("Unknown color depth\n"); |
| 422 | return; | 413 | i915_gem_object_unpin(intel_fb->obj); |
| 414 | mutex_unlock(&dev->struct_mutex); | ||
| 415 | return -EINVAL; | ||
| 423 | } | 416 | } |
| 424 | I915_WRITE(dspcntr_reg, dspcntr); | 417 | I915_WRITE(dspcntr_reg, dspcntr); |
| 425 | 418 | ||
| 419 | Start = obj_priv->gtt_offset; | ||
| 420 | Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); | ||
| 421 | |||
| 426 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); | 422 | DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); |
| 423 | I915_WRITE(dspstride, crtc->fb->pitch); | ||
| 427 | if (IS_I965G(dev)) { | 424 | if (IS_I965G(dev)) { |
| 428 | I915_WRITE(dspbase, Offset); | 425 | I915_WRITE(dspbase, Offset); |
| 429 | I915_READ(dspbase); | 426 | I915_READ(dspbase); |
| @@ -440,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 440 | intel_fb = to_intel_framebuffer(old_fb); | 437 | intel_fb = to_intel_framebuffer(old_fb); |
| 441 | i915_gem_object_unpin(intel_fb->obj); | 438 | i915_gem_object_unpin(intel_fb->obj); |
| 442 | } | 439 | } |
| 440 | mutex_unlock(&dev->struct_mutex); | ||
| 443 | 441 | ||
| 444 | if (!dev->primary->master) | 442 | if (!dev->primary->master) |
| 445 | return; | 443 | return 0; |
| 446 | 444 | ||
| 447 | master_priv = dev->primary->master->driver_priv; | 445 | master_priv = dev->primary->master->driver_priv; |
| 448 | if (!master_priv->sarea_priv) | 446 | if (!master_priv->sarea_priv) |
| 449 | return; | 447 | return 0; |
| 450 | 448 | ||
| 451 | switch (pipe) { | 449 | if (pipe) { |
| 452 | case 0: | ||
| 453 | master_priv->sarea_priv->pipeA_x = x; | ||
| 454 | master_priv->sarea_priv->pipeA_y = y; | ||
| 455 | break; | ||
| 456 | case 1: | ||
| 457 | master_priv->sarea_priv->pipeB_x = x; | 450 | master_priv->sarea_priv->pipeB_x = x; |
| 458 | master_priv->sarea_priv->pipeB_y = y; | 451 | master_priv->sarea_priv->pipeB_y = y; |
| 459 | break; | 452 | } else { |
| 460 | default: | 453 | master_priv->sarea_priv->pipeA_x = x; |
| 461 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 454 | master_priv->sarea_priv->pipeA_y = y; |
| 462 | break; | ||
| 463 | } | 455 | } |
| 456 | |||
| 457 | return 0; | ||
| 464 | } | 458 | } |
| 465 | 459 | ||
| 466 | 460 | ||
| @@ -708,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev) | |||
| 708 | return 1; | 702 | return 1; |
| 709 | } | 703 | } |
| 710 | 704 | ||
| 711 | static void intel_crtc_mode_set(struct drm_crtc *crtc, | 705 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
| 712 | struct drm_display_mode *mode, | 706 | struct drm_display_mode *mode, |
| 713 | struct drm_display_mode *adjusted_mode, | 707 | struct drm_display_mode *adjusted_mode, |
| 714 | int x, int y, | 708 | int x, int y, |
| 715 | struct drm_framebuffer *old_fb) | 709 | struct drm_framebuffer *old_fb) |
| 716 | { | 710 | { |
| 717 | struct drm_device *dev = crtc->dev; | 711 | struct drm_device *dev = crtc->dev; |
| 718 | struct drm_i915_private *dev_priv = dev->dev_private; | 712 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -732,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 732 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; | 726 | int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; |
| 733 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; | 727 | int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; |
| 734 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; | 728 | int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; |
| 735 | int refclk; | 729 | int refclk, num_outputs = 0; |
| 736 | intel_clock_t clock; | 730 | intel_clock_t clock; |
| 737 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 731 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
| 738 | bool ok, is_sdvo = false, is_dvo = false; | 732 | bool ok, is_sdvo = false, is_dvo = false; |
| 739 | bool is_crt = false, is_lvds = false, is_tv = false; | 733 | bool is_crt = false, is_lvds = false, is_tv = false; |
| 740 | struct drm_mode_config *mode_config = &dev->mode_config; | 734 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 741 | struct drm_connector *connector; | 735 | struct drm_connector *connector; |
| 736 | int ret; | ||
| 742 | 737 | ||
| 743 | drm_vblank_pre_modeset(dev, pipe); | 738 | drm_vblank_pre_modeset(dev, pipe); |
| 744 | 739 | ||
| @@ -768,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 768 | is_crt = true; | 763 | is_crt = true; |
| 769 | break; | 764 | break; |
| 770 | } | 765 | } |
| 766 | |||
| 767 | num_outputs++; | ||
| 771 | } | 768 | } |
| 772 | 769 | ||
| 773 | if (IS_I9XX(dev)) { | 770 | if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { |
| 771 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
| 772 | DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); | ||
| 773 | } else if (IS_I9XX(dev)) { | ||
| 774 | refclk = 96000; | 774 | refclk = 96000; |
| 775 | } else { | 775 | } else { |
| 776 | refclk = 48000; | 776 | refclk = 48000; |
| @@ -779,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 779 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); | 779 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); |
| 780 | if (!ok) { | 780 | if (!ok) { |
| 781 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 781 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
| 782 | return; | 782 | return -EINVAL; |
| 783 | } | 783 | } |
| 784 | 784 | ||
| 785 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 785 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
| @@ -829,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 829 | } | 829 | } |
| 830 | } | 830 | } |
| 831 | 831 | ||
| 832 | if (is_tv) { | 832 | if (is_sdvo && is_tv) |
| 833 | dpll |= PLL_REF_INPUT_TVCLKINBC; | ||
| 834 | else if (is_tv) | ||
| 833 | /* XXX: just matching BIOS for now */ | 835 | /* XXX: just matching BIOS for now */ |
| 834 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 836 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
| 835 | dpll |= 3; | 837 | dpll |= 3; |
| 836 | } | 838 | else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) |
| 839 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | ||
| 837 | else | 840 | else |
| 838 | dpll |= PLL_REF_INPUT_DREFCLK; | 841 | dpll |= PLL_REF_INPUT_DREFCLK; |
| 839 | 842 | ||
| @@ -950,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, | |||
| 950 | I915_WRITE(dspcntr_reg, dspcntr); | 953 | I915_WRITE(dspcntr_reg, dspcntr); |
| 951 | 954 | ||
| 952 | /* Flush the plane changes */ | 955 | /* Flush the plane changes */ |
| 953 | intel_pipe_set_base(crtc, x, y, old_fb); | 956 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
| 957 | if (ret != 0) | ||
| 958 | return ret; | ||
| 954 | 959 | ||
| 955 | drm_vblank_post_modeset(dev, pipe); | 960 | drm_vblank_post_modeset(dev, pipe); |
| 961 | |||
| 962 | return 0; | ||
| 956 | } | 963 | } |
| 957 | 964 | ||
| 958 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ | 965 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
| @@ -1001,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 1001 | temp = CURSOR_MODE_DISABLE; | 1008 | temp = CURSOR_MODE_DISABLE; |
| 1002 | addr = 0; | 1009 | addr = 0; |
| 1003 | bo = NULL; | 1010 | bo = NULL; |
| 1011 | mutex_lock(&dev->struct_mutex); | ||
| 1004 | goto finish; | 1012 | goto finish; |
| 1005 | } | 1013 | } |
| 1006 | 1014 | ||
| @@ -1023,18 +1031,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 1023 | } | 1031 | } |
| 1024 | 1032 | ||
| 1025 | /* we only need to pin inside GTT if cursor is non-phy */ | 1033 | /* we only need to pin inside GTT if cursor is non-phy */ |
| 1034 | mutex_lock(&dev->struct_mutex); | ||
| 1026 | if (!dev_priv->cursor_needs_physical) { | 1035 | if (!dev_priv->cursor_needs_physical) { |
| 1027 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 1036 | ret = i915_gem_object_pin(bo, PAGE_SIZE); |
| 1028 | if (ret) { | 1037 | if (ret) { |
| 1029 | DRM_ERROR("failed to pin cursor bo\n"); | 1038 | DRM_ERROR("failed to pin cursor bo\n"); |
| 1030 | goto fail; | 1039 | goto fail_locked; |
| 1031 | } | 1040 | } |
| 1032 | addr = obj_priv->gtt_offset; | 1041 | addr = obj_priv->gtt_offset; |
| 1033 | } else { | 1042 | } else { |
| 1034 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | 1043 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); |
| 1035 | if (ret) { | 1044 | if (ret) { |
| 1036 | DRM_ERROR("failed to attach phys object\n"); | 1045 | DRM_ERROR("failed to attach phys object\n"); |
| 1037 | goto fail; | 1046 | goto fail_locked; |
| 1038 | } | 1047 | } |
| 1039 | addr = obj_priv->phys_obj->handle->busaddr; | 1048 | addr = obj_priv->phys_obj->handle->busaddr; |
| 1040 | } | 1049 | } |
| @@ -1054,10 +1063,9 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 1054 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 1063 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
| 1055 | } else | 1064 | } else |
| 1056 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 1065 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
| 1057 | mutex_lock(&dev->struct_mutex); | ||
| 1058 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 1066 | drm_gem_object_unreference(intel_crtc->cursor_bo); |
| 1059 | mutex_unlock(&dev->struct_mutex); | ||
| 1060 | } | 1067 | } |
| 1068 | mutex_unlock(&dev->struct_mutex); | ||
| 1061 | 1069 | ||
| 1062 | intel_crtc->cursor_addr = addr; | 1070 | intel_crtc->cursor_addr = addr; |
| 1063 | intel_crtc->cursor_bo = bo; | 1071 | intel_crtc->cursor_bo = bo; |
| @@ -1065,6 +1073,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 1065 | return 0; | 1073 | return 0; |
| 1066 | fail: | 1074 | fail: |
| 1067 | mutex_lock(&dev->struct_mutex); | 1075 | mutex_lock(&dev->struct_mutex); |
| 1076 | fail_locked: | ||
| 1068 | drm_gem_object_unreference(bo); | 1077 | drm_gem_object_unreference(bo); |
| 1069 | mutex_unlock(&dev->struct_mutex); | 1078 | mutex_unlock(&dev->struct_mutex); |
| 1070 | return ret; | 1079 | return ret; |
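The cursor_set hunks introduce a second error label so that paths which already hold struct_mutex skip the lock acquisition and share the same cleanup. A small sketch of the two-label unwind, names illustrative only:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

static int cursor_set(int lookup_ok, int pin_ok)
{
        int ret = -ENOENT;

        if (!lookup_ok)
                goto fail;              /* failed before taking the lock */

        pthread_mutex_lock(&struct_mutex);
        if (!pin_ok) {
                ret = -ENOMEM;
                goto fail_locked;       /* lock already held */
        }
        /* ... install the cursor ... */
        pthread_mutex_unlock(&struct_mutex);
        return 0;

fail:
        pthread_mutex_lock(&struct_mutex);
fail_locked:
        /* shared cleanup: drop the buffer reference under the mutex */
        pthread_mutex_unlock(&struct_mutex);
        return ret;
}

int main(void)
{
        printf("%d %d\n", cursor_set(1, 0), cursor_set(0, 1));
        return 0;
}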
| @@ -1292,7 +1301,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
| 1292 | } | 1301 | } |
| 1293 | 1302 | ||
| 1294 | /* XXX: Handle the 100Mhz refclk */ | 1303 | /* XXX: Handle the 100Mhz refclk */ |
| 1295 | i9xx_clock(96000, &clock); | 1304 | intel_clock(96000, &clock); |
| 1296 | } else { | 1305 | } else { |
| 1297 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | 1306 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); |
| 1298 | 1307 | ||
| @@ -1304,9 +1313,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
| 1304 | if ((dpll & PLL_REF_INPUT_MASK) == | 1313 | if ((dpll & PLL_REF_INPUT_MASK) == |
| 1305 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | 1314 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { |
| 1306 | /* XXX: might not be 66MHz */ | 1315 | /* XXX: might not be 66MHz */ |
| 1307 | i8xx_clock(66000, &clock); | 1316 | intel_clock(66000, &clock); |
| 1308 | } else | 1317 | } else |
| 1309 | i8xx_clock(48000, &clock); | 1318 | intel_clock(48000, &clock); |
| 1310 | } else { | 1319 | } else { |
| 1311 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | 1320 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
| 1312 | clock.p1 = 2; | 1321 | clock.p1 = 2; |
| @@ -1319,7 +1328,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
| 1319 | else | 1328 | else |
| 1320 | clock.p2 = 2; | 1329 | clock.p2 = 2; |
| 1321 | 1330 | ||
| 1322 | i8xx_clock(48000, &clock); | 1331 | intel_clock(48000, &clock); |
| 1323 | } | 1332 | } |
| 1324 | } | 1333 | } |
| 1325 | 1334 | ||
| @@ -1598,7 +1607,9 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
| 1598 | 1607 | ||
| 1599 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); | 1608 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); |
| 1600 | if (ret) { | 1609 | if (ret) { |
| 1610 | mutex_lock(&dev->struct_mutex); | ||
| 1601 | drm_gem_object_unreference(obj); | 1611 | drm_gem_object_unreference(obj); |
| 1612 | mutex_unlock(&dev->struct_mutex); | ||
| 1602 | return NULL; | 1613 | return NULL; |
| 1603 | } | 1614 | } |
| 1604 | 1615 | ||
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index afd1217b8a02..b7f0ebe9f810 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
| @@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 473 | ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); | 473 | ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); |
| 474 | if (ret) { | 474 | if (ret) { |
| 475 | DRM_ERROR("failed to allocate fb.\n"); | 475 | DRM_ERROR("failed to allocate fb.\n"); |
| 476 | goto out_unref; | 476 | goto out_unpin; |
| 477 | } | 477 | } |
| 478 | 478 | ||
| 479 | list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); | 479 | list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); |
| @@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 484 | info = framebuffer_alloc(sizeof(struct intelfb_par), device); | 484 | info = framebuffer_alloc(sizeof(struct intelfb_par), device); |
| 485 | if (!info) { | 485 | if (!info) { |
| 486 | ret = -ENOMEM; | 486 | ret = -ENOMEM; |
| 487 | goto out_unref; | 487 | goto out_unpin; |
| 488 | } | 488 | } |
| 489 | 489 | ||
| 490 | par = info->par; | 490 | par = info->par; |
| @@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 513 | size); | 513 | size); |
| 514 | if (!info->screen_base) { | 514 | if (!info->screen_base) { |
| 515 | ret = -ENOSPC; | 515 | ret = -ENOSPC; |
| 516 | goto out_unref; | 516 | goto out_unpin; |
| 517 | } | 517 | } |
| 518 | info->screen_size = size; | 518 | info->screen_size = size; |
| 519 | 519 | ||
| @@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
| 608 | mutex_unlock(&dev->struct_mutex); | 608 | mutex_unlock(&dev->struct_mutex); |
| 609 | return 0; | 609 | return 0; |
| 610 | 610 | ||
| 611 | out_unpin: | ||
| 612 | i915_gem_object_unpin(fbo); | ||
| 611 | out_unref: | 613 | out_unref: |
| 612 | drm_gem_object_unreference(fbo); | 614 | drm_gem_object_unreference(fbo); |
| 613 | mutex_unlock(&dev->struct_mutex); | 615 | mutex_unlock(&dev->struct_mutex); |
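The intel_fb.c hunks redirect every failure after the pin to a new out_unpin label that falls through into out_unref, so cleanup labels stack in reverse order of acquisition. A sketch of that unwind with userspace stand-ins for the GEM calls (hypothetical names):

#include <errno.h>
#include <stdio.h>

static int  pin(void)        { return 0; }
static void unpin(void)      { printf("unpin\n"); }
static void unref(void)      { printf("unref\n"); }
static int  create_fb(void)  { return -ENOMEM; }   /* stub: fails */

static int fb_create(void)
{
        int ret;

        /* object already referenced here */
        ret = pin();
        if (ret)
                goto out_unref;

        ret = create_fb();
        if (ret)
                goto out_unpin;        /* new label: also undo the pin */

        return 0;

out_unpin:
        unpin();
out_unref:
        unref();
        return ret;
}

int main(void)
{
        printf("fb_create() = %d\n", fb_create());
        return 0;
}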
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6d4f91265354..0d211af98854 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -481,8 +481,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 481 | if (dev_priv->panel_fixed_mode) { | 481 | if (dev_priv->panel_fixed_mode) { |
| 482 | dev_priv->panel_fixed_mode->type |= | 482 | dev_priv->panel_fixed_mode->type |= |
| 483 | DRM_MODE_TYPE_PREFERRED; | 483 | DRM_MODE_TYPE_PREFERRED; |
| 484 | drm_mode_probed_add(connector, | ||
| 485 | dev_priv->panel_fixed_mode); | ||
| 486 | goto out; | 484 | goto out; |
| 487 | } | 485 | } |
| 488 | } | 486 | } |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a30508b639ba..fbe6f3931b1b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
| @@ -193,7 +193,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
| 193 | 193 | ||
| 194 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} | 194 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} |
| 195 | /** Mapping of command numbers to names, for debug output */ | 195 | /** Mapping of command numbers to names, for debug output */ |
| 196 | const static struct _sdvo_cmd_name { | 196 | static const struct _sdvo_cmd_name { |
| 197 | u8 cmd; | 197 | u8 cmd; |
| 198 | char *name; | 198 | char *name; |
| 199 | } sdvo_cmd_names[] = { | 199 | } sdvo_cmd_names[] = { |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index fbb35dc56f5c..56485d67369b 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
| @@ -411,7 +411,7 @@ struct tv_mode { | |||
| 411 | * These values account for -1s required. | 411 | * These values account for -1s required. |
| 412 | */ | 412 | */ |
| 413 | 413 | ||
| 414 | const static struct tv_mode tv_modes[] = { | 414 | static const struct tv_mode tv_modes[] = { |
| 415 | { | 415 | { |
| 416 | .name = "NTSC-M", | 416 | .name = "NTSC-M", |
| 417 | .clock = 107520, | 417 | .clock = 107520, |
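The intel_sdvo.c and intel_tv.c hunks only reorder declaration specifiers from "const static" to "static const". Both forms are accepted, but putting the storage class first matches kernel style and avoids compiler nagging (gcc, for instance, can warn that 'static' is not at the beginning of the declaration). Sketch with an invented table:

#include <stdio.h>

struct cmd_name {
        unsigned char cmd;
        const char *name;
};

static const struct cmd_name cmd_names[] = {   /* preferred ordering */
        { 0x01, "RESET" },
        { 0x02, "GET_DEVICE_CAPS" },
};

int main(void)
{
        printf("%zu entries\n", sizeof(cmd_names) / sizeof(cmd_names[0]));
        return 0;
}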
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index df4cf97e5d97..92965dbb3c14 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
| @@ -557,8 +557,10 @@ static int radeon_do_engine_reset(struct drm_device * dev) | |||
| 557 | } | 557 | } |
| 558 | 558 | ||
| 559 | static void radeon_cp_init_ring_buffer(struct drm_device * dev, | 559 | static void radeon_cp_init_ring_buffer(struct drm_device * dev, |
| 560 | drm_radeon_private_t * dev_priv) | 560 | drm_radeon_private_t *dev_priv, |
| 561 | struct drm_file *file_priv) | ||
| 561 | { | 562 | { |
| 563 | struct drm_radeon_master_private *master_priv; | ||
| 562 | u32 ring_start, cur_read_ptr; | 564 | u32 ring_start, cur_read_ptr; |
| 563 | u32 tmp; | 565 | u32 tmp; |
| 564 | 566 | ||
| @@ -677,6 +679,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, | |||
| 677 | dev_priv->scratch[2] = 0; | 679 | dev_priv->scratch[2] = 0; |
| 678 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); | 680 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); |
| 679 | 681 | ||
| 682 | /* reset sarea copies of these */ | ||
| 683 | master_priv = file_priv->master->driver_priv; | ||
| 684 | if (master_priv->sarea_priv) { | ||
| 685 | master_priv->sarea_priv->last_frame = 0; | ||
| 686 | master_priv->sarea_priv->last_dispatch = 0; | ||
| 687 | master_priv->sarea_priv->last_clear = 0; | ||
| 688 | } | ||
| 689 | |||
| 680 | radeon_do_wait_for_idle(dev_priv); | 690 | radeon_do_wait_for_idle(dev_priv); |
| 681 | 691 | ||
| 682 | /* Sync everything up */ | 692 | /* Sync everything up */ |
| @@ -1215,7 +1225,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, | |||
| 1215 | } | 1225 | } |
| 1216 | 1226 | ||
| 1217 | radeon_cp_load_microcode(dev_priv); | 1227 | radeon_cp_load_microcode(dev_priv); |
| 1218 | radeon_cp_init_ring_buffer(dev, dev_priv); | 1228 | radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); |
| 1219 | 1229 | ||
| 1220 | dev_priv->last_buf = 0; | 1230 | dev_priv->last_buf = 0; |
| 1221 | 1231 | ||
| @@ -1281,7 +1291,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) | |||
| 1281 | * | 1291 | * |
| 1282 | * Charl P. Botha <http://cpbotha.net> | 1292 | * Charl P. Botha <http://cpbotha.net> |
| 1283 | */ | 1293 | */ |
| 1284 | static int radeon_do_resume_cp(struct drm_device * dev) | 1294 | static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv) |
| 1285 | { | 1295 | { |
| 1286 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1296 | drm_radeon_private_t *dev_priv = dev->dev_private; |
| 1287 | 1297 | ||
| @@ -1304,7 +1314,7 @@ static int radeon_do_resume_cp(struct drm_device * dev) | |||
| 1304 | } | 1314 | } |
| 1305 | 1315 | ||
| 1306 | radeon_cp_load_microcode(dev_priv); | 1316 | radeon_cp_load_microcode(dev_priv); |
| 1307 | radeon_cp_init_ring_buffer(dev, dev_priv); | 1317 | radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); |
| 1308 | 1318 | ||
| 1309 | radeon_do_engine_reset(dev); | 1319 | radeon_do_engine_reset(dev); |
| 1310 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); | 1320 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); |
| @@ -1479,8 +1489,7 @@ int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_pri | |||
| 1479 | */ | 1489 | */ |
| 1480 | int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) | 1490 | int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) |
| 1481 | { | 1491 | { |
| 1482 | 1492 | return radeon_do_resume_cp(dev, file_priv); | |
| 1483 | return radeon_do_resume_cp(dev); | ||
| 1484 | } | 1493 | } |
| 1485 | 1494 | ||
| 1486 | int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) | 1495 | int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) |
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c index ad4cdd256137..0b28fccec03f 100644 --- a/drivers/parport/parport_atari.c +++ b/drivers/parport/parport_atari.c | |||
| @@ -84,7 +84,7 @@ parport_atari_frob_control(struct parport *p, unsigned char mask, | |||
| 84 | static unsigned char | 84 | static unsigned char |
| 85 | parport_atari_read_status(struct parport *p) | 85 | parport_atari_read_status(struct parport *p) |
| 86 | { | 86 | { |
| 87 | return ((mfp.par_dt_reg & 1 ? 0 : PARPORT_STATUS_BUSY) | | 87 | return ((st_mfp.par_dt_reg & 1 ? 0 : PARPORT_STATUS_BUSY) | |
| 88 | PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR); | 88 | PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR); |
| 89 | } | 89 | } |
| 90 | 90 | ||
| @@ -193,9 +193,9 @@ static int __init parport_atari_init(void) | |||
| 193 | sound_ym.wd_data = sound_ym.rd_data_reg_sel | (1 << 5); | 193 | sound_ym.wd_data = sound_ym.rd_data_reg_sel | (1 << 5); |
| 194 | local_irq_restore(flags); | 194 | local_irq_restore(flags); |
| 195 | /* MFP port I0 as input. */ | 195 | /* MFP port I0 as input. */ |
| 196 | mfp.data_dir &= ~1; | 196 | st_mfp.data_dir &= ~1; |
| 197 | /* MFP port I0 interrupt on high->low edge. */ | 197 | /* MFP port I0 interrupt on high->low edge. */ |
| 198 | mfp.active_edge &= ~1; | 198 | st_mfp.active_edge &= ~1; |
| 199 | p = parport_register_port((unsigned long)&sound_ym.wd_data, | 199 | p = parport_register_port((unsigned long)&sound_ym.wd_data, |
| 200 | IRQ_MFP_BUSY, PARPORT_DMA_NONE, | 200 | IRQ_MFP_BUSY, PARPORT_DMA_NONE, |
| 201 | &parport_atari_ops); | 201 | &parport_atari_ops); |
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 65dc41540c62..45940f31fe9e 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
| @@ -166,6 +166,7 @@ struct fujitsu_hotkey_t { | |||
| 166 | struct platform_device *pf_device; | 166 | struct platform_device *pf_device; |
| 167 | struct kfifo *fifo; | 167 | struct kfifo *fifo; |
| 168 | spinlock_t fifo_lock; | 168 | spinlock_t fifo_lock; |
| 169 | int rfkill_supported; | ||
| 169 | int rfkill_state; | 170 | int rfkill_state; |
| 170 | int logolamp_registered; | 171 | int logolamp_registered; |
| 171 | int kblamps_registered; | 172 | int kblamps_registered; |
| @@ -526,7 +527,7 @@ static ssize_t | |||
| 526 | show_lid_state(struct device *dev, | 527 | show_lid_state(struct device *dev, |
| 527 | struct device_attribute *attr, char *buf) | 528 | struct device_attribute *attr, char *buf) |
| 528 | { | 529 | { |
| 529 | if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD) | 530 | if (!(fujitsu_hotkey->rfkill_supported & 0x100)) |
| 530 | return sprintf(buf, "unknown\n"); | 531 | return sprintf(buf, "unknown\n"); |
| 531 | if (fujitsu_hotkey->rfkill_state & 0x100) | 532 | if (fujitsu_hotkey->rfkill_state & 0x100) |
| 532 | return sprintf(buf, "open\n"); | 533 | return sprintf(buf, "open\n"); |
| @@ -538,7 +539,7 @@ static ssize_t | |||
| 538 | show_dock_state(struct device *dev, | 539 | show_dock_state(struct device *dev, |
| 539 | struct device_attribute *attr, char *buf) | 540 | struct device_attribute *attr, char *buf) |
| 540 | { | 541 | { |
| 541 | if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD) | 542 | if (!(fujitsu_hotkey->rfkill_supported & 0x200)) |
| 542 | return sprintf(buf, "unknown\n"); | 543 | return sprintf(buf, "unknown\n"); |
| 543 | if (fujitsu_hotkey->rfkill_state & 0x200) | 544 | if (fujitsu_hotkey->rfkill_state & 0x200) |
| 544 | return sprintf(buf, "docked\n"); | 545 | return sprintf(buf, "docked\n"); |
| @@ -550,7 +551,7 @@ static ssize_t | |||
| 550 | show_radios_state(struct device *dev, | 551 | show_radios_state(struct device *dev, |
| 551 | struct device_attribute *attr, char *buf) | 552 | struct device_attribute *attr, char *buf) |
| 552 | { | 553 | { |
| 553 | if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD) | 554 | if (!(fujitsu_hotkey->rfkill_supported & 0x20)) |
| 554 | return sprintf(buf, "unknown\n"); | 555 | return sprintf(buf, "unknown\n"); |
| 555 | if (fujitsu_hotkey->rfkill_state & 0x20) | 556 | if (fujitsu_hotkey->rfkill_state & 0x20) |
| 556 | return sprintf(buf, "on\n"); | 557 | return sprintf(buf, "on\n"); |
| @@ -928,8 +929,17 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
| 928 | ; /* No action, result is discarded */ | 929 | ; /* No action, result is discarded */ |
| 929 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); | 930 | vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); |
| 930 | 931 | ||
| 931 | fujitsu_hotkey->rfkill_state = | 932 | fujitsu_hotkey->rfkill_supported = |
| 932 | call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); | 933 | call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0); |
| 934 | |||
| 935 | /* Make sure our bitmask of supported functions is cleared if the | ||
| 936 | RFKILL function block is not implemented, like on the S7020. */ | ||
| 937 | if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD) | ||
| 938 | fujitsu_hotkey->rfkill_supported = 0; | ||
| 939 | |||
| 940 | if (fujitsu_hotkey->rfkill_supported) | ||
| 941 | fujitsu_hotkey->rfkill_state = | ||
| 942 | call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); | ||
| 933 | 943 | ||
| 934 | /* Suspect this is a keymap of the application panel, print it */ | 944 | /* Suspect this is a keymap of the application panel, print it */ |
| 935 | printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n", | 945 | printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n", |
| @@ -1005,8 +1015,9 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, | |||
| 1005 | 1015 | ||
| 1006 | input = fujitsu_hotkey->input; | 1016 | input = fujitsu_hotkey->input; |
| 1007 | 1017 | ||
| 1008 | fujitsu_hotkey->rfkill_state = | 1018 | if (fujitsu_hotkey->rfkill_supported) |
| 1009 | call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); | 1019 | fujitsu_hotkey->rfkill_state = |
| 1020 | call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); | ||
| 1010 | 1021 | ||
| 1011 | switch (event) { | 1022 | switch (event) { |
| 1012 | case ACPI_FUJITSU_NOTIFY_CODE1: | 1023 | case ACPI_FUJITSU_NOTIFY_CODE1: |
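The fujitsu-laptop hunks split "which rfkill functions exist" from "what their current state is": the supported-function bitmask is read once at probe time, and the sysfs show functions test the relevant support bit before trusting the state bit. A sketch of that gate; the 0x100 lid bit mirrors the hunk, everything else is illustrative:

#include <stdio.h>

#define LID_BIT 0x100

static unsigned int rfkill_supported = LID_BIT; /* from FUNC_RFKILL, 0x0 */
static unsigned int rfkill_state     = 0x000;   /* from FUNC_RFKILL, 0x4 */

static const char *lid_state(void)
{
        if (!(rfkill_supported & LID_BIT))
                return "unknown";               /* firmware can't tell us */
        return (rfkill_state & LID_BIT) ? "open" : "closed";
}

int main(void)
{
        printf("lid: %s\n", lid_state());
        return 0;
}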
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index a1a511bdec8c..ed1e728763a2 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
| @@ -1573,9 +1573,6 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, | |||
| 1573 | vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); | 1573 | vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); |
| 1574 | vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; | 1574 | vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; |
| 1575 | vfc_cmd->tgt_scsi_id = rport->port_id; | 1575 | vfc_cmd->tgt_scsi_id = rport->port_id; |
| 1576 | if ((rport->supported_classes & FC_COS_CLASS3) && | ||
| 1577 | (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3)) | ||
| 1578 | vfc_cmd->flags = IBMVFC_CLASS_3_ERR; | ||
| 1579 | vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); | 1576 | vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); |
| 1580 | int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); | 1577 | int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); |
| 1581 | memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); | 1578 | memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); |
| @@ -3266,6 +3263,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) | |||
| 3266 | return -ENOMEM; | 3263 | return -ENOMEM; |
| 3267 | } | 3264 | } |
| 3268 | 3265 | ||
| 3266 | memset(tgt, 0, sizeof(*tgt)); | ||
| 3269 | tgt->scsi_id = scsi_id; | 3267 | tgt->scsi_id = scsi_id; |
| 3270 | tgt->new_scsi_id = scsi_id; | 3268 | tgt->new_scsi_id = scsi_id; |
| 3271 | tgt->vhost = vhost; | 3269 | tgt->vhost = vhost; |
| @@ -3576,9 +3574,18 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) | |||
| 3576 | static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) | 3574 | static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) |
| 3577 | { | 3575 | { |
| 3578 | struct ibmvfc_host *vhost = tgt->vhost; | 3576 | struct ibmvfc_host *vhost = tgt->vhost; |
| 3579 | struct fc_rport *rport; | 3577 | struct fc_rport *rport = tgt->rport; |
| 3580 | unsigned long flags; | 3578 | unsigned long flags; |
| 3581 | 3579 | ||
| 3580 | if (rport) { | ||
| 3581 | tgt_dbg(tgt, "Setting rport roles\n"); | ||
| 3582 | fc_remote_port_rolechg(rport, tgt->ids.roles); | ||
| 3583 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
| 3584 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | ||
| 3585 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
| 3586 | return; | ||
| 3587 | } | ||
| 3588 | |||
| 3582 | tgt_dbg(tgt, "Adding rport\n"); | 3589 | tgt_dbg(tgt, "Adding rport\n"); |
| 3583 | rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); | 3590 | rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); |
| 3584 | spin_lock_irqsave(vhost->host->host_lock, flags); | 3591 | spin_lock_irqsave(vhost->host->host_lock, flags); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 87dafd0f8d44..b21e071b9862 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define IBMVFC_DRIVER_VERSION "1.0.4" | 32 | #define IBMVFC_DRIVER_VERSION "1.0.4" |
| 33 | #define IBMVFC_DRIVER_DATE "(November 14, 2008)" | 33 | #define IBMVFC_DRIVER_DATE "(November 14, 2008)" |
| 34 | 34 | ||
| 35 | #define IBMVFC_DEFAULT_TIMEOUT 15 | 35 | #define IBMVFC_DEFAULT_TIMEOUT 60 |
| 36 | #define IBMVFC_INIT_TIMEOUT 120 | 36 | #define IBMVFC_INIT_TIMEOUT 120 |
| 37 | #define IBMVFC_MAX_REQUESTS_DEFAULT 100 | 37 | #define IBMVFC_MAX_REQUESTS_DEFAULT 100 |
| 38 | 38 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 74d07d137dae..c9aa7611e408 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
| @@ -432,6 +432,7 @@ static int map_sg_data(struct scsi_cmnd *cmd, | |||
| 432 | sdev_printk(KERN_ERR, cmd->device, | 432 | sdev_printk(KERN_ERR, cmd->device, |
| 433 | "Can't allocate memory " | 433 | "Can't allocate memory " |
| 434 | "for indirect table\n"); | 434 | "for indirect table\n"); |
| 435 | scsi_dma_unmap(cmd); | ||
| 435 | return 0; | 436 | return 0; |
| 436 | } | 437 | } |
| 437 | } | 438 | } |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 257c24115de9..809d32d95c76 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -1998,6 +1998,8 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) | |||
| 1998 | if (!shost->can_queue) | 1998 | if (!shost->can_queue) |
| 1999 | shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; | 1999 | shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; |
| 2000 | 2000 | ||
| 2001 | if (!shost->transportt->eh_timed_out) | ||
| 2002 | shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; | ||
| 2001 | return scsi_add_host(shost, pdev); | 2003 | return scsi_add_host(shost, pdev); |
| 2002 | } | 2004 | } |
| 2003 | EXPORT_SYMBOL_GPL(iscsi_host_add); | 2005 | EXPORT_SYMBOL_GPL(iscsi_host_add); |
| @@ -2020,7 +2022,6 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, | |||
| 2020 | shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); | 2022 | shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); |
| 2021 | if (!shost) | 2023 | if (!shost) |
| 2022 | return NULL; | 2024 | return NULL; |
| 2023 | shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; | ||
| 2024 | 2025 | ||
| 2025 | if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { | 2026 | if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { |
| 2026 | if (qdepth != 0) | 2027 | if (qdepth != 0) |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a8f30bdaff69..a7302480bc4a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
| @@ -5258,6 +5258,7 @@ lpfc_send_els_event(struct lpfc_vport *vport, | |||
| 5258 | sizeof(struct lpfc_name)); | 5258 | sizeof(struct lpfc_name)); |
| 5259 | break; | 5259 | break; |
| 5260 | default: | 5260 | default: |
| 5261 | kfree(els_data); | ||
| 5261 | return; | 5262 | return; |
| 5262 | } | 5263 | } |
| 5263 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | 5264 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); |
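The lpfc_els.c hunk closes a leak: the default case returned without freeing the event buffer allocated a few lines earlier. A minimal sketch of the shape of the fix, with malloc/free standing in for the kernel allocator and all names invented:

#include <stdio.h>
#include <stdlib.h>

struct event_data { char payload[64]; };

static void send_event(int cmd)
{
        struct event_data *data = malloc(sizeof(*data));

        if (!data)
                return;

        switch (cmd) {
        case 1:
                /* ... fill in the handled event ... */
                break;
        default:
                free(data);     /* was missing: the early return leaked data */
                return;
        }

        /* ... post the event built in data ... */
        free(data);
}

int main(void)
{
        send_event(42);
        return 0;
}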
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 33a3c13fd893..f4c57227ec18 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -1265,13 +1265,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
| 1265 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) | 1265 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) |
| 1266 | msleep(1000); | 1266 | msleep(1000); |
| 1267 | 1267 | ||
| 1268 | if (ha->mqenable) { | ||
| 1269 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
| 1270 | qla_printk(KERN_WARNING, ha, | ||
| 1271 | "Queue delete failed.\n"); | ||
| 1272 | vha->req_ques[0] = ha->req_q_map[0]->id; | ||
| 1273 | } | ||
| 1274 | |||
| 1275 | qla24xx_disable_vp(vha); | 1268 | qla24xx_disable_vp(vha); |
| 1276 | 1269 | ||
| 1277 | fc_remove_host(vha->host); | 1270 | fc_remove_host(vha->host); |
| @@ -1293,6 +1286,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
| 1293 | vha->host_no, vha->vp_idx, vha)); | 1286 | vha->host_no, vha->vp_idx, vha)); |
| 1294 | } | 1287 | } |
| 1295 | 1288 | ||
| 1289 | if (ha->mqenable) { | ||
| 1290 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
| 1291 | qla_printk(KERN_WARNING, ha, | ||
| 1292 | "Queue delete failed.\n"); | ||
| 1293 | } | ||
| 1294 | |||
| 1296 | scsi_host_put(vha->host); | 1295 | scsi_host_put(vha->host); |
| 1297 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); | 1296 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); |
| 1298 | return 0; | 1297 | return 0; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 023ee77fb027..e0c5bb54b258 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -2135,6 +2135,7 @@ struct qla_msix_entry { | |||
| 2135 | /* Work events. */ | 2135 | /* Work events. */ |
| 2136 | enum qla_work_type { | 2136 | enum qla_work_type { |
| 2137 | QLA_EVT_AEN, | 2137 | QLA_EVT_AEN, |
| 2138 | QLA_EVT_IDC_ACK, | ||
| 2138 | }; | 2139 | }; |
| 2139 | 2140 | ||
| 2140 | 2141 | ||
| @@ -2149,6 +2150,10 @@ struct qla_work_evt { | |||
| 2149 | enum fc_host_event_code code; | 2150 | enum fc_host_event_code code; |
| 2150 | u32 data; | 2151 | u32 data; |
| 2151 | } aen; | 2152 | } aen; |
| 2153 | struct { | ||
| 2154 | #define QLA_IDC_ACK_REGS 7 | ||
| 2155 | uint16_t mb[QLA_IDC_ACK_REGS]; | ||
| 2156 | } idc_ack; | ||
| 2152 | } u; | 2157 | } u; |
| 2153 | }; | 2158 | }; |
| 2154 | 2159 | ||
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h index d78d35e681ab..d6ea69df7c5c 100644 --- a/drivers/scsi/qla2xxx/qla_devtbl.h +++ b/drivers/scsi/qla2xxx/qla_devtbl.h | |||
| @@ -72,7 +72,7 @@ static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = { | |||
| 72 | "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ | 72 | "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ |
| 73 | "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ | 73 | "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ |
| 74 | "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ | 74 | "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ |
| 75 | "QEM2462" "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ | 75 | "QEM2462", "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ |
| 76 | "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ | 76 | "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ |
| 77 | "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ | 77 | "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ |
| 78 | "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ | 78 | "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 7abb045a0410..ffff42554087 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
| @@ -1402,6 +1402,8 @@ struct access_chip_rsp_84xx { | |||
| 1402 | #define MBA_IDC_NOTIFY 0x8101 | 1402 | #define MBA_IDC_NOTIFY 0x8101 |
| 1403 | #define MBA_IDC_TIME_EXT 0x8102 | 1403 | #define MBA_IDC_TIME_EXT 0x8102 |
| 1404 | 1404 | ||
| 1405 | #define MBC_IDC_ACK 0x101 | ||
| 1406 | |||
| 1405 | struct nvram_81xx { | 1407 | struct nvram_81xx { |
| 1406 | /* NVRAM header. */ | 1408 | /* NVRAM header. */ |
| 1407 | uint8_t id[4]; | 1409 | uint8_t id[4]; |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index a336b4bc81a7..6de283f8f111 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -72,6 +72,7 @@ extern int qla2x00_loop_reset(scsi_qla_host_t *); | |||
| 72 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 72 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
| 73 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum | 73 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum |
| 74 | fc_host_event_code, u32); | 74 | fc_host_event_code, u32); |
| 75 | extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); | ||
| 75 | 76 | ||
| 76 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); | 77 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); |
| 77 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, | 78 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, |
| @@ -266,6 +267,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); | |||
| 266 | 267 | ||
| 267 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); | 268 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); |
| 268 | 269 | ||
| 270 | extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *); | ||
| 271 | |||
| 269 | /* | 272 | /* |
| 270 | * Global Function Prototypes in qla_isr.c source file. | 273 | * Global Function Prototypes in qla_isr.c source file. |
| 271 | */ | 274 | */ |
| @@ -376,10 +379,8 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *); | |||
| 376 | 379 | ||
| 377 | /* Globa function prototypes for multi-q */ | 380 | /* Globa function prototypes for multi-q */ |
| 378 | extern int qla25xx_request_irq(struct rsp_que *); | 381 | extern int qla25xx_request_irq(struct rsp_que *); |
| 379 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *, | 382 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); |
| 380 | uint8_t); | 383 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); |
| 381 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *, | ||
| 382 | uint8_t); | ||
| 383 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, | 384 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, |
| 384 | uint16_t, uint8_t, uint8_t); | 385 | uint16_t, uint8_t, uint8_t); |
| 385 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, | 386 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f6368a1d3021..986501759ad4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -1226,9 +1226,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
| 1226 | icb->firmware_options_2 |= | 1226 | icb->firmware_options_2 |= |
| 1227 | __constant_cpu_to_le32(BIT_18); | 1227 | __constant_cpu_to_le32(BIT_18); |
| 1228 | 1228 | ||
| 1229 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); | 1229 | icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); |
| 1230 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); | 1230 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); |
| 1231 | ha->rsp_q_map[0]->options = icb->firmware_options_2; | ||
| 1232 | 1231 | ||
| 1233 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); | 1232 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); |
| 1234 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); | 1233 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); |
| @@ -3493,7 +3492,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
| 3493 | rsp = ha->rsp_q_map[i]; | 3492 | rsp = ha->rsp_q_map[i]; |
| 3494 | if (rsp) { | 3493 | if (rsp) { |
| 3495 | rsp->options &= ~BIT_0; | 3494 | rsp->options &= ~BIT_0; |
| 3496 | ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options); | 3495 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
| 3497 | if (ret != QLA_SUCCESS) | 3496 | if (ret != QLA_SUCCESS) |
| 3498 | DEBUG2_17(printk(KERN_WARNING | 3497 | DEBUG2_17(printk(KERN_WARNING |
| 3499 | "%s Rsp que:%d init failed\n", __func__, | 3498 | "%s Rsp que:%d init failed\n", __func__, |
| @@ -3507,7 +3506,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
| 3507 | if (req) { | 3506 | if (req) { |
| 3508 | /* Clear outstanding commands array. */ | 3507 | /* Clear outstanding commands array. */ |
| 3509 | req->options &= ~BIT_0; | 3508 | req->options &= ~BIT_0; |
| 3510 | ret = qla25xx_init_req_que(base_vha, req, req->options); | 3509 | ret = qla25xx_init_req_que(base_vha, req); |
| 3511 | if (ret != QLA_SUCCESS) | 3510 | if (ret != QLA_SUCCESS) |
| 3512 | DEBUG2_17(printk(KERN_WARNING | 3511 | DEBUG2_17(printk(KERN_WARNING |
| 3513 | "%s Req que:%d init failed\n", __func__, | 3512 | "%s Req que:%d init failed\n", __func__, |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e28ad81baf1e..f250e5b7897c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -266,6 +266,40 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
| 266 | } | 266 | } |
| 267 | } | 267 | } |
| 268 | 268 | ||
| 269 | static void | ||
| 270 | qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) | ||
| 271 | { | ||
| 272 | static char *event[] = | ||
| 273 | { "Complete", "Request Notification", "Time Extension" }; | ||
| 274 | int rval; | ||
| 275 | struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; | ||
| 276 | uint16_t __iomem *wptr; | ||
| 277 | uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; | ||
| 278 | |||
| 279 | /* Seed data -- mailbox1 -> mailbox7. */ | ||
| 280 | wptr = (uint16_t __iomem *)®24->mailbox1; | ||
| 281 | for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) | ||
| 282 | mb[cnt] = RD_REG_WORD(wptr); | ||
| 283 | |||
| 284 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " | ||
| 285 | "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, | ||
| 286 | event[aen & 0xff], | ||
| 287 | mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); | ||
| 288 | |||
| 289 | /* Acknowledgement needed? [Notify && non-zero timeout]. */ | ||
| 290 | timeout = (descr >> 8) & 0xf; | ||
| 291 | if (aen != MBA_IDC_NOTIFY || !timeout) | ||
| 292 | return; | ||
| 293 | |||
| 294 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " | ||
| 295 | "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); | ||
| 296 | |||
| 297 | rval = qla2x00_post_idc_ack_work(vha, mb); | ||
| 298 | if (rval != QLA_SUCCESS) | ||
| 299 | qla_printk(KERN_WARNING, vha->hw, | ||
| 300 | "IDC failed to post ACK.\n"); | ||
| 301 | } | ||
| 302 | |||
| 269 | /** | 303 | /** |
| 270 | * qla2x00_async_event() - Process aynchronous events. | 304 | * qla2x00_async_event() - Process aynchronous events. |
| 271 | * @ha: SCSI driver HA context | 305 | * @ha: SCSI driver HA context |
| @@ -714,21 +748,9 @@ skip_rio: | |||
| 714 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); | 748 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); |
| 715 | break; | 749 | break; |
| 716 | case MBA_IDC_COMPLETE: | 750 | case MBA_IDC_COMPLETE: |
| 717 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | ||
| 718 | "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2], | ||
| 719 | mb[3])); | ||
| 720 | break; | ||
| 721 | case MBA_IDC_NOTIFY: | 751 | case MBA_IDC_NOTIFY: |
| 722 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | ||
| 723 | "Request Notification -- %04x %04x %04x\n", vha->host_no, | ||
| 724 | mb[1], mb[2], mb[3])); | ||
| 725 | /**** Mailbox registers 4 - 7 valid!!! */ | ||
| 726 | break; | ||
| 727 | case MBA_IDC_TIME_EXT: | 752 | case MBA_IDC_TIME_EXT: |
| 728 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | 753 | qla81xx_idc_event(vha, mb[0], mb[1]); |
| 729 | "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1], | ||
| 730 | mb[2], mb[3])); | ||
| 731 | /**** Mailbox registers 4 - 7 valid!!! */ | ||
| 732 | break; | 754 | break; |
| 733 | } | 755 | } |
| 734 | 756 | ||
| @@ -1707,7 +1729,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
| 1707 | struct qla_hw_data *ha; | 1729 | struct qla_hw_data *ha; |
| 1708 | struct rsp_que *rsp; | 1730 | struct rsp_que *rsp; |
| 1709 | struct device_reg_24xx __iomem *reg; | 1731 | struct device_reg_24xx __iomem *reg; |
| 1710 | uint16_t msix_disabled_hccr = 0; | ||
| 1711 | 1732 | ||
| 1712 | rsp = (struct rsp_que *) dev_id; | 1733 | rsp = (struct rsp_que *) dev_id; |
| 1713 | if (!rsp) { | 1734 | if (!rsp) { |
| @@ -1720,17 +1741,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
| 1720 | 1741 | ||
| 1721 | spin_lock_irq(&ha->hardware_lock); | 1742 | spin_lock_irq(&ha->hardware_lock); |
| 1722 | 1743 | ||
| 1723 | msix_disabled_hccr = rsp->options; | ||
| 1724 | if (!rsp->id) | ||
| 1725 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22); | ||
| 1726 | else | ||
| 1727 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6); | ||
| 1728 | |||
| 1729 | qla24xx_process_response_queue(rsp); | 1744 | qla24xx_process_response_queue(rsp); |
| 1730 | 1745 | ||
| 1731 | if (!msix_disabled_hccr) | ||
| 1732 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | ||
| 1733 | |||
| 1734 | spin_unlock_irq(&ha->hardware_lock); | 1746 | spin_unlock_irq(&ha->hardware_lock); |
| 1735 | 1747 | ||
| 1736 | return IRQ_HANDLED; | 1748 | return IRQ_HANDLED; |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f94ffbb98e95..4c7504cb3990 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -3090,8 +3090,7 @@ verify_done: | |||
| 3090 | } | 3090 | } |
| 3091 | 3091 | ||
| 3092 | int | 3092 | int |
| 3093 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | 3093 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) |
| 3094 | uint8_t options) | ||
| 3095 | { | 3094 | { |
| 3096 | int rval; | 3095 | int rval; |
| 3097 | unsigned long flags; | 3096 | unsigned long flags; |
| @@ -3101,7 +3100,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
| 3101 | struct qla_hw_data *ha = vha->hw; | 3100 | struct qla_hw_data *ha = vha->hw; |
| 3102 | 3101 | ||
| 3103 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3102 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
| 3104 | mcp->mb[1] = options; | 3103 | mcp->mb[1] = req->options; |
| 3105 | mcp->mb[2] = MSW(LSD(req->dma)); | 3104 | mcp->mb[2] = MSW(LSD(req->dma)); |
| 3106 | mcp->mb[3] = LSW(LSD(req->dma)); | 3105 | mcp->mb[3] = LSW(LSD(req->dma)); |
| 3107 | mcp->mb[6] = MSW(MSD(req->dma)); | 3106 | mcp->mb[6] = MSW(MSD(req->dma)); |
| @@ -3128,7 +3127,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
| 3128 | mcp->tov = 60; | 3127 | mcp->tov = 60; |
| 3129 | 3128 | ||
| 3130 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3129 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3131 | if (!(options & BIT_0)) { | 3130 | if (!(req->options & BIT_0)) { |
| 3132 | WRT_REG_DWORD(®->req_q_in, 0); | 3131 | WRT_REG_DWORD(®->req_q_in, 0); |
| 3133 | WRT_REG_DWORD(®->req_q_out, 0); | 3132 | WRT_REG_DWORD(®->req_q_out, 0); |
| 3134 | } | 3133 | } |
| @@ -3142,8 +3141,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
| 3142 | } | 3141 | } |
| 3143 | 3142 | ||
| 3144 | int | 3143 | int |
| 3145 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | 3144 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) |
| 3146 | uint8_t options) | ||
| 3147 | { | 3145 | { |
| 3148 | int rval; | 3146 | int rval; |
| 3149 | unsigned long flags; | 3147 | unsigned long flags; |
| @@ -3153,7 +3151,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
| 3153 | struct qla_hw_data *ha = vha->hw; | 3151 | struct qla_hw_data *ha = vha->hw; |
| 3154 | 3152 | ||
| 3155 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3153 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
| 3156 | mcp->mb[1] = options; | 3154 | mcp->mb[1] = rsp->options; |
| 3157 | mcp->mb[2] = MSW(LSD(rsp->dma)); | 3155 | mcp->mb[2] = MSW(LSD(rsp->dma)); |
| 3158 | mcp->mb[3] = LSW(LSD(rsp->dma)); | 3156 | mcp->mb[3] = LSW(LSD(rsp->dma)); |
| 3159 | mcp->mb[6] = MSW(MSD(rsp->dma)); | 3157 | mcp->mb[6] = MSW(MSD(rsp->dma)); |
| @@ -3178,7 +3176,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
| 3178 | mcp->tov = 60; | 3176 | mcp->tov = 60; |
| 3179 | 3177 | ||
| 3180 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3178 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3181 | if (!(options & BIT_0)) { | 3179 | if (!(rsp->options & BIT_0)) { |
| 3182 | WRT_REG_DWORD(®->rsp_q_out, 0); | 3180 | WRT_REG_DWORD(®->rsp_q_out, 0); |
| 3183 | WRT_REG_DWORD(®->rsp_q_in, 0); | 3181 | WRT_REG_DWORD(®->rsp_q_in, 0); |
| 3184 | } | 3182 | } |
| @@ -3193,3 +3191,29 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
| 3193 | return rval; | 3191 | return rval; |
| 3194 | } | 3192 | } |
| 3195 | 3193 | ||
| 3194 | int | ||
| 3195 | qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) | ||
| 3196 | { | ||
| 3197 | int rval; | ||
| 3198 | mbx_cmd_t mc; | ||
| 3199 | mbx_cmd_t *mcp = &mc; | ||
| 3200 | |||
| 3201 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | ||
| 3202 | |||
| 3203 | mcp->mb[0] = MBC_IDC_ACK; | ||
| 3204 | memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | ||
| 3205 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
| 3206 | mcp->in_mb = MBX_0; | ||
| 3207 | mcp->tov = MBX_TOV_SECONDS; | ||
| 3208 | mcp->flags = 0; | ||
| 3209 | rval = qla2x00_mailbox_command(vha, mcp); | ||
| 3210 | |||
| 3211 | if (rval != QLA_SUCCESS) { | ||
| 3212 | DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, | ||
| 3213 | vha->host_no, rval, mcp->mb[0])); | ||
| 3214 | } else { | ||
| 3215 | DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); | ||
| 3216 | } | ||
| 3217 | |||
| 3218 | return rval; | ||
| 3219 | } | ||
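These qla_mbx.c hunks fold the multiqueue `options` byte into the request/response queue structures themselves: qla25xx_init_req_que() and qla25xx_init_rsp_que() lose their separate options argument and read req->options / rsp->options instead, so the MBC_INITIALIZE_MULTIQ mailbox word and the BIT_0 register-zeroing check can no longer be fed different values. A minimal sketch of the resulting caller pattern, grounded in the qla_mid.c hunks further down (BIT_0 is used there to flag a queue for deletion):

    /* Sketch: stage option bits on the queue object, then (re)initialize.
     * Both the mailbox command and the BIT_0 test read req->options now. */
    req->options |= BIT_0;
    if (qla25xx_init_req_que(vha, req) != QLA_SUCCESS)
            DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));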
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f53179c46423..3f23932210c4 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -396,7 +396,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
| 396 | 396 | ||
| 397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); | 397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); |
| 398 | 398 | ||
| 399 | memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES); | 399 | memset(vha->req_ques, 0, sizeof(vha->req_ques)); |
| 400 | vha->req_ques[0] = ha->req_q_map[0]->id; | 400 | vha->req_ques[0] = ha->req_q_map[0]->id; |
| 401 | host->can_queue = ha->req_q_map[0]->length + 128; | 401 | host->can_queue = ha->req_q_map[0]->length + 128; |
| 402 | host->this_id = 255; | 402 | host->this_id = 255; |
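A note on the memset fix above: sizeof(vha->req_ques) already evaluates to the size of the whole array in bytes, so additionally multiplying by QLA_MAX_HOST_QUES cleared that many times too much memory beyond the array. A stand-alone illustration (the element type and count are assumptions, chosen only to make the arithmetic concrete):

    /* Illustration only -- uint16_t and 4 entries are assumed. */
    #define QLA_MAX_HOST_QUES 4
    uint16_t req_ques[QLA_MAX_HOST_QUES];

    memset(req_ques, 0, sizeof(req_ques));   /* 8 bytes: exactly the array */
    /* Old form: sizeof(req_ques) * QLA_MAX_HOST_QUES == 32 bytes, i.e. a
     * 24-byte write past the end of the array. */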
| @@ -471,7 +471,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
| 471 | 471 | ||
| 472 | if (req) { | 472 | if (req) { |
| 473 | req->options |= BIT_0; | 473 | req->options |= BIT_0; |
| 474 | ret = qla25xx_init_req_que(vha, req, req->options); | 474 | ret = qla25xx_init_req_que(vha, req); |
| 475 | } | 475 | } |
| 476 | if (ret == QLA_SUCCESS) | 476 | if (ret == QLA_SUCCESS) |
| 477 | qla25xx_free_req_que(vha, req); | 477 | qla25xx_free_req_que(vha, req); |
| @@ -486,7 +486,7 @@ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
| 486 | 486 | ||
| 487 | if (rsp) { | 487 | if (rsp) { |
| 488 | rsp->options |= BIT_0; | 488 | rsp->options |= BIT_0; |
| 489 | ret = qla25xx_init_rsp_que(vha, rsp, rsp->options); | 489 | ret = qla25xx_init_rsp_que(vha, rsp); |
| 490 | } | 490 | } |
| 491 | if (ret == QLA_SUCCESS) | 491 | if (ret == QLA_SUCCESS) |
| 492 | qla25xx_free_rsp_que(vha, rsp); | 492 | qla25xx_free_rsp_que(vha, rsp); |
| @@ -502,7 +502,7 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) | |||
| 502 | 502 | ||
| 503 | req->options |= BIT_3; | 503 | req->options |= BIT_3; |
| 504 | req->qos = qos; | 504 | req->qos = qos; |
| 505 | ret = qla25xx_init_req_que(vha, req, req->options); | 505 | ret = qla25xx_init_req_que(vha, req); |
| 506 | if (ret != QLA_SUCCESS) | 506 | if (ret != QLA_SUCCESS) |
| 507 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); | 507 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); |
| 508 | /* restore options bit */ | 508 | /* restore options bit */ |
| @@ -632,7 +632,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | |||
| 632 | req->max_q_depth = ha->req_q_map[0]->max_q_depth; | 632 | req->max_q_depth = ha->req_q_map[0]->max_q_depth; |
| 633 | mutex_unlock(&ha->vport_lock); | 633 | mutex_unlock(&ha->vport_lock); |
| 634 | 634 | ||
| 635 | ret = qla25xx_init_req_que(base_vha, req, options); | 635 | ret = qla25xx_init_req_que(base_vha, req); |
| 636 | if (ret != QLA_SUCCESS) { | 636 | if (ret != QLA_SUCCESS) { |
| 637 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | 637 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); |
| 638 | mutex_lock(&ha->vport_lock); | 638 | mutex_lock(&ha->vport_lock); |
| @@ -710,7 +710,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, | |||
| 710 | if (ret) | 710 | if (ret) |
| 711 | goto que_failed; | 711 | goto que_failed; |
| 712 | 712 | ||
| 713 | ret = qla25xx_init_rsp_que(base_vha, rsp, options); | 713 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
| 714 | if (ret != QLA_SUCCESS) { | 714 | if (ret != QLA_SUCCESS) { |
| 715 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | 715 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); |
| 716 | mutex_lock(&ha->vport_lock); | 716 | mutex_lock(&ha->vport_lock); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c11f872d3e10..2f5f72531e23 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -2522,6 +2522,19 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, | |||
| 2522 | return qla2x00_post_work(vha, e, 1); | 2522 | return qla2x00_post_work(vha, e, 1); |
| 2523 | } | 2523 | } |
| 2524 | 2524 | ||
| 2525 | int | ||
| 2526 | qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) | ||
| 2527 | { | ||
| 2528 | struct qla_work_evt *e; | ||
| 2529 | |||
| 2530 | e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); | ||
| 2531 | if (!e) | ||
| 2532 | return QLA_FUNCTION_FAILED; | ||
| 2533 | |||
| 2534 | memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | ||
| 2535 | return qla2x00_post_work(vha, e, 1); | ||
| 2536 | } | ||
| 2537 | |||
| 2525 | static void | 2538 | static void |
| 2526 | qla2x00_do_work(struct scsi_qla_host *vha) | 2539 | qla2x00_do_work(struct scsi_qla_host *vha) |
| 2527 | { | 2540 | { |
| @@ -2539,6 +2552,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
| 2539 | fc_host_post_event(vha->host, fc_get_event_number(), | 2552 | fc_host_post_event(vha->host, fc_get_event_number(), |
| 2540 | e->u.aen.code, e->u.aen.data); | 2553 | e->u.aen.code, e->u.aen.data); |
| 2541 | break; | 2554 | break; |
| 2555 | case QLA_EVT_IDC_ACK: | ||
| 2556 | qla81xx_idc_ack(vha, e->u.idc_ack.mb); | ||
| 2557 | break; | ||
| 2542 | } | 2558 | } |
| 2543 | if (e->flags & QLA_EVT_FLAG_FREE) | 2559 | if (e->flags & QLA_EVT_FLAG_FREE) |
| 2544 | kfree(e); | 2560 | kfree(e); |
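qla2x00_post_idc_ack_work() plus the QLA_EVT_IDC_ACK case move the IDC acknowledgement out of interrupt context: the saved mailbox registers are copied into a work event and qla81xx_idc_ack() is issued later from qla2x00_do_work(). One would expect the async-event path to feed it roughly as in the hedged sketch below (only the helper name and QLA_IDC_ACK_REGS come from these hunks; the surrounding capture is illustrative):

    /* Hypothetical caller: mb[] holds the mailbox registers captured when
     * the IDC notification arrived; defer the ACK to process context. */
    uint16_t mb[QLA_IDC_ACK_REGS];   /* filled in by the AEN handler (assumed) */

    if (qla2x00_post_idc_ack_work(vha, mb) != QLA_SUCCESS)
            DEBUG2(printk("%s: failed to post IDC ACK work\n", __func__));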
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 9c3b694c049d..284827926eff 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
| @@ -684,7 +684,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
| 684 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, | 684 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, |
| 685 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); | 685 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); |
| 686 | 686 | ||
| 687 | switch (le32_to_cpu(region->code)) { | 687 | switch (le32_to_cpu(region->code) & 0xff) { |
| 688 | case FLT_REG_FW: | 688 | case FLT_REG_FW: |
| 689 | ha->flt_region_fw = start; | 689 | ha->flt_region_fw = start; |
| 690 | break; | 690 | break; |
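The qla_sup.c change masks the 32-bit FLT region code down to its low byte before dispatching; presumably the upper bytes carry attribute or reserved bits on newer layout tables, and without the mask such entries would never match the FLT_REG_* cases. The idea in isolation (a sketch; the upper-bit interpretation is an assumption):

    /* Only the low byte identifies the region; ignore the rest (assumed to
     * be attribute/reserved bits) when selecting the FLT_REG_* case. */
    uint8_t region_id = le32_to_cpu(region->code) & 0xff;

    switch (region_id) {
    case FLT_REG_FW:
            ha->flt_region_fw = start;
            break;
    /* ... */
    }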
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index cfa4c11a4797..79f7053da99b 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Driver version | 8 | * Driver version |
| 9 | */ | 9 | */ |
| 10 | #define QLA2XXX_VERSION "8.03.00-k2" | 10 | #define QLA2XXX_VERSION "8.03.00-k3" |
| 11 | 11 | ||
| 12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
| 13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 66505bb79410..8f4de20c9deb 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -317,6 +317,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
| 317 | return sdev; | 317 | return sdev; |
| 318 | 318 | ||
| 319 | out_device_destroy: | 319 | out_device_destroy: |
| 320 | scsi_device_set_state(sdev, SDEV_DEL); | ||
| 320 | transport_destroy_device(&sdev->sdev_gendev); | 321 | transport_destroy_device(&sdev->sdev_gendev); |
| 321 | put_device(&sdev->sdev_gendev); | 322 | put_device(&sdev->sdev_gendev); |
| 322 | out: | 323 | out: |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8f0bd3f7a59f..516925d8b570 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
| @@ -1078,7 +1078,7 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
| 1078 | case BLKTRACESETUP: | 1078 | case BLKTRACESETUP: |
| 1079 | return blk_trace_setup(sdp->device->request_queue, | 1079 | return blk_trace_setup(sdp->device->request_queue, |
| 1080 | sdp->disk->disk_name, | 1080 | sdp->disk->disk_name, |
| 1081 | sdp->device->sdev_gendev.devt, | 1081 | MKDEV(SCSI_GENERIC_MAJOR, sdp->index), |
| 1082 | (char *)arg); | 1082 | (char *)arg); |
| 1083 | case BLKTRACESTART: | 1083 | case BLKTRACESTART: |
| 1084 | return blk_trace_startstop(sdp->device->request_queue, 1); | 1084 | return blk_trace_startstop(sdp->device->request_queue, 1); |
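The sg ioctl fix matters because blk_trace_setup() records the dev_t that blktrace userspace later uses to identify the traced queue; sdp->device->sdev_gendev.devt names the underlying SCSI device, not the /dev/sgN character node, so the call now builds the sg device number explicitly. A hedged restatement of the same call:

    /* The sg char device for this slot is (SCSI_GENERIC_MAJOR, sdp->index),
     * which is the number userspace associates with /dev/sgN. */
    dev_t sg_devt = MKDEV(SCSI_GENERIC_MAJOR, sdp->index);

    return blk_trace_setup(sdp->device->request_queue,
                           sdp->disk->disk_name, sg_devt, (char *)arg);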
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 0d934bfbdd9b..b4b39811b445 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
| @@ -2083,6 +2083,20 @@ static int serial8250_startup(struct uart_port *port) | |||
| 2083 | 2083 | ||
| 2084 | serial8250_set_mctrl(&up->port, up->port.mctrl); | 2084 | serial8250_set_mctrl(&up->port, up->port.mctrl); |
| 2085 | 2085 | ||
| 2086 | /* Serial over LAN (SoL) hack: | ||
| 2087 | Intel 8257x gigabit Ethernet chips have a | ||
| 2088 | 16550 emulation, to be used for Serial over LAN. | ||
| 2089 | Those chips take longer than a normal serial | ||
| 2090 | device to signal that queued transmit data has | ||
| 2091 | gone out, so the TX-irq test below generally | ||
| 2092 | fails for them. Delaying the read of the IIR | ||
| 2093 | is not a reliable fix either, since the timeout | ||
| 2094 | is variable. So simply skip the test on these | ||
| 2095 | ports; UART_BUG_TXEN is then never enabled. | ||
| 2096 | */ | ||
| 2097 | if (up->port.flags & UPF_NO_TXEN_TEST) | ||
| 2098 | goto dont_test_tx_en; | ||
| 2099 | |||
| 2086 | /* | 2100 | /* |
| 2087 | * Do a quick test to see if we receive an | 2101 | * Do a quick test to see if we receive an |
| 2088 | * interrupt when we enable the TX irq. | 2102 | * interrupt when we enable the TX irq. |
| @@ -2102,6 +2116,7 @@ static int serial8250_startup(struct uart_port *port) | |||
| 2102 | up->bugs &= ~UART_BUG_TXEN; | 2116 | up->bugs &= ~UART_BUG_TXEN; |
| 2103 | } | 2117 | } |
| 2104 | 2118 | ||
| 2119 | dont_test_tx_en: | ||
| 2105 | spin_unlock_irqrestore(&up->port.lock, flags); | 2120 | spin_unlock_irqrestore(&up->port.lock, flags); |
| 2106 | 2121 | ||
| 2107 | /* | 2122 | /* |
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 536d8e510f66..533f82025adf 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
| @@ -798,6 +798,21 @@ pci_default_setup(struct serial_private *priv, | |||
| 798 | return setup_port(priv, port, bar, offset, board->reg_shift); | 798 | return setup_port(priv, port, bar, offset, board->reg_shift); |
| 799 | } | 799 | } |
| 800 | 800 | ||
| 801 | static int skip_tx_en_setup(struct serial_private *priv, | ||
| 802 | const struct pciserial_board *board, | ||
| 803 | struct uart_port *port, int idx) | ||
| 804 | { | ||
| 805 | port->flags |= UPF_NO_TXEN_TEST; | ||
| 806 | printk(KERN_DEBUG "serial8250: skipping TxEn test for device " | ||
| 807 | "[%04x:%04x] subsystem [%04x:%04x]\n", | ||
| 808 | priv->dev->vendor, | ||
| 809 | priv->dev->device, | ||
| 810 | priv->dev->subsystem_vendor, | ||
| 811 | priv->dev->subsystem_device); | ||
| 812 | |||
| 813 | return pci_default_setup(priv, board, port, idx); | ||
| 814 | } | ||
| 815 | |||
| 801 | /* This should be in linux/pci_ids.h */ | 816 | /* This should be in linux/pci_ids.h */ |
| 802 | #define PCI_VENDOR_ID_SBSMODULARIO 0x124B | 817 | #define PCI_VENDOR_ID_SBSMODULARIO 0x124B |
| 803 | #define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B | 818 | #define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B |
| @@ -864,6 +879,27 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
| 864 | .init = pci_inteli960ni_init, | 879 | .init = pci_inteli960ni_init, |
| 865 | .setup = pci_default_setup, | 880 | .setup = pci_default_setup, |
| 866 | }, | 881 | }, |
| 882 | { | ||
| 883 | .vendor = PCI_VENDOR_ID_INTEL, | ||
| 884 | .device = PCI_DEVICE_ID_INTEL_8257X_SOL, | ||
| 885 | .subvendor = PCI_ANY_ID, | ||
| 886 | .subdevice = PCI_ANY_ID, | ||
| 887 | .setup = skip_tx_en_setup, | ||
| 888 | }, | ||
| 889 | { | ||
| 890 | .vendor = PCI_VENDOR_ID_INTEL, | ||
| 891 | .device = PCI_DEVICE_ID_INTEL_82573L_SOL, | ||
| 892 | .subvendor = PCI_ANY_ID, | ||
| 893 | .subdevice = PCI_ANY_ID, | ||
| 894 | .setup = skip_tx_en_setup, | ||
| 895 | }, | ||
| 896 | { | ||
| 897 | .vendor = PCI_VENDOR_ID_INTEL, | ||
| 898 | .device = PCI_DEVICE_ID_INTEL_82573E_SOL, | ||
| 899 | .subvendor = PCI_ANY_ID, | ||
| 900 | .subdevice = PCI_ANY_ID, | ||
| 901 | .setup = skip_tx_en_setup, | ||
| 902 | }, | ||
| 867 | /* | 903 | /* |
| 868 | * ITE | 904 | * ITE |
| 869 | */ | 905 | */ |
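Each of the new quirk entries matches an Intel SoL function by vendor/device ID with subvendor and subdevice wildcarded, and routes it through skip_tx_en_setup(), which only sets UPF_NO_TXEN_TEST before deferring to pci_default_setup(). Another SoL-style part would be added with an entry shaped like the sketch below (the device macro is hypothetical, shown purely for the structure):

    {
            .vendor    = PCI_VENDOR_ID_INTEL,
            .device    = PCI_DEVICE_ID_INTEL_EXAMPLE_SOL,  /* hypothetical ID */
            .subvendor = PCI_ANY_ID,
            .subdevice = PCI_ANY_ID,
            .setup     = skip_tx_en_setup,
    },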
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 8058572a7428..018850c116c6 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c | |||
| @@ -841,7 +841,7 @@ static int tt_detect(void) | |||
| 841 | tt_dmasnd.ctrl = DMASND_CTRL_OFF; | 841 | tt_dmasnd.ctrl = DMASND_CTRL_OFF; |
| 842 | udelay(20); /* wait a while for things to settle down */ | 842 | udelay(20); /* wait a while for things to settle down */ |
| 843 | } | 843 | } |
| 844 | mono_moni = (mfp.par_dt_reg & 0x80) == 0; | 844 | mono_moni = (st_mfp.par_dt_reg & 0x80) == 0; |
| 845 | 845 | ||
| 846 | tt_get_par(&par); | 846 | tt_get_par(&par); |
| 847 | tt_encode_var(&atafb_predefined[0], &par); | 847 | tt_encode_var(&atafb_predefined[0], &par); |
| @@ -2035,7 +2035,7 @@ static int stste_detect(void) | |||
| 2035 | tt_dmasnd.ctrl = DMASND_CTRL_OFF; | 2035 | tt_dmasnd.ctrl = DMASND_CTRL_OFF; |
| 2036 | udelay(20); /* wait a while for things to settle down */ | 2036 | udelay(20); /* wait a while for things to settle down */ |
| 2037 | } | 2037 | } |
| 2038 | mono_moni = (mfp.par_dt_reg & 0x80) == 0; | 2038 | mono_moni = (st_mfp.par_dt_reg & 0x80) == 0; |
| 2039 | 2039 | ||
| 2040 | stste_get_par(&par); | 2040 | stste_get_par(&par); |
| 2041 | stste_encode_var(&atafb_predefined[0], &par); | 2041 | stste_encode_var(&atafb_predefined[0], &par); |
| @@ -2086,20 +2086,20 @@ static void st_ovsc_switch(void) | |||
| 2086 | return; | 2086 | return; |
| 2087 | local_irq_save(flags); | 2087 | local_irq_save(flags); |
| 2088 | 2088 | ||
| 2089 | mfp.tim_ct_b = 0x10; | 2089 | st_mfp.tim_ct_b = 0x10; |
| 2090 | mfp.active_edge |= 8; | 2090 | st_mfp.active_edge |= 8; |
| 2091 | mfp.tim_ct_b = 0; | 2091 | st_mfp.tim_ct_b = 0; |
| 2092 | mfp.tim_dt_b = 0xf0; | 2092 | st_mfp.tim_dt_b = 0xf0; |
| 2093 | mfp.tim_ct_b = 8; | 2093 | st_mfp.tim_ct_b = 8; |
| 2094 | while (mfp.tim_dt_b > 1) /* TOS does it this way, don't ask why */ | 2094 | while (st_mfp.tim_dt_b > 1) /* TOS does it this way, don't ask why */ |
| 2095 | ; | 2095 | ; |
| 2096 | new = mfp.tim_dt_b; | 2096 | new = st_mfp.tim_dt_b; |
| 2097 | do { | 2097 | do { |
| 2098 | udelay(LINE_DELAY); | 2098 | udelay(LINE_DELAY); |
| 2099 | old = new; | 2099 | old = new; |
| 2100 | new = mfp.tim_dt_b; | 2100 | new = st_mfp.tim_dt_b; |
| 2101 | } while (old != new); | 2101 | } while (old != new); |
| 2102 | mfp.tim_ct_b = 0x10; | 2102 | st_mfp.tim_ct_b = 0x10; |
| 2103 | udelay(SYNC_DELAY); | 2103 | udelay(SYNC_DELAY); |
| 2104 | 2104 | ||
| 2105 | if (atari_switches & ATARI_SWITCH_OVSC_IKBD) | 2105 | if (atari_switches & ATARI_SWITCH_OVSC_IKBD) |
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index e6e299feb51b..2181ce4d7ebd 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c | |||
| @@ -2365,7 +2365,6 @@ static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx | |||
| 2365 | static void aty128_set_suspend(struct aty128fb_par *par, int suspend) | 2365 | static void aty128_set_suspend(struct aty128fb_par *par, int suspend) |
| 2366 | { | 2366 | { |
| 2367 | u32 pmgt; | 2367 | u32 pmgt; |
| 2368 | u16 pwr_command; | ||
| 2369 | struct pci_dev *pdev = par->pdev; | 2368 | struct pci_dev *pdev = par->pdev; |
| 2370 | 2369 | ||
| 2371 | if (!par->pm_reg) | 2370 | if (!par->pm_reg) |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 9b91617b9582..56892a142ee2 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
| @@ -45,6 +45,13 @@ static int xen_suspend(void *data) | |||
| 45 | err); | 45 | err); |
| 46 | return err; | 46 | return err; |
| 47 | } | 47 | } |
| 48 | err = sysdev_suspend(PMSG_SUSPEND); | ||
| 49 | if (err) { | ||
| 50 | printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", | ||
| 51 | err); | ||
| 52 | device_power_up(PMSG_RESUME); | ||
| 53 | return err; | ||
| 54 | } | ||
| 48 | 55 | ||
| 49 | xen_mm_pin_all(); | 56 | xen_mm_pin_all(); |
| 50 | gnttab_suspend(); | 57 | gnttab_suspend(); |
| @@ -61,6 +68,7 @@ static int xen_suspend(void *data) | |||
| 61 | gnttab_resume(); | 68 | gnttab_resume(); |
| 62 | xen_mm_unpin_all(); | 69 | xen_mm_unpin_all(); |
| 63 | 70 | ||
| 71 | sysdev_resume(); | ||
| 64 | device_power_up(PMSG_RESUME); | 72 | device_power_up(PMSG_RESUME); |
| 65 | 73 | ||
| 66 | if (!*cancelled) { | 74 | if (!*cancelled) { |
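The xen_suspend() hunks nest sysdev_suspend() inside the existing device_power_down()/device_power_up() bracket: sysdev_suspend() runs only after the device-level callbacks are down, its failure unwinds just that earlier step, and sysdev_resume() is called before device_power_up() on the way back. The bracketing in isolation (a sketch; the hypervisor-side work between the brackets is elided):

    err = device_power_down(PMSG_SUSPEND);
    if (err)
            return err;

    err = sysdev_suspend(PMSG_SUSPEND);
    if (err) {
            device_power_up(PMSG_RESUME);   /* undo the one step that succeeded */
            return err;
    }

    /* ... suspend and resume the hypervisor side here ... */

    sysdev_resume();                        /* reverse order coming back */
    device_power_up(PMSG_RESUME);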
