author     Dave Airlie <airlied@redhat.com>  2010-08-03 19:51:27 -0400
committer  Dave Airlie <airlied@redhat.com>  2010-08-03 19:51:27 -0400
commit     fa0a6024da61d96a12fab18991b9897292b43253 (patch)
tree       35ba7b067863f649dc37c4b67a3ed740c0d9736d /drivers
parent     4c70b2eae371ebe83019ac47de6088b78124ab36 (diff)
parent     7b824ec2e5d7d086264ecae51e30e3c5e00cdecc (diff)
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (230 commits)
drm/i915: Clear the Ironlake dithering flags when the pipe doesn't want it.
drm/agp/i915: trim stolen space to 32M
drm/i915: Unset cursor if out-of-bounds upon mode change (v4)
drm/i915: Unreference object not handle on creation
drm/i915: Attempt to uncouple object after catastrophic failure in unbind
drm/i915: Repeat unbinding during free if interrupted (v6)
drm/i915: Refactor i915_gem_retire_requests()
drm/i915: Warn if we run out of FIFO space for a mode
drm/i915: Round up the watermark entries (v3)
drm/i915: Typo in (unused) register mask for overlay.
drm/i915: Check overlay stride errata for i830 and i845
drm/i915: Validate the mode for eDP by using fixed panel size
drm/i915: Always use the fixed panel timing for eDP
drm/i915: Enable panel fitting for eDP
drm/i915: Add fixed panel mode parsed from EDID for eDP without fixed mode in VBT
drm/i915/sdvo: Set sync polarity based on actual mode
drm/i915/hdmi: Set sync polarity based on actual mode
drm/i915/pch: Set transcoder sync polarity for DP based on actual mode
drm/i915: Initialize LVDS and eDP outputs before anything else
drm/i915/dp: Correctly report eDP in the core connector type
...
Diffstat (limited to 'drivers')
86 files changed, 1621 insertions, 783 deletions
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d97b8dce1668..18b3f1468b7d 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -70,6 +70,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 acpi_status acpi_enable(void)
 {
 	acpi_status status;
+	int retry;
 
 	ACPI_FUNCTION_TRACE(acpi_enable);
 
@@ -98,16 +99,18 @@ acpi_status acpi_enable(void)
 
 	/* Sanity check that transition succeeded */
 
-	if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
-		ACPI_ERROR((AE_INFO,
-			    "Hardware did not enter ACPI mode"));
-		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+	for (retry = 0; retry < 30000; ++retry) {
+		if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+			if (retry != 0)
+				ACPI_WARNING((AE_INFO,
+				"Platform took > %d00 usec to enter ACPI mode", retry));
+			return_ACPI_STATUS(AE_OK);
+		}
+		acpi_os_stall(100);	/* 100 usec */
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-			  "Transition to ACPI mode successful\n"));
-
-	return_ACPI_STATUS(AE_OK);
+	ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode"));
+	return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enable)
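The acpi_enable() hunk above swaps a one-shot mode check for a bounded poll: test the hardware state, stall 100 microseconds, and give up only after 30000 iterations (about three seconds). A standalone sketch of that pattern, with check_ready() and usleep() standing in for the ACPICA internals acpi_hw_get_mode() and acpi_os_stall() (so this is an illustration, not ACPICA code), could look like this:

#include <stdio.h>
#include <unistd.h>

/* stand-in for acpi_hw_get_mode(): pretend the hardware is ready on the 4th poll */
static int check_ready(void)
{
	static int calls;
	return ++calls > 3;
}

int main(void)
{
	int retry;

	for (retry = 0; retry < 30000; ++retry) {
		if (check_ready()) {
			if (retry != 0)
				printf("took > %d00 usec to become ready\n", retry);
			return 0;	/* success */
		}
		usleep(100);		/* 100 usec, mirrors acpi_os_stall(100) */
	}
	printf("hardware never became ready\n");
	return 1;
}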
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 3026e3fa83ef..dc58402b0a17 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -868,9 +868,15 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
 static void acpi_battery_notify(struct acpi_device *device, u32 event)
 {
 	struct acpi_battery *battery = acpi_driver_data(device);
+#ifdef CONFIG_ACPI_SYSFS_POWER
+	struct device *old;
+#endif
 
 	if (!battery)
 		return;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+	old = battery->bat.dev;
+#endif
 	acpi_battery_update(battery);
 	acpi_bus_generate_proc_event(device, event,
 				     acpi_battery_present(battery));
@@ -879,7 +885,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
 					acpi_battery_present(battery));
 #ifdef CONFIG_ACPI_SYSFS_POWER
 	/* acpi_battery_update could remove power_supply object */
-	if (battery->bat.dev)
+	if (old && battery->bat.dev)
 		power_supply_changed(&battery->bat);
 #endif
 }
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 01381be05e96..2bb28b9d91c4 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -214,7 +214,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	.ident = "Sony VGN-SR290J",
 	.matches = {
 		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
 		},
 	},
 	{
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 51284351418f..e9699aaed109 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -223,7 +223,7 @@ static bool processor_physically_present(acpi_handle handle)
 	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
 	cpuid = acpi_get_cpuid(handle, type, acpi_id);
 
-	if (cpuid == -1)
+	if ((cpuid == -1) && (num_possible_cpus() > 1))
 		return false;
 
 	return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b1b385692f46..e9a8026d39f0 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -76,14 +76,19 @@ static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
 module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
+static int bm_check_disable __read_mostly;
+module_param(bm_check_disable, uint, 0000);
 
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+#ifdef CONFIG_ACPI_PROCFS
 static u64 us_to_pm_timer_ticks(s64 t)
 {
 	return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
 }
+#endif
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -763,6 +768,9 @@ static int acpi_idle_bm_check(void)
 {
 	u32 bm_status = 0;
 
+	if (bm_check_disable)
+		return 0;
+
 	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 	if (bm_status)
 		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
@@ -947,7 +955,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
-	if (acpi_idle_bm_check()) {
+	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 		if (dev->safe_state) {
 			dev->last_state = dev->safe_state;
 			return dev->safe_state->enter(dev, dev->safe_state);
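The processor_idle.c change adds a bm_check_disable module parameter and makes acpi_idle_bm_check() return 0 immediately when it is set, while the new per-state bm_sts_skip test lets C3 entry bypass the BM_STS check. A minimal, hypothetical module showing the same parameter-gated check (not the real driver; example_bm_check() and the module names are made up for illustration):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0 = run the check as usual, non-zero = skip it (e.g. bm_check_disable=1 on the modprobe line) */
static unsigned int bm_check_disable;
module_param(bm_check_disable, uint, 0000);

static int example_bm_check(void)
{
	if (bm_check_disable)
		return 0;	/* pretend no bus-master activity was seen */

	/* the real driver reads and clears ACPI_BITREG_BUS_MASTER_STATUS here */
	return 1;
}

static int __init example_init(void)
{
	pr_info("example_bm_check() = %d\n", example_bm_check());
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL");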
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5b7c52e4a00f..2862c781b372 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -82,6 +82,20 @@ static int acpi_sleep_prepare(u32 acpi_state)
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 
 /*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * and to restore them during the subsequent resume.  Windows does that also for
+ * suspend to RAM.  However, it is known that this mechanism does not work on
+ * all machines, so we allow the user to disable it with the help of the
+ * 'acpi_sleep=nonvs' kernel command line option.
+ */
+static bool nvs_nosave;
+
+void __init acpi_nvs_nosave(void)
+{
+	nvs_nosave = true;
+}
+
+/*
  * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
  * user to request that behavior by using the 'acpi_old_suspend_ordering'
  * kernel command line option that causes the following variable to be set.
@@ -197,8 +211,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
 	u32 acpi_state = acpi_suspend_states[pm_state];
 	int error = 0;
 
-	error = suspend_nvs_alloc();
-
+	error = nvs_nosave ? 0 : suspend_nvs_alloc();
 	if (error)
 		return error;
 
@@ -388,20 +401,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATION
-/*
- * The ACPI specification wants us to save NVS memory regions during hibernation
- * and to restore them during the subsequent resume.  However, it is not certain
- * if this mechanism is going to work on all machines, so we allow the user to
- * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
- * option.
- */
-static bool s4_no_nvs;
-
-void __init acpi_s4_no_nvs(void)
-{
-	s4_no_nvs = true;
-}
-
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
 static bool nosigcheck;
@@ -415,7 +414,7 @@ static int acpi_hibernation_begin(void)
 {
 	int error;
 
-	error = s4_no_nvs ? 0 : suspend_nvs_alloc();
+	error = nvs_nosave ? 0 : suspend_nvs_alloc();
 	if (!error) {
 		acpi_target_sleep_state = ACPI_STATE_S4;
 		acpi_sleep_tts_switch(acpi_target_sleep_state);
@@ -510,7 +509,7 @@ static int acpi_hibernation_begin_old(void)
 	error = acpi_sleep_prepare(ACPI_STATE_S4);
 
 	if (!error) {
-		if (!s4_no_nvs)
+		if (!nvs_nosave)
 			error = suspend_nvs_alloc();
 		if (!error)
 			acpi_target_sleep_state = ACPI_STATE_S4;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9630fbdf4e6c..9b9d3bd54e3a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -673,7 +673,7 @@ static struct kobject *get_device_parent(struct device *dev,
 		 */
 		if (parent == NULL)
 			parent_kobj = virtual_device_parent(dev);
-		else if (parent->class)
+		else if (parent->class && !dev->class->ns_type)
 			return &parent->kobj;
 		else
 			parent_kobj = &parent->kobj;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d836a71bf06d..5bbc7be203a6 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -816,9 +816,9 @@ static const struct intel_driver_description {
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
-	    "Sandybridge", NULL, &intel_i965_driver },
+	    "Sandybridge", NULL, &intel_gen6_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
-	    "Sandybridge", NULL, &intel_i965_driver },
+	    "Sandybridge", NULL, &intel_gen6_driver },
 	{ 0, 0, NULL, NULL, NULL }
 };
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 2547465d4658..c05e3e518268 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -60,6 +60,12 @@
 #define I810_PTE_LOCAL		0x00000002
 #define I810_PTE_VALID		0x00000001
 #define I830_PTE_SYSTEM_CACHED	0x00000006
+/* GT PTE cache control fields */
+#define GEN6_PTE_UNCACHED	0x00000002
+#define GEN6_PTE_LLC		0x00000004
+#define GEN6_PTE_LLC_MLC	0x00000006
+#define GEN6_PTE_GFDT		0x00000008
+
 #define I810_SMRAM_MISCC	0x70
 #define I810_GFX_MEM_WIN_SIZE	0x00010000
 #define I810_GFX_MEM_WIN_32M	0x00010000
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9344216183a4..d22ffb811bf2 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -25,6 +25,10 @@
 #define USE_PCI_DMA_API 1
 #endif
 
+/* Max amount of stolen space, anything above will be returned to Linux */
+int intel_max_stolen = 32 * 1024 * 1024;
+EXPORT_SYMBOL(intel_max_stolen);
+
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
 	{64, 16384, 4},
@@ -104,7 +108,7 @@ static int intel_agp_map_memory(struct agp_memory *mem)
 	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
 
 	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
-		return -ENOMEM;
+		goto err;
 
 	mem->sg_list = sg = st.sgl;
 
@@ -113,11 +117,14 @@ static int intel_agp_map_memory(struct agp_memory *mem)
 
 	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!mem->num_sg)) {
-		intel_agp_free_sglist(mem);
-		return -ENOMEM;
-	}
+	if (unlikely(!mem->num_sg))
+		goto err;
+
 	return 0;
+
+err:
+	sg_free_table(&st);
+	return -ENOMEM;
 }
 
 static void intel_agp_unmap_memory(struct agp_memory *mem)
@@ -176,7 +183,7 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
 	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
 	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
 	{
-		cache_bits = I830_PTE_SYSTEM_CACHED;
+		cache_bits = GEN6_PTE_LLC_MLC;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -710,7 +717,12 @@ static void intel_i830_init_gtt_entries(void)
 			break;
 		}
 	}
-	if (gtt_entries > 0) {
+	if (!local && gtt_entries > intel_max_stolen) {
+		dev_info(&agp_bridge->dev->dev,
+			 "detected %dK stolen memory, trimming to %dK\n",
+			 gtt_entries / KB(1), intel_max_stolen / KB(1));
+		gtt_entries = intel_max_stolen / KB(4);
+	} else if (gtt_entries > 0) {
 		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
 			 gtt_entries / KB(1), local ? "local" : "stolen");
 		gtt_entries /= KB(4);
@@ -797,6 +809,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
 
 	/* we have to call this as early as possible after the MMIO base address is known */
 	intel_i830_init_gtt_entries();
+	if (intel_private.gtt_entries == 0) {
+		iounmap(intel_private.registers);
+		return -ENOMEM;
+	}
 
 	agp_bridge->gatt_table = NULL;
 
@@ -1216,17 +1232,20 @@ static int intel_i915_get_gtt_size(void)
 
 		/* G33's GTT size defined in gmch_ctrl */
 		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-		case G33_PGETBL_SIZE_1M:
+		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+		case I830_GMCH_GMS_STOLEN_512:
+			size = 512;
+			break;
+		case I830_GMCH_GMS_STOLEN_1024:
 			size = 1024;
 			break;
-		case G33_PGETBL_SIZE_2M:
-			size = 2048;
+		case I830_GMCH_GMS_STOLEN_8192:
+			size = 8*1024;
 			break;
 		default:
 			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
-				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
+				(gmch_ctrl & I830_GMCH_GMS_MASK));
 			size = 512;
 		}
 	} else {
@@ -1279,6 +1298,11 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 
 	/* we have to call this as early as possible after the MMIO base address is known */
 	intel_i830_init_gtt_entries();
+	if (intel_private.gtt_entries == 0) {
+		iounmap(intel_private.gtt);
+		iounmap(intel_private.registers);
+		return -ENOMEM;
+	}
 
 	agp_bridge->gatt_table = NULL;
 
@@ -1306,6 +1330,16 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 	return addr | bridge->driver->masks[type].mask;
 }
 
+static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
+					    dma_addr_t addr, int type)
+{
+	/* Shift high bits down */
+	addr |= (addr >> 28) & 0xff;
+
+	/* Type checking must be done elsewhere */
+	return addr | bridge->driver->masks[type].mask;
+}
+
 static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 {
 	u16 snb_gmch_ctl;
@@ -1387,6 +1421,11 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 
 	/* we have to call this as early as possible after the MMIO base address is known */
 	intel_i830_init_gtt_entries();
+	if (intel_private.gtt_entries == 0) {
+		iounmap(intel_private.gtt);
+		iounmap(intel_private.registers);
+		return -ENOMEM;
+	}
 
 	agp_bridge->gatt_table = NULL;
 
@@ -1514,6 +1553,39 @@ static const struct agp_bridge_driver intel_i965_driver = {
 #endif
 };
 
+static const struct agp_bridge_driver intel_gen6_driver = {
+	.owner = THIS_MODULE,
+	.aperture_sizes = intel_i830_sizes,
+	.size_type = FIXED_APER_SIZE,
+	.num_aperture_sizes = 4,
+	.needs_scratch_page = true,
+	.configure = intel_i9xx_configure,
+	.fetch_size = intel_i9xx_fetch_size,
+	.cleanup = intel_i915_cleanup,
+	.mask_memory = intel_gen6_mask_memory,
+	.masks = intel_i810_masks,
+	.agp_enable = intel_i810_agp_enable,
+	.cache_flush = global_cache_flush,
+	.create_gatt_table = intel_i965_create_gatt_table,
+	.free_gatt_table = intel_i830_free_gatt_table,
+	.insert_memory = intel_i915_insert_entries,
+	.remove_memory = intel_i915_remove_entries,
+	.alloc_by_type = intel_i830_alloc_by_type,
+	.free_by_type = intel_i810_free_by_type,
+	.agp_alloc_page = agp_generic_alloc_page,
+	.agp_alloc_pages = agp_generic_alloc_pages,
+	.agp_destroy_page = agp_generic_destroy_page,
+	.agp_destroy_pages = agp_generic_destroy_pages,
+	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+	.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page = intel_agp_map_page,
+	.agp_unmap_page = intel_agp_unmap_page,
+	.agp_map_memory = intel_agp_map_memory,
+	.agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
 static const struct agp_bridge_driver intel_g33_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = intel_i830_sizes,
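The intel-gtt.c changes above introduce intel_max_stolen (a 32 MB cap) and trim any larger detected stolen range before it is converted into GTT entries, which is what the "trim stolen space to 32M" commit in the shortlog refers to. A standalone illustration of that clamp in plain userspace C (not driver code; the KB() macro and byte-to-4K-entry division simply mirror the hunk):

#include <stdio.h>

#define KB(x)	((x) * 1024)

static int intel_max_stolen = 32 * 1024 * 1024;	/* 32M cap, as in the patch */

/* gtt_entries arrives in bytes; the return value is a count of 4K entries */
static int trim_stolen(int gtt_entries, int local)
{
	if (!local && gtt_entries > intel_max_stolen) {
		printf("detected %dK stolen memory, trimming to %dK\n",
		       gtt_entries / KB(1), intel_max_stolen / KB(1));
		return intel_max_stolen / KB(4);
	}
	return gtt_entries / KB(4);
}

int main(void)
{
	/* a 64M stolen region gets trimmed down to 8192 entries (32M) */
	printf("%d entries\n", trim_stolen(64 * 1024 * 1024, 0));
	return 0;
}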
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 24314a9cffe8..1030f8420137 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -623,7 +623,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 
 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 {
-	return tpm_pm_resume(&dev->dev);
+	struct tpm_chip *chip = pnp_get_drvdata(dev);
+	int ret;
+
+	ret = tpm_pm_resume(&dev->dev);
+	if (!ret)
+		tpm_continue_selftest(chip);
+
+	return ret;
 }
 
 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
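tpm_tis_pnp_resume() now fetches the chip from the pnp_dev drvdata and, only when tpm_pm_resume() succeeds, kicks tpm_continue_selftest() so the TPM finishes its self-test after a suspend/resume cycle. Reduced to a standalone sketch with stand-in functions (resume_core() and continue_selftest() are placeholders, not the TPM driver's API), the "resume, then conditionally continue" flow looks like this:

#include <stdio.h>

static int resume_core(void)        { return 0; }			/* 0 == success */
static void continue_selftest(void) { puts("selftest resumed"); }

static int device_resume(void)
{
	int ret = resume_core();

	if (!ret)			/* don't poke hardware that failed to resume */
		continue_selftest();
	return ret;
}

int main(void)
{
	return device_resume();
}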
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 063b2184caf5..938b74ea9ffb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1077,6 +1077,7 @@ err_out_unregister:
 
 err_unlock_policy:
 	unlock_policy_rwsem_write(cpu);
+	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
 	free_cpumask_var(policy->cpus);
 err_free_policy:
@@ -1762,17 +1763,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 			dprintk("governor switch\n");
 
 			/* end old governor */
-			if (data->governor) {
-				/*
-				 * Need to release the rwsem around governor
-				 * stop due to lock dependency between
-				 * cancel_delayed_work_sync and the read lock
-				 * taken in the delayed work handler.
-				 */
-				unlock_policy_rwsem_write(data->cpu);
+			if (data->governor)
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-				lock_policy_rwsem_write(data->cpu);
-			}
 
 			/* start new governor */
 			data->governor = policy->governor;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index cc9357da0e34..e0187d16dd7c 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1300,7 +1300,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
 		if (devno == 0)
 			return -ENODEV;
 
-		i7core_printk(KERN_ERR,
+		i7core_printk(KERN_INFO,
 			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
 			dev_descr->dev, dev_descr->func,
 			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index f39b00a46eda..1052340e6802 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -336,6 +336,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
 
 static struct of_platform_driver mpc85xx_pci_err_driver = {
 	.probe = mpc85xx_pci_err_probe,
@@ -650,6 +651,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
 	{ .compatible = "fsl,p2020-l2-cache-controller", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
 
 static struct of_platform_driver mpc85xx_l2_err_driver = {
 	.probe = mpc85xx_l2_err_probe,
@@ -1126,6 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
 	{ .compatible = "fsl,p2020-memory-controller", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
 
 static struct of_platform_driver mpc85xx_mc_err_driver = {
 	.probe = mpc85xx_mc_err_probe,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ca36542e338..4e51fe3c1fc4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -893,10 +893,12 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
 void gpio_unexport(unsigned gpio)
 {
 	struct gpio_desc *desc;
-	int status = -EINVAL;
+	int status = 0;
 
-	if (!gpio_is_valid(gpio))
+	if (!gpio_is_valid(gpio)) {
+		status = -EINVAL;
 		goto done;
+	}
 
 	mutex_lock(&sysfs_lock);
 
@@ -911,7 +913,6 @@ void gpio_unexport(unsigned gpio)
 			clear_bit(FLAG_EXPORT, &desc->flags);
 			put_device(dev);
 			device_unregister(dev);
-			status = 0;
 		} else
 			status = -ENODEV;
 	}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 83d8072066cb..ea1d57291b0e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
 		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
				    false);
 		mode->hdisplay = 1366;
-		mode->vsync_start = mode->vsync_start - 1;
-		mode->vsync_end = mode->vsync_end - 1;
+		mode->hsync_start = mode->hsync_start - 1;
+		mode->hsync_end = mode->hsync_end - 1;
 		return mode;
 	}
 
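The drm_edid.c fix corrects which sync timings get nudged for the 1366x768 special case: CVT can only produce a wider (1368-pixel) mode, and since the adjustment is horizontal it belongs on hsync_start/hsync_end rather than the vsync fields the old code touched. A toy illustration of the fixup (the timing numbers below are placeholders, not real CVT output, and the struct is a stand-in for struct drm_display_mode):

#include <stdio.h>

struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
};

int main(void)
{
	/* roughly the shape of what drm_cvt_mode() hands back for "1366x768" */
	struct mode m = { .hdisplay = 1368, .hsync_start = 1440,
			  .hsync_end = 1528, .htotal = 1688 };

	m.hdisplay = 1366;	/* the panel really is 1366 pixels wide */
	m.hsync_start -= 1;	/* adjust the *horizontal* sync, as the patch does */
	m.hsync_end -= 1;

	printf("hdisplay %d, hsync %d-%d, htotal %d\n",
	       m.hdisplay, m.hsync_start, m.hsync_end, m.htotal);
	return 0;
}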
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aee83fa178f6..9214119c0154 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		case FBC_NOT_TILED:
 			seq_printf(m, "scanout buffer not tiled");
 			break;
+		case FBC_MULTIPLE_PIPES:
+			seq_printf(m, "multiple pipes are enabled");
+			break;
 		default:
 			seq_printf(m, "unknown reason");
 		}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 92898035845d..f19ffe87af3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,6 +41,8 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
+extern int intel_max_stolen; /* from AGP driver */
+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -1257,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 		drm_mm_put_block(compressed_fb);
 	}
 
-	if (!IS_GM45(dev)) {
+	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
 		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
						    4096, 0);
 		if (!compressed_llb) {
@@ -1283,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
-
-	if (IS_GM45(dev)) {
+	if (IS_IRONLAKE_M(dev))
+		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
 	} else {
 		I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1292,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 		dev_priv->compressed_llb = compressed_llb;
 	}
 
-	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
		  ll_base, size >> 20);
 }
 
@@ -1301,7 +1304,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	drm_mm_put_block(dev_priv->compressed_fb);
-	if (!IS_GM45(dev))
+	if (dev_priv->compressed_llb)
 		drm_mm_put_block(dev_priv->compressed_llb);
 }
 
@@ -2105,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_iomapfree;
 
+	if (prealloc_size > intel_max_stolen) {
+		DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+			 prealloc_size >> 20, intel_max_stolen >> 20);
+		prealloc_size = intel_max_stolen;
+	}
+
 	dev_priv->wq = create_singlethread_workqueue("i915");
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 65d3f3e8475b..5044f653e8ea 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = {
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+	.is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
@@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = {
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
@@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1, .has_rc6 = 1,
+	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
@@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-static const struct pci_device_id pciidlist[] = {
-	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
-	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
-	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+static const struct pci_device_id pciidlist[] = {	/* aka */
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),	/* I830_M */
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),	/* 845_G */
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),	/* I855_GM */
 	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
-	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
-	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
-	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
-	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
-	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
-	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
-	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
-	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
-	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
-	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
-	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
-	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
-	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),	/* I865_G */
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),	/* I915_G */
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),	/* E7221_G */
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),	/* I915_GM */
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),	/* I945_G */
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),	/* I945_GM */
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),	/* I945_GME */
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),	/* I946_GZ */
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),	/* G35_G */
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),	/* I965_Q */
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),	/* I965_G */
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),	/* Q35_G */
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),	/* G33_G */
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),	/* Q33_G */
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),	/* I965_GM */
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),	/* I965_GME */
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),	/* GM45_G */
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),	/* IGD_E_G */
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),	/* Q45_G */
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),	/* G45_G */
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),	/* G41_G */
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),	/* B43_G */
 	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
	 * Clear request list
	 */
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+	i915_gem_retire_requests(dev);
 
 	if (need_display)
 		i915_save_display(dev);
@@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev)
 	return i915_drm_freeze(drm_dev);
 }
 
-const struct dev_pm_ops i915_pm_ops = {
+static const struct dev_pm_ops i915_pm_ops = {
 	.suspend = i915_pm_suspend,
 	.resume = i915_pm_resume,
 	.freeze = i915_pm_freeze,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d147ab2f5bfc..906663b9929e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -176,7 +176,8 @@ struct drm_i915_display_funcs {
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	void (*update_wm)(struct drm_device *dev, int planea_clock,
-			  int planeb_clock, int sr_hdisplay, int pixel_size);
+			  int planeb_clock, int sr_hdisplay, int sr_htotal,
+			  int pixel_size);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -200,6 +201,8 @@ struct intel_device_info {
 	u8 need_gfx_hws : 1;
 	u8 is_g4x : 1;
 	u8 is_pineview : 1;
+	u8 is_broadwater : 1;
+	u8 is_crestline : 1;
 	u8 is_ironlake : 1;
 	u8 is_gen6 : 1;
 	u8 has_fbc : 1;
@@ -215,6 +218,7 @@ enum no_fbc_reason {
 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
 	FBC_BAD_PLANE, /* fbc not supported on plane */
 	FBC_NOT_TILED, /* buffer not tiled */
+	FBC_MULTIPLE_PIPES, /* more than one pipe active */
 };
 
 enum intel_pch {
@@ -222,6 +226,8 @@ enum intel_pch {
 	PCH_CPT,	/* Cougarpoint PCH */
 };
 
+#define QUIRK_PIPEA_FORCE (1<<0)
+
 struct intel_fbdev;
 
 typedef struct drm_i915_private {
@@ -285,6 +291,8 @@ typedef struct drm_i915_private {
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd;
+	uint32_t last_instdone;
+	uint32_t last_instdone1;
 
 	struct drm_mm vram;
 
@@ -337,6 +345,8 @@ typedef struct drm_i915_private {
 	/* PCH chipset type */
 	enum intel_pch pch_type;
 
+	unsigned long quirks;
+
 	/* Register state */
 	bool modeset_on_lid;
 	u8 saveLBB;
@@ -542,6 +552,14 @@ typedef struct drm_i915_private {
 		struct list_head fence_list;
 
 		/**
+		 * List of objects currently pending being freed.
+		 *
+		 * These objects are no longer in use, but due to a signal
+		 * we were prevented from freeing them at the appointed time.
+		 */
+		struct list_head deferred_free_list;
+
+		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
@@ -672,7 +690,7 @@ struct drm_i915_gem_object {
	 *
	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
	 */
-	int fence_reg : 5;
+	signed int fence_reg : 5;
 
 	/**
	 * Used for checking the object doesn't appear more than once
@@ -708,7 +726,7 @@ struct drm_i915_gem_object {
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
-	int pin_count : 4;
+	unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
 	/** AGP memory structure for our GTT binding. */
@@ -738,7 +756,7 @@ struct drm_i915_gem_object {
 	uint32_t stride;
 
 	/** Record of address bit 17 of each page at last unbind. */
-	long *bit_17;
+	unsigned long *bit_17;
 
 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
 	uint32_t agp_type;
@@ -950,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -981,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -1041,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1130,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define IS_I945GM(dev)	(INTEL_INFO(dev)->is_i945gm)
 #define IS_I965G(dev)	(INTEL_INFO(dev)->is_i965g)
 #define IS_I965GM(dev)	(INTEL_INFO(dev)->is_i965gm)
+#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev)	((dev)->pci_device == 0x2A42)
 #define IS_G4X(dev)	(INTEL_INFO(dev)->is_g4x)
 #define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 15d2d93aaca9..4efd4fd3b340 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | 37 | ||
38 | static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); | 38 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); |
39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); |
40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); |
41 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, | 41 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, |
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev); | |||
53 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 53 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, |
54 | struct drm_i915_gem_pwrite *args, | 54 | struct drm_i915_gem_pwrite *args, |
55 | struct drm_file *file_priv); | 55 | struct drm_file *file_priv); |
56 | static void i915_gem_free_object_tail(struct drm_gem_object *obj); | ||
56 | 57 | ||
57 | static LIST_HEAD(shrink_list); | 58 | static LIST_HEAD(shrink_list); |
58 | static DEFINE_SPINLOCK(shrink_list_lock); | 59 | static DEFINE_SPINLOCK(shrink_list_lock); |
@@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
127 | return -ENOMEM; | 128 | return -ENOMEM; |
128 | 129 | ||
129 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 130 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
130 | drm_gem_object_handle_unreference_unlocked(obj); | 131 | drm_gem_object_unreference_unlocked(obj); |
131 | |||
132 | if (ret) | 132 | if (ret) |
133 | return ret; | 133 | return ret; |
134 | 134 | ||
@@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev, | |||
1709 | /** | 1709 | /** |
1710 | * This function clears the request list as sequence numbers are passed. | 1710 | * This function clears the request list as sequence numbers are passed. |
1711 | */ | 1711 | */ |
1712 | void | 1712 | static void |
1713 | i915_gem_retire_requests(struct drm_device *dev, | 1713 | i915_gem_retire_requests_ring(struct drm_device *dev, |
1714 | struct intel_ring_buffer *ring) | 1714 | struct intel_ring_buffer *ring) |
1715 | { | 1715 | { |
1716 | drm_i915_private_t *dev_priv = dev->dev_private; | 1716 | drm_i915_private_t *dev_priv = dev->dev_private; |
1717 | uint32_t seqno; | 1717 | uint32_t seqno; |
@@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev, | |||
1751 | } | 1751 | } |
1752 | 1752 | ||
1753 | void | 1753 | void |
1754 | i915_gem_retire_requests(struct drm_device *dev) | ||
1755 | { | ||
1756 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1757 | |||
1758 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | ||
1759 | struct drm_i915_gem_object *obj_priv, *tmp; | ||
1760 | |||
1761 | /* We must be careful that during unbind() we do not | ||
1762 | * accidentally infinitely recurse into retire requests. | ||
1763 | * Currently: | ||
1764 | * retire -> free -> unbind -> wait -> retire_ring | ||
1765 | */ | ||
1766 | list_for_each_entry_safe(obj_priv, tmp, | ||
1767 | &dev_priv->mm.deferred_free_list, | ||
1768 | list) | ||
1769 | i915_gem_free_object_tail(&obj_priv->base); | ||
1770 | } | ||
1771 | |||
1772 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | ||
1773 | if (HAS_BSD(dev)) | ||
1774 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); | ||
1775 | } | ||
1776 | |||
1777 | void | ||
1754 | i915_gem_retire_work_handler(struct work_struct *work) | 1778 | i915_gem_retire_work_handler(struct work_struct *work) |
1755 | { | 1779 | { |
1756 | drm_i915_private_t *dev_priv; | 1780 | drm_i915_private_t *dev_priv; |
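The new i915_gem_retire_requests() in the hunk above becomes the single retire entry point: it drains mm.deferred_free_list first and then retires the render ring and, where present, the BSD ring. A minimal standalone sketch of that shape, using stand-in types rather than the driver's real structures:

#include <stdio.h>

struct ring { int pending; };
struct dev {
    struct ring render, bsd;
    int has_bsd;
    int deferred_frees;            /* stand-in for mm.deferred_free_list */
};

static void retire_ring(struct ring *r)   { r->pending = 0; }
static void drain_deferred(struct dev *d) { d->deferred_frees = 0; }

/* One entry point: finish deferred frees first, then retire every ring. */
static void retire_requests(struct dev *d)
{
    if (d->deferred_frees)
        drain_deferred(d);
    retire_ring(&d->render);
    if (d->has_bsd)
        retire_ring(&d->bsd);
}

int main(void)
{
    struct dev d = { { 3 }, { 1 }, 1, 2 };
    retire_requests(&d);
    printf("pending: render=%d bsd=%d deferred=%d\n",
           d.render.pending, d.bsd.pending, d.deferred_frees);
    return 0;
}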
@@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1761 | dev = dev_priv->dev; | 1785 | dev = dev_priv->dev; |
1762 | 1786 | ||
1763 | mutex_lock(&dev->struct_mutex); | 1787 | mutex_lock(&dev->struct_mutex); |
1764 | i915_gem_retire_requests(dev, &dev_priv->render_ring); | 1788 | i915_gem_retire_requests(dev); |
1765 | |||
1766 | if (HAS_BSD(dev)) | ||
1767 | i915_gem_retire_requests(dev, &dev_priv->bsd_ring); | ||
1768 | 1789 | ||
1769 | if (!dev_priv->mm.suspended && | 1790 | if (!dev_priv->mm.suspended && |
1770 | (!list_empty(&dev_priv->render_ring.request_list) || | 1791 | (!list_empty(&dev_priv->render_ring.request_list) || |
@@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
1832 | * a separate wait queue to handle that. | 1853 | * a separate wait queue to handle that. |
1833 | */ | 1854 | */ |
1834 | if (ret == 0) | 1855 | if (ret == 0) |
1835 | i915_gem_retire_requests(dev, ring); | 1856 | i915_gem_retire_requests_ring(dev, ring); |
1836 | 1857 | ||
1837 | return ret; | 1858 | return ret; |
1838 | } | 1859 | } |
@@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1945 | * before we unbind. | 1966 | * before we unbind. |
1946 | */ | 1967 | */ |
1947 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 1968 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
1948 | if (ret) { | 1969 | if (ret == -ERESTARTSYS) |
1949 | if (ret != -ERESTARTSYS) | ||
1950 | DRM_ERROR("set_domain failed: %d\n", ret); | ||
1951 | return ret; | 1970 | return ret; |
1952 | } | 1971 | /* Continue on if we fail due to EIO, the GPU is hung so we |
1972 | * should be safe and we need to cleanup or else we might | ||
1973 | * cause memory corruption through use-after-free. | ||
1974 | */ | ||
1953 | 1975 | ||
1954 | BUG_ON(obj_priv->active); | 1976 | BUG_ON(obj_priv->active); |
1955 | 1977 | ||
@@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1985 | 2007 | ||
1986 | trace_i915_gem_object_unbind(obj); | 2008 | trace_i915_gem_object_unbind(obj); |
1987 | 2009 | ||
1988 | return 0; | 2010 | return ret; |
1989 | } | 2011 | } |
1990 | 2012 | ||
1991 | static struct drm_gem_object * | 2013 | static struct drm_gem_object * |
@@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2107 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 2129 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; |
2108 | struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; | 2130 | struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; |
2109 | for (;;) { | 2131 | for (;;) { |
2110 | i915_gem_retire_requests(dev, render_ring); | 2132 | i915_gem_retire_requests(dev); |
2111 | |||
2112 | if (HAS_BSD(dev)) | ||
2113 | i915_gem_retire_requests(dev, bsd_ring); | ||
2114 | 2133 | ||
2115 | /* If there's an inactive buffer available now, grab it | 2134 | /* If there's an inactive buffer available now, grab it |
2116 | * and be done. | 2135 | * and be done. |
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | |||
2583 | if (!IS_I965G(dev)) { | 2602 | if (!IS_I965G(dev)) { |
2584 | int ret; | 2603 | int ret; |
2585 | 2604 | ||
2586 | i915_gem_object_flush_gpu_write_domain(obj); | 2605 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2606 | if (ret != 0) | ||
2607 | return ret; | ||
2608 | |||
2587 | ret = i915_gem_object_wait_rendering(obj); | 2609 | ret = i915_gem_object_wait_rendering(obj); |
2588 | if (ret != 0) | 2610 | if (ret != 0) |
2589 | return ret; | 2611 | return ret; |
@@ -2731,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
2731 | } | 2753 | } |
2732 | 2754 | ||
2733 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2755 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2734 | static void | 2756 | static int |
2735 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | 2757 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) |
2736 | { | 2758 | { |
2737 | struct drm_device *dev = obj->dev; | 2759 | struct drm_device *dev = obj->dev; |
@@ -2739,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2739 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2761 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
2740 | 2762 | ||
2741 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2763 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2742 | return; | 2764 | return 0; |
2743 | 2765 | ||
2744 | /* Queue the GPU write cache flushing we need. */ | 2766 | /* Queue the GPU write cache flushing we need. */ |
2745 | old_write_domain = obj->write_domain; | 2767 | old_write_domain = obj->write_domain; |
2746 | i915_gem_flush(dev, 0, obj->write_domain); | 2768 | i915_gem_flush(dev, 0, obj->write_domain); |
2747 | (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); | 2769 | if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) |
2748 | BUG_ON(obj->write_domain); | 2770 | return -ENOMEM; |
2749 | 2771 | ||
2750 | trace_i915_gem_object_change_domain(obj, | 2772 | trace_i915_gem_object_change_domain(obj, |
2751 | obj->read_domains, | 2773 | obj->read_domains, |
2752 | old_write_domain); | 2774 | old_write_domain); |
2775 | return 0; | ||
2753 | } | 2776 | } |
2754 | 2777 | ||
2755 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2778 | /** Flushes the GTT write domain for the object if it's dirty. */ |
@@ -2793,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2793 | old_write_domain); | 2816 | old_write_domain); |
2794 | } | 2817 | } |
2795 | 2818 | ||
2796 | void | 2819 | int |
2797 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | 2820 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) |
2798 | { | 2821 | { |
2822 | int ret = 0; | ||
2823 | |||
2799 | switch (obj->write_domain) { | 2824 | switch (obj->write_domain) { |
2800 | case I915_GEM_DOMAIN_GTT: | 2825 | case I915_GEM_DOMAIN_GTT: |
2801 | i915_gem_object_flush_gtt_write_domain(obj); | 2826 | i915_gem_object_flush_gtt_write_domain(obj); |
@@ -2804,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | |||
2804 | i915_gem_object_flush_cpu_write_domain(obj); | 2829 | i915_gem_object_flush_cpu_write_domain(obj); |
2805 | break; | 2830 | break; |
2806 | default: | 2831 | default: |
2807 | i915_gem_object_flush_gpu_write_domain(obj); | 2832 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2808 | break; | 2833 | break; |
2809 | } | 2834 | } |
2835 | |||
2836 | return ret; | ||
2810 | } | 2837 | } |
2811 | 2838 | ||
2812 | /** | 2839 | /** |
@@ -2826,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2826 | if (obj_priv->gtt_space == NULL) | 2853 | if (obj_priv->gtt_space == NULL) |
2827 | return -EINVAL; | 2854 | return -EINVAL; |
2828 | 2855 | ||
2829 | i915_gem_object_flush_gpu_write_domain(obj); | 2856 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2857 | if (ret != 0) | ||
2858 | return ret; | ||
2859 | |||
2830 | /* Wait on any GPU rendering and flushing to occur. */ | 2860 | /* Wait on any GPU rendering and flushing to occur. */ |
2831 | ret = i915_gem_object_wait_rendering(obj); | 2861 | ret = i915_gem_object_wait_rendering(obj); |
2832 | if (ret != 0) | 2862 | if (ret != 0) |
@@ -2876,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | |||
2876 | if (obj_priv->gtt_space == NULL) | 2906 | if (obj_priv->gtt_space == NULL) |
2877 | return -EINVAL; | 2907 | return -EINVAL; |
2878 | 2908 | ||
2879 | i915_gem_object_flush_gpu_write_domain(obj); | 2909 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2910 | if (ret) | ||
2911 | return ret; | ||
2880 | 2912 | ||
2881 | /* Wait on any GPU rendering and flushing to occur. */ | 2913 | /* Wait on any GPU rendering and flushing to occur. */ |
2882 | if (obj_priv->active) { | 2914 | if (obj_priv->active) { |
@@ -2924,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2924 | uint32_t old_write_domain, old_read_domains; | 2956 | uint32_t old_write_domain, old_read_domains; |
2925 | int ret; | 2957 | int ret; |
2926 | 2958 | ||
2927 | i915_gem_object_flush_gpu_write_domain(obj); | 2959 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2960 | if (ret) | ||
2961 | return ret; | ||
2962 | |||
2928 | /* Wait on any GPU rendering and flushing to occur. */ | 2963 | /* Wait on any GPU rendering and flushing to occur. */ |
2929 | ret = i915_gem_object_wait_rendering(obj); | 2964 | ret = i915_gem_object_wait_rendering(obj); |
2930 | if (ret != 0) | 2965 | if (ret != 0) |
@@ -3214,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3214 | if (offset == 0 && size == obj->size) | 3249 | if (offset == 0 && size == obj->size) |
3215 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3250 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3216 | 3251 | ||
3217 | i915_gem_object_flush_gpu_write_domain(obj); | 3252 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3253 | if (ret) | ||
3254 | return ret; | ||
3255 | |||
3218 | /* Wait on any GPU rendering and flushing to occur. */ | 3256 | /* Wait on any GPU rendering and flushing to occur. */ |
3219 | ret = i915_gem_object_wait_rendering(obj); | 3257 | ret = i915_gem_object_wait_rendering(obj); |
3220 | if (ret != 0) | 3258 | if (ret != 0) |
@@ -3645,6 +3683,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
3645 | return ret; | 3683 | return ret; |
3646 | } | 3684 | } |
3647 | 3685 | ||
3686 | |||
3648 | int | 3687 | int |
3649 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 3688 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3650 | struct drm_file *file_priv, | 3689 | struct drm_file *file_priv, |
@@ -3792,7 +3831,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3792 | unsigned long long total_size = 0; | 3831 | unsigned long long total_size = 0; |
3793 | int num_fences = 0; | 3832 | int num_fences = 0; |
3794 | for (i = 0; i < args->buffer_count; i++) { | 3833 | for (i = 0; i < args->buffer_count; i++) { |
3795 | obj_priv = object_list[i]->driver_private; | 3834 | obj_priv = to_intel_bo(object_list[i]); |
3796 | 3835 | ||
3797 | total_size += object_list[i]->size; | 3836 | total_size += object_list[i]->size; |
3798 | num_fences += | 3837 | num_fences += |
@@ -4310,7 +4349,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4310 | struct drm_i915_gem_busy *args = data; | 4349 | struct drm_i915_gem_busy *args = data; |
4311 | struct drm_gem_object *obj; | 4350 | struct drm_gem_object *obj; |
4312 | struct drm_i915_gem_object *obj_priv; | 4351 | struct drm_i915_gem_object *obj_priv; |
4313 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4314 | 4352 | ||
4315 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 4353 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
4316 | if (obj == NULL) { | 4354 | if (obj == NULL) { |
@@ -4325,10 +4363,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4325 | * actually unmasked, and our working set ends up being larger than | 4363 | * actually unmasked, and our working set ends up being larger than |
4326 | * required. | 4364 | * required. |
4327 | */ | 4365 | */ |
4328 | i915_gem_retire_requests(dev, &dev_priv->render_ring); | 4366 | i915_gem_retire_requests(dev); |
4329 | |||
4330 | if (HAS_BSD(dev)) | ||
4331 | i915_gem_retire_requests(dev, &dev_priv->bsd_ring); | ||
4332 | 4367 | ||
4333 | obj_priv = to_intel_bo(obj); | 4368 | obj_priv = to_intel_bo(obj); |
4334 | /* Don't count being on the flushing list against the object being | 4369 | /* Don't count being on the flushing list against the object being |
@@ -4438,20 +4473,19 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4438 | return 0; | 4473 | return 0; |
4439 | } | 4474 | } |
4440 | 4475 | ||
4441 | void i915_gem_free_object(struct drm_gem_object *obj) | 4476 | static void i915_gem_free_object_tail(struct drm_gem_object *obj) |
4442 | { | 4477 | { |
4443 | struct drm_device *dev = obj->dev; | 4478 | struct drm_device *dev = obj->dev; |
4479 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4444 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4480 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4481 | int ret; | ||
4445 | 4482 | ||
4446 | trace_i915_gem_object_destroy(obj); | 4483 | ret = i915_gem_object_unbind(obj); |
4447 | 4484 | if (ret == -ERESTARTSYS) { | |
4448 | while (obj_priv->pin_count > 0) | 4485 | list_move(&obj_priv->list, |
4449 | i915_gem_object_unpin(obj); | 4486 | &dev_priv->mm.deferred_free_list); |
4450 | 4487 | return; | |
4451 | if (obj_priv->phys_obj) | 4488 | } |
4452 | i915_gem_detach_phys_object(dev, obj); | ||
4453 | |||
4454 | i915_gem_object_unbind(obj); | ||
4455 | 4489 | ||
4456 | if (obj_priv->mmap_offset) | 4490 | if (obj_priv->mmap_offset) |
4457 | i915_gem_free_mmap_offset(obj); | 4491 | i915_gem_free_mmap_offset(obj); |
@@ -4463,6 +4497,22 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
4463 | kfree(obj_priv); | 4497 | kfree(obj_priv); |
4464 | } | 4498 | } |
4465 | 4499 | ||
4500 | void i915_gem_free_object(struct drm_gem_object *obj) | ||
4501 | { | ||
4502 | struct drm_device *dev = obj->dev; | ||
4503 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4504 | |||
4505 | trace_i915_gem_object_destroy(obj); | ||
4506 | |||
4507 | while (obj_priv->pin_count > 0) | ||
4508 | i915_gem_object_unpin(obj); | ||
4509 | |||
4510 | if (obj_priv->phys_obj) | ||
4511 | i915_gem_detach_phys_object(dev, obj); | ||
4512 | |||
4513 | i915_gem_free_object_tail(obj); | ||
4514 | } | ||
4515 | |||
4466 | /** Unbinds all inactive objects. */ | 4516 | /** Unbinds all inactive objects. */ |
4467 | static int | 4517 | static int |
4468 | i915_gem_evict_from_inactive_list(struct drm_device *dev) | 4518 | i915_gem_evict_from_inactive_list(struct drm_device *dev) |
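i915_gem_free_object() is split above so that the interruptible work lives in i915_gem_free_object_tail(): if unbind is interrupted by a signal (-ERESTARTSYS) the object is parked on mm.deferred_free_list and freed later from the retire path. A compact sketch of that defer-on-interrupt pattern, with hypothetical names and the interruption forced for illustration:

#include <stdlib.h>

#define ERESTARTSYS 512            /* kernel-internal value, defined here for illustration */

struct obj {
    struct obj *next_deferred;     /* stand-in for the deferred_free_list link */
};

static struct obj *deferred_free_list;

/* Pretend unbind was interrupted by a signal while waiting on the GPU. */
static int unbind(struct obj *o)
{
    (void)o;
    return -ERESTARTSYS;
}

static void free_object_tail(struct obj *o)
{
    if (unbind(o) == -ERESTARTSYS) {
        /* Cannot finish now: park the object and let retire retry later. */
        o->next_deferred = deferred_free_list;
        deferred_free_list = o;
        return;
    }
    free(o);                       /* otherwise release the remaining resources */
}

int main(void)
{
    free_object_tail(malloc(sizeof(struct obj)));
    return deferred_free_list != NULL ? 0 : 1;
}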
@@ -4686,9 +4736,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4686 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); | 4736 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); |
4687 | mutex_unlock(&dev->struct_mutex); | 4737 | mutex_unlock(&dev->struct_mutex); |
4688 | 4738 | ||
4689 | drm_irq_install(dev); | 4739 | ret = drm_irq_install(dev); |
4740 | if (ret) | ||
4741 | goto cleanup_ringbuffer; | ||
4690 | 4742 | ||
4691 | return 0; | 4743 | return 0; |
4744 | |||
4745 | cleanup_ringbuffer: | ||
4746 | mutex_lock(&dev->struct_mutex); | ||
4747 | i915_gem_cleanup_ringbuffer(dev); | ||
4748 | dev_priv->mm.suspended = 1; | ||
4749 | mutex_unlock(&dev->struct_mutex); | ||
4750 | |||
4751 | return ret; | ||
4692 | } | 4752 | } |
4693 | 4753 | ||
4694 | int | 4754 | int |
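The entervt change above stops ignoring drm_irq_install() failures: on error it unwinds the ring buffer setup under the mutex and re-enters the suspended state before returning. The same unwind-on-failure shape in a tiny standalone sketch with placeholder setup and teardown functions:

#include <stdio.h>

static int setup_rings(void)     { puts("rings up");   return 0; }
static void cleanup_rings(void)  { puts("rings down"); }
static int install_irq(int fail) { return fail ? -1 : 0; }

static int entervt(int fail_irq)
{
    int ret = setup_rings();
    if (ret)
        return ret;

    ret = install_irq(fail_irq);
    if (ret)
        goto cleanup_ringbuffer;   /* undo what already succeeded before failing */

    return 0;

cleanup_ringbuffer:
    cleanup_rings();
    return ret;
}

int main(void)
{
    return entervt(1) ? 0 : 1;     /* exercise the error path */
}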
@@ -4726,6 +4786,7 @@ i915_gem_load(struct drm_device *dev) | |||
4726 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | 4786 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); |
4727 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4787 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4728 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4788 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
4789 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | ||
4729 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); | 4790 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); |
4730 | INIT_LIST_HEAD(&dev_priv->render_ring.request_list); | 4791 | INIT_LIST_HEAD(&dev_priv->render_ring.request_list); |
4731 | if (HAS_BSD(dev)) { | 4792 | if (HAS_BSD(dev)) { |
@@ -5024,10 +5085,7 @@ rescan: | |||
5024 | continue; | 5085 | continue; |
5025 | 5086 | ||
5026 | spin_unlock(&shrink_list_lock); | 5087 | spin_unlock(&shrink_list_lock); |
5027 | i915_gem_retire_requests(dev, &dev_priv->render_ring); | 5088 | i915_gem_retire_requests(dev); |
5028 | |||
5029 | if (HAS_BSD(dev)) | ||
5030 | i915_gem_retire_requests(dev, &dev_priv->bsd_ring); | ||
5031 | 5089 | ||
5032 | list_for_each_entry_safe(obj_priv, next_obj, | 5090 | list_for_each_entry_safe(obj_priv, next_obj, |
5033 | &dev_priv->mm.inactive_list, | 5091 | &dev_priv->mm.inactive_list, |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 4b7c49d4257d..155719e4d16f 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
333 | i915_gem_release_mmap(obj); | 333 | i915_gem_release_mmap(obj); |
334 | 334 | ||
335 | if (ret != 0) { | 335 | if (ret != 0) { |
336 | WARN(ret != -ERESTARTSYS, | ||
337 | "failed to reset object for tiling switch"); | ||
338 | args->tiling_mode = obj_priv->tiling_mode; | 336 | args->tiling_mode = obj_priv->tiling_mode; |
339 | args->stride = obj_priv->stride; | 337 | args->stride = obj_priv->stride; |
340 | goto err; | 338 | goto err; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dba53d4b9fb3..85785a8844ed 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev) | |||
171 | ironlake_enable_display_irq(dev_priv, DE_GSE); | 171 | ironlake_enable_display_irq(dev_priv, DE_GSE); |
172 | else { | 172 | else { |
173 | i915_enable_pipestat(dev_priv, 1, | 173 | i915_enable_pipestat(dev_priv, 1, |
174 | I915_LEGACY_BLC_EVENT_ENABLE); | 174 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
175 | if (IS_I965G(dev)) | 175 | if (IS_I965G(dev)) |
176 | i915_enable_pipestat(dev_priv, 0, | 176 | i915_enable_pipestat(dev_priv, 0, |
177 | I915_LEGACY_BLC_EVENT_ENABLE); | 177 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
178 | } | 178 | } |
179 | } | 179 | } |
180 | 180 | ||
@@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
842 | u32 iir, new_iir; | 842 | u32 iir, new_iir; |
843 | u32 pipea_stats, pipeb_stats; | 843 | u32 pipea_stats, pipeb_stats; |
844 | u32 vblank_status; | 844 | u32 vblank_status; |
845 | u32 vblank_enable; | ||
846 | int vblank = 0; | 845 | int vblank = 0; |
847 | unsigned long irqflags; | 846 | unsigned long irqflags; |
848 | int irq_received; | 847 | int irq_received; |
@@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
856 | 855 | ||
857 | iir = I915_READ(IIR); | 856 | iir = I915_READ(IIR); |
858 | 857 | ||
859 | if (IS_I965G(dev)) { | 858 | if (IS_I965G(dev)) |
860 | vblank_status = I915_START_VBLANK_INTERRUPT_STATUS; | 859 | vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; |
861 | vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE; | 860 | else |
862 | } else { | 861 | vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; |
863 | vblank_status = I915_VBLANK_INTERRUPT_STATUS; | ||
864 | vblank_enable = I915_VBLANK_INTERRUPT_ENABLE; | ||
865 | } | ||
866 | 862 | ||
867 | for (;;) { | 863 | for (;;) { |
868 | irq_received = iir != 0; | 864 | irq_received = iir != 0; |
@@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
966 | intel_finish_page_flip(dev, 1); | 962 | intel_finish_page_flip(dev, 1); |
967 | } | 963 | } |
968 | 964 | ||
969 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 965 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || |
970 | (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 966 | (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || |
971 | (iir & I915_ASLE_INTERRUPT)) | 967 | (iir & I915_ASLE_INTERRUPT)) |
972 | opregion_asle_intr(dev); | 968 | opregion_asle_intr(dev); |
973 | 969 | ||
@@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1233 | { | 1229 | { |
1234 | struct drm_device *dev = (struct drm_device *)data; | 1230 | struct drm_device *dev = (struct drm_device *)data; |
1235 | drm_i915_private_t *dev_priv = dev->dev_private; | 1231 | drm_i915_private_t *dev_priv = dev->dev_private; |
1236 | uint32_t acthd; | 1232 | uint32_t acthd, instdone, instdone1; |
1237 | 1233 | ||
1238 | /* No reset support on this chip yet. */ | 1234 | /* No reset support on this chip yet. */ |
1239 | if (IS_GEN6(dev)) | 1235 | if (IS_GEN6(dev)) |
1240 | return; | 1236 | return; |
1241 | 1237 | ||
1242 | if (!IS_I965G(dev)) | 1238 | if (!IS_I965G(dev)) { |
1243 | acthd = I915_READ(ACTHD); | 1239 | acthd = I915_READ(ACTHD); |
1244 | else | 1240 | instdone = I915_READ(INSTDONE); |
1241 | instdone1 = 0; | ||
1242 | } else { | ||
1245 | acthd = I915_READ(ACTHD_I965); | 1243 | acthd = I915_READ(ACTHD_I965); |
1244 | instdone = I915_READ(INSTDONE_I965); | ||
1245 | instdone1 = I915_READ(INSTDONE1); | ||
1246 | } | ||
1246 | 1247 | ||
1247 | /* If all work is done then ACTHD clearly hasn't advanced. */ | 1248 | /* If all work is done then ACTHD clearly hasn't advanced. */ |
1248 | if (list_empty(&dev_priv->render_ring.request_list) || | 1249 | if (list_empty(&dev_priv->render_ring.request_list) || |
@@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1253 | return; | 1254 | return; |
1254 | } | 1255 | } |
1255 | 1256 | ||
1256 | if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { | 1257 | if (dev_priv->last_acthd == acthd && |
1257 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1258 | dev_priv->last_instdone == instdone && |
1258 | i915_handle_error(dev, true); | 1259 | dev_priv->last_instdone1 == instdone1) { |
1259 | return; | 1260 | if (dev_priv->hangcheck_count++ > 1) { |
1260 | } | 1261 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
1262 | i915_handle_error(dev, true); | ||
1263 | return; | ||
1264 | } | ||
1265 | } else { | ||
1266 | dev_priv->hangcheck_count = 0; | ||
1267 | |||
1268 | dev_priv->last_acthd = acthd; | ||
1269 | dev_priv->last_instdone = instdone; | ||
1270 | dev_priv->last_instdone1 = instdone1; | ||
1271 | } | ||
1261 | 1272 | ||
1262 | /* Reset timer case chip hangs without another request being added */ | 1273 | /* Reset timer case chip hangs without another request being added */ |
1263 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 1274 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); |
1264 | |||
1265 | if (acthd != dev_priv->last_acthd) | ||
1266 | dev_priv->hangcheck_count = 0; | ||
1267 | else | ||
1268 | dev_priv->hangcheck_count++; | ||
1269 | |||
1270 | dev_priv->last_acthd = acthd; | ||
1271 | } | 1275 | } |
1272 | 1276 | ||
1273 | /* drm_dma.h hooks | 1277 | /* drm_dma.h hooks |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6d9b0288272a..281db6e5403a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -442,7 +442,7 @@ | |||
442 | #define GEN6_RENDER_IMR 0x20a8 | 442 | #define GEN6_RENDER_IMR 0x20a8 |
443 | #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) | 443 | #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) |
444 | #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) | 444 | #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) |
445 | #define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6) | 445 | #define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) |
446 | #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) | 446 | #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) |
447 | #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) | 447 | #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) |
448 | #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) | 448 | #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) |
@@ -530,6 +530,21 @@ | |||
530 | #define DPFC_CHICKEN 0x3224 | 530 | #define DPFC_CHICKEN 0x3224 |
531 | #define DPFC_HT_MODIFY (1<<31) | 531 | #define DPFC_HT_MODIFY (1<<31) |
532 | 532 | ||
533 | /* Framebuffer compression for Ironlake */ | ||
534 | #define ILK_DPFC_CB_BASE 0x43200 | ||
535 | #define ILK_DPFC_CONTROL 0x43208 | ||
536 | /* The bit 28-8 is reserved */ | ||
537 | #define DPFC_RESERVED (0x1FFFFF00) | ||
538 | #define ILK_DPFC_RECOMP_CTL 0x4320c | ||
539 | #define ILK_DPFC_STATUS 0x43210 | ||
540 | #define ILK_DPFC_FENCE_YOFF 0x43218 | ||
541 | #define ILK_DPFC_CHICKEN 0x43224 | ||
542 | #define ILK_FBC_RT_BASE 0x2128 | ||
543 | #define ILK_FBC_RT_VALID (1<<0) | ||
544 | |||
545 | #define ILK_DISPLAY_CHICKEN1 0x42000 | ||
546 | #define ILK_FBCQ_DIS (1<<22) | ||
547 | |||
533 | /* | 548 | /* |
534 | * GPIO regs | 549 | * GPIO regs |
535 | */ | 550 | */ |
@@ -595,32 +610,6 @@ | |||
595 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ | 610 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ |
596 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ | 611 | #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ |
597 | 612 | ||
598 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) | ||
599 | #define I915_CRC_ERROR_ENABLE (1UL<<29) | ||
600 | #define I915_CRC_DONE_ENABLE (1UL<<28) | ||
601 | #define I915_GMBUS_EVENT_ENABLE (1UL<<27) | ||
602 | #define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) | ||
603 | #define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) | ||
604 | #define I915_DPST_EVENT_ENABLE (1UL<<23) | ||
605 | #define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) | ||
606 | #define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) | ||
607 | #define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) | ||
608 | #define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ | ||
609 | #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) | ||
610 | #define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) | ||
611 | #define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) | ||
612 | #define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) | ||
613 | #define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) | ||
614 | #define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) | ||
615 | #define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) | ||
616 | #define I915_DPST_EVENT_STATUS (1UL<<7) | ||
617 | #define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) | ||
618 | #define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) | ||
619 | #define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) | ||
620 | #define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ | ||
621 | #define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) | ||
622 | #define I915_OVERLAY_UPDATED_STATUS (1UL<<0) | ||
623 | |||
624 | #define SRX_INDEX 0x3c4 | 613 | #define SRX_INDEX 0x3c4 |
625 | #define SRX_DATA 0x3c5 | 614 | #define SRX_DATA 0x3c5 |
626 | #define SR01 1 | 615 | #define SR01 1 |
@@ -2166,7 +2155,8 @@ | |||
2166 | #define I830_FIFO_LINE_SIZE 32 | 2155 | #define I830_FIFO_LINE_SIZE 32 |
2167 | 2156 | ||
2168 | #define G4X_FIFO_SIZE 127 | 2157 | #define G4X_FIFO_SIZE 127 |
2169 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ | 2158 | #define I965_FIFO_SIZE 512 |
2159 | #define I945_FIFO_SIZE 127 | ||
2170 | #define I915_FIFO_SIZE 95 | 2160 | #define I915_FIFO_SIZE 95 |
2171 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ | 2161 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ |
2172 | #define I830_FIFO_SIZE 95 | 2162 | #define I830_FIFO_SIZE 95 |
@@ -2185,6 +2175,9 @@ | |||
2185 | #define PINEVIEW_CURSOR_DFT_WM 0 | 2175 | #define PINEVIEW_CURSOR_DFT_WM 0 |
2186 | #define PINEVIEW_CURSOR_GUARD_WM 5 | 2176 | #define PINEVIEW_CURSOR_GUARD_WM 5 |
2187 | 2177 | ||
2178 | #define I965_CURSOR_FIFO 64 | ||
2179 | #define I965_CURSOR_MAX_WM 32 | ||
2180 | #define I965_CURSOR_DFT_WM 8 | ||
2188 | 2181 | ||
2189 | /* define the Watermark register on Ironlake */ | 2182 | /* define the Watermark register on Ironlake */ |
2190 | #define WM0_PIPEA_ILK 0x45100 | 2183 | #define WM0_PIPEA_ILK 0x45100 |
@@ -2212,6 +2205,9 @@ | |||
2212 | #define ILK_DISPLAY_FIFO 128 | 2205 | #define ILK_DISPLAY_FIFO 128 |
2213 | #define ILK_DISPLAY_MAXWM 64 | 2206 | #define ILK_DISPLAY_MAXWM 64 |
2214 | #define ILK_DISPLAY_DFTWM 8 | 2207 | #define ILK_DISPLAY_DFTWM 8 |
2208 | #define ILK_CURSOR_FIFO 32 | ||
2209 | #define ILK_CURSOR_MAXWM 16 | ||
2210 | #define ILK_CURSOR_DFTWM 8 | ||
2215 | 2211 | ||
2216 | #define ILK_DISPLAY_SR_FIFO 512 | 2212 | #define ILK_DISPLAY_SR_FIFO 512 |
2217 | #define ILK_DISPLAY_MAX_SRWM 0x1ff | 2213 | #define ILK_DISPLAY_MAX_SRWM 0x1ff |
@@ -2510,6 +2506,10 @@ | |||
2510 | #define ILK_VSDPFD_FULL (1<<21) | 2506 | #define ILK_VSDPFD_FULL (1<<21) |
2511 | #define ILK_DSPCLK_GATE 0x42020 | 2507 | #define ILK_DSPCLK_GATE 0x42020 |
2512 | #define ILK_DPARB_CLK_GATE (1<<5) | 2508 | #define ILK_DPARB_CLK_GATE (1<<5) |
2509 | /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ | ||
2510 | #define ILK_CLK_FBC (1<<7) | ||
2511 | #define ILK_DPFC_DIS1 (1<<8) | ||
2512 | #define ILK_DPFC_DIS2 (1<<9) | ||
2513 | 2513 | ||
2514 | #define DISP_ARB_CTL 0x45000 | 2514 | #define DISP_ARB_CTL 0x45000 |
2515 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 2515 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
@@ -2869,6 +2869,7 @@ | |||
2869 | 2869 | ||
2870 | #define PCH_PP_STATUS 0xc7200 | 2870 | #define PCH_PP_STATUS 0xc7200 |
2871 | #define PCH_PP_CONTROL 0xc7204 | 2871 | #define PCH_PP_CONTROL 0xc7204 |
2872 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | ||
2872 | #define EDP_FORCE_VDD (1 << 3) | 2873 | #define EDP_FORCE_VDD (1 << 3) |
2873 | #define EDP_BLC_ENABLE (1 << 2) | 2874 | #define EDP_BLC_ENABLE (1 << 2) |
2874 | #define PANEL_POWER_RESET (1 << 1) | 2875 | #define PANEL_POWER_RESET (1 << 1) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 60a5800fba6e..6e2025274db5 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev) | |||
602 | 602 | ||
603 | /* Only save FBC state on the platform that supports FBC */ | 603 | /* Only save FBC state on the platform that supports FBC */ |
604 | if (I915_HAS_FBC(dev)) { | 604 | if (I915_HAS_FBC(dev)) { |
605 | if (IS_GM45(dev)) { | 605 | if (IS_IRONLAKE_M(dev)) { |
606 | dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); | ||
607 | } else if (IS_GM45(dev)) { | ||
606 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); | 608 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); |
607 | } else { | 609 | } else { |
608 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); | 610 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); |
@@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev) | |||
706 | 708 | ||
707 | /* only restore FBC info on the platform that supports FBC*/ | 709 | /* only restore FBC info on the platform that supports FBC*/ |
708 | if (I915_HAS_FBC(dev)) { | 710 | if (I915_HAS_FBC(dev)) { |
709 | if (IS_GM45(dev)) { | 711 | if (IS_IRONLAKE_M(dev)) { |
712 | ironlake_disable_fbc(dev); | ||
713 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | ||
714 | } else if (IS_GM45(dev)) { | ||
710 | g4x_disable_fbc(dev); | 715 | g4x_disable_fbc(dev); |
711 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 716 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
712 | } else { | 717 | } else { |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f879589bead1..ae1718549eec 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -43,6 +43,7 @@ | |||
43 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 43 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); |
44 | static void intel_update_watermarks(struct drm_device *dev); | 44 | static void intel_update_watermarks(struct drm_device *dev); |
45 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); | 45 | static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); |
46 | static void intel_crtc_update_cursor(struct drm_crtc *crtc); | ||
46 | 47 | ||
47 | typedef struct { | 48 | typedef struct { |
48 | /* given values */ | 49 | /* given values */ |
@@ -323,6 +324,9 @@ struct intel_limit { | |||
323 | #define IRONLAKE_DP_P1_MIN 1 | 324 | #define IRONLAKE_DP_P1_MIN 1 |
324 | #define IRONLAKE_DP_P1_MAX 2 | 325 | #define IRONLAKE_DP_P1_MAX 2 |
325 | 326 | ||
327 | /* FDI */ | ||
328 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ | ||
329 | |||
326 | static bool | 330 | static bool |
327 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 331 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
328 | int target, int refclk, intel_clock_t *best_clock); | 332 | int target, int refclk, intel_clock_t *best_clock); |
@@ -863,8 +867,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
863 | intel_clock_t clock; | 867 | intel_clock_t clock; |
864 | int max_n; | 868 | int max_n; |
865 | bool found; | 869 | bool found; |
866 | /* approximately equals target * 0.00488 */ | 870 | /* approximately equals target * 0.00585 */ |
867 | int err_most = (target >> 8) + (target >> 10); | 871 | int err_most = (target >> 8) + (target >> 9); |
868 | found = false; | 872 | found = false; |
869 | 873 | ||
870 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
@@ -1123,6 +1127,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
1123 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 1127 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1124 | } | 1128 | } |
1125 | 1129 | ||
1130 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
1131 | { | ||
1132 | struct drm_device *dev = crtc->dev; | ||
1133 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1134 | struct drm_framebuffer *fb = crtc->fb; | ||
1135 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
1136 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | ||
1137 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1138 | int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : | ||
1139 | DPFC_CTL_PLANEB; | ||
1140 | unsigned long stall_watermark = 200; | ||
1141 | u32 dpfc_ctl; | ||
1142 | |||
1143 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | ||
1144 | dev_priv->cfb_fence = obj_priv->fence_reg; | ||
1145 | dev_priv->cfb_plane = intel_crtc->plane; | ||
1146 | |||
1147 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1148 | dpfc_ctl &= DPFC_RESERVED; | ||
1149 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | ||
1150 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1151 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | ||
1152 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | ||
1153 | } else { | ||
1154 | I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); | ||
1155 | } | ||
1156 | |||
1157 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1158 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | | ||
1159 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | ||
1160 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | ||
1161 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | ||
1162 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | ||
1163 | /* enable it... */ | ||
1164 | I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | | ||
1165 | DPFC_CTL_EN); | ||
1166 | |||
1167 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | ||
1168 | } | ||
1169 | |||
1170 | void ironlake_disable_fbc(struct drm_device *dev) | ||
1171 | { | ||
1172 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1173 | u32 dpfc_ctl; | ||
1174 | |||
1175 | /* Disable compression */ | ||
1176 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
1177 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
1178 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
1179 | intel_wait_for_vblank(dev); | ||
1180 | |||
1181 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
1182 | } | ||
1183 | |||
1184 | static bool ironlake_fbc_enabled(struct drm_device *dev) | ||
1185 | { | ||
1186 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1187 | |||
1188 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | ||
1189 | } | ||
1190 | |||
1126 | bool intel_fbc_enabled(struct drm_device *dev) | 1191 | bool intel_fbc_enabled(struct drm_device *dev) |
1127 | { | 1192 | { |
1128 | struct drm_i915_private *dev_priv = dev->dev_private; | 1193 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1181,8 +1246,12 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1181 | struct drm_framebuffer *fb = crtc->fb; | 1246 | struct drm_framebuffer *fb = crtc->fb; |
1182 | struct intel_framebuffer *intel_fb; | 1247 | struct intel_framebuffer *intel_fb; |
1183 | struct drm_i915_gem_object *obj_priv; | 1248 | struct drm_i915_gem_object *obj_priv; |
1249 | struct drm_crtc *tmp_crtc; | ||
1184 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1250 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1185 | int plane = intel_crtc->plane; | 1251 | int plane = intel_crtc->plane; |
1252 | int crtcs_enabled = 0; | ||
1253 | |||
1254 | DRM_DEBUG_KMS("\n"); | ||
1186 | 1255 | ||
1187 | if (!i915_powersave) | 1256 | if (!i915_powersave) |
1188 | return; | 1257 | return; |
@@ -1200,10 +1269,21 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1200 | * If FBC is already on, we just have to verify that we can | 1269 | * If FBC is already on, we just have to verify that we can |
1201 | * keep it that way... | 1270 | * keep it that way... |
1202 | * Need to disable if: | 1271 | * Need to disable if: |
1272 | * - more than one pipe is active | ||
1203 | * - changing FBC params (stride, fence, mode) | 1273 | * - changing FBC params (stride, fence, mode) |
1204 | * - new fb is too large to fit in compressed buffer | 1274 | * - new fb is too large to fit in compressed buffer |
1205 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1275 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1206 | */ | 1276 | */ |
1277 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | ||
1278 | if (tmp_crtc->enabled) | ||
1279 | crtcs_enabled++; | ||
1280 | } | ||
1281 | DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); | ||
1282 | if (crtcs_enabled > 1) { | ||
1283 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
1284 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | ||
1285 | goto out_disable; | ||
1286 | } | ||
1207 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1287 | if (intel_fb->obj->size > dev_priv->cfb_size) { |
1208 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1288 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1209 | "compression\n"); | 1289 | "compression\n"); |
@@ -1256,7 +1336,7 @@ out_disable: | |||
1256 | } | 1336 | } |
1257 | } | 1337 | } |
1258 | 1338 | ||
1259 | static int | 1339 | int |
1260 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | 1340 | intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) |
1261 | { | 1341 | { |
1262 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1342 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
@@ -1265,7 +1345,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) | |||
1265 | 1345 | ||
1266 | switch (obj_priv->tiling_mode) { | 1346 | switch (obj_priv->tiling_mode) { |
1267 | case I915_TILING_NONE: | 1347 | case I915_TILING_NONE: |
1268 | alignment = 64 * 1024; | 1348 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1349 | alignment = 128 * 1024; | ||
1350 | else if (IS_I965G(dev)) | ||
1351 | alignment = 4 * 1024; | ||
1352 | else | ||
1353 | alignment = 64 * 1024; | ||
1269 | break; | 1354 | break; |
1270 | case I915_TILING_X: | 1355 | case I915_TILING_X: |
1271 | /* pin() will align the object as required by fence */ | 1356 | /* pin() will align the object as required by fence */ |
@@ -1540,6 +1625,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1540 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 1625 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; |
1541 | u32 temp, tries = 0; | 1626 | u32 temp, tries = 0; |
1542 | 1627 | ||
1628 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1629 | for train result */ | ||
1630 | temp = I915_READ(fdi_rx_imr_reg); | ||
1631 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1632 | temp &= ~FDI_RX_BIT_LOCK; | ||
1633 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1634 | I915_READ(fdi_rx_imr_reg); | ||
1635 | udelay(150); | ||
1636 | |||
1543 | /* enable CPU FDI TX and PCH FDI RX */ | 1637 | /* enable CPU FDI TX and PCH FDI RX */ |
1544 | temp = I915_READ(fdi_tx_reg); | 1638 | temp = I915_READ(fdi_tx_reg); |
1545 | temp |= FDI_TX_ENABLE; | 1639 | temp |= FDI_TX_ENABLE; |
@@ -1557,16 +1651,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1557 | I915_READ(fdi_rx_reg); | 1651 | I915_READ(fdi_rx_reg); |
1558 | udelay(150); | 1652 | udelay(150); |
1559 | 1653 | ||
1560 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | 1654 | for (tries = 0; tries < 5; tries++) { |
1561 | for train result */ | ||
1562 | temp = I915_READ(fdi_rx_imr_reg); | ||
1563 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1564 | temp &= ~FDI_RX_BIT_LOCK; | ||
1565 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1566 | I915_READ(fdi_rx_imr_reg); | ||
1567 | udelay(150); | ||
1568 | |||
1569 | for (;;) { | ||
1570 | temp = I915_READ(fdi_rx_iir_reg); | 1655 | temp = I915_READ(fdi_rx_iir_reg); |
1571 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 1656 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1572 | 1657 | ||
@@ -1576,14 +1661,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1576 | temp | FDI_RX_BIT_LOCK); | 1661 | temp | FDI_RX_BIT_LOCK); |
1577 | break; | 1662 | break; |
1578 | } | 1663 | } |
1579 | |||
1580 | tries++; | ||
1581 | |||
1582 | if (tries > 5) { | ||
1583 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | ||
1584 | break; | ||
1585 | } | ||
1586 | } | 1664 | } |
1665 | if (tries == 5) | ||
1666 | DRM_DEBUG_KMS("FDI train 1 fail!\n"); | ||
1587 | 1667 | ||
1588 | /* Train 2 */ | 1668 | /* Train 2 */ |
1589 | temp = I915_READ(fdi_tx_reg); | 1669 | temp = I915_READ(fdi_tx_reg); |
@@ -1599,7 +1679,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1599 | 1679 | ||
1600 | tries = 0; | 1680 | tries = 0; |
1601 | 1681 | ||
1602 | for (;;) { | 1682 | for (tries = 0; tries < 5; tries++) { |
1603 | temp = I915_READ(fdi_rx_iir_reg); | 1683 | temp = I915_READ(fdi_rx_iir_reg); |
1604 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | 1684 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
1605 | 1685 | ||
@@ -1609,14 +1689,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1609 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 1689 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
1610 | break; | 1690 | break; |
1611 | } | 1691 | } |
1612 | |||
1613 | tries++; | ||
1614 | |||
1615 | if (tries > 5) { | ||
1616 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | ||
1617 | break; | ||
1618 | } | ||
1619 | } | 1692 | } |
1693 | if (tries == 5) | ||
1694 | DRM_DEBUG_KMS("FDI train 2 fail!\n"); | ||
1620 | 1695 | ||
1621 | DRM_DEBUG_KMS("FDI train done\n"); | 1696 | DRM_DEBUG_KMS("FDI train done\n"); |
1622 | } | 1697 | } |
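Both training loops above become bounded for-loops: break out as soon as the lock bit shows up in FDI_RX_IIR, and report failure only if all five attempts were used. A minimal sketch of that loop shape with a fake status read standing in for the register poll:

#include <stdio.h>

/* Pretend the lock bit appears on the third poll. */
static int read_status(int attempt)
{
    return attempt == 2;
}

int main(void)
{
    int tries;

    for (tries = 0; tries < 5; tries++) {
        if (read_status(tries)) {
            printf("train done after %d tries\n", tries + 1);
            break;
        }
    }
    if (tries == 5)
        printf("train fail!\n");   /* only reached when every attempt missed */

    return 0;
}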
@@ -1641,6 +1716,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1641 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 1716 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; |
1642 | u32 temp, i; | 1717 | u32 temp, i; |
1643 | 1718 | ||
1719 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1720 | for train result */ | ||
1721 | temp = I915_READ(fdi_rx_imr_reg); | ||
1722 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1723 | temp &= ~FDI_RX_BIT_LOCK; | ||
1724 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1725 | I915_READ(fdi_rx_imr_reg); | ||
1726 | udelay(150); | ||
1727 | |||
1644 | /* enable CPU FDI TX and PCH FDI RX */ | 1728 | /* enable CPU FDI TX and PCH FDI RX */ |
1645 | temp = I915_READ(fdi_tx_reg); | 1729 | temp = I915_READ(fdi_tx_reg); |
1646 | temp |= FDI_TX_ENABLE; | 1730 | temp |= FDI_TX_ENABLE; |
@@ -1666,15 +1750,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
1666 | I915_READ(fdi_rx_reg); | 1750 | I915_READ(fdi_rx_reg); |
1667 | udelay(150); | 1751 | udelay(150); |
1668 | 1752 | ||
1669 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | ||
1670 | for train result */ | ||
1671 | temp = I915_READ(fdi_rx_imr_reg); | ||
1672 | temp &= ~FDI_RX_SYMBOL_LOCK; | ||
1673 | temp &= ~FDI_RX_BIT_LOCK; | ||
1674 | I915_WRITE(fdi_rx_imr_reg, temp); | ||
1675 | I915_READ(fdi_rx_imr_reg); | ||
1676 | udelay(150); | ||
1677 | |||
1678 | for (i = 0; i < 4; i++ ) { | 1753 | for (i = 0; i < 4; i++ ) { |
1679 | temp = I915_READ(fdi_tx_reg); | 1754 | temp = I915_READ(fdi_tx_reg); |
1680 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 1755 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
@@ -1829,7 +1904,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1829 | } | 1904 | } |
1830 | 1905 | ||
1831 | /* Enable panel fitting for LVDS */ | 1906 | /* Enable panel fitting for LVDS */ |
1832 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 1907 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) |
1908 | || HAS_eDP || intel_pch_has_edp(crtc)) { | ||
1833 | temp = I915_READ(pf_ctl_reg); | 1909 | temp = I915_READ(pf_ctl_reg); |
1834 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); | 1910 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); |
1835 | 1911 | ||
@@ -1924,9 +2000,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1924 | reg = I915_READ(trans_dp_ctl); | 2000 | reg = I915_READ(trans_dp_ctl); |
1925 | reg &= ~TRANS_DP_PORT_SEL_MASK; | 2001 | reg &= ~TRANS_DP_PORT_SEL_MASK; |
1926 | reg = TRANS_DP_OUTPUT_ENABLE | | 2002 | reg = TRANS_DP_OUTPUT_ENABLE | |
1927 | TRANS_DP_ENH_FRAMING | | 2003 | TRANS_DP_ENH_FRAMING; |
1928 | TRANS_DP_VSYNC_ACTIVE_HIGH | | 2004 | |
1929 | TRANS_DP_HSYNC_ACTIVE_HIGH; | 2005 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2006 | reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; | ||
2007 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) | ||
2008 | reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; | ||
1930 | 2009 | ||
1931 | switch (intel_trans_dp_port_sel(crtc)) { | 2010 | switch (intel_trans_dp_port_sel(crtc)) { |
1932 | case PCH_DP_B: | 2011 | case PCH_DP_B: |
@@ -1966,6 +2045,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1966 | 2045 | ||
1967 | intel_crtc_load_lut(crtc); | 2046 | intel_crtc_load_lut(crtc); |
1968 | 2047 | ||
2048 | intel_update_fbc(crtc, &crtc->mode); | ||
2049 | |||
1969 | break; | 2050 | break; |
1970 | case DRM_MODE_DPMS_OFF: | 2051 | case DRM_MODE_DPMS_OFF: |
1971 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 2052 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
@@ -1980,6 +2061,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1980 | I915_READ(dspbase_reg); | 2061 | I915_READ(dspbase_reg); |
1981 | } | 2062 | } |
1982 | 2063 | ||
2064 | if (dev_priv->cfb_plane == plane && | ||
2065 | dev_priv->display.disable_fbc) | ||
2066 | dev_priv->display.disable_fbc(dev); | ||
2067 | |||
1983 | i915_disable_vga(dev); | 2068 | i915_disable_vga(dev); |
1984 | 2069 | ||
1985 | /* disable cpu pipe, disable after all planes disabled */ | 2070 | /* disable cpu pipe, disable after all planes disabled */ |
@@ -2256,6 +2341,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2256 | intel_wait_for_vblank(dev); | 2341 | intel_wait_for_vblank(dev); |
2257 | } | 2342 | } |
2258 | 2343 | ||
2344 | /* Don't disable pipe A or pipe A PLLs if needed */ | ||
2345 | if (pipeconf_reg == PIPEACONF && | ||
2346 | (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
2347 | goto skip_pipe_off; | ||
2348 | |||
2259 | /* Next, disable display pipes */ | 2349 | /* Next, disable display pipes */ |
2260 | temp = I915_READ(pipeconf_reg); | 2350 | temp = I915_READ(pipeconf_reg); |
2261 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2351 | if ((temp & PIPEACONF_ENABLE) != 0) { |
@@ -2271,7 +2361,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2271 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2361 | I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); |
2272 | I915_READ(dpll_reg); | 2362 | I915_READ(dpll_reg); |
2273 | } | 2363 | } |
2274 | 2364 | skip_pipe_off: | |
2275 | /* Wait for the clocks to turn off. */ | 2365 | /* Wait for the clocks to turn off. */ |
2276 | udelay(150); | 2366 | udelay(150); |
2277 | break; | 2367 | break; |
@@ -2354,11 +2444,9 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | |||
2354 | struct drm_device *dev = crtc->dev; | 2444 | struct drm_device *dev = crtc->dev; |
2355 | if (HAS_PCH_SPLIT(dev)) { | 2445 | if (HAS_PCH_SPLIT(dev)) { |
2356 | /* FDI link clock is fixed at 2.7G */ | 2446 | /* FDI link clock is fixed at 2.7G */ |
2357 | if (mode->clock * 3 > 27000 * 4) | 2447 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
2358 | return MODE_CLOCK_HIGH; | 2448 | return false; |
2359 | } | 2449 | } |
2360 | |||
2361 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
2362 | return true; | 2450 | return true; |
2363 | } | 2451 | } |
2364 | 2452 | ||
@@ -2539,6 +2627,20 @@ static struct intel_watermark_params g4x_wm_info = { | |||
2539 | 2, | 2627 | 2, |
2540 | G4X_FIFO_LINE_SIZE, | 2628 | G4X_FIFO_LINE_SIZE, |
2541 | }; | 2629 | }; |
2630 | static struct intel_watermark_params g4x_cursor_wm_info = { | ||
2631 | I965_CURSOR_FIFO, | ||
2632 | I965_CURSOR_MAX_WM, | ||
2633 | I965_CURSOR_DFT_WM, | ||
2634 | 2, | ||
2635 | G4X_FIFO_LINE_SIZE, | ||
2636 | }; | ||
2637 | static struct intel_watermark_params i965_cursor_wm_info = { | ||
2638 | I965_CURSOR_FIFO, | ||
2639 | I965_CURSOR_MAX_WM, | ||
2640 | I965_CURSOR_DFT_WM, | ||
2641 | 2, | ||
2642 | I915_FIFO_LINE_SIZE, | ||
2643 | }; | ||
2542 | static struct intel_watermark_params i945_wm_info = { | 2644 | static struct intel_watermark_params i945_wm_info = { |
2543 | I945_FIFO_SIZE, | 2645 | I945_FIFO_SIZE, |
2544 | I915_MAX_WM, | 2646 | I915_MAX_WM, |
@@ -2576,6 +2678,14 @@ static struct intel_watermark_params ironlake_display_wm_info = { | |||
2576 | ILK_FIFO_LINE_SIZE | 2678 | ILK_FIFO_LINE_SIZE |
2577 | }; | 2679 | }; |
2578 | 2680 | ||
2681 | static struct intel_watermark_params ironlake_cursor_wm_info = { | ||
2682 | ILK_CURSOR_FIFO, | ||
2683 | ILK_CURSOR_MAXWM, | ||
2684 | ILK_CURSOR_DFTWM, | ||
2685 | 2, | ||
2686 | ILK_FIFO_LINE_SIZE | ||
2687 | }; | ||
2688 | |||
2579 | static struct intel_watermark_params ironlake_display_srwm_info = { | 2689 | static struct intel_watermark_params ironlake_display_srwm_info = { |
2580 | ILK_DISPLAY_SR_FIFO, | 2690 | ILK_DISPLAY_SR_FIFO, |
2581 | ILK_DISPLAY_MAX_SRWM, | 2691 | ILK_DISPLAY_MAX_SRWM, |
@@ -2625,7 +2735,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2625 | */ | 2735 | */ |
2626 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / | 2736 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / |
2627 | 1000; | 2737 | 1000; |
2628 | entries_required /= wm->cacheline_size; | 2738 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
2629 | 2739 | ||
2630 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); | 2740 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); |
2631 | 2741 | ||
@@ -2636,8 +2746,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2636 | /* Don't promote wm_size to unsigned... */ | 2746 | /* Don't promote wm_size to unsigned... */ |
2637 | if (wm_size > (long)wm->max_wm) | 2747 | if (wm_size > (long)wm->max_wm) |
2638 | wm_size = wm->max_wm; | 2748 | wm_size = wm->max_wm; |
2639 | if (wm_size <= 0) | 2749 | if (wm_size <= 0) { |
2640 | wm_size = wm->default_wm; | 2750 | wm_size = wm->default_wm; |
2751 | DRM_ERROR("Insufficient FIFO for plane, expect flickering:" | ||
2752 | " entries required = %ld, available = %lu.\n", | ||
2753 | entries_required + wm->guard_size, | ||
2754 | wm->fifo_size); | ||
2755 | } | ||
2756 | |||
2641 | return wm_size; | 2757 | return wm_size; |
2642 | } | 2758 | } |
2643 | 2759 | ||
@@ -2746,11 +2862,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | |||
2746 | uint32_t dsparb = I915_READ(DSPARB); | 2862 | uint32_t dsparb = I915_READ(DSPARB); |
2747 | int size; | 2863 | int size; |
2748 | 2864 | ||
2749 | if (plane == 0) | 2865 | size = dsparb & 0x7f; |
2750 | size = dsparb & 0x7f; | 2866 | if (plane) |
2751 | else | 2867 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; |
2752 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | ||
2753 | (dsparb & 0x7f); | ||
2754 | 2868 | ||
2755 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 2869 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
2756 | plane ? "B" : "A", size); | 2870 | plane ? "B" : "A", size); |
@@ -2764,11 +2878,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) | |||
2764 | uint32_t dsparb = I915_READ(DSPARB); | 2878 | uint32_t dsparb = I915_READ(DSPARB); |
2765 | int size; | 2879 | int size; |
2766 | 2880 | ||
2767 | if (plane == 0) | 2881 | size = dsparb & 0x1ff; |
2768 | size = dsparb & 0x1ff; | 2882 | if (plane) |
2769 | else | 2883 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; |
2770 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2771 | (dsparb & 0x1ff); | ||
2772 | size >>= 1; /* Convert to cachelines */ | 2884 | size >>= 1; /* Convert to cachelines */ |
2773 | 2885 | ||
2774 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, | 2886 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
@@ -2809,7 +2921,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
2809 | } | 2921 | } |
2810 | 2922 | ||
2811 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, | 2923 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, |
2812 | int planeb_clock, int sr_hdisplay, int pixel_size) | 2924 | int planeb_clock, int sr_hdisplay, int unused, |
2925 | int pixel_size) | ||
2813 | { | 2926 | { |
2814 | struct drm_i915_private *dev_priv = dev->dev_private; | 2927 | struct drm_i915_private *dev_priv = dev->dev_private; |
2815 | u32 reg; | 2928 | u32 reg; |
@@ -2874,7 +2987,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
2874 | } | 2987 | } |
2875 | 2988 | ||
2876 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, | 2989 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, |
2877 | int planeb_clock, int sr_hdisplay, int pixel_size) | 2990 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
2991 | int pixel_size) | ||
2878 | { | 2992 | { |
2879 | struct drm_i915_private *dev_priv = dev->dev_private; | 2993 | struct drm_i915_private *dev_priv = dev->dev_private; |
2880 | int total_size, cacheline_size; | 2994 | int total_size, cacheline_size; |
@@ -2898,12 +3012,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2898 | */ | 3012 | */ |
2899 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / | 3013 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / |
2900 | 1000; | 3014 | 1000; |
2901 | entries_required /= G4X_FIFO_LINE_SIZE; | 3015 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); |
2902 | planea_wm = entries_required + planea_params.guard_size; | 3016 | planea_wm = entries_required + planea_params.guard_size; |
2903 | 3017 | ||
2904 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / | 3018 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / |
2905 | 1000; | 3019 | 1000; |
2906 | entries_required /= G4X_FIFO_LINE_SIZE; | 3020 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); |
2907 | planeb_wm = entries_required + planeb_params.guard_size; | 3021 | planeb_wm = entries_required + planeb_params.guard_size; |
2908 | 3022 | ||
2909 | cursora_wm = cursorb_wm = 16; | 3023 | cursora_wm = cursorb_wm = 16; |
@@ -2917,13 +3031,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2917 | static const int sr_latency_ns = 12000; | 3031 | static const int sr_latency_ns = 12000; |
2918 | 3032 | ||
2919 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3033 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2920 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3034 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
2921 | 3035 | ||
2922 | /* Use ns/us then divide to preserve precision */ | 3036 | /* Use ns/us then divide to preserve precision */ |
2923 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 3037 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
2924 | pixel_size * sr_hdisplay) / 1000; | 3038 | pixel_size * sr_hdisplay; |
2925 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 3039 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); |
2926 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3040 | |
3041 | entries_required = (((sr_latency_ns / line_time_us) + | ||
3042 | 1000) / 1000) * pixel_size * 64; | ||
3043 | entries_required = DIV_ROUND_UP(entries_required, | ||
3044 | g4x_cursor_wm_info.cacheline_size); | ||
3045 | cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; | ||
3046 | |||
3047 | if (cursor_sr > g4x_cursor_wm_info.max_wm) | ||
3048 | cursor_sr = g4x_cursor_wm_info.max_wm; | ||
3049 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3050 | "cursor %d\n", sr_entries, cursor_sr); | ||
3051 | |||
2927 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3052 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2928 | } else { | 3053 | } else { |
2929 | /* Turn off self refresh if both pipes are enabled */ | 3054 | /* Turn off self refresh if both pipes are enabled */ |
@@ -2948,11 +3073,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2948 | } | 3073 | } |
2949 | 3074 | ||
2950 | static void i965_update_wm(struct drm_device *dev, int planea_clock, | 3075 | static void i965_update_wm(struct drm_device *dev, int planea_clock, |
2951 | int planeb_clock, int sr_hdisplay, int pixel_size) | 3076 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
3077 | int pixel_size) | ||
2952 | { | 3078 | { |
2953 | struct drm_i915_private *dev_priv = dev->dev_private; | 3079 | struct drm_i915_private *dev_priv = dev->dev_private; |
2954 | unsigned long line_time_us; | 3080 | unsigned long line_time_us; |
2955 | int sr_clock, sr_entries, srwm = 1; | 3081 | int sr_clock, sr_entries, srwm = 1; |
3082 | int cursor_sr = 16; | ||
2956 | 3083 | ||
2957 | /* Calc sr entries for one plane configs */ | 3084 | /* Calc sr entries for one plane configs */ |
2958 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3085 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
@@ -2960,17 +3087,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2960 | static const int sr_latency_ns = 12000; | 3087 | static const int sr_latency_ns = 12000; |
2961 | 3088 | ||
2962 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3089 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2963 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3090 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
2964 | 3091 | ||
2965 | /* Use ns/us then divide to preserve precision */ | 3092 | /* Use ns/us then divide to preserve precision */ |
2966 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 3093 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
2967 | pixel_size * sr_hdisplay) / 1000; | 3094 | pixel_size * sr_hdisplay; |
2968 | sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); | 3095 | sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); |
2969 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3096 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); |
2970 | srwm = I945_FIFO_SIZE - sr_entries; | 3097 | srwm = I965_FIFO_SIZE - sr_entries; |
2971 | if (srwm < 0) | 3098 | if (srwm < 0) |
2972 | srwm = 1; | 3099 | srwm = 1; |
2973 | srwm &= 0x3f; | 3100 | srwm &= 0x1ff; |
3101 | |||
3102 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | ||
3103 | pixel_size * 64; | ||
3104 | sr_entries = DIV_ROUND_UP(sr_entries, | ||
3105 | i965_cursor_wm_info.cacheline_size); | ||
3106 | cursor_sr = i965_cursor_wm_info.fifo_size - | ||
3107 | (sr_entries + i965_cursor_wm_info.guard_size); | ||
3108 | |||
3109 | if (cursor_sr > i965_cursor_wm_info.max_wm) | ||
3110 | cursor_sr = i965_cursor_wm_info.max_wm; | ||
3111 | |||
3112 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3113 | "cursor %d\n", srwm, cursor_sr); | ||
3114 | |||
2974 | if (IS_I965GM(dev)) | 3115 | if (IS_I965GM(dev)) |
2975 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3116 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2976 | } else { | 3117 | } else { |
@@ -2987,10 +3128,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2987 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | | 3128 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | |
2988 | (8 << 0)); | 3129 | (8 << 0)); |
2989 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | 3130 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
3131 | /* update cursor SR watermark */ | ||
3132 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | ||
2990 | } | 3133 | } |
2991 | 3134 | ||
2992 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | 3135 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, |
2993 | int planeb_clock, int sr_hdisplay, int pixel_size) | 3136 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
3137 | int pixel_size) | ||
2994 | { | 3138 | { |
2995 | struct drm_i915_private *dev_priv = dev->dev_private; | 3139 | struct drm_i915_private *dev_priv = dev->dev_private; |
2996 | uint32_t fwater_lo; | 3140 | uint32_t fwater_lo; |
@@ -3035,12 +3179,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3035 | static const int sr_latency_ns = 6000; | 3179 | static const int sr_latency_ns = 6000; |
3036 | 3180 | ||
3037 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3181 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
3038 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3182 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
3039 | 3183 | ||
3040 | /* Use ns/us then divide to preserve precision */ | 3184 | /* Use ns/us then divide to preserve precision */ |
3041 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | 3185 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3042 | pixel_size * sr_hdisplay) / 1000; | 3186 | pixel_size * sr_hdisplay; |
3043 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 3187 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); |
3044 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); | 3188 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); |
3045 | srwm = total_size - sr_entries; | 3189 | srwm = total_size - sr_entries; |
3046 | if (srwm < 0) | 3190 | if (srwm < 0) |
@@ -3078,7 +3222,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3078 | } | 3222 | } |
3079 | 3223 | ||
3080 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | 3224 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, |
3081 | int unused2, int pixel_size) | 3225 | int unused2, int unused3, int pixel_size) |
3082 | { | 3226 | { |
3083 | struct drm_i915_private *dev_priv = dev->dev_private; | 3227 | struct drm_i915_private *dev_priv = dev->dev_private; |
3084 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 3228 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
@@ -3096,9 +3240,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3096 | } | 3240 | } |
3097 | 3241 | ||
3098 | #define ILK_LP0_PLANE_LATENCY 700 | 3242 | #define ILK_LP0_PLANE_LATENCY 700 |
3243 | #define ILK_LP0_CURSOR_LATENCY 1300 | ||
3099 | 3244 | ||
3100 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | 3245 | static void ironlake_update_wm(struct drm_device *dev, int planea_clock, |
3101 | int planeb_clock, int sr_hdisplay, int pixel_size) | 3246 | int planeb_clock, int sr_hdisplay, int sr_htotal, |
3247 | int pixel_size) | ||
3102 | { | 3248 | { |
3103 | struct drm_i915_private *dev_priv = dev->dev_private; | 3249 | struct drm_i915_private *dev_priv = dev->dev_private; |
3104 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | 3250 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; |
@@ -3106,20 +3252,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3106 | unsigned long line_time_us; | 3252 | unsigned long line_time_us; |
3107 | int sr_clock, entries_required; | 3253 | int sr_clock, entries_required; |
3108 | u32 reg_value; | 3254 | u32 reg_value; |
3255 | int line_count; | ||
3256 | int planea_htotal = 0, planeb_htotal = 0; | ||
3257 | struct drm_crtc *crtc; | ||
3258 | struct intel_crtc *intel_crtc; | ||
3259 | |||
3260 | /* Need htotal for all active display planes */ | ||
3261 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3262 | intel_crtc = to_intel_crtc(crtc); | ||
3263 | if (crtc->enabled) { | ||
3264 | if (intel_crtc->plane == 0) | ||
3265 | planea_htotal = crtc->mode.htotal; | ||
3266 | else | ||
3267 | planeb_htotal = crtc->mode.htotal; | ||
3268 | } | ||
3269 | } | ||
3109 | 3270 | ||
3110 | /* Calculate and update the watermark for plane A */ | 3271 | /* Calculate and update the watermark for plane A */ |
3111 | if (planea_clock) { | 3272 | if (planea_clock) { |
3112 | entries_required = ((planea_clock / 1000) * pixel_size * | 3273 | entries_required = ((planea_clock / 1000) * pixel_size * |
3113 | ILK_LP0_PLANE_LATENCY) / 1000; | 3274 | ILK_LP0_PLANE_LATENCY) / 1000; |
3114 | entries_required = DIV_ROUND_UP(entries_required, | 3275 | entries_required = DIV_ROUND_UP(entries_required, |
3115 | ironlake_display_wm_info.cacheline_size); | 3276 | ironlake_display_wm_info.cacheline_size); |
3116 | planea_wm = entries_required + | 3277 | planea_wm = entries_required + |
3117 | ironlake_display_wm_info.guard_size; | 3278 | ironlake_display_wm_info.guard_size; |
3118 | 3279 | ||
3119 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) | 3280 | if (planea_wm > (int)ironlake_display_wm_info.max_wm) |
3120 | planea_wm = ironlake_display_wm_info.max_wm; | 3281 | planea_wm = ironlake_display_wm_info.max_wm; |
3121 | 3282 | ||
3122 | cursora_wm = 16; | 3283 | /* Use the large buffer method to calculate cursor watermark */ |
3284 | line_time_us = (planea_htotal * 1000) / planea_clock; | ||
3285 | |||
3286 | /* Use ns/us then divide to preserve precision */ | ||
3287 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | ||
3288 | |||
3289 | /* calculate the cursor watermark for cursor A */ | ||
3290 | entries_required = line_count * 64 * pixel_size; | ||
3291 | entries_required = DIV_ROUND_UP(entries_required, | ||
3292 | ironlake_cursor_wm_info.cacheline_size); | ||
3293 | cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3294 | if (cursora_wm > ironlake_cursor_wm_info.max_wm) | ||
3295 | cursora_wm = ironlake_cursor_wm_info.max_wm; | ||
3296 | |||
3123 | reg_value = I915_READ(WM0_PIPEA_ILK); | 3297 | reg_value = I915_READ(WM0_PIPEA_ILK); |
3124 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | 3298 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
3125 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | | 3299 | reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | |
@@ -3133,14 +3307,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3133 | entries_required = ((planeb_clock / 1000) * pixel_size * | 3307 | entries_required = ((planeb_clock / 1000) * pixel_size * |
3134 | ILK_LP0_PLANE_LATENCY) / 1000; | 3308 | ILK_LP0_PLANE_LATENCY) / 1000; |
3135 | entries_required = DIV_ROUND_UP(entries_required, | 3309 | entries_required = DIV_ROUND_UP(entries_required, |
3136 | ironlake_display_wm_info.cacheline_size); | 3310 | ironlake_display_wm_info.cacheline_size); |
3137 | planeb_wm = entries_required + | 3311 | planeb_wm = entries_required + |
3138 | ironlake_display_wm_info.guard_size; | 3312 | ironlake_display_wm_info.guard_size; |
3139 | 3313 | ||
3140 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) | 3314 | if (planeb_wm > (int)ironlake_display_wm_info.max_wm) |
3141 | planeb_wm = ironlake_display_wm_info.max_wm; | 3315 | planeb_wm = ironlake_display_wm_info.max_wm; |
3142 | 3316 | ||
3143 | cursorb_wm = 16; | 3317 | /* Use the large buffer method to calculate cursor watermark */ |
3318 | line_time_us = (planeb_htotal * 1000) / planeb_clock; | ||
3319 | |||
3320 | /* Use ns/us then divide to preserve precision */ | ||
3321 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | ||
3322 | |||
3323 | /* calculate the cursor watermark for cursor B */ | ||
3324 | entries_required = line_count * 64 * pixel_size; | ||
3325 | entries_required = DIV_ROUND_UP(entries_required, | ||
3326 | ironlake_cursor_wm_info.cacheline_size); | ||
3327 | cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; | ||
3328 | if (cursorb_wm > ironlake_cursor_wm_info.max_wm) | ||
3329 | cursorb_wm = ironlake_cursor_wm_info.max_wm; | ||
3330 | |||
3144 | reg_value = I915_READ(WM0_PIPEB_ILK); | 3331 | reg_value = I915_READ(WM0_PIPEB_ILK); |
3145 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); | 3332 | reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); |
3146 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | | 3333 | reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | |
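Cursor A and cursor B above run through the same large-buffer arithmetic. Purely as a sketch of how that shared calculation reads in isolation (the helper name and the wm_params struct are invented for illustration, not the driver's intel_watermark_params API):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define CURSOR_WIDTH        64   /* cursor surface width in pixels */

    /* Invented stand-in for the driver's watermark parameter struct. */
    struct wm_params {
        int cacheline_size;
        int guard_size;
        int max_wm;
    };

    /* latency in ns, clock in kHz, htotal in pixels, pixel_size in bytes */
    static int cursor_lp0_wm(int latency_ns, int clock, int htotal,
                             int pixel_size, const struct wm_params *p)
    {
        int line_time_us = (htotal * 1000) / clock;
        /* ns/us then divide, mirroring the driver, to preserve precision */
        int line_count = (latency_ns / line_time_us + 1000) / 1000;
        int entries = line_count * CURSOR_WIDTH * pixel_size;
        int wm;

        entries = DIV_ROUND_UP(entries, p->cacheline_size);
        wm = entries + p->guard_size;

        return wm > p->max_wm ? p->max_wm : wm;
    }

    int main(void)
    {
        struct wm_params p = { .cacheline_size = 64, .guard_size = 2, .max_wm = 16 };

        /* 1300 ns LP0 cursor latency, hypothetical 135 MHz clock, htotal 1688, 4 bytes/px */
        printf("cursor LP0 watermark: %d\n",
               cursor_lp0_wm(1300, 135000, 1688, 4, &p));
        return 0;
    }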
@@ -3155,12 +3342,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3155 | * display plane is used. | 3342 | * display plane is used. |
3156 | */ | 3343 | */ |
3157 | if (!planea_clock || !planeb_clock) { | 3344 | if (!planea_clock || !planeb_clock) { |
3158 | int line_count; | 3345 | |
3159 | /* Read the self-refresh latency. The unit is 0.5us */ | 3346 | /* Read the self-refresh latency. The unit is 0.5us */ |
3160 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | 3347 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; |
3161 | 3348 | ||
3162 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3349 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
3163 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 3350 | line_time_us = ((sr_htotal * 1000) / sr_clock); |
3164 | 3351 | ||
3165 | /* Use ns/us then divide to preserve precision */ | 3352 | /* Use ns/us then divide to preserve precision */ |
3166 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | 3353 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) |
@@ -3169,14 +3356,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3169 | /* calculate the self-refresh watermark for display plane */ | 3356 | /* calculate the self-refresh watermark for display plane */ |
3170 | entries_required = line_count * sr_hdisplay * pixel_size; | 3357 | entries_required = line_count * sr_hdisplay * pixel_size; |
3171 | entries_required = DIV_ROUND_UP(entries_required, | 3358 | entries_required = DIV_ROUND_UP(entries_required, |
3172 | ironlake_display_srwm_info.cacheline_size); | 3359 | ironlake_display_srwm_info.cacheline_size); |
3173 | sr_wm = entries_required + | 3360 | sr_wm = entries_required + |
3174 | ironlake_display_srwm_info.guard_size; | 3361 | ironlake_display_srwm_info.guard_size; |
3175 | 3362 | ||
3176 | /* calculate the self-refresh watermark for display cursor */ | 3363 | /* calculate the self-refresh watermark for display cursor */ |
3177 | entries_required = line_count * pixel_size * 64; | 3364 | entries_required = line_count * pixel_size * 64; |
3178 | entries_required = DIV_ROUND_UP(entries_required, | 3365 | entries_required = DIV_ROUND_UP(entries_required, |
3179 | ironlake_cursor_srwm_info.cacheline_size); | 3366 | ironlake_cursor_srwm_info.cacheline_size); |
3180 | cursor_wm = entries_required + | 3367 | cursor_wm = entries_required + |
3181 | ironlake_cursor_srwm_info.guard_size; | 3368 | ironlake_cursor_srwm_info.guard_size; |
3182 | 3369 | ||
@@ -3220,6 +3407,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3220 | * bytes per pixel | 3407 | * bytes per pixel |
3221 | * where | 3408 | * where |
3222 | * line time = htotal / dotclock | 3409 | * line time = htotal / dotclock |
3410 | * surface width = hdisplay for normal plane and 64 for cursor | ||
3223 | * and latency is assumed to be high, as above. | 3411 | * and latency is assumed to be high, as above. |
3224 | * | 3412 | * |
3225 | * The final value programmed to the register should always be rounded up, | 3413 | * The final value programmed to the register should always be rounded up, |
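The comment block above states the watermark recipe in prose: express the latency in whole lines, multiply by surface width and bytes per pixel, and round up to FIFO lines. A worked example with hypothetical mode and latency numbers, for illustration only:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Hypothetical 1024x768@60 mode and memory parameters, for illustration only. */
        int htotal = 1344, dotclock = 65000;   /* pixels, kHz */
        int latency_ns = 12000;                /* assumed self-refresh exit latency */
        int surface_width = 1024, bpp = 4;     /* hdisplay for a plane, 64 for a cursor */
        int cacheline = 64;                    /* FIFO line size in bytes */

        int line_time_us = (htotal * 1000) / dotclock;         /* line time = htotal / dotclock */
        int lines = (latency_ns / line_time_us + 1000) / 1000; /* latency expressed in lines */
        int bytes = lines * surface_width * bpp;               /* data consumed while waiting */
        int entries = DIV_ROUND_UP(bytes, cacheline);          /* rounded up, as the comment says */

        printf("%d us/line, %d line(s), %d bytes, %d FIFO entries\n",
               line_time_us, lines, bytes, entries);
        return 0;
    }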
@@ -3236,6 +3424,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3236 | int sr_hdisplay = 0; | 3424 | int sr_hdisplay = 0; |
3237 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | 3425 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; |
3238 | int enabled = 0, pixel_size = 0; | 3426 | int enabled = 0, pixel_size = 0; |
3427 | int sr_htotal = 0; | ||
3239 | 3428 | ||
3240 | if (!dev_priv->display.update_wm) | 3429 | if (!dev_priv->display.update_wm) |
3241 | return; | 3430 | return; |
@@ -3256,6 +3445,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3256 | } | 3445 | } |
3257 | sr_hdisplay = crtc->mode.hdisplay; | 3446 | sr_hdisplay = crtc->mode.hdisplay; |
3258 | sr_clock = crtc->mode.clock; | 3447 | sr_clock = crtc->mode.clock; |
3448 | sr_htotal = crtc->mode.htotal; | ||
3259 | if (crtc->fb) | 3449 | if (crtc->fb) |
3260 | pixel_size = crtc->fb->bits_per_pixel / 8; | 3450 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3261 | else | 3451 | else |
@@ -3267,7 +3457,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3267 | return; | 3457 | return; |
3268 | 3458 | ||
3269 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 3459 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, |
3270 | sr_hdisplay, pixel_size); | 3460 | sr_hdisplay, sr_htotal, pixel_size); |
3271 | } | 3461 | } |
3272 | 3462 | ||
3273 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 3463 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
@@ -3386,6 +3576,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3386 | return -EINVAL; | 3576 | return -EINVAL; |
3387 | } | 3577 | } |
3388 | 3578 | ||
3579 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
3580 | intel_crtc_update_cursor(crtc); | ||
3581 | |||
3389 | if (is_lvds && dev_priv->lvds_downclock_avail) { | 3582 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
3390 | has_reduced_clock = limit->find_pll(limit, crtc, | 3583 | has_reduced_clock = limit->find_pll(limit, crtc, |
3391 | dev_priv->lvds_downclock, | 3584 | dev_priv->lvds_downclock, |
@@ -3452,7 +3645,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3452 | temp |= PIPE_8BPC; | 3645 | temp |= PIPE_8BPC; |
3453 | else | 3646 | else |
3454 | temp |= PIPE_6BPC; | 3647 | temp |= PIPE_6BPC; |
3455 | } else if (is_edp) { | 3648 | } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { |
3456 | switch (dev_priv->edp_bpp/3) { | 3649 | switch (dev_priv->edp_bpp/3) { |
3457 | case 8: | 3650 | case 8: |
3458 | temp |= PIPE_8BPC; | 3651 | temp |= PIPE_8BPC; |
@@ -3695,6 +3888,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3695 | udelay(150); | 3888 | udelay(150); |
3696 | } | 3889 | } |
3697 | 3890 | ||
3891 | if (HAS_PCH_SPLIT(dev)) { | ||
3892 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3893 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
3894 | } | ||
3895 | |||
3698 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 3896 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
3699 | * This is an exception to the general rule that mode_set doesn't turn | 3897 | * This is an exception to the general rule that mode_set doesn't turn |
3700 | * things on. | 3898 | * things on. |
@@ -3741,11 +3939,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3741 | } else | 3939 | } else |
3742 | lvds |= LVDS_ENABLE_DITHER; | 3940 | lvds |= LVDS_ENABLE_DITHER; |
3743 | } else { | 3941 | } else { |
3744 | if (HAS_PCH_SPLIT(dev)) { | 3942 | if (!HAS_PCH_SPLIT(dev)) { |
3745 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3746 | pipeconf &= ~PIPE_DITHER_TYPE_MASK; | ||
3747 | } else | ||
3748 | lvds &= ~LVDS_ENABLE_DITHER; | 3943 | lvds &= ~LVDS_ENABLE_DITHER; |
3944 | } | ||
3749 | } | 3945 | } |
3750 | } | 3946 | } |
3751 | I915_WRITE(lvds_reg, lvds); | 3947 | I915_WRITE(lvds_reg, lvds); |
@@ -3921,6 +4117,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
3921 | } | 4117 | } |
3922 | } | 4118 | } |
3923 | 4119 | ||
4120 | /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ | ||
4121 | static void intel_crtc_update_cursor(struct drm_crtc *crtc) | ||
4122 | { | ||
4123 | struct drm_device *dev = crtc->dev; | ||
4124 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4125 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4126 | int pipe = intel_crtc->pipe; | ||
4127 | int x = intel_crtc->cursor_x; | ||
4128 | int y = intel_crtc->cursor_y; | ||
4129 | uint32_t base, pos; | ||
4130 | bool visible; | ||
4131 | |||
4132 | pos = 0; | ||
4133 | |||
4134 | if (crtc->fb) { | ||
4135 | base = intel_crtc->cursor_addr; | ||
4136 | if (x > (int) crtc->fb->width) | ||
4137 | base = 0; | ||
4138 | |||
4139 | if (y > (int) crtc->fb->height) | ||
4140 | base = 0; | ||
4141 | } else | ||
4142 | base = 0; | ||
4143 | |||
4144 | if (x < 0) { | ||
4145 | if (x + intel_crtc->cursor_width < 0) | ||
4146 | base = 0; | ||
4147 | |||
4148 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | ||
4149 | x = -x; | ||
4150 | } | ||
4151 | pos |= x << CURSOR_X_SHIFT; | ||
4152 | |||
4153 | if (y < 0) { | ||
4154 | if (y + intel_crtc->cursor_height < 0) | ||
4155 | base = 0; | ||
4156 | |||
4157 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | ||
4158 | y = -y; | ||
4159 | } | ||
4160 | pos |= y << CURSOR_Y_SHIFT; | ||
4161 | |||
4162 | visible = base != 0; | ||
4163 | if (!visible && !intel_crtc->cursor_visble) | ||
4164 | return; | ||
4165 | |||
4166 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); | ||
4167 | if (intel_crtc->cursor_visble != visible) { | ||
4168 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | ||
4169 | if (base) { | ||
4170 | /* Hooray for CUR*CNTR differences */ | ||
4171 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4172 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | ||
4173 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
4174 | cntl |= pipe << 28; /* Connect to correct pipe */ | ||
4175 | } else { | ||
4176 | cntl &= ~(CURSOR_FORMAT_MASK); | ||
4177 | cntl |= CURSOR_ENABLE; | ||
4178 | cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; | ||
4179 | } | ||
4180 | } else { | ||
4181 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4182 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
4183 | cntl |= CURSOR_MODE_DISABLE; | ||
4184 | } else { | ||
4185 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | ||
4186 | } | ||
4187 | } | ||
4188 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | ||
4189 | |||
4190 | intel_crtc->cursor_visble = visible; | ||
4191 | } | ||
4192 | /* and commit changes on next vblank */ | ||
4193 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | ||
4194 | |||
4195 | if (visible) | ||
4196 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | ||
4197 | } | ||
4198 | |||
3924 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 4199 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
3925 | struct drm_file *file_priv, | 4200 | struct drm_file *file_priv, |
3926 | uint32_t handle, | 4201 | uint32_t handle, |
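The new intel_crtc_update_cursor() above packs a possibly negative cursor position into a sign-plus-magnitude field per axis. The encoding on its own, with placeholder shift and sign-bit values rather than the real CUR*POS register layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder field layout, not the real cursor position register definition. */
    #define CUR_POS_SIGN  (1u << 15)
    #define CUR_X_SHIFT   0
    #define CUR_Y_SHIFT   16

    static uint32_t encode_cursor_pos(int x, int y)
    {
        uint32_t pos = 0;

        if (x < 0) {                              /* store |x| and set the X sign flag */
            pos |= CUR_POS_SIGN << CUR_X_SHIFT;
            x = -x;
        }
        pos |= (uint32_t)x << CUR_X_SHIFT;

        if (y < 0) {                              /* likewise for Y */
            pos |= CUR_POS_SIGN << CUR_Y_SHIFT;
            y = -y;
        }
        pos |= (uint32_t)y << CUR_Y_SHIFT;

        return pos;
    }

    int main(void)
    {
        printf("(100, -20) -> 0x%08x\n", encode_cursor_pos(100, -20));
        return 0;
    }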
@@ -3931,11 +4206,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3931 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4206 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3932 | struct drm_gem_object *bo; | 4207 | struct drm_gem_object *bo; |
3933 | struct drm_i915_gem_object *obj_priv; | 4208 | struct drm_i915_gem_object *obj_priv; |
3934 | int pipe = intel_crtc->pipe; | 4209 | uint32_t addr; |
3935 | uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; | ||
3936 | uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; | ||
3937 | uint32_t temp = I915_READ(control); | ||
3938 | size_t addr; | ||
3939 | int ret; | 4210 | int ret; |
3940 | 4211 | ||
3941 | DRM_DEBUG_KMS("\n"); | 4212 | DRM_DEBUG_KMS("\n"); |
@@ -3943,12 +4214,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3943 | /* if we want to turn off the cursor ignore width and height */ | 4214 | /* if we want to turn off the cursor ignore width and height */ |
3944 | if (!handle) { | 4215 | if (!handle) { |
3945 | DRM_DEBUG_KMS("cursor off\n"); | 4216 | DRM_DEBUG_KMS("cursor off\n"); |
3946 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
3947 | temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
3948 | temp |= CURSOR_MODE_DISABLE; | ||
3949 | } else { | ||
3950 | temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | ||
3951 | } | ||
3952 | addr = 0; | 4217 | addr = 0; |
3953 | bo = NULL; | 4218 | bo = NULL; |
3954 | mutex_lock(&dev->struct_mutex); | 4219 | mutex_lock(&dev->struct_mutex); |
@@ -3990,7 +4255,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3990 | 4255 | ||
3991 | addr = obj_priv->gtt_offset; | 4256 | addr = obj_priv->gtt_offset; |
3992 | } else { | 4257 | } else { |
3993 | ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | 4258 | ret = i915_gem_attach_phys_object(dev, bo, |
4259 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | ||
3994 | if (ret) { | 4260 | if (ret) { |
3995 | DRM_ERROR("failed to attach phys object\n"); | 4261 | DRM_ERROR("failed to attach phys object\n"); |
3996 | goto fail_locked; | 4262 | goto fail_locked; |
@@ -4001,21 +4267,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4001 | if (!IS_I9XX(dev)) | 4267 | if (!IS_I9XX(dev)) |
4002 | I915_WRITE(CURSIZE, (height << 12) | width); | 4268 | I915_WRITE(CURSIZE, (height << 12) | width); |
4003 | 4269 | ||
4004 | /* Hooray for CUR*CNTR differences */ | ||
4005 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4006 | temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | ||
4007 | temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
4008 | temp |= (pipe << 28); /* Connect to correct pipe */ | ||
4009 | } else { | ||
4010 | temp &= ~(CURSOR_FORMAT_MASK); | ||
4011 | temp |= CURSOR_ENABLE; | ||
4012 | temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; | ||
4013 | } | ||
4014 | |||
4015 | finish: | 4270 | finish: |
4016 | I915_WRITE(control, temp); | ||
4017 | I915_WRITE(base, addr); | ||
4018 | |||
4019 | if (intel_crtc->cursor_bo) { | 4271 | if (intel_crtc->cursor_bo) { |
4020 | if (dev_priv->info->cursor_needs_physical) { | 4272 | if (dev_priv->info->cursor_needs_physical) { |
4021 | if (intel_crtc->cursor_bo != bo) | 4273 | if (intel_crtc->cursor_bo != bo) |
@@ -4029,6 +4281,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4029 | 4281 | ||
4030 | intel_crtc->cursor_addr = addr; | 4282 | intel_crtc->cursor_addr = addr; |
4031 | intel_crtc->cursor_bo = bo; | 4283 | intel_crtc->cursor_bo = bo; |
4284 | intel_crtc->cursor_width = width; | ||
4285 | intel_crtc->cursor_height = height; | ||
4286 | |||
4287 | intel_crtc_update_cursor(crtc); | ||
4032 | 4288 | ||
4033 | return 0; | 4289 | return 0; |
4034 | fail_unpin: | 4290 | fail_unpin: |
@@ -4042,34 +4298,12 @@ fail: | |||
4042 | 4298 | ||
4043 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 4299 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
4044 | { | 4300 | { |
4045 | struct drm_device *dev = crtc->dev; | ||
4046 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4047 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4301 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4048 | struct intel_framebuffer *intel_fb; | ||
4049 | int pipe = intel_crtc->pipe; | ||
4050 | uint32_t temp = 0; | ||
4051 | uint32_t adder; | ||
4052 | |||
4053 | if (crtc->fb) { | ||
4054 | intel_fb = to_intel_framebuffer(crtc->fb); | ||
4055 | intel_mark_busy(dev, intel_fb->obj); | ||
4056 | } | ||
4057 | |||
4058 | if (x < 0) { | ||
4059 | temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | ||
4060 | x = -x; | ||
4061 | } | ||
4062 | if (y < 0) { | ||
4063 | temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | ||
4064 | y = -y; | ||
4065 | } | ||
4066 | 4302 | ||
4067 | temp |= x << CURSOR_X_SHIFT; | 4303 | intel_crtc->cursor_x = x; |
4068 | temp |= y << CURSOR_Y_SHIFT; | 4304 | intel_crtc->cursor_y = y; |
4069 | 4305 | ||
4070 | adder = intel_crtc->cursor_addr; | 4306 | intel_crtc_update_cursor(crtc); |
4071 | I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); | ||
4072 | I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); | ||
4073 | 4307 | ||
4074 | return 0; | 4308 | return 0; |
4075 | } | 4309 | } |
@@ -4413,7 +4647,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4413 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 4647 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
4414 | 4648 | ||
4415 | /* Unlock panel regs */ | 4649 | /* Unlock panel regs */ |
4416 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4650 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
4651 | PANEL_UNLOCK_REGS); | ||
4417 | 4652 | ||
4418 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 4653 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4419 | I915_WRITE(dpll_reg, dpll); | 4654 | I915_WRITE(dpll_reg, dpll); |
@@ -4456,7 +4691,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4456 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); | 4691 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
4457 | 4692 | ||
4458 | /* Unlock panel regs */ | 4693 | /* Unlock panel regs */ |
4459 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); | 4694 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
4695 | PANEL_UNLOCK_REGS); | ||
4460 | 4696 | ||
4461 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 4697 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
4462 | I915_WRITE(dpll_reg, dpll); | 4698 | I915_WRITE(dpll_reg, dpll); |
@@ -4698,7 +4934,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4698 | struct drm_gem_object *obj; | 4934 | struct drm_gem_object *obj; |
4699 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4935 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4700 | struct intel_unpin_work *work; | 4936 | struct intel_unpin_work *work; |
4701 | unsigned long flags; | 4937 | unsigned long flags, offset; |
4702 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4938 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
4703 | int ret, pipesrc; | 4939 | int ret, pipesrc; |
4704 | u32 flip_mask; | 4940 | u32 flip_mask; |
@@ -4730,27 +4966,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4730 | 4966 | ||
4731 | mutex_lock(&dev->struct_mutex); | 4967 | mutex_lock(&dev->struct_mutex); |
4732 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4968 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4733 | if (ret != 0) { | 4969 | if (ret) |
4734 | mutex_unlock(&dev->struct_mutex); | 4970 | goto cleanup_work; |
4735 | |||
4736 | spin_lock_irqsave(&dev->event_lock, flags); | ||
4737 | intel_crtc->unpin_work = NULL; | ||
4738 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
4739 | |||
4740 | kfree(work); | ||
4741 | |||
4742 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
4743 | to_intel_bo(obj)); | ||
4744 | return ret; | ||
4745 | } | ||
4746 | 4971 | ||
4747 | /* Reference the objects for the scheduled work. */ | 4972 | /* Reference the objects for the scheduled work. */ |
4748 | drm_gem_object_reference(work->old_fb_obj); | 4973 | drm_gem_object_reference(work->old_fb_obj); |
4749 | drm_gem_object_reference(obj); | 4974 | drm_gem_object_reference(obj); |
4750 | 4975 | ||
4751 | crtc->fb = fb; | 4976 | crtc->fb = fb; |
4752 | i915_gem_object_flush_write_domain(obj); | 4977 | ret = i915_gem_object_flush_write_domain(obj); |
4753 | drm_vblank_get(dev, intel_crtc->pipe); | 4978 | if (ret) |
4979 | goto cleanup_objs; | ||
4980 | |||
4981 | ret = drm_vblank_get(dev, intel_crtc->pipe); | ||
4982 | if (ret) | ||
4983 | goto cleanup_objs; | ||
4984 | |||
4754 | obj_priv = to_intel_bo(obj); | 4985 | obj_priv = to_intel_bo(obj); |
4755 | atomic_inc(&obj_priv->pending_flip); | 4986 | atomic_inc(&obj_priv->pending_flip); |
4756 | work->pending_flip_obj = obj; | 4987 | work->pending_flip_obj = obj; |
@@ -4765,19 +4996,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4765 | while (I915_READ(ISR) & flip_mask) | 4996 | while (I915_READ(ISR) & flip_mask) |
4766 | ; | 4997 | ; |
4767 | 4998 | ||
4999 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | ||
5000 | offset = obj_priv->gtt_offset; | ||
5001 | offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); | ||
5002 | |||
4768 | BEGIN_LP_RING(4); | 5003 | BEGIN_LP_RING(4); |
4769 | if (IS_I965G(dev)) { | 5004 | if (IS_I965G(dev)) { |
4770 | OUT_RING(MI_DISPLAY_FLIP | | 5005 | OUT_RING(MI_DISPLAY_FLIP | |
4771 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5006 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
4772 | OUT_RING(fb->pitch); | 5007 | OUT_RING(fb->pitch); |
4773 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 5008 | OUT_RING(offset | obj_priv->tiling_mode); |
4774 | pipesrc = I915_READ(pipesrc_reg); | 5009 | pipesrc = I915_READ(pipesrc_reg); |
4775 | OUT_RING(pipesrc & 0x0fff0fff); | 5010 | OUT_RING(pipesrc & 0x0fff0fff); |
4776 | } else { | 5011 | } else { |
4777 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5012 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
4778 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5013 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
4779 | OUT_RING(fb->pitch); | 5014 | OUT_RING(fb->pitch); |
4780 | OUT_RING(obj_priv->gtt_offset); | 5015 | OUT_RING(offset); |
4781 | OUT_RING(MI_NOOP); | 5016 | OUT_RING(MI_NOOP); |
4782 | } | 5017 | } |
4783 | ADVANCE_LP_RING(); | 5018 | ADVANCE_LP_RING(); |
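The flip path now folds the CRTC's (x, y) origin into the scan-out offset so that two CRTCs sharing one framebuffer each flip to their own region. The same arithmetic with invented geometry:

    #include <stdio.h>

    int main(void)
    {
        unsigned long gtt_offset = 0x100000;  /* hypothetical object offset in the GTT */
        int pitch = 8192;                     /* bytes per scanline of the shared framebuffer */
        int bits_per_pixel = 32;
        int crtc_x = 1920, crtc_y = 0;        /* second CRTC scans out to the right of the first */

        unsigned long offset = gtt_offset +
                               (unsigned long)crtc_y * pitch +
                               (unsigned long)crtc_x * (bits_per_pixel / 8);

        printf("flip base address: 0x%lx\n", offset);   /* 0x100000 + 1920 * 4 = 0x101e00 */
        return 0;
    }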
@@ -4787,6 +5022,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4787 | trace_i915_flip_request(intel_crtc->plane, obj); | 5022 | trace_i915_flip_request(intel_crtc->plane, obj); |
4788 | 5023 | ||
4789 | return 0; | 5024 | return 0; |
5025 | |||
5026 | cleanup_objs: | ||
5027 | drm_gem_object_unreference(work->old_fb_obj); | ||
5028 | drm_gem_object_unreference(obj); | ||
5029 | cleanup_work: | ||
5030 | mutex_unlock(&dev->struct_mutex); | ||
5031 | |||
5032 | spin_lock_irqsave(&dev->event_lock, flags); | ||
5033 | intel_crtc->unpin_work = NULL; | ||
5034 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
5035 | |||
5036 | kfree(work); | ||
5037 | |||
5038 | return ret; | ||
4790 | } | 5039 | } |
4791 | 5040 | ||
4792 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | 5041 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { |
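The error handling in intel_crtc_page_flip() is reshaped above into the usual goto-unwind pattern: each failure jumps to a label that releases only what has been acquired so far. A generic, self-contained sketch of the idiom with stub resources (none of these names come from the driver):

    #include <stdio.h>

    /* Stub resources standing in for the lock, the pinned buffer and the vblank
     * reference; each just reports what it does. get_vblank() fails on purpose. */
    static int  take_lock(void)    { puts("lock");       return 0; }
    static void drop_lock(void)    { puts("unlock");     }
    static int  pin_buffer(void)   { puts("pin");        return 0; }
    static void unpin_buffer(void) { puts("unpin");      }
    static int  get_vblank(void)   { puts("vblank ref"); return -1; }

    static int do_flip_sketch(void)
    {
        int ret;

        ret = take_lock();
        if (ret)
            return ret;

        ret = pin_buffer();
        if (ret)
            goto cleanup_lock;

        ret = get_vblank();
        if (ret)
            goto cleanup_pin;

        return 0;           /* success: resources stay held for the flip */

    cleanup_pin:
        unpin_buffer();     /* undo in reverse order of acquisition */
    cleanup_lock:
        drop_lock();
        return ret;
    }

    int main(void)
    {
        printf("do_flip_sketch() = %d\n", do_flip_sketch());
        return 0;
    }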
@@ -4912,19 +5161,26 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4912 | { | 5161 | { |
4913 | struct drm_i915_private *dev_priv = dev->dev_private; | 5162 | struct drm_i915_private *dev_priv = dev->dev_private; |
4914 | struct drm_encoder *encoder; | 5163 | struct drm_encoder *encoder; |
5164 | bool dpd_is_edp = false; | ||
4915 | 5165 | ||
4916 | intel_crt_init(dev); | ||
4917 | |||
4918 | /* Set up integrated LVDS */ | ||
4919 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 5166 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
4920 | intel_lvds_init(dev); | 5167 | intel_lvds_init(dev); |
4921 | 5168 | ||
4922 | if (HAS_PCH_SPLIT(dev)) { | 5169 | if (HAS_PCH_SPLIT(dev)) { |
4923 | int found; | 5170 | dpd_is_edp = intel_dpd_is_edp(dev); |
4924 | 5171 | ||
4925 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | 5172 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) |
4926 | intel_dp_init(dev, DP_A); | 5173 | intel_dp_init(dev, DP_A); |
4927 | 5174 | ||
5175 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | ||
5176 | intel_dp_init(dev, PCH_DP_D); | ||
5177 | } | ||
5178 | |||
5179 | intel_crt_init(dev); | ||
5180 | |||
5181 | if (HAS_PCH_SPLIT(dev)) { | ||
5182 | int found; | ||
5183 | |||
4928 | if (I915_READ(HDMIB) & PORT_DETECTED) { | 5184 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
4929 | /* PCH SDVOB is multiplexed with HDMIB */ | 5185 | /* PCH SDVOB is multiplexed with HDMIB */ |
4930 | found = intel_sdvo_init(dev, PCH_SDVOB); | 5186 | found = intel_sdvo_init(dev, PCH_SDVOB); |
@@ -4943,7 +5199,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4943 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | 5199 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
4944 | intel_dp_init(dev, PCH_DP_C); | 5200 | intel_dp_init(dev, PCH_DP_C); |
4945 | 5201 | ||
4946 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 5202 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
4947 | intel_dp_init(dev, PCH_DP_D); | 5203 | intel_dp_init(dev, PCH_DP_D); |
4948 | 5204 | ||
4949 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 5205 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
@@ -5352,6 +5608,26 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5352 | (I915_READ(DISP_ARB_CTL) | | 5608 | (I915_READ(DISP_ARB_CTL) | |
5353 | DISP_FBC_WM_DIS)); | 5609 | DISP_FBC_WM_DIS)); |
5354 | } | 5610 | } |
5611 | /* | ||
5612 | * Based on the hardware documentation, the following bits | ||
5613 | * should be set unconditionally in order to enable FBC. | ||
5614 | * The bit 22 of 0x42000 | ||
5615 | * The bit 22 of 0x42004 | ||
5616 | * The bit 7,8,9 of 0x42020. | ||
5617 | */ | ||
5618 | if (IS_IRONLAKE_M(dev)) { | ||
5619 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
5620 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
5621 | ILK_FBCQ_DIS); | ||
5622 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
5623 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
5624 | ILK_DPARB_GATE); | ||
5625 | I915_WRITE(ILK_DSPCLK_GATE, | ||
5626 | I915_READ(ILK_DSPCLK_GATE) | | ||
5627 | ILK_DPFC_DIS1 | | ||
5628 | ILK_DPFC_DIS2 | | ||
5629 | ILK_CLK_FBC); | ||
5630 | } | ||
5355 | return; | 5631 | return; |
5356 | } else if (IS_G4X(dev)) { | 5632 | } else if (IS_G4X(dev)) { |
5357 | uint32_t dspclk_gate; | 5633 | uint32_t dspclk_gate; |
@@ -5430,7 +5706,11 @@ static void intel_init_display(struct drm_device *dev) | |||
5430 | dev_priv->display.dpms = i9xx_crtc_dpms; | 5706 | dev_priv->display.dpms = i9xx_crtc_dpms; |
5431 | 5707 | ||
5432 | if (I915_HAS_FBC(dev)) { | 5708 | if (I915_HAS_FBC(dev)) { |
5433 | if (IS_GM45(dev)) { | 5709 | if (IS_IRONLAKE_M(dev)) { |
5710 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | ||
5711 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | ||
5712 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | ||
5713 | } else if (IS_GM45(dev)) { | ||
5434 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | 5714 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
5435 | dev_priv->display.enable_fbc = g4x_enable_fbc; | 5715 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
5436 | dev_priv->display.disable_fbc = g4x_disable_fbc; | 5716 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
@@ -5511,6 +5791,66 @@ static void intel_init_display(struct drm_device *dev) | |||
5511 | } | 5791 | } |
5512 | } | 5792 | } |
5513 | 5793 | ||
5794 | /* | ||
5795 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, | ||
5796 | * resume, or other times. This quirk makes sure that's the case for | ||
5797 | * affected systems. | ||
5798 | */ | ||
5799 | static void quirk_pipea_force (struct drm_device *dev) | ||
5800 | { | ||
5801 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5802 | |||
5803 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; | ||
5804 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | ||
5805 | } | ||
5806 | |||
5807 | struct intel_quirk { | ||
5808 | int device; | ||
5809 | int subsystem_vendor; | ||
5810 | int subsystem_device; | ||
5811 | void (*hook)(struct drm_device *dev); | ||
5812 | }; | ||
5813 | |||
5814 | struct intel_quirk intel_quirks[] = { | ||
5815 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | ||
5816 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | ||
5817 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | ||
5818 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | ||
5819 | |||
5820 | /* Thinkpad R31 needs pipe A force quirk */ | ||
5821 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | ||
5822 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | ||
5823 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | ||
5824 | |||
5825 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ | ||
5826 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, | ||
5827 | /* ThinkPad X40 needs pipe A force quirk */ | ||
5828 | |||
5829 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | ||
5830 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | ||
5831 | |||
5832 | /* 855 & before need to leave pipe A & dpll A up */ | ||
5833 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
5834 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
5835 | }; | ||
5836 | |||
5837 | static void intel_init_quirks(struct drm_device *dev) | ||
5838 | { | ||
5839 | struct pci_dev *d = dev->pdev; | ||
5840 | int i; | ||
5841 | |||
5842 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | ||
5843 | struct intel_quirk *q = &intel_quirks[i]; | ||
5844 | |||
5845 | if (d->device == q->device && | ||
5846 | (d->subsystem_vendor == q->subsystem_vendor || | ||
5847 | q->subsystem_vendor == PCI_ANY_ID) && | ||
5848 | (d->subsystem_device == q->subsystem_device || | ||
5849 | q->subsystem_device == PCI_ANY_ID)) | ||
5850 | q->hook(dev); | ||
5851 | } | ||
5852 | } | ||
5853 | |||
5514 | void intel_modeset_init(struct drm_device *dev) | 5854 | void intel_modeset_init(struct drm_device *dev) |
5515 | { | 5855 | { |
5516 | struct drm_i915_private *dev_priv = dev->dev_private; | 5856 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -5523,6 +5863,8 @@ void intel_modeset_init(struct drm_device *dev) | |||
5523 | 5863 | ||
5524 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | 5864 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
5525 | 5865 | ||
5866 | intel_init_quirks(dev); | ||
5867 | |||
5526 | intel_init_display(dev); | 5868 | intel_init_display(dev); |
5527 | 5869 | ||
5528 | if (IS_I965G(dev)) { | 5870 | if (IS_I965G(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1aac59e83bff..40be1fa65be1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #define DP_LINK_CONFIGURATION_SIZE 9 | 43 | #define DP_LINK_CONFIGURATION_SIZE 9 |
44 | 44 | ||
45 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) | 45 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) |
46 | #define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) | ||
46 | 47 | ||
47 | struct intel_dp_priv { | 48 | struct intel_dp_priv { |
48 | uint32_t output_reg; | 49 | uint32_t output_reg; |
@@ -56,6 +57,7 @@ struct intel_dp_priv { | |||
56 | struct intel_encoder *intel_encoder; | 57 | struct intel_encoder *intel_encoder; |
57 | struct i2c_adapter adapter; | 58 | struct i2c_adapter adapter; |
58 | struct i2c_algo_dp_aux_data algo; | 59 | struct i2c_algo_dp_aux_data algo; |
60 | bool is_pch_edp; | ||
59 | }; | 61 | }; |
60 | 62 | ||
61 | static void | 63 | static void |
@@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev, | |||
128 | struct intel_encoder *intel_encoder, int pixel_clock) | 130 | struct intel_encoder *intel_encoder, int pixel_clock) |
129 | { | 131 | { |
130 | struct drm_i915_private *dev_priv = dev->dev_private; | 132 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
131 | 134 | ||
132 | if (IS_eDP(intel_encoder)) | 135 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) |
133 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 136 | return (pixel_clock * dev_priv->edp_bpp) / 8; |
134 | else | 137 | else |
135 | return pixel_clock * 3; | 138 | return pixel_clock * 3; |
@@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
147 | { | 150 | { |
148 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 151 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
149 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 152 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
153 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
154 | struct drm_device *dev = connector->dev; | ||
155 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
150 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 156 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
151 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 157 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
152 | 158 | ||
159 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | ||
160 | dev_priv->panel_fixed_mode) { | ||
161 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | ||
162 | return MODE_PANEL; | ||
163 | |||
164 | if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) | ||
165 | return MODE_PANEL; | ||
166 | } | ||
167 | |||
153 | /* only refuse the mode on non-eDP since we have seen some weird eDP panels | 168 | /* only refuse the mode on non-eDP since we have seen some weird eDP panels |
154 | which are outside spec tolerances but somehow work by magic */ | 169 | which are outside spec tolerances but somehow work by magic */ |
155 | if (!IS_eDP(intel_encoder) && | 170 | if (!IS_eDP(intel_encoder) && |
@@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
508 | { | 523 | { |
509 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 524 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
510 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 525 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; |
526 | struct drm_device *dev = encoder->dev; | ||
527 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
511 | int lane_count, clock; | 528 | int lane_count, clock; |
512 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); | 529 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); |
513 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; | 530 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; |
514 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 531 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
515 | 532 | ||
533 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | ||
534 | dev_priv->panel_fixed_mode) { | ||
535 | struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; | ||
536 | |||
537 | adjusted_mode->hdisplay = fixed_mode->hdisplay; | ||
538 | adjusted_mode->hsync_start = fixed_mode->hsync_start; | ||
539 | adjusted_mode->hsync_end = fixed_mode->hsync_end; | ||
540 | adjusted_mode->htotal = fixed_mode->htotal; | ||
541 | |||
542 | adjusted_mode->vdisplay = fixed_mode->vdisplay; | ||
543 | adjusted_mode->vsync_start = fixed_mode->vsync_start; | ||
544 | adjusted_mode->vsync_end = fixed_mode->vsync_end; | ||
545 | adjusted_mode->vtotal = fixed_mode->vtotal; | ||
546 | |||
547 | adjusted_mode->clock = fixed_mode->clock; | ||
548 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
549 | |||
550 | /* | ||
551 | * The mode->clock is used to calculate the Data & Link M/N | ||
552 | * values of the pipe. For eDP the fixed panel clock should be used. | ||
553 | */ | ||
554 | mode->clock = dev_priv->panel_fixed_mode->clock; | ||
555 | } | ||
556 | |||
516 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 557 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
517 | for (clock = 0; clock <= max_clock; clock++) { | 558 | for (clock = 0; clock <= max_clock; clock++) { |
518 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 559 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
@@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
531 | } | 572 | } |
532 | } | 573 | } |
533 | 574 | ||
534 | if (IS_eDP(intel_encoder)) { | 575 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
535 | /* okay we failed just pick the highest */ | 576 | /* okay we failed just pick the highest */ |
536 | dp_priv->lane_count = max_lane_count; | 577 | dp_priv->lane_count = max_lane_count; |
537 | dp_priv->link_bw = bws[max_clock]; | 578 | dp_priv->link_bw = bws[max_clock]; |
@@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den) | |||
563 | } | 604 | } |
564 | 605 | ||
565 | static void | 606 | static void |
566 | intel_dp_compute_m_n(int bytes_per_pixel, | 607 | intel_dp_compute_m_n(int bpp, |
567 | int nlanes, | 608 | int nlanes, |
568 | int pixel_clock, | 609 | int pixel_clock, |
569 | int link_clock, | 610 | int link_clock, |
570 | struct intel_dp_m_n *m_n) | 611 | struct intel_dp_m_n *m_n) |
571 | { | 612 | { |
572 | m_n->tu = 64; | 613 | m_n->tu = 64; |
573 | m_n->gmch_m = pixel_clock * bytes_per_pixel; | 614 | m_n->gmch_m = (pixel_clock * bpp) >> 3; |
574 | m_n->gmch_n = link_clock * nlanes; | 615 | m_n->gmch_n = link_clock * nlanes; |
575 | intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 616 | intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
576 | m_n->link_m = pixel_clock; | 617 | m_n->link_m = pixel_clock; |
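intel_dp_compute_m_n() now takes bits per pixel and shifts right by three, so the default 24 bpp still yields the old pixel_clock * 3 numerator, while a panel reporting a lower depth, say 18 bpp, gets a proportionally smaller one. A quick numeric check with a made-up clock:

    #include <stdio.h>

    static unsigned int gmch_m(unsigned int pixel_clock, unsigned int bpp)
    {
        return (pixel_clock * bpp) >> 3;   /* bits per pixel down to bytes per pixel */
    }

    int main(void)
    {
        unsigned int clock = 162000;       /* hypothetical pixel clock in kHz */

        /* 24 bpp reproduces the old fixed "3 bytes per pixel" numerator exactly. */
        printf("24 bpp: %u (old fixed value: %u)\n", gmch_m(clock, 24), clock * 3);
        /* A panel reporting 18 bpp now gets a proportionally smaller numerator. */
        printf("18 bpp: %u\n", gmch_m(clock, 18));
        return 0;
    }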
@@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel, | |||
578 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | 619 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
579 | } | 620 | } |
580 | 621 | ||
622 | bool intel_pch_has_edp(struct drm_crtc *crtc) | ||
623 | { | ||
624 | struct drm_device *dev = crtc->dev; | ||
625 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
626 | struct drm_encoder *encoder; | ||
627 | |||
628 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
629 | struct intel_encoder *intel_encoder; | ||
630 | struct intel_dp_priv *dp_priv; | ||
631 | |||
632 | if (!encoder || encoder->crtc != crtc) | ||
633 | continue; | ||
634 | |||
635 | intel_encoder = enc_to_intel_encoder(encoder); | ||
636 | dp_priv = intel_encoder->dev_priv; | ||
637 | |||
638 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) | ||
639 | return dp_priv->is_pch_edp; | ||
640 | } | ||
641 | return false; | ||
642 | } | ||
643 | |||
581 | void | 644 | void |
582 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 645 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
583 | struct drm_display_mode *adjusted_mode) | 646 | struct drm_display_mode *adjusted_mode) |
@@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
587 | struct drm_encoder *encoder; | 650 | struct drm_encoder *encoder; |
588 | struct drm_i915_private *dev_priv = dev->dev_private; | 651 | struct drm_i915_private *dev_priv = dev->dev_private; |
589 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 652 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
590 | int lane_count = 4; | 653 | int lane_count = 4, bpp = 24; |
591 | struct intel_dp_m_n m_n; | 654 | struct intel_dp_m_n m_n; |
592 | 655 | ||
593 | /* | 656 | /* |
@@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
605 | 668 | ||
606 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { | 669 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
607 | lane_count = dp_priv->lane_count; | 670 | lane_count = dp_priv->lane_count; |
671 | if (IS_PCH_eDP(dp_priv)) | ||
672 | bpp = dev_priv->edp_bpp; | ||
608 | break; | 673 | break; |
609 | } | 674 | } |
610 | } | 675 | } |
@@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
614 | * the number of bytes_per_pixel post-LUT, which we always | 679 | * the number of bytes_per_pixel post-LUT, which we always |
615 | * set up for 8-bits of R/G/B, or 3 bytes total. | 680 | * set up for 8-bits of R/G/B, or 3 bytes total. |
616 | */ | 681 | */ |
617 | intel_dp_compute_m_n(3, lane_count, | 682 | intel_dp_compute_m_n(bpp, lane_count, |
618 | mode->clock, adjusted_mode->clock, &m_n); | 683 | mode->clock, adjusted_mode->clock, &m_n); |
619 | 684 | ||
620 | if (HAS_PCH_SPLIT(dev)) { | 685 | if (HAS_PCH_SPLIT(dev)) { |
@@ -717,6 +782,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
717 | } | 782 | } |
718 | } | 783 | } |
719 | 784 | ||
785 | static void ironlake_edp_panel_on (struct drm_device *dev) | ||
786 | { | ||
787 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
788 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
789 | u32 pp, pp_status; | ||
790 | |||
791 | pp_status = I915_READ(PCH_PP_STATUS); | ||
792 | if (pp_status & PP_ON) | ||
793 | return; | ||
794 | |||
795 | pp = I915_READ(PCH_PP_CONTROL); | ||
796 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | ||
797 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
798 | do { | ||
799 | pp_status = I915_READ(PCH_PP_STATUS); | ||
800 | } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); | ||
801 | |||
802 | if (time_after(jiffies, timeout)) | ||
803 | DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); | ||
804 | |||
805 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); | ||
806 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
807 | } | ||
808 | |||
809 | static void ironlake_edp_panel_off (struct drm_device *dev) | ||
810 | { | ||
811 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
812 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | ||
813 | u32 pp, pp_status; | ||
814 | |||
815 | pp = I915_READ(PCH_PP_CONTROL); | ||
816 | pp &= ~POWER_TARGET_ON; | ||
817 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
818 | do { | ||
819 | pp_status = I915_READ(PCH_PP_STATUS); | ||
820 | } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); | ||
821 | |||
822 | if (time_after(jiffies, timeout)) | ||
823 | DRM_DEBUG_KMS("panel off wait timed out\n"); | ||
824 | |||
825 | /* Make sure VDD is enabled so DP AUX will work */ | ||
826 | pp |= EDP_FORCE_VDD; | ||
827 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
828 | } | ||
829 | |||
720 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 830 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
721 | { | 831 | { |
722 | struct drm_i915_private *dev_priv = dev->dev_private; | 832 | struct drm_i915_private *dev_priv = dev->dev_private; |
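Both new panel-power helpers above poll PCH_PP_STATUS against a jiffies deadline instead of spinning forever. The shape of that bounded poll as a user-space sketch with a fake status register (the kernel versions use jiffies, time_after() and a 5000 ms budget):

    #include <stdio.h>
    #include <time.h>

    static int polls;

    /* Stand-in for reading the panel power status; reports "on" after 3 polls. */
    static int panel_is_on(void)
    {
        return ++polls >= 3;
    }

    int main(void)
    {
        time_t deadline = time(NULL) + 5;   /* roughly the driver's 5 second budget */
        int on;

        do {
            on = panel_is_on();
        } while (!on && time(NULL) <= deadline);

        if (!on)
            printf("panel on wait timed out\n");
        else
            printf("panel reported on after %d poll(s)\n", polls);
        return 0;
    }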
@@ -751,14 +861,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
751 | if (mode != DRM_MODE_DPMS_ON) { | 861 | if (mode != DRM_MODE_DPMS_ON) { |
752 | if (dp_reg & DP_PORT_EN) { | 862 | if (dp_reg & DP_PORT_EN) { |
753 | intel_dp_link_down(intel_encoder, dp_priv->DP); | 863 | intel_dp_link_down(intel_encoder, dp_priv->DP); |
754 | if (IS_eDP(intel_encoder)) | 864 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
755 | ironlake_edp_backlight_off(dev); | 865 | ironlake_edp_backlight_off(dev); |
866 | ironlake_edp_panel_off(dev); | ||
867 | } | ||
756 | } | 868 | } |
757 | } else { | 869 | } else { |
758 | if (!(dp_reg & DP_PORT_EN)) { | 870 | if (!(dp_reg & DP_PORT_EN)) { |
759 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); | 871 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); |
760 | if (IS_eDP(intel_encoder)) | 872 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
873 | ironlake_edp_panel_on(dev); | ||
761 | ironlake_edp_backlight_on(dev); | 874 | ironlake_edp_backlight_on(dev); |
875 | } | ||
762 | } | 876 | } |
763 | } | 877 | } |
764 | dp_priv->dpms_mode = mode; | 878 | dp_priv->dpms_mode = mode; |
@@ -1291,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1291 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1405 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
1292 | struct drm_device *dev = intel_encoder->enc.dev; | 1406 | struct drm_device *dev = intel_encoder->enc.dev; |
1293 | struct drm_i915_private *dev_priv = dev->dev_private; | 1407 | struct drm_i915_private *dev_priv = dev->dev_private; |
1408 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1294 | int ret; | 1409 | int ret; |
1295 | 1410 | ||
1296 | /* We should parse the EDID data and find out if it has an audio sink | 1411 | /* We should parse the EDID data and find out if it has an audio sink |
1297 | */ | 1412 | */ |
1298 | 1413 | ||
1299 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 1414 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); |
1300 | if (ret) | 1415 | if (ret) { |
1416 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | ||
1417 | !dev_priv->panel_fixed_mode) { | ||
1418 | struct drm_display_mode *newmode; | ||
1419 | list_for_each_entry(newmode, &connector->probed_modes, | ||
1420 | head) { | ||
1421 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | ||
1422 | dev_priv->panel_fixed_mode = | ||
1423 | drm_mode_duplicate(dev, newmode); | ||
1424 | break; | ||
1425 | } | ||
1426 | } | ||
1427 | } | ||
1428 | |||
1301 | return ret; | 1429 | return ret; |
1430 | } | ||
1302 | 1431 | ||
1303 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1432 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1304 | if (IS_eDP(intel_encoder)) { | 1433 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { |
1305 | if (dev_priv->panel_fixed_mode != NULL) { | 1434 | if (dev_priv->panel_fixed_mode != NULL) { |
1306 | struct drm_display_mode *mode; | 1435 | struct drm_display_mode *mode; |
1307 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1436 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1386,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) | |||
1386 | return -1; | 1515 | return -1; |
1387 | } | 1516 | } |
1388 | 1517 | ||
1518 | /* check the VBT to see whether the eDP is on DP-D port */ | ||
1519 | bool intel_dpd_is_edp(struct drm_device *dev) | ||
1520 | { | ||
1521 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1522 | struct child_device_config *p_child; | ||
1523 | int i; | ||
1524 | |||
1525 | if (!dev_priv->child_dev_num) | ||
1526 | return false; | ||
1527 | |||
1528 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
1529 | p_child = dev_priv->child_dev + i; | ||
1530 | |||
1531 | if (p_child->dvo_port == PORT_IDPD && | ||
1532 | p_child->device_type == DEVICE_TYPE_eDP) | ||
1533 | return true; | ||
1534 | } | ||
1535 | return false; | ||
1536 | } | ||
1537 | |||
1389 | void | 1538 | void |
1390 | intel_dp_init(struct drm_device *dev, int output_reg) | 1539 | intel_dp_init(struct drm_device *dev, int output_reg) |
1391 | { | 1540 | { |
@@ -1395,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1395 | struct intel_connector *intel_connector; | 1544 | struct intel_connector *intel_connector; |
1396 | struct intel_dp_priv *dp_priv; | 1545 | struct intel_dp_priv *dp_priv; |
1397 | const char *name = NULL; | 1546 | const char *name = NULL; |
1547 | int type; | ||
1398 | 1548 | ||
1399 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + | 1549 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + |
1400 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1550 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
@@ -1409,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1409 | 1559 | ||
1410 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); | 1560 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); |
1411 | 1561 | ||
1562 | if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) | ||
1563 | if (intel_dpd_is_edp(dev)) | ||
1564 | dp_priv->is_pch_edp = true; | ||
1565 | |||
1566 | if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { | ||
1567 | type = DRM_MODE_CONNECTOR_eDP; | ||
1568 | intel_encoder->type = INTEL_OUTPUT_EDP; | ||
1569 | } else { | ||
1570 | type = DRM_MODE_CONNECTOR_DisplayPort; | ||
1571 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1572 | } | ||
1573 | |||
1412 | connector = &intel_connector->base; | 1574 | connector = &intel_connector->base; |
1413 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | 1575 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
1414 | DRM_MODE_CONNECTOR_DisplayPort); | ||
1415 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1576 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
1416 | 1577 | ||
1417 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 1578 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1418 | 1579 | ||
1419 | if (output_reg == DP_A) | ||
1420 | intel_encoder->type = INTEL_OUTPUT_EDP; | ||
1421 | else | ||
1422 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1423 | |||
1424 | if (output_reg == DP_B || output_reg == PCH_DP_B) | 1580 | if (output_reg == DP_B || output_reg == PCH_DP_B) |
1425 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | 1581 | intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); |
1426 | else if (output_reg == DP_C || output_reg == PCH_DP_C) | 1582 | else if (output_reg == DP_C || output_reg == PCH_DP_C) |
@@ -1479,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1479 | intel_encoder->ddc_bus = &dp_priv->adapter; | 1635 | intel_encoder->ddc_bus = &dp_priv->adapter; |
1480 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1636 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1481 | 1637 | ||
1482 | if (output_reg == DP_A) { | 1638 | if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { |
1483 | /* initialize panel mode from VBT if available for eDP */ | 1639 | /* initialize panel mode from VBT if available for eDP */ |
1484 | if (dev_priv->lfp_lvds_vbt_mode) { | 1640 | if (dev_priv->lfp_lvds_vbt_mode) { |
1485 | dev_priv->panel_fixed_mode = | 1641 | dev_priv->panel_fixed_mode = |
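
The new ironlake_edp_panel_on()/ironlake_edp_panel_off() helpers above follow a bounded-poll pattern: write PCH_PP_CONTROL, then spin reading PCH_PP_STATUS until PP_ON changes or a 5 second deadline passes, and only log on timeout. The standalone C sketch below shows just that pattern; read_status(), the PP_ON value and the millisecond deadline are stand-ins for this example, not the i915 code.

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define PP_ON (1u << 31)                /* stand-in for the real status bit */

    /* Hypothetical stand-in for I915_READ(PCH_PP_STATUS): reports the panel
     * as powered up after a few polls. */
    static unsigned int read_status(void)
    {
        static int polls;
        return (++polls > 3) ? PP_ON : 0;
    }

    /* Poll for a status bit until it is set or the deadline expires. */
    static bool wait_for_bit(unsigned int bit, long timeout_ms)
    {
        struct timespec start, now;
        long elapsed_ms;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (read_status() & bit)
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                         (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > timeout_ms)
                return false;           /* timed out; the driver just logs and continues */
        }
    }

    int main(void)
    {
        printf("panel %s\n", wait_for_bit(PP_ON, 5000) ? "on" : "wait timed out");
        return 0;
    }
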
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 72206f37c4fb..b2190148703a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -143,8 +143,6 @@ struct intel_crtc { | |||
143 | struct drm_crtc base; | 143 | struct drm_crtc base; |
144 | enum pipe pipe; | 144 | enum pipe pipe; |
145 | enum plane plane; | 145 | enum plane plane; |
146 | struct drm_gem_object *cursor_bo; | ||
147 | uint32_t cursor_addr; | ||
148 | u8 lut_r[256], lut_g[256], lut_b[256]; | 146 | u8 lut_r[256], lut_g[256], lut_b[256]; |
149 | int dpms_mode; | 147 | int dpms_mode; |
150 | bool busy; /* is scanout buffer being updated frequently? */ | 148 | bool busy; /* is scanout buffer being updated frequently? */ |
@@ -153,6 +151,12 @@ struct intel_crtc { | |||
153 | struct intel_overlay *overlay; | 151 | struct intel_overlay *overlay; |
154 | struct intel_unpin_work *unpin_work; | 152 | struct intel_unpin_work *unpin_work; |
155 | int fdi_lanes; | 153 | int fdi_lanes; |
154 | |||
155 | struct drm_gem_object *cursor_bo; | ||
156 | uint32_t cursor_addr; | ||
157 | int16_t cursor_x, cursor_y; | ||
158 | int16_t cursor_width, cursor_height; | ||
159 | bool cursor_visble; | ||
156 | }; | 160 | }; |
157 | 161 | ||
158 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 162 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
179 | void | 183 | void |
180 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 184 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
181 | struct drm_display_mode *adjusted_mode); | 185 | struct drm_display_mode *adjusted_mode); |
186 | extern bool intel_pch_has_edp(struct drm_crtc *crtc); | ||
187 | extern bool intel_dpd_is_edp(struct drm_device *dev); | ||
182 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 188 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
183 | 189 | ||
184 | 190 | ||
@@ -215,6 +221,9 @@ extern void intel_init_clock_gating(struct drm_device *dev); | |||
215 | extern void ironlake_enable_drps(struct drm_device *dev); | 221 | extern void ironlake_enable_drps(struct drm_device *dev); |
216 | extern void ironlake_disable_drps(struct drm_device *dev); | 222 | extern void ironlake_disable_drps(struct drm_device *dev); |
217 | 223 | ||
224 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | ||
225 | struct drm_gem_object *obj); | ||
226 | |||
218 | extern int intel_framebuffer_init(struct drm_device *dev, | 227 | extern int intel_framebuffer_init(struct drm_device *dev, |
219 | struct intel_framebuffer *ifb, | 228 | struct intel_framebuffer *ifb, |
220 | struct drm_mode_fb_cmd *mode_cmd, | 229 | struct drm_mode_fb_cmd *mode_cmd, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index c3c505244e07..3e18c9e7729b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
98 | 98 | ||
99 | mutex_lock(&dev->struct_mutex); | 99 | mutex_lock(&dev->struct_mutex); |
100 | 100 | ||
101 | ret = i915_gem_object_pin(fbo, 64*1024); | 101 | ret = intel_pin_and_fence_fb_obj(dev, fbo); |
102 | if (ret) { | 102 | if (ret) { |
103 | DRM_ERROR("failed to pin fb: %d\n", ret); | 103 | DRM_ERROR("failed to pin fb: %d\n", ret); |
104 | goto out_unref; | 104 | goto out_unref; |
@@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev, | |||
236 | 236 | ||
237 | drm_framebuffer_cleanup(&ifb->base); | 237 | drm_framebuffer_cleanup(&ifb->base); |
238 | if (ifb->obj) | 238 | if (ifb->obj) |
239 | drm_gem_object_unreference_unlocked(ifb->obj); | 239 | drm_gem_object_unreference(ifb->obj); |
240 | 240 | ||
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 83bd764b000e..197887ed1823 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
54 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | 54 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; |
55 | u32 sdvox; | 55 | u32 sdvox; |
56 | 56 | ||
57 | sdvox = SDVO_ENCODING_HDMI | | 57 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; |
58 | SDVO_BORDER_ENABLE | | 58 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
59 | SDVO_VSYNC_ACTIVE_HIGH | | 59 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
60 | SDVO_HSYNC_ACTIVE_HIGH; | 60 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
61 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | ||
61 | 62 | ||
62 | if (hdmi_priv->has_hdmi_sink) { | 63 | if (hdmi_priv->has_hdmi_sink) { |
63 | sdvox |= SDVO_AUDIO_ENABLE; | 64 | sdvox |= SDVO_AUDIO_ENABLE; |
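
This HDMI hunk (and the matching SDVO hunk later in the series) stops hard-coding active-high sync and instead derives the polarity bits from the adjusted mode's PHSYNC/PVSYNC flags. A minimal sketch of that flag-to-register mapping, with made-up flag and bit values standing in for the real DRM and SDVO definitions:

    #include <stdio.h>

    /* Stand-in flag and register bit values; the real ones live in the drm headers. */
    #define MODE_FLAG_PHSYNC      (1u << 0)
    #define MODE_FLAG_PVSYNC      (1u << 1)
    #define REG_HSYNC_ACTIVE_HIGH (1u << 3)
    #define REG_VSYNC_ACTIVE_HIGH (1u << 4)

    static unsigned int sync_bits(unsigned int mode_flags)
    {
        unsigned int reg = 0;

        if (mode_flags & MODE_FLAG_PHSYNC)  /* positive hsync requested by the mode */
            reg |= REG_HSYNC_ACTIVE_HIGH;
        if (mode_flags & MODE_FLAG_PVSYNC)  /* positive vsync requested by the mode */
            reg |= REG_VSYNC_ACTIVE_HIGH;
        return reg;
    }

    int main(void)
    {
        printf("0x%x\n", sync_bits(MODE_FLAG_PHSYNC));                      /* hsync high only */
        printf("0x%x\n", sync_bits(MODE_FLAG_PHSYNC | MODE_FLAG_PVSYNC));   /* both high */
        return 0;
    }
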
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 31df55f0a0a7..0a2e60059fb3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector, | |||
156 | return MODE_OK; | 156 | return MODE_OK; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void | ||
160 | centre_horizontally(struct drm_display_mode *mode, | ||
161 | int width) | ||
162 | { | ||
163 | u32 border, sync_pos, blank_width, sync_width; | ||
164 | |||
165 | /* keep the hsync and hblank widths constant */ | ||
166 | sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; | ||
167 | blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; | ||
168 | sync_pos = (blank_width - sync_width + 1) / 2; | ||
169 | |||
170 | border = (mode->hdisplay - width + 1) / 2; | ||
171 | border += border & 1; /* make the border even */ | ||
172 | |||
173 | mode->crtc_hdisplay = width; | ||
174 | mode->crtc_hblank_start = width + border; | ||
175 | mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; | ||
176 | |||
177 | mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; | ||
178 | mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; | ||
179 | } | ||
180 | |||
181 | static void | ||
182 | centre_vertically(struct drm_display_mode *mode, | ||
183 | int height) | ||
184 | { | ||
185 | u32 border, sync_pos, blank_width, sync_width; | ||
186 | |||
187 | /* keep the vsync and vblank widths constant */ | ||
188 | sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
189 | blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; | ||
190 | sync_pos = (blank_width - sync_width + 1) / 2; | ||
191 | |||
192 | border = (mode->vdisplay - height + 1) / 2; | ||
193 | |||
194 | mode->crtc_vdisplay = height; | ||
195 | mode->crtc_vblank_start = height + border; | ||
196 | mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; | ||
197 | |||
198 | mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; | ||
199 | mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; | ||
200 | } | ||
201 | |||
202 | static inline u32 panel_fitter_scaling(u32 source, u32 target) | ||
203 | { | ||
204 | /* | ||
205 | * Floating point operation is not supported. So the FACTOR | ||
206 | * is defined, which can avoid the floating point computation | ||
207 | * when calculating the panel ratio. | ||
208 | */ | ||
209 | #define ACCURACY 12 | ||
210 | #define FACTOR (1 << ACCURACY) | ||
211 | u32 ratio = source * FACTOR / target; | ||
212 | return (FACTOR * ratio + FACTOR/2) / FACTOR; | ||
213 | } | ||
214 | |||
159 | static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | 215 | static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, |
160 | struct drm_display_mode *mode, | 216 | struct drm_display_mode *mode, |
161 | struct drm_display_mode *adjusted_mode) | 217 | struct drm_display_mode *adjusted_mode) |
162 | { | 218 | { |
163 | /* | ||
164 | * float point operation is not supported . So the PANEL_RATIO_FACTOR | ||
165 | * is defined, which can avoid the float point computation when | ||
166 | * calculating the panel ratio. | ||
167 | */ | ||
168 | #define PANEL_RATIO_FACTOR 8192 | ||
169 | struct drm_device *dev = encoder->dev; | 219 | struct drm_device *dev = encoder->dev; |
170 | struct drm_i915_private *dev_priv = dev->dev_private; | 220 | struct drm_i915_private *dev_priv = dev->dev_private; |
171 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 221 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
172 | struct drm_encoder *tmp_encoder; | 222 | struct drm_encoder *tmp_encoder; |
173 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 223 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); |
174 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; | 224 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; |
175 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | 225 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
176 | int left_border = 0, right_border = 0, top_border = 0; | ||
177 | int bottom_border = 0; | ||
178 | bool border = 0; | ||
179 | int panel_ratio, desired_ratio, vert_scale, horiz_scale; | ||
180 | int horiz_ratio, vert_ratio; | ||
181 | u32 hsync_width, vsync_width; | ||
182 | u32 hblank_width, vblank_width; | ||
183 | u32 hsync_pos, vsync_pos; | ||
184 | 226 | ||
185 | /* Should never happen!! */ | 227 | /* Should never happen!! */ |
186 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { | 228 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { |
@@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
200 | if (dev_priv->panel_fixed_mode == NULL) | 242 | if (dev_priv->panel_fixed_mode == NULL) |
201 | return true; | 243 | return true; |
202 | /* | 244 | /* |
203 | * If we have timings from the BIOS for the panel, put them in | 245 | * We have timings from the BIOS for the panel, put them in |
204 | * to the adjusted mode. The CRTC will be set up for this mode, | 246 | * to the adjusted mode. The CRTC will be set up for this mode, |
205 | * with the panel scaling set up to source from the H/VDisplay | 247 | * with the panel scaling set up to source from the H/VDisplay |
206 | * of the original mode. | 248 | * of the original mode. |
207 | */ | 249 | */ |
208 | if (dev_priv->panel_fixed_mode != NULL) { | 250 | adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; |
209 | adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; | 251 | adjusted_mode->hsync_start = |
210 | adjusted_mode->hsync_start = | 252 | dev_priv->panel_fixed_mode->hsync_start; |
211 | dev_priv->panel_fixed_mode->hsync_start; | 253 | adjusted_mode->hsync_end = |
212 | adjusted_mode->hsync_end = | 254 | dev_priv->panel_fixed_mode->hsync_end; |
213 | dev_priv->panel_fixed_mode->hsync_end; | 255 | adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; |
214 | adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; | 256 | adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; |
215 | adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; | 257 | adjusted_mode->vsync_start = |
216 | adjusted_mode->vsync_start = | 258 | dev_priv->panel_fixed_mode->vsync_start; |
217 | dev_priv->panel_fixed_mode->vsync_start; | 259 | adjusted_mode->vsync_end = |
218 | adjusted_mode->vsync_end = | 260 | dev_priv->panel_fixed_mode->vsync_end; |
219 | dev_priv->panel_fixed_mode->vsync_end; | 261 | adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; |
220 | adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; | 262 | adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; |
221 | adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; | 263 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
222 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
223 | } | ||
224 | 264 | ||
225 | /* Make sure pre-965s set dither correctly */ | 265 | /* Make sure pre-965s set dither correctly */ |
226 | if (!IS_I965G(dev)) { | 266 | if (!IS_I965G(dev)) { |
@@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
230 | 270 | ||
231 | /* Native modes don't need fitting */ | 271 | /* Native modes don't need fitting */ |
232 | if (adjusted_mode->hdisplay == mode->hdisplay && | 272 | if (adjusted_mode->hdisplay == mode->hdisplay && |
233 | adjusted_mode->vdisplay == mode->vdisplay) { | 273 | adjusted_mode->vdisplay == mode->vdisplay) |
234 | pfit_pgm_ratios = 0; | ||
235 | border = 0; | ||
236 | goto out; | 274 | goto out; |
237 | } | ||
238 | 275 | ||
239 | /* full screen scale for now */ | 276 | /* full screen scale for now */ |
240 | if (HAS_PCH_SPLIT(dev)) | 277 | if (HAS_PCH_SPLIT(dev)) |
@@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
242 | 279 | ||
243 | /* 965+ wants fuzzy fitting */ | 280 | /* 965+ wants fuzzy fitting */ |
244 | if (IS_I965G(dev)) | 281 | if (IS_I965G(dev)) |
245 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | | 282 | pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | |
246 | PFIT_FILTER_FUZZY; | 283 | PFIT_FILTER_FUZZY); |
247 | 284 | ||
248 | hsync_width = adjusted_mode->crtc_hsync_end - | ||
249 | adjusted_mode->crtc_hsync_start; | ||
250 | vsync_width = adjusted_mode->crtc_vsync_end - | ||
251 | adjusted_mode->crtc_vsync_start; | ||
252 | hblank_width = adjusted_mode->crtc_hblank_end - | ||
253 | adjusted_mode->crtc_hblank_start; | ||
254 | vblank_width = adjusted_mode->crtc_vblank_end - | ||
255 | adjusted_mode->crtc_vblank_start; | ||
256 | /* | ||
257 | * Deal with panel fitting options. Figure out how to stretch the | ||
258 | * image based on its aspect ratio & the current panel fitting mode. | ||
259 | */ | ||
260 | panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / | ||
261 | adjusted_mode->vdisplay; | ||
262 | desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / | ||
263 | mode->vdisplay; | ||
264 | /* | 285 | /* |
265 | * Enable automatic panel scaling for non-native modes so that they fill | 286 | * Enable automatic panel scaling for non-native modes so that they fill |
266 | * the screen. Should be enabled before the pipe is enabled, according | 287 | * the screen. Should be enabled before the pipe is enabled, according |
@@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
278 | * For centered modes, we have to calculate border widths & | 299 | * For centered modes, we have to calculate border widths & |
279 | * heights and modify the values programmed into the CRTC. | 300 | * heights and modify the values programmed into the CRTC. |
280 | */ | 301 | */ |
281 | left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; | 302 | centre_horizontally(adjusted_mode, mode->hdisplay); |
282 | right_border = left_border; | 303 | centre_vertically(adjusted_mode, mode->vdisplay); |
283 | if (mode->hdisplay & 1) | 304 | border = LVDS_BORDER_ENABLE; |
284 | right_border++; | ||
285 | top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; | ||
286 | bottom_border = top_border; | ||
287 | if (mode->vdisplay & 1) | ||
288 | bottom_border++; | ||
289 | /* Set active & border values */ | ||
290 | adjusted_mode->crtc_hdisplay = mode->hdisplay; | ||
291 | /* Keep the boder be even */ | ||
292 | if (right_border & 1) | ||
293 | right_border++; | ||
294 | /* use the border directly instead of border minuse one */ | ||
295 | adjusted_mode->crtc_hblank_start = mode->hdisplay + | ||
296 | right_border; | ||
297 | /* keep the blank width constant */ | ||
298 | adjusted_mode->crtc_hblank_end = | ||
299 | adjusted_mode->crtc_hblank_start + hblank_width; | ||
300 | /* get the hsync pos relative to hblank start */ | ||
301 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
302 | /* keep the hsync pos be even */ | ||
303 | if (hsync_pos & 1) | ||
304 | hsync_pos++; | ||
305 | adjusted_mode->crtc_hsync_start = | ||
306 | adjusted_mode->crtc_hblank_start + hsync_pos; | ||
307 | /* keep the hsync width constant */ | ||
308 | adjusted_mode->crtc_hsync_end = | ||
309 | adjusted_mode->crtc_hsync_start + hsync_width; | ||
310 | adjusted_mode->crtc_vdisplay = mode->vdisplay; | ||
311 | /* use the border instead of border minus one */ | ||
312 | adjusted_mode->crtc_vblank_start = mode->vdisplay + | ||
313 | bottom_border; | ||
314 | /* keep the vblank width constant */ | ||
315 | adjusted_mode->crtc_vblank_end = | ||
316 | adjusted_mode->crtc_vblank_start + vblank_width; | ||
317 | /* get the vsync start postion relative to vblank start */ | ||
318 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
319 | adjusted_mode->crtc_vsync_start = | ||
320 | adjusted_mode->crtc_vblank_start + vsync_pos; | ||
321 | /* keep the vsync width constant */ | ||
322 | adjusted_mode->crtc_vsync_end = | ||
323 | adjusted_mode->crtc_vsync_start + vsync_width; | ||
324 | border = 1; | ||
325 | break; | 305 | break; |
306 | |||
326 | case DRM_MODE_SCALE_ASPECT: | 307 | case DRM_MODE_SCALE_ASPECT: |
327 | /* Scale but preserve the spect ratio */ | 308 | /* Scale but preserve the aspect ratio */ |
328 | pfit_control |= PFIT_ENABLE; | ||
329 | if (IS_I965G(dev)) { | 309 | if (IS_I965G(dev)) { |
310 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | ||
311 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | ||
312 | |||
313 | pfit_control |= PFIT_ENABLE; | ||
330 | /* 965+ is easy, it does everything in hw */ | 314 | /* 965+ is easy, it does everything in hw */ |
331 | if (panel_ratio > desired_ratio) | 315 | if (scaled_width > scaled_height) |
332 | pfit_control |= PFIT_SCALING_PILLAR; | 316 | pfit_control |= PFIT_SCALING_PILLAR; |
333 | else if (panel_ratio < desired_ratio) | 317 | else if (scaled_width < scaled_height) |
334 | pfit_control |= PFIT_SCALING_LETTER; | 318 | pfit_control |= PFIT_SCALING_LETTER; |
335 | else | 319 | else |
336 | pfit_control |= PFIT_SCALING_AUTO; | 320 | pfit_control |= PFIT_SCALING_AUTO; |
337 | } else { | 321 | } else { |
322 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | ||
323 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | ||
338 | /* | 324 | /* |
339 | * For earlier chips we have to calculate the scaling | 325 | * For earlier chips we have to calculate the scaling |
340 | * ratio by hand and program it into the | 326 | * ratio by hand and program it into the |
341 | * PFIT_PGM_RATIO register | 327 | * PFIT_PGM_RATIO register |
342 | */ | 328 | */ |
343 | u32 horiz_bits, vert_bits, bits = 12; | 329 | if (scaled_width > scaled_height) { /* pillar */ |
344 | horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ | 330 | centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay); |
345 | adjusted_mode->hdisplay; | 331 | |
346 | vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ | 332 | border = LVDS_BORDER_ENABLE; |
347 | adjusted_mode->vdisplay; | 333 | if (mode->vdisplay != adjusted_mode->vdisplay) { |
348 | horiz_scale = adjusted_mode->hdisplay * | 334 | u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay); |
349 | PANEL_RATIO_FACTOR / mode->hdisplay; | 335 | pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | |
350 | vert_scale = adjusted_mode->vdisplay * | 336 | bits << PFIT_VERT_SCALE_SHIFT); |
351 | PANEL_RATIO_FACTOR / mode->vdisplay; | 337 | pfit_control |= (PFIT_ENABLE | |
352 | 338 | VERT_INTERP_BILINEAR | | |
353 | /* retain aspect ratio */ | 339 | HORIZ_INTERP_BILINEAR); |
354 | if (panel_ratio > desired_ratio) { /* Pillar */ | 340 | } |
355 | u32 scaled_width; | 341 | } else if (scaled_width < scaled_height) { /* letter */ |
356 | scaled_width = mode->hdisplay * vert_scale / | 342 | centre_vertically(adjusted_mode, scaled_width / mode->hdisplay); |
357 | PANEL_RATIO_FACTOR; | 343 | |
358 | horiz_ratio = vert_ratio; | 344 | border = LVDS_BORDER_ENABLE; |
359 | pfit_control |= (VERT_AUTO_SCALE | | 345 | if (mode->hdisplay != adjusted_mode->hdisplay) { |
360 | VERT_INTERP_BILINEAR | | 346 | u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay); |
361 | HORIZ_INTERP_BILINEAR); | 347 | pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | |
362 | /* Pillar will have left/right borders */ | 348 | bits << PFIT_VERT_SCALE_SHIFT); |
363 | left_border = (adjusted_mode->hdisplay - | 349 | pfit_control |= (PFIT_ENABLE | |
364 | scaled_width) / 2; | 350 | VERT_INTERP_BILINEAR | |
365 | right_border = left_border; | 351 | HORIZ_INTERP_BILINEAR); |
366 | if (mode->hdisplay & 1) /* odd resolutions */ | 352 | } |
367 | right_border++; | 353 | } else |
368 | /* keep the border be even */ | 354 | /* Aspects match, Let hw scale both directions */ |
369 | if (right_border & 1) | 355 | pfit_control |= (PFIT_ENABLE | |
370 | right_border++; | 356 | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | |
371 | adjusted_mode->crtc_hdisplay = scaled_width; | ||
372 | /* use border instead of border minus one */ | ||
373 | adjusted_mode->crtc_hblank_start = | ||
374 | scaled_width + right_border; | ||
375 | /* keep the hblank width constant */ | ||
376 | adjusted_mode->crtc_hblank_end = | ||
377 | adjusted_mode->crtc_hblank_start + | ||
378 | hblank_width; | ||
379 | /* | ||
380 | * get the hsync start pos relative to | ||
381 | * hblank start | ||
382 | */ | ||
383 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
384 | /* keep the hsync_pos be even */ | ||
385 | if (hsync_pos & 1) | ||
386 | hsync_pos++; | ||
387 | adjusted_mode->crtc_hsync_start = | ||
388 | adjusted_mode->crtc_hblank_start + | ||
389 | hsync_pos; | ||
390 | /* keept hsync width constant */ | ||
391 | adjusted_mode->crtc_hsync_end = | ||
392 | adjusted_mode->crtc_hsync_start + | ||
393 | hsync_width; | ||
394 | border = 1; | ||
395 | } else if (panel_ratio < desired_ratio) { /* letter */ | ||
396 | u32 scaled_height = mode->vdisplay * | ||
397 | horiz_scale / PANEL_RATIO_FACTOR; | ||
398 | vert_ratio = horiz_ratio; | ||
399 | pfit_control |= (HORIZ_AUTO_SCALE | | ||
400 | VERT_INTERP_BILINEAR | | ||
401 | HORIZ_INTERP_BILINEAR); | ||
402 | /* Letterbox will have top/bottom border */ | ||
403 | top_border = (adjusted_mode->vdisplay - | ||
404 | scaled_height) / 2; | ||
405 | bottom_border = top_border; | ||
406 | if (mode->vdisplay & 1) | ||
407 | bottom_border++; | ||
408 | adjusted_mode->crtc_vdisplay = scaled_height; | ||
409 | /* use border instead of border minus one */ | ||
410 | adjusted_mode->crtc_vblank_start = | ||
411 | scaled_height + bottom_border; | ||
412 | /* keep the vblank width constant */ | ||
413 | adjusted_mode->crtc_vblank_end = | ||
414 | adjusted_mode->crtc_vblank_start + | ||
415 | vblank_width; | ||
416 | /* | ||
417 | * get the vsync start pos relative to | ||
418 | * vblank start | ||
419 | */ | ||
420 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
421 | adjusted_mode->crtc_vsync_start = | ||
422 | adjusted_mode->crtc_vblank_start + | ||
423 | vsync_pos; | ||
424 | /* keep the vsync width constant */ | ||
425 | adjusted_mode->crtc_vsync_end = | ||
426 | adjusted_mode->crtc_vsync_start + | ||
427 | vsync_width; | ||
428 | border = 1; | ||
429 | } else { | ||
430 | /* Aspects match, Let hw scale both directions */ | ||
431 | pfit_control |= (VERT_AUTO_SCALE | | ||
432 | HORIZ_AUTO_SCALE | | ||
433 | VERT_INTERP_BILINEAR | | 357 | VERT_INTERP_BILINEAR | |
434 | HORIZ_INTERP_BILINEAR); | 358 | HORIZ_INTERP_BILINEAR); |
435 | } | ||
436 | horiz_bits = (1 << bits) * horiz_ratio / | ||
437 | PANEL_RATIO_FACTOR; | ||
438 | vert_bits = (1 << bits) * vert_ratio / | ||
439 | PANEL_RATIO_FACTOR; | ||
440 | pfit_pgm_ratios = | ||
441 | ((vert_bits << PFIT_VERT_SCALE_SHIFT) & | ||
442 | PFIT_VERT_SCALE_MASK) | | ||
443 | ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & | ||
444 | PFIT_HORIZ_SCALE_MASK); | ||
445 | } | 359 | } |
446 | break; | 360 | break; |
447 | 361 | ||
@@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
458 | VERT_INTERP_BILINEAR | | 372 | VERT_INTERP_BILINEAR | |
459 | HORIZ_INTERP_BILINEAR); | 373 | HORIZ_INTERP_BILINEAR); |
460 | break; | 374 | break; |
375 | |||
461 | default: | 376 | default: |
462 | break; | 377 | break; |
463 | } | 378 | } |
@@ -465,14 +380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
465 | out: | 380 | out: |
466 | lvds_priv->pfit_control = pfit_control; | 381 | lvds_priv->pfit_control = pfit_control; |
467 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; | 382 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; |
468 | /* | 383 | dev_priv->lvds_border_bits = border; |
469 | * When there exists the border, it means that the LVDS_BORDR | 384 | |
470 | * should be enabled. | ||
471 | */ | ||
472 | if (border) | ||
473 | dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE; | ||
474 | else | ||
475 | dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE); | ||
476 | /* | 385 | /* |
477 | * XXX: It would be nice to support lower refresh rates on the | 386 | * XXX: It would be nice to support lower refresh rates on the |
478 | * panels to reduce power consumption, and perhaps match the | 387 | * panels to reduce power consumption, and perhaps match the |
@@ -599,6 +508,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
599 | return 0; | 508 | return 0; |
600 | } | 509 | } |
601 | 510 | ||
511 | static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) | ||
512 | { | ||
513 | DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); | ||
514 | return 1; | ||
515 | } | ||
516 | |||
517 | /* The GPU hangs up on these systems if modeset is performed on LID open */ | ||
518 | static const struct dmi_system_id intel_no_modeset_on_lid[] = { | ||
519 | { | ||
520 | .callback = intel_no_modeset_on_lid_dmi_callback, | ||
521 | .ident = "Toshiba Tecra A11", | ||
522 | .matches = { | ||
523 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
524 | DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), | ||
525 | }, | ||
526 | }, | ||
527 | |||
528 | { } /* terminating entry */ | ||
529 | }; | ||
530 | |||
602 | /* | 531 | /* |
603 | * Lid events. Note the use of 'modeset_on_lid': | 532 | * Lid events. Note the use of 'modeset_on_lid': |
604 | * - we set it on lid close, and reset it on open | 533 | * - we set it on lid close, and reset it on open |
@@ -622,6 +551,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
622 | */ | 551 | */ |
623 | if (connector) | 552 | if (connector) |
624 | connector->status = connector->funcs->detect(connector); | 553 | connector->status = connector->funcs->detect(connector); |
554 | /* Don't force modeset on machines where it causes a GPU lockup */ | ||
555 | if (dmi_check_system(intel_no_modeset_on_lid)) | ||
556 | return NOTIFY_OK; | ||
625 | if (!acpi_lid_open()) { | 557 | if (!acpi_lid_open()) { |
626 | dev_priv->modeset_on_lid = 1; | 558 | dev_priv->modeset_on_lid = 1; |
627 | return NOTIFY_OK; | 559 | return NOTIFY_OK; |
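
The LVDS rework above concentrates the scaling math in two kinds of helper: centre_horizontally()/centre_vertically() shrink the active region while keeping the sync and blank widths constant, and panel_fitter_scaling() produces a 12-bit fixed-point ratio (FACTOR = 1 << 12) so the driver never needs floating point. A self-contained sketch of the fixed-point ratio, using illustrative sizes (1024 source pixels scaled onto a 1280-wide panel, an assumption for the example only):

    #include <stdio.h>
    #include <stdint.h>

    #define ACCURACY 12
    #define FACTOR   (1u << ACCURACY)       /* 4096: 12 fractional bits */

    /* Same shape as the panel fitter helper: ratio of source to target,
     * expressed as a fixed-point number with ACCURACY fractional bits. */
    static uint32_t panel_fitter_scaling(uint32_t source, uint32_t target)
    {
        uint32_t ratio = source * FACTOR / target;
        return (FACTOR * ratio + FACTOR / 2) / FACTOR;
    }

    int main(void)
    {
        uint32_t bits = panel_fitter_scaling(1024, 1280);
        printf("ratio bits = 0x%x (%u/4096 = %.3f)\n",
               bits, bits, (double)bits / FACTOR);
        return 0;
    }
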
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d7ad5139d17c..f26ec2f27d36 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -65,7 +65,7 @@ | |||
65 | #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ | 65 | #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ |
66 | #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) | 66 | #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) |
67 | #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) | 67 | #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) |
68 | #define OCMD_BUF_TYPE_MASK (Ox1<<5) | 68 | #define OCMD_BUF_TYPE_MASK (0x1<<5) |
69 | #define OCMD_BUF_TYPE_FRAME (0x0<<5) | 69 | #define OCMD_BUF_TYPE_FRAME (0x0<<5) |
70 | #define OCMD_BUF_TYPE_FIELD (0x1<<5) | 70 | #define OCMD_BUF_TYPE_FIELD (0x1<<5) |
71 | #define OCMD_TEST_MODE (0x1<<4) | 71 | #define OCMD_TEST_MODE (0x1<<4) |
@@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_device *dev, | |||
958 | || rec->src_width < N_HORIZ_Y_TAPS*4) | 958 | || rec->src_width < N_HORIZ_Y_TAPS*4) |
959 | return -EINVAL; | 959 | return -EINVAL; |
960 | 960 | ||
961 | /* check alingment constrains */ | 961 | /* check alignment constraints */ |
962 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { | 962 | switch (rec->flags & I915_OVERLAY_TYPE_MASK) { |
963 | case I915_OVERLAY_RGB: | 963 | case I915_OVERLAY_RGB: |
964 | /* not implemented */ | 964 | /* not implemented */ |
@@ -990,7 +990,10 @@ static int check_overlay_src(struct drm_device *dev, | |||
990 | return -EINVAL; | 990 | return -EINVAL; |
991 | 991 | ||
992 | /* stride checking */ | 992 | /* stride checking */ |
993 | stride_mask = 63; | 993 | if (IS_I830(dev) || IS_845G(dev)) |
994 | stride_mask = 255; | ||
995 | else | ||
996 | stride_mask = 63; | ||
994 | 997 | ||
995 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) | 998 | if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) |
996 | return -EINVAL; | 999 | return -EINVAL; |
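
The stride check above selects a mask of 255 on i830/i845 (256-byte alignment) and 63 otherwise (64-byte alignment), and rejects any stride with a masked bit set. A minimal sketch of that power-of-two alignment test; the stride values below are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Returns true when 'stride' is a multiple of the (power-of-two) alignment.
     * mask = alignment - 1, e.g. 63 for 64-byte, 255 for 256-byte alignment. */
    static bool stride_ok(uint32_t stride, uint32_t mask)
    {
        return (stride & mask) == 0;
    }

    int main(void)
    {
        printf("%d\n", stride_ok(1280, 63));    /* 1280 %% 64  == 0 -> 1 */
        printf("%d\n", stride_ok(1280, 255));   /* 1280 %% 256 == 0 -> 1 */
        printf("%d\n", stride_ok(1366, 63));    /* 1366 %% 64  != 0 -> 0 */
        return 0;
    }
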
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 76993ac16cc1..8b2bfc005c59 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1237 | 1237 | ||
1238 | /* Set the SDVO control regs. */ | 1238 | /* Set the SDVO control regs. */ |
1239 | if (IS_I965G(dev)) { | 1239 | if (IS_I965G(dev)) { |
1240 | sdvox |= SDVO_BORDER_ENABLE | | 1240 | sdvox |= SDVO_BORDER_ENABLE; |
1241 | SDVO_VSYNC_ACTIVE_HIGH | | 1241 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
1242 | SDVO_HSYNC_ACTIVE_HIGH; | 1242 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
1243 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | ||
1244 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | ||
1243 | } else { | 1245 | } else { |
1244 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); | 1246 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); |
1245 | switch (sdvo_priv->sdvo_reg) { | 1247 | switch (sdvo_priv->sdvo_reg) { |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 6d553c29d106..d61ffbc381e5 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = { | |||
476 | .vi_end_f1 = 20, .vi_end_f2 = 21, | 476 | .vi_end_f1 = 20, .vi_end_f2 = 21, |
477 | .nbr_end = 240, | 477 | .nbr_end = 240, |
478 | 478 | ||
479 | .burst_ena = 8, | 479 | .burst_ena = true, |
480 | .hburst_start = 72, .hburst_len = 34, | 480 | .hburst_start = 72, .hburst_len = 34, |
481 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, | 481 | .vburst_start_f1 = 9, .vburst_end_f1 = 240, |
482 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, | 482 | .vburst_start_f2 = 10, .vburst_end_f2 = 240, |
@@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = { | |||
896 | }, | 896 | }, |
897 | }; | 897 | }; |
898 | 898 | ||
899 | #define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0]) | ||
900 | |||
901 | static void | 899 | static void |
902 | intel_tv_dpms(struct drm_encoder *encoder, int mode) | 900 | intel_tv_dpms(struct drm_encoder *encoder, int mode) |
903 | { | 901 | { |
@@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1512 | tv_priv->margin[TV_MARGIN_BOTTOM] = val; | 1510 | tv_priv->margin[TV_MARGIN_BOTTOM] = val; |
1513 | changed = true; | 1511 | changed = true; |
1514 | } else if (property == dev->mode_config.tv_mode_property) { | 1512 | } else if (property == dev->mode_config.tv_mode_property) { |
1515 | if (val >= NUM_TV_MODES) { | 1513 | if (val >= ARRAY_SIZE(tv_modes)) { |
1516 | ret = -EINVAL; | 1514 | ret = -EINVAL; |
1517 | goto out; | 1515 | goto out; |
1518 | } | 1516 | } |
@@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev) | |||
1693 | connector->doublescan_allowed = false; | 1691 | connector->doublescan_allowed = false; |
1694 | 1692 | ||
1695 | /* Create TV properties then attach current values */ | 1693 | /* Create TV properties then attach current values */ |
1696 | tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, | 1694 | tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), |
1697 | GFP_KERNEL); | 1695 | GFP_KERNEL); |
1698 | if (!tv_format_names) | 1696 | if (!tv_format_names) |
1699 | goto out; | 1697 | goto out; |
1700 | for (i = 0; i < NUM_TV_MODES; i++) | 1698 | for (i = 0; i < ARRAY_SIZE(tv_modes); i++) |
1701 | tv_format_names[i] = tv_modes[i].name; | 1699 | tv_format_names[i] = tv_modes[i].name; |
1702 | drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); | 1700 | drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); |
1703 | 1701 | ||
1704 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, | 1702 | drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, |
1705 | initial_mode); | 1703 | initial_mode); |
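
The TV hunks drop the hand-rolled NUM_TV_MODES macro in favour of ARRAY_SIZE(), which derives the element count from the array itself and so cannot drift out of sync with the table. A userspace sketch of the same idiom with a made-up three-entry table (the kernel's version additionally rejects non-array arguments at compile time):

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct tv_mode { const char *name; int clock; };

    /* Illustrative entries only; the real table carries full timing data. */
    static const struct tv_mode tv_modes[] = {
        { "NTSC-M",  108000 },
        { "PAL-B",   108000 },
        { "480p@60", 108000 },
    };

    int main(void)
    {
        for (size_t i = 0; i < ARRAY_SIZE(tv_modes); i++)   /* count tracks the table automatically */
            printf("%zu: %s\n", i, tv_modes[i].name);
        return 0;
    }
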
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a3d25f419853..95f8b3a3c43d 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -335,6 +335,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev, | |||
335 | return snprintf(buf, PAGE_SIZE, "%s\n", | 335 | return snprintf(buf, PAGE_SIZE, "%s\n", |
336 | (cp == PM_PROFILE_AUTO) ? "auto" : | 336 | (cp == PM_PROFILE_AUTO) ? "auto" : |
337 | (cp == PM_PROFILE_LOW) ? "low" : | 337 | (cp == PM_PROFILE_LOW) ? "low" : |
338 | (cp == PM_PROFILE_MID) ? "mid" : | ||
338 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | 339 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); |
339 | } | 340 | } |
340 | 341 | ||
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 4917af96bae1..2ed435bd4b6c 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -539,9 +539,13 @@ static int mmci_get_cd(struct mmc_host *mmc) | |||
539 | if (host->gpio_cd == -ENOSYS) | 539 | if (host->gpio_cd == -ENOSYS) |
540 | status = host->plat->status(mmc_dev(host->mmc)); | 540 | status = host->plat->status(mmc_dev(host->mmc)); |
541 | else | 541 | else |
542 | status = gpio_get_value(host->gpio_cd); | 542 | status = !gpio_get_value(host->gpio_cd); |
543 | 543 | ||
544 | return !status; | 544 | /* |
545 | * Use positive logic throughout - status is zero for no card, | ||
546 | * non-zero for card inserted. | ||
547 | */ | ||
548 | return status; | ||
545 | } | 549 | } |
546 | 550 | ||
547 | static const struct mmc_host_ops mmci_ops = { | 551 | static const struct mmc_host_ops mmci_ops = { |
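
The mmci fix folds the active-low inversion into the GPIO read itself, so mmci_get_cd() works in positive logic end to end: zero means no card, non-zero means card present. A tiny sketch of that convention with a hypothetical gpio_read() stub:

    #include <stdio.h>

    /* Hypothetical active-low card-detect line: 0 = card inserted, 1 = empty slot. */
    static int gpio_read(void)
    {
        return 0;   /* pretend a card is inserted */
    }

    static int get_cd(void)
    {
        /* Positive logic throughout: zero for no card, non-zero for card inserted. */
        return !gpio_read();
    }

    int main(void)
    {
        printf("card %s\n", get_cd() ? "present" : "absent");
        return 0;
    }
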
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h index 8bd23687c530..bb0872a63315 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x.h | |||
@@ -1062,6 +1062,10 @@ struct bnx2x { | |||
1062 | 1062 | ||
1063 | /* used to synchronize stats collecting */ | 1063 | /* used to synchronize stats collecting */ |
1064 | int stats_state; | 1064 | int stats_state; |
1065 | |||
1066 | /* used for synchronization of concurrent threads statistics handling */ | ||
1067 | spinlock_t stats_lock; | ||
1068 | |||
1065 | /* used by dmae command loader */ | 1069 | /* used by dmae command loader */ |
1066 | struct dmae_command stats_dmae; | 1070 | struct dmae_command stats_dmae; |
1067 | int executer_idx; | 1071 | int executer_idx; |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 57ff5b3bcce6..46167c081727 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c | |||
@@ -57,8 +57,8 @@ | |||
57 | #include "bnx2x_init_ops.h" | 57 | #include "bnx2x_init_ops.h" |
58 | #include "bnx2x_dump.h" | 58 | #include "bnx2x_dump.h" |
59 | 59 | ||
60 | #define DRV_MODULE_VERSION "1.52.53-1" | 60 | #define DRV_MODULE_VERSION "1.52.53-2" |
61 | #define DRV_MODULE_RELDATE "2010/18/04" | 61 | #define DRV_MODULE_RELDATE "2010/21/07" |
62 | #define BNX2X_BC_VER 0x040200 | 62 | #define BNX2X_BC_VER 0x040200 |
63 | 63 | ||
64 | #include <linux/firmware.h> | 64 | #include <linux/firmware.h> |
@@ -3789,6 +3789,8 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) | |||
3789 | struct eth_query_ramrod_data ramrod_data = {0}; | 3789 | struct eth_query_ramrod_data ramrod_data = {0}; |
3790 | int i, rc; | 3790 | int i, rc; |
3791 | 3791 | ||
3792 | spin_lock_bh(&bp->stats_lock); | ||
3793 | |||
3792 | ramrod_data.drv_counter = bp->stats_counter++; | 3794 | ramrod_data.drv_counter = bp->stats_counter++; |
3793 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | 3795 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; |
3794 | for_each_queue(bp, i) | 3796 | for_each_queue(bp, i) |
@@ -3802,6 +3804,8 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) | |||
3802 | bp->spq_left++; | 3804 | bp->spq_left++; |
3803 | bp->stats_pending = 1; | 3805 | bp->stats_pending = 1; |
3804 | } | 3806 | } |
3807 | |||
3808 | spin_unlock_bh(&bp->stats_lock); | ||
3805 | } | 3809 | } |
3806 | } | 3810 | } |
3807 | 3811 | ||
@@ -4367,6 +4371,14 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
4367 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); | 4371 | struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); |
4368 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 4372 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
4369 | int i; | 4373 | int i; |
4374 | u16 cur_stats_counter; | ||
4375 | |||
4376 | /* Make sure we use the value of the counter | ||
4377 | * used for sending the last stats ramrod. | ||
4378 | */ | ||
4379 | spin_lock_bh(&bp->stats_lock); | ||
4380 | cur_stats_counter = bp->stats_counter - 1; | ||
4381 | spin_unlock_bh(&bp->stats_lock); | ||
4370 | 4382 | ||
4371 | memcpy(&(fstats->total_bytes_received_hi), | 4383 | memcpy(&(fstats->total_bytes_received_hi), |
4372 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), | 4384 | &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), |
@@ -4394,25 +4406,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
4394 | u32 diff; | 4406 | u32 diff; |
4395 | 4407 | ||
4396 | /* are storm stats valid? */ | 4408 | /* are storm stats valid? */ |
4397 | if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) != | 4409 | if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { |
4398 | bp->stats_counter) { | ||
4399 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" | 4410 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" |
4400 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", | 4411 | " xstorm counter (0x%x) != stats_counter (0x%x)\n", |
4401 | i, xclient->stats_counter, bp->stats_counter); | 4412 | i, xclient->stats_counter, cur_stats_counter + 1); |
4402 | return -1; | 4413 | return -1; |
4403 | } | 4414 | } |
4404 | if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) != | 4415 | if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { |
4405 | bp->stats_counter) { | ||
4406 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" | 4416 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" |
4407 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", | 4417 | " tstorm counter (0x%x) != stats_counter (0x%x)\n", |
4408 | i, tclient->stats_counter, bp->stats_counter); | 4418 | i, tclient->stats_counter, cur_stats_counter + 1); |
4409 | return -2; | 4419 | return -2; |
4410 | } | 4420 | } |
4411 | if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) != | 4421 | if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { |
4412 | bp->stats_counter) { | ||
4413 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" | 4422 | DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" |
4414 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", | 4423 | " ustorm counter (0x%x) != stats_counter (0x%x)\n", |
4415 | i, uclient->stats_counter, bp->stats_counter); | 4424 | i, uclient->stats_counter, cur_stats_counter + 1); |
4416 | return -4; | 4425 | return -4; |
4417 | } | 4426 | } |
4418 | 4427 | ||
@@ -4849,16 +4858,18 @@ static const struct { | |||
4849 | 4858 | ||
4850 | static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 4859 | static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
4851 | { | 4860 | { |
4852 | enum bnx2x_stats_state state = bp->stats_state; | 4861 | enum bnx2x_stats_state state; |
4853 | 4862 | ||
4854 | if (unlikely(bp->panic)) | 4863 | if (unlikely(bp->panic)) |
4855 | return; | 4864 | return; |
4856 | 4865 | ||
4857 | bnx2x_stats_stm[state][event].action(bp); | 4866 | /* Protect a state change flow */ |
4867 | spin_lock_bh(&bp->stats_lock); | ||
4868 | state = bp->stats_state; | ||
4858 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 4869 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
4870 | spin_unlock_bh(&bp->stats_lock); | ||
4859 | 4871 | ||
4860 | /* Make sure the state has been "changed" */ | 4872 | bnx2x_stats_stm[state][event].action(bp); |
4861 | smp_wmb(); | ||
4862 | 4873 | ||
4863 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 4874 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
4864 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 4875 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
@@ -9908,6 +9919,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9908 | 9919 | ||
9909 | mutex_init(&bp->port.phy_mutex); | 9920 | mutex_init(&bp->port.phy_mutex); |
9910 | mutex_init(&bp->fw_mb_mutex); | 9921 | mutex_init(&bp->fw_mb_mutex); |
9922 | spin_lock_init(&bp->stats_lock); | ||
9911 | #ifdef BCM_CNIC | 9923 | #ifdef BCM_CNIC |
9912 | mutex_init(&bp->cnic_mutex); | 9924 | mutex_init(&bp->cnic_mutex); |
9913 | #endif | 9925 | #endif |
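
The bnx2x changes put stats_counter and stats_state behind the new stats_lock: the post path increments the counter while holding it, and the update path snapshots cur_stats_counter = stats_counter - 1 under the same lock before comparing against the per-storm counters. The sketch below shows that snapshot-under-lock pattern in userspace, using a pthread mutex as a stand-in for the kernel spinlock; the names and values are illustrative only.

    /* build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdint.h>

    static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint16_t stats_counter;

    /* Post path: bump the counter while holding the lock. */
    static void stats_post(void)
    {
        pthread_mutex_lock(&stats_lock);
        stats_counter++;
        pthread_mutex_unlock(&stats_lock);
    }

    /* Update path: snapshot the value used for the last post under the lock,
     * then compare against it without holding the lock. */
    static int stats_update(uint16_t hw_counter)
    {
        uint16_t cur;

        pthread_mutex_lock(&stats_lock);
        cur = (uint16_t)(stats_counter - 1);
        pthread_mutex_unlock(&stats_lock);

        if (hw_counter != cur) {
            printf("stale stats: hw 0x%x != snapshot 0x%x\n", hw_counter, cur);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        stats_post();                       /* counter becomes 1 */
        printf("%d\n", stats_update(0));    /* hardware caught up with counter - 1 -> 0 */
        printf("%d\n", stats_update(5));    /* mismatch -> -1 */
        return 0;
    }
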
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index df483076eda6..8d7dfd2f1e90 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -822,7 +822,7 @@ static int rlb_initialize(struct bonding *bond) | |||
822 | 822 | ||
823 | /*initialize packet type*/ | 823 | /*initialize packet type*/ |
824 | pk_type->type = cpu_to_be16(ETH_P_ARP); | 824 | pk_type->type = cpu_to_be16(ETH_P_ARP); |
825 | pk_type->dev = NULL; | 825 | pk_type->dev = bond->dev; |
826 | pk_type->func = rlb_arp_recv; | 826 | pk_type->func = rlb_arp_recv; |
827 | 827 | ||
828 | /* register to receive ARPs */ | 828 | /* register to receive ARPs */ |
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 1d973db27c32..d7de376d7178 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -1022,7 +1022,7 @@ static const struct net_device_ops lance_netdev_ops = { | |||
1022 | .ndo_set_mac_address = eth_mac_addr, | 1022 | .ndo_set_mac_address = eth_mac_addr, |
1023 | }; | 1023 | }; |
1024 | 1024 | ||
1025 | static int __init dec_lance_probe(struct device *bdev, const int type) | 1025 | static int __devinit dec_lance_probe(struct device *bdev, const int type) |
1026 | { | 1026 | { |
1027 | static unsigned version_printed; | 1027 | static unsigned version_printed; |
1028 | static const char fmt[] = "declance%d"; | 1028 | static const char fmt[] = "declance%d"; |
@@ -1326,7 +1326,7 @@ static void __exit dec_lance_platform_remove(void) | |||
1326 | } | 1326 | } |
1327 | 1327 | ||
1328 | #ifdef CONFIG_TC | 1328 | #ifdef CONFIG_TC |
1329 | static int __init dec_lance_tc_probe(struct device *dev); | 1329 | static int __devinit dec_lance_tc_probe(struct device *dev); |
1330 | static int __exit dec_lance_tc_remove(struct device *dev); | 1330 | static int __exit dec_lance_tc_remove(struct device *dev); |
1331 | 1331 | ||
1332 | static const struct tc_device_id dec_lance_tc_table[] = { | 1332 | static const struct tc_device_id dec_lance_tc_table[] = { |
@@ -1345,7 +1345,7 @@ static struct tc_driver dec_lance_tc_driver = { | |||
1345 | }, | 1345 | }, |
1346 | }; | 1346 | }; |
1347 | 1347 | ||
1348 | static int __init dec_lance_tc_probe(struct device *dev) | 1348 | static int __devinit dec_lance_tc_probe(struct device *dev) |
1349 | { | 1349 | { |
1350 | int status = dec_lance_probe(dev, PMAD_LANCE); | 1350 | int status = dec_lance_probe(dev, PMAD_LANCE); |
1351 | if (!status) | 1351 | if (!status) |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 3881918f5382..cea37e0837ff 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -1722,6 +1722,15 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1722 | u16 eeprom_apme_mask = IGB_EEPROM_APME; | 1722 | u16 eeprom_apme_mask = IGB_EEPROM_APME; |
1723 | u32 part_num; | 1723 | u32 part_num; |
1724 | 1724 | ||
1725 | /* Catch broken hardware that put the wrong VF device ID in | ||
1726 | * the PCIe SR-IOV capability. | ||
1727 | */ | ||
1728 | if (pdev->is_virtfn) { | ||
1729 | WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", | ||
1730 | pci_name(pdev), pdev->vendor, pdev->device); | ||
1731 | return -EINVAL; | ||
1732 | } | ||
1733 | |||
1725 | err = pci_enable_device_mem(pdev); | 1734 | err = pci_enable_device_mem(pdev); |
1726 | if (err) | 1735 | if (err) |
1727 | return err; | 1736 | return err; |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 7b5d9764f317..74d9b6df3029 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -6492,6 +6492,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6492 | #endif | 6492 | #endif |
6493 | u32 part_num, eec; | 6493 | u32 part_num, eec; |
6494 | 6494 | ||
6495 | /* Catch broken hardware that put the wrong VF device ID in | ||
6496 | * the PCIe SR-IOV capability. | ||
6497 | */ | ||
6498 | if (pdev->is_virtfn) { | ||
6499 | WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", | ||
6500 | pci_name(pdev), pdev->vendor, pdev->device); | ||
6501 | return -EINVAL; | ||
6502 | } | ||
6503 | |||
6495 | err = pci_enable_device_mem(pdev); | 6504 | err = pci_enable_device_mem(pdev); |
6496 | if (err) | 6505 | if (err) |
6497 | return err; | 6506 | return err; |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 87e8d4cb4057..f15fe2cf72ae 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -499,7 +499,7 @@ static const struct net_device_ops macvlan_netdev_ops = { | |||
499 | .ndo_validate_addr = eth_validate_addr, | 499 | .ndo_validate_addr = eth_validate_addr, |
500 | }; | 500 | }; |
501 | 501 | ||
502 | static void macvlan_setup(struct net_device *dev) | 502 | void macvlan_common_setup(struct net_device *dev) |
503 | { | 503 | { |
504 | ether_setup(dev); | 504 | ether_setup(dev); |
505 | 505 | ||
@@ -508,6 +508,12 @@ static void macvlan_setup(struct net_device *dev) | |||
508 | dev->destructor = free_netdev; | 508 | dev->destructor = free_netdev; |
509 | dev->header_ops = &macvlan_hard_header_ops, | 509 | dev->header_ops = &macvlan_hard_header_ops, |
510 | dev->ethtool_ops = &macvlan_ethtool_ops; | 510 | dev->ethtool_ops = &macvlan_ethtool_ops; |
511 | } | ||
512 | EXPORT_SYMBOL_GPL(macvlan_common_setup); | ||
513 | |||
514 | static void macvlan_setup(struct net_device *dev) | ||
515 | { | ||
516 | macvlan_common_setup(dev); | ||
511 | dev->tx_queue_len = 0; | 517 | dev->tx_queue_len = 0; |
512 | } | 518 | } |
513 | 519 | ||
@@ -705,7 +711,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops) | |||
705 | /* common fields */ | 711 | /* common fields */ |
706 | ops->priv_size = sizeof(struct macvlan_dev); | 712 | ops->priv_size = sizeof(struct macvlan_dev); |
707 | ops->get_tx_queues = macvlan_get_tx_queues; | 713 | ops->get_tx_queues = macvlan_get_tx_queues; |
708 | ops->setup = macvlan_setup; | ||
709 | ops->validate = macvlan_validate; | 714 | ops->validate = macvlan_validate; |
710 | ops->maxtype = IFLA_MACVLAN_MAX; | 715 | ops->maxtype = IFLA_MACVLAN_MAX; |
711 | ops->policy = macvlan_policy; | 716 | ops->policy = macvlan_policy; |
@@ -719,6 +724,7 @@ EXPORT_SYMBOL_GPL(macvlan_link_register); | |||
719 | 724 | ||
720 | static struct rtnl_link_ops macvlan_link_ops = { | 725 | static struct rtnl_link_ops macvlan_link_ops = { |
721 | .kind = "macvlan", | 726 | .kind = "macvlan", |
727 | .setup = macvlan_setup, | ||
722 | .newlink = macvlan_newlink, | 728 | .newlink = macvlan_newlink, |
723 | .dellink = macvlan_dellink, | 729 | .dellink = macvlan_dellink, |
724 | }; | 730 | }; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a8a94e2f6ddc..ff02b836c3c4 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -180,11 +180,18 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
180 | { | 180 | { |
181 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); | 181 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); |
182 | if (!q) | 182 | if (!q) |
183 | return -ENOLINK; | 183 | goto drop; |
184 | |||
185 | if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len) | ||
186 | goto drop; | ||
184 | 187 | ||
185 | skb_queue_tail(&q->sk.sk_receive_queue, skb); | 188 | skb_queue_tail(&q->sk.sk_receive_queue, skb); |
186 | wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); | 189 | wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); |
187 | return 0; | 190 | return NET_RX_SUCCESS; |
191 | |||
192 | drop: | ||
193 | kfree_skb(skb); | ||
194 | return NET_RX_DROP; | ||
188 | } | 195 | } |
189 | 196 | ||
190 | /* | 197 | /* |
@@ -235,8 +242,15 @@ static void macvtap_dellink(struct net_device *dev, | |||
235 | macvlan_dellink(dev, head); | 242 | macvlan_dellink(dev, head); |
236 | } | 243 | } |
237 | 244 | ||
245 | static void macvtap_setup(struct net_device *dev) | ||
246 | { | ||
247 | macvlan_common_setup(dev); | ||
248 | dev->tx_queue_len = TUN_READQ_SIZE; | ||
249 | } | ||
250 | |||
238 | static struct rtnl_link_ops macvtap_link_ops __read_mostly = { | 251 | static struct rtnl_link_ops macvtap_link_ops __read_mostly = { |
239 | .kind = "macvtap", | 252 | .kind = "macvtap", |
253 | .setup = macvtap_setup, | ||
240 | .newlink = macvtap_newlink, | 254 | .newlink = macvtap_newlink, |
241 | .dellink = macvtap_dellink, | 255 | .dellink = macvtap_dellink, |
242 | }; | 256 | }; |
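
Taken together, the macvlan and macvtap hunks factor the shared device setup into macvlan_common_setup() and let each variant's setup wrapper pick its own tx_queue_len (0 for macvlan, TUN_READQ_SIZE for macvtap, which macvtap_forward now also uses as its drop threshold). A minimal sketch of that shared-setup-plus-override shape; struct device_cfg and the numbers are simplifications for the example, not the net_device API:

    #include <stdio.h>

    /* Simplified stand-in for struct net_device. */
    struct device_cfg {
        int tx_queue_len;
        int mtu;
    };

    /* Fields every variant shares. */
    static void common_setup(struct device_cfg *dev)
    {
        dev->mtu = 1500;
    }

    static void macvlan_setup(struct device_cfg *dev)
    {
        common_setup(dev);
        dev->tx_queue_len = 0;      /* macvlan: no extra queueing */
    }

    static void macvtap_setup(struct device_cfg *dev)
    {
        common_setup(dev);
        dev->tx_queue_len = 500;    /* macvtap: bounded read queue, TUN_READQ_SIZE-style value */
    }

    int main(void)
    {
        struct device_cfg a, b;

        macvlan_setup(&a);
        macvtap_setup(&b);
        printf("macvlan qlen=%d, macvtap qlen=%d\n", a.tx_queue_len, b.tx_queue_len);
        return 0;
    }
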
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 5e52c75892df..7f3a53dcc6ef 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -65,7 +65,7 @@ static int debug_level = ERR_DBG; | |||
65 | 65 | ||
66 | /* DEBUG message print. */ | 66 | /* DEBUG message print. */ |
67 | #define DBG_PRINT(dbg_level, fmt, args...) do { \ | 67 | #define DBG_PRINT(dbg_level, fmt, args...) do { \ |
68 | if (dbg_level >= debug_level) \ | 68 | if (dbg_level <= debug_level) \ |
69 | pr_info(fmt, ##args); \ | 69 | pr_info(fmt, ##args); \ |
70 | } while (0) | 70 | } while (0) |
71 | 71 | ||
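
The s2io fix flips the comparison so a message prints only when its level is at or below the configured threshold, i.e. lower numbers are more severe and raising debug_level reveals more output. A standalone sketch of that severity filter; the level names and values here are assumptions for the example:

    #include <stdio.h>

    /* Lower number = more severe, mirroring the ERR_DBG-style levels. */
    enum { ERR_DBG = 0, INFO_DBG = 1, TRACE_DBG = 2 };

    static int debug_level = INFO_DBG;      /* runtime threshold */

    #define DBG_PRINT(dbg_level, fmt, ...) do {             \
            if ((dbg_level) <= debug_level)                 \
                    printf(fmt, ##__VA_ARGS__);             \
    } while (0)

    int main(void)
    {
        DBG_PRINT(ERR_DBG,   "error: always shown at this threshold\n");
        DBG_PRINT(INFO_DBG,  "info: shown, level equals threshold\n");
        DBG_PRINT(TRACE_DBG, "trace: suppressed, level above threshold\n");
        return 0;
    }
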
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 6ad6fe706312..63042596f0cf 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -736,8 +736,18 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, | |||
736 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | 736 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
737 | else if (sinfo->gso_type & SKB_GSO_UDP) | 737 | else if (sinfo->gso_type & SKB_GSO_UDP) |
738 | gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; | 738 | gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; |
739 | else | 739 | else { |
740 | BUG(); | 740 | printk(KERN_ERR "tun: unexpected GSO type: " |
741 | "0x%x, gso_size %d, hdr_len %d\n", | ||
742 | sinfo->gso_type, gso.gso_size, | ||
743 | gso.hdr_len); | ||
744 | print_hex_dump(KERN_ERR, "tun: ", | ||
745 | DUMP_PREFIX_NONE, | ||
746 | 16, 1, skb->head, | ||
747 | min((int)gso.hdr_len, 64), true); | ||
748 | WARN_ON_ONCE(1); | ||
749 | return -EINVAL; | ||
750 | } | ||
741 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | 751 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) |
742 | gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | 752 | gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; |
743 | } else | 753 | } else |
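Instead of calling BUG() on an unexpected GSO type, the tun change logs the offending header, warns once, and fails only the affected transfer with -EINVAL. The sketch below mirrors that warn-once-then-return-error pattern in plain C; WARN_ONCE and handle_frame here are illustrative stand-ins, not the kernel macros.

    #include <stdio.h>
    #include <errno.h>

    /* Emit a diagnostic only the first time this expansion site trips. */
    #define WARN_ONCE(fmt, ...) do {                              \
            static int warned;                                    \
            if (!warned) {                                        \
                warned = 1;                                       \
                fprintf(stderr, "warning: " fmt, ##__VA_ARGS__);  \
            }                                                     \
        } while (0)

    /* Reject an unknown type instead of aborting the whole program. */
    static int handle_frame(int gso_type)
    {
        switch (gso_type) {
        case 1:        /* e.g. TCPv4 segmentation */
        case 2:        /* e.g. TCPv6 segmentation */
        case 3:        /* e.g. UDP fragmentation */
            return 0;
        default:
            WARN_ONCE("unexpected GSO type 0x%x\n", gso_type);
            return -EINVAL;        /* fail this frame only */
        }
    }

    int main(void)
    {
        return handle_frame(0x40) == -EINVAL ? 0 : 1;
    }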
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 2d7c96d7e865..eb80243e22df 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h | |||
@@ -152,6 +152,7 @@ enum { | |||
152 | /* Device IDs */ | 152 | /* Device IDs */ |
153 | USB_DEVICE_ID_I6050 = 0x0186, | 153 | USB_DEVICE_ID_I6050 = 0x0186, |
154 | USB_DEVICE_ID_I6050_2 = 0x0188, | 154 | USB_DEVICE_ID_I6050_2 = 0x0188, |
155 | USB_DEVICE_ID_I6250 = 0x0187, | ||
155 | }; | 156 | }; |
156 | 157 | ||
157 | 158 | ||
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 0d5081d77dc0..d3365ac85dde 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c | |||
@@ -491,6 +491,7 @@ int i2400mu_probe(struct usb_interface *iface, | |||
491 | switch (id->idProduct) { | 491 | switch (id->idProduct) { |
492 | case USB_DEVICE_ID_I6050: | 492 | case USB_DEVICE_ID_I6050: |
493 | case USB_DEVICE_ID_I6050_2: | 493 | case USB_DEVICE_ID_I6050_2: |
494 | case USB_DEVICE_ID_I6250: | ||
494 | i2400mu->i6050 = 1; | 495 | i2400mu->i6050 = 1; |
495 | break; | 496 | break; |
496 | default: | 497 | default: |
@@ -739,6 +740,7 @@ static | |||
739 | struct usb_device_id i2400mu_id_table[] = { | 740 | struct usb_device_id i2400mu_id_table[] = { |
740 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, | 741 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, |
741 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, | 742 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, |
743 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, | ||
742 | { USB_DEVICE(0x8086, 0x0181) }, | 744 | { USB_DEVICE(0x8086, 0x0181) }, |
743 | { USB_DEVICE(0x8086, 0x1403) }, | 745 | { USB_DEVICE(0x8086, 0x1403) }, |
744 | { USB_DEVICE(0x8086, 0x1405) }, | 746 | { USB_DEVICE(0x8086, 0x1405) }, |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index ca6065b71b46..e3e52913d83a 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -844,9 +844,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) | |||
844 | int dma_type; | 844 | int dma_type; |
845 | 845 | ||
846 | if (edma) | 846 | if (edma) |
847 | dma_type = DMA_FROM_DEVICE; | ||
848 | else | ||
849 | dma_type = DMA_BIDIRECTIONAL; | 847 | dma_type = DMA_BIDIRECTIONAL; |
848 | else | ||
849 | dma_type = DMA_FROM_DEVICE; | ||
850 | 850 | ||
851 | qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; | 851 | qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; |
852 | spin_lock_bh(&sc->rx.rxbuflock); | 852 | spin_lock_bh(&sc->rx.rxbuflock); |
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index df4532e91b1a..f370476d5417 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -178,7 +178,6 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, | |||
178 | unsigned long val, | 178 | unsigned long val, |
179 | struct cpufreq_freqs *freqs) | 179 | struct cpufreq_freqs *freqs) |
180 | { | 180 | { |
181 | #warning "it's not clear if this is right since the core CPU (N) clock has no effect on the memory (L) clock" | ||
182 | switch (val) { | 181 | switch (val) { |
183 | case CPUFREQ_PRECHANGE: | 182 | case CPUFREQ_PRECHANGE: |
184 | if (freqs->new > freqs->old) { | 183 | if (freqs->new > freqs->old) { |
@@ -186,7 +185,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, | |||
186 | "pre-updating\n", | 185 | "pre-updating\n", |
187 | freqs->new / 1000, (freqs->new / 100) % 10, | 186 | freqs->new / 1000, (freqs->new / 100) % 10, |
188 | freqs->old / 1000, (freqs->old / 100) % 10); | 187 | freqs->old / 1000, (freqs->old / 100) % 10); |
189 | pxa2xx_pcmcia_set_mcxx(skt, freqs->new); | 188 | pxa2xx_pcmcia_set_timing(skt); |
190 | } | 189 | } |
191 | break; | 190 | break; |
192 | 191 | ||
@@ -196,7 +195,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, | |||
196 | "post-updating\n", | 195 | "post-updating\n", |
197 | freqs->new / 1000, (freqs->new / 100) % 10, | 196 | freqs->new / 1000, (freqs->new / 100) % 10, |
198 | freqs->old / 1000, (freqs->old / 100) % 10); | 197 | freqs->old / 1000, (freqs->old / 100) % 10); |
199 | pxa2xx_pcmcia_set_mcxx(skt, freqs->new); | 198 | pxa2xx_pcmcia_set_timing(skt); |
200 | } | 199 | } |
201 | break; | 200 | break; |
202 | } | 201 | } |
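The pxa2xx hunk recomputes the socket timing around a CPU frequency transition: before the switch when the clock is about to speed up, after the switch when it has slowed down, so the memory timing is never too tight for the current clock. A hedged standalone sketch of that ordering decision follows; retune_timing and the frequencies are made up for illustration.

    #include <stdio.h>

    enum freq_phase { FREQ_PRECHANGE, FREQ_POSTCHANGE };

    /* Stand-in for the socket timing update done by the driver. */
    static void retune_timing(unsigned int khz)
    {
        printf("retune socket timing for %u.%u MHz\n",
               khz / 1000, (khz / 100) % 10);
    }

    /*
     * Keep the timing safe across a clock change: retune before the clock
     * speeds up, and only after it has slowed down.
     */
    static void frequency_change(enum freq_phase phase,
                                 unsigned int old_khz, unsigned int new_khz)
    {
        switch (phase) {
        case FREQ_PRECHANGE:
            if (new_khz > old_khz)
                retune_timing(new_khz);
            break;
        case FREQ_POSTCHANGE:
            if (new_khz < old_khz)
                retune_timing(new_khz);
            break;
        }
    }

    int main(void)
    {
        frequency_change(FREQ_PRECHANGE, 208000, 416000);   /* speeding up: retune now */
        frequency_change(FREQ_POSTCHANGE, 416000, 208000);  /* slowed down: retune now */
        return 0;
    }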
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c index 2afbeec8b791..84d3c43cf2bc 100644 --- a/drivers/power/ds2782_battery.c +++ b/drivers/power/ds2782_battery.c | |||
@@ -43,10 +43,9 @@ | |||
43 | struct ds278x_info; | 43 | struct ds278x_info; |
44 | 44 | ||
45 | struct ds278x_battery_ops { | 45 | struct ds278x_battery_ops { |
46 | int (*get_current)(struct ds278x_info *info, int *current_uA); | 46 | int (*get_battery_current)(struct ds278x_info *info, int *current_uA); |
47 | int (*get_voltage)(struct ds278x_info *info, int *voltage_uA); | 47 | int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uA); |
48 | int (*get_capacity)(struct ds278x_info *info, int *capacity_uA); | 48 | int (*get_battery_capacity)(struct ds278x_info *info, int *capacity_uA); |
49 | |||
50 | }; | 49 | }; |
51 | 50 | ||
52 | #define to_ds278x_info(x) container_of(x, struct ds278x_info, battery) | 51 | #define to_ds278x_info(x) container_of(x, struct ds278x_info, battery) |
@@ -213,11 +212,11 @@ static int ds278x_get_status(struct ds278x_info *info, int *status) | |||
213 | int current_uA; | 212 | int current_uA; |
214 | int capacity; | 213 | int capacity; |
215 | 214 | ||
216 | err = info->ops->get_current(info, ¤t_uA); | 215 | err = info->ops->get_battery_current(info, ¤t_uA); |
217 | if (err) | 216 | if (err) |
218 | return err; | 217 | return err; |
219 | 218 | ||
220 | err = info->ops->get_capacity(info, &capacity); | 219 | err = info->ops->get_battery_capacity(info, &capacity); |
221 | if (err) | 220 | if (err) |
222 | return err; | 221 | return err; |
223 | 222 | ||
@@ -246,15 +245,15 @@ static int ds278x_battery_get_property(struct power_supply *psy, | |||
246 | break; | 245 | break; |
247 | 246 | ||
248 | case POWER_SUPPLY_PROP_CAPACITY: | 247 | case POWER_SUPPLY_PROP_CAPACITY: |
249 | ret = info->ops->get_capacity(info, &val->intval); | 248 | ret = info->ops->get_battery_capacity(info, &val->intval); |
250 | break; | 249 | break; |
251 | 250 | ||
252 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | 251 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
253 | ret = info->ops->get_voltage(info, &val->intval); | 252 | ret = info->ops->get_battery_voltage(info, &val->intval); |
254 | break; | 253 | break; |
255 | 254 | ||
256 | case POWER_SUPPLY_PROP_CURRENT_NOW: | 255 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
257 | ret = info->ops->get_current(info, &val->intval); | 256 | ret = info->ops->get_battery_current(info, &val->intval); |
258 | break; | 257 | break; |
259 | 258 | ||
260 | case POWER_SUPPLY_PROP_TEMP: | 259 | case POWER_SUPPLY_PROP_TEMP: |
@@ -307,14 +306,14 @@ enum ds278x_num_id { | |||
307 | 306 | ||
308 | static struct ds278x_battery_ops ds278x_ops[] = { | 307 | static struct ds278x_battery_ops ds278x_ops[] = { |
309 | [DS2782] = { | 308 | [DS2782] = { |
310 | .get_current = ds2782_get_current, | 309 | .get_battery_current = ds2782_get_current, |
311 | .get_voltage = ds2782_get_voltage, | 310 | .get_battery_voltage = ds2782_get_voltage, |
312 | .get_capacity = ds2782_get_capacity, | 311 | .get_battery_capacity = ds2782_get_capacity, |
313 | }, | 312 | }, |
314 | [DS2786] = { | 313 | [DS2786] = { |
315 | .get_current = ds2786_get_current, | 314 | .get_battery_current = ds2786_get_current, |
316 | .get_voltage = ds2786_get_voltage, | 315 | .get_battery_voltage = ds2786_get_voltage, |
317 | .get_capacity = ds2786_get_capacity, | 316 | .get_battery_capacity = ds2786_get_capacity, |
318 | } | 317 | } |
319 | }; | 318 | }; |
320 | 319 | ||
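The ds2782 rename avoids a clash with the kernel-wide get_current() macro while keeping the same design: one method table of function pointers per chip variant. The sketch below shows that ops-table pattern in isolation; the chip names and readings are invented, and this is not the driver's code.

    #include <stdio.h>

    struct batt_info;                  /* opaque per-chip context */

    /* Per-chip method table; the prefixed names avoid macro collisions. */
    struct batt_ops {
        int (*get_battery_current)(struct batt_info *info, int *ua);
        int (*get_battery_voltage)(struct batt_info *info, int *uv);
    };

    static int chip_a_current(struct batt_info *info, int *ua)
    {
        (void)info;
        *ua = 120000;                  /* made-up reading */
        return 0;
    }

    static int chip_a_voltage(struct batt_info *info, int *uv)
    {
        (void)info;
        *uv = 3700000;                 /* made-up reading */
        return 0;
    }

    static const struct batt_ops chip_a_ops = {
        .get_battery_current = chip_a_current,
        .get_battery_voltage = chip_a_voltage,
    };

    int main(void)
    {
        int ua, uv;

        /* Callers always go through the table, so adding a chip variant
         * only means adding one more ops structure. */
        if (!chip_a_ops.get_battery_current(NULL, &ua) &&
            !chip_a_ops.get_battery_voltage(NULL, &uv))
            printf("%d uA, %d uV\n", ua, uv);
        return 0;
    }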
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 7b14a67bdca2..11790990277a 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c | |||
@@ -286,7 +286,7 @@ static int ab3100_list_voltage_regulator(struct regulator_dev *reg, | |||
286 | { | 286 | { |
287 | struct ab3100_regulator *abreg = reg->reg_data; | 287 | struct ab3100_regulator *abreg = reg->reg_data; |
288 | 288 | ||
289 | if (selector > abreg->voltages_len) | 289 | if (selector >= abreg->voltages_len) |
290 | return -EINVAL; | 290 | return -EINVAL; |
291 | return abreg->typ_voltages[selector]; | 291 | return abreg->typ_voltages[selector]; |
292 | } | 292 | } |
@@ -318,7 +318,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg) | |||
318 | regval &= 0xE0; | 318 | regval &= 0xE0; |
319 | regval >>= 5; | 319 | regval >>= 5; |
320 | 320 | ||
321 | if (regval > abreg->voltages_len) { | 321 | if (regval >= abreg->voltages_len) { |
322 | dev_err(®->dev, | 322 | dev_err(®->dev, |
323 | "regulator register %02x contains an illegal voltage setting\n", | 323 | "regulator register %02x contains an illegal voltage setting\n", |
324 | abreg->regreg); | 324 | abreg->regreg); |
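Both ab3100 hunks tighten a table lookup from '>' to '>=': an index equal to the table length is already one past the last valid entry. A minimal sketch of the corrected bounds check, using an invented voltage table, is below.

    #include <stdio.h>

    static const int typ_voltages[] = { 1200, 1500, 1800, 2500, 2750, 3300 };
    #define VOLTAGES_LEN (sizeof(typ_voltages) / sizeof(typ_voltages[0]))

    /* Valid selectors run 0 .. VOLTAGES_LEN - 1, so reject selector >= len. */
    static int list_voltage(unsigned int selector)
    {
        if (selector >= VOLTAGES_LEN)
            return -1;                 /* stands in for -EINVAL */
        return typ_voltages[selector];
    }

    int main(void)
    {
        printf("%d\n", list_voltage(5));   /* last valid entry: 3300 */
        printf("%d\n", list_voltage(6));   /* one past the end: -1 */
        return 0;
    }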
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index 14b4576281c5..8152d65220f5 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/regulator/driver.h> | 23 | #include <linux/regulator/driver.h> |
24 | #include <linux/regulator/machine.h> | 24 | #include <linux/regulator/machine.h> |
25 | #include <linux/regulator/tps6507x.h> | ||
25 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | #include <linux/mfd/tps6507x.h> | 28 | #include <linux/mfd/tps6507x.h> |
@@ -101,9 +102,12 @@ struct tps_info { | |||
101 | unsigned max_uV; | 102 | unsigned max_uV; |
102 | u8 table_len; | 103 | u8 table_len; |
103 | const u16 *table; | 104 | const u16 *table; |
105 | |||
106 | /* Does DCDC high or the low register defines output voltage? */ | ||
107 | bool defdcdc_default; | ||
104 | }; | 108 | }; |
105 | 109 | ||
106 | static const struct tps_info tps6507x_pmic_regs[] = { | 110 | static struct tps_info tps6507x_pmic_regs[] = { |
107 | { | 111 | { |
108 | .name = "VDCDC1", | 112 | .name = "VDCDC1", |
109 | .min_uV = 725000, | 113 | .min_uV = 725000, |
@@ -145,7 +149,7 @@ struct tps6507x_pmic { | |||
145 | struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; | 149 | struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; |
146 | struct tps6507x_dev *mfd; | 150 | struct tps6507x_dev *mfd; |
147 | struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; | 151 | struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; |
148 | const struct tps_info *info[TPS6507X_NUM_REGULATOR]; | 152 | struct tps_info *info[TPS6507X_NUM_REGULATOR]; |
149 | struct mutex io_lock; | 153 | struct mutex io_lock; |
150 | }; | 154 | }; |
151 | static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) | 155 | static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) |
@@ -341,10 +345,16 @@ static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev) | |||
341 | reg = TPS6507X_REG_DEFDCDC1; | 345 | reg = TPS6507X_REG_DEFDCDC1; |
342 | break; | 346 | break; |
343 | case TPS6507X_DCDC_2: | 347 | case TPS6507X_DCDC_2: |
344 | reg = TPS6507X_REG_DEFDCDC2_LOW; | 348 | if (tps->info[dcdc]->defdcdc_default) |
349 | reg = TPS6507X_REG_DEFDCDC2_HIGH; | ||
350 | else | ||
351 | reg = TPS6507X_REG_DEFDCDC2_LOW; | ||
345 | break; | 352 | break; |
346 | case TPS6507X_DCDC_3: | 353 | case TPS6507X_DCDC_3: |
347 | reg = TPS6507X_REG_DEFDCDC3_LOW; | 354 | if (tps->info[dcdc]->defdcdc_default) |
355 | reg = TPS6507X_REG_DEFDCDC3_HIGH; | ||
356 | else | ||
357 | reg = TPS6507X_REG_DEFDCDC3_LOW; | ||
348 | break; | 358 | break; |
349 | default: | 359 | default: |
350 | return -EINVAL; | 360 | return -EINVAL; |
@@ -370,10 +380,16 @@ static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, | |||
370 | reg = TPS6507X_REG_DEFDCDC1; | 380 | reg = TPS6507X_REG_DEFDCDC1; |
371 | break; | 381 | break; |
372 | case TPS6507X_DCDC_2: | 382 | case TPS6507X_DCDC_2: |
373 | reg = TPS6507X_REG_DEFDCDC2_LOW; | 383 | if (tps->info[dcdc]->defdcdc_default) |
384 | reg = TPS6507X_REG_DEFDCDC2_HIGH; | ||
385 | else | ||
386 | reg = TPS6507X_REG_DEFDCDC2_LOW; | ||
374 | break; | 387 | break; |
375 | case TPS6507X_DCDC_3: | 388 | case TPS6507X_DCDC_3: |
376 | reg = TPS6507X_REG_DEFDCDC3_LOW; | 389 | if (tps->info[dcdc]->defdcdc_default) |
390 | reg = TPS6507X_REG_DEFDCDC3_HIGH; | ||
391 | else | ||
392 | reg = TPS6507X_REG_DEFDCDC3_LOW; | ||
377 | break; | 393 | break; |
378 | default: | 394 | default: |
379 | return -EINVAL; | 395 | return -EINVAL; |
@@ -532,7 +548,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev) | |||
532 | { | 548 | { |
533 | struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); | 549 | struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); |
534 | static int desc_id; | 550 | static int desc_id; |
535 | const struct tps_info *info = &tps6507x_pmic_regs[0]; | 551 | struct tps_info *info = &tps6507x_pmic_regs[0]; |
536 | struct regulator_init_data *init_data; | 552 | struct regulator_init_data *init_data; |
537 | struct regulator_dev *rdev; | 553 | struct regulator_dev *rdev; |
538 | struct tps6507x_pmic *tps; | 554 | struct tps6507x_pmic *tps; |
@@ -569,6 +585,12 @@ int tps6507x_pmic_probe(struct platform_device *pdev) | |||
569 | for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { | 585 | for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { |
570 | /* Register the regulators */ | 586 | /* Register the regulators */ |
571 | tps->info[i] = info; | 587 | tps->info[i] = info; |
588 | if (init_data->driver_data) { | ||
589 | struct tps6507x_reg_platform_data *data = | ||
590 | init_data->driver_data; | ||
591 | tps->info[i]->defdcdc_default = data->defdcdc_default; | ||
592 | } | ||
593 | |||
572 | tps->desc[i].name = info->name; | 594 | tps->desc[i].name = info->name; |
573 | tps->desc[i].id = desc_id++; | 595 | tps->desc[i].id = desc_id++; |
574 | tps->desc[i].n_voltages = num_voltages[i]; | 596 | tps->desc[i].n_voltages = num_voltages[i]; |
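The tps6507x changes let board code state, via platform data, whether the DEFDCDC 'HIGH' or 'LOW' register actually drives the output, and both the get- and set-voltage paths consult that flag when picking the register. A compact sketch of that flag-driven register selection follows; the register names and addresses here are placeholders, not the real TPS6507x map.

    #include <stdio.h>
    #include <stdbool.h>

    #define REG_DEFDCDC2_LOW   0x10    /* placeholder register addresses */
    #define REG_DEFDCDC2_HIGH  0x11

    struct dcdc_info {
        bool defdcdc_default;          /* true: the HIGH register defines the output */
    };

    /* Pick whichever register the board's DEFDCDC pin actually selects. */
    static int dcdc2_voltage_reg(const struct dcdc_info *info)
    {
        return info->defdcdc_default ? REG_DEFDCDC2_HIGH : REG_DEFDCDC2_LOW;
    }

    int main(void)
    {
        struct dcdc_info board_low  = { .defdcdc_default = false };
        struct dcdc_info board_high = { .defdcdc_default = true };

        printf("low-pin board uses reg 0x%02x\n", dcdc2_voltage_reg(&board_low));
        printf("high-pin board uses reg 0x%02x\n", dcdc2_voltage_reg(&board_high));
        return 0;
    }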
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 723cd1fb4867..0e6ed7db9364 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c | |||
@@ -1495,7 +1495,7 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg, | |||
1495 | if (ret != 0) { | 1495 | if (ret != 0) { |
1496 | dev_err(wm8350->dev, "Failed to register regulator %d: %d\n", | 1496 | dev_err(wm8350->dev, "Failed to register regulator %d: %d\n", |
1497 | reg, ret); | 1497 | reg, ret); |
1498 | platform_device_del(pdev); | 1498 | platform_device_put(pdev); |
1499 | wm8350->pmic.pdev[reg] = NULL; | 1499 | wm8350->pmic.pdev[reg] = NULL; |
1500 | } | 1500 | } |
1501 | 1501 | ||
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c index 9718aaaa8215..600b890a3c15 100644 --- a/drivers/rtc/rtc-rx8581.c +++ b/drivers/rtc/rtc-rx8581.c | |||
@@ -168,7 +168,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
168 | return -EIO; | 168 | return -EIO; |
169 | } | 169 | } |
170 | 170 | ||
171 | err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG, | 171 | err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL, |
172 | (data | RX8581_CTRL_STOP)); | 172 | (data | RX8581_CTRL_STOP)); |
173 | if (err < 0) { | 173 | if (err < 0) { |
174 | dev_err(&client->dev, "Unable to write control register\n"); | 174 | dev_err(&client->dev, "Unable to write control register\n"); |
@@ -182,6 +182,20 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
182 | return -EIO; | 182 | return -EIO; |
183 | } | 183 | } |
184 | 184 | ||
185 | /* get VLF and clear it */ | ||
186 | data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG); | ||
187 | if (data < 0) { | ||
188 | dev_err(&client->dev, "Unable to read flag register\n"); | ||
189 | return -EIO; | ||
190 | } | ||
191 | |||
192 | err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG, | ||
193 | (data & ~(RX8581_FLAG_VLF))); | ||
194 | if (err != 0) { | ||
195 | dev_err(&client->dev, "Unable to write flag register\n"); | ||
196 | return -EIO; | ||
197 | } | ||
198 | |||
185 | /* Restart the clock */ | 199 | /* Restart the clock */ |
186 | data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL); | 200 | data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL); |
187 | if (data < 0) { | 201 | if (data < 0) { |
@@ -189,8 +203,8 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
189 | return -EIO; | 203 | return -EIO; |
190 | } | 204 | } |
191 | 205 | ||
192 | err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG, | 206 | err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL, |
193 | (data | ~(RX8581_CTRL_STOP))); | 207 | (data & ~(RX8581_CTRL_STOP))); |
194 | if (err != 0) { | 208 | if (err != 0) { |
195 | dev_err(&client->dev, "Unable to write control register\n"); | 209 | dev_err(&client->dev, "Unable to write control register\n"); |
196 | return -EIO; | 210 | return -EIO; |
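The rx8581 fix is a read-modify-write correction: clearing a bit needs 'data & ~BIT', whereas the old 'data | ~BIT' turns on every bit except the one being cleared, and the write must go to the control register rather than the flag register. The standalone sketch below contrasts the correct and buggy bit operations; the CTRL_STOP value is illustrative only.

    #include <stdio.h>

    #define CTRL_STOP 0x02             /* illustrative bit position */

    /* Clear one bit while preserving the rest of the register. */
    static unsigned char clear_stop(unsigned char ctrl)
    {
        return ctrl & (unsigned char)~CTRL_STOP;
    }

    /* Set one bit while preserving the rest of the register. */
    static unsigned char set_stop(unsigned char ctrl)
    {
        return ctrl | CTRL_STOP;
    }

    int main(void)
    {
        unsigned char ctrl = 0x11;

        printf("set:   0x%02x\n", set_stop(ctrl));              /* 0x13 */
        printf("clear: 0x%02x\n", clear_stop(set_stop(ctrl)));  /* back to 0x11 */
        /* The old expression sets every bit except STOP instead of clearing it. */
        printf("buggy: 0x%02x\n", (unsigned char)(ctrl | ~CTRL_STOP));  /* 0xfd */
        return 0;
    }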
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e3dbeda97179..fd068bc1bd0a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -714,6 +714,14 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act) | |||
714 | if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) | 714 | if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) |
715 | return ZFCP_ERP_FAILED; | 715 | return ZFCP_ERP_FAILED; |
716 | 716 | ||
717 | if (mempool_resize(act->adapter->pool.status_read_data, | ||
718 | act->adapter->stat_read_buf_num, GFP_KERNEL)) | ||
719 | return ZFCP_ERP_FAILED; | ||
720 | |||
721 | if (mempool_resize(act->adapter->pool.status_read_req, | ||
722 | act->adapter->stat_read_buf_num, GFP_KERNEL)) | ||
723 | return ZFCP_ERP_FAILED; | ||
724 | |||
717 | atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num); | 725 | atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num); |
718 | if (zfcp_status_read_refill(act->adapter)) | 726 | if (zfcp_status_read_refill(act->adapter)) |
719 | return ZFCP_ERP_FAILED; | 727 | return ZFCP_ERP_FAILED; |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9ac6a6e4a604..71663fb77310 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -496,7 +496,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) | |||
496 | 496 | ||
497 | adapter->hydra_version = bottom->adapter_type; | 497 | adapter->hydra_version = bottom->adapter_type; |
498 | adapter->timer_ticks = bottom->timer_interval; | 498 | adapter->timer_ticks = bottom->timer_interval; |
499 | adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16); | 499 | adapter->stat_read_buf_num = max(bottom->status_read_buf_num, |
500 | (u16)FSF_STATUS_READS_RECOM); | ||
500 | 501 | ||
501 | if (fc_host_permanent_port_name(shost) == -1) | 502 | if (fc_host_permanent_port_name(shost) == -1) |
502 | fc_host_permanent_port_name(shost) = fc_host_port_name(shost); | 503 | fc_host_permanent_port_name(shost) = fc_host_port_name(shost); |
@@ -719,11 +720,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, | |||
719 | zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, | 720 | zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, |
720 | req->qtcb, sizeof(struct fsf_qtcb)); | 721 | req->qtcb, sizeof(struct fsf_qtcb)); |
721 | 722 | ||
722 | if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) { | ||
723 | zfcp_fsf_req_free(req); | ||
724 | return ERR_PTR(-EIO); | ||
725 | } | ||
726 | |||
727 | return req; | 723 | return req; |
728 | } | 724 | } |
729 | 725 | ||
@@ -981,7 +977,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
981 | } | 977 | } |
982 | 978 | ||
983 | /* use single, unchained SBAL if it can hold the request */ | 979 | /* use single, unchained SBAL if it can hold the request */ |
984 | if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) { | 980 | if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { |
985 | zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, | 981 | zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, |
986 | sg_req, sg_resp); | 982 | sg_req, sg_resp); |
987 | return 0; | 983 | return 0; |
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 28117e130e2c..6fa5e0453176 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -251,7 +251,8 @@ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) | |||
251 | struct zfcp_qdio_queue *req_q = &qdio->req_q; | 251 | struct zfcp_qdio_queue *req_q = &qdio->req_q; |
252 | 252 | ||
253 | spin_lock_bh(&qdio->req_q_lock); | 253 | spin_lock_bh(&qdio->req_q_lock); |
254 | if (atomic_read(&req_q->count)) | 254 | if (atomic_read(&req_q->count) || |
255 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | ||
255 | return 1; | 256 | return 1; |
256 | spin_unlock_bh(&qdio->req_q_lock); | 257 | spin_unlock_bh(&qdio->req_q_lock); |
257 | return 0; | 258 | return 0; |
@@ -274,8 +275,13 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
274 | spin_unlock_bh(&qdio->req_q_lock); | 275 | spin_unlock_bh(&qdio->req_q_lock); |
275 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, | 276 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, |
276 | zfcp_qdio_sbal_check(qdio), 5 * HZ); | 277 | zfcp_qdio_sbal_check(qdio), 5 * HZ); |
278 | |||
279 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | ||
280 | return -EIO; | ||
281 | |||
277 | if (ret > 0) | 282 | if (ret > 0) |
278 | return 0; | 283 | return 0; |
284 | |||
279 | if (!ret) { | 285 | if (!ret) { |
280 | atomic_inc(&qdio->req_q_full); | 286 | atomic_inc(&qdio->req_q_full); |
281 | /* assume hanging outbound queue, try queue recovery */ | 287 | /* assume hanging outbound queue, try queue recovery */ |
@@ -375,6 +381,8 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) | |||
375 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); | 381 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); |
376 | spin_unlock_bh(&qdio->req_q_lock); | 382 | spin_unlock_bh(&qdio->req_q_lock); |
377 | 383 | ||
384 | wake_up(&qdio->req_q_wq); | ||
385 | |||
378 | qdio_shutdown(qdio->adapter->ccw_device, | 386 | qdio_shutdown(qdio->adapter->ccw_device, |
379 | QDIO_FLAG_CLEANUP_USING_CLEAR); | 387 | QDIO_FLAG_CLEANUP_USING_CLEAR); |
380 | 388 | ||
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index a864ccc0a342..989b9a8ba72d 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c | |||
@@ -277,6 +277,12 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue, | |||
277 | goto reg_crq_failed; | 277 | goto reg_crq_failed; |
278 | } | 278 | } |
279 | 279 | ||
280 | queue->cur = 0; | ||
281 | spin_lock_init(&queue->lock); | ||
282 | |||
283 | tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task, | ||
284 | (unsigned long)hostdata); | ||
285 | |||
280 | if (request_irq(vdev->irq, | 286 | if (request_irq(vdev->irq, |
281 | rpavscsi_handle_event, | 287 | rpavscsi_handle_event, |
282 | 0, "ibmvscsi", (void *)hostdata) != 0) { | 288 | 0, "ibmvscsi", (void *)hostdata) != 0) { |
@@ -291,15 +297,10 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue, | |||
291 | goto req_irq_failed; | 297 | goto req_irq_failed; |
292 | } | 298 | } |
293 | 299 | ||
294 | queue->cur = 0; | ||
295 | spin_lock_init(&queue->lock); | ||
296 | |||
297 | tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task, | ||
298 | (unsigned long)hostdata); | ||
299 | |||
300 | return retrc; | 300 | return retrc; |
301 | 301 | ||
302 | req_irq_failed: | 302 | req_irq_failed: |
303 | tasklet_kill(&hostdata->srp_task); | ||
303 | do { | 304 | do { |
304 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); | 305 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); |
305 | } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); | 306 | } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 82ea4a8226b0..f820cffb7f00 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -1129,20 +1129,22 @@ static int ipr_is_same_device(struct ipr_resource_entry *res, | |||
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | /** | 1131 | /** |
1132 | * ipr_format_resource_path - Format the resource path for printing. | 1132 | * ipr_format_res_path - Format the resource path for printing. |
1133 | * @res_path: resource path | 1133 | * @res_path: resource path |
1134 | * @buf: buffer | 1134 | * @buf: buffer |
1135 | * | 1135 | * |
1136 | * Return value: | 1136 | * Return value: |
1137 | * pointer to buffer | 1137 | * pointer to buffer |
1138 | **/ | 1138 | **/ |
1139 | static char *ipr_format_resource_path(u8 *res_path, char *buffer) | 1139 | static char *ipr_format_res_path(u8 *res_path, char *buffer, int len) |
1140 | { | 1140 | { |
1141 | int i; | 1141 | int i; |
1142 | char *p = buffer; | ||
1142 | 1143 | ||
1143 | sprintf(buffer, "%02X", res_path[0]); | 1144 | *p = '\0'; |
1144 | for (i=1; res_path[i] != 0xff; i++) | 1145 | p += snprintf(p, buffer + len - p, "%02X", res_path[0]); |
1145 | sprintf(buffer, "%s-%02X", buffer, res_path[i]); | 1146 | for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++) |
1147 | p += snprintf(p, buffer + len - p, "-%02X", res_path[i]); | ||
1146 | 1148 | ||
1147 | return buffer; | 1149 | return buffer; |
1148 | } | 1150 | } |
@@ -1187,7 +1189,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res, | |||
1187 | 1189 | ||
1188 | if (res->sdev && new_path) | 1190 | if (res->sdev && new_path) |
1189 | sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", | 1191 | sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", |
1190 | ipr_format_resource_path(&res->res_path[0], &buffer[0])); | 1192 | ipr_format_res_path(res->res_path, buffer, |
1193 | sizeof(buffer))); | ||
1191 | } else { | 1194 | } else { |
1192 | res->flags = cfgtew->u.cfgte->flags; | 1195 | res->flags = cfgtew->u.cfgte->flags; |
1193 | if (res->flags & IPR_IS_IOA_RESOURCE) | 1196 | if (res->flags & IPR_IS_IOA_RESOURCE) |
@@ -1573,7 +1576,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, | |||
1573 | ipr_err_separator; | 1576 | ipr_err_separator; |
1574 | 1577 | ||
1575 | ipr_err("Device %d : %s", i + 1, | 1578 | ipr_err("Device %d : %s", i + 1, |
1576 | ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0])); | 1579 | ipr_format_res_path(dev_entry->res_path, buffer, |
1580 | sizeof(buffer))); | ||
1577 | ipr_log_ext_vpd(&dev_entry->vpd); | 1581 | ipr_log_ext_vpd(&dev_entry->vpd); |
1578 | 1582 | ||
1579 | ipr_err("-----New Device Information-----\n"); | 1583 | ipr_err("-----New Device Information-----\n"); |
@@ -1919,13 +1923,14 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, | |||
1919 | 1923 | ||
1920 | ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", | 1924 | ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", |
1921 | path_active_desc[i].desc, path_state_desc[j].desc, | 1925 | path_active_desc[i].desc, path_state_desc[j].desc, |
1922 | ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); | 1926 | ipr_format_res_path(fabric->res_path, buffer, |
1927 | sizeof(buffer))); | ||
1923 | return; | 1928 | return; |
1924 | } | 1929 | } |
1925 | } | 1930 | } |
1926 | 1931 | ||
1927 | ipr_err("Path state=%02X Resource Path=%s\n", path_state, | 1932 | ipr_err("Path state=%02X Resource Path=%s\n", path_state, |
1928 | ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); | 1933 | ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer))); |
1929 | } | 1934 | } |
1930 | 1935 | ||
1931 | static const struct { | 1936 | static const struct { |
@@ -2066,7 +2071,8 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, | |||
2066 | 2071 | ||
2067 | ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", | 2072 | ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", |
2068 | path_status_desc[j].desc, path_type_desc[i].desc, | 2073 | path_status_desc[j].desc, path_type_desc[i].desc, |
2069 | ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), | 2074 | ipr_format_res_path(cfg->res_path, buffer, |
2075 | sizeof(buffer)), | ||
2070 | link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], | 2076 | link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], |
2071 | be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); | 2077 | be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); |
2072 | return; | 2078 | return; |
@@ -2074,7 +2080,7 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, | |||
2074 | } | 2080 | } |
2075 | ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " | 2081 | ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " |
2076 | "WWN=%08X%08X\n", cfg->type_status, | 2082 | "WWN=%08X%08X\n", cfg->type_status, |
2077 | ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), | 2083 | ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)), |
2078 | link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], | 2084 | link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], |
2079 | be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); | 2085 | be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); |
2080 | } | 2086 | } |
@@ -2139,7 +2145,7 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, | |||
2139 | 2145 | ||
2140 | ipr_err("RAID %s Array Configuration: %s\n", | 2146 | ipr_err("RAID %s Array Configuration: %s\n", |
2141 | error->protection_level, | 2147 | error->protection_level, |
2142 | ipr_format_resource_path(&error->last_res_path[0], &buffer[0])); | 2148 | ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer))); |
2143 | 2149 | ||
2144 | ipr_err_separator; | 2150 | ipr_err_separator; |
2145 | 2151 | ||
@@ -2160,9 +2166,11 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, | |||
2160 | ipr_err("Array Member %d:\n", i); | 2166 | ipr_err("Array Member %d:\n", i); |
2161 | ipr_log_ext_vpd(&array_entry->vpd); | 2167 | ipr_log_ext_vpd(&array_entry->vpd); |
2162 | ipr_err("Current Location: %s", | 2168 | ipr_err("Current Location: %s", |
2163 | ipr_format_resource_path(&array_entry->res_path[0], &buffer[0])); | 2169 | ipr_format_res_path(array_entry->res_path, buffer, |
2170 | sizeof(buffer))); | ||
2164 | ipr_err("Expected Location: %s", | 2171 | ipr_err("Expected Location: %s", |
2165 | ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0])); | 2172 | ipr_format_res_path(array_entry->expected_res_path, |
2173 | buffer, sizeof(buffer))); | ||
2166 | 2174 | ||
2167 | ipr_err_separator; | 2175 | ipr_err_separator; |
2168 | } | 2176 | } |
@@ -4079,7 +4087,8 @@ static struct device_attribute ipr_adapter_handle_attr = { | |||
4079 | }; | 4087 | }; |
4080 | 4088 | ||
4081 | /** | 4089 | /** |
4082 | * ipr_show_resource_path - Show the resource path for this device. | 4090 | * ipr_show_resource_path - Show the resource path or the resource address for |
4091 | * this device. | ||
4083 | * @dev: device struct | 4092 | * @dev: device struct |
4084 | * @buf: buffer | 4093 | * @buf: buffer |
4085 | * | 4094 | * |
@@ -4097,9 +4106,14 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut | |||
4097 | 4106 | ||
4098 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | 4107 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); |
4099 | res = (struct ipr_resource_entry *)sdev->hostdata; | 4108 | res = (struct ipr_resource_entry *)sdev->hostdata; |
4100 | if (res) | 4109 | if (res && ioa_cfg->sis64) |
4101 | len = snprintf(buf, PAGE_SIZE, "%s\n", | 4110 | len = snprintf(buf, PAGE_SIZE, "%s\n", |
4102 | ipr_format_resource_path(&res->res_path[0], &buffer[0])); | 4111 | ipr_format_res_path(res->res_path, buffer, |
4112 | sizeof(buffer))); | ||
4113 | else if (res) | ||
4114 | len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, | ||
4115 | res->bus, res->target, res->lun); | ||
4116 | |||
4103 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 4117 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
4104 | return len; | 4118 | return len; |
4105 | } | 4119 | } |
@@ -4351,7 +4365,8 @@ static int ipr_slave_configure(struct scsi_device *sdev) | |||
4351 | scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); | 4365 | scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); |
4352 | if (ioa_cfg->sis64) | 4366 | if (ioa_cfg->sis64) |
4353 | sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", | 4367 | sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", |
4354 | ipr_format_resource_path(&res->res_path[0], &buffer[0])); | 4368 | ipr_format_res_path(res->res_path, buffer, |
4369 | sizeof(buffer))); | ||
4355 | return 0; | 4370 | return 0; |
4356 | } | 4371 | } |
4357 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 4372 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
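The ipr rework replaces repeated sprintf(buffer, "%s-...", buffer, ...) self-appends, which are unbounded and rely on undefined behaviour, with position-tracked snprintf() calls into a caller-sized buffer. Below is a hedged standalone version of that bounded hex-path formatter; the 0xff terminator follows the hunk above, while the function and variable names are illustrative.

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Format a 0xff-terminated byte path as "AA-BB-CC" without ever writing
     * past buffer[len - 1]; each snprintf() return value advances the cursor.
     */
    static char *format_res_path(const uint8_t *res_path, char *buffer, int len)
    {
        char *p = buffer;
        int i;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && (i * 3) < len; i++)
            p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
    }

    int main(void)
    {
        uint8_t path[] = { 0x00, 0x02, 0x0a, 0xff };
        char buf[32];

        printf("%s\n", format_res_path(path, buf, sizeof(buf)));   /* 00-02-0A */
        return 0;
    }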
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 9ecd2259eb39..b965f3587c9d 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -1684,8 +1684,9 @@ struct ipr_ucode_image_header { | |||
1684 | if (ipr_is_device(hostrcb)) { \ | 1684 | if (ipr_is_device(hostrcb)) { \ |
1685 | if ((hostrcb)->ioa_cfg->sis64) { \ | 1685 | if ((hostrcb)->ioa_cfg->sis64) { \ |
1686 | printk(KERN_ERR IPR_NAME ": %s: " fmt, \ | 1686 | printk(KERN_ERR IPR_NAME ": %s: " fmt, \ |
1687 | ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \ | 1687 | ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \ |
1688 | &hostrcb->rp_buffer[0]), \ | 1688 | hostrcb->rp_buffer, \ |
1689 | sizeof(hostrcb->rp_buffer)), \ | ||
1689 | __VA_ARGS__); \ | 1690 | __VA_ARGS__); \ |
1690 | } else { \ | 1691 | } else { \ |
1691 | ipr_ra_err((hostrcb)->ioa_cfg, \ | 1692 | ipr_ra_err((hostrcb)->ioa_cfg, \ |
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index eed3c2d8dd1c..a182def7007d 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/uaccess.h> | 41 | #include <linux/uaccess.h> |
42 | 42 | ||
43 | #include <asm/io.h> | 43 | #include <asm/io.h> |
44 | #include <asm/ioctls.h> | ||
44 | 45 | ||
45 | #include <asm/mach/serial_at91.h> | 46 | #include <asm/mach/serial_at91.h> |
46 | #include <mach/board.h> | 47 | #include <mach/board.h> |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 61d75507d5d0..162c95a088ed 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1596,6 +1596,7 @@ static const struct usb_device_id acm_ids[] = { | |||
1596 | { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */ | 1596 | { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */ |
1597 | { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ | 1597 | { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ |
1598 | { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ | 1598 | { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ |
1599 | { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ | ||
1599 | 1600 | ||
1600 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ | 1601 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ |
1601 | 1602 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 83e7bbbe97fa..70cccc75a362 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -1982,6 +1982,8 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, | |||
1982 | (portstatus & USB_PORT_STAT_ENABLE)) { | 1982 | (portstatus & USB_PORT_STAT_ENABLE)) { |
1983 | if (hub_is_wusb(hub)) | 1983 | if (hub_is_wusb(hub)) |
1984 | udev->speed = USB_SPEED_WIRELESS; | 1984 | udev->speed = USB_SPEED_WIRELESS; |
1985 | else if (portstatus & USB_PORT_STAT_SUPER_SPEED) | ||
1986 | udev->speed = USB_SPEED_SUPER; | ||
1985 | else if (portstatus & USB_PORT_STAT_HIGH_SPEED) | 1987 | else if (portstatus & USB_PORT_STAT_HIGH_SPEED) |
1986 | udev->speed = USB_SPEED_HIGH; | 1988 | udev->speed = USB_SPEED_HIGH; |
1987 | else if (portstatus & USB_PORT_STAT_LOW_SPEED) | 1989 | else if (portstatus & USB_PORT_STAT_LOW_SPEED) |
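The hub change adds SuperSpeed to the port-status decode, checking the fastest speed bits first and falling back to full speed when none are set. A tiny sketch of that decode order follows; the bit values and enum are illustrative, not the USB_PORT_STAT_* constants.

    #include <stdio.h>

    /* Illustrative status bits, not the real USB_PORT_STAT_* values. */
    #define PORT_STAT_LOW_SPEED    0x01
    #define PORT_STAT_HIGH_SPEED   0x02
    #define PORT_STAT_SUPER_SPEED  0x04

    enum usb_speed { SPEED_FULL, SPEED_LOW, SPEED_HIGH, SPEED_SUPER };

    /* Test the fastest speeds first; no bit set means full speed. */
    static enum usb_speed port_speed(unsigned int portstatus)
    {
        if (portstatus & PORT_STAT_SUPER_SPEED)
            return SPEED_SUPER;
        if (portstatus & PORT_STAT_HIGH_SPEED)
            return SPEED_HIGH;
        if (portstatus & PORT_STAT_LOW_SPEED)
            return SPEED_LOW;
        return SPEED_FULL;
    }

    int main(void)
    {
        printf("%d\n", port_speed(PORT_STAT_SUPER_SPEED));   /* 3 == SPEED_SUPER */
        return 0;
    }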
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f22d03df8b17..db99c084df92 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -41,6 +41,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
41 | /* Philips PSC805 audio device */ | 41 | /* Philips PSC805 audio device */ |
42 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | 42 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, |
43 | 43 | ||
44 | /* Artisman Watchdog Dongle */ | ||
45 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = | ||
46 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
47 | |||
44 | /* Roland SC-8820 */ | 48 | /* Roland SC-8820 */ |
45 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 49 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
46 | 50 | ||
@@ -64,6 +68,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
64 | /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ | 68 | /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ |
65 | { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, | 69 | { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, |
66 | 70 | ||
71 | /* Broadcom BCM92035DGROM BT dongle */ | ||
72 | { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
73 | |||
67 | /* Action Semiconductor flash disk */ | 74 | /* Action Semiconductor flash disk */ |
68 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = | 75 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
69 | USB_QUIRK_STRING_FETCH_255 }, | 76 | USB_QUIRK_STRING_FETCH_255 }, |
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c index 85b0d8921eae..980762453a9c 100644 --- a/drivers/usb/gadget/pxa27x_udc.c +++ b/drivers/usb/gadget/pxa27x_udc.c | |||
@@ -2561,7 +2561,7 @@ static void pxa_udc_shutdown(struct platform_device *_dev) | |||
2561 | udc_disable(udc); | 2561 | udc_disable(udc); |
2562 | } | 2562 | } |
2563 | 2563 | ||
2564 | #ifdef CONFIG_CPU_PXA27x | 2564 | #ifdef CONFIG_PXA27x |
2565 | extern void pxa27x_clear_otgph(void); | 2565 | extern void pxa27x_clear_otgph(void); |
2566 | #else | 2566 | #else |
2567 | #define pxa27x_clear_otgph() do {} while (0) | 2567 | #define pxa27x_clear_otgph() do {} while (0) |
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index e724a051bfdd..ea2b3c7ebee5 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c | |||
@@ -735,6 +735,10 @@ static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev, | |||
735 | else | 735 | else |
736 | dev->ep0state = EP0_OUT_DATA_PHASE; | 736 | dev->ep0state = EP0_OUT_DATA_PHASE; |
737 | 737 | ||
738 | if (!dev->driver) | ||
739 | return; | ||
740 | |||
741 | /* deliver the request to the gadget driver */ | ||
738 | ret = dev->driver->setup(&dev->gadget, crq); | 742 | ret = dev->driver->setup(&dev->gadget, crq); |
739 | if (ret < 0) { | 743 | if (ret < 0) { |
740 | if (dev->req_config) { | 744 | if (dev->req_config) { |
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index a18debdd79b8..418163894775 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c | |||
@@ -203,7 +203,7 @@ static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci) | |||
203 | __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR); | 203 | __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR); |
204 | } | 204 | } |
205 | 205 | ||
206 | #ifdef CONFIG_CPU_PXA27x | 206 | #ifdef CONFIG_PXA27x |
207 | extern void pxa27x_clear_otgph(void); | 207 | extern void pxa27x_clear_otgph(void); |
208 | #else | 208 | #else |
209 | #define pxa27x_clear_otgph() do {} while (0) | 209 | #define pxa27x_clear_otgph() do {} while (0) |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index fd9e03afd91c..2eb658d26394 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -835,6 +835,27 @@ fail: | |||
835 | return 0; | 835 | return 0; |
836 | } | 836 | } |
837 | 837 | ||
838 | void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, | ||
839 | struct usb_device *udev) | ||
840 | { | ||
841 | struct xhci_virt_device *virt_dev; | ||
842 | struct xhci_ep_ctx *ep0_ctx; | ||
843 | struct xhci_ring *ep_ring; | ||
844 | |||
845 | virt_dev = xhci->devs[udev->slot_id]; | ||
846 | ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0); | ||
847 | ep_ring = virt_dev->eps[0].ring; | ||
848 | /* | ||
849 | * FIXME we don't keep track of the dequeue pointer very well after a | ||
850 | * Set TR dequeue pointer, so we're setting the dequeue pointer of the | ||
851 | * host to our enqueue pointer. This should only be called after a | ||
852 | * configured device has reset, so all control transfers should have | ||
853 | * been completed or cancelled before the reset. | ||
854 | */ | ||
855 | ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue); | ||
856 | ep0_ctx->deq |= ep_ring->cycle_state; | ||
857 | } | ||
858 | |||
838 | /* Setup an xHCI virtual device for a Set Address command */ | 859 | /* Setup an xHCI virtual device for a Set Address command */ |
839 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) | 860 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) |
840 | { | 861 | { |
@@ -1002,7 +1023,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |||
1002 | return EP_INTERVAL(interval); | 1023 | return EP_INTERVAL(interval); |
1003 | } | 1024 | } |
1004 | 1025 | ||
1005 | /* The "Mult" field in the endpoint context is only set for SuperSpeed devices. | 1026 | /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps. |
1006 | * High speed endpoint descriptors can define "the number of additional | 1027 | * High speed endpoint descriptors can define "the number of additional |
1007 | * transaction opportunities per microframe", but that goes in the Max Burst | 1028 | * transaction opportunities per microframe", but that goes in the Max Burst |
1008 | * endpoint context field. | 1029 | * endpoint context field. |
@@ -1010,7 +1031,8 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |||
1010 | static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, | 1031 | static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, |
1011 | struct usb_host_endpoint *ep) | 1032 | struct usb_host_endpoint *ep) |
1012 | { | 1033 | { |
1013 | if (udev->speed != USB_SPEED_SUPER) | 1034 | if (udev->speed != USB_SPEED_SUPER || |
1035 | !usb_endpoint_xfer_isoc(&ep->desc)) | ||
1014 | return 0; | 1036 | return 0; |
1015 | return ep->ss_ep_comp.bmAttributes; | 1037 | return ep->ss_ep_comp.bmAttributes; |
1016 | } | 1038 | } |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 94e6934edb09..bfc99a939455 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -2380,16 +2380,19 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, | |||
2380 | u32 field3, u32 field4, bool command_must_succeed) | 2380 | u32 field3, u32 field4, bool command_must_succeed) |
2381 | { | 2381 | { |
2382 | int reserved_trbs = xhci->cmd_ring_reserved_trbs; | 2382 | int reserved_trbs = xhci->cmd_ring_reserved_trbs; |
2383 | int ret; | ||
2384 | |||
2383 | if (!command_must_succeed) | 2385 | if (!command_must_succeed) |
2384 | reserved_trbs++; | 2386 | reserved_trbs++; |
2385 | 2387 | ||
2386 | if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) { | 2388 | ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, |
2387 | if (!in_interrupt()) | 2389 | reserved_trbs, GFP_ATOMIC); |
2388 | xhci_err(xhci, "ERR: No room for command on command ring\n"); | 2390 | if (ret < 0) { |
2391 | xhci_err(xhci, "ERR: No room for command on command ring\n"); | ||
2389 | if (command_must_succeed) | 2392 | if (command_must_succeed) |
2390 | xhci_err(xhci, "ERR: Reserved TRB counting for " | 2393 | xhci_err(xhci, "ERR: Reserved TRB counting for " |
2391 | "unfailable commands failed.\n"); | 2394 | "unfailable commands failed.\n"); |
2392 | return -ENOMEM; | 2395 | return ret; |
2393 | } | 2396 | } |
2394 | queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, | 2397 | queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, |
2395 | field4 | xhci->cmd_ring->cycle_state); | 2398 | field4 | xhci->cmd_ring->cycle_state); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 27345cd04da0..3998f72cd0c4 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -2134,6 +2134,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2134 | /* If this is a Set Address to an unconfigured device, setup ep 0 */ | 2134 | /* If this is a Set Address to an unconfigured device, setup ep 0 */ |
2135 | if (!udev->config) | 2135 | if (!udev->config) |
2136 | xhci_setup_addressable_virt_dev(xhci, udev); | 2136 | xhci_setup_addressable_virt_dev(xhci, udev); |
2137 | else | ||
2138 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); | ||
2137 | /* Otherwise, assume the core has the device configured how it wants */ | 2139 | /* Otherwise, assume the core has the device configured how it wants */ |
2138 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 2140 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
2139 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | 2141 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8b4b7d39f79c..6c7e3430ec93 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1292,6 +1292,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); | |||
1292 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); | 1292 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); |
1293 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); | 1293 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); |
1294 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); | 1294 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); |
1295 | void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, | ||
1296 | struct usb_device *udev); | ||
1295 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); | 1297 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); |
1296 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); | 1298 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); |
1297 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index); | 1299 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index); |
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 30d930386b65..d25814c172b2 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c | |||
@@ -2436,7 +2436,8 @@ sisusb_open(struct inode *inode, struct file *file) | |||
2436 | } | 2436 | } |
2437 | 2437 | ||
2438 | if (!sisusb->devinit) { | 2438 | if (!sisusb->devinit) { |
2439 | if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) { | 2439 | if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH || |
2440 | sisusb->sisusb_dev->speed == USB_SPEED_SUPER) { | ||
2440 | if (sisusb_init_gfxdevice(sisusb, 0)) { | 2441 | if (sisusb_init_gfxdevice(sisusb, 0)) { |
2441 | mutex_unlock(&sisusb->lock); | 2442 | mutex_unlock(&sisusb->lock); |
2442 | dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n"); | 2443 | dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n"); |
@@ -3166,7 +3167,7 @@ static int sisusb_probe(struct usb_interface *intf, | |||
3166 | 3167 | ||
3167 | sisusb->present = 1; | 3168 | sisusb->present = 1; |
3168 | 3169 | ||
3169 | if (dev->speed == USB_SPEED_HIGH) { | 3170 | if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) { |
3170 | int initscreen = 1; | 3171 | int initscreen = 1; |
3171 | #ifdef INCL_SISUSB_CON | 3172 | #ifdef INCL_SISUSB_CON |
3172 | if (sisusb_first_vc > 0 && | 3173 | if (sisusb_first_vc > 0 && |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 05c077f8f9ac..3c48e77a0aa2 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -29,19 +29,6 @@ static void tusb_source_power(struct musb *musb, int is_on); | |||
29 | #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) | 29 | #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) |
30 | #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) | 30 | #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) |
31 | 31 | ||
32 | #ifdef CONFIG_PM | ||
33 | /* REVISIT: These should be only needed if somebody implements off idle */ | ||
34 | void musb_platform_save_context(struct musb *musb, | ||
35 | struct musb_context_registers *musb_context) | ||
36 | { | ||
37 | } | ||
38 | |||
39 | void musb_platform_restore_context(struct musb *musb, | ||
40 | struct musb_context_registers *musb_context) | ||
41 | { | ||
42 | } | ||
43 | #endif | ||
44 | |||
45 | /* | 32 | /* |
46 | * Checks the revision. We need to use the DMA register as 3.0 does not | 33 | * Checks the revision. We need to use the DMA register as 3.0 does not |
47 | * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. | 34 | * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index da7e334b0407..e298dc4baed7 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -691,6 +691,7 @@ static struct usb_device_id id_table_combined [] = { | |||
691 | { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), | 691 | { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), |
692 | .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, | 692 | .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, |
693 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 693 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
694 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, | ||
694 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 695 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
695 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, | 696 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, |
696 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 697 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
@@ -737,6 +738,14 @@ static struct usb_device_id id_table_combined [] = { | |||
737 | { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, | 738 | { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, |
738 | { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, | 739 | { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, |
739 | { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, | 740 | { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, |
741 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID), | ||
742 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
743 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID), | ||
744 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
745 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID), | ||
746 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
747 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), | ||
748 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
740 | { }, /* Optional parameter entry */ | 749 | { }, /* Optional parameter entry */ |
741 | { } /* Terminating entry */ | 750 | { } /* Terminating entry */ |
742 | }; | 751 | }; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index bbc159a1df45..d01946db8fac 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -696,6 +696,12 @@ | |||
696 | #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ | 696 | #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ |
697 | 697 | ||
698 | /* | 698 | /* |
699 | * RT Systems programming cables for various ham radios | ||
700 | */ | ||
701 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ | ||
702 | #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ | ||
703 | |||
704 | /* | ||
699 | * Bayer Ascensia Contour blood glucose meter USB-converter cable. | 705 | * Bayer Ascensia Contour blood glucose meter USB-converter cable. |
700 | * http://winglucofacts.com/cables/ | 706 | * http://winglucofacts.com/cables/ |
701 | */ | 707 | */ |
@@ -1017,3 +1023,12 @@ | |||
1017 | #define MJSG_SR_RADIO_PID 0x9379 | 1023 | #define MJSG_SR_RADIO_PID 0x9379 |
1018 | #define MJSG_XM_RADIO_PID 0x937A | 1024 | #define MJSG_XM_RADIO_PID 0x937A |
1019 | #define MJSG_HD_RADIO_PID 0x937C | 1025 | #define MJSG_HD_RADIO_PID 0x937C |
1026 | |||
1027 | /* | ||
1028 | * Xverve Signalyzer tools (http://www.signalyzer.com/) | ||
1029 | */ | ||
1030 | #define XVERVE_SIGNALYZER_ST_PID 0xBCA0 | ||
1031 | #define XVERVE_SIGNALYZER_SLITE_PID 0xBCA1 | ||
1032 | #define XVERVE_SIGNALYZER_SH2_PID 0xBCA2 | ||
1033 | #define XVERVE_SIGNALYZER_SH4_PID 0xBCA4 | ||
1034 | |||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e280ad8e12f7..5cd30e4345c6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -206,6 +206,7 @@ static void option_instat_callback(struct urb *urb); | |||
206 | #define AMOI_PRODUCT_H01 0x0800 | 206 | #define AMOI_PRODUCT_H01 0x0800 |
207 | #define AMOI_PRODUCT_H01A 0x7002 | 207 | #define AMOI_PRODUCT_H01A 0x7002 |
208 | #define AMOI_PRODUCT_H02 0x0802 | 208 | #define AMOI_PRODUCT_H02 0x0802 |
209 | #define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407 | ||
209 | 210 | ||
210 | #define DELL_VENDOR_ID 0x413C | 211 | #define DELL_VENDOR_ID 0x413C |
211 | 212 | ||
@@ -302,6 +303,7 @@ static void option_instat_callback(struct urb *urb); | |||
302 | #define QISDA_PRODUCT_H21_4512 0x4512 | 303 | #define QISDA_PRODUCT_H21_4512 0x4512 |
303 | #define QISDA_PRODUCT_H21_4523 0x4523 | 304 | #define QISDA_PRODUCT_H21_4523 0x4523 |
304 | #define QISDA_PRODUCT_H20_4515 0x4515 | 305 | #define QISDA_PRODUCT_H20_4515 0x4515 |
306 | #define QISDA_PRODUCT_H20_4518 0x4518 | ||
305 | #define QISDA_PRODUCT_H20_4519 0x4519 | 307 | #define QISDA_PRODUCT_H20_4519 0x4519 |
306 | 308 | ||
307 | /* TLAYTECH PRODUCTS */ | 309 | /* TLAYTECH PRODUCTS */ |
@@ -516,6 +518,7 @@ static const struct usb_device_id option_ids[] = { | |||
516 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | 518 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, |
517 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | 519 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, |
518 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, | 520 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) }, |
521 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) }, | ||
519 | 522 | ||
520 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ | 523 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ |
521 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 524 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
@@ -852,6 +855,7 @@ static const struct usb_device_id option_ids[] = { | |||
852 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, | 855 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, |
853 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, | 856 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, |
854 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, | 857 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, |
858 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) }, | ||
855 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, | 859 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, |
856 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, | 860 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, |
857 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ | 861 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ |
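
The new option.c entries (AMOI SkypePhone S2, Qisda H20 4518), like the qcserial and sierra additions below, are plain vendor/product matches. For reference, USB_DEVICE() from <linux/usb.h> expands to a struct usb_device_id initializer of this shape (example_id is a made-up name; the macros are the ones defined earlier in option.c):

        static const struct usb_device_id example_id[] = {
                {
                        .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
                        .idVendor    = QISDA_VENDOR_ID,
                        .idProduct   = QISDA_PRODUCT_H20_4518,  /* 0x4518, defined above */
                },
                { }
        };

so the match is on vendor and product only, with no interface class or protocol filtering.
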
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 93d72eb8cafc..cde67cacb2c3 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -51,6 +51,8 @@ static const struct usb_device_id id_table[] = { | |||
51 | {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ | 51 | {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ |
52 | {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ | 52 | {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ |
53 | {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ | 53 | {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ |
54 | {USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */ | ||
55 | {USB_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ | ||
54 | {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */ | 56 | {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */ |
55 | {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ | 57 | {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ |
56 | {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */ | 58 | {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */ |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ef0bdb08d788..d47b56e9e8ce 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -245,6 +245,7 @@ static const struct usb_device_id id_table[] = { | |||
245 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ | 245 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
246 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ | 246 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ |
247 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ | 247 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ |
248 | { USB_DEVICE(0x1199, 0x0301) }, /* Sierra Wireless USB Dongle 250U */ | ||
248 | /* Sierra Wireless C597 */ | 249 | /* Sierra Wireless C597 */ |
249 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, | 250 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, |
250 | /* Sierra Wireless T598 */ | 251 | /* Sierra Wireless T598 */ |
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 44716427c51c..64ec073e89de 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -139,9 +139,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout) | |||
139 | 139 | ||
140 | /* fill the common fields in the URB */ | 140 | /* fill the common fields in the URB */ |
141 | us->current_urb->context = &urb_done; | 141 | us->current_urb->context = &urb_done; |
142 | us->current_urb->actual_length = 0; | 142 | us->current_urb->transfer_flags = 0; |
143 | us->current_urb->error_count = 0; | ||
144 | us->current_urb->status = 0; | ||
145 | 143 | ||
146 | /* we assume that if transfer_buffer isn't us->iobuf then it | 144 | /* we assume that if transfer_buffer isn't us->iobuf then it |
147 | * hasn't been mapped for DMA. Yes, this is clunky, but it's | 145 | * hasn't been mapped for DMA. Yes, this is clunky, but it's |
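
The transport.c hunk drops the manual resets of actual_length, error_count and status because the USB core re-initializes status and actual_length itself on every submission (and drivers are not supposed to write urb->status at all); what usb-storage still needs to clear on its reused URB is transfer_flags, so flags set by a previous operation do not leak into the next one. A minimal sketch of that division of labour, using a hypothetical helper name:

        #include <linux/usb.h>

        /* illustrate_urb_reuse() is hypothetical; it only shows which field
         * the caller must reset versus what usb_submit_urb() already does
         * (the core sets urb->status = -EINPROGRESS and
         * urb->actual_length = 0 internally on submission). */
        static int illustrate_urb_reuse(struct urb *urb)
        {
                urb->transfer_flags = 0;        /* clear flags left over from last use */
                return usb_submit_urb(urb, GFP_NOIO);
        }
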
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 40f61320ce16..34b2fc472fe8 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -95,7 +95,7 @@ struct fb_bitfield rgb_bitfields[][4] = | |||
95 | { { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } }, | 95 | { { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } }, |
96 | }; | 96 | }; |
97 | 97 | ||
98 | static struct fb_fix_screeninfo au1100fb_fix __initdata = { | 98 | static struct fb_fix_screeninfo au1100fb_fix __devinitdata = { |
99 | .id = "AU1100 FB", | 99 | .id = "AU1100 FB", |
100 | .xpanstep = 1, | 100 | .xpanstep = 1, |
101 | .ypanstep = 1, | 101 | .ypanstep = 1, |
@@ -103,7 +103,7 @@ static struct fb_fix_screeninfo au1100fb_fix __initdata = { | |||
103 | .accel = FB_ACCEL_NONE, | 103 | .accel = FB_ACCEL_NONE, |
104 | }; | 104 | }; |
105 | 105 | ||
106 | static struct fb_var_screeninfo au1100fb_var __initdata = { | 106 | static struct fb_var_screeninfo au1100fb_var __devinitdata = { |
107 | .activate = FB_ACTIVATE_NOW, | 107 | .activate = FB_ACTIVATE_NOW, |
108 | .height = -1, | 108 | .height = -1, |
109 | .width = -1, | 109 | .width = -1, |
@@ -458,7 +458,7 @@ static struct fb_ops au1100fb_ops = | |||
458 | 458 | ||
459 | /* AU1100 LCD controller device driver */ | 459 | /* AU1100 LCD controller device driver */ |
460 | 460 | ||
461 | static int __init au1100fb_drv_probe(struct platform_device *dev) | 461 | static int __devinit au1100fb_drv_probe(struct platform_device *dev) |
462 | { | 462 | { |
463 | struct au1100fb_device *fbdev = NULL; | 463 | struct au1100fb_device *fbdev = NULL; |
464 | struct resource *regs_res; | 464 | struct resource *regs_res; |
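
The au1100fb changes (and the gbefb, pmag-ba-fb and pmagb-b-fb hunks further down) all fix the same section-mismatch pattern: probe-path code and the data it references were marked __init/__initdata, but .init memory is freed once the kernel finishes booting, while a probe can still run later when a device is bound or hot-plugged. __devinit/__devinitdata keep such code and data around whenever hotplug is possible and fall back to init-discard behaviour otherwise. The rule of thumb, as a sketch with made-up names (example_fix, example_drv_probe, "EXAMPLE FB" are illustrative only):

        #include <linux/kernel.h>
        #include <linux/fb.h>
        #include <linux/platform_device.h>

        static struct fb_fix_screeninfo example_fix __devinitdata = {
                .id       = "EXAMPLE FB",
                .xpanstep = 1,
        };

        static int __devinit example_drv_probe(struct platform_device *pdev)
        {
                /* Safe: __devinit code referencing __devinitdata -- both live
                 * in sections with the same lifetime.  Referencing __initdata
                 * here could be a use-after-free once init memory is freed. */
                pr_info("probing %s\n", example_fix.id);
                return 0;
        }
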
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 3a561df2e8a2..0c1afd13ddd3 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -388,6 +388,7 @@ cyber2000fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | |||
388 | pseudo_val |= convert_bitfield(red, &var->red); | 388 | pseudo_val |= convert_bitfield(red, &var->red); |
389 | pseudo_val |= convert_bitfield(green, &var->green); | 389 | pseudo_val |= convert_bitfield(green, &var->green); |
390 | pseudo_val |= convert_bitfield(blue, &var->blue); | 390 | pseudo_val |= convert_bitfield(blue, &var->blue); |
391 | ret = 0; | ||
391 | break; | 392 | break; |
392 | } | 393 | } |
393 | 394 | ||
@@ -436,6 +437,8 @@ static void cyber2000fb_write_ramdac_ctrl(struct cfb_info *cfb) | |||
436 | cyber2000fb_writeb(i | 4, 0x3cf, cfb); | 437 | cyber2000fb_writeb(i | 4, 0x3cf, cfb); |
437 | cyber2000fb_writeb(val, 0x3c6, cfb); | 438 | cyber2000fb_writeb(val, 0x3c6, cfb); |
438 | cyber2000fb_writeb(i, 0x3cf, cfb); | 439 | cyber2000fb_writeb(i, 0x3cf, cfb); |
440 | /* prevent card lock-up observed on x86 with CyberPro 2000 */ | ||
441 | cyber2000fb_readb(0x3cf, cfb); | ||
439 | } | 442 | } |
440 | 443 | ||
441 | static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw) | 444 | static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw) |
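
Two independent fixes sit in the cyber2000fb.c hunks: the added ret = 0 makes cyber2000fb_setcolreg() report success for the truecolor branch instead of falling through with the function's default error, and the trailing register read adds a read-back on the same bus path, the usual way to flush posted writes and let the RAMDAC settle, which per the comment avoids a lock-up seen on x86. The read-back idiom, as a hypothetical helper (ramdac_update is made up; the real driver uses its cyber2000fb_writeb/readb wrappers):

        #include <linux/io.h>

        static void ramdac_update(void __iomem *regs, u8 index, u8 val)
        {
                writeb(index | 4, regs + 0x3cf);
                writeb(val, regs + 0x3c6);
                writeb(index, regs + 0x3cf);
                (void)readb(regs + 0x3cf);      /* flush posted writes before returning */
        }
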
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 7d8c55d7fd28..ca3355e430bf 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -91,10 +91,10 @@ static uint32_t pseudo_palette[16]; | |||
91 | static uint32_t gbe_cmap[256]; | 91 | static uint32_t gbe_cmap[256]; |
92 | static int gbe_turned_on; /* 0 turned off, 1 turned on */ | 92 | static int gbe_turned_on; /* 0 turned off, 1 turned on */ |
93 | 93 | ||
94 | static char *mode_option __initdata = NULL; | 94 | static char *mode_option __devinitdata = NULL; |
95 | 95 | ||
96 | /* default CRT mode */ | 96 | /* default CRT mode */ |
97 | static struct fb_var_screeninfo default_var_CRT __initdata = { | 97 | static struct fb_var_screeninfo default_var_CRT __devinitdata = { |
98 | /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ | 98 | /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ |
99 | .xres = 640, | 99 | .xres = 640, |
100 | .yres = 480, | 100 | .yres = 480, |
@@ -125,7 +125,7 @@ static struct fb_var_screeninfo default_var_CRT __initdata = { | |||
125 | }; | 125 | }; |
126 | 126 | ||
127 | /* default LCD mode */ | 127 | /* default LCD mode */ |
128 | static struct fb_var_screeninfo default_var_LCD __initdata = { | 128 | static struct fb_var_screeninfo default_var_LCD __devinitdata = { |
129 | /* 1600x1024, 8 bpp */ | 129 | /* 1600x1024, 8 bpp */ |
130 | .xres = 1600, | 130 | .xres = 1600, |
131 | .yres = 1024, | 131 | .yres = 1024, |
@@ -157,7 +157,7 @@ static struct fb_var_screeninfo default_var_LCD __initdata = { | |||
157 | 157 | ||
158 | /* default modedb mode */ | 158 | /* default modedb mode */ |
159 | /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ | 159 | /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ |
160 | static struct fb_videomode default_mode_CRT __initdata = { | 160 | static struct fb_videomode default_mode_CRT __devinitdata = { |
161 | .refresh = 60, | 161 | .refresh = 60, |
162 | .xres = 640, | 162 | .xres = 640, |
163 | .yres = 480, | 163 | .yres = 480, |
@@ -172,7 +172,7 @@ static struct fb_videomode default_mode_CRT __initdata = { | |||
172 | .vmode = FB_VMODE_NONINTERLACED, | 172 | .vmode = FB_VMODE_NONINTERLACED, |
173 | }; | 173 | }; |
174 | /* 1600x1024 SGI flatpanel 1600sw */ | 174 | /* 1600x1024 SGI flatpanel 1600sw */ |
175 | static struct fb_videomode default_mode_LCD __initdata = { | 175 | static struct fb_videomode default_mode_LCD __devinitdata = { |
176 | /* 1600x1024, 8 bpp */ | 176 | /* 1600x1024, 8 bpp */ |
177 | .xres = 1600, | 177 | .xres = 1600, |
178 | .yres = 1024, | 178 | .yres = 1024, |
@@ -186,8 +186,8 @@ static struct fb_videomode default_mode_LCD __initdata = { | |||
186 | .vmode = FB_VMODE_NONINTERLACED, | 186 | .vmode = FB_VMODE_NONINTERLACED, |
187 | }; | 187 | }; |
188 | 188 | ||
189 | static struct fb_videomode *default_mode __initdata = &default_mode_CRT; | 189 | static struct fb_videomode *default_mode __devinitdata = &default_mode_CRT; |
190 | static struct fb_var_screeninfo *default_var __initdata = &default_var_CRT; | 190 | static struct fb_var_screeninfo *default_var __devinitdata = &default_var_CRT; |
191 | 191 | ||
192 | static int flat_panel_enabled = 0; | 192 | static int flat_panel_enabled = 0; |
193 | 193 | ||
@@ -1098,7 +1098,7 @@ static void gbefb_create_sysfs(struct device *dev) | |||
1098 | * Initialization | 1098 | * Initialization |
1099 | */ | 1099 | */ |
1100 | 1100 | ||
1101 | static int __init gbefb_setup(char *options) | 1101 | static int __devinit gbefb_setup(char *options) |
1102 | { | 1102 | { |
1103 | char *this_opt; | 1103 | char *this_opt; |
1104 | 1104 | ||
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index 0f361b6100d2..0c69fa20251b 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -44,7 +44,7 @@ struct pmagbafb_par { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | 46 | ||
47 | static struct fb_var_screeninfo pmagbafb_defined __initdata = { | 47 | static struct fb_var_screeninfo pmagbafb_defined __devinitdata = { |
48 | .xres = 1024, | 48 | .xres = 1024, |
49 | .yres = 864, | 49 | .yres = 864, |
50 | .xres_virtual = 1024, | 50 | .xres_virtual = 1024, |
@@ -68,7 +68,7 @@ static struct fb_var_screeninfo pmagbafb_defined __initdata = { | |||
68 | .vmode = FB_VMODE_NONINTERLACED, | 68 | .vmode = FB_VMODE_NONINTERLACED, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static struct fb_fix_screeninfo pmagbafb_fix __initdata = { | 71 | static struct fb_fix_screeninfo pmagbafb_fix __devinitdata = { |
72 | .id = "PMAG-BA", | 72 | .id = "PMAG-BA", |
73 | .smem_len = (1024 * 1024), | 73 | .smem_len = (1024 * 1024), |
74 | .type = FB_TYPE_PACKED_PIXELS, | 74 | .type = FB_TYPE_PACKED_PIXELS, |
@@ -142,7 +142,7 @@ static void __init pmagbafb_erase_cursor(struct fb_info *info) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | 144 | ||
145 | static int __init pmagbafb_probe(struct device *dev) | 145 | static int __devinit pmagbafb_probe(struct device *dev) |
146 | { | 146 | { |
147 | struct tc_dev *tdev = to_tc_dev(dev); | 147 | struct tc_dev *tdev = to_tc_dev(dev); |
148 | resource_size_t start, len; | 148 | resource_size_t start, len; |
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 2de0806421b4..22fcb9a3d5c0 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -45,7 +45,7 @@ struct pmagbbfb_par { | |||
45 | }; | 45 | }; |
46 | 46 | ||
47 | 47 | ||
48 | static struct fb_var_screeninfo pmagbbfb_defined __initdata = { | 48 | static struct fb_var_screeninfo pmagbbfb_defined __devinitdata = { |
49 | .bits_per_pixel = 8, | 49 | .bits_per_pixel = 8, |
50 | .red.length = 8, | 50 | .red.length = 8, |
51 | .green.length = 8, | 51 | .green.length = 8, |
@@ -58,7 +58,7 @@ static struct fb_var_screeninfo pmagbbfb_defined __initdata = { | |||
58 | .vmode = FB_VMODE_NONINTERLACED, | 58 | .vmode = FB_VMODE_NONINTERLACED, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static struct fb_fix_screeninfo pmagbbfb_fix __initdata = { | 61 | static struct fb_fix_screeninfo pmagbbfb_fix __devinitdata = { |
62 | .id = "PMAGB-BA", | 62 | .id = "PMAGB-BA", |
63 | .smem_len = (2048 * 1024), | 63 | .smem_len = (2048 * 1024), |
64 | .type = FB_TYPE_PACKED_PIXELS, | 64 | .type = FB_TYPE_PACKED_PIXELS, |
@@ -148,7 +148,7 @@ static void __init pmagbbfb_erase_cursor(struct fb_info *info) | |||
148 | /* | 148 | /* |
149 | * Set up screen parameters. | 149 | * Set up screen parameters. |
150 | */ | 150 | */ |
151 | static void __init pmagbbfb_screen_setup(struct fb_info *info) | 151 | static void __devinit pmagbbfb_screen_setup(struct fb_info *info) |
152 | { | 152 | { |
153 | struct pmagbbfb_par *par = info->par; | 153 | struct pmagbbfb_par *par = info->par; |
154 | 154 | ||
@@ -180,9 +180,9 @@ static void __init pmagbbfb_screen_setup(struct fb_info *info) | |||
180 | /* | 180 | /* |
181 | * Determine oscillator configuration. | 181 | * Determine oscillator configuration. |
182 | */ | 182 | */ |
183 | static void __init pmagbbfb_osc_setup(struct fb_info *info) | 183 | static void __devinit pmagbbfb_osc_setup(struct fb_info *info) |
184 | { | 184 | { |
185 | static unsigned int pmagbbfb_freqs[] __initdata = { | 185 | static unsigned int pmagbbfb_freqs[] __devinitdata = { |
186 | 130808, 119843, 104000, 92980, 74370, 72800, | 186 | 130808, 119843, 104000, 92980, 74370, 72800, |
187 | 69197, 66000, 65000, 50350, 36000, 32000, 25175 | 187 | 69197, 66000, 65000, 50350, 36000, 32000, 25175 |
188 | }; | 188 | }; |
@@ -247,7 +247,7 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info) | |||
247 | }; | 247 | }; |
248 | 248 | ||
249 | 249 | ||
250 | static int __init pmagbbfb_probe(struct device *dev) | 250 | static int __devinit pmagbbfb_probe(struct device *dev) |
251 | { | 251 | { |
252 | struct tc_dev *tdev = to_tc_dev(dev); | 252 | struct tc_dev *tdev = to_tc_dev(dev); |
253 | resource_size_t start, len; | 253 | resource_size_t start, len; |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index afe7e21dd0ae..1475ed6b575f 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -164,7 +164,8 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq, | |||
164 | gfp_t gfp) | 164 | gfp_t gfp) |
165 | { | 165 | { |
166 | struct vring_virtqueue *vq = to_vvq(_vq); | 166 | struct vring_virtqueue *vq = to_vvq(_vq); |
167 | unsigned int i, avail, head, uninitialized_var(prev); | 167 | unsigned int i, avail, uninitialized_var(prev); |
168 | int head; | ||
168 | 169 | ||
169 | START_USE(vq); | 170 | START_USE(vq); |
170 | 171 | ||
@@ -174,7 +175,7 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq, | |||
174 | * buffers, then go indirect. FIXME: tune this threshold */ | 175 | * buffers, then go indirect. FIXME: tune this threshold */ |
175 | if (vq->indirect && (out + in) > 1 && vq->num_free) { | 176 | if (vq->indirect && (out + in) > 1 && vq->num_free) { |
176 | head = vring_add_indirect(vq, sg, out, in, gfp); | 177 | head = vring_add_indirect(vq, sg, out, in, gfp); |
177 | if (head != vq->vring.num) | 178 | if (likely(head >= 0)) |
178 | goto add_head; | 179 | goto add_head; |
179 | } | 180 | } |
180 | 181 | ||
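
The virtio_ring.c change turns head into a signed int because vring_add_indirect(), in a part of the patch not shown here, now reports failure with a negative errno rather than by returning vq->vring.num; the caller can then just test the sign, which is harder to get wrong than comparing against the ring size. The convention, sketched with a hypothetical simplified allocator (alloc_indirect_head and the -ENOSPC choice are illustrative only):

        static int alloc_indirect_head(struct vring_virtqueue *vq)
        {
                if (!vq->num_free)
                        return -ENOSPC;         /* failure: negative errno, never a valid index */
                return vq->free_head;           /* success: 0 <= head < vq->vring.num */
        }

The `if (likely(head >= 0))` test in the hunk above is then the complete error check for the indirect path.
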