30 files changed, 233 insertions(+), 173 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d45602886..826b6e148316 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        acpi_display_output=video
                        See above.
 
+       acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
+                       early. Needed on some platforms to properly
+                       initialize the EC.
+
        acpi_irq_balance [HW,ACPI]
                        ACPI will balance active IRQs
                        default in APIC mode
diff --git a/MAINTAINERS b/MAINTAINERS
index 412eff60c33d..44c669d92a49 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -616,10 +616,10 @@ M: Richard Purdie <rpurdie@rpsys.net>
 S: Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T: git git://gitorious.org/linux-gemini/mainline.git
-S: Maintained
+S: Odd Fixes
 F: arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T: topgit git://git.openezx.org/openezx.git
 F: arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
 F: arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index e7263854bc7b..fe3bd5ac8b10 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
        unsigned int reg_both, reg_level, reg_type;
 
        reg_type = __raw_readl(base + GPIO_INT_TYPE);
-       reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+       reg_level = __raw_readl(base + GPIO_INT_LEVEL);
        reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
        switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
        }
 
        __raw_writel(reg_type, base + GPIO_INT_TYPE);
-       __raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+       __raw_writel(reg_level, base + GPIO_INT_LEVEL);
        __raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
        gpio_ack_irq(irq);
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 7ae58892ba8d..e97b255d97bc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0   /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1  /* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0      /* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8c445b..cc29c0f5300d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
        }
 
        mpic = mpic_alloc(np, r.start,
-                       MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+                       MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+                       MPIC_BROKEN_FRR_NIRQS,
                        0, 256, " OpenPIC  ");
        BUG_ON(mpic == NULL);
        of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4cc699..a15f582300d8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
        __iomem u32 *bptr_vaddr;
        struct device_node *np;
        int n = 0;
+       int ioremappable;
 
        WARN_ON (nr < 0 || nr >= NR_CPUS);
 
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
                return;
        }
 
+       /*
+        * A secondary core could be in a spinloop in the bootpage
+        * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+        * The bootpage and highmem can be accessed via ioremap(), but
+        * we need to directly access the spinloop if its in lowmem.
+        */
+       ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
        /* Map the spin table */
-       bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+       if (ioremappable)
+               bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+       else
+               bptr_vaddr = phys_to_virt(*cpu_rel_addr);
 
        local_irq_save(flags);
 
        out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
        out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
 
+       if (!ioremappable)
+               flush_dcache_range((ulong)bptr_vaddr,
+                               (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
        /* Wait a bit for the CPU to ack. */
        while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
                mdelay(1);
 
        local_irq_restore(flags);
 
-       iounmap(bptr_vaddr);
+       if (ioremappable)
+               iounmap(bptr_vaddr);
 
        pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
 }
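The smp.c change above has to cope with a spin table that may sit above the linear mapping (the 0xfffff000 boot page or highmem), where it must be ioremap()'d, or in lowmem, where it is reached through phys_to_virt() and then flushed from the data cache so the not-yet-coherent secondary core sees the writes. A minimal sketch of that mapping decision, assuming kernel context; spin_table_map() and its size parameter are invented for illustration and are not part of the patch:

#include <linux/io.h>
#include <linux/mm.h>

/* Illustrative helper: pick how to reach a spin table at physical address pa. */
static void __iomem *spin_table_map(phys_addr_t pa, size_t size, int *ioremappable)
{
        /* Above the linear map (boot page or highmem): must ioremap(). */
        *ioremappable = pa > virt_to_phys(high_memory);
        if (*ioremappable)
                return ioremap(pa, size);

        /* In lowmem: the existing linear mapping already covers it. */
        return (void __iomem *)phys_to_virt(pa);
}
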
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 0acbcdfa5ca4..af1c5833ff23 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1344,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
         },
        {
         .callback = force_acpi_ht,
-        .ident = "ASUS P2B-DS",
-        .matches = {
-                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-                    DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-                    },
-        },
-       {
-        .callback = force_acpi_ht,
         .ident = "ASUS CUR-DLS",
         .matches = {
                     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
        struct platform_device *dd;
 
        id = dock_station_count;
+       memset(&ds, 0, sizeof(ds));
        dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
        if (IS_ERR(dd))
                return PTR_ERR(dd);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f63b39..e88e8ae04fdb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
+       { set_max_cstate, "Pavilion zv5000", {
+         DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+         DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+        (void *)1},
+       { set_max_cstate, "Asus L8400B", {
+         DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+         DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+        (void *)1},
        {},
 };
 
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7247819dbd80..e306ba9aa34e 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
        return status;
 }
 
+static int early_pdc_done;
+
 void acpi_processor_set_pdc(acpi_handle handle)
 {
        struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
        if (arch_has_acpi_pdc() == false)
                return;
 
+       if (early_pdc_done)
+               return;
+
        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
        return 0;
 }
 
+static int param_early_pdc_optin(char *s)
+{
+       early_pdc_optin = 1;
+       return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
 static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
        {
        set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
+
+       early_pdc_done = 1;
 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f6226085d..3e009674f333 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
 
        if (child)
                *child = device;
-       return 0;
+
+       if (device)
+               return 0;
+       else
+               return -ENODEV;
 }
 
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * scan a given ACPI tree and (probably recently hot-plugged)
+ * create and add or starts found devices.
+ *
+ * If no devices were found -ENODEV is returned which does not
+ * mean that this is a real error, there just have been no suitable
+ * ACPI objects in the table trunk from which the kernel could create
+ * a device and add/start an appropriate driver.
+ */
+
 int
 acpi_bus_add(struct acpi_device **child,
             struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_add = 1;
 
-       acpi_bus_scan(handle, &ops, child);
-       return 0;
+       return acpi_bus_scan(handle, &ops, child);
 }
 EXPORT_SYMBOL(acpi_bus_add);
 
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
 {
        struct acpi_bus_ops ops;
 
+       if (!device)
+               return -EINVAL;
+
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_start = 1;
 
-       acpi_bus_scan(device->handle, &ops, NULL);
-       return 0;
+       return acpi_bus_scan(device->handle, &ops, NULL);
 }
 EXPORT_SYMBOL(acpi_bus_start);
 
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7c450..8a0ed2800e63 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
        unsigned long table_end;
        acpi_size tbl_size;
 
-       if (acpi_disabled)
+       if (acpi_disabled && !acpi_ht)
                return -ENODEV;
 
        if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
        struct acpi_table_header *table = NULL;
        acpi_size tbl_size;
 
-       if (acpi_disabled)
+       if (acpi_disabled && !acpi_ht)
                return -ENODEV;
 
        if (!handler)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
        return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+                           struct detailed_pixel_timing *pt)
+{
+       int i;
+       static const struct {
+               int w, h;
+       } cea_interlaced[] = {
+               { 1920, 1080 },
+               {  720,  480 },
+               { 1440,  480 },
+               { 2880,  480 },
+               {  720,  576 },
+               { 1440,  576 },
+               { 2880,  576 },
+       };
+       static const int n_sizes =
+               sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+       if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+               return;
+
+       for (i = 0; i < n_sizes; i++) {
+               if ((mode->hdisplay == cea_interlaced[i].w) &&
+                   (mode->vdisplay == cea_interlaced[i].h / 2)) {
+                       mode->vdisplay *= 2;
+                       mode->vsync_start *= 2;
+                       mode->vsync_end *= 2;
+                       mode->vtotal *= 2;
+                       mode->vtotal |= 1;
+               }
+       }
+
+       mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
        drm_mode_set_name(mode);
 
-       if (pt->misc & DRM_EDID_PT_INTERLACED)
-               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       drm_mode_do_interlace_quirk(mode, pt);
 
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
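As a quick sanity check on the quirk added above: a 1080i detailed timing encoded as field height arrives with vdisplay 540, and doubling the vertical fields while forcing vtotal odd yields the usual 1125-line frame. The numbers below are illustrative (the sync positions are made up), not values taken from the patch:

#include <stdio.h>

/* Just the vertical fields of a mode; hypothetical 1080i field-height values. */
struct vtiming {
        int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
        struct vtiming v = { 540, 542, 547, 562 };

        /* Same arithmetic as drm_mode_do_interlace_quirk(): double, make vtotal odd. */
        v.vdisplay *= 2;
        v.vsync_start *= 2;
        v.vsync_end *= 2;
        v.vtotal = v.vtotal * 2 | 1;

        printf("%d active lines, %d total lines per frame\n", v.vdisplay, v.vtotal);
        /* prints: 1080 active lines, 1125 total lines per frame */
        return 0;
}
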
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b1d0acbae4e4..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
                },
        },
+       {
+               .ident = "Clevo M5x0N",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+                       DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+               },
+       },
        { }
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2cd0fad17dac..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvbios *bios = &dev_priv->VBIOS;
        struct init_exec iexec = { true, false };
-       unsigned long flags;
 
-       spin_lock_irqsave(&bios->lock, flags);
+       mutex_lock(&bios->lock);
        bios->display.output = dcbent;
        parse_init_table(bios, table, &iexec);
        bios->display.output = NULL;
-       spin_unlock_irqrestore(&bios->lock, flags);
+       mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
        struct nvbios *bios = &dev_priv->VBIOS;
 
        memset(bios, 0, sizeof(struct nvbios));
-       spin_lock_init(&bios->lock);
+       mutex_init(&bios->lock);
        bios->dev = dev;
 
        if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 68446fd4146b..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,7 +205,7 @@ struct nvbios {
        struct drm_device *dev;
        struct nouveau_bios_info pub;
 
-       spinlock_t lock;
+       struct mutex lock;
 
        uint8_t data[NV_PROM_SIZE];
        unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
                             nouveau_encoder(encoder)->restore.output);
 
        nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+       nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2a3df5599ab4..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
        uint8_t count = U8((*ptr)++);
        SDEBUG("   count: %d\n", count);
        if (arg == ATOM_UNIT_MICROSEC)
-               schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+               udelay(count);
        else
                schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca8a4cb..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-       mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f57480ba1355..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE 16
 #define RADEON_DEBUGFS_MAX_NUM_FILES 32
 #define RADEONFB_CONN_LIMIT 4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
        struct list_head list;
-       unsigned long idx;
+       unsigned idx;
        uint64_t gpu_addr;
        struct radeon_fence *fence;
        uint32_t *ptr;
        uint32_t length_dw;
+       bool free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
        struct mutex mutex;
        struct radeon_bo *robj;
-       struct list_head scheduled_ibs;
        struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
        bool ready;
-       DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+       unsigned head_id;
 };
 
 struct radeon_cp {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148cf5e6..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                                          &p->validated);
                }
        }
-       return radeon_bo_list_validate(&p->validated, p->ib->fence);
+       return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
        unsigned i;
 
-       if (error && parser->ib) {
-               radeon_bo_list_unvalidate(&parser->validated,
-                                         parser->ib->fence);
-       } else {
-               radeon_bo_list_unreserve(&parser->validated);
+       if (!error && parser->ib) {
+               radeon_bo_list_fence(&parser->validated, parser->ib->fence);
        }
+       radeon_bo_list_unreserve(&parser->validated);
        for (i = 0; i < parser->nrelocs; i++) {
                if (parser->relocs[i].gobj) {
                        mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
        }
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
-       struct radeon_fence *old_fence = NULL;
        int r;
 
        r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
-               if (fence) {
-                       old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-                       bo->tbo.sync_obj = radeon_fence_ref(fence);
-                       bo->tbo.sync_obj_arg = NULL;
-               }
-               if (old_fence) {
-                       radeon_fence_unref(&old_fence);
-               }
        }
        return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
        struct radeon_bo_list *lobj;
-       struct radeon_fence *old_fence;
-
-       if (fence)
-               list_for_each_entry(lobj, head, list) {
-                       old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-                       if (old_fence == fence) {
-                               lobj->bo->tbo.sync_obj = NULL;
-                               radeon_fence_unref(&old_fence);
-                       }
+       struct radeon_bo *bo;
+       struct radeon_fence *old_fence = NULL;
+
+       list_for_each_entry(lobj, head, list) {
+               bo = lobj->bo;
+               spin_lock(&bo->tbo.lock);
+               old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+               bo->tbo.sync_obj = radeon_fence_ref(fence);
+               bo->tbo.sync_obj_arg = NULL;
+               spin_unlock(&bo->tbo.lock);
+               if (old_fence) {
+                       radeon_fence_unref(&old_fence);
                }
-       radeon_bo_list_unreserve(head);
+       }
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                      struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                                struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..694799f6fac1 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
        struct radeon_fence *fence;
        struct radeon_ib *nib;
-       unsigned long i;
-       int r = 0;
+       int r = 0, i, c;
 
        *ib = NULL;
        r = radeon_fence_create(rdev, &fence);
        if (r) {
-               DRM_ERROR("failed to create fence for new IB\n");
+               dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-       if (i < RADEON_IB_POOL_SIZE) {
-               set_bit(i, rdev->ib_pool.alloc_bm);
-               rdev->ib_pool.ibs[i].length_dw = 0;
-               *ib = &rdev->ib_pool.ibs[i];
-               mutex_unlock(&rdev->ib_pool.mutex);
-               goto out;
+       for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+               i &= (RADEON_IB_POOL_SIZE - 1);
+               if (rdev->ib_pool.ibs[i].free) {
+                       nib = &rdev->ib_pool.ibs[i];
+                       break;
+               }
        }
-       if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-               /* we go do nothings here */
+       if (nib == NULL) {
+               /* This should never happen, it means we allocated all
+                * IB and haven't scheduled one yet, return EBUSY to
+                * userspace hoping that on ioctl recall we get better
+                * luck
+                */
+               dev_err(rdev->dev, "no free indirect buffer !\n");
                mutex_unlock(&rdev->ib_pool.mutex);
-               DRM_ERROR("all IB allocated none scheduled.\n");
-               r = -EINVAL;
-               goto out;
+               radeon_fence_unref(&fence);
+               return -EBUSY;
        }
-       /* get the first ib on the scheduled list */
-       nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-                        struct radeon_ib, list);
-       if (nib->fence == NULL) {
-               /* we go do nothings here */
+       rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+       nib->free = false;
+       if (nib->fence) {
                mutex_unlock(&rdev->ib_pool.mutex);
-               DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-               r = -EINVAL;
-               goto out;
-       }
-       mutex_unlock(&rdev->ib_pool.mutex);
-
-       r = radeon_fence_wait(nib->fence, false);
-       if (r) {
-               DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-                         (unsigned long)nib->gpu_addr, nib->length_dw);
-               DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-               goto out;
+               r = radeon_fence_wait(nib->fence, false);
+               if (r) {
+                       dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+                               nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+                       mutex_lock(&rdev->ib_pool.mutex);
+                       nib->free = true;
+                       mutex_unlock(&rdev->ib_pool.mutex);
+                       radeon_fence_unref(&fence);
+                       return r;
+               }
+               mutex_lock(&rdev->ib_pool.mutex);
        }
        radeon_fence_unref(&nib->fence);
-
+       nib->fence = fence;
        nib->length_dw = 0;
-
-       /* scheduled list is accessed here */
-       mutex_lock(&rdev->ib_pool.mutex);
-       list_del(&nib->list);
-       INIT_LIST_HEAD(&nib->list);
        mutex_unlock(&rdev->ib_pool.mutex);
-
        *ib = nib;
-out:
-       if (r) {
-               radeon_fence_unref(&fence);
-       } else {
-               (*ib)->fence = fence;
-       }
-       return r;
+       return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-               /* IB is scheduled & not signaled don't do anythings */
-               mutex_unlock(&rdev->ib_pool.mutex);
-               return;
-       }
-       list_del(&tmp->list);
-       INIT_LIST_HEAD(&tmp->list);
-       if (tmp->fence)
-               radeon_fence_unref(&tmp->fence);
-
-       tmp->length_dw = 0;
-       clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+       tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
        if (!ib->length_dw || !rdev->cp.ready) {
                /* TODO: Nothings in the ib we should report. */
-               DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+               DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }
 
@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
-       list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+       /* once scheduled IB is considered free and protected by the fence */
+       ib->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev);
        return 0;
@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
        if (rdev->ib_pool.robj)
                return 0;
        /* Allocate 1M object buffer */
-       INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
        r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
                             true, RADEON_GEM_DOMAIN_GTT,
                             &rdev->ib_pool.robj);
@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
-               INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+               rdev->ib_pool.ibs[i].free = true;
        }
-       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+       rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
-       bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
        if (rdev->ib_pool.robj) {
                r = radeon_bo_reserve(rdev->ib_pool.robj, false);
                if (likely(r == 0)) {
@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
        if (ib == NULL) {
                return 0;
        }
-       seq_printf(m, "IB %04lu\n", ib->idx);
+       seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
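The radeon_ring.c rework above drops the alloc_bm bitmap and the scheduled_ibs list in favour of a free flag per IB slot plus a round-robin scan of the power-of-two pool starting at head_id. A stripped-down sketch of just that allocation idea; pool, slot, pool_get() and pool_put() are invented names, and the real driver's locking and fence handling are deliberately left out:

#include <stddef.h>

#define POOL_SIZE 16    /* must be a power of 2 so "& (POOL_SIZE - 1)" wraps the index */

struct slot {
        int free;
};

static struct slot pool[POOL_SIZE];
static unsigned int head_id;

static void pool_init(void)
{
        unsigned int i;

        for (i = 0; i < POOL_SIZE; i++)
                pool[i].free = 1;       /* every slot starts out free */
        head_id = 0;
}

/* Hand out the first free slot at or after head_id, wrapping around the pool. */
static struct slot *pool_get(void)
{
        unsigned int i, c;

        for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
                i &= POOL_SIZE - 1;
                if (pool[i].free) {
                        pool[i].free = 0;
                        head_id = (i + 1) & (POOL_SIZE - 1);
                        return &pool[i];
                }
        }
        return NULL;    /* every slot is still in flight */
}

static void pool_put(struct slot *s)
{
        s->free = 1;    /* the slot can be handed out again */
}
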
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa64..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                 */
 
                DRM_INFO("It appears like vesafb is loaded. "
-                        "Ignore above error if any. Entering stealth mode.\n");
+                        "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
-               vmw_kms_init(dev_priv);
-               vmw_overlay_init(dev_priv);
-       } else {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       goto out_no_device;
-               vmw_kms_init(dev_priv);
-               vmw_overlay_init(dev_priv);
-               vmw_fb_init(dev_priv);
        }
+       ret = vmw_request_device(dev_priv);
+       if (unlikely(ret != 0))
+               goto out_no_device;
+       vmw_kms_init(dev_priv);
+       vmw_overlay_init(dev_priv);
+       vmw_fb_init(dev_priv);
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
-       if (!dev_priv->stealth) {
-               vmw_fb_close(dev_priv);
-               vmw_kms_close(dev_priv);
-               vmw_overlay_close(dev_priv);
-               vmw_release_device(dev_priv);
-               pci_release_regions(dev->pdev);
-       } else {
-               vmw_kms_close(dev_priv);
-               vmw_overlay_close(dev_priv);
+       vmw_fb_close(dev_priv);
+       vmw_kms_close(dev_priv);
+       vmw_overlay_close(dev_priv);
+       vmw_release_device(dev_priv);
+       if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
-       }
+       else
+               pci_release_regions(dev->pdev);
+
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
        int ret = 0;
 
        DRM_INFO("Master set.\n");
-       if (dev_priv->stealth) {
-               ret = vmw_request_device(dev_priv);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
 
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-       if (dev_priv->stealth) {
-               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-               if (unlikely(ret != 0))
-                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
-               vmw_release_device(dev_priv);
-       }
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-       if (!dev_priv->stealth)
-               vmw_fb_on(dev_priv);
+       vmw_fb_on(dev_priv);
 }
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
        info->pixmap.scan_align = 1;
 #endif
 
+       info->aperture_base = vmw_priv->vram_start;
+       info->aperture_size = vmw_priv->vram_size;
+
        /*
         * Dirty & Deferred IO
         */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e545f6..b54aee7cd9e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
        return 0;
 }
 
+static int i8042_pm_thaw(struct device *dev)
+{
+       i8042_interrupt(0, NULL);
+
+       return 0;
+}
+
 static const struct dev_pm_ops i8042_pm_ops = {
        .suspend = i8042_pm_reset,
        .resume = i8042_pm_restore,
+       .thaw = i8042_pm_thaw,
        .poweroff = i8042_pm_reset,
        .restore = i8042_pm_restore,
 };
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fdab764..cb2fd01eddae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
                        -ret_val);
                goto acpiphp_bus_add_out;
        }
-       /*
-        * try to start anyway. We could have failed to add
-        * simply because this bus had previously been added
-        * on another add. Don't bother with the return value
-        * we just keep going.
-        */
        ret_val = acpi_bus_start(device);
 
 acpiphp_bus_add_out:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e67e4feb35cb..eb603f1d55ca 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
        case TPACPI_THERMAL_ACPI_TMP07:
        case TPACPI_THERMAL_ACPI_UPDT:
                sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-                                  &thermal_temp_input16_group);
+                                  &thermal_temp_input8_group);
                break;
        case TPACPI_THERMAL_NONE:
        default:
diff --git a/include/linux/input.h b/include/linux/input.h
index 735ceaf1bc2d..663208afb64c 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -376,6 +376,7 @@ struct input_absinfo {
 #define KEY_DISPLAY_OFF 245 /* display device to off state */
 
 #define KEY_WIMAX 246
+#define KEY_RFKILL 247 /* Key that controls all radios */
 
 /* Range 248 - 255 is reserved for special needs of AT keyboard driver */
 