Diffstat (limited to 'drivers/acpi/osl.c')
-rw-r--r--   drivers/acpi/osl.c   144
1 file changed, 80 insertions(+), 64 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e5f416c7f66e..54a20ff4b864 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -61,7 +61,6 @@ struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
 	struct work_struct work;
-	int wait;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -569,8 +568,10 @@ static const char * const table_sigs[] = {
 
 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
 
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
 
 void __init acpi_initrd_override(void *data, size_t size)
 {
@@ -579,8 +580,6 @@ void __init acpi_initrd_override(void *data, size_t size)
 	struct acpi_table_header *table;
 	char cpio_path[32] = "kernel/firmware/acpi/";
 	struct cpio_data file;
-	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
-	char *p;
 
 	if (data == NULL || size == 0)
 		return;
@@ -625,8 +624,8 @@ void __init acpi_initrd_override(void *data, size_t size)
 			table->signature, cpio_path, file.name, table->length);
 
 		all_tables_size += table->length;
-		early_initrd_files[table_nr].data = file.data;
-		early_initrd_files[table_nr].size = file.size;
+		acpi_initrd_files[table_nr].data = file.data;
+		acpi_initrd_files[table_nr].size = file.size;
 		table_nr++;
 	}
 	if (table_nr == 0)
@@ -652,14 +651,34 @@ void __init acpi_initrd_override(void *data, size_t size)
 	memblock_reserve(acpi_tables_addr, all_tables_size);
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
-	p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+	/*
+	 * early_ioremap only can remap 256k one time. If we map all
+	 * tables one time, we will hit the limit. Need to map chunks
+	 * one by one during copying the same as that in relocate_initrd().
+	 */
 	for (no = 0; no < table_nr; no++) {
-		memcpy(p + total_offset, early_initrd_files[no].data,
-		       early_initrd_files[no].size);
-		total_offset += early_initrd_files[no].size;
+		unsigned char *src_p = acpi_initrd_files[no].data;
+		phys_addr_t size = acpi_initrd_files[no].size;
+		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+		phys_addr_t slop, clen;
+		char *dest_p;
+
+		total_offset += size;
+
+		while (size) {
+			slop = dest_addr & ~PAGE_MASK;
+			clen = size;
+			if (clen > MAP_CHUNK_SIZE - slop)
+				clen = MAP_CHUNK_SIZE - slop;
+			dest_p = early_ioremap(dest_addr & PAGE_MASK,
+					       clen + slop);
+			memcpy(dest_p + slop, src_p, clen);
+			early_iounmap(dest_p, clen + slop);
+			src_p += clen;
+			dest_addr += clen;
+			size -= clen;
+		}
 	}
-	early_iounmap(p, all_tables_size);
 }
 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
 
@@ -820,7 +839,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
 
 void acpi_os_sleep(u64 ms)
 {
-	schedule_timeout_interruptible(msecs_to_jiffies(ms));
+	msleep(ms);
 }
 
 void acpi_os_stall(u32 us)
@@ -1067,9 +1086,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
 {
 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 
-	if (dpc->wait)
-		acpi_os_wait_events_complete();
-
 	dpc->function(dpc->context);
 	kfree(dpc);
 }
@@ -1089,8 +1105,8 @@ static void acpi_os_execute_deferred(struct work_struct *work)
  *
  ******************************************************************************/
 
-static acpi_status __acpi_os_execute(acpi_execute_type type,
-	acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+			    acpi_osd_exec_callback function, void *context)
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
@@ -1117,20 +1133,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	dpc->context = context;
 
 	/*
-	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
-	 * because the hotplug code may call driver .remove() functions,
-	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
-	 * to flush these workqueues.
-	 *
 	 * To prevent lockdep from complaining unnecessarily, make sure that
 	 * there is a different static lockdep key for each workqueue by using
 	 * INIT_WORK() for each of them separately.
 	 */
-	if (hp) {
-		queue = kacpi_hotplug_wq;
-		dpc->wait = 1;
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	} else if (type == OSL_NOTIFY_HANDLER) {
+	if (type == OSL_NOTIFY_HANDLER) {
 		queue = kacpi_notify_wq;
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 	} else {
@@ -1155,28 +1162,59 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	}
 	return status;
 }
+EXPORT_SYMBOL(acpi_os_execute);
 
-acpi_status acpi_os_execute(acpi_execute_type type,
-			    acpi_osd_exec_callback function, void *context)
+void acpi_os_wait_events_complete(void)
 {
-	return __acpi_os_execute(type, function, context, 0);
+	flush_workqueue(kacpid_wq);
+	flush_workqueue(kacpi_notify_wq);
 }
-EXPORT_SYMBOL(acpi_os_execute);
 
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
-	void *context)
+struct acpi_hp_work {
+	struct work_struct work;
+	acpi_hp_callback func;
+	void *data;
+	u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
 {
-	return __acpi_os_execute(0, function, context, 1);
+	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+	acpi_os_wait_events_complete();
+	hpw->func(hpw->data, hpw->src);
+	kfree(hpw);
 }
-EXPORT_SYMBOL(acpi_os_hotplug_execute);
 
-void acpi_os_wait_events_complete(void)
+acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
 {
-	flush_workqueue(kacpid_wq);
-	flush_workqueue(kacpi_notify_wq);
+	struct acpi_hp_work *hpw;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+		  "Scheduling function [%p(%p, %u)] for deferred execution.\n",
+		  func, data, src));
+
+	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+	if (!hpw)
+		return AE_NO_MEMORY;
+
+	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+	hpw->func = func;
+	hpw->data = data;
+	hpw->src = src;
+	/*
+	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+	 * the hotplug code may call driver .remove() functions, which may
+	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+	 * these workqueues.
+	 */
+	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+		kfree(hpw);
+		return AE_ERROR;
+	}
+	return AE_OK;
 }
 
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
 
 acpi_status
 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
@@ -1335,7 +1373,7 @@ static int __init acpi_os_name_setup(char *str)
 	if (!str || !*str)
 		return 0;
 
-	for (; count-- && str && *str; str++) {
+	for (; count-- && *str; str++) {
 		if (isalnum(*str) || *str == ' ' || *str == ':')
 			*p++ = *str;
 		else if (*str == '\'' || *str == '"')
@@ -1825,25 +1863,3 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
 {
 	__acpi_os_prepare_extended_sleep = func;
 }
-
-
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
-			void (*func)(struct work_struct *work))
-{
-	struct acpi_hp_work *hp_work;
-	int ret;
-
-	hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
-	if (!hp_work)
-		return;
-
-	hp_work->handle = handle;
-	hp_work->type = type;
-	hp_work->context = context;
-
-	INIT_WORK(&hp_work->work, func);
-	ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
-	if (!ret)
-		kfree(hp_work);
-}
-EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
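
Note on the interface change above: acpi_os_hotplug_execute() and alloc_acpi_hp_work() are replaced by acpi_hotplug_execute(), which takes a callback, a data pointer, and an event code; the callback signature (void *data, u32 src) follows acpi_hotplug_work_fn() in the diff. The sketch below is illustrative only and not part of the diff: the handler names, the acpi_device pointer passed as data, and the notify-handler wrapper are hypothetical, and it assumes the acpi_hotplug_execute()/acpi_hp_callback declarations are visible to the caller in this kernel series.

/* Illustrative sketch only -- not part of the diff above. */
#include <linux/acpi.h>
#include <linux/device.h>

/*
 * Hypothetical deferred handler; matches the callback shape used by
 * acpi_hotplug_work_fn() above: (void *data, u32 src).
 */
static void example_hotplug_fn(void *data, u32 src)
{
	struct acpi_device *adev = data;

	/* Runs on kacpi_hotplug_wq, after acpi_os_wait_events_complete(). */
	dev_info(&adev->dev, "deferred hotplug event 0x%x\n", src);
}

/* Hypothetical ACPI notify handler that defers the heavy lifting. */
static void example_notify_handler(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;

	if (ACPI_FAILURE(acpi_hotplug_execute(example_hotplug_fn, adev, event)))
		dev_err(&adev->dev, "failed to queue hotplug work\n");
}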