author    David S. Miller <davem@davemloft.net>    2011-08-08 02:20:26 -0400
committer David S. Miller <davem@davemloft.net>    2011-08-08 02:20:26 -0400
commit    19fd61785a580c60cba900c5171bfadb57dd5056 (patch)
tree      1e491fb014be0dc03f4b6755bb94e73afd38c455 /drivers
parent    57569d0e12eaf31717e295960cd2a26f626c8e5b (diff)
parent    8028837d71ba9904b17281b40f94b93e947fbe38 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers')
259 files changed, 9232 insertions, 4096 deletions
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 73863d86f022..76dc02f15574 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
  */
 u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
 
+/*
+ * Disable runtime checking and repair of values returned by control methods.
+ * Use only if the repair is causing a problem on a particular machine.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+
 /* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
 
 struct acpi_table_fadt acpi_gbl_FADT;
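Note: acpi_gbl_disable_auto_repair added above is a plain ACPICA global; this hunk only defines it and nothing in this series sets it. A minimal sketch of how a host kernel might expose it as a boot parameter follows. The parameter name and setup hook are hypothetical, and the sketch assumes it lives in code that can see the ACPICA globals (they are not exported to modules by this patch).

#include <linux/init.h>
#include <acpi/acpi.h>	/* for TRUE; visibility of the global is assumed */

/* Hypothetical boot parameter, for illustration only. */
static int __init acpi_no_auto_repair_setup(char *str)
{
	acpi_gbl_disable_auto_repair = TRUE;
	return 1;
}
__setup("acpi.no_auto_repair", acpi_no_auto_repair_setup);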
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c7f743ca395b..5552125d8340 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -357,6 +357,7 @@ struct acpi_predefined_data {
 	char *pathname;
 	const union acpi_predefined_info *predefined;
 	union acpi_operand_object *parent_package;
+	struct acpi_namespace_node *node;
 	u32 flags;
 	u8 node_flags;
 };
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 94e73c97cf85..c445cca490ea 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TC2", 0, ACPI_RTYPE_INTEGER}},
+	{{"_TDL", 0, ACPI_RTYPE_INTEGER}},
 	{{"_TIP", 1, ACPI_RTYPE_INTEGER}},
 	{{"_TIV", 1, ACPI_RTYPE_INTEGER}},
 	{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 9fb03fa8ffde..c845c8089f39 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
 	}
 
 	/*
-	 * 1) We have a return value, but if one wasn't expected, just exit, this is
-	 *    not a problem. For example, if the "Implicit Return" feature is
-	 *    enabled, methods will always return a value.
+	 * Return value validation and possible repair.
 	 *
-	 * 2) If the return value can be of any type, then we cannot perform any
-	 *    validation, exit.
+	 * 1) Don't perform return value validation/repair if this feature
+	 *    has been disabled via a global option.
+	 *
+	 * 2) We have a return value, but if one wasn't expected, just exit,
+	 *    this is not a problem. For example, if the "Implicit Return"
+	 *    feature is enabled, methods will always return a value.
+	 *
+	 * 3) If the return value can be of any type, then we cannot perform
+	 *    any validation, just exit.
 	 */
-	if ((!predefined->info.expected_btypes) ||
+	if (acpi_gbl_disable_auto_repair ||
+	    (!predefined->info.expected_btypes) ||
 	    (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
 		goto cleanup;
 	}
@@ -212,6 +218,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
 		goto cleanup;
 	}
 	data->predefined = predefined;
+	data->node = node;
 	data->node_flags = node->flags;
 	data->pathname = pathname;
 
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 973883babee1..024c4f263f87 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
 {
 	union acpi_operand_object *return_object = *return_object_ptr;
 	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	/*
+	 * We can only sort the _TSS return package if there is no _PSS in the
+	 * same scope. This is because if _PSS is present, the ACPI specification
+	 * dictates that the _TSS Power Dissipation field is to be ignored, and
+	 * therefore some BIOSs leave garbage values in the _TSS Power field(s).
+	 * In this case, it is best to just return the _TSS package as-is.
+	 * (May, 2011)
+	 */
+	status =
+	    acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node);
+	if (ACPI_SUCCESS(status)) {
+		return (AE_OK);
+	}
 
 	status = acpi_ns_check_sorted_list(data, return_object, 5, 1,
 					   ACPI_SORT_DESCENDING,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 48db0944ce4a..62365f6075dd 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
 	}
 
 	/*
-	 * Originally, we checked the table signature for "SSDT" or "PSDT" here.
-	 * Next, we added support for OEMx tables, signature "OEM".
-	 * Valid tables were encountered with a null signature, so we've just
-	 * given up on validating the signature, since it seems to be a waste
-	 * of code. The original code was removed (05/2008).
+	 * Validate the incoming table signature.
+	 *
+	 * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
+	 * 2) We added support for OEMx tables, signature "OEM".
+	 * 3) Valid tables were encountered with a null signature, so we just
+	 *    gave up on validating the signature, (05/2008).
+	 * 4) We encountered non-AML tables such as the MADT, which caused
+	 *    interpreter errors and kernel faults. So now, we once again allow
+	 *    only "SSDT", "OEMx", and now, also a null signature. (05/2011).
 	 */
+	if ((table_desc->pointer->signature[0] != 0x00) &&
+	    (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
+	    && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
+		ACPI_ERROR((AE_INFO,
+			    "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
+			    acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+						    pointer->
+						    signature) ? table_desc->
+			    pointer->signature : "????",
+			    *(u32 *)table_desc->pointer->signature));
+
+		return_ACPI_STATUS(AE_BAD_SIGNATURE);
+	}
 
 	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
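Note: the check added above reduces to a simple acceptance rule: a dynamically loaded table passes if its signature's first byte is NUL, the signature is exactly "SSDT", or it begins with "OEM". A standalone restatement in plain user-space C (illustrative names only, not kernel code):

#include <stdio.h>
#include <string.h>

/* Mirrors the rule added to acpi_tb_add_table() above. */
static int dynamic_table_signature_ok(const char sig[4])
{
	return sig[0] == '\0' ||
	       memcmp(sig, "SSDT", 4) == 0 ||
	       strncmp(sig, "OEM", 3) == 0;
}

int main(void)
{
	const char *samples[] = { "SSDT", "OEM1", "DSDT", "MADT" };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%.4s -> %s\n", samples[i],
		       dynamic_table_signature_ok(samples[i]) ?
		       "accepted" : "rejected");
	return 0;
}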
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f739a70b1c70..c34aa51af4ee 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -10,9 +10,11 @@ config ACPI_APEI
 	  error injection.
 
 config ACPI_APEI_GHES
-	tristate "APEI Generic Hardware Error Source"
+	bool "APEI Generic Hardware Error Source"
 	depends on ACPI_APEI && X86
 	select ACPI_HED
+	select LLIST
+	select GENERIC_ALLOCATOR
 	help
 	  Generic Hardware Error Source provides a way to report
 	  platform hardware errors (such as that from chipset). It
@@ -30,6 +32,13 @@ config ACPI_APEI_PCIEAER
 	  PCIe AER errors may be reported via APEI firmware first mode.
 	  Turn on this option to enable the corresponding support.
 
+config ACPI_APEI_MEMORY_FAILURE
+	bool "APEI memory error recovering support"
+	depends on ACPI_APEI && MEMORY_FAILURE
+	help
+	  Memory errors may be reported via APEI firmware first mode.
+	  Turn on this option to enable the memory recovering support.
+
 config ACPI_APEI_EINJ
 	tristate "APEI Error INJection (EINJ)"
 	depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 4a904a4bf05f..8041248fce9b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop);
  * Interpret the specified action. Go through whole action table,
  * execute all instructions belong to the action.
  */
-int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
+		    bool optional)
 {
-	int rc;
+	int rc = -ENOENT;
 	u32 i, ip;
 	struct acpi_whea_header *entry;
 	apei_exec_ins_func_t run;
@@ -198,9 +199,9 @@ rewind:
 		goto rewind;
 	}
 
-	return 0;
+	return !optional && rc < 0 ? rc : 0;
 }
-EXPORT_SYMBOL_GPL(apei_exec_run);
+EXPORT_SYMBOL_GPL(__apei_exec_run);
 
 typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
 				      struct acpi_whea_header *entry,
@@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void)
 	return dapei;
 }
 EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
+
+int apei_osc_setup(void)
+{
+	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
+	acpi_handle handle;
+	u32 capbuf[3];
+	struct acpi_osc_context context = {
+		.uuid_str	= whea_uuid_str,
+		.rev		= 1,
+		.cap.length	= sizeof(capbuf),
+		.cap.pointer	= capbuf,
+	};
+
+	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+	capbuf[OSC_SUPPORT_TYPE] = 0;
+	capbuf[OSC_CONTROL_TYPE] = 0;
+
+	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
+		return -EIO;
+	else {
+		kfree(context.ret.pointer);
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(apei_osc_setup);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index ef0581f2094d..f57050e7a5e7 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
 	return ctx->value;
 }
 
-int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
+
+static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+	return __apei_exec_run(ctx, action, 0);
+}
+
+/* It is optional whether the firmware provides the action */
+static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action)
+{
+	return __apei_exec_run(ctx, action, 1);
+}
 
 /* Common instruction implementation */
 
@@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx,
 			const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+int apei_osc_setup(void);
 #endif
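Note: with apei_exec_run() now surfacing a missing action as -ENOENT and apei_exec_run_optional() swallowing that case, a caller can mark BEGIN/END actions as optional while still treating missing mandatory actions as errors. The function below is illustrative only; it mirrors how einj.c is converted later in this diff, and einj_exec_ctx_init() plus the ACPI_EINJ_* action codes are the existing names used there.

/* Illustrative sketch, not part of this patch. */
static int example_einj_operation(void)
{
	struct apei_exec_context ctx;
	int rc;

	einj_exec_ctx_init(&ctx);

	/* Firmware may legitimately omit BEGIN/END; -ENOENT is ignored. */
	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
	if (rc)
		return rc;

	/* A missing mandatory action now fails with -ENOENT. */
	rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
	if (rc)
		return rc;

	return apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
}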
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index f74b2ea11f21..589b96c38704 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -46,7 +46,8 @@
  * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
  * EINJ table through an unpublished extension. Use with caution as
  * most will ignore the parameter and make their own choice of address
- * for error injection.
+ * for error injection. This extension is used only if
+ * param_extension module parameter is specified.
  */
 struct einj_parameter {
 	u64 type;
@@ -65,6 +66,9 @@ struct einj_parameter {
 	((struct acpi_whea_header *)((char *)(tab) +			\
 	sizeof(struct acpi_table_einj)))
 
+static bool param_extension;
+module_param(param_extension, bool, 0);
+
 static struct acpi_table_einj *einj_tab;
 
 static struct apei_resources einj_resources;
@@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
 
 	einj_exec_ctx_init(&ctx);
 
-	rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
 	if (rc)
 		return rc;
 	apei_exec_ctx_set_input(&ctx, type);
@@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
 	rc = __einj_error_trigger(trigger_paddr);
 	if (rc)
 		return rc;
-	rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
 
 	return rc;
 }
@@ -489,14 +493,6 @@ static int __init einj_init(void)
 				   einj_debug_dir, NULL, &error_type_fops);
 	if (!fentry)
 		goto err_cleanup;
-	fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
-				    einj_debug_dir, &error_param1);
-	if (!fentry)
-		goto err_cleanup;
-	fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
-				    einj_debug_dir, &error_param2);
-	if (!fentry)
-		goto err_cleanup;
 	fentry = debugfs_create_file("error_inject", S_IWUSR,
 				     einj_debug_dir, NULL, &error_inject_fops);
 	if (!fentry)
@@ -513,12 +509,23 @@ static int __init einj_init(void)
 	rc = apei_exec_pre_map_gars(&ctx);
 	if (rc)
 		goto err_release;
-	param_paddr = einj_get_parameter_address();
-	if (param_paddr) {
-		einj_param = ioremap(param_paddr, sizeof(*einj_param));
-		rc = -ENOMEM;
-		if (!einj_param)
-			goto err_unmap;
+	if (param_extension) {
+		param_paddr = einj_get_parameter_address();
+		if (param_paddr) {
+			einj_param = ioremap(param_paddr, sizeof(*einj_param));
+			rc = -ENOMEM;
+			if (!einj_param)
+				goto err_unmap;
+			fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+						    einj_debug_dir, &error_param1);
+			if (!fentry)
+				goto err_unmap;
+			fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+						    einj_debug_dir, &error_param2);
+			if (!fentry)
+				goto err_unmap;
+		} else
+			pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
 	}
 
 	pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -526,6 +533,8 @@ static int __init einj_init(void)
 	return 0;
 
 err_unmap:
+	if (einj_param)
+		iounmap(einj_param);
 	apei_exec_post_unmap_gars(&ctx);
 err_release:
 	apei_resources_release(&einj_resources);
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index a4cfb64c86a1..903549df809b 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -33,7 +33,7 @@
 
 #define ERST_DBG_PFX			"ERST DBG: "
 
-#define ERST_DBG_RECORD_LEN_MAX		4096
+#define ERST_DBG_RECORD_LEN_MAX		0x4000
 
 static void *erst_dbg_buf;
 static unsigned int erst_dbg_buf_len;
@@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = {
 
 static __init int erst_dbg_init(void)
 {
+	if (erst_disable) {
+		pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
+		return -ENODEV;
+	}
 	return misc_register(&erst_dbg_dev);
 }
 
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e6cef8e1b534..2ca59dc69f7f 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset)
 	int rc;
 
 	erst_exec_ctx_init(&ctx);
-	rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
 	if (rc)
 		return rc;
 	apei_exec_ctx_set_input(&ctx, offset);
@@ -666,7 +666,7 @@ static int __erst_write_to_storage(u64 offset)
 	if (rc)
 		return rc;
 	val = apei_exec_ctx_get_output(&ctx);
-	rc = apei_exec_run(&ctx, ACPI_ERST_END);
+	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
 	if (rc)
 		return rc;
 
@@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
 	int rc;
 
 	erst_exec_ctx_init(&ctx);
-	rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
 	if (rc)
 		return rc;
 	apei_exec_ctx_set_input(&ctx, offset);
@@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
 	if (rc)
 		return rc;
 	val = apei_exec_ctx_get_output(&ctx);
-	rc = apei_exec_run(&ctx, ACPI_ERST_END);
+	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
 	if (rc)
 		return rc;
 
@@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id)
 	int rc;
 
 	erst_exec_ctx_init(&ctx);
-	rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
 	if (rc)
 		return rc;
 	apei_exec_ctx_set_input(&ctx, record_id);
@@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id)
 	if (rc)
 		return rc;
 	val = apei_exec_ctx_get_output(&ctx);
-	rc = apei_exec_run(&ctx, ACPI_ERST_END);
+	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
 	if (rc)
 		return rc;
 
@@ -932,8 +932,11 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
 static int erst_open_pstore(struct pstore_info *psi);
 static int erst_close_pstore(struct pstore_info *psi);
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-			   struct timespec *time);
-static u64 erst_writer(enum pstore_type_id type, size_t size);
+			   struct timespec *time, struct pstore_info *psi);
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+		       size_t size, struct pstore_info *psi);
+static int erst_clearer(enum pstore_type_id type, u64 id,
+			struct pstore_info *psi);
 
 static struct pstore_info erst_info = {
 	.owner		= THIS_MODULE,
@@ -942,7 +945,7 @@ static struct pstore_info erst_info = {
 	.close		= erst_close_pstore,
 	.read		= erst_reader,
 	.write		= erst_writer,
-	.erase		= erst_clear
+	.erase		= erst_clearer
 };
 
 #define CPER_CREATOR_PSTORE						\
@@ -983,7 +986,7 @@ static int erst_close_pstore(struct pstore_info *psi)
 }
 
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-			   struct timespec *time)
+			   struct timespec *time, struct pstore_info *psi)
 {
 	int rc;
 	ssize_t len = 0;
@@ -1037,7 +1040,8 @@ out:
 	return (rc < 0) ? rc : (len - sizeof(*rcd));
 }
 
-static u64 erst_writer(enum pstore_type_id type, size_t size)
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+		       size_t size, struct pstore_info *psi)
 {
 	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
 			(erst_info.buf - sizeof(*rcd));
@@ -1080,6 +1084,12 @@ static u64 erst_writer(enum pstore_type_id type, size_t size)
 	return rcd->hdr.record_id;
 }
 
+static int erst_clearer(enum pstore_type_id type, u64 id,
+			struct pstore_info *psi)
+{
+	return erst_clear(id);
+}
+
 static int __init erst_init(void)
 {
 	int rc = 0;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f703b2881153..0784f99a4665 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,7 +12,7 @@
  * For more information about Generic Hardware Error Source, please
  * refer to ACPI Specification version 4.0, section 17.3.2.6
  *
- * Copyright 2010 Intel Corp.
+ * Copyright 2010,2011 Intel Corp.
  *   Author: Huang Ying <ying.huang@intel.com>
  *
  * This program is free software; you can redistribute it and/or
@@ -42,6 +42,9 @@
 #include <linux/mutex.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+#include <linux/genalloc.h>
 #include <acpi/apei.h>
 #include <acpi/atomicio.h>
 #include <acpi/hed.h>
@@ -53,6 +56,30 @@
 #define GHES_PFX	"GHES: "
 
 #define GHES_ESTATUS_MAX_SIZE		65536
+#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536
+
+#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
+
+/* This is just an estimation for memory pool allocation */
+#define GHES_ESTATUS_CACHE_AVG_SIZE	512
+
+#define GHES_ESTATUS_CACHES_SIZE	4
+
+#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
+/* Prevent too many caches are allocated because of RCU */
+#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)
+
+#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
+	(sizeof(struct ghes_estatus_cache) + (estatus_len))
+#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
+	((struct acpi_hest_generic_status *)			\
+	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
+
+#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
+	(sizeof(struct ghes_estatus_node) + (estatus_len))
+#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
+	((struct acpi_hest_generic_status *)			\
+	 ((struct ghes_estatus_node *)(estatus_node) + 1))
 
 /*
  * One struct ghes is created for each generic hardware error source.
@@ -77,6 +104,22 @@ struct ghes {
 	};
 };
 
+struct ghes_estatus_node {
+	struct llist_node llnode;
+	struct acpi_hest_generic *generic;
+};
+
+struct ghes_estatus_cache {
+	u32 estatus_len;
+	atomic_t count;
+	struct acpi_hest_generic *generic;
+	unsigned long long time_in;
+	struct rcu_head rcu;
+};
+
+int ghes_disable;
+module_param_named(disable, ghes_disable, bool, 0);
+
 static int ghes_panic_timeout	__read_mostly = 30;
 
 /*
@@ -121,6 +164,22 @@ static struct vm_struct *ghes_ioremap_area;
 static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
 static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
 
+/*
+ * printk is not safe in NMI context. So in NMI handler, we allocate
+ * required memory from lock-less memory allocator
+ * (ghes_estatus_pool), save estatus into it, put them into lock-less
+ * list (ghes_estatus_llist), then delay printk into IRQ context via
+ * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
+ * required pool size by all NMI error source.
+ */
+static struct gen_pool *ghes_estatus_pool;
+static unsigned long ghes_estatus_pool_size_request;
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
+
+struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+static atomic_t ghes_estatus_cache_alloced;
+
 static int ghes_ioremap_init(void)
 {
 	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
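Note: the comment in the hunk above summarizes the new NMI strategy: never printk() from NMI context; instead push the saved status block onto a lock-less list and let an irq_work drain it in IRQ context. The sketch below shows only that list-plus-irq_work pattern in isolation. It is a generic illustration, not code from this patch; the real GHES path additionally takes its nodes from the gen_pool allocator (normal allocators are not NMI-safe) and only does this on architectures with an NMI-safe cmpxchg.

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/llist.h>

struct nmi_report {
	struct llist_node llnode;
	int source_id;
};

static LLIST_HEAD(report_list);
static struct irq_work report_work;

/* Runs in IRQ context, where printk() is safe again. */
static void report_work_func(struct irq_work *work)
{
	struct llist_node *node = llist_del_all(&report_list);

	while (node) {
		struct nmi_report *r =
			llist_entry(node, struct nmi_report, llnode);

		node = node->next;	/* advance before the node is reused */
		pr_err("deferred report from source %d\n", r->source_id);
		/* here the node would go back to an NMI-safe pool */
	}
}

/* Called from NMI context: no printk(), no normal allocator. */
static void report_from_nmi(struct nmi_report *r /* from an NMI-safe pool */)
{
	llist_add(&r->llnode, &report_list);
	irq_work_queue(&report_work);
}

static int __init report_init(void)
{
	init_irq_work(&report_work, report_work_func);
	return 0;
}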
@@ -180,6 +239,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
 	__flush_tlb_one(vaddr);
 }
 
+static int ghes_estatus_pool_init(void)
+{
+	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+	if (!ghes_estatus_pool)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+					      struct gen_pool_chunk *chunk,
+					      void *data)
+{
+	free_page(chunk->start_addr);
+}
+
+static void ghes_estatus_pool_exit(void)
+{
+	gen_pool_for_each_chunk(ghes_estatus_pool,
+				ghes_estatus_pool_free_chunk_page, NULL);
+	gen_pool_destroy(ghes_estatus_pool);
+}
+
+static int ghes_estatus_pool_expand(unsigned long len)
+{
+	unsigned long i, pages, size, addr;
+	int ret;
+
+	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
+	size = gen_pool_size(ghes_estatus_pool);
+	if (size >= ghes_estatus_pool_size_request)
+		return 0;
+	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
+	for (i = 0; i < pages; i++) {
+		addr = __get_free_page(GFP_KERNEL);
+		if (!addr)
+			return -ENOMEM;
+		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 {
 	struct ghes *ghes;
@@ -341,43 +449,196 @@ static void ghes_clear_estatus(struct ghes *ghes)
 	ghes->flags &= ~GHES_TO_CLEAR;
 }
 
-static void ghes_do_proc(struct ghes *ghes)
+static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
 {
-	int sev, processed = 0;
+	int sev, sec_sev;
 	struct acpi_hest_generic_data *gdata;
 
-	sev = ghes_severity(ghes->estatus->error_severity);
-	apei_estatus_for_each_section(ghes->estatus, gdata) {
-#ifdef CONFIG_X86_MCE
+	sev = ghes_severity(estatus->error_severity);
+	apei_estatus_for_each_section(estatus, gdata) {
+		sec_sev = ghes_severity(gdata->error_severity);
 		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
 				 CPER_SEC_PLATFORM_MEM)) {
-			apei_mce_report_mem_error(
-				sev == GHES_SEV_CORRECTED,
-				(struct cper_sec_mem_err *)(gdata+1));
-			processed = 1;
-		}
+			struct cper_sec_mem_err *mem_err;
+			mem_err = (struct cper_sec_mem_err *)(gdata+1);
+#ifdef CONFIG_X86_MCE
+			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
+						  mem_err);
 #endif
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+			if (sev == GHES_SEV_RECOVERABLE &&
+			    sec_sev == GHES_SEV_RECOVERABLE &&
+			    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+				unsigned long pfn;
+				pfn = mem_err->physical_addr >> PAGE_SHIFT;
+				memory_failure_queue(pfn, 0, 0);
+			}
+#endif
+		}
 	}
 }
 
-static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+static void __ghes_print_estatus(const char *pfx,
+				 const struct acpi_hest_generic *generic,
+				 const struct acpi_hest_generic_status *estatus)
 {
-	/* Not more than 2 messages every 5 seconds */
-	static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
-
 	if (pfx == NULL) {
-		if (ghes_severity(ghes->estatus->error_severity) <=
+		if (ghes_severity(estatus->error_severity) <=
 		    GHES_SEV_CORRECTED)
 			pfx = KERN_WARNING HW_ERR;
 		else
 			pfx = KERN_ERR HW_ERR;
 	}
-	if (__ratelimit(&ratelimit)) {
-		printk(
-		"%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
-		pfx, ghes->generic->header.source_id);
-		apei_estatus_print(pfx, ghes->estatus);
+	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+	       pfx, generic->header.source_id);
+	apei_estatus_print(pfx, estatus);
+}
+
+static int ghes_print_estatus(const char *pfx,
+			      const struct acpi_hest_generic *generic,
+			      const struct acpi_hest_generic_status *estatus)
+{
+	/* Not more than 2 messages every 5 seconds */
+	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+	struct ratelimit_state *ratelimit;
+
+	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
+		ratelimit = &ratelimit_corrected;
+	else
+		ratelimit = &ratelimit_uncorrected;
+	if (__ratelimit(ratelimit)) {
+		__ghes_print_estatus(pfx, generic, estatus);
+		return 1;
 	}
+	return 0;
+}
+
+/*
+ * GHES error status reporting throttle, to report more kinds of
+ * errors, instead of just most frequently occurred errors.
+ */
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+{
+	u32 len;
+	int i, cached = 0;
+	unsigned long long now;
+	struct ghes_estatus_cache *cache;
+	struct acpi_hest_generic_status *cache_estatus;
+
+	len = apei_estatus_len(estatus);
+	rcu_read_lock();
+	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+		cache = rcu_dereference(ghes_estatus_caches[i]);
+		if (cache == NULL)
+			continue;
+		if (len != cache->estatus_len)
+			continue;
+		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+		if (memcmp(estatus, cache_estatus, len))
+			continue;
+		atomic_inc(&cache->count);
+		now = sched_clock();
+		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+			cached = 1;
+		break;
+	}
+	rcu_read_unlock();
+	return cached;
+}
+
+static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+	struct acpi_hest_generic *generic,
+	struct acpi_hest_generic_status *estatus)
+{
+	int alloced;
+	u32 len, cache_len;
+	struct ghes_estatus_cache *cache;
+	struct acpi_hest_generic_status *cache_estatus;
+
+	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
+	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
+		atomic_dec(&ghes_estatus_cache_alloced);
+		return NULL;
+	}
+	len = apei_estatus_len(estatus);
+	cache_len = GHES_ESTATUS_CACHE_LEN(len);
+	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
+	if (!cache) {
+		atomic_dec(&ghes_estatus_cache_alloced);
+		return NULL;
+	}
+	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+	memcpy(cache_estatus, estatus, len);
+	cache->estatus_len = len;
+	atomic_set(&cache->count, 0);
+	cache->generic = generic;
+	cache->time_in = sched_clock();
+	return cache;
+}
+
+static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
+{
+	u32 len;
+
+	len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+	len = GHES_ESTATUS_CACHE_LEN(len);
+	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
+	atomic_dec(&ghes_estatus_cache_alloced);
+}
+
+static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
+{
+	struct ghes_estatus_cache *cache;
+
+	cache = container_of(head, struct ghes_estatus_cache, rcu);
+	ghes_estatus_cache_free(cache);
+}
+
+static void ghes_estatus_cache_add(
+	struct acpi_hest_generic *generic,
+	struct acpi_hest_generic_status *estatus)
+{
+	int i, slot = -1, count;
+	unsigned long long now, duration, period, max_period = 0;
+	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
+
+	new_cache = ghes_estatus_cache_alloc(generic, estatus);
+	if (new_cache == NULL)
+		return;
+	rcu_read_lock();
+	now = sched_clock();
+	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+		cache = rcu_dereference(ghes_estatus_caches[i]);
+		if (cache == NULL) {
+			slot = i;
+			slot_cache = NULL;
+			break;
+		}
+		duration = now - cache->time_in;
+		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
+			slot = i;
+			slot_cache = cache;
+			break;
+		}
+		count = atomic_read(&cache->count);
+		period = duration;
+		do_div(period, (count + 1));
+		if (period > max_period) {
+			max_period = period;
+			slot = i;
+			slot_cache = cache;
+		}
+	}
+	/* new_cache must be put into array after its contents are written */
+	smp_wmb();
+	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
+				  slot_cache, new_cache) == slot_cache) {
+		if (slot_cache)
+			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
+	} else
+		ghes_estatus_cache_free(new_cache);
+	rcu_read_unlock();
 }
 
 static int ghes_proc(struct ghes *ghes)
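Note (worked numbers): with the constants added earlier in this file, GHES_ESTATUS_CACHES_SIZE is 4 and GHES_ESTATUS_IN_CACHE_MAX_NSEC is 10,000,000,000 ns, so an error status block that compares byte-for-byte equal to one of the four cached blocks is suppressed for up to 10 seconds after that cache entry was created, and at most GHES_ESTATUS_CACHE_ALLOCED_MAX = 4 * 3 / 2 = 6 cache structures may exist at once while RCU-delayed frees are pending.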
@@ -387,9 +648,11 @@ static int ghes_proc(struct ghes *ghes)
 	rc = ghes_read_estatus(ghes, 0);
 	if (rc)
 		goto out;
-	ghes_print_estatus(NULL, ghes);
-	ghes_do_proc(ghes);
-
+	if (!ghes_estatus_cached(ghes->estatus)) {
+		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
+			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
+	}
+	ghes_do_proc(ghes->estatus);
 out:
 	ghes_clear_estatus(ghes);
 	return 0;
@@ -447,6 +710,45 @@ static int ghes_notify_sci(struct notifier_block *this,
 	return ret;
 }
 
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+	struct llist_node *llnode, *next, *tail = NULL;
+	struct ghes_estatus_node *estatus_node;
+	struct acpi_hest_generic *generic;
+	struct acpi_hest_generic_status *estatus;
+	u32 len, node_len;
+
+	/*
+	 * Because the time order of estatus in list is reversed,
+	 * revert it back to proper order.
+	 */
+	llnode = llist_del_all(&ghes_estatus_llist);
+	while (llnode) {
+		next = llnode->next;
+		llnode->next = tail;
+		tail = llnode;
+		llnode = next;
+	}
+	llnode = tail;
+	while (llnode) {
+		next = llnode->next;
+		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+					   llnode);
+		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+		len = apei_estatus_len(estatus);
+		node_len = GHES_ESTATUS_NODE_LEN(len);
+		ghes_do_proc(estatus);
+		if (!ghes_estatus_cached(estatus)) {
+			generic = estatus_node->generic;
+			if (ghes_print_estatus(NULL, generic, estatus))
+				ghes_estatus_cache_add(generic, estatus);
+		}
+		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+			      node_len);
+		llnode = next;
+	}
+}
+
 static int ghes_notify_nmi(struct notifier_block *this,
 			   unsigned long cmd, void *data)
 {
@@ -476,7 +778,8 @@ static int ghes_notify_nmi(struct notifier_block *this,
 
 	if (sev_global >= GHES_SEV_PANIC) {
 		oops_begin();
-		ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+		__ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+				     ghes_global->estatus);
 		/* reboot to log the error! */
 		if (panic_timeout == 0)
 			panic_timeout = ghes_panic_timeout;
@@ -484,12 +787,34 @@ static int ghes_notify_nmi(struct notifier_block *this,
 	}
 
 	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+		u32 len, node_len;
+		struct ghes_estatus_node *estatus_node;
+		struct acpi_hest_generic_status *estatus;
+#endif
 		if (!(ghes->flags & GHES_TO_CLEAR))
 			continue;
-		/* Do not print estatus because printk is not NMI safe */
-		ghes_do_proc(ghes);
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+		if (ghes_estatus_cached(ghes->estatus))
+			goto next;
+		/* Save estatus for further processing in IRQ context */
+		len = apei_estatus_len(ghes->estatus);
+		node_len = GHES_ESTATUS_NODE_LEN(len);
+		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
+						      node_len);
+		if (estatus_node) {
+			estatus_node->generic = ghes->generic;
+			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+			memcpy(estatus, ghes->estatus, len);
+			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+		}
+next:
+#endif
 		ghes_clear_estatus(ghes);
 	}
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	irq_work_queue(&ghes_proc_irq_work);
+#endif
 
 out:
 	raw_spin_unlock(&ghes_nmi_lock);
@@ -504,10 +829,26 @@ static struct notifier_block ghes_notifier_nmi = {
 	.notifier_call = ghes_notify_nmi,
 };
 
+static unsigned long ghes_esource_prealloc_size(
+	const struct acpi_hest_generic *generic)
+{
+	unsigned long block_length, prealloc_records, prealloc_size;
+
+	block_length = min_t(unsigned long, generic->error_block_length,
+			     GHES_ESTATUS_MAX_SIZE);
+	prealloc_records = max_t(unsigned long,
+				 generic->records_to_preallocate, 1);
+	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
+			      GHES_ESOURCE_PREALLOC_MAX_SIZE);
+
+	return prealloc_size;
+}
+
 static int __devinit ghes_probe(struct platform_device *ghes_dev)
 {
 	struct acpi_hest_generic *generic;
 	struct ghes *ghes = NULL;
+	unsigned long len;
 	int rc = -EINVAL;
 
 	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
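Note (worked example, illustrative firmware values): for a generic error source with error_block_length = 0x1000 and records_to_preallocate = 8, ghes_esource_prealloc_size() returns min(4096 * 8, 65536) = 32768, so probing that NMI error source grows the estatus pool request by 32 KiB and removing it later shrinks the request by the same amount.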
@@ -573,6 +914,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
 		mutex_unlock(&ghes_list_mutex);
 		break;
 	case ACPI_HEST_NOTIFY_NMI:
+		len = ghes_esource_prealloc_size(generic);
+		ghes_estatus_pool_expand(len);
 		mutex_lock(&ghes_list_mutex);
 		if (list_empty(&ghes_nmi))
 			register_die_notifier(&ghes_notifier_nmi);
@@ -597,6 +940,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
 {
 	struct ghes *ghes;
 	struct acpi_hest_generic *generic;
+	unsigned long len;
 
 	ghes = platform_get_drvdata(ghes_dev);
 	generic = ghes->generic;
@@ -627,6 +971,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
 		 * freed after NMI handler finishes.
 		 */
 		synchronize_rcu();
+		len = ghes_esource_prealloc_size(generic);
+		ghes_estatus_pool_shrink(len);
 		break;
 	default:
 		BUG();
@@ -662,15 +1008,43 @@ static int __init ghes_init(void)
 		return -EINVAL;
 	}
 
+	if (ghes_disable) {
+		pr_info(GHES_PFX "GHES is not enabled!\n");
+		return -EINVAL;
+	}
+
+	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+
 	rc = ghes_ioremap_init();
 	if (rc)
 		goto err;
 
-	rc = platform_driver_register(&ghes_platform_driver);
+	rc = ghes_estatus_pool_init();
 	if (rc)
 		goto err_ioremap_exit;
 
+	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
+				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
+	if (rc)
+		goto err_pool_exit;
+
+	rc = platform_driver_register(&ghes_platform_driver);
+	if (rc)
+		goto err_pool_exit;
+
+	rc = apei_osc_setup();
+	if (rc == 0 && osc_sb_apei_support_acked)
+		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+	else if (rc == 0 && !osc_sb_apei_support_acked)
+		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+	else if (rc && osc_sb_apei_support_acked)
+		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+	else
+		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+
 	return 0;
+err_pool_exit:
+	ghes_estatus_pool_exit();
 err_ioremap_exit:
 	ghes_ioremap_exit();
 err:
@@ -680,6 +1054,7 @@ err:
 static void __exit ghes_exit(void)
 {
 	platform_driver_unregister(&ghes_platform_driver);
+	ghes_estatus_pool_exit();
 	ghes_ioremap_exit();
 }
 
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 181bc2f7bb74..05fee06f4d6e 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -231,16 +231,17 @@ void __init acpi_hest_init(void)
 		goto err;
 	}
 
-	rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
-	if (rc)
-		goto err;
-
-	rc = hest_ghes_dev_register(ghes_count);
-	if (!rc) {
-		pr_info(HEST_PFX "Table parsing has been initialized.\n");
-		return;
+	if (!ghes_disable) {
+		rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
+		if (rc)
+			goto err;
+		rc = hest_ghes_dev_register(ghes_count);
+		if (rc)
+			goto err;
 	}
 
+	pr_info(HEST_PFX "Table parsing has been initialized.\n");
+	return;
 err:
 	hest_disable = 1;
 }
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c661353e8f2..7711d94a0409 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -55,6 +55,9 @@
 #define ACPI_BATTERY_NOTIFY_INFO	0x81
 #define ACPI_BATTERY_NOTIFY_THRESHOLD	0x82
 
+/* Battery power unit: 0 means mW, 1 means mA */
+#define ACPI_BATTERY_POWER_UNIT_MA	1
+
 #define _COMPONENT	ACPI_BATTERY_COMPONENT
 
 ACPI_MODULE_NAME("battery");
@@ -91,16 +94,12 @@ MODULE_DEVICE_TABLE(acpi, battery_device_ids);
 enum {
 	ACPI_BATTERY_ALARM_PRESENT,
 	ACPI_BATTERY_XINFO_PRESENT,
-	/* For buggy DSDTs that report negative 16-bit values for either
-	 * charging or discharging current and/or report 0 as 65536
-	 * due to bad math.
-	 */
-	ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
 	ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
 };
 
 struct acpi_battery {
 	struct mutex lock;
+	struct mutex sysfs_lock;
 	struct power_supply bat;
 	struct acpi_device *device;
 	struct notifier_block pm_nb;
@@ -301,7 +300,8 @@ static enum power_supply_property energy_battery_props[] = {
 #ifdef CONFIG_ACPI_PROCFS_POWER
 inline char *acpi_battery_units(struct acpi_battery *battery)
 {
-	return (battery->power_unit)?"mA":"mW";
+	return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+		"mA" : "mW";
 }
 #endif
 
@@ -461,9 +461,17 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
 	battery->update_time = jiffies;
 	kfree(buffer.pointer);
 
-	if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) &&
-	    battery->rate_now != -1)
+	/* For buggy DSDTs that report negative 16-bit values for either
+	 * charging or discharging current and/or report 0 as 65536
+	 * due to bad math.
+	 */
+	if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
+	    battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
+	    (s16)(battery->rate_now) < 0) {
 		battery->rate_now = abs((s16)battery->rate_now);
+		printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
+			" invalid.\n");
+	}
 
 	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
 	    && battery->capacity_now >= 0 && battery->capacity_now <= 100)
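Note (worked example, illustrative numbers): a buggy mA-unit DSDT that means "discharging at 10 mA" but stores -10 in a 16-bit field makes _BST report rate_now == 65526 (0xFFF6). That value is not ACPI_BATTERY_VALUE_UNKNOWN and (s16)65526 == -10, so the new test fires, rate_now becomes abs(-10) = 10, and the FW_BUG warning is printed once.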
@@ -544,7 +552,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
 {
 	int result;
 
-	if (battery->power_unit) {
+	if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
 		battery->bat.properties = charge_battery_props;
 		battery->bat.num_properties =
 			ARRAY_SIZE(charge_battery_props);
@@ -566,18 +574,16 @@ static int sysfs_add_battery(struct acpi_battery *battery)
 
 static void sysfs_remove_battery(struct acpi_battery *battery)
 {
-	if (!battery->bat.dev)
+	mutex_lock(&battery->sysfs_lock);
+	if (!battery->bat.dev) {
+		mutex_unlock(&battery->sysfs_lock);
 		return;
+	}
+
 	device_remove_file(battery->bat.dev, &alarm_attr);
 	power_supply_unregister(&battery->bat);
 	battery->bat.dev = NULL;
-}
-
-static void acpi_battery_quirks(struct acpi_battery *battery)
-{
-	if (dmi_name_in_vendors("Acer") && battery->power_unit) {
-		set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags);
-	}
+	mutex_unlock(&battery->sysfs_lock);
 }
 
 /*
@@ -592,7 +598,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
  *
  * Handle this correctly so that they won't break userspace.
  */
-static void acpi_battery_quirks2(struct acpi_battery *battery)
+static void acpi_battery_quirks(struct acpi_battery *battery)
 {
 	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
 		return ;
@@ -623,13 +629,15 @@ static int acpi_battery_update(struct acpi_battery *battery)
 		result = acpi_battery_get_info(battery);
 		if (result)
 			return result;
-		acpi_battery_quirks(battery);
 		acpi_battery_init_alarm(battery);
 	}
-	if (!battery->bat.dev)
-		sysfs_add_battery(battery);
+	if (!battery->bat.dev) {
+		result = sysfs_add_battery(battery);
+		if (result)
+			return result;
+	}
 	result = acpi_battery_get_state(battery);
-	acpi_battery_quirks2(battery);
+	acpi_battery_quirks(battery);
 	return result;
 }
 
@@ -863,7 +871,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
 	}, \
 }
 
-static struct battery_file {
+static const struct battery_file {
 	struct file_operations ops;
 	mode_t mode;
 	const char *name;
@@ -948,9 +956,12 @@ static int battery_notify(struct notifier_block *nb,
 	struct acpi_battery *battery = container_of(nb, struct acpi_battery,
 						pm_nb);
 	switch (mode) {
+	case PM_POST_HIBERNATION:
 	case PM_POST_SUSPEND:
952 | sysfs_remove_battery(battery); | 961 | if (battery->bat.dev) { |
953 | sysfs_add_battery(battery); | 962 | sysfs_remove_battery(battery); |
963 | sysfs_add_battery(battery); | ||
964 | } | ||
954 | break; | 965 | break; |
955 | } | 966 | } |
956 | 967 | ||
@@ -972,28 +983,38 @@ static int acpi_battery_add(struct acpi_device *device) | |||
972 | strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); | 983 | strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); |
973 | device->driver_data = battery; | 984 | device->driver_data = battery; |
974 | mutex_init(&battery->lock); | 985 | mutex_init(&battery->lock); |
986 | mutex_init(&battery->sysfs_lock); | ||
975 | if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, | 987 | if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, |
976 | "_BIX", &handle))) | 988 | "_BIX", &handle))) |
977 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); | 989 | set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); |
978 | acpi_battery_update(battery); | 990 | result = acpi_battery_update(battery); |
991 | if (result) | ||
992 | goto fail; | ||
979 | #ifdef CONFIG_ACPI_PROCFS_POWER | 993 | #ifdef CONFIG_ACPI_PROCFS_POWER |
980 | result = acpi_battery_add_fs(device); | 994 | result = acpi_battery_add_fs(device); |
981 | #endif | 995 | #endif |
982 | if (!result) { | 996 | if (result) { |
983 | printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", | ||
984 | ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), | ||
985 | device->status.battery_present ? "present" : "absent"); | ||
986 | } else { | ||
987 | #ifdef CONFIG_ACPI_PROCFS_POWER | 997 | #ifdef CONFIG_ACPI_PROCFS_POWER |
988 | acpi_battery_remove_fs(device); | 998 | acpi_battery_remove_fs(device); |
989 | #endif | 999 | #endif |
990 | kfree(battery); | 1000 | goto fail; |
991 | } | 1001 | } |
992 | 1002 | ||
1003 | printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", | ||
1004 | ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), | ||
1005 | device->status.battery_present ? "present" : "absent"); | ||
1006 | |||
993 | battery->pm_nb.notifier_call = battery_notify; | 1007 | battery->pm_nb.notifier_call = battery_notify; |
994 | register_pm_notifier(&battery->pm_nb); | 1008 | register_pm_notifier(&battery->pm_nb); |
995 | 1009 | ||
996 | return result; | 1010 | return result; |
1011 | |||
1012 | fail: | ||
1013 | sysfs_remove_battery(battery); | ||
1014 | mutex_destroy(&battery->lock); | ||
1015 | mutex_destroy(&battery->sysfs_lock); | ||
1016 | kfree(battery); | ||
1017 | return result; | ||
997 | } | 1018 | } |
998 | 1019 | ||
999 | static int acpi_battery_remove(struct acpi_device *device, int type) | 1020 | static int acpi_battery_remove(struct acpi_device *device, int type) |
@@ -1009,6 +1030,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) | |||
1009 | #endif | 1030 | #endif |
1010 | sysfs_remove_battery(battery); | 1031 | sysfs_remove_battery(battery); |
1011 | mutex_destroy(&battery->lock); | 1032 | mutex_destroy(&battery->lock); |
1033 | mutex_destroy(&battery->sysfs_lock); | ||
1012 | kfree(battery); | 1034 | kfree(battery); |
1013 | return 0; | 1035 | return 0; |
1014 | } | 1036 | } |
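The battery.c hunks above drop the Acer-only ACPI_BATTERY_QUIRK_SIGNED16_CURRENT flag in favour of a generic test: whenever the battery reports in mA and the _BST rate is not the "unknown" marker but looks negative once its low 16 bits are read as a signed value, the driver assumes the firmware did 16-bit math and takes the absolute value. A minimal standalone sketch of that normalization, assuming plain C types rather than the driver's structures (the function name and sample values are hypothetical):

/*
 * Userspace sketch of the rate fix-up added above: a buggy DSDT doing
 * 16-bit arithmetic can report a 400 mA discharge as 65136 (0xFE70).
 * Reinterpreting the low 16 bits as signed and taking the absolute
 * value recovers the intended magnitude.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t fix_rate(uint32_t raw)
{
        int16_t s = (int16_t)raw;       /* low 16 bits, as the firmware meant them */
        return (s < 0) ? (uint32_t)abs(s) : raw;
}

int main(void)
{
        printf("%u\n", fix_rate(65136));        /* prints 400 */
        printf("%u\n", fix_rate(400));          /* sane value passes through: 400 */
        return 0;
}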
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d1e06c182cdb..437ddbf0c49a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <acpi/acpi_bus.h> | 40 | #include <acpi/acpi_bus.h> |
41 | #include <acpi/acpi_drivers.h> | 41 | #include <acpi/acpi_drivers.h> |
42 | #include <acpi/apei.h> | ||
42 | #include <linux/dmi.h> | 43 | #include <linux/dmi.h> |
43 | #include <linux/suspend.h> | 44 | #include <linux/suspend.h> |
44 | 45 | ||
@@ -519,6 +520,7 @@ out_kfree: | |||
519 | } | 520 | } |
520 | EXPORT_SYMBOL(acpi_run_osc); | 521 | EXPORT_SYMBOL(acpi_run_osc); |
521 | 522 | ||
523 | bool osc_sb_apei_support_acked; | ||
522 | static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; | 524 | static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; |
523 | static void acpi_bus_osc_support(void) | 525 | static void acpi_bus_osc_support(void) |
524 | { | 526 | { |
@@ -541,11 +543,19 @@ static void acpi_bus_osc_support(void) | |||
541 | #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) | 543 | #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) |
542 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; | 544 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; |
543 | #endif | 545 | #endif |
546 | |||
547 | if (!ghes_disable) | ||
548 | capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT; | ||
544 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) | 549 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) |
545 | return; | 550 | return; |
546 | if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) | 551 | if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { |
552 | u32 *capbuf_ret = context.ret.pointer; | ||
553 | if (context.ret.length > OSC_SUPPORT_TYPE) | ||
554 | osc_sb_apei_support_acked = | ||
555 | capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT; | ||
547 | kfree(context.ret.pointer); | 556 | kfree(context.ret.pointer); |
548 | /* do we need to check the returned cap? Sounds no */ | 557 | } |
558 | /* do we need to check other returned cap? Sounds no */ | ||
549 | } | 559 | } |
550 | 560 | ||
551 | /* -------------------------------------------------------------------------- | 561 | /* -------------------------------------------------------------------------- |
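The bus.c change asks the platform for APEI support through \_SB._OSC and, instead of discarding the reply, records in osc_sb_apei_support_acked whether the firmware actually granted the bit: the capability DWORD that comes back can be a subset of what was requested. A small sketch of that request/acknowledge check, assuming an invented bit name rather than the kernel's OSC_* constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_APEI_SUPPORT (1u << 4)     /* illustrative bit, not ACPI's layout */

/* A capability is usable only when it was both requested and echoed
 * back; firmware may silently clear bits it does not support. */
static bool capability_acked(uint32_t requested, uint32_t returned, uint32_t bit)
{
        return (requested & bit) && (returned & bit);
}

int main(void)
{
        uint32_t req = FAKE_APEI_SUPPORT;
        uint32_t ret = 0;               /* firmware masked the bit off */
        printf("%d\n", capability_acked(req, ret, FAKE_APEI_SUPPORT)); /* 0 */
        return 0;
}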
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 1864ad3cf895..19a61136d848 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
@@ -77,7 +77,7 @@ struct dock_dependent_device { | |||
77 | struct list_head list; | 77 | struct list_head list; |
78 | struct list_head hotplug_list; | 78 | struct list_head hotplug_list; |
79 | acpi_handle handle; | 79 | acpi_handle handle; |
80 | struct acpi_dock_ops *ops; | 80 | const struct acpi_dock_ops *ops; |
81 | void *context; | 81 | void *context; |
82 | }; | 82 | }; |
83 | 83 | ||
@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); | |||
589 | * the dock driver after _DCK is executed. | 589 | * the dock driver after _DCK is executed. |
590 | */ | 590 | */ |
591 | int | 591 | int |
592 | register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, | 592 | register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, |
593 | void *context) | 593 | void *context) |
594 | { | 594 | { |
595 | struct dock_dependent_device *dd; | 595 | struct dock_dependent_device *dd; |
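This dock.c hunk, like the ec_sys.c, fan.c, processor_thermal.c, sysfs.c, thermal.c, video.c and libata-acpi.c hunks further down, only adds const to an ops/function-pointer table. The payoff is the same everywhere: a const ops structure lands in read-only data, so its callbacks cannot be rewritten at runtime and accidental reassignment is caught at compile time. A generic sketch with an invented ops type:

#include <stdio.h>

struct widget_ops {
        int (*get_state)(void);
};

static int widget_get_state(void) { return 42; }

/* const places the table in .rodata and makes later reassignment of the
 * callbacks a compile error. */
static const struct widget_ops widget_ops = {
        .get_state = widget_get_state,
};

int main(void)
{
        /* widget_ops.get_state = NULL;   -- would no longer compile */
        printf("%d\n", widget_ops.get_state());
        return 0;
}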
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index 05b44201a614..22f918bacd35 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c | |||
@@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, | |||
92 | return count; | 92 | return count; |
93 | } | 93 | } |
94 | 94 | ||
95 | static struct file_operations acpi_ec_io_ops = { | 95 | static const struct file_operations acpi_ec_io_ops = { |
96 | .owner = THIS_MODULE, | 96 | .owner = THIS_MODULE, |
97 | .open = acpi_ec_open_io, | 97 | .open = acpi_ec_open_io, |
98 | .read = acpi_ec_read_io, | 98 | .read = acpi_ec_read_io, |
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 467479f07c1f..0f0356ca1a9e 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
@@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) | |||
110 | return result; | 110 | return result; |
111 | } | 111 | } |
112 | 112 | ||
113 | static struct thermal_cooling_device_ops fan_cooling_ops = { | 113 | static const struct thermal_cooling_device_ops fan_cooling_ops = { |
114 | .get_max_state = fan_get_max_state, | 114 | .get_max_state = fan_get_max_state, |
115 | .get_cur_state = fan_get_cur_state, | 115 | .get_cur_state = fan_get_cur_state, |
116 | .set_cur_state = fan_set_cur_state, | 116 | .set_cur_state = fan_set_cur_state, |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 372f9b70f7f4..fa32f584229f 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -155,7 +155,7 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported) | |||
155 | { | 155 | { |
156 | if (!strcmp("Linux", interface)) { | 156 | if (!strcmp("Linux", interface)) { |
157 | 157 | ||
158 | printk(KERN_NOTICE FW_BUG PREFIX | 158 | printk_once(KERN_NOTICE FW_BUG PREFIX |
159 | "BIOS _OSI(Linux) query %s%s\n", | 159 | "BIOS _OSI(Linux) query %s%s\n", |
160 | osi_linux.enable ? "honored" : "ignored", | 160 | osi_linux.enable ? "honored" : "ignored", |
161 | osi_linux.cmdline ? " via cmdline" : | 161 | osi_linux.cmdline ? " via cmdline" : |
@@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args) | |||
237 | #endif | 237 | #endif |
238 | } | 238 | } |
239 | 239 | ||
240 | #ifdef CONFIG_KEXEC | ||
241 | static unsigned long acpi_rsdp; | ||
242 | static int __init setup_acpi_rsdp(char *arg) | ||
243 | { | ||
244 | acpi_rsdp = simple_strtoul(arg, NULL, 16); | ||
245 | return 0; | ||
246 | } | ||
247 | early_param("acpi_rsdp", setup_acpi_rsdp); | ||
248 | #endif | ||
249 | |||
240 | acpi_physical_address __init acpi_os_get_root_pointer(void) | 250 | acpi_physical_address __init acpi_os_get_root_pointer(void) |
241 | { | 251 | { |
252 | #ifdef CONFIG_KEXEC | ||
253 | if (acpi_rsdp) | ||
254 | return acpi_rsdp; | ||
255 | #endif | ||
256 | |||
242 | if (efi_enabled) { | 257 | if (efi_enabled) { |
243 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) | 258 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) |
244 | return efi.acpi20; | 259 | return efi.acpi20; |
@@ -1083,7 +1098,13 @@ struct osi_setup_entry { | |||
1083 | bool enable; | 1098 | bool enable; |
1084 | }; | 1099 | }; |
1085 | 1100 | ||
1086 | static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; | 1101 | static struct osi_setup_entry __initdata |
1102 | osi_setup_entries[OSI_STRING_ENTRIES_MAX] = { | ||
1103 | {"Module Device", true}, | ||
1104 | {"Processor Device", true}, | ||
1105 | {"3.0 _SCP Extensions", true}, | ||
1106 | {"Processor Aggregator Device", true}, | ||
1107 | }; | ||
1087 | 1108 | ||
1088 | void __init acpi_osi_setup(char *str) | 1109 | void __init acpi_osi_setup(char *str) |
1089 | { | 1110 | { |
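The osl.c changes do two things: the new acpi_rsdp= early parameter lets a kexec'ed (typically kdump) kernel be handed the physical address of the ACPI RSDP directly, instead of rediscovering it via EFI or the legacy BIOS scan, and osi_setup_entries now defaults four _OSI strings to enabled. As a usage sketch, the crash kernel's command line would gain something like acpi_rsdp=0x7fe86000, where the address is machine-specific (the running kernel prints it in its early "ACPI: RSDP ..." boot message); the value shown here is only an example.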
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index f907cfbfa13c..7f9eba9a0b02 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -303,6 +303,61 @@ void acpi_pci_irq_del_prt(struct pci_bus *bus) | |||
303 | /* -------------------------------------------------------------------------- | 303 | /* -------------------------------------------------------------------------- |
304 | PCI Interrupt Routing Support | 304 | PCI Interrupt Routing Support |
305 | -------------------------------------------------------------------------- */ | 305 | -------------------------------------------------------------------------- */ |
306 | #ifdef CONFIG_X86_IO_APIC | ||
307 | extern int noioapicquirk; | ||
308 | extern int noioapicreroute; | ||
309 | |||
310 | static int bridge_has_boot_interrupt_variant(struct pci_bus *bus) | ||
311 | { | ||
312 | struct pci_bus *bus_it; | ||
313 | |||
314 | for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { | ||
315 | if (!bus_it->self) | ||
316 | return 0; | ||
317 | if (bus_it->self->irq_reroute_variant) | ||
318 | return bus_it->self->irq_reroute_variant; | ||
319 | } | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ | ||
325 | * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does | ||
326 | * during interrupt handling). When this INTx generation cannot be disabled, | ||
327 | * we reroute these interrupts to their legacy equivalent to get rid of | ||
328 | * spurious interrupts. | ||
329 | */ | ||
330 | static int acpi_reroute_boot_interrupt(struct pci_dev *dev, | ||
331 | struct acpi_prt_entry *entry) | ||
332 | { | ||
333 | if (noioapicquirk || noioapicreroute) { | ||
334 | return 0; | ||
335 | } else { | ||
336 | switch (bridge_has_boot_interrupt_variant(dev->bus)) { | ||
337 | case 0: | ||
338 | /* no rerouting necessary */ | ||
339 | return 0; | ||
340 | case INTEL_IRQ_REROUTE_VARIANT: | ||
341 | /* | ||
342 | * Remap according to INTx routing table in 6700PXH | ||
343 | * specs, intel order number 302628-002, section | ||
344 | * 2.15.2. Other chipsets (80332, ...) have the same | ||
345 | * mapping and are handled here as well. | ||
346 | */ | ||
347 | dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy " | ||
348 | "IRQ %d\n", entry->index, | ||
349 | (entry->index % 4) + 16); | ||
350 | entry->index = (entry->index % 4) + 16; | ||
351 | return 1; | ||
352 | default: | ||
353 | dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy " | ||
354 | "IRQ: unknown mapping\n", entry->index); | ||
355 | return -1; | ||
356 | } | ||
357 | } | ||
358 | } | ||
359 | #endif /* CONFIG_X86_IO_APIC */ | ||
360 | |||
306 | static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) | 361 | static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) |
307 | { | 362 | { |
308 | struct acpi_prt_entry *entry; | 363 | struct acpi_prt_entry *entry; |
@@ -311,6 +366,9 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) | |||
311 | 366 | ||
312 | entry = acpi_pci_irq_find_prt_entry(dev, pin); | 367 | entry = acpi_pci_irq_find_prt_entry(dev, pin); |
313 | if (entry) { | 368 | if (entry) { |
369 | #ifdef CONFIG_X86_IO_APIC | ||
370 | acpi_reroute_boot_interrupt(dev, entry); | ||
371 | #endif /* CONFIG_X86_IO_APIC */ | ||
314 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", | 372 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", |
315 | pci_name(dev), pin_name(pin))); | 373 | pci_name(dev), pin_name(pin))); |
316 | return entry; | 374 | return entry; |
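The new acpi_reroute_boot_interrupt() path above folds a boot interrupt back onto a legacy line with (index % 4) + 16, so the four INTA..INTD inputs end up on IRQs 16..19 regardless of which rerouted index the _PRT handed out. Worked through with a couple of sample indices (values chosen only for illustration):

#include <stdio.h>

/* Same arithmetic as the hunk above: collapse the index onto one of the
 * four legacy lines, offset to IRQs 16..19. */
static int legacy_irq(int index)
{
        return (index % 4) + 16;
}

int main(void)
{
        int examples[] = { 20, 21, 54, 55 };    /* arbitrary sample indices */
        for (int i = 0; i < 4; i++)
                printf("index %d -> legacy IRQ %d\n",
                       examples[i], legacy_irq(examples[i]));
        /* prints 16, 17, 18 and 19 respectively */
        return 0;
}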
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d06078d660ad..2672c798272f 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -485,7 +485,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) | |||
485 | root->secondary.end = 0xFF; | 485 | root->secondary.end = 0xFF; |
486 | printk(KERN_WARNING FW_BUG PREFIX | 486 | printk(KERN_WARNING FW_BUG PREFIX |
487 | "no secondary bus range in _CRS\n"); | 487 | "no secondary bus range in _CRS\n"); |
488 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); | 488 | status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, |
489 | NULL, &bus); | ||
489 | if (ACPI_SUCCESS(status)) | 490 | if (ACPI_SUCCESS(status)) |
490 | root->secondary.start = bus; | 491 | root->secondary.start = bus; |
491 | else if (status == AE_NOT_FOUND) | 492 | else if (status == AE_NOT_FOUND) |
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 79cb65332894..870550d6a4bf 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c | |||
@@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_cooling_device *cdev, | |||
244 | return result; | 244 | return result; |
245 | } | 245 | } |
246 | 246 | ||
247 | struct thermal_cooling_device_ops processor_cooling_ops = { | 247 | const struct thermal_cooling_device_ops processor_cooling_ops = { |
248 | .get_max_state = processor_get_max_state, | 248 | .get_max_state = processor_get_max_state, |
249 | .get_cur_state = processor_get_cur_state, | 249 | .get_cur_state = processor_get_cur_state, |
250 | .set_cur_state = processor_set_cur_state, | 250 | .set_cur_state = processor_set_cur_state, |
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 50658ff887d9..6e36d0c0057c 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -130,6 +130,9 @@ struct acpi_sbs { | |||
130 | 130 | ||
131 | #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) | 131 | #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) |
132 | 132 | ||
133 | static int acpi_sbs_remove(struct acpi_device *device, int type); | ||
134 | static int acpi_battery_get_state(struct acpi_battery *battery); | ||
135 | |||
133 | static inline int battery_scale(int log) | 136 | static inline int battery_scale(int log) |
134 | { | 137 | { |
135 | int scale = 1; | 138 | int scale = 1; |
@@ -195,6 +198,8 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy, | |||
195 | 198 | ||
196 | if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT) | 199 | if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT) |
197 | return -ENODEV; | 200 | return -ENODEV; |
201 | |||
202 | acpi_battery_get_state(battery); | ||
198 | switch (psp) { | 203 | switch (psp) { |
199 | case POWER_SUPPLY_PROP_STATUS: | 204 | case POWER_SUPPLY_PROP_STATUS: |
200 | if (battery->rate_now < 0) | 205 | if (battery->rate_now < 0) |
@@ -225,11 +230,17 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy, | |||
225 | case POWER_SUPPLY_PROP_POWER_NOW: | 230 | case POWER_SUPPLY_PROP_POWER_NOW: |
226 | val->intval = abs(battery->rate_now) * | 231 | val->intval = abs(battery->rate_now) * |
227 | acpi_battery_ipscale(battery) * 1000; | 232 | acpi_battery_ipscale(battery) * 1000; |
233 | val->intval *= (acpi_battery_mode(battery)) ? | ||
234 | (battery->voltage_now * | ||
235 | acpi_battery_vscale(battery) / 1000) : 1; | ||
228 | break; | 236 | break; |
229 | case POWER_SUPPLY_PROP_CURRENT_AVG: | 237 | case POWER_SUPPLY_PROP_CURRENT_AVG: |
230 | case POWER_SUPPLY_PROP_POWER_AVG: | 238 | case POWER_SUPPLY_PROP_POWER_AVG: |
231 | val->intval = abs(battery->rate_avg) * | 239 | val->intval = abs(battery->rate_avg) * |
232 | acpi_battery_ipscale(battery) * 1000; | 240 | acpi_battery_ipscale(battery) * 1000; |
241 | val->intval *= (acpi_battery_mode(battery)) ? | ||
242 | (battery->voltage_now * | ||
243 | acpi_battery_vscale(battery) / 1000) : 1; | ||
233 | break; | 244 | break; |
234 | case POWER_SUPPLY_PROP_CAPACITY: | 245 | case POWER_SUPPLY_PROP_CAPACITY: |
235 | val->intval = battery->state_of_charge; | 246 | val->intval = battery->state_of_charge; |
@@ -903,8 +914,6 @@ static void acpi_sbs_callback(void *context) | |||
903 | } | 914 | } |
904 | } | 915 | } |
905 | 916 | ||
906 | static int acpi_sbs_remove(struct acpi_device *device, int type); | ||
907 | |||
908 | static int acpi_sbs_add(struct acpi_device *device) | 917 | static int acpi_sbs_add(struct acpi_device *device) |
909 | { | 918 | { |
910 | struct acpi_sbs *sbs; | 919 | struct acpi_sbs *sbs; |
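The sbs.c changes make every property read refresh the battery state first, and the rate-derived cases now additionally scale the value by battery->voltage_now * vscale / 1000 whenever acpi_battery_mode() is set. Plugging made-up raw values through the added expression (only the arithmetic mirrors the hunk, the inputs are invented):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* all raw values below are invented for illustration */
        int rate_now = -120, voltage_now = 12000;
        int ipscale = 1, vscale = 1, capacity_mode = 1;

        int val = abs(rate_now) * ipscale * 1000;
        val *= capacity_mode ? (voltage_now * vscale / 1000) : 1;

        printf("%d\n", val);    /* 120000 without the scaling, 1440000 with it */
        return 0;
}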
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 6c949602cbd1..3ed80b2ca907 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -428,6 +428,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
428 | DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), | 428 | DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), |
429 | }, | 429 | }, |
430 | }, | 430 | }, |
431 | { | ||
432 | .callback = init_old_suspend_ordering, | ||
433 | .ident = "Asus A8N-SLI DELUXE", | ||
434 | .matches = { | ||
435 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
436 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), | ||
437 | }, | ||
438 | }, | ||
439 | { | ||
440 | .callback = init_old_suspend_ordering, | ||
441 | .ident = "Asus A8N-SLI Premium", | ||
442 | .matches = { | ||
443 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
444 | DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), | ||
445 | }, | ||
446 | }, | ||
431 | {}, | 447 | {}, |
432 | }; | 448 | }; |
433 | #endif /* CONFIG_SUSPEND */ | 449 | #endif /* CONFIG_SUSPEND */ |
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 77255f250dbb..c538d0ef10ff 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -149,12 +149,12 @@ static int param_get_debug_level(char *buffer, const struct kernel_param *kp) | |||
149 | return result; | 149 | return result; |
150 | } | 150 | } |
151 | 151 | ||
152 | static struct kernel_param_ops param_ops_debug_layer = { | 152 | static const struct kernel_param_ops param_ops_debug_layer = { |
153 | .set = param_set_uint, | 153 | .set = param_set_uint, |
154 | .get = param_get_debug_layer, | 154 | .get = param_get_debug_layer, |
155 | }; | 155 | }; |
156 | 156 | ||
157 | static struct kernel_param_ops param_ops_debug_level = { | 157 | static const struct kernel_param_ops param_ops_debug_level = { |
158 | .set = param_set_uint, | 158 | .set = param_set_uint, |
159 | .get = param_get_debug_level, | 159 | .get = param_get_debug_level, |
160 | }; | 160 | }; |
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 2607e17b520f..48fbc647b178 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal, | |||
812 | thermal_zone_unbind_cooling_device); | 812 | thermal_zone_unbind_cooling_device); |
813 | } | 813 | } |
814 | 814 | ||
815 | static struct thermal_zone_device_ops acpi_thermal_zone_ops = { | 815 | static const struct thermal_zone_device_ops acpi_thermal_zone_ops = { |
816 | .bind = acpi_thermal_bind_cooling_device, | 816 | .bind = acpi_thermal_bind_cooling_device, |
817 | .unbind = acpi_thermal_unbind_cooling_device, | 817 | .unbind = acpi_thermal_unbind_cooling_device, |
818 | .get_temp = thermal_get_temp, | 818 | .get_temp = thermal_get_temp, |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index ada4b4d9bdc8..08a44b532f7c 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -307,7 +307,7 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st | |||
307 | return acpi_video_device_lcd_set_level(video, level); | 307 | return acpi_video_device_lcd_set_level(video, level); |
308 | } | 308 | } |
309 | 309 | ||
310 | static struct thermal_cooling_device_ops video_cooling_ops = { | 310 | static const struct thermal_cooling_device_ops video_cooling_ops = { |
311 | .get_max_state = video_get_max_state, | 311 | .get_max_state = video_get_max_state, |
312 | .get_cur_state = video_get_cur_state, | 312 | .get_cur_state = video_get_cur_state, |
313 | .set_cur_state = video_set_cur_state, | 313 | .set_cur_state = video_set_cur_state, |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index e0a5b555cee1..bb7c5f1085cc 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data) | |||
218 | ata_acpi_uevent(dev->link->ap, dev, event); | 218 | ata_acpi_uevent(dev->link->ap, dev, event); |
219 | } | 219 | } |
220 | 220 | ||
221 | static struct acpi_dock_ops ata_acpi_dev_dock_ops = { | 221 | static const struct acpi_dock_ops ata_acpi_dev_dock_ops = { |
222 | .handler = ata_acpi_dev_notify_dock, | 222 | .handler = ata_acpi_dev_notify_dock, |
223 | .uevent = ata_acpi_dev_uevent, | 223 | .uevent = ata_acpi_dev_uevent, |
224 | }; | 224 | }; |
225 | 225 | ||
226 | static struct acpi_dock_ops ata_acpi_ap_dock_ops = { | 226 | static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { |
227 | .handler = ata_acpi_ap_notify_dock, | 227 | .handler = ata_acpi_ap_notify_dock, |
228 | .uevent = ata_acpi_ap_uevent, | 228 | .uevent = ata_acpi_ap_uevent, |
229 | }; | 229 | }; |
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index b89fffc1d777..33e1bed68fdd 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c | |||
@@ -166,7 +166,7 @@ static int create_path(const char *nodepath) | |||
166 | { | 166 | { |
167 | char *path; | 167 | char *path; |
168 | char *s; | 168 | char *s; |
169 | int err; | 169 | int err = 0; |
170 | 170 | ||
171 | /* parent directories do not exist, create them */ | 171 | /* parent directories do not exist, create them */ |
172 | path = kstrdup(nodepath, GFP_KERNEL); | 172 | path = kstrdup(nodepath, GFP_KERNEL); |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index be8714aa9dd6..e18566a0fedd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -80,7 +80,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd) | |||
80 | int pm_genpd_poweron(struct generic_pm_domain *genpd) | 80 | int pm_genpd_poweron(struct generic_pm_domain *genpd) |
81 | { | 81 | { |
82 | struct generic_pm_domain *parent = genpd->parent; | 82 | struct generic_pm_domain *parent = genpd->parent; |
83 | DEFINE_WAIT(wait); | ||
84 | int ret = 0; | 83 | int ret = 0; |
85 | 84 | ||
86 | start: | 85 | start: |
@@ -112,7 +111,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) | |||
112 | } | 111 | } |
113 | 112 | ||
114 | if (genpd->power_on) { | 113 | if (genpd->power_on) { |
115 | int ret = genpd->power_on(genpd); | 114 | ret = genpd->power_on(genpd); |
116 | if (ret) | 115 | if (ret) |
117 | goto out; | 116 | goto out; |
118 | } | 117 | } |
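The domain.c fix above removes a classic shadowing bug: the inner `int ret` hid the function-scope ret, so a failure returned by genpd->power_on() was stored in the inner variable and then discarded, and the caller saw success. A tiny standalone illustration of the failure mode (all names invented):

#include <stdio.h>

static int do_power_on(void) { return -5; }     /* pretend the callback fails */

static int poweron_buggy(void)
{
        int ret = 0;

        {
                int ret = do_power_on();        /* shadows the outer ret */
                if (ret)
                        goto out;
        }
out:
        return ret;                             /* outer ret is still 0 */
}

int main(void)
{
        printf("%d\n", poweron_buggy());        /* prints 0: the error was lost */
        return 0;
}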
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 8dc247c974af..acb3f83b8079 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -226,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
226 | callback = NULL; | 226 | callback = NULL; |
227 | 227 | ||
228 | if (callback) { | 228 | if (callback) { |
229 | spin_unlock_irq(&dev->power.lock); | 229 | if (dev->power.irq_safe) |
230 | spin_unlock(&dev->power.lock); | ||
231 | else | ||
232 | spin_unlock_irq(&dev->power.lock); | ||
230 | 233 | ||
231 | callback(dev); | 234 | callback(dev); |
232 | 235 | ||
233 | spin_lock_irq(&dev->power.lock); | 236 | if (dev->power.irq_safe) |
237 | spin_lock(&dev->power.lock); | ||
238 | else | ||
239 | spin_lock_irq(&dev->power.lock); | ||
234 | } | 240 | } |
235 | 241 | ||
236 | dev->power.idle_notification = false; | 242 | dev->power.idle_notification = false; |
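The runtime.c hunk makes rpm_idle() pick its unlock/lock flavour from dev->power.irq_safe: an irq_safe device may have its callbacks run with interrupts already disabled, so dropping the lock with spin_unlock_irq() there would wrongly re-enable them around the callback. A toy model of that decision, with invented types (this is not the kernel locking API):

#include <stdbool.h>
#include <stdio.h>

/* Toy lock: records whether dropping it also re-enabled interrupts. */
struct toy_lock {
        bool held;
        bool irqs_enabled;
};

static void unlock_plain(struct toy_lock *l)
{
        l->held = false;                /* interrupt state untouched */
}

static void unlock_and_enable_irqs(struct toy_lock *l)
{
        l->held = false;
        l->irqs_enabled = true;
}

static void drop_lock_for_callback(struct toy_lock *l, bool irq_safe)
{
        if (irq_safe)
                unlock_plain(l);
        else
                unlock_and_enable_irqs(l);
        /* ...the driver callback would run here... */
}

int main(void)
{
        struct toy_lock l = { .held = true, .irqs_enabled = false };

        drop_lock_for_callback(&l, true);
        printf("irq_safe path leaves irqs_enabled=%d\n", l.irqs_enabled); /* 0 */
        return 0;
}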
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 49502bc5360a..423fd56bf612 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -616,5 +616,16 @@ config MSM_SMD_PKT | |||
616 | Enables userspace clients to read and write to some packet SMD | 616 | Enables userspace clients to read and write to some packet SMD |
617 | ports via device interface for MSM chipset. | 617 | ports via device interface for MSM chipset. |
618 | 618 | ||
619 | config TILE_SROM | ||
620 | bool "Character-device access via hypervisor to the Tilera SPI ROM" | ||
621 | depends on TILE | ||
622 | default y | ||
623 | ---help--- | ||
624 | This device provides character-level read-write access | ||
625 | to the SROM, typically via the "0", "1", and "2" devices | ||
626 | in /dev/srom/. The Tilera hypervisor makes the flash | ||
627 | device appear much like a simple EEPROM, and knows | ||
628 | how to partition a single ROM for multiple purposes. | ||
629 | |||
619 | endmenu | 630 | endmenu |
620 | 631 | ||
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7a00672bd85d..32762ba769c2 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o | |||
63 | 63 | ||
64 | obj-$(CONFIG_JS_RTC) += js-rtc.o | 64 | obj-$(CONFIG_JS_RTC) += js-rtc.o |
65 | js-rtc-y = rtc.o | 65 | js-rtc-y = rtc.o |
66 | |||
67 | obj-$(CONFIG_TILE_SROM) += tile-srom.o | ||
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c index fca0c51bbc90..810aff9e750f 100644 --- a/drivers/char/ramoops.c +++ b/drivers/char/ramoops.c | |||
@@ -147,6 +147,14 @@ static int __init ramoops_probe(struct platform_device *pdev) | |||
147 | cxt->phys_addr = pdata->mem_address; | 147 | cxt->phys_addr = pdata->mem_address; |
148 | cxt->record_size = pdata->record_size; | 148 | cxt->record_size = pdata->record_size; |
149 | cxt->dump_oops = pdata->dump_oops; | 149 | cxt->dump_oops = pdata->dump_oops; |
150 | /* | ||
151 | * Update the module parameter variables as well so they are visible | ||
152 | * through /sys/module/ramoops/parameters/ | ||
153 | */ | ||
154 | mem_size = pdata->mem_size; | ||
155 | mem_address = pdata->mem_address; | ||
156 | record_size = pdata->record_size; | ||
157 | dump_oops = pdata->dump_oops; | ||
150 | 158 | ||
151 | if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { | 159 | if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { |
152 | pr_err("request mem region failed\n"); | 160 | pr_err("request mem region failed\n"); |
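The ramoops change copies the platform-data values back into the module parameter variables, so the configuration that probe actually used is what appears under /sys/module/ramoops/parameters/. A quick way to confirm the effective setting after boot, for example:

        cat /sys/module/ramoops/parameters/record_size

(record_size is one of the four parameters synced above; the others follow the same pattern.)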
diff --git a/drivers/char/random.c b/drivers/char/random.c index 729281961f22..c35a785005b0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1300,345 +1300,14 @@ ctl_table random_table[] = { | |||
1300 | }; | 1300 | }; |
1301 | #endif /* CONFIG_SYSCTL */ | 1301 | #endif /* CONFIG_SYSCTL */ |
1302 | 1302 | ||
1303 | /******************************************************************** | 1303 | static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; |
1304 | * | ||
1305 | * Random functions for networking | ||
1306 | * | ||
1307 | ********************************************************************/ | ||
1308 | |||
1309 | /* | ||
1310 | * TCP initial sequence number picking. This uses the random number | ||
1311 | * generator to pick an initial secret value. This value is hashed | ||
1312 | * along with the TCP endpoint information to provide a unique | ||
1313 | * starting point for each pair of TCP endpoints. This defeats | ||
1314 | * attacks which rely on guessing the initial TCP sequence number. | ||
1315 | * This algorithm was suggested by Steve Bellovin. | ||
1316 | * | ||
1317 | * Using a very strong hash was taking an appreciable amount of the total | ||
1318 | * TCP connection establishment time, so this is a weaker hash, | ||
1319 | * compensated for by changing the secret periodically. | ||
1320 | */ | ||
1321 | |||
1322 | /* F, G and H are basic MD4 functions: selection, majority, parity */ | ||
1323 | #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) | ||
1324 | #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) | ||
1325 | #define H(x, y, z) ((x) ^ (y) ^ (z)) | ||
1326 | |||
1327 | /* | ||
1328 | * The generic round function. The application is so specific that | ||
1329 | * we don't bother protecting all the arguments with parens, as is generally | ||
1330 | * good macro practice, in favor of extra legibility. | ||
1331 | * Rotation is separate from addition to prevent recomputation | ||
1332 | */ | ||
1333 | #define ROUND(f, a, b, c, d, x, s) \ | ||
1334 | (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) | ||
1335 | #define K1 0 | ||
1336 | #define K2 013240474631UL | ||
1337 | #define K3 015666365641UL | ||
1338 | |||
1339 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1340 | |||
1341 | static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12]) | ||
1342 | { | ||
1343 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | ||
1344 | |||
1345 | /* Round 1 */ | ||
1346 | ROUND(F, a, b, c, d, in[ 0] + K1, 3); | ||
1347 | ROUND(F, d, a, b, c, in[ 1] + K1, 7); | ||
1348 | ROUND(F, c, d, a, b, in[ 2] + K1, 11); | ||
1349 | ROUND(F, b, c, d, a, in[ 3] + K1, 19); | ||
1350 | ROUND(F, a, b, c, d, in[ 4] + K1, 3); | ||
1351 | ROUND(F, d, a, b, c, in[ 5] + K1, 7); | ||
1352 | ROUND(F, c, d, a, b, in[ 6] + K1, 11); | ||
1353 | ROUND(F, b, c, d, a, in[ 7] + K1, 19); | ||
1354 | ROUND(F, a, b, c, d, in[ 8] + K1, 3); | ||
1355 | ROUND(F, d, a, b, c, in[ 9] + K1, 7); | ||
1356 | ROUND(F, c, d, a, b, in[10] + K1, 11); | ||
1357 | ROUND(F, b, c, d, a, in[11] + K1, 19); | ||
1358 | |||
1359 | /* Round 2 */ | ||
1360 | ROUND(G, a, b, c, d, in[ 1] + K2, 3); | ||
1361 | ROUND(G, d, a, b, c, in[ 3] + K2, 5); | ||
1362 | ROUND(G, c, d, a, b, in[ 5] + K2, 9); | ||
1363 | ROUND(G, b, c, d, a, in[ 7] + K2, 13); | ||
1364 | ROUND(G, a, b, c, d, in[ 9] + K2, 3); | ||
1365 | ROUND(G, d, a, b, c, in[11] + K2, 5); | ||
1366 | ROUND(G, c, d, a, b, in[ 0] + K2, 9); | ||
1367 | ROUND(G, b, c, d, a, in[ 2] + K2, 13); | ||
1368 | ROUND(G, a, b, c, d, in[ 4] + K2, 3); | ||
1369 | ROUND(G, d, a, b, c, in[ 6] + K2, 5); | ||
1370 | ROUND(G, c, d, a, b, in[ 8] + K2, 9); | ||
1371 | ROUND(G, b, c, d, a, in[10] + K2, 13); | ||
1372 | |||
1373 | /* Round 3 */ | ||
1374 | ROUND(H, a, b, c, d, in[ 3] + K3, 3); | ||
1375 | ROUND(H, d, a, b, c, in[ 7] + K3, 9); | ||
1376 | ROUND(H, c, d, a, b, in[11] + K3, 11); | ||
1377 | ROUND(H, b, c, d, a, in[ 2] + K3, 15); | ||
1378 | ROUND(H, a, b, c, d, in[ 6] + K3, 3); | ||
1379 | ROUND(H, d, a, b, c, in[10] + K3, 9); | ||
1380 | ROUND(H, c, d, a, b, in[ 1] + K3, 11); | ||
1381 | ROUND(H, b, c, d, a, in[ 5] + K3, 15); | ||
1382 | ROUND(H, a, b, c, d, in[ 9] + K3, 3); | ||
1383 | ROUND(H, d, a, b, c, in[ 0] + K3, 9); | ||
1384 | ROUND(H, c, d, a, b, in[ 4] + K3, 11); | ||
1385 | ROUND(H, b, c, d, a, in[ 8] + K3, 15); | ||
1386 | |||
1387 | return buf[1] + b; /* "most hashed" word */ | ||
1388 | /* Alternative: return sum of all words? */ | ||
1389 | } | ||
1390 | #endif | ||
1391 | |||
1392 | #undef ROUND | ||
1393 | #undef F | ||
1394 | #undef G | ||
1395 | #undef H | ||
1396 | #undef K1 | ||
1397 | #undef K2 | ||
1398 | #undef K3 | ||
1399 | |||
1400 | /* This should not be decreased so low that ISNs wrap too fast. */ | ||
1401 | #define REKEY_INTERVAL (300 * HZ) | ||
1402 | /* | ||
1403 | * Bit layout of the tcp sequence numbers (before adding current time): | ||
1404 | * bit 24-31: increased after every key exchange | ||
1405 | * bit 0-23: hash(source,dest) | ||
1406 | * | ||
1407 | * The implementation is similar to the algorithm described | ||
1408 | * in the Appendix of RFC 1185, except that | ||
1409 | * - it uses a 1 MHz clock instead of a 250 kHz clock | ||
1410 | * - it performs a rekey every 5 minutes, which is equivalent | ||
1411 | * to a (source,dest) tulple dependent forward jump of the | ||
1412 | * clock by 0..2^(HASH_BITS+1) | ||
1413 | * | ||
1414 | * Thus the average ISN wraparound time is 68 minutes instead of | ||
1415 | * 4.55 hours. | ||
1416 | * | ||
1417 | * SMP cleanup and lock avoidance with poor man's RCU. | ||
1418 | * Manfred Spraul <manfred@colorfullife.com> | ||
1419 | * | ||
1420 | */ | ||
1421 | #define COUNT_BITS 8 | ||
1422 | #define COUNT_MASK ((1 << COUNT_BITS) - 1) | ||
1423 | #define HASH_BITS 24 | ||
1424 | #define HASH_MASK ((1 << HASH_BITS) - 1) | ||
1425 | 1304 | ||
1426 | static struct keydata { | 1305 | static int __init random_int_secret_init(void) |
1427 | __u32 count; /* already shifted to the final position */ | ||
1428 | __u32 secret[12]; | ||
1429 | } ____cacheline_aligned ip_keydata[2]; | ||
1430 | |||
1431 | static unsigned int ip_cnt; | ||
1432 | |||
1433 | static void rekey_seq_generator(struct work_struct *work); | ||
1434 | |||
1435 | static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); | ||
1436 | |||
1437 | /* | ||
1438 | * Lock avoidance: | ||
1439 | * The ISN generation runs lockless - it's just a hash over random data. | ||
1440 | * State changes happen every 5 minutes when the random key is replaced. | ||
1441 | * Synchronization is performed by having two copies of the hash function | ||
1442 | * state and rekey_seq_generator always updates the inactive copy. | ||
1443 | * The copy is then activated by updating ip_cnt. | ||
1444 | * The implementation breaks down if someone blocks the thread | ||
1445 | * that processes SYN requests for more than 5 minutes. Should never | ||
1446 | * happen, and even if that happens only a not perfectly compliant | ||
1447 | * ISN is generated, nothing fatal. | ||
1448 | */ | ||
1449 | static void rekey_seq_generator(struct work_struct *work) | ||
1450 | { | 1306 | { |
1451 | struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; | 1307 | get_random_bytes(random_int_secret, sizeof(random_int_secret)); |
1452 | |||
1453 | get_random_bytes(keyptr->secret, sizeof(keyptr->secret)); | ||
1454 | keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS; | ||
1455 | smp_wmb(); | ||
1456 | ip_cnt++; | ||
1457 | schedule_delayed_work(&rekey_work, | ||
1458 | round_jiffies_relative(REKEY_INTERVAL)); | ||
1459 | } | ||
1460 | |||
1461 | static inline struct keydata *get_keyptr(void) | ||
1462 | { | ||
1463 | struct keydata *keyptr = &ip_keydata[ip_cnt & 1]; | ||
1464 | |||
1465 | smp_rmb(); | ||
1466 | |||
1467 | return keyptr; | ||
1468 | } | ||
1469 | |||
1470 | static __init int seqgen_init(void) | ||
1471 | { | ||
1472 | rekey_seq_generator(NULL); | ||
1473 | return 0; | 1308 | return 0; |
1474 | } | 1309 | } |
1475 | late_initcall(seqgen_init); | 1310 | late_initcall(random_int_secret_init); |
1476 | |||
1477 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1478 | __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, | ||
1479 | __be16 sport, __be16 dport) | ||
1480 | { | ||
1481 | __u32 seq; | ||
1482 | __u32 hash[12]; | ||
1483 | struct keydata *keyptr = get_keyptr(); | ||
1484 | |||
1485 | /* The procedure is the same as for IPv4, but addresses are longer. | ||
1486 | * Thus we must use twothirdsMD4Transform. | ||
1487 | */ | ||
1488 | |||
1489 | memcpy(hash, saddr, 16); | ||
1490 | hash[4] = ((__force u16)sport << 16) + (__force u16)dport; | ||
1491 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); | ||
1492 | |||
1493 | seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; | ||
1494 | seq += keyptr->count; | ||
1495 | |||
1496 | seq += ktime_to_ns(ktime_get_real()); | ||
1497 | |||
1498 | return seq; | ||
1499 | } | ||
1500 | EXPORT_SYMBOL(secure_tcpv6_sequence_number); | ||
1501 | #endif | ||
1502 | |||
1503 | /* The code below is shamelessly stolen from secure_tcp_sequence_number(). | ||
1504 | * All blames to Andrey V. Savochkin <saw@msu.ru>. | ||
1505 | */ | ||
1506 | __u32 secure_ip_id(__be32 daddr) | ||
1507 | { | ||
1508 | struct keydata *keyptr; | ||
1509 | __u32 hash[4]; | ||
1510 | |||
1511 | keyptr = get_keyptr(); | ||
1512 | |||
1513 | /* | ||
1514 | * Pick a unique starting offset for each IP destination. | ||
1515 | * The dest ip address is placed in the starting vector, | ||
1516 | * which is then hashed with random data. | ||
1517 | */ | ||
1518 | hash[0] = (__force __u32)daddr; | ||
1519 | hash[1] = keyptr->secret[9]; | ||
1520 | hash[2] = keyptr->secret[10]; | ||
1521 | hash[3] = keyptr->secret[11]; | ||
1522 | |||
1523 | return half_md4_transform(hash, keyptr->secret); | ||
1524 | } | ||
1525 | |||
1526 | __u32 secure_ipv6_id(const __be32 daddr[4]) | ||
1527 | { | ||
1528 | const struct keydata *keyptr; | ||
1529 | __u32 hash[4]; | ||
1530 | |||
1531 | keyptr = get_keyptr(); | ||
1532 | |||
1533 | hash[0] = (__force __u32)daddr[0]; | ||
1534 | hash[1] = (__force __u32)daddr[1]; | ||
1535 | hash[2] = (__force __u32)daddr[2]; | ||
1536 | hash[3] = (__force __u32)daddr[3]; | ||
1537 | |||
1538 | return half_md4_transform(hash, keyptr->secret); | ||
1539 | } | ||
1540 | |||
1541 | #ifdef CONFIG_INET | ||
1542 | |||
1543 | __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | ||
1544 | __be16 sport, __be16 dport) | ||
1545 | { | ||
1546 | __u32 seq; | ||
1547 | __u32 hash[4]; | ||
1548 | struct keydata *keyptr = get_keyptr(); | ||
1549 | |||
1550 | /* | ||
1551 | * Pick a unique starting offset for each TCP connection endpoints | ||
1552 | * (saddr, daddr, sport, dport). | ||
1553 | * Note that the words are placed into the starting vector, which is | ||
1554 | * then mixed with a partial MD4 over random data. | ||
1555 | */ | ||
1556 | hash[0] = (__force u32)saddr; | ||
1557 | hash[1] = (__force u32)daddr; | ||
1558 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; | ||
1559 | hash[3] = keyptr->secret[11]; | ||
1560 | |||
1561 | seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; | ||
1562 | seq += keyptr->count; | ||
1563 | /* | ||
1564 | * As close as possible to RFC 793, which | ||
1565 | * suggests using a 250 kHz clock. | ||
1566 | * Further reading shows this assumes 2 Mb/s networks. | ||
1567 | * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. | ||
1568 | * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but | ||
1569 | * we also need to limit the resolution so that the u32 seq | ||
1570 | * overlaps less than one time per MSL (2 minutes). | ||
1571 | * Choosing a clock of 64 ns period is OK. (period of 274 s) | ||
1572 | */ | ||
1573 | seq += ktime_to_ns(ktime_get_real()) >> 6; | ||
1574 | |||
1575 | return seq; | ||
1576 | } | ||
1577 | |||
1578 | /* Generate secure starting point for ephemeral IPV4 transport port search */ | ||
1579 | u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) | ||
1580 | { | ||
1581 | struct keydata *keyptr = get_keyptr(); | ||
1582 | u32 hash[4]; | ||
1583 | |||
1584 | /* | ||
1585 | * Pick a unique starting offset for each ephemeral port search | ||
1586 | * (saddr, daddr, dport) and 48bits of random data. | ||
1587 | */ | ||
1588 | hash[0] = (__force u32)saddr; | ||
1589 | hash[1] = (__force u32)daddr; | ||
1590 | hash[2] = (__force u32)dport ^ keyptr->secret[10]; | ||
1591 | hash[3] = keyptr->secret[11]; | ||
1592 | |||
1593 | return half_md4_transform(hash, keyptr->secret); | ||
1594 | } | ||
1595 | EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); | ||
1596 | |||
1597 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1598 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, | ||
1599 | __be16 dport) | ||
1600 | { | ||
1601 | struct keydata *keyptr = get_keyptr(); | ||
1602 | u32 hash[12]; | ||
1603 | |||
1604 | memcpy(hash, saddr, 16); | ||
1605 | hash[4] = (__force u32)dport; | ||
1606 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); | ||
1607 | |||
1608 | return twothirdsMD4Transform((const __u32 *)daddr, hash); | ||
1609 | } | ||
1610 | #endif | ||
1611 | |||
1612 | #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) | ||
1613 | /* Similar to secure_tcp_sequence_number but generate a 48 bit value | ||
1614 | * bit's 32-47 increase every key exchange | ||
1615 | * 0-31 hash(source, dest) | ||
1616 | */ | ||
1617 | u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, | ||
1618 | __be16 sport, __be16 dport) | ||
1619 | { | ||
1620 | u64 seq; | ||
1621 | __u32 hash[4]; | ||
1622 | struct keydata *keyptr = get_keyptr(); | ||
1623 | |||
1624 | hash[0] = (__force u32)saddr; | ||
1625 | hash[1] = (__force u32)daddr; | ||
1626 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; | ||
1627 | hash[3] = keyptr->secret[11]; | ||
1628 | |||
1629 | seq = half_md4_transform(hash, keyptr->secret); | ||
1630 | seq |= ((u64)keyptr->count) << (32 - HASH_BITS); | ||
1631 | |||
1632 | seq += ktime_to_ns(ktime_get_real()); | ||
1633 | seq &= (1ull << 48) - 1; | ||
1634 | |||
1635 | return seq; | ||
1636 | } | ||
1637 | EXPORT_SYMBOL(secure_dccp_sequence_number); | ||
1638 | #endif | ||
1639 | |||
1640 | #endif /* CONFIG_INET */ | ||
1641 | |||
1642 | 1311 | ||
1643 | /* | 1312 | /* |
1644 | * Get a random word for internal kernel use only. Similar to urandom but | 1313 | * Get a random word for internal kernel use only. Similar to urandom but |
@@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number); | |||
1646 | * value is not cryptographically secure but for several uses the cost of | 1315 | * value is not cryptographically secure but for several uses the cost of |
1647 | * depleting entropy is too high | 1316 | * depleting entropy is too high |
1648 | */ | 1317 | */ |
1649 | DEFINE_PER_CPU(__u32 [4], get_random_int_hash); | 1318 | DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); |
1650 | unsigned int get_random_int(void) | 1319 | unsigned int get_random_int(void) |
1651 | { | 1320 | { |
1652 | struct keydata *keyptr; | ||
1653 | __u32 *hash = get_cpu_var(get_random_int_hash); | 1321 | __u32 *hash = get_cpu_var(get_random_int_hash); |
1654 | int ret; | 1322 | unsigned int ret; |
1655 | 1323 | ||
1656 | keyptr = get_keyptr(); | ||
1657 | hash[0] += current->pid + jiffies + get_cycles(); | 1324 | hash[0] += current->pid + jiffies + get_cycles(); |
1658 | 1325 | md5_transform(hash, random_int_secret); | |
1659 | ret = half_md4_transform(hash, keyptr->secret); | 1326 | ret = hash[0]; |
1660 | put_cpu_var(get_random_int_hash); | 1327 | put_cpu_var(get_random_int_hash); |
1661 | 1328 | ||
1662 | return ret; | 1329 | return ret; |
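The large random.c removal above takes the MD4-based TCP/DCCP sequence-number and IP-ID helpers out of this file (only the removal side appears in this hunk), leaving get_random_int() rebuilt on md5_transform() over a per-CPU hash and a per-boot random_int_secret filled by get_random_bytes(). The shape of the generator is: cheap per-call inputs (pid, jiffies, cycle counter) folded into per-CPU state, then compressed against a boot-time secret. A userspace sketch of that shape, with a stand-in mixer instead of the kernel's md5_transform() and time() standing in for get_random_bytes():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in mixer: the kernel compresses a 4-word hash against a
 * 16-word per-boot secret with md5_transform(); this only shows the
 * data flow, not the real function. */
static void mix(uint32_t hash[4], const uint32_t secret[4])
{
        for (int i = 0; i < 4; i++) {
                hash[i] += secret[i];
                hash[i] ^= (hash[(i + 1) & 3] << 7) | (hash[(i + 3) & 3] >> 9);
        }
}

static uint32_t secret[4];              /* filled once at startup */
static uint32_t percpu_hash[4];         /* one copy per CPU in the kernel */

static uint32_t get_random_int_sketch(uint32_t pid, uint32_t jiffies)
{
        percpu_hash[0] += pid + jiffies;        /* cheap per-call entropy */
        mix(percpu_hash, secret);
        return percpu_hash[0];
}

int main(void)
{
        secret[0] = (uint32_t)time(NULL);       /* stand-in for get_random_bytes() */
        printf("%u\n", get_random_int_sketch(1234, 5678));
        return 0;
}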
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c new file mode 100644 index 000000000000..cf3ee008dca2 --- /dev/null +++ b/drivers/char/tile-srom.c | |||
@@ -0,0 +1,481 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * SPI Flash ROM driver | ||
15 | * | ||
16 | * This source code is derived from code provided in "Linux Device | ||
17 | * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and | ||
18 | * Greg Kroah-Hartman, published by O'Reilly Media, Inc. | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/moduleparam.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/kernel.h> /* printk() */ | ||
25 | #include <linux/slab.h> /* kmalloc() */ | ||
26 | #include <linux/fs.h> /* everything... */ | ||
27 | #include <linux/errno.h> /* error codes */ | ||
28 | #include <linux/types.h> /* size_t */ | ||
29 | #include <linux/proc_fs.h> | ||
30 | #include <linux/fcntl.h> /* O_ACCMODE */ | ||
31 | #include <linux/aio.h> | ||
32 | #include <linux/pagemap.h> | ||
33 | #include <linux/hugetlb.h> | ||
34 | #include <linux/uaccess.h> | ||
35 | #include <linux/platform_device.h> | ||
36 | #include <hv/hypervisor.h> | ||
37 | #include <linux/ioctl.h> | ||
38 | #include <linux/cdev.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <hv/drv_srom_intf.h> | ||
41 | |||
42 | /* | ||
43 | * Size of our hypervisor I/O requests. We break up large transfers | ||
44 | * so that we don't spend large uninterrupted spans of time in the | ||
45 | * hypervisor. Erasing an SROM sector takes a significant fraction of | ||
46 | * a second, so if we allowed the user to, say, do one I/O to write the | ||
47 | * entire ROM, we'd get soft lockup timeouts, or worse. | ||
48 | */ | ||
49 | #define SROM_CHUNK_SIZE ((size_t)4096) | ||
50 | |||
51 | /* | ||
52 | * When hypervisor is busy (e.g. erasing), poll the status periodically. | ||
53 | */ | ||
54 | |||
55 | /* | ||
56 | * Interval to poll the state in msec | ||
57 | */ | ||
58 | #define SROM_WAIT_TRY_INTERVAL 20 | ||
59 | |||
60 | /* | ||
61 | * Maximum times to poll the state | ||
62 | */ | ||
63 | #define SROM_MAX_WAIT_TRY_TIMES 1000 | ||
64 | |||
65 | struct srom_dev { | ||
66 | int hv_devhdl; /* Handle for hypervisor device */ | ||
67 | u32 total_size; /* Size of this device */ | ||
68 | u32 sector_size; /* Size of a sector */ | ||
69 | u32 page_size; /* Size of a page */ | ||
70 | struct mutex lock; /* Allow only one accessor at a time */ | ||
71 | }; | ||
72 | |||
73 | static int srom_major; /* Dynamic major by default */ | ||
74 | module_param(srom_major, int, 0); | ||
75 | MODULE_AUTHOR("Tilera Corporation"); | ||
76 | MODULE_LICENSE("GPL"); | ||
77 | |||
78 | static int srom_devs; /* Number of SROM partitions */ | ||
79 | static struct cdev srom_cdev; | ||
80 | static struct class *srom_class; | ||
81 | static struct srom_dev *srom_devices; | ||
82 | |||
83 | /* | ||
84 | * Handle calling the hypervisor and managing EAGAIN/EBUSY. | ||
85 | */ | ||
86 | |||
87 | static ssize_t _srom_read(int hv_devhdl, void *buf, | ||
88 | loff_t off, size_t count) | ||
89 | { | ||
90 | int retval, retries = SROM_MAX_WAIT_TRY_TIMES; | ||
91 | for (;;) { | ||
92 | retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf, | ||
93 | count, off); | ||
94 | if (retval >= 0) | ||
95 | return retval; | ||
96 | if (retval == HV_EAGAIN) | ||
97 | continue; | ||
98 | if (retval == HV_EBUSY && --retries > 0) { | ||
99 | msleep(SROM_WAIT_TRY_INTERVAL); | ||
100 | continue; | ||
101 | } | ||
102 | pr_err("_srom_read: error %d\n", retval); | ||
103 | return -EIO; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | static ssize_t _srom_write(int hv_devhdl, const void *buf, | ||
108 | loff_t off, size_t count) | ||
109 | { | ||
110 | int retval, retries = SROM_MAX_WAIT_TRY_TIMES; | ||
111 | for (;;) { | ||
112 | retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf, | ||
113 | count, off); | ||
114 | if (retval >= 0) | ||
115 | return retval; | ||
116 | if (retval == HV_EAGAIN) | ||
117 | continue; | ||
118 | if (retval == HV_EBUSY && --retries > 0) { | ||
119 | msleep(SROM_WAIT_TRY_INTERVAL); | ||
120 | continue; | ||
121 | } | ||
122 | pr_err("_srom_write: error %d\n", retval); | ||
123 | return -EIO; | ||
124 | } | ||
125 | } | ||
126 | |||
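The two helpers above give every hypervisor transfer the same bounded-retry behaviour: HV_EAGAIN retries immediately, HV_EBUSY sleeps SROM_WAIT_TRY_INTERVAL ms between attempts for at most SROM_MAX_WAIT_TRY_TIMES tries (roughly 20 seconds total), and anything else becomes -EIO. Together with SROM_CHUNK_SIZE splitting user I/O into 4 KB pieces, this keeps any single hypervisor call short enough to avoid the soft-lockup scenario described in the header comment. As a usage sketch once the driver is loaded, something like `dd if=/dev/srom/0 bs=4096 count=1 | hexdump -C` would read the first chunk of partition 0; the device path follows the Kconfig help text earlier in this series and the command is an example only.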
127 | /** | ||
128 | * srom_open() - Device open routine. | ||
129 | * @inode: Inode for this device. | ||
130 | * @filp: File for this specific open of the device. | ||
131 | * | ||
132 | * Returns zero, or an error code. | ||
133 | */ | ||
134 | static int srom_open(struct inode *inode, struct file *filp) | ||
135 | { | ||
136 | filp->private_data = &srom_devices[iminor(inode)]; | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | |||
141 | /** | ||
142 | * srom_release() - Device release routine. | ||
143 | * @inode: Inode for this device. | ||
144 | * @filp: File for this specific open of the device. | ||
145 | * | ||
146 | * Returns zero, or an error code. | ||
147 | */ | ||
148 | static int srom_release(struct inode *inode, struct file *filp) | ||
149 | { | ||
150 | struct srom_dev *srom = filp->private_data; | ||
151 | char dummy; | ||
152 | |||
153 | /* Make sure we've flushed anything written to the ROM. */ | ||
154 | mutex_lock(&srom->lock); | ||
155 | if (srom->hv_devhdl >= 0) | ||
156 | _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1); | ||
157 | mutex_unlock(&srom->lock); | ||
158 | |||
159 | filp->private_data = NULL; | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | |||
165 | /** | ||
166 | * srom_read() - Read data from the device. | ||
167 | * @filp: File for this specific open of the device. | ||
168 | * @buf: User's data buffer. | ||
169 | * @count: Number of bytes requested. | ||
170 | * @f_pos: File position. | ||
171 | * | ||
172 | * Returns number of bytes read, or an error code. | ||
173 | */ | ||
174 | static ssize_t srom_read(struct file *filp, char __user *buf, | ||
175 | size_t count, loff_t *f_pos) | ||
176 | { | ||
177 | int retval = 0; | ||
178 | void *kernbuf; | ||
179 | struct srom_dev *srom = filp->private_data; | ||
180 | |||
181 | kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); | ||
182 | if (!kernbuf) | ||
183 | return -ENOMEM; | ||
184 | |||
185 | if (mutex_lock_interruptible(&srom->lock)) { | ||
186 | retval = -ERESTARTSYS; | ||
187 | kfree(kernbuf); | ||
188 | return retval; | ||
189 | } | ||
190 | |||
191 | while (count) { | ||
192 | int hv_retval; | ||
193 | int bytes_this_pass = min(count, SROM_CHUNK_SIZE); | ||
194 | |||
195 | hv_retval = _srom_read(srom->hv_devhdl, kernbuf, | ||
196 | *f_pos, bytes_this_pass); | ||
197 | if (hv_retval > 0) { | ||
198 | if (copy_to_user(buf, kernbuf, hv_retval) != 0) { | ||
199 | retval = -EFAULT; | ||
200 | break; | ||
201 | } | ||
202 | } else if (hv_retval <= 0) { | ||
203 | if (retval == 0) | ||
204 | retval = hv_retval; | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | retval += hv_retval; | ||
209 | *f_pos += hv_retval; | ||
210 | buf += hv_retval; | ||
211 | count -= hv_retval; | ||
212 | } | ||
213 | |||
214 | mutex_unlock(&srom->lock); | ||
215 | kfree(kernbuf); | ||
216 | |||
217 | return retval; | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * srom_write() - Write data to the device. | ||
222 | * @filp: File for this specific open of the device. | ||
223 | * @buf: User's data buffer. | ||
224 | * @count: Number of bytes requested. | ||
225 | * @f_pos: File position. | ||
226 | * | ||
227 | * Returns number of bytes written, or an error code. | ||
228 | */ | ||
229 | static ssize_t srom_write(struct file *filp, const char __user *buf, | ||
230 | size_t count, loff_t *f_pos) | ||
231 | { | ||
232 | int retval = 0; | ||
233 | void *kernbuf; | ||
234 | struct srom_dev *srom = filp->private_data; | ||
235 | |||
236 | kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); | ||
237 | if (!kernbuf) | ||
238 | return -ENOMEM; | ||
239 | |||
240 | if (mutex_lock_interruptible(&srom->lock)) { | ||
241 | retval = -ERESTARTSYS; | ||
242 | kfree(kernbuf); | ||
243 | return retval; | ||
244 | } | ||
245 | |||
246 | while (count) { | ||
247 | int hv_retval; | ||
248 | int bytes_this_pass = min(count, SROM_CHUNK_SIZE); | ||
249 | |||
250 | if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) { | ||
251 | retval = -EFAULT; | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | hv_retval = _srom_write(srom->hv_devhdl, kernbuf, | ||
256 | *f_pos, bytes_this_pass); | ||
257 | if (hv_retval <= 0) { | ||
258 | if (retval == 0) | ||
259 | retval = hv_retval; | ||
260 | break; | ||
261 | } | ||
262 | |||
263 | retval += hv_retval; | ||
264 | *f_pos += hv_retval; | ||
265 | buf += hv_retval; | ||
266 | count -= hv_retval; | ||
267 | } | ||
268 | |||
269 | mutex_unlock(&srom->lock); | ||
270 | kfree(kernbuf); | ||
271 | |||
272 | return retval; | ||
273 | } | ||
274 | |||
275 | /* Provide our own implementation so we can use srom->total_size. */ | ||
276 | loff_t srom_llseek(struct file *filp, loff_t offset, int origin) | ||
277 | { | ||
278 | struct srom_dev *srom = filp->private_data; | ||
279 | |||
280 | if (mutex_lock_interruptible(&srom->lock)) | ||
281 | return -ERESTARTSYS; | ||
282 | |||
283 | switch (origin) { | ||
284 | case SEEK_END: | ||
285 | offset += srom->total_size; | ||
286 | break; | ||
287 | case SEEK_CUR: | ||
288 | offset += filp->f_pos; | ||
289 | break; | ||
290 | } | ||
291 | |||
292 | if (offset < 0 || offset > srom->total_size) { | ||
293 | offset = -EINVAL; | ||
294 | } else { | ||
295 | filp->f_pos = offset; | ||
296 | filp->f_version = 0; | ||
297 | } | ||
298 | |||
299 | mutex_unlock(&srom->lock); | ||
300 | |||
301 | return offset; | ||
302 | } | ||
303 | |||
304 | static ssize_t total_show(struct device *dev, | ||
305 | struct device_attribute *attr, char *buf) | ||
306 | { | ||
307 | struct srom_dev *srom = dev_get_drvdata(dev); | ||
308 | return sprintf(buf, "%u\n", srom->total_size); | ||
309 | } | ||
310 | |||
311 | static ssize_t sector_show(struct device *dev, | ||
312 | struct device_attribute *attr, char *buf) | ||
313 | { | ||
314 | struct srom_dev *srom = dev_get_drvdata(dev); | ||
315 | return sprintf(buf, "%u\n", srom->sector_size); | ||
316 | } | ||
317 | |||
318 | static ssize_t page_show(struct device *dev, | ||
319 | struct device_attribute *attr, char *buf) | ||
320 | { | ||
321 | struct srom_dev *srom = dev_get_drvdata(dev); | ||
322 | return sprintf(buf, "%u\n", srom->page_size); | ||
323 | } | ||
324 | |||
325 | static struct device_attribute srom_dev_attrs[] = { | ||
326 | __ATTR(total_size, S_IRUGO, total_show, NULL), | ||
327 | __ATTR(sector_size, S_IRUGO, sector_show, NULL), | ||
328 | __ATTR(page_size, S_IRUGO, page_show, NULL), | ||
329 | __ATTR_NULL | ||
330 | }; | ||
331 | |||
332 | static char *srom_devnode(struct device *dev, mode_t *mode) | ||
333 | { | ||
334 | *mode = S_IRUGO | S_IWUSR; | ||
335 | return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * The fops | ||
340 | */ | ||
341 | static const struct file_operations srom_fops = { | ||
342 | .owner = THIS_MODULE, | ||
343 | .llseek = srom_llseek, | ||
344 | .read = srom_read, | ||
345 | .write = srom_write, | ||
346 | .open = srom_open, | ||
347 | .release = srom_release, | ||
348 | }; | ||
349 | |||
350 | /** | ||
351 | * srom_setup_minor() - Initialize per-minor information. | ||
352 | * @srom: Per-device SROM state. | ||
353 | * @index: Device to set up. | ||
354 | */ | ||
355 | static int srom_setup_minor(struct srom_dev *srom, int index) | ||
356 | { | ||
357 | struct device *dev; | ||
358 | int devhdl = srom->hv_devhdl; | ||
359 | |||
360 | mutex_init(&srom->lock); | ||
361 | |||
362 | if (_srom_read(devhdl, &srom->total_size, | ||
363 | SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0) | ||
364 | return -EIO; | ||
365 | if (_srom_read(devhdl, &srom->sector_size, | ||
366 | SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0) | ||
367 | return -EIO; | ||
368 | if (_srom_read(devhdl, &srom->page_size, | ||
369 | SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0) | ||
370 | return -EIO; | ||
371 | |||
372 | dev = device_create(srom_class, &platform_bus, | ||
373 | MKDEV(srom_major, index), srom, "%d", index); | ||
374 | return IS_ERR(dev) ? PTR_ERR(dev) : 0; | ||
375 | } | ||
376 | |||
377 | /** srom_init() - Initialize the driver's module. */ | ||
378 | static int srom_init(void) | ||
379 | { | ||
380 | int result, i; | ||
381 | dev_t dev = MKDEV(srom_major, 0); | ||
382 | |||
383 | /* | ||
384 | * Start with a plausible number of partitions; the krealloc() call | ||
385 | * below will yield about log(srom_devs) additional allocations. | ||
386 | */ | ||
387 | srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL); | ||
388 | |||
389 | /* Discover the number of srom partitions. */ | ||
390 | for (i = 0; ; i++) { | ||
391 | int devhdl; | ||
392 | char buf[20]; | ||
393 | struct srom_dev *new_srom_devices = | ||
394 | krealloc(srom_devices, (i+1) * sizeof(struct srom_dev), | ||
395 | GFP_KERNEL | __GFP_ZERO); | ||
396 | if (!new_srom_devices) { | ||
397 | result = -ENOMEM; | ||
398 | goto fail_mem; | ||
399 | } | ||
400 | srom_devices = new_srom_devices; | ||
401 | sprintf(buf, "srom/0/%d", i); | ||
402 | devhdl = hv_dev_open((HV_VirtAddr)buf, 0); | ||
403 | if (devhdl < 0) { | ||
404 | if (devhdl != HV_ENODEV) | ||
405 | pr_notice("srom/%d: hv_dev_open failed: %d.\n", | ||
406 | i, devhdl); | ||
407 | break; | ||
408 | } | ||
409 | srom_devices[i].hv_devhdl = devhdl; | ||
410 | } | ||
411 | srom_devs = i; | ||
412 | |||
413 | /* Bail out early if we have no partitions at all. */ | ||
414 | if (srom_devs == 0) { | ||
415 | result = -ENODEV; | ||
416 | goto fail_mem; | ||
417 | } | ||
418 | |||
419 | /* Register our major, and accept a dynamic number. */ | ||
420 | if (srom_major) | ||
421 | result = register_chrdev_region(dev, srom_devs, "srom"); | ||
422 | else { | ||
423 | result = alloc_chrdev_region(&dev, 0, srom_devs, "srom"); | ||
424 | srom_major = MAJOR(dev); | ||
425 | } | ||
426 | if (result < 0) | ||
427 | goto fail_mem; | ||
428 | |||
429 | /* Register a character device. */ | ||
430 | cdev_init(&srom_cdev, &srom_fops); | ||
431 | srom_cdev.owner = THIS_MODULE; | ||
432 | srom_cdev.ops = &srom_fops; | ||
433 | result = cdev_add(&srom_cdev, dev, srom_devs); | ||
434 | if (result < 0) | ||
435 | goto fail_chrdev; | ||
436 | |||
437 | /* Create a sysfs class. */ | ||
438 | srom_class = class_create(THIS_MODULE, "srom"); | ||
439 | if (IS_ERR(srom_class)) { | ||
440 | result = PTR_ERR(srom_class); | ||
441 | goto fail_cdev; | ||
442 | } | ||
443 | srom_class->dev_attrs = srom_dev_attrs; | ||
444 | srom_class->devnode = srom_devnode; | ||
445 | |||
446 | /* Do per-partition initialization */ | ||
447 | for (i = 0; i < srom_devs; i++) { | ||
448 | result = srom_setup_minor(srom_devices + i, i); | ||
449 | if (result < 0) | ||
450 | goto fail_class; | ||
451 | } | ||
452 | |||
453 | return 0; | ||
454 | |||
455 | fail_class: | ||
456 | for (i = 0; i < srom_devs; i++) | ||
457 | device_destroy(srom_class, MKDEV(srom_major, i)); | ||
458 | class_destroy(srom_class); | ||
459 | fail_cdev: | ||
460 | cdev_del(&srom_cdev); | ||
461 | fail_chrdev: | ||
462 | unregister_chrdev_region(dev, srom_devs); | ||
463 | fail_mem: | ||
464 | kfree(srom_devices); | ||
465 | return result; | ||
466 | } | ||
467 | |||
468 | /** srom_cleanup() - Clean up the driver's module. */ | ||
469 | static void srom_cleanup(void) | ||
470 | { | ||
471 | int i; | ||
472 | for (i = 0; i < srom_devs; i++) | ||
473 | device_destroy(srom_class, MKDEV(srom_major, i)); | ||
474 | class_destroy(srom_class); | ||
475 | cdev_del(&srom_cdev); | ||
476 | unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs); | ||
477 | kfree(srom_devices); | ||
478 | } | ||
479 | |||
480 | module_init(srom_init); | ||
481 | module_exit(srom_cleanup); | ||
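The srom driver above handles chunking (SROM_CHUNK_SIZE) and flushing internally, so user space can treat each partition as an ordinary file. A minimal user-space sketch follows; it is an illustration, not part of the patch, and it assumes udev/devtmpfs creates the node path returned by srom_devnode() (e.g. /dev/srom/0). The total_size/sector_size/page_size attributes registered above would likewise be expected under the srom class in sysfs.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/srom/0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* srom_read() above already loops in SROM_CHUNK_SIZE pieces,
	 * so a plain read() loop on the node is all that is needed. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	if (n < 0)
		perror("read");
	close(fd);
	return 0;
}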
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7fc2f108f490..3f4051a7c5a7 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -80,7 +80,7 @@ enum tis_defaults { | |||
80 | static LIST_HEAD(tis_chips); | 80 | static LIST_HEAD(tis_chips); |
81 | static DEFINE_SPINLOCK(tis_lock); | 81 | static DEFINE_SPINLOCK(tis_lock); |
82 | 82 | ||
83 | #ifdef CONFIG_PNP | 83 | #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) |
84 | static int is_itpm(struct pnp_dev *dev) | 84 | static int is_itpm(struct pnp_dev *dev) |
85 | { | 85 | { |
86 | struct acpi_device *acpi = pnp_acpi_device(dev); | 86 | struct acpi_device *acpi = pnp_acpi_device(dev); |
@@ -93,6 +93,11 @@ static int is_itpm(struct pnp_dev *dev) | |||
93 | 93 | ||
94 | return 0; | 94 | return 0; |
95 | } | 95 | } |
96 | #else | ||
97 | static inline int is_itpm(struct pnp_dev *dev) | ||
98 | { | ||
99 | return 0; | ||
100 | } | ||
96 | #endif | 101 | #endif |
97 | 102 | ||
98 | static int check_locality(struct tpm_chip *chip, int l) | 103 | static int check_locality(struct tpm_chip *chip, int l) |
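The tpm_tis hunk keeps callers of is_itpm() unconditional by compiling a trivial inline stub when the ACPI lookup cannot be built. A stand-alone sketch of the same guard pattern, using made-up option names HAVE_PNP/HAVE_ACPI rather than the real Kconfig symbols:

#include <stdio.h>

#define HAVE_PNP 1
/* #define HAVE_ACPI 1 */

#if defined(HAVE_PNP) && defined(HAVE_ACPI)
static int is_itpm(void)
{
	/* the real version inspects the ACPI device ID here */
	return 1;
}
#else
static inline int is_itpm(void)
{
	return 0;	/* no ACPI support: report "not an Intel iTPM" */
}
#endif

int main(void)
{
	printf("is_itpm() = %d\n", is_itpm());
	return 0;
}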
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index bf5092455a8f..d4c542372886 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | |||
25 | 25 | ||
26 | DEFINE_MUTEX(cpuidle_lock); | 26 | DEFINE_MUTEX(cpuidle_lock); |
27 | LIST_HEAD(cpuidle_detected_devices); | 27 | LIST_HEAD(cpuidle_detected_devices); |
28 | static void (*pm_idle_old)(void); | ||
29 | 28 | ||
30 | static int enabled_devices; | 29 | static int enabled_devices; |
30 | static int off __read_mostly; | ||
31 | static int initialized __read_mostly; | ||
32 | |||
33 | int cpuidle_disabled(void) | ||
34 | { | ||
35 | return off; | ||
36 | } | ||
37 | void disable_cpuidle(void) | ||
38 | { | ||
39 | off = 1; | ||
40 | } | ||
31 | 41 | ||
32 | #if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT) | 42 | #if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT) |
33 | static void cpuidle_kick_cpus(void) | 43 | static void cpuidle_kick_cpus(void) |
@@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); | |||
46 | * cpuidle_idle_call - the main idle loop | 56 | * cpuidle_idle_call - the main idle loop |
47 | * | 57 | * |
48 | * NOTE: no locks or semaphores should be used here | 58 | * NOTE: no locks or semaphores should be used here |
59 | * return non-zero on failure | ||
49 | */ | 60 | */ |
50 | static void cpuidle_idle_call(void) | 61 | int cpuidle_idle_call(void) |
51 | { | 62 | { |
52 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 63 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
53 | struct cpuidle_state *target_state; | 64 | struct cpuidle_state *target_state; |
54 | int next_state; | 65 | int next_state; |
55 | 66 | ||
67 | if (off) | ||
68 | return -ENODEV; | ||
69 | |||
70 | if (!initialized) | ||
71 | return -ENODEV; | ||
72 | |||
56 | /* check if the device is ready */ | 73 | /* check if the device is ready */ |
57 | if (!dev || !dev->enabled) { | 74 | if (!dev || !dev->enabled) |
58 | if (pm_idle_old) | 75 | return -EBUSY; |
59 | pm_idle_old(); | ||
60 | else | ||
61 | #if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE) | ||
62 | default_idle(); | ||
63 | #else | ||
64 | local_irq_enable(); | ||
65 | #endif | ||
66 | return; | ||
67 | } | ||
68 | 76 | ||
69 | #if 0 | 77 | #if 0 |
70 | /* shows regressions, re-enable for 2.6.29 */ | 78 | /* shows regressions, re-enable for 2.6.29 */ |
@@ -89,7 +97,7 @@ static void cpuidle_idle_call(void) | |||
89 | next_state = cpuidle_curr_governor->select(dev); | 97 | next_state = cpuidle_curr_governor->select(dev); |
90 | if (need_resched()) { | 98 | if (need_resched()) { |
91 | local_irq_enable(); | 99 | local_irq_enable(); |
92 | return; | 100 | return 0; |
93 | } | 101 | } |
94 | 102 | ||
95 | target_state = &dev->states[next_state]; | 103 | target_state = &dev->states[next_state]; |
@@ -114,6 +122,8 @@ static void cpuidle_idle_call(void) | |||
114 | /* give the governor an opportunity to reflect on the outcome */ | 122 | /* give the governor an opportunity to reflect on the outcome */ |
115 | if (cpuidle_curr_governor->reflect) | 123 | if (cpuidle_curr_governor->reflect) |
116 | cpuidle_curr_governor->reflect(dev); | 124 | cpuidle_curr_governor->reflect(dev); |
125 | |||
126 | return 0; | ||
117 | } | 127 | } |
118 | 128 | ||
119 | /** | 129 | /** |
@@ -121,10 +131,10 @@ static void cpuidle_idle_call(void) | |||
121 | */ | 131 | */ |
122 | void cpuidle_install_idle_handler(void) | 132 | void cpuidle_install_idle_handler(void) |
123 | { | 133 | { |
124 | if (enabled_devices && (pm_idle != cpuidle_idle_call)) { | 134 | if (enabled_devices) { |
125 | /* Make sure all changes finished before we switch to new idle */ | 135 | /* Make sure all changes finished before we switch to new idle */ |
126 | smp_wmb(); | 136 | smp_wmb(); |
127 | pm_idle = cpuidle_idle_call; | 137 | initialized = 1; |
128 | } | 138 | } |
129 | } | 139 | } |
130 | 140 | ||
@@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void) | |||
133 | */ | 143 | */ |
134 | void cpuidle_uninstall_idle_handler(void) | 144 | void cpuidle_uninstall_idle_handler(void) |
135 | { | 145 | { |
136 | if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) { | 146 | if (enabled_devices) { |
137 | pm_idle = pm_idle_old; | 147 | initialized = 0; |
138 | cpuidle_kick_cpus(); | 148 | cpuidle_kick_cpus(); |
139 | } | 149 | } |
140 | } | 150 | } |
@@ -427,7 +437,8 @@ static int __init cpuidle_init(void) | |||
427 | { | 437 | { |
428 | int ret; | 438 | int ret; |
429 | 439 | ||
430 | pm_idle_old = pm_idle; | 440 | if (cpuidle_disabled()) |
441 | return -ENODEV; | ||
431 | 442 | ||
432 | ret = cpuidle_add_class_sysfs(&cpu_sysdev_class); | 443 | ret = cpuidle_add_class_sysfs(&cpu_sysdev_class); |
433 | if (ret) | 444 | if (ret) |
@@ -438,4 +449,5 @@ static int __init cpuidle_init(void) | |||
438 | return 0; | 449 | return 0; |
439 | } | 450 | } |
440 | 451 | ||
452 | module_param(off, int, 0444); | ||
441 | core_initcall(cpuidle_init); | 453 | core_initcall(cpuidle_init); |
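With cpuidle_idle_call() now returning an error instead of cpuidle silently replacing pm_idle, the fallback decision moves to the caller (the architecture idle loops, updated in companion patches not shown here). A self-contained model of that control flow, with stand-ins for cpuidle_idle_call() and default_idle():

#include <stdio.h>
#include <errno.h>

static int off = 1;          /* models the new "off" module parameter */
static int initialized;      /* set by cpuidle_install_idle_handler() */

static int cpuidle_idle_call(void)
{
	if (off)
		return -ENODEV;
	if (!initialized)
		return -ENODEV;
	/* ... governor selection and state entry would happen here ... */
	return 0;
}

static void default_idle(void)
{
	puts("fell back to default idle");
}

int main(void)
{
	/* The arch idle loop now does the fallback itself instead of
	 * cpuidle hijacking pm_idle behind its back. */
	if (cpuidle_idle_call())
		default_idle();
	return 0;
}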
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 33e50d556f17..38c3fd8b9d76 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h | |||
@@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors; | |||
13 | extern struct list_head cpuidle_detected_devices; | 13 | extern struct list_head cpuidle_detected_devices; |
14 | extern struct mutex cpuidle_lock; | 14 | extern struct mutex cpuidle_lock; |
15 | extern spinlock_t cpuidle_driver_lock; | 15 | extern spinlock_t cpuidle_driver_lock; |
16 | extern int cpuidle_disabled(void); | ||
16 | 17 | ||
17 | /* idle loop */ | 18 | /* idle loop */ |
18 | extern void cpuidle_install_idle_handler(void); | 19 | extern void cpuidle_install_idle_handler(void); |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index fd1601e3d125..3f7e3cedd133 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
26 | if (!drv) | 26 | if (!drv) |
27 | return -EINVAL; | 27 | return -EINVAL; |
28 | 28 | ||
29 | if (cpuidle_disabled()) | ||
30 | return -ENODEV; | ||
31 | |||
29 | spin_lock(&cpuidle_driver_lock); | 32 | spin_lock(&cpuidle_driver_lock); |
30 | if (cpuidle_curr_driver) { | 33 | if (cpuidle_curr_driver) { |
31 | spin_unlock(&cpuidle_driver_lock); | 34 | spin_unlock(&cpuidle_driver_lock); |
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 724c164d31c9..ea2f8e7aa24a 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c | |||
@@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
81 | if (!gov || !gov->select) | 81 | if (!gov || !gov->select) |
82 | return -EINVAL; | 82 | return -EINVAL; |
83 | 83 | ||
84 | if (cpuidle_disabled()) | ||
85 | return -ENODEV; | ||
86 | |||
84 | mutex_lock(&cpuidle_lock); | 87 | mutex_lock(&cpuidle_lock); |
85 | if (__cpuidle_find_governor(gov->name) == NULL) { | 88 | if (__cpuidle_find_governor(gov->name) == NULL) { |
86 | ret = 0; | 89 | ret = 0; |
diff --git a/drivers/dma/TODO b/drivers/dma/TODO index a4af8589330c..734ed0206cd5 100644 --- a/drivers/dma/TODO +++ b/drivers/dma/TODO | |||
@@ -9,6 +9,5 @@ TODO for slave dma | |||
9 | - mxs-dma.c | 9 | - mxs-dma.c |
10 | - dw_dmac | 10 | - dw_dmac |
11 | - intel_mid_dma | 11 | - intel_mid_dma |
12 | - ste_dma40 | ||
13 | 4. Check other subsystems for dma drivers and merge/move to dmaengine | 12 | 4. Check other subsystems for dma drivers and merge/move to dmaengine |
14 | 5. Remove dma_slave_config's dma direction. | 13 | 5. Remove dma_slave_config's dma direction. |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e6d7228b1479..196a7378d332 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -156,14 +156,10 @@ struct pl08x_driver_data { | |||
156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | 156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ |
157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | 157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) |
158 | 158 | ||
159 | /* Minimum period between work queue runs */ | ||
160 | #define PL08X_WQ_PERIODMIN 20 | ||
161 | |||
162 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | 159 | /* Size (bytes) of each LLI buffer allocated for one transfer */ |
163 | # define PL08X_LLI_TSFR_SIZE 0x2000 | 160 | # define PL08X_LLI_TSFR_SIZE 0x2000 |
164 | 161 | ||
165 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ | 162 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ |
166 | #define PL08X_MAX_ALLOCS 0x40 | ||
167 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) | 163 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) |
168 | #define PL08X_ALIGN 8 | 164 | #define PL08X_ALIGN 8 |
169 | 165 | ||
@@ -495,10 +491,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, | |||
495 | 491 | ||
496 | struct pl08x_lli_build_data { | 492 | struct pl08x_lli_build_data { |
497 | struct pl08x_txd *txd; | 493 | struct pl08x_txd *txd; |
498 | struct pl08x_driver_data *pl08x; | ||
499 | struct pl08x_bus_data srcbus; | 494 | struct pl08x_bus_data srcbus; |
500 | struct pl08x_bus_data dstbus; | 495 | struct pl08x_bus_data dstbus; |
501 | size_t remainder; | 496 | size_t remainder; |
497 | u32 lli_bus; | ||
502 | }; | 498 | }; |
503 | 499 | ||
504 | /* | 500 | /* |
@@ -551,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, | |||
551 | llis_va[num_llis].src = bd->srcbus.addr; | 547 | llis_va[num_llis].src = bd->srcbus.addr; |
552 | llis_va[num_llis].dst = bd->dstbus.addr; | 548 | llis_va[num_llis].dst = bd->dstbus.addr; |
553 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); | 549 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); |
554 | if (bd->pl08x->lli_buses & PL08X_AHB2) | 550 | llis_va[num_llis].lli |= bd->lli_bus; |
555 | llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; | ||
556 | 551 | ||
557 | if (cctl & PL080_CONTROL_SRC_INCR) | 552 | if (cctl & PL080_CONTROL_SRC_INCR) |
558 | bd->srcbus.addr += len; | 553 | bd->srcbus.addr += len; |
@@ -605,9 +600,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
605 | cctl = txd->cctl; | 600 | cctl = txd->cctl; |
606 | 601 | ||
607 | bd.txd = txd; | 602 | bd.txd = txd; |
608 | bd.pl08x = pl08x; | ||
609 | bd.srcbus.addr = txd->src_addr; | 603 | bd.srcbus.addr = txd->src_addr; |
610 | bd.dstbus.addr = txd->dst_addr; | 604 | bd.dstbus.addr = txd->dst_addr; |
605 | bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; | ||
611 | 606 | ||
612 | /* Find maximum width of the source bus */ | 607 | /* Find maximum width of the source bus */ |
613 | bd.srcbus.maxwidth = | 608 | bd.srcbus.maxwidth = |
@@ -622,25 +617,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
622 | /* Set up the bus widths to the maximum */ | 617 | /* Set up the bus widths to the maximum */ |
623 | bd.srcbus.buswidth = bd.srcbus.maxwidth; | 618 | bd.srcbus.buswidth = bd.srcbus.maxwidth; |
624 | bd.dstbus.buswidth = bd.dstbus.maxwidth; | 619 | bd.dstbus.buswidth = bd.dstbus.maxwidth; |
625 | dev_vdbg(&pl08x->adev->dev, | ||
626 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", | ||
627 | __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); | ||
628 | |||
629 | 620 | ||
630 | /* | 621 | /* |
631 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | 622 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) |
632 | */ | 623 | */ |
633 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * | 624 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * |
634 | PL080_CONTROL_TRANSFER_SIZE_MASK; | 625 | PL080_CONTROL_TRANSFER_SIZE_MASK; |
635 | dev_vdbg(&pl08x->adev->dev, | ||
636 | "%s max bytes per lli = %zu\n", | ||
637 | __func__, max_bytes_per_lli); | ||
638 | 626 | ||
639 | /* We need to count this down to zero */ | 627 | /* We need to count this down to zero */ |
640 | bd.remainder = txd->len; | 628 | bd.remainder = txd->len; |
641 | dev_vdbg(&pl08x->adev->dev, | ||
642 | "%s remainder = %zu\n", | ||
643 | __func__, bd.remainder); | ||
644 | 629 | ||
645 | /* | 630 | /* |
646 | * Choose bus to align to | 631 | * Choose bus to align to |
@@ -649,6 +634,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
649 | */ | 634 | */ |
650 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | 635 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); |
651 | 636 | ||
637 | dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", | ||
638 | bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", | ||
639 | bd.srcbus.buswidth, | ||
640 | bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", | ||
641 | bd.dstbus.buswidth, | ||
642 | bd.remainder, max_bytes_per_lli); | ||
643 | dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", | ||
644 | mbus == &bd.srcbus ? "src" : "dst", | ||
645 | sbus == &bd.srcbus ? "src" : "dst"); | ||
646 | |||
652 | if (txd->len < mbus->buswidth) { | 647 | if (txd->len < mbus->buswidth) { |
653 | /* Less than a bus width available - send as single bytes */ | 648 | /* Less than a bus width available - send as single bytes */ |
654 | while (bd.remainder) { | 649 | while (bd.remainder) { |
@@ -840,15 +835,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
840 | { | 835 | { |
841 | int i; | 836 | int i; |
842 | 837 | ||
838 | dev_vdbg(&pl08x->adev->dev, | ||
839 | "%-3s %-9s %-10s %-10s %-10s %s\n", | ||
840 | "lli", "", "csrc", "cdst", "clli", "cctl"); | ||
843 | for (i = 0; i < num_llis; i++) { | 841 | for (i = 0; i < num_llis; i++) { |
844 | dev_vdbg(&pl08x->adev->dev, | 842 | dev_vdbg(&pl08x->adev->dev, |
845 | "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", | 843 | "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", |
846 | i, | 844 | i, &llis_va[i], llis_va[i].src, |
847 | &llis_va[i], | 845 | llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl |
848 | llis_va[i].src, | ||
849 | llis_va[i].dst, | ||
850 | llis_va[i].cctl, | ||
851 | llis_va[i].lli | ||
852 | ); | 846 | ); |
853 | } | 847 | } |
854 | } | 848 | } |
@@ -1054,64 +1048,105 @@ pl08x_dma_tx_status(struct dma_chan *chan, | |||
1054 | 1048 | ||
1055 | /* PrimeCell DMA extension */ | 1049 | /* PrimeCell DMA extension */ |
1056 | struct burst_table { | 1050 | struct burst_table { |
1057 | int burstwords; | 1051 | u32 burstwords; |
1058 | u32 reg; | 1052 | u32 reg; |
1059 | }; | 1053 | }; |
1060 | 1054 | ||
1061 | static const struct burst_table burst_sizes[] = { | 1055 | static const struct burst_table burst_sizes[] = { |
1062 | { | 1056 | { |
1063 | .burstwords = 256, | 1057 | .burstwords = 256, |
1064 | .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1058 | .reg = PL080_BSIZE_256, |
1065 | (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1066 | }, | 1059 | }, |
1067 | { | 1060 | { |
1068 | .burstwords = 128, | 1061 | .burstwords = 128, |
1069 | .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1062 | .reg = PL080_BSIZE_128, |
1070 | (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1071 | }, | 1063 | }, |
1072 | { | 1064 | { |
1073 | .burstwords = 64, | 1065 | .burstwords = 64, |
1074 | .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1066 | .reg = PL080_BSIZE_64, |
1075 | (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1076 | }, | 1067 | }, |
1077 | { | 1068 | { |
1078 | .burstwords = 32, | 1069 | .burstwords = 32, |
1079 | .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1070 | .reg = PL080_BSIZE_32, |
1080 | (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1081 | }, | 1071 | }, |
1082 | { | 1072 | { |
1083 | .burstwords = 16, | 1073 | .burstwords = 16, |
1084 | .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1074 | .reg = PL080_BSIZE_16, |
1085 | (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1086 | }, | 1075 | }, |
1087 | { | 1076 | { |
1088 | .burstwords = 8, | 1077 | .burstwords = 8, |
1089 | .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1078 | .reg = PL080_BSIZE_8, |
1090 | (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1091 | }, | 1079 | }, |
1092 | { | 1080 | { |
1093 | .burstwords = 4, | 1081 | .burstwords = 4, |
1094 | .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1082 | .reg = PL080_BSIZE_4, |
1095 | (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1096 | }, | 1083 | }, |
1097 | { | 1084 | { |
1098 | .burstwords = 1, | 1085 | .burstwords = 0, |
1099 | .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1086 | .reg = PL080_BSIZE_1, |
1100 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1101 | }, | 1087 | }, |
1102 | }; | 1088 | }; |
1103 | 1089 | ||
1090 | /* | ||
1091 | * Given the source and destination available bus masks, select which | ||
1092 | * will be routed to each port. We try to have source and destination | ||
1093 | * on separate ports, but always respect the allowable settings. | ||
1094 | */ | ||
1095 | static u32 pl08x_select_bus(u8 src, u8 dst) | ||
1096 | { | ||
1097 | u32 cctl = 0; | ||
1098 | |||
1099 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1100 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1101 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1102 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1103 | |||
1104 | return cctl; | ||
1105 | } | ||
1106 | |||
1107 | static u32 pl08x_cctl(u32 cctl) | ||
1108 | { | ||
1109 | cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1110 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1111 | PL080_CONTROL_PROT_MASK); | ||
1112 | |||
1113 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1114 | return cctl | PL080_CONTROL_PROT_SYS; | ||
1115 | } | ||
1116 | |||
1117 | static u32 pl08x_width(enum dma_slave_buswidth width) | ||
1118 | { | ||
1119 | switch (width) { | ||
1120 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
1121 | return PL080_WIDTH_8BIT; | ||
1122 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
1123 | return PL080_WIDTH_16BIT; | ||
1124 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
1125 | return PL080_WIDTH_32BIT; | ||
1126 | default: | ||
1127 | return ~0; | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | static u32 pl08x_burst(u32 maxburst) | ||
1132 | { | ||
1133 | int i; | ||
1134 | |||
1135 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | ||
1136 | if (burst_sizes[i].burstwords <= maxburst) | ||
1137 | break; | ||
1138 | |||
1139 | return burst_sizes[i].reg; | ||
1140 | } | ||
1141 | |||
1104 | static int dma_set_runtime_config(struct dma_chan *chan, | 1142 | static int dma_set_runtime_config(struct dma_chan *chan, |
1105 | struct dma_slave_config *config) | 1143 | struct dma_slave_config *config) |
1106 | { | 1144 | { |
1107 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1145 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1108 | struct pl08x_driver_data *pl08x = plchan->host; | 1146 | struct pl08x_driver_data *pl08x = plchan->host; |
1109 | struct pl08x_channel_data *cd = plchan->cd; | ||
1110 | enum dma_slave_buswidth addr_width; | 1147 | enum dma_slave_buswidth addr_width; |
1111 | dma_addr_t addr; | 1148 | u32 width, burst, maxburst; |
1112 | u32 maxburst; | ||
1113 | u32 cctl = 0; | 1149 | u32 cctl = 0; |
1114 | int i; | ||
1115 | 1150 | ||
1116 | if (!plchan->slave) | 1151 | if (!plchan->slave) |
1117 | return -EINVAL; | 1152 | return -EINVAL; |
@@ -1119,11 +1154,9 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
1119 | /* Transfer direction */ | 1154 | /* Transfer direction */ |
1120 | plchan->runtime_direction = config->direction; | 1155 | plchan->runtime_direction = config->direction; |
1121 | if (config->direction == DMA_TO_DEVICE) { | 1156 | if (config->direction == DMA_TO_DEVICE) { |
1122 | addr = config->dst_addr; | ||
1123 | addr_width = config->dst_addr_width; | 1157 | addr_width = config->dst_addr_width; |
1124 | maxburst = config->dst_maxburst; | 1158 | maxburst = config->dst_maxburst; |
1125 | } else if (config->direction == DMA_FROM_DEVICE) { | 1159 | } else if (config->direction == DMA_FROM_DEVICE) { |
1126 | addr = config->src_addr; | ||
1127 | addr_width = config->src_addr_width; | 1160 | addr_width = config->src_addr_width; |
1128 | maxburst = config->src_maxburst; | 1161 | maxburst = config->src_maxburst; |
1129 | } else { | 1162 | } else { |
@@ -1132,46 +1165,40 @@ static int dma_set_runtime_config(struct dma_chan *chan, | |||
1132 | return -EINVAL; | 1165 | return -EINVAL; |
1133 | } | 1166 | } |
1134 | 1167 | ||
1135 | switch (addr_width) { | 1168 | width = pl08x_width(addr_width); |
1136 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 1169 | if (width == ~0) { |
1137 | cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1138 | (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1139 | break; | ||
1140 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
1141 | cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1142 | (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1143 | break; | ||
1144 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
1145 | cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1146 | (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1147 | break; | ||
1148 | default: | ||
1149 | dev_err(&pl08x->adev->dev, | 1170 | dev_err(&pl08x->adev->dev, |
1150 | "bad runtime_config: alien address width\n"); | 1171 | "bad runtime_config: alien address width\n"); |
1151 | return -EINVAL; | 1172 | return -EINVAL; |
1152 | } | 1173 | } |
1153 | 1174 | ||
1175 | cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; | ||
1176 | cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; | ||
1177 | |||
1154 | /* | 1178 | /* |
1155 | * Now decide on a maxburst: | ||
1156 | * If this channel will only request single transfers, set this | 1179 | * If this channel will only request single transfers, set this |
1157 | * down to ONE element. Also select one element if no maxburst | 1180 | * down to ONE element. Also select one element if no maxburst |
1158 | * is specified. | 1181 | * is specified. |
1159 | */ | 1182 | */ |
1160 | if (plchan->cd->single || maxburst == 0) { | 1183 | if (plchan->cd->single) |
1161 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1184 | maxburst = 1; |
1162 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | 1185 | |
1186 | burst = pl08x_burst(maxburst); | ||
1187 | cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; | ||
1188 | cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; | ||
1189 | |||
1190 | if (plchan->runtime_direction == DMA_FROM_DEVICE) { | ||
1191 | plchan->src_addr = config->src_addr; | ||
1192 | plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | | ||
1193 | pl08x_select_bus(plchan->cd->periph_buses, | ||
1194 | pl08x->mem_buses); | ||
1163 | } else { | 1195 | } else { |
1164 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | 1196 | plchan->dst_addr = config->dst_addr; |
1165 | if (burst_sizes[i].burstwords <= maxburst) | 1197 | plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | |
1166 | break; | 1198 | pl08x_select_bus(pl08x->mem_buses, |
1167 | cctl |= burst_sizes[i].reg; | 1199 | plchan->cd->periph_buses); |
1168 | } | 1200 | } |
1169 | 1201 | ||
1170 | plchan->runtime_addr = addr; | ||
1171 | |||
1172 | /* Modify the default channel data to fit PrimeCell request */ | ||
1173 | cd->cctl = cctl; | ||
1174 | |||
1175 | dev_dbg(&pl08x->adev->dev, | 1202 | dev_dbg(&pl08x->adev->dev, |
1176 | "configured channel %s (%s) for %s, data width %d, " | 1203 | "configured channel %s (%s) for %s, data width %d, " |
1177 | "maxburst %d words, LE, CCTL=0x%08x\n", | 1204 | "maxburst %d words, LE, CCTL=0x%08x\n", |
@@ -1270,23 +1297,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1270 | return 0; | 1297 | return 0; |
1271 | } | 1298 | } |
1272 | 1299 | ||
1273 | /* | ||
1274 | * Given the source and destination available bus masks, select which | ||
1275 | * will be routed to each port. We try to have source and destination | ||
1276 | * on separate ports, but always respect the allowable settings. | ||
1277 | */ | ||
1278 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
1279 | { | ||
1280 | u32 cctl = 0; | ||
1281 | |||
1282 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1283 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1284 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1285 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1286 | |||
1287 | return cctl; | ||
1288 | } | ||
1289 | |||
1290 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | 1300 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, |
1291 | unsigned long flags) | 1301 | unsigned long flags) |
1292 | { | 1302 | { |
@@ -1338,8 +1348,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1338 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | 1348 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; |
1339 | 1349 | ||
1340 | if (pl08x->vd->dualmaster) | 1350 | if (pl08x->vd->dualmaster) |
1341 | txd->cctl |= pl08x_select_bus(pl08x, | 1351 | txd->cctl |= pl08x_select_bus(pl08x->mem_buses, |
1342 | pl08x->mem_buses, pl08x->mem_buses); | 1352 | pl08x->mem_buses); |
1343 | 1353 | ||
1344 | ret = pl08x_prep_channel_resources(plchan, txd); | 1354 | ret = pl08x_prep_channel_resources(plchan, txd); |
1345 | if (ret) | 1355 | if (ret) |
@@ -1356,7 +1366,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1356 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1366 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1357 | struct pl08x_driver_data *pl08x = plchan->host; | 1367 | struct pl08x_driver_data *pl08x = plchan->host; |
1358 | struct pl08x_txd *txd; | 1368 | struct pl08x_txd *txd; |
1359 | u8 src_buses, dst_buses; | ||
1360 | int ret; | 1369 | int ret; |
1361 | 1370 | ||
1362 | /* | 1371 | /* |
@@ -1390,42 +1399,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1390 | txd->direction = direction; | 1399 | txd->direction = direction; |
1391 | txd->len = sgl->length; | 1400 | txd->len = sgl->length; |
1392 | 1401 | ||
1393 | txd->cctl = plchan->cd->cctl & | ||
1394 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1395 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1396 | PL080_CONTROL_PROT_MASK); | ||
1397 | |||
1398 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1399 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
1400 | |||
1401 | if (direction == DMA_TO_DEVICE) { | 1402 | if (direction == DMA_TO_DEVICE) { |
1402 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1403 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1403 | txd->cctl |= PL080_CONTROL_SRC_INCR; | 1404 | txd->cctl = plchan->dst_cctl; |
1404 | txd->src_addr = sgl->dma_address; | 1405 | txd->src_addr = sgl->dma_address; |
1405 | if (plchan->runtime_addr) | 1406 | txd->dst_addr = plchan->dst_addr; |
1406 | txd->dst_addr = plchan->runtime_addr; | ||
1407 | else | ||
1408 | txd->dst_addr = plchan->cd->addr; | ||
1409 | src_buses = pl08x->mem_buses; | ||
1410 | dst_buses = plchan->cd->periph_buses; | ||
1411 | } else if (direction == DMA_FROM_DEVICE) { | 1407 | } else if (direction == DMA_FROM_DEVICE) { |
1412 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | 1408 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1413 | txd->cctl |= PL080_CONTROL_DST_INCR; | 1409 | txd->cctl = plchan->src_cctl; |
1414 | if (plchan->runtime_addr) | 1410 | txd->src_addr = plchan->src_addr; |
1415 | txd->src_addr = plchan->runtime_addr; | ||
1416 | else | ||
1417 | txd->src_addr = plchan->cd->addr; | ||
1418 | txd->dst_addr = sgl->dma_address; | 1411 | txd->dst_addr = sgl->dma_address; |
1419 | src_buses = plchan->cd->periph_buses; | ||
1420 | dst_buses = pl08x->mem_buses; | ||
1421 | } else { | 1412 | } else { |
1422 | dev_err(&pl08x->adev->dev, | 1413 | dev_err(&pl08x->adev->dev, |
1423 | "%s direction unsupported\n", __func__); | 1414 | "%s direction unsupported\n", __func__); |
1424 | return NULL; | 1415 | return NULL; |
1425 | } | 1416 | } |
1426 | 1417 | ||
1427 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); | ||
1428 | |||
1429 | ret = pl08x_prep_channel_resources(plchan, txd); | 1418 | ret = pl08x_prep_channel_resources(plchan, txd); |
1430 | if (ret) | 1419 | if (ret) |
1431 | return NULL; | 1420 | return NULL; |
@@ -1676,6 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1676 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1665 | return mask ? IRQ_HANDLED : IRQ_NONE; |
1677 | } | 1666 | } |
1678 | 1667 | ||
1668 | static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) | ||
1669 | { | ||
1670 | u32 cctl = pl08x_cctl(chan->cd->cctl); | ||
1671 | |||
1672 | chan->slave = true; | ||
1673 | chan->name = chan->cd->bus_id; | ||
1674 | chan->src_addr = chan->cd->addr; | ||
1675 | chan->dst_addr = chan->cd->addr; | ||
1676 | chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | | ||
1677 | pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); | ||
1678 | chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | | ||
1679 | pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); | ||
1680 | } | ||
1681 | |||
1679 | /* | 1682 | /* |
1680 | * Initialise the DMAC memcpy/slave channels. | 1683 | * Initialise the DMAC memcpy/slave channels. |
1681 | * Make a local wrapper to hold required data | 1684 | * Make a local wrapper to hold required data |
@@ -1707,9 +1710,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1707 | chan->state = PL08X_CHAN_IDLE; | 1710 | chan->state = PL08X_CHAN_IDLE; |
1708 | 1711 | ||
1709 | if (slave) { | 1712 | if (slave) { |
1710 | chan->slave = true; | ||
1711 | chan->name = pl08x->pd->slave_channels[i].bus_id; | ||
1712 | chan->cd = &pl08x->pd->slave_channels[i]; | 1713 | chan->cd = &pl08x->pd->slave_channels[i]; |
1714 | pl08x_dma_slave_init(chan); | ||
1713 | } else { | 1715 | } else { |
1714 | chan->cd = &pl08x->pd->memcpy_channel; | 1716 | chan->cd = &pl08x->pd->memcpy_channel; |
1715 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | 1717 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); |
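pl08x_select_bus() above tries to put source and destination on different AHB masters whenever the platform masks allow it. A stand-alone model of just that decision, assuming illustrative values for the PL08X_AHB1/PL08X_AHB2 masks and the CCTL bits (the real definitions live in the pl08x headers):

#include <stdio.h>

#define AHB1 0x1
#define AHB2 0x2
#define CCTL_DST_AHB2 (1u << 0)	/* illustrative bit positions only */
#define CCTL_SRC_AHB2 (1u << 1)

static unsigned select_bus(unsigned src, unsigned dst)
{
	unsigned cctl = 0;

	/* Route the destination to AHB2 if it must be there, or if doing
	 * so frees AHB1 for the source; mirror logic for the source. */
	if (!(dst & AHB1) || ((dst & AHB2) && (src & AHB1)))
		cctl |= CCTL_DST_AHB2;
	if (!(src & AHB1) || ((src & AHB2) && !(dst & AHB2)))
		cctl |= CCTL_SRC_AHB2;
	return cctl;
}

int main(void)
{
	/* memory (both masters) -> peripheral reachable on AHB2 only */
	printf("mem->periph: cctl=0x%x\n", select_bus(AHB1 | AHB2, AHB2));
	/* memcpy on a dual-master controller: both ends on memory buses */
	printf("mem->mem:    cctl=0x%x\n", select_bus(AHB1 | AHB2, AHB1 | AHB2));
	return 0;
}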
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 36144f88d718..6a483eac7b3f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1216 | atdma->dma_common.cap_mask = pdata->cap_mask; | 1216 | atdma->dma_common.cap_mask = pdata->cap_mask; |
1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1217 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1218 | 1218 | ||
1219 | size = io->end - io->start + 1; | 1219 | size = resource_size(io); |
1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { | 1220 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { |
1221 | err = -EBUSY; | 1221 | err = -EBUSY; |
1222 | goto err_kfree; | 1222 | goto err_kfree; |
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) | |||
1362 | atdma->regs = NULL; | 1362 | atdma->regs = NULL; |
1363 | 1363 | ||
1364 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1364 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1365 | release_mem_region(io->start, io->end - io->start + 1); | 1365 | release_mem_region(io->start, resource_size(io)); |
1366 | 1366 | ||
1367 | kfree(atdma); | 1367 | kfree(atdma); |
1368 | 1368 | ||
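The at_hdmac conversion (and the similar ipu_idmac one later in this diff) relies on resource_size() being exactly the open-coded end - start + 1 for an inclusive resource range. A tiny stand-alone check of that equivalence, using a reduced struct resource stand-in rather than the kernel type:

#include <stdio.h>

struct resource {
	unsigned long start;
	unsigned long end;	/* inclusive */
};

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource io = { .start = 0xffffe000, .end = 0xffffe1ff };

	printf("open-coded: %lu\n", io.end - io.start + 1);	/* 512 */
	printf("helper:     %lu\n", resource_size(&io));	/* 512 */
	return 0;
}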
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index a92d95eac86b..4234f416ef11 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -41,6 +41,8 @@ struct coh901318_desc { | |||
41 | struct coh901318_lli *lli; | 41 | struct coh901318_lli *lli; |
42 | enum dma_data_direction dir; | 42 | enum dma_data_direction dir; |
43 | unsigned long flags; | 43 | unsigned long flags; |
44 | u32 head_config; | ||
45 | u32 head_ctrl; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | struct coh901318_base { | 48 | struct coh901318_base { |
@@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) | |||
661 | 663 | ||
662 | coh901318_desc_submit(cohc, cohd); | 664 | coh901318_desc_submit(cohc, cohd); |
663 | 665 | ||
666 | /* Program the transaction head */ | ||
667 | coh901318_set_conf(cohc, cohd->head_config); | ||
668 | coh901318_set_ctrl(cohc, cohd->head_ctrl); | ||
664 | coh901318_prep_linked_list(cohc, cohd->lli); | 669 | coh901318_prep_linked_list(cohc, cohd->lli); |
665 | 670 | ||
666 | /* start dma job on this channel */ | 671 | /* start dma job on this channel */ |
@@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1091 | } else | 1096 | } else |
1092 | goto err_direction; | 1097 | goto err_direction; |
1093 | 1098 | ||
1094 | coh901318_set_conf(cohc, config); | ||
1095 | |||
1096 | /* The dma only supports transmitting packages up to | 1099 | /* The dma only supports transmitting packages up to |
1097 | * MAX_DMA_PACKET_SIZE. Calculate to total number of | 1100 | * MAX_DMA_PACKET_SIZE. Calculate to total number of |
1098 | * dma elemts required to send the entire sg list | 1101 | * dma elemts required to send the entire sg list |
@@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1129 | if (ret) | 1132 | if (ret) |
1130 | goto err_lli_fill; | 1133 | goto err_lli_fill; |
1131 | 1134 | ||
1132 | /* | ||
1133 | * Set the default ctrl for the channel to the one from the lli, | ||
1134 | * things may have changed due to odd buffer alignment etc. | ||
1135 | */ | ||
1136 | coh901318_set_ctrl(cohc, lli->control); | ||
1137 | 1135 | ||
1138 | COH_DBG(coh901318_list_print(cohc, lli)); | 1136 | COH_DBG(coh901318_list_print(cohc, lli)); |
1139 | 1137 | ||
1140 | /* Pick a descriptor to handle this transfer */ | 1138 | /* Pick a descriptor to handle this transfer */ |
1141 | cohd = coh901318_desc_get(cohc); | 1139 | cohd = coh901318_desc_get(cohc); |
1140 | cohd->head_config = config; | ||
1141 | /* | ||
1142 | * Set the default head ctrl for the channel to the one from the | ||
1143 | * lli, things may have changed due to odd buffer alignment | ||
1144 | * etc. | ||
1145 | */ | ||
1146 | cohd->head_ctrl = lli->control; | ||
1142 | cohd->dir = direction; | 1147 | cohd->dir = direction; |
1143 | cohd->flags = flags; | 1148 | cohd->flags = flags; |
1144 | cohd->desc.tx_submit = coh901318_tx_submit; | 1149 | cohd->desc.tx_submit = coh901318_tx_submit; |
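The coh901318 change stops programming the channel's configuration and control registers at prepare time; the values are stashed in the descriptor (head_config/head_ctrl) and written only when the descriptor is started, so preparing a new job cannot disturb a transfer already in flight. A toy model of that "record at prep, program at start" split, with invented names and fake registers:

#include <stdio.h>

static unsigned hw_conf, hw_ctrl;	/* fake "hardware" registers */

struct desc {
	unsigned head_config;	/* mirrors cohd->head_config */
	unsigned head_ctrl;	/* mirrors cohd->head_ctrl */
};

/* prep: only record what the transfer will need */
static void desc_prep(struct desc *d, unsigned conf, unsigned ctrl)
{
	d->head_config = conf;
	d->head_ctrl = ctrl;
}

/* start: program the hardware just before the job actually runs */
static void desc_start(const struct desc *d)
{
	hw_conf = d->head_config;
	hw_ctrl = d->head_ctrl;
}

int main(void)
{
	struct desc d;

	desc_prep(&d, 0x11, 0x22);
	/* ... another transfer could still be running here ... */
	desc_start(&d);
	printf("conf=0x%x ctrl=0x%x\n", hw_conf, hw_ctrl);
	return 0;
}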
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 48694c34d96b..b48967b499da 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -62,9 +62,9 @@ | |||
62 | #include <linux/slab.h> | 62 | #include <linux/slab.h> |
63 | 63 | ||
64 | static DEFINE_MUTEX(dma_list_mutex); | 64 | static DEFINE_MUTEX(dma_list_mutex); |
65 | static DEFINE_IDR(dma_idr); | ||
65 | static LIST_HEAD(dma_device_list); | 66 | static LIST_HEAD(dma_device_list); |
66 | static long dmaengine_ref_count; | 67 | static long dmaengine_ref_count; |
67 | static struct idr dma_idr; | ||
68 | 68 | ||
69 | /* --- sysfs implementation --- */ | 69 | /* --- sysfs implementation --- */ |
70 | 70 | ||
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v | |||
510 | dma_chan_name(chan)); | 510 | dma_chan_name(chan)); |
511 | list_del_rcu(&device->global_node); | 511 | list_del_rcu(&device->global_node); |
512 | } else if (err) | 512 | } else if (err) |
513 | pr_err("dmaengine: failed to get %s: (%d)\n", | 513 | pr_debug("dmaengine: failed to get %s: (%d)\n", |
514 | dma_chan_name(chan), err); | 514 | dma_chan_name(chan), err); |
515 | else | 515 | else |
516 | break; | 516 | break; |
517 | if (--device->privatecnt == 0) | 517 | if (--device->privatecnt == 0) |
@@ -1050,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); | |||
1050 | 1050 | ||
1051 | static int __init dma_bus_init(void) | 1051 | static int __init dma_bus_init(void) |
1052 | { | 1052 | { |
1053 | idr_init(&dma_idr); | ||
1054 | mutex_init(&dma_list_mutex); | ||
1055 | return class_register(&dma_devclass); | 1053 | return class_register(&dma_devclass); |
1056 | } | 1054 | } |
1057 | arch_initcall(dma_bus_init); | 1055 | arch_initcall(dma_bus_init); |
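The dmaengine hunk switches dma_idr to a static DEFINE_IDR() initializer and drops the now-redundant runtime init calls from dma_bus_init(), so the objects are valid from load time rather than only after the initcall has run. A stand-alone illustration of that difference, using an invented "registry" struct in place of the kernel idr:

#include <stdio.h>

struct registry {
	int next_id;
	int ready;
};

/* static initialization: valid from the moment the program loads */
static struct registry static_reg = { .next_id = 0, .ready = 1 };

/* runtime initialization: only valid after init_registry() runs */
static struct registry runtime_reg;
static void init_registry(struct registry *r)
{
	r->next_id = 0;
	r->ready = 1;
}

int main(void)
{
	/* an early user, before any initcall-style setup has happened */
	printf("static ready=%d, runtime ready=%d\n",
	       static_reg.ready, runtime_reg.ready);	/* 1, 0 */
	init_registry(&runtime_reg);
	printf("after init: runtime ready=%d\n", runtime_reg.ready);
	return 0;
}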
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 0766c1e53b1d..5d7a49bd7c26 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) | |||
902 | * | 902 | * |
903 | * Returns a valid DMA descriptor or %NULL in case of failure. | 903 | * Returns a valid DMA descriptor or %NULL in case of failure. |
904 | */ | 904 | */ |
905 | struct dma_async_tx_descriptor * | 905 | static struct dma_async_tx_descriptor * |
906 | ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, | 906 | ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, |
907 | dma_addr_t src, size_t len, unsigned long flags) | 907 | dma_addr_t src, size_t len, unsigned long flags) |
908 | { | 908 | { |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 1eb60ded2f0d..7bd7e98548cd 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1305,8 +1305,10 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1305 | goto err_request_irq; | 1305 | goto err_request_irq; |
1306 | 1306 | ||
1307 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); | 1307 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); |
1308 | if (!sdma->script_addrs) | 1308 | if (!sdma->script_addrs) { |
1309 | ret = -ENOMEM; | ||
1309 | goto err_alloc; | 1310 | goto err_alloc; |
1311 | } | ||
1310 | 1312 | ||
1311 | if (of_id) | 1313 | if (of_id) |
1312 | pdev->id_entry = of_id->data; | 1314 | pdev->id_entry = of_id->data; |
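The imx-sdma hunk adds the missing error code before the goto; without it the probe path would return whatever ret last held, typically 0, and report success despite the failed allocation. A minimal stand-alone model of that bug and its fix:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int probe_model(int fail_alloc)
{
	int ret = 0;	/* left over from an earlier successful step */
	void *buf = fail_alloc ? NULL : malloc(16);

	if (!buf) {
		ret = -ENOMEM;	/* the line the patch adds */
		goto err_alloc;
	}
	free(buf);
	return 0;

err_alloc:
	return ret;
}

int main(void)
{
	printf("failed probe returns %d\n", probe_model(1));	/* -12 */
	return 0;
}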
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index f653517ef744..8a3fdd87db97 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) | |||
1351 | return -EAGAIN; | 1351 | return -EAGAIN; |
1352 | } | 1352 | } |
1353 | device->state = SUSPENDED; | 1353 | device->state = SUSPENDED; |
1354 | pci_set_drvdata(pci, device); | ||
1355 | pci_save_state(pci); | 1354 | pci_save_state(pci); |
1356 | pci_disable_device(pci); | 1355 | pci_disable_device(pci); |
1357 | pci_set_power_state(pci, PCI_D3hot); | 1356 | pci_set_power_state(pci, PCI_D3hot); |
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci) | |||
1380 | } | 1379 | } |
1381 | device->state = RUNNING; | 1380 | device->state = RUNNING; |
1382 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | 1381 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); |
1383 | pci_set_drvdata(pci, device); | ||
1384 | return 0; | 1382 | return 0; |
1385 | } | 1383 | } |
1386 | 1384 | ||
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d845dc4b7103..f519c93a61e7 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -73,10 +73,10 @@ | |||
73 | /* provide a lookup table for setting the source address in the base or | 73 | /* provide a lookup table for setting the source address in the base or |
74 | * extended descriptor of an xor or pq descriptor | 74 | * extended descriptor of an xor or pq descriptor |
75 | */ | 75 | */ |
76 | static const u8 xor_idx_to_desc __read_mostly = 0xd0; | 76 | static const u8 xor_idx_to_desc = 0xe0; |
77 | static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; | 77 | static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; |
78 | static const u8 pq_idx_to_desc __read_mostly = 0xf8; | 78 | static const u8 pq_idx_to_desc = 0xf8; |
79 | static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 }; | 79 | static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; |
80 | 80 | ||
81 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) | 81 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) |
82 | { | 82 | { |
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index fab37d1cf48d..5e3a40f79945 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
@@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
72 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, | 72 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, |
73 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, | 73 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, |
74 | 74 | ||
75 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, | ||
76 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, | ||
77 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, | ||
78 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, | ||
79 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, | ||
80 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, | ||
81 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, | ||
82 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, | ||
83 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, | ||
84 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, | ||
85 | |||
75 | { 0, } | 86 | { 0, } |
76 | }; | 87 | }; |
77 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 88 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index fd7d2b308cf2..6815905a772f 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev) | |||
1706 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); | 1706 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); |
1707 | 1707 | ||
1708 | /* Remap IPU common registers */ | 1708 | /* Remap IPU common registers */ |
1709 | ipu_data.reg_ipu = ioremap(mem_ipu->start, | 1709 | ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); |
1710 | mem_ipu->end - mem_ipu->start + 1); | ||
1711 | if (!ipu_data.reg_ipu) { | 1710 | if (!ipu_data.reg_ipu) { |
1712 | ret = -ENOMEM; | 1711 | ret = -ENOMEM; |
1713 | goto err_ioremap_ipu; | 1712 | goto err_ioremap_ipu; |
1714 | } | 1713 | } |
1715 | 1714 | ||
1716 | /* Remap Image Converter and Image DMA Controller registers */ | 1715 | /* Remap Image Converter and Image DMA Controller registers */ |
1717 | ipu_data.reg_ic = ioremap(mem_ic->start, | 1716 | ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); |
1718 | mem_ic->end - mem_ic->start + 1); | ||
1719 | if (!ipu_data.reg_ic) { | 1717 | if (!ipu_data.reg_ic) { |
1720 | ret = -ENOMEM; | 1718 | ret = -ENOMEM; |
1721 | goto err_ioremap_ic; | 1719 | goto err_ioremap_ic; |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 06f9f27dbe7c..9a353c2216d0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev) | |||
1304 | if (!res) | 1304 | if (!res) |
1305 | return -ENODEV; | 1305 | return -ENODEV; |
1306 | 1306 | ||
1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); | 1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, |
1308 | resource_size(res)); | ||
1308 | if (!msp->xor_base) | 1309 | if (!msp->xor_base) |
1309 | return -EBUSY; | 1310 | return -EBUSY; |
1310 | 1311 | ||
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 88aad4f54002..be641cbd36fc 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
327 | 327 | ||
328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | 328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); |
329 | 329 | ||
330 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 330 | if (mxs_chan->chan_irq != NO_IRQ) { |
331 | 0, "mxs-dma", mxs_dma); | 331 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
332 | if (ret) | 332 | 0, "mxs-dma", mxs_dma); |
333 | goto err_irq; | 333 | if (ret) |
334 | goto err_irq; | ||
335 | } | ||
334 | 336 | ||
335 | ret = clk_enable(mxs_dma->clk); | 337 | ret = clk_enable(mxs_dma->clk); |
336 | if (ret) | 338 | if (ret) |
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
535 | switch (cmd) { | 537 | switch (cmd) { |
536 | case DMA_TERMINATE_ALL: | 538 | case DMA_TERMINATE_ALL: |
537 | mxs_dma_disable_chan(mxs_chan); | 539 | mxs_dma_disable_chan(mxs_chan); |
540 | mxs_dma_reset_chan(mxs_chan); | ||
538 | break; | 541 | break; |
539 | case DMA_PAUSE: | 542 | case DMA_PAUSE: |
540 | mxs_dma_pause_chan(mxs_chan); | 543 | mxs_dma_pause_chan(mxs_chan); |
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = { | |||
707 | }, { | 710 | }, { |
708 | .name = "mxs-dma-apbx", | 711 | .name = "mxs-dma-apbx", |
709 | .driver_data = MXS_DMA_APBX, | 712 | .driver_data = MXS_DMA_APBX, |
713 | }, { | ||
714 | /* end of list */ | ||
710 | } | 715 | } |
711 | }; | 716 | }; |
712 | 717 | ||
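The mxs-dma changes include terminating the platform_device_id table with an empty entry, which the id-matching code uses as its end marker; without it the lookup walks off the end of the array. A reduced model of that lookup, with simplified stand-ins for platform_device_id and platform_match_id() and invented driver_data values:

#include <stdio.h>
#include <string.h>

struct dev_id {
	const char *name;
	unsigned long driver_data;
};

static const struct dev_id mxs_dma_type[] = {
	{ .name = "mxs-dma-apbh", .driver_data = 1 },
	{ .name = "mxs-dma-apbx", .driver_data = 2 },
	{ /* end of list */ },
};

static const struct dev_id *match_id(const struct dev_id *id, const char *name)
{
	/* stop at the sentinel entry with no name */
	while (id->name && id->name[0]) {
		if (!strcmp(id->name, name))
			return id;
		id++;
	}
	return NULL;
}

int main(void)
{
	const struct dev_id *id = match_id(mxs_dma_type, "mxs-dma-apbx");

	printf("%s -> %lu\n", id ? id->name : "none",
	       id ? id->driver_data : 0UL);
	return 0;
}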
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ff5b38f9d45b..1ac8d4b580b7 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -45,7 +45,8 @@ | |||
45 | #define DMA_STATUS_MASK_BITS 0x3 | 45 | #define DMA_STATUS_MASK_BITS 0x3 |
46 | #define DMA_STATUS_SHIFT_BITS 16 | 46 | #define DMA_STATUS_SHIFT_BITS 16 |
47 | #define DMA_STATUS_IRQ(x) (0x1 << (x)) | 47 | #define DMA_STATUS_IRQ(x) (0x1 << (x)) |
48 | #define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) | 48 | #define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8)) |
49 | #define DMA_STATUS2_ERR(x) (0x1 << (x)) | ||
49 | 50 | ||
50 | #define DMA_DESC_WIDTH_SHIFT_BITS 12 | 51 | #define DMA_DESC_WIDTH_SHIFT_BITS 12 |
51 | #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) | 52 | #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) |
@@ -61,6 +62,9 @@ | |||
61 | 62 | ||
62 | #define MAX_CHAN_NR 8 | 63 | #define MAX_CHAN_NR 8 |
63 | 64 | ||
65 | #define DMA_MASK_CTL0_MODE 0x33333333 | ||
66 | #define DMA_MASK_CTL2_MODE 0x00003333 | ||
67 | |||
64 | static unsigned int init_nr_desc_per_channel = 64; | 68 | static unsigned int init_nr_desc_per_channel = 64; |
65 | module_param(init_nr_desc_per_channel, uint, 0644); | 69 | module_param(init_nr_desc_per_channel, uint, 0644); |
66 | MODULE_PARM_DESC(init_nr_desc_per_channel, | 70 | MODULE_PARM_DESC(init_nr_desc_per_channel, |
@@ -133,6 +137,7 @@ struct pch_dma { | |||
133 | #define PCH_DMA_CTL3 0x0C | 137 | #define PCH_DMA_CTL3 0x0C |
134 | #define PCH_DMA_STS0 0x10 | 138 | #define PCH_DMA_STS0 0x10 |
135 | #define PCH_DMA_STS1 0x14 | 139 | #define PCH_DMA_STS1 0x14 |
140 | #define PCH_DMA_STS2 0x18 | ||
136 | 141 | ||
137 | #define dma_readl(pd, name) \ | 142 | #define dma_readl(pd, name) \ |
138 | readl((pd)->membase + PCH_DMA_##name) | 143 | readl((pd)->membase + PCH_DMA_##name) |
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable) | |||
183 | { | 188 | { |
184 | struct pch_dma *pd = to_pd(chan->device); | 189 | struct pch_dma *pd = to_pd(chan->device); |
185 | u32 val; | 190 | u32 val; |
191 | int pos; | ||
192 | |||
193 | if (chan->chan_id < 8) | ||
194 | pos = chan->chan_id; | ||
195 | else | ||
196 | pos = chan->chan_id + 8; | ||
186 | 197 | ||
187 | val = dma_readl(pd, CTL2); | 198 | val = dma_readl(pd, CTL2); |
188 | 199 | ||
189 | if (enable) | 200 | if (enable) |
190 | val |= 0x1 << chan->chan_id; | 201 | val |= 0x1 << pos; |
191 | else | 202 | else |
192 | val &= ~(0x1 << chan->chan_id); | 203 | val &= ~(0x1 << pos); |
193 | 204 | ||
194 | dma_writel(pd, CTL2, val); | 205 | dma_writel(pd, CTL2, val); |
195 | 206 | ||
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
202 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 213 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
203 | struct pch_dma *pd = to_pd(chan->device); | 214 | struct pch_dma *pd = to_pd(chan->device); |
204 | u32 val; | 215 | u32 val; |
216 | u32 mask_mode; | ||
217 | u32 mask_ctl; | ||
205 | 218 | ||
206 | if (chan->chan_id < 8) { | 219 | if (chan->chan_id < 8) { |
207 | val = dma_readl(pd, CTL0); | 220 | val = dma_readl(pd, CTL0); |
208 | 221 | ||
222 | mask_mode = DMA_CTL0_MODE_MASK_BITS << | ||
223 | (DMA_CTL0_BITS_PER_CH * chan->chan_id); | ||
224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
226 | val &= mask_mode; | ||
209 | if (pd_chan->dir == DMA_TO_DEVICE) | 227 | if (pd_chan->dir == DMA_TO_DEVICE) |
210 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
211 | DMA_CTL0_DIR_SHIFT_BITS); | 229 | DMA_CTL0_DIR_SHIFT_BITS); |
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
213 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 231 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
214 | DMA_CTL0_DIR_SHIFT_BITS)); | 232 | DMA_CTL0_DIR_SHIFT_BITS)); |
215 | 233 | ||
234 | val |= mask_ctl; | ||
216 | dma_writel(pd, CTL0, val); | 235 | dma_writel(pd, CTL0, val); |
217 | } else { | 236 | } else { |
218 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | 237 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
219 | val = dma_readl(pd, CTL3); | 238 | val = dma_readl(pd, CTL3); |
220 | 239 | ||
240 | mask_mode = DMA_CTL0_MODE_MASK_BITS << | ||
241 | (DMA_CTL0_BITS_PER_CH * ch); | ||
242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
243 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
244 | val &= mask_mode; | ||
221 | if (pd_chan->dir == DMA_TO_DEVICE) | 245 | if (pd_chan->dir == DMA_TO_DEVICE) |
222 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
223 | DMA_CTL0_DIR_SHIFT_BITS); | 247 | DMA_CTL0_DIR_SHIFT_BITS); |
224 | else | 248 | else |
225 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 249 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
226 | DMA_CTL0_DIR_SHIFT_BITS)); | 250 | DMA_CTL0_DIR_SHIFT_BITS)); |
227 | 251 | val |= mask_ctl; | |
228 | dma_writel(pd, CTL3, val); | 252 | dma_writel(pd, CTL3, val); |
229 | } | 253 | } |
230 | 254 | ||
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) | |||
236 | { | 260 | { |
237 | struct pch_dma *pd = to_pd(chan->device); | 261 | struct pch_dma *pd = to_pd(chan->device); |
238 | u32 val; | 262 | u32 val; |
263 | u32 mask_ctl; | ||
264 | u32 mask_dir; | ||
239 | 265 | ||
240 | if (chan->chan_id < 8) { | 266 | if (chan->chan_id < 8) { |
267 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | ||
268 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
269 | mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\ | ||
270 | DMA_CTL0_DIR_SHIFT_BITS); | ||
241 | val = dma_readl(pd, CTL0); | 271 | val = dma_readl(pd, CTL0); |
242 | 272 | val &= mask_dir; | |
243 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
244 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | ||
245 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); | 273 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); |
246 | 274 | val |= mask_ctl; | |
247 | dma_writel(pd, CTL0, val); | 275 | dma_writel(pd, CTL0, val); |
248 | } else { | 276 | } else { |
249 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | 277 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ |
250 | 278 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | |
279 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
280 | mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\ | ||
281 | DMA_CTL0_DIR_SHIFT_BITS); | ||
251 | val = dma_readl(pd, CTL3); | 282 | val = dma_readl(pd, CTL3); |
252 | 283 | val &= mask_dir; | |
253 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
254 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
255 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); | 284 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); |
256 | 285 | val |= mask_ctl; | |
257 | dma_writel(pd, CTL3, val); | 286 | dma_writel(pd, CTL3, val); |
258 | |||
259 | } | 287 | } |
260 | 288 | ||
261 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", | 289 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", |
262 | chan->chan_id, val); | 290 | chan->chan_id, val); |
263 | } | 291 | } |
264 | 292 | ||
265 | static u32 pdc_get_status(struct pch_dma_chan *pd_chan) | 293 | static u32 pdc_get_status0(struct pch_dma_chan *pd_chan) |
266 | { | 294 | { |
267 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | 295 | struct pch_dma *pd = to_pd(pd_chan->chan.device); |
268 | u32 val; | 296 | u32 val; |
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan) | |||
272 | DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); | 300 | DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); |
273 | } | 301 | } |
274 | 302 | ||
303 | static u32 pdc_get_status2(struct pch_dma_chan *pd_chan) | ||
304 | { | ||
305 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | ||
306 | u32 val; | ||
307 | |||
308 | val = dma_readl(pd, STS2); | ||
309 | return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + | ||
310 | DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); | ||
311 | } | ||
312 | |||
275 | static bool pdc_is_idle(struct pch_dma_chan *pd_chan) | 313 | static bool pdc_is_idle(struct pch_dma_chan *pd_chan) |
276 | { | 314 | { |
277 | if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) | 315 | u32 sts; |
316 | |||
317 | if (pd_chan->chan.chan_id < 8) | ||
318 | sts = pdc_get_status0(pd_chan); | ||
319 | else | ||
320 | sts = pdc_get_status2(pd_chan); | ||
321 | |||
322 | |||
323 | if (sts == DMA_STATUS_IDLE) | ||
278 | return true; | 324 | return true; |
279 | else | 325 | else |
280 | return false; | 326 | return false; |
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
495 | list_add_tail(&desc->desc_node, &tmp_list); | 541 | list_add_tail(&desc->desc_node, &tmp_list); |
496 | } | 542 | } |
497 | 543 | ||
498 | spin_lock_bh(&pd_chan->lock); | 544 | spin_lock_irq(&pd_chan->lock); |
499 | list_splice(&tmp_list, &pd_chan->free_list); | 545 | list_splice(&tmp_list, &pd_chan->free_list); |
500 | pd_chan->descs_allocated = i; | 546 | pd_chan->descs_allocated = i; |
501 | pd_chan->completed_cookie = chan->cookie = 1; | 547 | pd_chan->completed_cookie = chan->cookie = 1; |
502 | spin_unlock_bh(&pd_chan->lock); | 548 | spin_unlock_irq(&pd_chan->lock); |
503 | 549 | ||
504 | pdc_enable_irq(chan, 1); | 550 | pdc_enable_irq(chan, 1); |
505 | 551 | ||
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan) | |||
517 | BUG_ON(!list_empty(&pd_chan->active_list)); | 563 | BUG_ON(!list_empty(&pd_chan->active_list)); |
518 | BUG_ON(!list_empty(&pd_chan->queue)); | 564 | BUG_ON(!list_empty(&pd_chan->queue)); |
519 | 565 | ||
520 | spin_lock_bh(&pd_chan->lock); | 566 | spin_lock_irq(&pd_chan->lock); |
521 | list_splice_init(&pd_chan->free_list, &tmp_list); | 567 | list_splice_init(&pd_chan->free_list, &tmp_list); |
522 | pd_chan->descs_allocated = 0; | 568 | pd_chan->descs_allocated = 0; |
523 | spin_unlock_bh(&pd_chan->lock); | 569 | spin_unlock_irq(&pd_chan->lock); |
524 | 570 | ||
525 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) | 571 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) |
526 | pci_pool_free(pd->pool, desc, desc->txd.phys); | 572 | pci_pool_free(pd->pool, desc, desc->txd.phys); |
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
536 | dma_cookie_t last_completed; | 582 | dma_cookie_t last_completed; |
537 | int ret; | 583 | int ret; |
538 | 584 | ||
539 | spin_lock_bh(&pd_chan->lock); | 585 | spin_lock_irq(&pd_chan->lock); |
540 | last_completed = pd_chan->completed_cookie; | 586 | last_completed = pd_chan->completed_cookie; |
541 | last_used = chan->cookie; | 587 | last_used = chan->cookie; |
542 | spin_unlock_bh(&pd_chan->lock); | 588 | spin_unlock_irq(&pd_chan->lock); |
543 | 589 | ||
544 | ret = dma_async_is_complete(cookie, last_completed, last_used); | 590 | ret = dma_async_is_complete(cookie, last_completed, last_used); |
545 | 591 | ||
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
654 | if (cmd != DMA_TERMINATE_ALL) | 700 | if (cmd != DMA_TERMINATE_ALL) |
655 | return -ENXIO; | 701 | return -ENXIO; |
656 | 702 | ||
657 | spin_lock_bh(&pd_chan->lock); | 703 | spin_lock_irq(&pd_chan->lock); |
658 | 704 | ||
659 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); | 705 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); |
660 | 706 | ||
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
664 | list_for_each_entry_safe(desc, _d, &list, desc_node) | 710 | list_for_each_entry_safe(desc, _d, &list, desc_node) |
665 | pdc_chain_complete(pd_chan, desc); | 711 | pdc_chain_complete(pd_chan, desc); |
666 | 712 | ||
667 | spin_unlock_bh(&pd_chan->lock); | 713 | spin_unlock_irq(&pd_chan->lock); |
668 | 714 | ||
669 | return 0; | 715 | return 0; |
670 | } | 716 | } |
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid) | |||
693 | struct pch_dma *pd = (struct pch_dma *)devid; | 739 | struct pch_dma *pd = (struct pch_dma *)devid; |
694 | struct pch_dma_chan *pd_chan; | 740 | struct pch_dma_chan *pd_chan; |
695 | u32 sts0; | 741 | u32 sts0; |
742 | u32 sts2; | ||
696 | int i; | 743 | int i; |
697 | int ret = IRQ_NONE; | 744 | int ret0 = IRQ_NONE; |
745 | int ret2 = IRQ_NONE; | ||
698 | 746 | ||
699 | sts0 = dma_readl(pd, STS0); | 747 | sts0 = dma_readl(pd, STS0); |
748 | sts2 = dma_readl(pd, STS2); | ||
700 | 749 | ||
701 | dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); | 750 | dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); |
702 | 751 | ||
703 | for (i = 0; i < pd->dma.chancnt; i++) { | 752 | for (i = 0; i < pd->dma.chancnt; i++) { |
704 | pd_chan = &pd->channels[i]; | 753 | pd_chan = &pd->channels[i]; |
705 | 754 | ||
706 | if (sts0 & DMA_STATUS_IRQ(i)) { | 755 | if (i < 8) { |
707 | if (sts0 & DMA_STATUS_ERR(i)) | 756 | if (sts0 & DMA_STATUS_IRQ(i)) { |
708 | set_bit(0, &pd_chan->err_status); | 757 | if (sts0 & DMA_STATUS0_ERR(i)) |
758 | set_bit(0, &pd_chan->err_status); | ||
709 | 759 | ||
710 | tasklet_schedule(&pd_chan->tasklet); | 760 | tasklet_schedule(&pd_chan->tasklet); |
711 | ret = IRQ_HANDLED; | 761 | ret0 = IRQ_HANDLED; |
712 | } | 762 | } |
763 | } else { | ||
764 | if (sts2 & DMA_STATUS_IRQ(i - 8)) { | ||
765 | if (sts2 & DMA_STATUS2_ERR(i)) | ||
766 | set_bit(0, &pd_chan->err_status); | ||
713 | 767 | ||
768 | tasklet_schedule(&pd_chan->tasklet); | ||
769 | ret2 = IRQ_HANDLED; | ||
770 | } | ||
771 | } | ||
714 | } | 772 | } |
715 | 773 | ||
716 | /* clear interrupt bits in status register */ | 774 | /* clear interrupt bits in status register */ |
717 | dma_writel(pd, STS0, sts0); | 775 | if (ret0) |
776 | dma_writel(pd, STS0, sts0); | ||
777 | if (ret2) | ||
778 | dma_writel(pd, STS2, sts2); | ||
718 | 779 | ||
719 | return ret; | 780 | return ret0 | ret2; |
720 | } | 781 | } |
721 | 782 | ||
722 | #ifdef CONFIG_PM | 783 | #ifdef CONFIG_PM |
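The pch_dma hunks above extend the driver from 8 to 12 channels: channels 8-11 take their interrupt-enable bits 8 positions higher in CTL2 and report status through the new STS2 register rather than STS0. A minimal user-space sketch of that mapping, using only the arithmetic visible in the diff (the rest of the CTL2 layout is not shown here and is not assumed):

#include <stdio.h>

/* Interrupt-enable bit position in CTL2, as computed in pdc_enable_irq():
 * channels 0-7 use bits 0-7, channels 8-11 are shifted up by a further 8. */
static int pdc_irq_bit_pos(int chan_id)
{
        return (chan_id < 8) ? chan_id : chan_id + 8;
}

int main(void)
{
        int ch;

        for (ch = 0; ch < 12; ch++)
                printf("channel %2d -> CTL2 enable mask 0x%08x, status in %s\n",
                       ch, 1u << pdc_irq_bit_pos(ch),
                       ch < 8 ? "STS0" : "STS2");
        return 0;
}

Channel 9, for instance, lands on bit 17 of CTL2 rather than bit 9.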
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6abe1ec1f2ce..00eee59e8b33 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -82,7 +82,7 @@ struct dma_pl330_dmac { | |||
82 | spinlock_t pool_lock; | 82 | spinlock_t pool_lock; |
83 | 83 | ||
84 | /* Peripheral channels connected to this DMAC */ | 84 | /* Peripheral channels connected to this DMAC */ |
85 | struct dma_pl330_chan peripherals[0]; /* keep at end */ | 85 | struct dma_pl330_chan *peripherals; /* keep at end */ |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct dma_pl330_desc { | 88 | struct dma_pl330_desc { |
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
451 | desc->txd.cookie = 0; | 451 | desc->txd.cookie = 0; |
452 | async_tx_ack(&desc->txd); | 452 | async_tx_ack(&desc->txd); |
453 | 453 | ||
454 | desc->req.rqtype = peri->rqtype; | 454 | if (peri) { |
455 | desc->req.peri = peri->peri_id; | 455 | desc->req.rqtype = peri->rqtype; |
456 | desc->req.peri = peri->peri_id; | ||
457 | } else { | ||
458 | desc->req.rqtype = MEMTOMEM; | ||
459 | desc->req.peri = 0; | ||
460 | } | ||
456 | 461 | ||
457 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 462 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
458 | 463 | ||
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
529 | struct pl330_info *pi; | 534 | struct pl330_info *pi; |
530 | int burst; | 535 | int burst; |
531 | 536 | ||
532 | if (unlikely(!pch || !len || !peri)) | 537 | if (unlikely(!pch || !len)) |
533 | return NULL; | 538 | return NULL; |
534 | 539 | ||
535 | if (peri->rqtype != MEMTOMEM) | 540 | if (peri && peri->rqtype != MEMTOMEM) |
536 | return NULL; | 541 | return NULL; |
537 | 542 | ||
538 | pi = &pch->dmac->pif; | 543 | pi = &pch->dmac->pif; |
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
577 | int i, burst_size; | 582 | int i, burst_size; |
578 | dma_addr_t addr; | 583 | dma_addr_t addr; |
579 | 584 | ||
580 | if (unlikely(!pch || !sgl || !sg_len)) | 585 | if (unlikely(!pch || !sgl || !sg_len || !peri)) |
581 | return NULL; | 586 | return NULL; |
582 | 587 | ||
583 | /* Make sure the direction is consistent */ | 588 | /* Make sure the direction is consistent */ |
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
666 | struct dma_device *pd; | 671 | struct dma_device *pd; |
667 | struct resource *res; | 672 | struct resource *res; |
668 | int i, ret, irq; | 673 | int i, ret, irq; |
674 | int num_chan; | ||
669 | 675 | ||
670 | pdat = adev->dev.platform_data; | 676 | pdat = adev->dev.platform_data; |
671 | 677 | ||
672 | if (!pdat || !pdat->nr_valid_peri) { | ||
673 | dev_err(&adev->dev, "platform data missing\n"); | ||
674 | return -ENODEV; | ||
675 | } | ||
676 | |||
677 | /* Allocate a new DMAC and its Channels */ | 678 | /* Allocate a new DMAC and its Channels */ |
678 | pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) | 679 | pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL); |
679 | + sizeof(*pdmac), GFP_KERNEL); | ||
680 | if (!pdmac) { | 680 | if (!pdmac) { |
681 | dev_err(&adev->dev, "unable to allocate mem\n"); | 681 | dev_err(&adev->dev, "unable to allocate mem\n"); |
682 | return -ENOMEM; | 682 | return -ENOMEM; |
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
685 | pi = &pdmac->pif; | 685 | pi = &pdmac->pif; |
686 | pi->dev = &adev->dev; | 686 | pi->dev = &adev->dev; |
687 | pi->pl330_data = NULL; | 687 | pi->pl330_data = NULL; |
688 | pi->mcbufsz = pdat->mcbuf_sz; | 688 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
689 | 689 | ||
690 | res = &adev->res; | 690 | res = &adev->res; |
691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); | 691 | request_mem_region(res->start, resource_size(res), "dma-pl330"); |
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
717 | INIT_LIST_HEAD(&pd->channels); | 717 | INIT_LIST_HEAD(&pd->channels); |
718 | 718 | ||
719 | /* Initialize channel parameters */ | 719 | /* Initialize channel parameters */ |
720 | for (i = 0; i < pdat->nr_valid_peri; i++) { | 720 | num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan); |
721 | struct dma_pl330_peri *peri = &pdat->peri[i]; | 721 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); |
722 | pch = &pdmac->peripherals[i]; | ||
723 | 722 | ||
724 | switch (peri->rqtype) { | 723 | for (i = 0; i < num_chan; i++) { |
725 | case MEMTOMEM: | 724 | pch = &pdmac->peripherals[i]; |
725 | if (pdat) { | ||
726 | struct dma_pl330_peri *peri = &pdat->peri[i]; | ||
727 | |||
728 | switch (peri->rqtype) { | ||
729 | case MEMTOMEM: | ||
730 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | ||
731 | break; | ||
732 | case MEMTODEV: | ||
733 | case DEVTOMEM: | ||
734 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
735 | break; | ||
736 | default: | ||
737 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
738 | continue; | ||
739 | } | ||
740 | pch->chan.private = peri; | ||
741 | } else { | ||
726 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | 742 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); |
727 | break; | 743 | pch->chan.private = NULL; |
728 | case MEMTODEV: | ||
729 | case DEVTOMEM: | ||
730 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | ||
731 | break; | ||
732 | default: | ||
733 | dev_err(&adev->dev, "DEVTODEV Not Supported\n"); | ||
734 | continue; | ||
735 | } | 744 | } |
736 | 745 | ||
737 | INIT_LIST_HEAD(&pch->work_list); | 746 | INIT_LIST_HEAD(&pch->work_list); |
738 | spin_lock_init(&pch->lock); | 747 | spin_lock_init(&pch->lock); |
739 | pch->pl330_chid = NULL; | 748 | pch->pl330_chid = NULL; |
740 | pch->chan.private = peri; | ||
741 | pch->chan.device = pd; | 749 | pch->chan.device = pd; |
742 | pch->chan.chan_id = i; | 750 | pch->chan.chan_id = i; |
743 | pch->dmac = pdmac; | 751 | pch->dmac = pdmac; |
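The pl330 probe rework above drops the zero-length peripherals[0] array from the end of struct dma_pl330_dmac and allocates the channel array separately, sized from whichever is larger of pdat->nr_valid_peri and the probed pi->pcfg.num_chan, so the driver also works without platform data (falling back to pure MEMTOMEM channels). A small user-space analogue of that two-step allocation; the struct and function names below are illustrative, not the kernel API:

#include <stdlib.h>

struct chan { int id; };                 /* stand-in for dma_pl330_chan   */

struct dmac {
        int num_chan;
        struct chan *peripherals;        /* was: struct chan peripherals[0]; */
};

/* Allocate the controller and its channel array in two steps, mirroring
 * kzalloc(sizeof(*pdmac)) followed by kzalloc(num_chan * sizeof(*pch)). */
static struct dmac *dmac_alloc(int from_pdata, int from_hw)
{
        int num_chan = from_pdata > from_hw ? from_pdata : from_hw; /* max() */
        struct dmac *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->peripherals = calloc(num_chan, sizeof(*d->peripherals));
        if (!d->peripherals) {
                free(d);
                return NULL;
        }
        d->num_chan = num_chan;
        return d;
}

int main(void)
{
        struct dmac *d = dmac_alloc(0, 8);   /* no platform data, 8 HW channels */

        if (!d)
                return 1;
        free(d->peripherals);
        free(d);
        return 0;
}

The trade-off of the split is one extra allocation and a separate free on the error/remove path, which the embedded array avoided.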
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 028330044201..7f49235d14b9 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | |||
70 | 70 | ||
71 | static u16 dmaor_read(struct sh_dmae_device *shdev) | 71 | static u16 dmaor_read(struct sh_dmae_device *shdev) |
72 | { | 72 | { |
73 | return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); | 73 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); |
74 | |||
75 | if (shdev->pdata->dmaor_is_32bit) | ||
76 | return __raw_readl(addr); | ||
77 | else | ||
78 | return __raw_readw(addr); | ||
74 | } | 79 | } |
75 | 80 | ||
76 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | 81 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) |
77 | { | 82 | { |
78 | __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); | 83 | u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); |
84 | |||
85 | if (shdev->pdata->dmaor_is_32bit) | ||
86 | __raw_writel(data, addr); | ||
87 | else | ||
88 | __raw_writew(data, addr); | ||
89 | } | ||
90 | |||
91 | static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
92 | { | ||
93 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
94 | |||
95 | __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
96 | } | ||
97 | |||
98 | static u32 chcr_read(struct sh_dmae_chan *sh_dc) | ||
99 | { | ||
100 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
101 | |||
102 | return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); | ||
79 | } | 103 | } |
80 | 104 | ||
81 | /* | 105 | /* |
@@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) | |||
120 | 144 | ||
121 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | 145 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) |
122 | { | 146 | { |
123 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 147 | u32 chcr = chcr_read(sh_chan); |
124 | 148 | ||
125 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) | 149 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) |
126 | return true; /* working */ | 150 | return true; /* working */ |
@@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | |||
130 | 154 | ||
131 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | 155 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) |
132 | { | 156 | { |
133 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | 157 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
134 | struct sh_dmae_device, common); | ||
135 | struct sh_dmae_pdata *pdata = shdev->pdata; | 158 | struct sh_dmae_pdata *pdata = shdev->pdata; |
136 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | 159 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | |
137 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | 160 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); |
@@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | |||
144 | 167 | ||
145 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | 168 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) |
146 | { | 169 | { |
147 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | 170 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
148 | struct sh_dmae_device, common); | ||
149 | struct sh_dmae_pdata *pdata = shdev->pdata; | 171 | struct sh_dmae_pdata *pdata = shdev->pdata; |
150 | int i; | 172 | int i; |
151 | 173 | ||
@@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | |||
169 | 191 | ||
170 | static void dmae_start(struct sh_dmae_chan *sh_chan) | 192 | static void dmae_start(struct sh_dmae_chan *sh_chan) |
171 | { | 193 | { |
172 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 194 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
195 | u32 chcr = chcr_read(sh_chan); | ||
196 | |||
197 | if (shdev->pdata->needs_tend_set) | ||
198 | sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); | ||
173 | 199 | ||
174 | chcr |= CHCR_DE | CHCR_IE; | 200 | chcr |= CHCR_DE | shdev->chcr_ie_bit; |
175 | sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); | 201 | chcr_write(sh_chan, chcr & ~CHCR_TE); |
176 | } | 202 | } |
177 | 203 | ||
178 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | 204 | static void dmae_halt(struct sh_dmae_chan *sh_chan) |
179 | { | 205 | { |
180 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 206 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
207 | u32 chcr = chcr_read(sh_chan); | ||
181 | 208 | ||
182 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); | 209 | chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); |
183 | sh_dmae_writel(sh_chan, chcr, CHCR); | 210 | chcr_write(sh_chan, chcr); |
184 | } | 211 | } |
185 | 212 | ||
186 | static void dmae_init(struct sh_dmae_chan *sh_chan) | 213 | static void dmae_init(struct sh_dmae_chan *sh_chan) |
@@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) | |||
192 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | 219 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, |
193 | LOG2_DEFAULT_XFER_SIZE); | 220 | LOG2_DEFAULT_XFER_SIZE); |
194 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | 221 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); |
195 | sh_dmae_writel(sh_chan, chcr, CHCR); | 222 | chcr_write(sh_chan, chcr); |
196 | } | 223 | } |
197 | 224 | ||
198 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | 225 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) |
@@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | |||
202 | return -EBUSY; | 229 | return -EBUSY; |
203 | 230 | ||
204 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); | 231 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); |
205 | sh_dmae_writel(sh_chan, val, CHCR); | 232 | chcr_write(sh_chan, val); |
206 | 233 | ||
207 | return 0; | 234 | return 0; |
208 | } | 235 | } |
209 | 236 | ||
210 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 237 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
211 | { | 238 | { |
212 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | 239 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
213 | struct sh_dmae_device, common); | ||
214 | struct sh_dmae_pdata *pdata = shdev->pdata; | 240 | struct sh_dmae_pdata *pdata = shdev->pdata; |
215 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | 241 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; |
216 | u16 __iomem *addr = shdev->dmars; | 242 | u16 __iomem *addr = shdev->dmars; |
217 | int shift = chan_pdata->dmars_bit; | 243 | unsigned int shift = chan_pdata->dmars_bit; |
218 | 244 | ||
219 | if (dmae_is_busy(sh_chan)) | 245 | if (dmae_is_busy(sh_chan)) |
220 | return -EBUSY; | 246 | return -EBUSY; |
221 | 247 | ||
248 | if (pdata->no_dmars) | ||
249 | return 0; | ||
250 | |||
222 | /* in the case of a missing DMARS resource use first memory window */ | 251 | /* in the case of a missing DMARS resource use first memory window */ |
223 | if (!addr) | 252 | if (!addr) |
224 | addr = (u16 __iomem *)shdev->chan_reg; | 253 | addr = (u16 __iomem *)shdev->chan_reg; |
@@ -296,9 +325,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | |||
296 | static const struct sh_dmae_slave_config *sh_dmae_find_slave( | 325 | static const struct sh_dmae_slave_config *sh_dmae_find_slave( |
297 | struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) | 326 | struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) |
298 | { | 327 | { |
299 | struct dma_device *dma_dev = sh_chan->common.device; | 328 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
300 | struct sh_dmae_device *shdev = container_of(dma_dev, | ||
301 | struct sh_dmae_device, common); | ||
302 | struct sh_dmae_pdata *pdata = shdev->pdata; | 329 | struct sh_dmae_pdata *pdata = shdev->pdata; |
303 | int i; | 330 | int i; |
304 | 331 | ||
@@ -771,10 +798,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | |||
771 | 798 | ||
772 | spin_lock_bh(&sh_chan->desc_lock); | 799 | spin_lock_bh(&sh_chan->desc_lock); |
773 | /* DMA work check */ | 800 | /* DMA work check */ |
774 | if (dmae_is_busy(sh_chan)) { | 801 | if (dmae_is_busy(sh_chan)) |
775 | spin_unlock_bh(&sh_chan->desc_lock); | 802 | goto sh_chan_xfer_ld_queue_end; |
776 | return; | ||
777 | } | ||
778 | 803 | ||
779 | /* Find the first not transferred descriptor */ | 804 | /* Find the first not transferred descriptor */ |
780 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | 805 | list_for_each_entry(desc, &sh_chan->ld_queue, node) |
@@ -788,6 +813,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | |||
788 | break; | 813 | break; |
789 | } | 814 | } |
790 | 815 | ||
816 | sh_chan_xfer_ld_queue_end: | ||
791 | spin_unlock_bh(&sh_chan->desc_lock); | 817 | spin_unlock_bh(&sh_chan->desc_lock); |
792 | } | 818 | } |
793 | 819 | ||
@@ -846,7 +872,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) | |||
846 | 872 | ||
847 | spin_lock(&sh_chan->desc_lock); | 873 | spin_lock(&sh_chan->desc_lock); |
848 | 874 | ||
849 | chcr = sh_dmae_readl(sh_chan, CHCR); | 875 | chcr = chcr_read(sh_chan); |
850 | 876 | ||
851 | if (chcr & CHCR_TE) { | 877 | if (chcr & CHCR_TE) { |
852 | /* DMA stop */ | 878 | /* DMA stop */ |
@@ -1144,6 +1170,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1144 | /* platform data */ | 1170 | /* platform data */ |
1145 | shdev->pdata = pdata; | 1171 | shdev->pdata = pdata; |
1146 | 1172 | ||
1173 | if (pdata->chcr_offset) | ||
1174 | shdev->chcr_offset = pdata->chcr_offset; | ||
1175 | else | ||
1176 | shdev->chcr_offset = CHCR; | ||
1177 | |||
1178 | if (pdata->chcr_ie_bit) | ||
1179 | shdev->chcr_ie_bit = pdata->chcr_ie_bit; | ||
1180 | else | ||
1181 | shdev->chcr_ie_bit = CHCR_IE; | ||
1182 | |||
1147 | platform_set_drvdata(pdev, shdev); | 1183 | platform_set_drvdata(pdev, shdev); |
1148 | 1184 | ||
1149 | pm_runtime_enable(&pdev->dev); | 1185 | pm_runtime_enable(&pdev->dev); |
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 5ae9fc512180..dc56576f9fdb 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h | |||
@@ -47,10 +47,14 @@ struct sh_dmae_device { | |||
47 | struct list_head node; | 47 | struct list_head node; |
48 | u32 __iomem *chan_reg; | 48 | u32 __iomem *chan_reg; |
49 | u16 __iomem *dmars; | 49 | u16 __iomem *dmars; |
50 | unsigned int chcr_offset; | ||
51 | u32 chcr_ie_bit; | ||
50 | }; | 52 | }; |
51 | 53 | ||
52 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) | 54 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) |
53 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) | 55 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) |
54 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) | 56 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) |
57 | #define to_sh_dev(chan) container_of(chan->common.device,\ | ||
58 | struct sh_dmae_device, common) | ||
55 | 59 | ||
56 | #endif /* __DMA_SHDMA_H */ | 60 | #endif /* __DMA_SHDMA_H */ |
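The to_sh_dev() helper added above replaces the repeated open-coded container_of() calls in shdma.c: given a channel, it walks from the embedded struct dma_device back to the enclosing sh_dmae_device. A stand-alone illustration of that container_of pattern with simplified stand-in types (not the real shdma structures):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_device { int id; };

struct sh_dev {                          /* simplified sh_dmae_device */
        unsigned int chcr_offset;
        struct dma_device common;        /* embedded, as in the driver */
};

struct sh_chan {                         /* simplified sh_dmae_chan */
        struct { struct dma_device *device; } common;
};

#define to_sh_dev(chan) \
        container_of((chan)->common.device, struct sh_dev, common)

int main(void)
{
        struct sh_dev dev = { .chcr_offset = 0x0c };
        struct sh_chan chan = { .common = { .device = &dev.common } };

        printf("chcr_offset = %#x\n", to_sh_dev(&chan)->chcr_offset);
        return 0;
}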
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 29d1addbe0cf..cd3a7c726bf8 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/amba/bus.h> | ||
17 | 18 | ||
18 | #include <plat/ste_dma40.h> | 19 | #include <plat/ste_dma40.h> |
19 | 20 | ||
@@ -45,9 +46,6 @@ | |||
45 | #define D40_ALLOC_PHY (1 << 30) | 46 | #define D40_ALLOC_PHY (1 << 30) |
46 | #define D40_ALLOC_LOG_FREE 0 | 47 | #define D40_ALLOC_LOG_FREE 0 |
47 | 48 | ||
48 | /* Hardware designer of the block */ | ||
49 | #define D40_HW_DESIGNER 0x8 | ||
50 | |||
51 | /** | 49 | /** |
52 | * enum 40_command - The different commands and/or statuses. | 50 | * enum 40_command - The different commands and/or statuses. |
53 | * | 51 | * |
@@ -186,6 +184,8 @@ struct d40_base; | |||
186 | * @log_def: Default logical channel settings. | 184 | * @log_def: Default logical channel settings. |
187 | * @lcla: Space for one dst src pair for logical channel transfers. | 185 | * @lcla: Space for one dst src pair for logical channel transfers. |
188 | * @lcpa: Pointer to dst and src lcpa settings. | 186 | * @lcpa: Pointer to dst and src lcpa settings. |
187 | * @runtime_addr: runtime configured address. | ||
188 | * @runtime_direction: runtime configured direction. | ||
189 | * | 189 | * |
190 | * This struct can either "be" a logical or a physical channel. | 190 | * This struct can either "be" a logical or a physical channel. |
191 | */ | 191 | */ |
@@ -200,6 +200,7 @@ struct d40_chan { | |||
200 | struct dma_chan chan; | 200 | struct dma_chan chan; |
201 | struct tasklet_struct tasklet; | 201 | struct tasklet_struct tasklet; |
202 | struct list_head client; | 202 | struct list_head client; |
203 | struct list_head pending_queue; | ||
203 | struct list_head active; | 204 | struct list_head active; |
204 | struct list_head queue; | 205 | struct list_head queue; |
205 | struct stedma40_chan_cfg dma_cfg; | 206 | struct stedma40_chan_cfg dma_cfg; |
@@ -645,7 +646,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | |||
645 | 646 | ||
646 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | 647 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) |
647 | { | 648 | { |
648 | list_add_tail(&desc->node, &d40c->queue); | 649 | list_add_tail(&desc->node, &d40c->pending_queue); |
650 | } | ||
651 | |||
652 | static struct d40_desc *d40_first_pending(struct d40_chan *d40c) | ||
653 | { | ||
654 | struct d40_desc *d; | ||
655 | |||
656 | if (list_empty(&d40c->pending_queue)) | ||
657 | return NULL; | ||
658 | |||
659 | d = list_first_entry(&d40c->pending_queue, | ||
660 | struct d40_desc, | ||
661 | node); | ||
662 | return d; | ||
649 | } | 663 | } |
650 | 664 | ||
651 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | 665 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) |
@@ -802,6 +816,11 @@ static void d40_term_all(struct d40_chan *d40c) | |||
802 | d40_desc_free(d40c, d40d); | 816 | d40_desc_free(d40c, d40d); |
803 | } | 817 | } |
804 | 818 | ||
819 | /* Release pending descriptors */ | ||
820 | while ((d40d = d40_first_pending(d40c))) { | ||
821 | d40_desc_remove(d40d); | ||
822 | d40_desc_free(d40c, d40d); | ||
823 | } | ||
805 | 824 | ||
806 | d40c->pending_tx = 0; | 825 | d40c->pending_tx = 0; |
807 | d40c->busy = false; | 826 | d40c->busy = false; |
@@ -2092,7 +2111,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
2092 | struct scatterlist *sg; | 2111 | struct scatterlist *sg; |
2093 | int i; | 2112 | int i; |
2094 | 2113 | ||
2095 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); | 2114 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); |
2096 | for (i = 0; i < periods; i++) { | 2115 | for (i = 0; i < periods; i++) { |
2097 | sg_dma_address(&sg[i]) = dma_addr; | 2116 | sg_dma_address(&sg[i]) = dma_addr; |
2098 | sg_dma_len(&sg[i]) = period_len; | 2117 | sg_dma_len(&sg[i]) = period_len; |
@@ -2152,24 +2171,87 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2152 | 2171 | ||
2153 | spin_lock_irqsave(&d40c->lock, flags); | 2172 | spin_lock_irqsave(&d40c->lock, flags); |
2154 | 2173 | ||
2155 | /* Busy means that pending jobs are already being processed */ | 2174 | list_splice_tail_init(&d40c->pending_queue, &d40c->queue); |
2175 | |||
2176 | /* Busy means that queued jobs are already being processed */ | ||
2156 | if (!d40c->busy) | 2177 | if (!d40c->busy) |
2157 | (void) d40_queue_start(d40c); | 2178 | (void) d40_queue_start(d40c); |
2158 | 2179 | ||
2159 | spin_unlock_irqrestore(&d40c->lock, flags); | 2180 | spin_unlock_irqrestore(&d40c->lock, flags); |
2160 | } | 2181 | } |
2161 | 2182 | ||
2183 | static int | ||
2184 | dma40_config_to_halfchannel(struct d40_chan *d40c, | ||
2185 | struct stedma40_half_channel_info *info, | ||
2186 | enum dma_slave_buswidth width, | ||
2187 | u32 maxburst) | ||
2188 | { | ||
2189 | enum stedma40_periph_data_width addr_width; | ||
2190 | int psize; | ||
2191 | |||
2192 | switch (width) { | ||
2193 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
2194 | addr_width = STEDMA40_BYTE_WIDTH; | ||
2195 | break; | ||
2196 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
2197 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
2198 | break; | ||
2199 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
2200 | addr_width = STEDMA40_WORD_WIDTH; | ||
2201 | break; | ||
2202 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
2203 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
2204 | break; | ||
2205 | default: | ||
2206 | dev_err(d40c->base->dev, | ||
2207 | "illegal peripheral address width " | ||
2208 | "requested (%d)\n", | ||
2209 | width); | ||
2210 | return -EINVAL; | ||
2211 | } | ||
2212 | |||
2213 | if (chan_is_logical(d40c)) { | ||
2214 | if (maxburst >= 16) | ||
2215 | psize = STEDMA40_PSIZE_LOG_16; | ||
2216 | else if (maxburst >= 8) | ||
2217 | psize = STEDMA40_PSIZE_LOG_8; | ||
2218 | else if (maxburst >= 4) | ||
2219 | psize = STEDMA40_PSIZE_LOG_4; | ||
2220 | else | ||
2221 | psize = STEDMA40_PSIZE_LOG_1; | ||
2222 | } else { | ||
2223 | if (maxburst >= 16) | ||
2224 | psize = STEDMA40_PSIZE_PHY_16; | ||
2225 | else if (maxburst >= 8) | ||
2226 | psize = STEDMA40_PSIZE_PHY_8; | ||
2227 | else if (maxburst >= 4) | ||
2228 | psize = STEDMA40_PSIZE_PHY_4; | ||
2229 | else | ||
2230 | psize = STEDMA40_PSIZE_PHY_1; | ||
2231 | } | ||
2232 | |||
2233 | info->data_width = addr_width; | ||
2234 | info->psize = psize; | ||
2235 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
2236 | |||
2237 | return 0; | ||
2238 | } | ||
2239 | |||
2162 | /* Runtime reconfiguration extension */ | 2240 | /* Runtime reconfiguration extension */ |
2163 | static void d40_set_runtime_config(struct dma_chan *chan, | 2241 | static int d40_set_runtime_config(struct dma_chan *chan, |
2164 | struct dma_slave_config *config) | 2242 | struct dma_slave_config *config) |
2165 | { | 2243 | { |
2166 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2244 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2167 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | 2245 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; |
2168 | enum dma_slave_buswidth config_addr_width; | 2246 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
2169 | dma_addr_t config_addr; | 2247 | dma_addr_t config_addr; |
2170 | u32 config_maxburst; | 2248 | u32 src_maxburst, dst_maxburst; |
2171 | enum stedma40_periph_data_width addr_width; | 2249 | int ret; |
2172 | int psize; | 2250 | |
2251 | src_addr_width = config->src_addr_width; | ||
2252 | src_maxburst = config->src_maxburst; | ||
2253 | dst_addr_width = config->dst_addr_width; | ||
2254 | dst_maxburst = config->dst_maxburst; | ||
2173 | 2255 | ||
2174 | if (config->direction == DMA_FROM_DEVICE) { | 2256 | if (config->direction == DMA_FROM_DEVICE) { |
2175 | dma_addr_t dev_addr_rx = | 2257 | dma_addr_t dev_addr_rx = |
@@ -2188,8 +2270,11 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2188 | cfg->dir); | 2270 | cfg->dir); |
2189 | cfg->dir = STEDMA40_PERIPH_TO_MEM; | 2271 | cfg->dir = STEDMA40_PERIPH_TO_MEM; |
2190 | 2272 | ||
2191 | config_addr_width = config->src_addr_width; | 2273 | /* Configure the memory side */ |
2192 | config_maxburst = config->src_maxburst; | 2274 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2275 | dst_addr_width = src_addr_width; | ||
2276 | if (dst_maxburst == 0) | ||
2277 | dst_maxburst = src_maxburst; | ||
2193 | 2278 | ||
2194 | } else if (config->direction == DMA_TO_DEVICE) { | 2279 | } else if (config->direction == DMA_TO_DEVICE) { |
2195 | dma_addr_t dev_addr_tx = | 2280 | dma_addr_t dev_addr_tx = |
@@ -2208,68 +2293,39 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2208 | cfg->dir); | 2293 | cfg->dir); |
2209 | cfg->dir = STEDMA40_MEM_TO_PERIPH; | 2294 | cfg->dir = STEDMA40_MEM_TO_PERIPH; |
2210 | 2295 | ||
2211 | config_addr_width = config->dst_addr_width; | 2296 | /* Configure the memory side */ |
2212 | config_maxburst = config->dst_maxburst; | 2297 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
2213 | 2298 | src_addr_width = dst_addr_width; | |
2299 | if (src_maxburst == 0) | ||
2300 | src_maxburst = dst_maxburst; | ||
2214 | } else { | 2301 | } else { |
2215 | dev_err(d40c->base->dev, | 2302 | dev_err(d40c->base->dev, |
2216 | "unrecognized channel direction %d\n", | 2303 | "unrecognized channel direction %d\n", |
2217 | config->direction); | 2304 | config->direction); |
2218 | return; | 2305 | return -EINVAL; |
2219 | } | 2306 | } |
2220 | 2307 | ||
2221 | switch (config_addr_width) { | 2308 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
2222 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
2223 | addr_width = STEDMA40_BYTE_WIDTH; | ||
2224 | break; | ||
2225 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
2226 | addr_width = STEDMA40_HALFWORD_WIDTH; | ||
2227 | break; | ||
2228 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
2229 | addr_width = STEDMA40_WORD_WIDTH; | ||
2230 | break; | ||
2231 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
2232 | addr_width = STEDMA40_DOUBLEWORD_WIDTH; | ||
2233 | break; | ||
2234 | default: | ||
2235 | dev_err(d40c->base->dev, | 2309 | dev_err(d40c->base->dev, |
2236 | "illegal peripheral address width " | 2310 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
2237 | "requested (%d)\n", | 2311 | src_maxburst, |
2238 | config->src_addr_width); | 2312 | src_addr_width, |
2239 | return; | 2313 | dst_maxburst, |
2314 | dst_addr_width); | ||
2315 | return -EINVAL; | ||
2240 | } | 2316 | } |
2241 | 2317 | ||
2242 | if (chan_is_logical(d40c)) { | 2318 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
2243 | if (config_maxburst >= 16) | 2319 | src_addr_width, |
2244 | psize = STEDMA40_PSIZE_LOG_16; | 2320 | src_maxburst); |
2245 | else if (config_maxburst >= 8) | 2321 | if (ret) |
2246 | psize = STEDMA40_PSIZE_LOG_8; | 2322 | return ret; |
2247 | else if (config_maxburst >= 4) | ||
2248 | psize = STEDMA40_PSIZE_LOG_4; | ||
2249 | else | ||
2250 | psize = STEDMA40_PSIZE_LOG_1; | ||
2251 | } else { | ||
2252 | if (config_maxburst >= 16) | ||
2253 | psize = STEDMA40_PSIZE_PHY_16; | ||
2254 | else if (config_maxburst >= 8) | ||
2255 | psize = STEDMA40_PSIZE_PHY_8; | ||
2256 | else if (config_maxburst >= 4) | ||
2257 | psize = STEDMA40_PSIZE_PHY_4; | ||
2258 | else if (config_maxburst >= 2) | ||
2259 | psize = STEDMA40_PSIZE_PHY_2; | ||
2260 | else | ||
2261 | psize = STEDMA40_PSIZE_PHY_1; | ||
2262 | } | ||
2263 | 2323 | ||
2264 | /* Set up all the endpoint configs */ | 2324 | ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, |
2265 | cfg->src_info.data_width = addr_width; | 2325 | dst_addr_width, |
2266 | cfg->src_info.psize = psize; | 2326 | dst_maxburst); |
2267 | cfg->src_info.big_endian = false; | 2327 | if (ret) |
2268 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2328 | return ret; |
2269 | cfg->dst_info.data_width = addr_width; | ||
2270 | cfg->dst_info.psize = psize; | ||
2271 | cfg->dst_info.big_endian = false; | ||
2272 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | ||
2273 | 2329 | ||
2274 | /* Fill in register values */ | 2330 | /* Fill in register values */ |
2275 | if (chan_is_logical(d40c)) | 2331 | if (chan_is_logical(d40c)) |
@@ -2282,12 +2338,14 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2282 | d40c->runtime_addr = config_addr; | 2338 | d40c->runtime_addr = config_addr; |
2283 | d40c->runtime_direction = config->direction; | 2339 | d40c->runtime_direction = config->direction; |
2284 | dev_dbg(d40c->base->dev, | 2340 | dev_dbg(d40c->base->dev, |
2285 | "configured channel %s for %s, data width %d, " | 2341 | "configured channel %s for %s, data width %d/%d, " |
2286 | "maxburst %d bytes, LE, no flow control\n", | 2342 | "maxburst %d/%d elements, LE, no flow control\n", |
2287 | dma_chan_name(chan), | 2343 | dma_chan_name(chan), |
2288 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 2344 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", |
2289 | config_addr_width, | 2345 | src_addr_width, dst_addr_width, |
2290 | config_maxburst); | 2346 | src_maxburst, dst_maxburst); |
2347 | |||
2348 | return 0; | ||
2291 | } | 2349 | } |
2292 | 2350 | ||
2293 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2351 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
@@ -2308,9 +2366,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
2308 | case DMA_RESUME: | 2366 | case DMA_RESUME: |
2309 | return d40_resume(d40c); | 2367 | return d40_resume(d40c); |
2310 | case DMA_SLAVE_CONFIG: | 2368 | case DMA_SLAVE_CONFIG: |
2311 | d40_set_runtime_config(chan, | 2369 | return d40_set_runtime_config(chan, |
2312 | (struct dma_slave_config *) arg); | 2370 | (struct dma_slave_config *) arg); |
2313 | return 0; | ||
2314 | default: | 2371 | default: |
2315 | break; | 2372 | break; |
2316 | } | 2373 | } |
@@ -2341,6 +2398,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2341 | 2398 | ||
2342 | INIT_LIST_HEAD(&d40c->active); | 2399 | INIT_LIST_HEAD(&d40c->active); |
2343 | INIT_LIST_HEAD(&d40c->queue); | 2400 | INIT_LIST_HEAD(&d40c->queue); |
2401 | INIT_LIST_HEAD(&d40c->pending_queue); | ||
2344 | INIT_LIST_HEAD(&d40c->client); | 2402 | INIT_LIST_HEAD(&d40c->client); |
2345 | 2403 | ||
2346 | tasklet_init(&d40c->tasklet, dma_tasklet, | 2404 | tasklet_init(&d40c->tasklet, dma_tasklet, |
@@ -2502,25 +2560,6 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2502 | 2560 | ||
2503 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 2561 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
2504 | { | 2562 | { |
2505 | static const struct d40_reg_val dma_id_regs[] = { | ||
2506 | /* Peripheral Id */ | ||
2507 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, | ||
2508 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | ||
2509 | /* | ||
2510 | * D40_DREG_PERIPHID2 Depends on HW revision: | ||
2511 | * DB8500ed has 0x0008, | ||
2512 | * ? has 0x0018, | ||
2513 | * DB8500v1 has 0x0028 | ||
2514 | * DB8500v2 has 0x0038 | ||
2515 | */ | ||
2516 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | ||
2517 | |||
2518 | /* PCell Id */ | ||
2519 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, | ||
2520 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, | ||
2521 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, | ||
2522 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} | ||
2523 | }; | ||
2524 | struct stedma40_platform_data *plat_data; | 2563 | struct stedma40_platform_data *plat_data; |
2525 | struct clk *clk = NULL; | 2564 | struct clk *clk = NULL; |
2526 | void __iomem *virtbase = NULL; | 2565 | void __iomem *virtbase = NULL; |
@@ -2529,8 +2568,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2529 | int num_log_chans = 0; | 2568 | int num_log_chans = 0; |
2530 | int num_phy_chans; | 2569 | int num_phy_chans; |
2531 | int i; | 2570 | int i; |
2532 | u32 val; | 2571 | u32 pid; |
2533 | u32 rev; | 2572 | u32 cid; |
2573 | u8 rev; | ||
2534 | 2574 | ||
2535 | clk = clk_get(&pdev->dev, NULL); | 2575 | clk = clk_get(&pdev->dev, NULL); |
2536 | 2576 | ||
@@ -2554,32 +2594,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2554 | if (!virtbase) | 2594 | if (!virtbase) |
2555 | goto failure; | 2595 | goto failure; |
2556 | 2596 | ||
2557 | /* HW version check */ | 2597 | /* This is just a regular AMBA PrimeCell ID actually */ |
2558 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2598 | for (pid = 0, i = 0; i < 4; i++) |
2559 | if (dma_id_regs[i].val != | 2599 | pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) |
2560 | readl(virtbase + dma_id_regs[i].reg)) { | 2600 | & 255) << (i * 8); |
2561 | d40_err(&pdev->dev, | 2601 | for (cid = 0, i = 0; i < 4; i++) |
2562 | "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2602 | cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) |
2563 | dma_id_regs[i].val, | 2603 | & 255) << (i * 8); |
2564 | dma_id_regs[i].reg, | ||
2565 | readl(virtbase + dma_id_regs[i].reg)); | ||
2566 | goto failure; | ||
2567 | } | ||
2568 | } | ||
2569 | 2604 | ||
2570 | /* Get silicon revision and designer */ | 2605 | if (cid != AMBA_CID) { |
2571 | val = readl(virtbase + D40_DREG_PERIPHID2); | 2606 | d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); |
2572 | 2607 | goto failure; | |
2573 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != | 2608 | } |
2574 | D40_HW_DESIGNER) { | 2609 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { |
2575 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", | 2610 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
2576 | val & D40_DREG_PERIPHID2_DESIGNER_MASK, | 2611 | AMBA_MANF_BITS(pid), |
2577 | D40_HW_DESIGNER); | 2612 | AMBA_VENDOR_ST); |
2578 | goto failure; | 2613 | goto failure; |
2579 | } | 2614 | } |
2580 | 2615 | /* | |
2581 | rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> | 2616 | * HW revision: |
2582 | D40_DREG_PERIPHID2_REV_POS; | 2617 | * DB8500ed has revision 0 |
2618 | * ? has revision 1 | ||
2619 | * DB8500v1 has revision 2 | ||
2620 | * DB8500v2 has revision 3 | ||
2621 | */ | ||
2622 | rev = AMBA_REV_BITS(pid); | ||
2583 | 2623 | ||
2584 | /* The number of physical channels on this HW */ | 2624 | /* The number of physical channels on this HW */ |
2585 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2625 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
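The ste_dma40 probe now identifies the hardware through the standard AMBA PrimeCell ID words instead of the removed dma_id_regs table: four byte-wide registers at the end of the memory window are assembled into pid and cid, the cell ID is checked against the PrimeCell magic, and the designer and revision fields are extracted from pid. A sketch of that assembly, fed with the register values from the removed table; the AMBA_* constants and field positions are assumed from linux/amba/bus.h and are not part of this diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed AMBA PrimeCell constants/field layout (see linux/amba/bus.h). */
#define AMBA_CID          0xb105f00d
#define AMBA_VENDOR_ST    0x80
#define amba_manf(pid)    (((pid) >> 12) & 0xff)
#define amba_rev(pid)     (((pid) >> 20) & 0x0f)

/* Assemble a 32-bit ID from four byte-wide registers, least significant
 * byte first, exactly as the pid/cid loops in d40_hw_detect_init() do. */
static uint32_t assemble_id(const uint32_t regs[4])
{
        uint32_t id = 0;
        int i;

        for (i = 0; i < 4; i++)
                id |= (regs[i] & 255) << (i * 8);
        return id;
}

int main(void)
{
        /* Example register contents for a DB8500v2-style DMA40. */
        uint32_t periphid[4] = { 0x40, 0x00, 0x38, 0x00 };
        uint32_t cellid[4]   = { 0x0d, 0xf0, 0x05, 0xb1 };
        uint32_t pid = assemble_id(periphid);
        uint32_t cid = assemble_id(cellid);

        printf("cid ok: %d, manufacturer 0x%02x, revision %u\n",
               cid == AMBA_CID, (unsigned)amba_manf(pid), (unsigned)amba_rev(pid));
        return 0;
}

With PERIPHID2 = 0x38 (the DB8500v2 value from the removed table) this reports manufacturer 0x80 and revision 3, matching the revision comment in the new code.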
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 195ee65ee7f3..b44c455158de 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -184,9 +184,6 @@ | |||
184 | #define D40_DREG_PERIPHID0 0xFE0 | 184 | #define D40_DREG_PERIPHID0 0xFE0 |
185 | #define D40_DREG_PERIPHID1 0xFE4 | 185 | #define D40_DREG_PERIPHID1 0xFE4 |
186 | #define D40_DREG_PERIPHID2 0xFE8 | 186 | #define D40_DREG_PERIPHID2 0xFE8 |
187 | #define D40_DREG_PERIPHID2_REV_POS 4 | ||
188 | #define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS) | ||
189 | #define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf | ||
190 | #define D40_DREG_PERIPHID3 0xFEC | 187 | #define D40_DREG_PERIPHID3 0xFEC |
191 | #define D40_DREG_CELLID0 0xFF0 | 188 | #define D40_DREG_CELLID0 0xFF0 |
192 | #define D40_DREG_CELLID1 0xFF4 | 189 | #define D40_DREG_CELLID1 0xFF4 |
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index 30da70d06a6d..cdae207028a7 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c | |||
@@ -45,13 +45,13 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
45 | return 0; | 45 | return 0; |
46 | } | 46 | } |
47 | 47 | ||
48 | static struct pci_device_id __initdata pci_eisa_pci_tbl[] = { | 48 | static struct pci_device_id pci_eisa_pci_tbl[] = { |
49 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 49 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
50 | PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, | 50 | PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, |
51 | { 0, } | 51 | { 0, } |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static struct pci_driver __initdata pci_eisa_driver = { | 54 | static struct pci_driver __refdata pci_eisa_driver = { |
55 | .name = "pci_eisa", | 55 | .name = "pci_eisa", |
56 | .id_table = pci_eisa_pci_tbl, | 56 | .id_table = pci_eisa_pci_tbl, |
57 | .probe = pci_eisa_init, | 57 | .probe = pci_eisa_init, |
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 5f29aafd4462..eb80b549ed8d 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c | |||
@@ -78,6 +78,7 @@ | |||
78 | #include <linux/kobject.h> | 78 | #include <linux/kobject.h> |
79 | #include <linux/device.h> | 79 | #include <linux/device.h> |
80 | #include <linux/slab.h> | 80 | #include <linux/slab.h> |
81 | #include <linux/pstore.h> | ||
81 | 82 | ||
82 | #include <asm/uaccess.h> | 83 | #include <asm/uaccess.h> |
83 | 84 | ||
@@ -89,6 +90,8 @@ MODULE_DESCRIPTION("sysfs interface to EFI Variables"); | |||
89 | MODULE_LICENSE("GPL"); | 90 | MODULE_LICENSE("GPL"); |
90 | MODULE_VERSION(EFIVARS_VERSION); | 91 | MODULE_VERSION(EFIVARS_VERSION); |
91 | 92 | ||
93 | #define DUMP_NAME_LEN 52 | ||
94 | |||
92 | /* | 95 | /* |
93 | * The maximum size of VariableName + Data = 1024 | 96 | * The maximum size of VariableName + Data = 1024 |
94 | * Therefore, it's reasonable to save that much | 97 | * Therefore, it's reasonable to save that much |
@@ -119,6 +122,10 @@ struct efivar_attribute { | |||
119 | ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); | 122 | ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); |
120 | }; | 123 | }; |
121 | 124 | ||
125 | #define PSTORE_EFI_ATTRIBUTES \ | ||
126 | (EFI_VARIABLE_NON_VOLATILE | \ | ||
127 | EFI_VARIABLE_BOOTSERVICE_ACCESS | \ | ||
128 | EFI_VARIABLE_RUNTIME_ACCESS) | ||
122 | 129 | ||
123 | #define EFIVAR_ATTR(_name, _mode, _show, _store) \ | 130 | #define EFIVAR_ATTR(_name, _mode, _show, _store) \ |
124 | struct efivar_attribute efivar_attr_##_name = { \ | 131 | struct efivar_attribute efivar_attr_##_name = { \ |
@@ -141,38 +148,72 @@ efivar_create_sysfs_entry(struct efivars *efivars, | |||
141 | 148 | ||
142 | /* Return the number of unicode characters in data */ | 149 | /* Return the number of unicode characters in data */ |
143 | static unsigned long | 150 | static unsigned long |
144 | utf8_strlen(efi_char16_t *data, unsigned long maxlength) | 151 | utf16_strnlen(efi_char16_t *s, size_t maxlength) |
145 | { | 152 | { |
146 | unsigned long length = 0; | 153 | unsigned long length = 0; |
147 | 154 | ||
148 | while (*data++ != 0 && length < maxlength) | 155 | while (*s++ != 0 && length < maxlength) |
149 | length++; | 156 | length++; |
150 | return length; | 157 | return length; |
151 | } | 158 | } |
152 | 159 | ||
160 | static inline unsigned long | ||
161 | utf16_strlen(efi_char16_t *s) | ||
162 | { | ||
163 | return utf16_strnlen(s, ~0UL); | ||
164 | } | ||
165 | |||
153 | /* | 166 | /* |
154 | * Return the number of bytes is the length of this string | 167 | * Return the number of bytes is the length of this string |
155 | * Note: this is NOT the same as the number of unicode characters | 168 | * Note: this is NOT the same as the number of unicode characters |
156 | */ | 169 | */ |
157 | static inline unsigned long | 170 | static inline unsigned long |
158 | utf8_strsize(efi_char16_t *data, unsigned long maxlength) | 171 | utf16_strsize(efi_char16_t *data, unsigned long maxlength) |
159 | { | 172 | { |
160 | return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); | 173 | return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); |
174 | } | ||
175 | |||
176 | static inline int | ||
177 | utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) | ||
178 | { | ||
179 | while (1) { | ||
180 | if (len == 0) | ||
181 | return 0; | ||
182 | if (*a < *b) | ||
183 | return -1; | ||
184 | if (*a > *b) | ||
185 | return 1; | ||
186 | if (*a == 0) /* implies *b == 0 */ | ||
187 | return 0; | ||
188 | a++; | ||
189 | b++; | ||
190 | len--; | ||
191 | } | ||
161 | } | 192 | } |
162 | 193 | ||
163 | static efi_status_t | 194 | static efi_status_t |
164 | get_var_data(struct efivars *efivars, struct efi_variable *var) | 195 | get_var_data_locked(struct efivars *efivars, struct efi_variable *var) |
165 | { | 196 | { |
166 | efi_status_t status; | 197 | efi_status_t status; |
167 | 198 | ||
168 | spin_lock(&efivars->lock); | ||
169 | var->DataSize = 1024; | 199 | var->DataSize = 1024; |
170 | status = efivars->ops->get_variable(var->VariableName, | 200 | status = efivars->ops->get_variable(var->VariableName, |
171 | &var->VendorGuid, | 201 | &var->VendorGuid, |
172 | &var->Attributes, | 202 | &var->Attributes, |
173 | &var->DataSize, | 203 | &var->DataSize, |
174 | var->Data); | 204 | var->Data); |
205 | return status; | ||
206 | } | ||
207 | |||
208 | static efi_status_t | ||
209 | get_var_data(struct efivars *efivars, struct efi_variable *var) | ||
210 | { | ||
211 | efi_status_t status; | ||
212 | |||
213 | spin_lock(&efivars->lock); | ||
214 | status = get_var_data_locked(efivars, var); | ||
175 | spin_unlock(&efivars->lock); | 215 | spin_unlock(&efivars->lock); |
216 | |||
176 | if (status != EFI_SUCCESS) { | 217 | if (status != EFI_SUCCESS) { |
177 | printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n", | 218 | printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n", |
178 | status); | 219 | status); |
@@ -387,12 +428,180 @@ static struct kobj_type efivar_ktype = { | |||
387 | .default_attrs = def_attrs, | 428 | .default_attrs = def_attrs, |
388 | }; | 429 | }; |
389 | 430 | ||
431 | static struct pstore_info efi_pstore_info; | ||
432 | |||
390 | static inline void | 433 | static inline void |
391 | efivar_unregister(struct efivar_entry *var) | 434 | efivar_unregister(struct efivar_entry *var) |
392 | { | 435 | { |
393 | kobject_put(&var->kobj); | 436 | kobject_put(&var->kobj); |
394 | } | 437 | } |
395 | 438 | ||
439 | #ifdef CONFIG_PSTORE | ||
440 | |||
441 | static int efi_pstore_open(struct pstore_info *psi) | ||
442 | { | ||
443 | struct efivars *efivars = psi->data; | ||
444 | |||
445 | spin_lock(&efivars->lock); | ||
446 | efivars->walk_entry = list_first_entry(&efivars->list, | ||
447 | struct efivar_entry, list); | ||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | static int efi_pstore_close(struct pstore_info *psi) | ||
452 | { | ||
453 | struct efivars *efivars = psi->data; | ||
454 | |||
455 | spin_unlock(&efivars->lock); | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, | ||
460 | struct timespec *timespec, struct pstore_info *psi) | ||
461 | { | ||
462 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; | ||
463 | struct efivars *efivars = psi->data; | ||
464 | char name[DUMP_NAME_LEN]; | ||
465 | int i; | ||
466 | unsigned int part, size; | ||
467 | unsigned long time; | ||
468 | |||
469 | while (&efivars->walk_entry->list != &efivars->list) { | ||
470 | if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid, | ||
471 | vendor)) { | ||
472 | for (i = 0; i < DUMP_NAME_LEN; i++) { | ||
473 | name[i] = efivars->walk_entry->var.VariableName[i]; | ||
474 | } | ||
475 | if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) { | ||
476 | *id = part; | ||
477 | timespec->tv_sec = time; | ||
478 | timespec->tv_nsec = 0; | ||
479 | get_var_data_locked(efivars, &efivars->walk_entry->var); | ||
480 | size = efivars->walk_entry->var.DataSize; | ||
481 | memcpy(psi->buf, efivars->walk_entry->var.Data, size); | ||
482 | efivars->walk_entry = list_entry(efivars->walk_entry->list.next, | ||
483 | struct efivar_entry, list); | ||
484 | return size; | ||
485 | } | ||
486 | } | ||
487 | efivars->walk_entry = list_entry(efivars->walk_entry->list.next, | ||
488 | struct efivar_entry, list); | ||
489 | } | ||
490 | return 0; | ||
491 | } | ||
492 | |||
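efi_pstore_read() above (and efi_pstore_write() below) lean on a naming convention for the backing EFI variables: "dump-type<type>-<part>-<seconds>", produced with sprintf() and parsed back with sscanf(). A small stand-alone C sketch of that round trip; the buffer size and the example field values are assumptions, not taken from the driver:

#include <stdio.h>

int main(void)
{
	char name[64];
	unsigned int type, part;
	unsigned long when;

	/* Encode: type 2, part 1, timestamp in seconds. */
	snprintf(name, sizeof(name), "dump-type%u-%u-%lu", 2u, 1u, 1312345678ul);

	/* Decode: the read callback only treats names matching all three fields as dump records. */
	if (sscanf(name, "dump-type%u-%u-%lu", &type, &part, &when) == 3)
		printf("type=%u part=%u time=%lu\n", type, part, when);

	return 0;
}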
493 | static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part, | ||
494 | size_t size, struct pstore_info *psi) | ||
495 | { | ||
496 | char name[DUMP_NAME_LEN]; | ||
497 | char stub_name[DUMP_NAME_LEN]; | ||
498 | efi_char16_t efi_name[DUMP_NAME_LEN]; | ||
499 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; | ||
500 | struct efivars *efivars = psi->data; | ||
501 | struct efivar_entry *entry, *found = NULL; | ||
502 | int i; | ||
503 | |||
504 | sprintf(stub_name, "dump-type%u-%u-", type, part); | ||
505 | sprintf(name, "%s%lu", stub_name, get_seconds()); | ||
506 | |||
507 | spin_lock(&efivars->lock); | ||
508 | |||
509 | for (i = 0; i < DUMP_NAME_LEN; i++) | ||
510 | efi_name[i] = stub_name[i]; | ||
511 | |||
512 | /* | ||
513 | * Clean up any entries with the same name | ||
514 | */ | ||
515 | |||
516 | list_for_each_entry(entry, &efivars->list, list) { | ||
517 | get_var_data_locked(efivars, &entry->var); | ||
518 | |||
519 | if (efi_guidcmp(entry->var.VendorGuid, vendor)) | ||
520 | continue; | ||
521 | if (utf16_strncmp(entry->var.VariableName, efi_name, | ||
522 | utf16_strlen(efi_name))) | ||
523 | continue; | ||
524 | /* Needs to be a prefix */ | ||
525 | if (entry->var.VariableName[utf16_strlen(efi_name)] == 0) | ||
526 | continue; | ||
527 | |||
528 | /* found */ | ||
529 | found = entry; | ||
530 | efivars->ops->set_variable(entry->var.VariableName, | ||
531 | &entry->var.VendorGuid, | ||
532 | PSTORE_EFI_ATTRIBUTES, | ||
533 | 0, NULL); | ||
534 | } | ||
535 | |||
536 | if (found) | ||
537 | list_del(&found->list); | ||
538 | |||
539 | for (i = 0; i < DUMP_NAME_LEN; i++) | ||
540 | efi_name[i] = name[i]; | ||
541 | |||
542 | efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES, | ||
543 | size, psi->buf); | ||
544 | |||
545 | spin_unlock(&efivars->lock); | ||
546 | |||
547 | if (found) | ||
548 | efivar_unregister(found); | ||
549 | |||
550 | if (size) | ||
551 | efivar_create_sysfs_entry(efivars, | ||
552 | utf16_strsize(efi_name, | ||
553 | DUMP_NAME_LEN * 2), | ||
554 | efi_name, &vendor); | ||
555 | |||
556 | return part; | ||
557 | }; | ||
558 | |||
559 | static int efi_pstore_erase(enum pstore_type_id type, u64 id, | ||
560 | struct pstore_info *psi) | ||
561 | { | ||
562 | efi_pstore_write(type, id, 0, psi); | ||
563 | |||
564 | return 0; | ||
565 | } | ||
566 | #else | ||
567 | static int efi_pstore_open(struct pstore_info *psi) | ||
568 | { | ||
569 | return 0; | ||
570 | } | ||
571 | |||
572 | static int efi_pstore_close(struct pstore_info *psi) | ||
573 | { | ||
574 | return 0; | ||
575 | } | ||
576 | |||
577 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, | ||
578 | struct timespec *time, struct pstore_info *psi) | ||
579 | { | ||
580 | return -1; | ||
581 | } | ||
582 | |||
583 | static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part, | ||
584 | size_t size, struct pstore_info *psi) | ||
585 | { | ||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | static int efi_pstore_erase(enum pstore_type_id type, u64 id, | ||
590 | struct pstore_info *psi) | ||
591 | { | ||
592 | return 0; | ||
593 | } | ||
594 | #endif | ||
595 | |||
596 | static struct pstore_info efi_pstore_info = { | ||
597 | .owner = THIS_MODULE, | ||
598 | .name = "efi", | ||
599 | .open = efi_pstore_open, | ||
600 | .close = efi_pstore_close, | ||
601 | .read = efi_pstore_read, | ||
602 | .write = efi_pstore_write, | ||
603 | .erase = efi_pstore_erase, | ||
604 | }; | ||
396 | 605 | ||
397 | static ssize_t efivar_create(struct file *filp, struct kobject *kobj, | 606 | static ssize_t efivar_create(struct file *filp, struct kobject *kobj, |
398 | struct bin_attribute *bin_attr, | 607 | struct bin_attribute *bin_attr, |
@@ -414,8 +623,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, | |||
414 | * Does this variable already exist? | 623 | * Does this variable already exist? |
415 | */ | 624 | */ |
416 | list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { | 625 | list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { |
417 | strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); | 626 | strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); |
418 | strsize2 = utf8_strsize(new_var->VariableName, 1024); | 627 | strsize2 = utf16_strsize(new_var->VariableName, 1024); |
419 | if (strsize1 == strsize2 && | 628 | if (strsize1 == strsize2 && |
420 | !memcmp(&(search_efivar->var.VariableName), | 629 | !memcmp(&(search_efivar->var.VariableName), |
421 | new_var->VariableName, strsize1) && | 630 | new_var->VariableName, strsize1) && |
@@ -447,8 +656,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, | |||
447 | 656 | ||
448 | /* Create the entry in sysfs. Locking is not required here */ | 657 | /* Create the entry in sysfs. Locking is not required here */ |
449 | status = efivar_create_sysfs_entry(efivars, | 658 | status = efivar_create_sysfs_entry(efivars, |
450 | utf8_strsize(new_var->VariableName, | 659 | utf16_strsize(new_var->VariableName, |
451 | 1024), | 660 | 1024), |
452 | new_var->VariableName, | 661 | new_var->VariableName, |
453 | &new_var->VendorGuid); | 662 | &new_var->VendorGuid); |
454 | if (status) { | 663 | if (status) { |
@@ -477,8 +686,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, | |||
477 | * Does this variable already exist? | 686 | * Does this variable already exist? |
478 | */ | 687 | */ |
479 | list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { | 688 | list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { |
480 | strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); | 689 | strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); |
481 | strsize2 = utf8_strsize(del_var->VariableName, 1024); | 690 | strsize2 = utf16_strsize(del_var->VariableName, 1024); |
482 | if (strsize1 == strsize2 && | 691 | if (strsize1 == strsize2 && |
483 | !memcmp(&(search_efivar->var.VariableName), | 692 | !memcmp(&(search_efivar->var.VariableName), |
484 | del_var->VariableName, strsize1) && | 693 | del_var->VariableName, strsize1) && |
@@ -763,6 +972,16 @@ int register_efivars(struct efivars *efivars, | |||
763 | if (error) | 972 | if (error) |
764 | unregister_efivars(efivars); | 973 | unregister_efivars(efivars); |
765 | 974 | ||
975 | efivars->efi_pstore_info = efi_pstore_info; | ||
976 | |||
977 | efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); | ||
978 | if (efivars->efi_pstore_info.buf) { | ||
979 | efivars->efi_pstore_info.bufsize = 1024; | ||
980 | efivars->efi_pstore_info.data = efivars; | ||
981 | mutex_init(&efivars->efi_pstore_info.buf_mutex); | ||
982 | pstore_register(&efivars->efi_pstore_info); | ||
983 | } | ||
984 | |||
766 | out: | 985 | out: |
767 | kfree(variable_name); | 986 | kfree(variable_name); |
768 | 987 | ||
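The hunk above is what actually turns efivars into a pstore backend: it copies the efi_pstore_info template, allocates a record buffer, and calls pstore_register(). A condensed sketch of that registration shape; the standalone function name and the -ENOMEM return are illustrative, while the sizes mirror the patch:

/* Sketch only: example_register_efi_pstore() is not part of the patch. */
static int example_register_efi_pstore(struct efivars *efivars)
{
	efivars->efi_pstore_info = efi_pstore_info;	/* copy the callback template */

	efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
	if (!efivars->efi_pstore_info.buf)
		return -ENOMEM;		/* the patch itself just skips registration instead */

	efivars->efi_pstore_info.bufsize = 1024;
	efivars->efi_pstore_info.data = efivars;
	mutex_init(&efivars->efi_pstore_info.buf_mutex);
	pstore_register(&efivars->efi_pstore_info);
	return 0;
}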
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 6778f56a4c64..d539efd96d4b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -103,6 +103,22 @@ config GPIO_MPC5200 | |||
103 | def_bool y | 103 | def_bool y |
104 | depends on PPC_MPC52xx | 104 | depends on PPC_MPC52xx |
105 | 105 | ||
106 | config GPIO_MSM_V1 | ||
107 | tristate "Qualcomm MSM GPIO v1" | ||
108 | depends on GPIOLIB && ARCH_MSM | ||
109 | help | ||
110 | Say yes here to support the GPIO interface on ARM v6 based | ||
111 | Qualcomm MSM chips. Most of the pins on the MSM can be | ||
112 | selected for GPIO, and are controlled by this driver. | ||
113 | |||
114 | config GPIO_MSM_V2 | ||
115 | tristate "Qualcomm MSM GPIO v2" | ||
116 | depends on GPIOLIB && ARCH_MSM | ||
117 | help | ||
118 | Say yes here to support the GPIO interface on ARM v7 based | ||
119 | Qualcomm MSM chips. Most of the pins on the MSM can be | ||
120 | selected for GPIO, and are controlled by this driver. | ||
121 | |||
106 | config GPIO_MXC | 122 | config GPIO_MXC |
107 | def_bool y | 123 | def_bool y |
108 | depends on ARCH_MXC | 124 | depends on ARCH_MXC |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 4b81d4e1e709..9588948c96f0 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -27,6 +27,8 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o | |||
27 | obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o | 27 | obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o |
28 | obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o | 28 | obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o |
29 | obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o | 29 | obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o |
30 | obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o | ||
31 | obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o | ||
30 | obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o | 32 | obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o |
31 | obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o | 33 | obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o |
32 | obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o | 34 | obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o |
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c index ed795e64eea7..050c05d91896 100644 --- a/drivers/gpio/gpio-ab8500.c +++ b/drivers/gpio/gpio-ab8500.c | |||
@@ -516,5 +516,5 @@ module_exit(ab8500_gpio_exit); | |||
516 | 516 | ||
517 | MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>"); | 517 | MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>"); |
518 | MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO"); | 518 | MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO"); |
519 | MODULE_ALIAS("AB8500 GPIO driver"); | 519 | MODULE_ALIAS("platform:ab8500-gpio"); |
520 | MODULE_LICENSE("GPL v2"); | 520 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c new file mode 100644 index 000000000000..52a4d4286eba --- /dev/null +++ b/drivers/gpio/gpio-msm-v1.c | |||
@@ -0,0 +1,636 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Google, Inc. | ||
3 | * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/bitops.h> | ||
17 | #include <linux/gpio.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <mach/cpu.h> | ||
23 | #include <mach/msm_gpiomux.h> | ||
24 | #include <mach/msm_iomap.h> | ||
25 | |||
26 | /* see 80-VA736-2 Rev C pp 695-751 | ||
27 | ** | ||
28 | ** These are actually the *shadow* gpio registers, since the | ||
29 | ** real ones (which allow full access) are only available to the | ||
30 | ** ARM9 side of the world. | ||
31 | ** | ||
32 | ** Since the _BASE addresses need to be page-aligned when we're mapping them | ||
33 | ** to virtual addresses, adjust for the additional offset in these | ||
34 | ** macros. | ||
35 | */ | ||
36 | |||
37 | #define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off)) | ||
38 | #define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off)) | ||
39 | #define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off)) | ||
40 | #define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off)) | ||
41 | |||
42 | /* | ||
43 | * MSM7X00 registers | ||
44 | */ | ||
45 | /* output value */ | ||
46 | #define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */ | ||
47 | #define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */ | ||
48 | #define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */ | ||
49 | #define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */ | ||
50 | #define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */ | ||
51 | #define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */ | ||
52 | |||
53 | /* same pin map as above, output enable */ | ||
54 | #define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10) | ||
55 | #define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08) | ||
56 | #define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14) | ||
57 | #define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18) | ||
58 | #define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C) | ||
59 | #define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54) | ||
60 | |||
61 | /* same pin map as above, input read */ | ||
62 | #define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34) | ||
63 | #define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20) | ||
64 | #define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38) | ||
65 | #define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C) | ||
66 | #define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40) | ||
67 | #define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44) | ||
68 | |||
69 | /* same pin map as above, 1=edge 0=level interrupt */ | ||
70 | #define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60) | ||
71 | #define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50) | ||
72 | #define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64) | ||
73 | #define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68) | ||
74 | #define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C) | ||
75 | #define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0) | ||
76 | |||
77 | /* same pin map as above, 1=positive 0=negative */ | ||
78 | #define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70) | ||
79 | #define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58) | ||
80 | #define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74) | ||
81 | #define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78) | ||
82 | #define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C) | ||
83 | #define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC) | ||
84 | |||
85 | /* same pin map as above, interrupt enable */ | ||
86 | #define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80) | ||
87 | #define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60) | ||
88 | #define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84) | ||
89 | #define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88) | ||
90 | #define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C) | ||
91 | #define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8) | ||
92 | |||
93 | /* same pin map as above, write 1 to clear interrupt */ | ||
94 | #define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90) | ||
95 | #define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68) | ||
96 | #define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94) | ||
97 | #define MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98) | ||
98 | #define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C) | ||
99 | #define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4) | ||
100 | |||
101 | /* same pin map as above, 1=interrupt pending */ | ||
102 | #define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0) | ||
103 | #define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70) | ||
104 | #define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4) | ||
105 | #define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8) | ||
106 | #define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC) | ||
107 | #define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0) | ||
108 | |||
109 | /* | ||
110 | * QSD8X50 registers | ||
111 | */ | ||
112 | /* output value */ | ||
113 | #define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */ | ||
114 | #define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */ | ||
115 | #define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */ | ||
116 | #define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */ | ||
117 | #define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */ | ||
118 | #define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */ | ||
119 | #define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */ | ||
120 | #define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */ | ||
121 | |||
122 | /* same pin map as above, output enable */ | ||
123 | #define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20) | ||
124 | #define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08) | ||
125 | #define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24) | ||
126 | #define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28) | ||
127 | #define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C) | ||
128 | #define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30) | ||
129 | #define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34) | ||
130 | #define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38) | ||
131 | |||
132 | /* same pin map as above, input read */ | ||
133 | #define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50) | ||
134 | #define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20) | ||
135 | #define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54) | ||
136 | #define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58) | ||
137 | #define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C) | ||
138 | #define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60) | ||
139 | #define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64) | ||
140 | #define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68) | ||
141 | |||
142 | /* same pin map as above, 1=edge 0=level interrupt */ | ||
143 | #define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70) | ||
144 | #define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50) | ||
145 | #define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74) | ||
146 | #define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78) | ||
147 | #define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C) | ||
148 | #define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80) | ||
149 | #define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84) | ||
150 | #define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88) | ||
151 | |||
152 | /* same pin map as above, 1=positive 0=negative */ | ||
153 | #define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90) | ||
154 | #define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58) | ||
155 | #define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94) | ||
156 | #define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98) | ||
157 | #define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C) | ||
158 | #define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0) | ||
159 | #define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4) | ||
160 | #define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8) | ||
161 | |||
162 | /* same pin map as above, interrupt enable */ | ||
163 | #define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0) | ||
164 | #define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60) | ||
165 | #define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4) | ||
166 | #define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8) | ||
167 | #define QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC) | ||
168 | #define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0) | ||
169 | #define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4) | ||
170 | #define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8) | ||
171 | |||
172 | /* same pin map as above, write 1 to clear interrupt */ | ||
173 | #define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0) | ||
174 | #define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68) | ||
175 | #define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4) | ||
176 | #define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8) | ||
177 | #define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC) | ||
178 | #define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0) | ||
179 | #define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4) | ||
180 | #define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8) | ||
181 | |||
182 | /* same pin map as above, 1=interrupt pending */ | ||
183 | #define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0) | ||
184 | #define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70) | ||
185 | #define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4) | ||
186 | #define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8) | ||
187 | #define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC) | ||
188 | #define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100) | ||
189 | #define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104) | ||
190 | #define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108) | ||
191 | |||
192 | /* | ||
193 | * MSM7X30 registers | ||
194 | */ | ||
195 | /* output value */ | ||
196 | #define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */ | ||
197 | #define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */ | ||
198 | #define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */ | ||
199 | #define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */ | ||
200 | #define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */ | ||
201 | #define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */ | ||
202 | #define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */ | ||
203 | #define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */ | ||
204 | |||
205 | /* same pin map as above, output enable */ | ||
206 | #define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10) | ||
207 | #define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08) | ||
208 | #define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14) | ||
209 | #define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18) | ||
210 | #define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C) | ||
211 | #define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54) | ||
212 | #define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8) | ||
213 | #define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218) | ||
214 | |||
215 | /* same pin map as above, input read */ | ||
216 | #define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34) | ||
217 | #define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20) | ||
218 | #define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38) | ||
219 | #define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C) | ||
220 | #define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40) | ||
221 | #define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44) | ||
222 | #define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC) | ||
223 | #define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C) | ||
224 | |||
225 | /* same pin map as above, 1=edge 0=level interrupt */ | ||
226 | #define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60) | ||
227 | #define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50) | ||
228 | #define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64) | ||
229 | #define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68) | ||
230 | #define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C) | ||
231 | #define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0) | ||
232 | #define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0) | ||
233 | #define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240) | ||
234 | |||
235 | /* same pin map as above, 1=positive 0=negative */ | ||
236 | #define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70) | ||
237 | #define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58) | ||
238 | #define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74) | ||
239 | #define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78) | ||
240 | #define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C) | ||
241 | #define MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC) | ||
242 | #define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4) | ||
243 | #define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228) | ||
244 | |||
245 | /* same pin map as above, interrupt enable */ | ||
246 | #define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80) | ||
247 | #define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60) | ||
248 | #define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84) | ||
249 | #define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88) | ||
250 | #define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C) | ||
251 | #define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8) | ||
252 | #define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8) | ||
253 | #define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C) | ||
254 | |||
255 | /* same pin map as above, write 1 to clear interrupt */ | ||
256 | #define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90) | ||
257 | #define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68) | ||
258 | #define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94) | ||
259 | #define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98) | ||
260 | #define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C) | ||
261 | #define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4) | ||
262 | #define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC) | ||
263 | #define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230) | ||
264 | |||
265 | /* same pin map as above, 1=interrupt pending */ | ||
266 | #define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0) | ||
267 | #define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70) | ||
268 | #define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4) | ||
269 | #define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8) | ||
270 | #define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC) | ||
271 | #define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0) | ||
272 | #define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0) | ||
273 | #define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234) | ||
274 | |||
275 | #define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0) | ||
276 | |||
277 | #define MSM_GPIO_BANK(soc, bank, first, last) \ | ||
278 | { \ | ||
279 | .regs = { \ | ||
280 | .out = soc##_GPIO_OUT_##bank, \ | ||
281 | .in = soc##_GPIO_IN_##bank, \ | ||
282 | .int_status = soc##_GPIO_INT_STATUS_##bank, \ | ||
283 | .int_clear = soc##_GPIO_INT_CLEAR_##bank, \ | ||
284 | .int_en = soc##_GPIO_INT_EN_##bank, \ | ||
285 | .int_edge = soc##_GPIO_INT_EDGE_##bank, \ | ||
286 | .int_pos = soc##_GPIO_INT_POS_##bank, \ | ||
287 | .oe = soc##_GPIO_OE_##bank, \ | ||
288 | }, \ | ||
289 | .chip = { \ | ||
290 | .base = (first), \ | ||
291 | .ngpio = (last) - (first) + 1, \ | ||
292 | .get = msm_gpio_get, \ | ||
293 | .set = msm_gpio_set, \ | ||
294 | .direction_input = msm_gpio_direction_input, \ | ||
295 | .direction_output = msm_gpio_direction_output, \ | ||
296 | .to_irq = msm_gpio_to_irq, \ | ||
297 | .request = msm_gpio_request, \ | ||
298 | .free = msm_gpio_free, \ | ||
299 | } \ | ||
300 | } | ||
301 | |||
302 | #define MSM_GPIO_BROKEN_INT_CLEAR 1 | ||
303 | |||
304 | struct msm_gpio_regs { | ||
305 | void __iomem *out; | ||
306 | void __iomem *in; | ||
307 | void __iomem *int_status; | ||
308 | void __iomem *int_clear; | ||
309 | void __iomem *int_en; | ||
310 | void __iomem *int_edge; | ||
311 | void __iomem *int_pos; | ||
312 | void __iomem *oe; | ||
313 | }; | ||
314 | |||
315 | struct msm_gpio_chip { | ||
316 | spinlock_t lock; | ||
317 | struct gpio_chip chip; | ||
318 | struct msm_gpio_regs regs; | ||
319 | #if MSM_GPIO_BROKEN_INT_CLEAR | ||
320 | unsigned int_status_copy; | ||
321 | #endif | ||
322 | unsigned int both_edge_detect; | ||
323 | unsigned int int_enable[2]; /* 0: awake, 1: sleep */ | ||
324 | }; | ||
325 | |||
326 | static int msm_gpio_write(struct msm_gpio_chip *msm_chip, | ||
327 | unsigned offset, unsigned on) | ||
328 | { | ||
329 | unsigned mask = BIT(offset); | ||
330 | unsigned val; | ||
331 | |||
332 | val = readl(msm_chip->regs.out); | ||
333 | if (on) | ||
334 | writel(val | mask, msm_chip->regs.out); | ||
335 | else | ||
336 | writel(val & ~mask, msm_chip->regs.out); | ||
337 | return 0; | ||
338 | } | ||
339 | |||
340 | static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip) | ||
341 | { | ||
342 | int loop_limit = 100; | ||
343 | unsigned pol, val, val2, intstat; | ||
344 | do { | ||
345 | val = readl(msm_chip->regs.in); | ||
346 | pol = readl(msm_chip->regs.int_pos); | ||
347 | pol = (pol & ~msm_chip->both_edge_detect) | | ||
348 | (~val & msm_chip->both_edge_detect); | ||
349 | writel(pol, msm_chip->regs.int_pos); | ||
350 | intstat = readl(msm_chip->regs.int_status); | ||
351 | val2 = readl(msm_chip->regs.in); | ||
352 | if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0) | ||
353 | return; | ||
354 | } while (loop_limit-- > 0); | ||
355 | printk(KERN_ERR "msm_gpio_update_both_edge_detect, " | ||
356 | "failed to reach stable state %x != %x\n", val, val2); | ||
357 | } | ||
358 | |||
359 | static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip, | ||
360 | unsigned offset) | ||
361 | { | ||
362 | unsigned bit = BIT(offset); | ||
363 | |||
364 | #if MSM_GPIO_BROKEN_INT_CLEAR | ||
365 | /* Save interrupts that already triggered before we lose them. */ | ||
366 | /* Any interrupt that triggers between the read of int_status */ | ||
367 | /* and the write to int_clear will still be lost though. */ | ||
368 | msm_chip->int_status_copy |= readl(msm_chip->regs.int_status); | ||
369 | msm_chip->int_status_copy &= ~bit; | ||
370 | #endif | ||
371 | writel(bit, msm_chip->regs.int_clear); | ||
372 | msm_gpio_update_both_edge_detect(msm_chip); | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
377 | { | ||
378 | struct msm_gpio_chip *msm_chip; | ||
379 | unsigned long irq_flags; | ||
380 | |||
381 | msm_chip = container_of(chip, struct msm_gpio_chip, chip); | ||
382 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
383 | writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe); | ||
384 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static int | ||
389 | msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) | ||
390 | { | ||
391 | struct msm_gpio_chip *msm_chip; | ||
392 | unsigned long irq_flags; | ||
393 | |||
394 | msm_chip = container_of(chip, struct msm_gpio_chip, chip); | ||
395 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
396 | msm_gpio_write(msm_chip, offset, value); | ||
397 | writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe); | ||
398 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) | ||
403 | { | ||
404 | struct msm_gpio_chip *msm_chip; | ||
405 | |||
406 | msm_chip = container_of(chip, struct msm_gpio_chip, chip); | ||
407 | return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0; | ||
408 | } | ||
409 | |||
410 | static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | ||
411 | { | ||
412 | struct msm_gpio_chip *msm_chip; | ||
413 | unsigned long irq_flags; | ||
414 | |||
415 | msm_chip = container_of(chip, struct msm_gpio_chip, chip); | ||
416 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
417 | msm_gpio_write(msm_chip, offset, value); | ||
418 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
419 | } | ||
420 | |||
421 | static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | ||
422 | { | ||
423 | return MSM_GPIO_TO_INT(chip->base + offset); | ||
424 | } | ||
425 | |||
426 | #ifdef CONFIG_MSM_GPIOMUX | ||
427 | static int msm_gpio_request(struct gpio_chip *chip, unsigned offset) | ||
428 | { | ||
429 | return msm_gpiomux_get(chip->base + offset); | ||
430 | } | ||
431 | |||
432 | static void msm_gpio_free(struct gpio_chip *chip, unsigned offset) | ||
433 | { | ||
434 | msm_gpiomux_put(chip->base + offset); | ||
435 | } | ||
436 | #else | ||
437 | #define msm_gpio_request NULL | ||
438 | #define msm_gpio_free NULL | ||
439 | #endif | ||
440 | |||
441 | static struct msm_gpio_chip *msm_gpio_chips; | ||
442 | static int msm_gpio_count; | ||
443 | |||
444 | static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = { | ||
445 | MSM_GPIO_BANK(MSM7X00, 0, 0, 15), | ||
446 | MSM_GPIO_BANK(MSM7X00, 1, 16, 42), | ||
447 | MSM_GPIO_BANK(MSM7X00, 2, 43, 67), | ||
448 | MSM_GPIO_BANK(MSM7X00, 3, 68, 94), | ||
449 | MSM_GPIO_BANK(MSM7X00, 4, 95, 106), | ||
450 | MSM_GPIO_BANK(MSM7X00, 5, 107, 121), | ||
451 | }; | ||
452 | |||
453 | static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = { | ||
454 | MSM_GPIO_BANK(MSM7X30, 0, 0, 15), | ||
455 | MSM_GPIO_BANK(MSM7X30, 1, 16, 43), | ||
456 | MSM_GPIO_BANK(MSM7X30, 2, 44, 67), | ||
457 | MSM_GPIO_BANK(MSM7X30, 3, 68, 94), | ||
458 | MSM_GPIO_BANK(MSM7X30, 4, 95, 106), | ||
459 | MSM_GPIO_BANK(MSM7X30, 5, 107, 133), | ||
460 | MSM_GPIO_BANK(MSM7X30, 6, 134, 150), | ||
461 | MSM_GPIO_BANK(MSM7X30, 7, 151, 181), | ||
462 | }; | ||
463 | |||
464 | static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = { | ||
465 | MSM_GPIO_BANK(QSD8X50, 0, 0, 15), | ||
466 | MSM_GPIO_BANK(QSD8X50, 1, 16, 42), | ||
467 | MSM_GPIO_BANK(QSD8X50, 2, 43, 67), | ||
468 | MSM_GPIO_BANK(QSD8X50, 3, 68, 94), | ||
469 | MSM_GPIO_BANK(QSD8X50, 4, 95, 103), | ||
470 | MSM_GPIO_BANK(QSD8X50, 5, 104, 121), | ||
471 | MSM_GPIO_BANK(QSD8X50, 6, 122, 152), | ||
472 | MSM_GPIO_BANK(QSD8X50, 7, 153, 164), | ||
473 | }; | ||
474 | |||
475 | static void msm_gpio_irq_ack(struct irq_data *d) | ||
476 | { | ||
477 | unsigned long irq_flags; | ||
478 | struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); | ||
479 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
480 | msm_gpio_clear_detect_status(msm_chip, | ||
481 | d->irq - gpio_to_irq(msm_chip->chip.base)); | ||
482 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
483 | } | ||
484 | |||
485 | static void msm_gpio_irq_mask(struct irq_data *d) | ||
486 | { | ||
487 | unsigned long irq_flags; | ||
488 | struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); | ||
489 | unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); | ||
490 | |||
491 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
492 | /* level triggered interrupts are also latched */ | ||
493 | if (!(readl(msm_chip->regs.int_edge) & BIT(offset))) | ||
494 | msm_gpio_clear_detect_status(msm_chip, offset); | ||
495 | msm_chip->int_enable[0] &= ~BIT(offset); | ||
496 | writel(msm_chip->int_enable[0], msm_chip->regs.int_en); | ||
497 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
498 | } | ||
499 | |||
500 | static void msm_gpio_irq_unmask(struct irq_data *d) | ||
501 | { | ||
502 | unsigned long irq_flags; | ||
503 | struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); | ||
504 | unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); | ||
505 | |||
506 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
507 | /* level triggered interrupts are also latched */ | ||
508 | if (!(readl(msm_chip->regs.int_edge) & BIT(offset))) | ||
509 | msm_gpio_clear_detect_status(msm_chip, offset); | ||
510 | msm_chip->int_enable[0] |= BIT(offset); | ||
511 | writel(msm_chip->int_enable[0], msm_chip->regs.int_en); | ||
512 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
513 | } | ||
514 | |||
515 | static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) | ||
516 | { | ||
517 | unsigned long irq_flags; | ||
518 | struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); | ||
519 | unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); | ||
520 | |||
521 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
522 | |||
523 | if (on) | ||
524 | msm_chip->int_enable[1] |= BIT(offset); | ||
525 | else | ||
526 | msm_chip->int_enable[1] &= ~BIT(offset); | ||
527 | |||
528 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) | ||
533 | { | ||
534 | unsigned long irq_flags; | ||
535 | struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); | ||
536 | unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); | ||
537 | unsigned val, mask = BIT(offset); | ||
538 | |||
539 | spin_lock_irqsave(&msm_chip->lock, irq_flags); | ||
540 | val = readl(msm_chip->regs.int_edge); | ||
541 | if (flow_type & IRQ_TYPE_EDGE_BOTH) { | ||
542 | writel(val | mask, msm_chip->regs.int_edge); | ||
543 | __irq_set_handler_locked(d->irq, handle_edge_irq); | ||
544 | } else { | ||
545 | writel(val & ~mask, msm_chip->regs.int_edge); | ||
546 | __irq_set_handler_locked(d->irq, handle_level_irq); | ||
547 | } | ||
548 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { | ||
549 | msm_chip->both_edge_detect |= mask; | ||
550 | msm_gpio_update_both_edge_detect(msm_chip); | ||
551 | } else { | ||
552 | msm_chip->both_edge_detect &= ~mask; | ||
553 | val = readl(msm_chip->regs.int_pos); | ||
554 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) | ||
555 | writel(val | mask, msm_chip->regs.int_pos); | ||
556 | else | ||
557 | writel(val & ~mask, msm_chip->regs.int_pos); | ||
558 | } | ||
559 | spin_unlock_irqrestore(&msm_chip->lock, irq_flags); | ||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
564 | { | ||
565 | int i, j, mask; | ||
566 | unsigned val; | ||
567 | |||
568 | for (i = 0; i < msm_gpio_count; i++) { | ||
569 | struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i]; | ||
570 | val = readl(msm_chip->regs.int_status); | ||
571 | val &= msm_chip->int_enable[0]; | ||
572 | while (val) { | ||
573 | mask = val & -val; | ||
574 | j = fls(mask) - 1; | ||
575 | /* printk("%s %08x %08x bit %d gpio %d irq %d\n", | ||
576 | __func__, v, m, j, msm_chip->chip.start + j, | ||
577 | FIRST_GPIO_IRQ + msm_chip->chip.start + j); */ | ||
578 | val &= ~mask; | ||
579 | generic_handle_irq(FIRST_GPIO_IRQ + | ||
580 | msm_chip->chip.base + j); | ||
581 | } | ||
582 | } | ||
583 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
584 | } | ||
585 | |||
586 | static struct irq_chip msm_gpio_irq_chip = { | ||
587 | .name = "msmgpio", | ||
588 | .irq_ack = msm_gpio_irq_ack, | ||
589 | .irq_mask = msm_gpio_irq_mask, | ||
590 | .irq_unmask = msm_gpio_irq_unmask, | ||
591 | .irq_set_wake = msm_gpio_irq_set_wake, | ||
592 | .irq_set_type = msm_gpio_irq_set_type, | ||
593 | }; | ||
594 | |||
595 | static int __init msm_init_gpio(void) | ||
596 | { | ||
597 | int i, j = 0; | ||
598 | |||
599 | if (cpu_is_msm7x01()) { | ||
600 | msm_gpio_chips = msm_gpio_chips_msm7x01; | ||
601 | msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01); | ||
602 | } else if (cpu_is_msm7x30()) { | ||
603 | msm_gpio_chips = msm_gpio_chips_msm7x30; | ||
604 | msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30); | ||
605 | } else if (cpu_is_qsd8x50()) { | ||
606 | msm_gpio_chips = msm_gpio_chips_qsd8x50; | ||
607 | msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50); | ||
608 | } else { | ||
609 | return 0; | ||
610 | } | ||
611 | |||
612 | for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) { | ||
613 | if (i - FIRST_GPIO_IRQ >= | ||
614 | msm_gpio_chips[j].chip.base + | ||
615 | msm_gpio_chips[j].chip.ngpio) | ||
616 | j++; | ||
617 | irq_set_chip_data(i, &msm_gpio_chips[j]); | ||
618 | irq_set_chip_and_handler(i, &msm_gpio_irq_chip, | ||
619 | handle_edge_irq); | ||
620 | set_irq_flags(i, IRQF_VALID); | ||
621 | } | ||
622 | |||
623 | for (i = 0; i < msm_gpio_count; i++) { | ||
624 | spin_lock_init(&msm_gpio_chips[i].lock); | ||
625 | writel(0, msm_gpio_chips[i].regs.int_en); | ||
626 | gpiochip_add(&msm_gpio_chips[i].chip); | ||
627 | } | ||
628 | |||
629 | irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler); | ||
630 | irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler); | ||
631 | irq_set_irq_wake(INT_GPIO_GROUP1, 1); | ||
632 | irq_set_irq_wake(INT_GPIO_GROUP2, 2); | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | postcore_initcall(msm_init_gpio); | ||
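With the banks above registered through gpiochip_add(), any pin can be driven through the generic gpiolib calls this driver implements (msm_gpio_direction_output(), msm_gpio_set(), ...). A consumer-side sketch; GPIO number 57 and the label are arbitrary examples, not board-specific values:

#include <linux/gpio.h>

/* Sketch of a gpiolib consumer exercising one of the pins above. */
static int example_drive_pin(void)
{
	int err;

	err = gpio_request(57, "example-led");	/* arbitrary pin number */
	if (err)
		return err;

	err = gpio_direction_output(57, 1);	/* routes to msm_gpio_direction_output() */
	if (!err)
		gpio_set_value(57, 0);		/* routes to msm_gpio_set() */

	gpio_free(57);
	return err;
}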
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c new file mode 100644 index 000000000000..5cb1227d69cf --- /dev/null +++ b/drivers/gpio/gpio-msm-v2.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
15 | * 02110-1301, USA. | ||
16 | * | ||
17 | */ | ||
18 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
19 | |||
20 | #include <linux/bitmap.h> | ||
21 | #include <linux/bitops.h> | ||
22 | #include <linux/gpio.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | |||
31 | #include <asm/mach/irq.h> | ||
32 | |||
33 | #include <mach/msm_gpiomux.h> | ||
34 | #include <mach/msm_iomap.h> | ||
35 | |||
36 | /* Bits of interest in the GPIO_IN_OUT register. | ||
37 | */ | ||
38 | enum { | ||
39 | GPIO_IN = 0, | ||
40 | GPIO_OUT = 1 | ||
41 | }; | ||
42 | |||
43 | /* Bits of interest in the GPIO_INTR_STATUS register. | ||
44 | */ | ||
45 | enum { | ||
46 | INTR_STATUS = 0, | ||
47 | }; | ||
48 | |||
49 | /* Bits of interest in the GPIO_CFG register. | ||
50 | */ | ||
51 | enum { | ||
52 | GPIO_OE = 9, | ||
53 | }; | ||
54 | |||
55 | /* Bits of interest in the GPIO_INTR_CFG register. | ||
56 | * When a GPIO triggers, two separate decisions are made, controlled | ||
57 | * by two separate flags. | ||
58 | * | ||
59 | * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS | ||
60 | * register for that GPIO will be updated to reflect the triggering of that | ||
61 | * gpio. If this bit is 0, this register will not be updated. | ||
62 | * - Second, INTR_ENABLE controls whether an interrupt is triggered. | ||
63 | * | ||
64 | * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt | ||
65 | * can be triggered but the status register will not reflect it. | ||
66 | */ | ||
67 | enum { | ||
68 | INTR_ENABLE = 0, | ||
69 | INTR_POL_CTL = 1, | ||
70 | INTR_DECT_CTL = 2, | ||
71 | INTR_RAW_STATUS_EN = 3, | ||
72 | }; | ||
73 | |||
74 | /* Codes of interest in GPIO_INTR_CFG_SU. | ||
75 | */ | ||
76 | enum { | ||
77 | TARGET_PROC_SCORPION = 4, | ||
78 | TARGET_PROC_NONE = 7, | ||
79 | }; | ||
80 | |||
81 | |||
82 | #define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio))) | ||
83 | #define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio))) | ||
84 | #define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio))) | ||
85 | #define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio))) | ||
86 | #define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio))) | ||
87 | |||
88 | /** | ||
89 | * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure | ||
90 | * | ||
91 | * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By | ||
92 | * keeping track of which gpios are unmasked as irq sources, we avoid | ||
93 | * having to do readl calls on hundreds of iomapped registers each time | ||
94 | * the summary interrupt fires in order to locate the active interrupts. | ||
95 | * | ||
96 | * @wake_irqs: a bitmap for tracking which interrupt lines are enabled | ||
97 | * as wakeup sources. When the device is suspended, interrupts which are | ||
98 | * not wakeup sources are disabled. | ||
99 | * | ||
100 | * @dual_edge_irqs: a bitmap used to track which irqs are configured | ||
101 | * as dual-edge, as this is not supported by the hardware and requires | ||
102 | * some special handling in the driver. | ||
103 | */ | ||
104 | struct msm_gpio_dev { | ||
105 | struct gpio_chip gpio_chip; | ||
106 | DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS); | ||
107 | DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS); | ||
108 | DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS); | ||
109 | }; | ||
110 | |||
111 | static DEFINE_SPINLOCK(tlmm_lock); | ||
112 | |||
113 | static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip) | ||
114 | { | ||
115 | return container_of(chip, struct msm_gpio_dev, gpio_chip); | ||
116 | } | ||
117 | |||
118 | static inline void set_gpio_bits(unsigned n, void __iomem *reg) | ||
119 | { | ||
120 | writel(readl(reg) | n, reg); | ||
121 | } | ||
122 | |||
123 | static inline void clear_gpio_bits(unsigned n, void __iomem *reg) | ||
124 | { | ||
125 | writel(readl(reg) & ~n, reg); | ||
126 | } | ||
127 | |||
128 | static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) | ||
129 | { | ||
130 | return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN); | ||
131 | } | ||
132 | |||
133 | static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val) | ||
134 | { | ||
135 | writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset)); | ||
136 | } | ||
137 | |||
138 | static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
139 | { | ||
140 | unsigned long irq_flags; | ||
141 | |||
142 | spin_lock_irqsave(&tlmm_lock, irq_flags); | ||
143 | clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset)); | ||
144 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static int msm_gpio_direction_output(struct gpio_chip *chip, | ||
149 | unsigned offset, | ||
150 | int val) | ||
151 | { | ||
152 | unsigned long irq_flags; | ||
153 | |||
154 | spin_lock_irqsave(&tlmm_lock, irq_flags); | ||
155 | msm_gpio_set(chip, offset, val); | ||
156 | set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset)); | ||
157 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int msm_gpio_request(struct gpio_chip *chip, unsigned offset) | ||
162 | { | ||
163 | return msm_gpiomux_get(chip->base + offset); | ||
164 | } | ||
165 | |||
166 | static void msm_gpio_free(struct gpio_chip *chip, unsigned offset) | ||
167 | { | ||
168 | msm_gpiomux_put(chip->base + offset); | ||
169 | } | ||
170 | |||
171 | static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | ||
172 | { | ||
173 | return MSM_GPIO_TO_INT(chip->base + offset); | ||
174 | } | ||
175 | |||
176 | static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq) | ||
177 | { | ||
178 | return irq - MSM_GPIO_TO_INT(chip->base); | ||
179 | } | ||
180 | |||
181 | static struct msm_gpio_dev msm_gpio = { | ||
182 | .gpio_chip = { | ||
183 | .base = 0, | ||
184 | .ngpio = NR_GPIO_IRQS, | ||
185 | .direction_input = msm_gpio_direction_input, | ||
186 | .direction_output = msm_gpio_direction_output, | ||
187 | .get = msm_gpio_get, | ||
188 | .set = msm_gpio_set, | ||
189 | .to_irq = msm_gpio_to_irq, | ||
190 | .request = msm_gpio_request, | ||
191 | .free = msm_gpio_free, | ||
192 | }, | ||
193 | }; | ||
194 | |||
195 | /* For dual-edge interrupts in software, since the hardware has no | ||
196 | * such support: | ||
197 | * | ||
198 | * At appropriate moments, this function may be called to flip the polarity | ||
199 | * settings of both-edge irq lines to try and catch the next edge. | ||
200 | * | ||
201 | * The attempt is considered successful if: | ||
202 | * - the status bit goes high, indicating that an edge was caught, or | ||
203 | * - the input value of the gpio doesn't change during the attempt. | ||
204 | * If the value changes twice during the process, that would cause the first | ||
205 | * test to fail but would force the second, as two opposite | ||
206 | * transitions would cause a detection no matter the polarity setting. | ||
207 | * | ||
208 | * The do-loop tries to sledge-hammer closed the timing hole between | ||
209 | * the initial value-read and the polarity-write - if the line value changes | ||
210 | * during that window, an interrupt is lost, the new polarity setting is | ||
211 | * incorrect, and the first success test will fail, causing a retry. | ||
212 | * | ||
213 | * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c. | ||
214 | */ | ||
215 | static void msm_gpio_update_dual_edge_pos(unsigned gpio) | ||
216 | { | ||
217 | int loop_limit = 100; | ||
218 | unsigned val, val2, intstat; | ||
219 | |||
220 | do { | ||
221 | val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN); | ||
222 | if (val) | ||
223 | clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio)); | ||
224 | else | ||
225 | set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio)); | ||
226 | val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN); | ||
227 | intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS); | ||
228 | if (intstat || val == val2) | ||
229 | return; | ||
230 | } while (loop_limit-- > 0); | ||
231 | pr_err("dual-edge irq failed to stabilize, " | ||
232 | "interrupts dropped. %#08x != %#08x\n", | ||
233 | val, val2); | ||
234 | } | ||
235 | |||
236 | static void msm_gpio_irq_ack(struct irq_data *d) | ||
237 | { | ||
238 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); | ||
239 | |||
240 | writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); | ||
241 | if (test_bit(gpio, msm_gpio.dual_edge_irqs)) | ||
242 | msm_gpio_update_dual_edge_pos(gpio); | ||
243 | } | ||
244 | |||
245 | static void msm_gpio_irq_mask(struct irq_data *d) | ||
246 | { | ||
247 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); | ||
248 | unsigned long irq_flags; | ||
249 | |||
250 | spin_lock_irqsave(&tlmm_lock, irq_flags); | ||
251 | writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); | ||
252 | clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); | ||
253 | __clear_bit(gpio, msm_gpio.enabled_irqs); | ||
254 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | ||
255 | } | ||
256 | |||
257 | static void msm_gpio_irq_unmask(struct irq_data *d) | ||
258 | { | ||
259 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); | ||
260 | unsigned long irq_flags; | ||
261 | |||
262 | spin_lock_irqsave(&tlmm_lock, irq_flags); | ||
263 | __set_bit(gpio, msm_gpio.enabled_irqs); | ||
264 | set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); | ||
265 | writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); | ||
266 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | ||
267 | } | ||
268 | |||
269 | static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) | ||
270 | { | ||
271 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); | ||
272 | unsigned long irq_flags; | ||
273 | uint32_t bits; | ||
274 | |||
275 | spin_lock_irqsave(&tlmm_lock, irq_flags); | ||
276 | |||
277 | bits = readl(GPIO_INTR_CFG(gpio)); | ||
278 | |||
279 | if (flow_type & IRQ_TYPE_EDGE_BOTH) { | ||
280 | bits |= BIT(INTR_DECT_CTL); | ||
281 | __irq_set_handler_locked(d->irq, handle_edge_irq); | ||
282 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) | ||
283 | __set_bit(gpio, msm_gpio.dual_edge_irqs); | ||
284 | else | ||
285 | __clear_bit(gpio, msm_gpio.dual_edge_irqs); | ||
286 | } else { | ||
287 | bits &= ~BIT(INTR_DECT_CTL); | ||
288 | __irq_set_handler_locked(d->irq, handle_level_irq); | ||
289 | __clear_bit(gpio, msm_gpio.dual_edge_irqs); | ||
290 | } | ||
291 | |||
292 | if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) | ||
293 | bits |= BIT(INTR_POL_CTL); | ||
294 | else | ||
295 | bits &= ~BIT(INTR_POL_CTL); | ||
296 | |||
297 | writel(bits, GPIO_INTR_CFG(gpio)); | ||
298 | |||
299 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) | ||
300 | msm_gpio_update_dual_edge_pos(gpio); | ||
301 | |||
302 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * When the summary IRQ is raised, any number of GPIO lines may be high. | ||
309 | * It is the job of the summary handler to find all those GPIO lines | ||
310 | * which have been set as summary IRQ lines and which are triggered, | ||
311 | * and to call their interrupt handlers. | ||
312 | */ | ||
313 | static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
314 | { | ||
315 | unsigned long i; | ||
316 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
317 | |||
318 | chained_irq_enter(chip, desc); | ||
319 | |||
320 | for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS); | ||
321 | i < NR_GPIO_IRQS; | ||
322 | i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) { | ||
323 | if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS)) | ||
324 | generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip, | ||
325 | i)); | ||
326 | } | ||
327 | |||
328 | chained_irq_exit(chip, desc); | ||
329 | } | ||
330 | |||
331 | static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) | ||
332 | { | ||
333 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); | ||
334 | |||
335 | if (on) { | ||
336 | if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) | ||
337 | irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); | ||
338 | set_bit(gpio, msm_gpio.wake_irqs); | ||
339 | } else { | ||
340 | clear_bit(gpio, msm_gpio.wake_irqs); | ||
341 | if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) | ||
342 | irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static struct irq_chip msm_gpio_irq_chip = { | ||
349 | .name = "msmgpio", | ||
350 | .irq_mask = msm_gpio_irq_mask, | ||
351 | .irq_unmask = msm_gpio_irq_unmask, | ||
352 | .irq_ack = msm_gpio_irq_ack, | ||
353 | .irq_set_type = msm_gpio_irq_set_type, | ||
354 | .irq_set_wake = msm_gpio_irq_set_wake, | ||
355 | }; | ||
356 | |||
357 | static int __devinit msm_gpio_probe(struct platform_device *dev) | ||
358 | { | ||
359 | int i, irq, ret; | ||
360 | |||
361 | bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS); | ||
362 | bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS); | ||
363 | bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS); | ||
364 | msm_gpio.gpio_chip.label = dev->name; | ||
365 | ret = gpiochip_add(&msm_gpio.gpio_chip); | ||
366 | if (ret < 0) | ||
367 | return ret; | ||
368 | |||
369 | for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) { | ||
370 | irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i); | ||
371 | irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, | ||
372 | handle_level_irq); | ||
373 | set_irq_flags(irq, IRQF_VALID); | ||
374 | } | ||
375 | |||
376 | irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ, | ||
377 | msm_summary_irq_handler); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | static int __devexit msm_gpio_remove(struct platform_device *dev) | ||
382 | { | ||
383 | int ret = gpiochip_remove(&msm_gpio.gpio_chip); | ||
384 | |||
385 | if (ret < 0) | ||
386 | return ret; | ||
387 | |||
388 | irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static struct platform_driver msm_gpio_driver = { | ||
394 | .probe = msm_gpio_probe, | ||
395 | .remove = __devexit_p(msm_gpio_remove), | ||
396 | .driver = { | ||
397 | .name = "msmgpio", | ||
398 | .owner = THIS_MODULE, | ||
399 | }, | ||
400 | }; | ||
401 | |||
402 | static struct platform_device msm_device_gpio = { | ||
403 | .name = "msmgpio", | ||
404 | .id = -1, | ||
405 | }; | ||
406 | |||
407 | static int __init msm_gpio_init(void) | ||
408 | { | ||
409 | int rc; | ||
410 | |||
411 | rc = platform_driver_register(&msm_gpio_driver); | ||
412 | if (!rc) { | ||
413 | rc = platform_device_register(&msm_device_gpio); | ||
414 | if (rc) | ||
415 | platform_driver_unregister(&msm_gpio_driver); | ||
416 | } | ||
417 | |||
418 | return rc; | ||
419 | } | ||
420 | |||
421 | static void __exit msm_gpio_exit(void) | ||
422 | { | ||
423 | platform_device_unregister(&msm_device_gpio); | ||
424 | platform_driver_unregister(&msm_gpio_driver); | ||
425 | } | ||
426 | |||
427 | postcore_initcall(msm_gpio_init); | ||
428 | module_exit(msm_gpio_exit); | ||
429 | |||
430 | MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); | ||
431 | MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs"); | ||
432 | MODULE_LICENSE("GPL v2"); | ||
433 | MODULE_ALIAS("platform:msmgpio"); | ||
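The software dual-edge emulation above is only exercised when a consumer asks for an interrupt on both edges. A hedged consumer-side sketch; the pin number, label, and handler are arbitrary examples rather than anything from the patch:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* a real handler would debounce or queue work here */
}

static int example_request_both_edges(void)
{
	int irq, err;

	err = gpio_request(27, "example-button");	/* arbitrary pin number */
	if (err)
		return err;

	err = gpio_direction_input(27);
	if (err)
		goto out_free;

	irq = gpio_to_irq(27);				/* ends up in msm_gpio_to_irq() */
	err = request_irq(irq, example_isr,
			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			  "example-button", NULL);	/* exercises the dual-edge path above */
	if (err)
		goto out_free;

	return 0;

out_free:
	gpio_free(27);
	return err;
}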
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 9d8c892d07c9..9d2668a50872 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c | |||
@@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, | |||
90 | struct drm_device *dev = minor->dev; | 90 | struct drm_device *dev = minor->dev; |
91 | struct dentry *ent; | 91 | struct dentry *ent; |
92 | struct drm_info_node *tmp; | 92 | struct drm_info_node *tmp; |
93 | char name[64]; | ||
94 | int i, ret; | 93 | int i, ret; |
95 | 94 | ||
96 | for (i = 0; i < count; i++) { | 95 | for (i = 0; i < count; i++) { |
@@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, | |||
108 | ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, | 107 | ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, |
109 | root, tmp, &drm_debugfs_fops); | 108 | root, tmp, &drm_debugfs_fops); |
110 | if (!ent) { | 109 | if (!ent) { |
110 | char name[64]; | ||
111 | strncpy(name, root->d_name.name, | ||
112 | min(root->d_name.len, 64U)); | ||
111 | DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", | 113 | DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", |
112 | name, files[i].name); | 114 | name, files[i].name); |
113 | kfree(tmp); | 115 | kfree(tmp); |
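A note on the drm_debugfs.c hunk: the error path now builds the parent directory name only when debugfs_create_file() fails, but strncpy() with a count of min(root->d_name.len, 64U) copies exactly that many bytes and only terminates if it reaches the source NUL within the count; since d_name.len excludes the terminator, the buffer handed to DRM_ERROR() can end up unterminated. The snippet below shows a defensive way to do the same bounded copy; it is an illustration only, not part of the patch, and copy_dname() is an invented helper name.

    #include <stdio.h>
    #include <string.h>

    /* Copy at most size-1 bytes of a length-delimited name and always
     * NUL-terminate; len mirrors dentry->d_name.len, which counts bytes
     * without a terminator. */
    static void copy_dname(char *dst, size_t size, const char *src, size_t len)
    {
            size_t n = len < size - 1 ? len : size - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
    }

    int main(void)
    {
            char name[64];

            copy_dname(name, sizeof(name), "card0-debugfs-dir", 17);
            printf("parent dir: %s\n", name);
            return 0;
    }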
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 756af4d7ec74..7425e5c9bd75 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -127,6 +127,23 @@ static const u8 edid_header[] = { | |||
127 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 | 127 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 |
128 | }; | 128 | }; |
129 | 129 | ||
130 | /* | ||
131 | * Sanity check the header of the base EDID block. Return 8 if the header | ||
132 | * is perfect, down to 0 if it's totally wrong. | ||
133 | */ | ||
134 | int drm_edid_header_is_valid(const u8 *raw_edid) | ||
135 | { | ||
136 | int i, score = 0; | ||
137 | |||
138 | for (i = 0; i < sizeof(edid_header); i++) | ||
139 | if (raw_edid[i] == edid_header[i]) | ||
140 | score++; | ||
141 | |||
142 | return score; | ||
143 | } | ||
144 | EXPORT_SYMBOL(drm_edid_header_is_valid); | ||
145 | |||
146 | |||
130 | /* | 147 | /* |
131 | * Sanity check the EDID block (base or extension). Return 0 if the block | 148 | * Sanity check the EDID block (base or extension). Return 0 if the block |
132 | * doesn't check out, or 1 if it's valid. | 149 | * doesn't check out, or 1 if it's valid. |
@@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid) | |||
139 | struct edid *edid = (struct edid *)raw_edid; | 156 | struct edid *edid = (struct edid *)raw_edid; |
140 | 157 | ||
141 | if (raw_edid[0] == 0x00) { | 158 | if (raw_edid[0] == 0x00) { |
142 | int score = 0; | 159 | int score = drm_edid_header_is_valid(raw_edid); |
143 | |||
144 | for (i = 0; i < sizeof(edid_header); i++) | ||
145 | if (raw_edid[i] == edid_header[i]) | ||
146 | score++; | ||
147 | |||
148 | if (score == 8) ; | 160 | if (score == 8) ; |
149 | else if (score >= 6) { | 161 | else if (score >= 6) { |
150 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); | 162 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); |
@@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio); | |||
1439 | static void drm_add_display_info(struct edid *edid, | 1451 | static void drm_add_display_info(struct edid *edid, |
1440 | struct drm_display_info *info) | 1452 | struct drm_display_info *info) |
1441 | { | 1453 | { |
1454 | u8 *edid_ext; | ||
1455 | |||
1442 | info->width_mm = edid->width_cm * 10; | 1456 | info->width_mm = edid->width_cm * 10; |
1443 | info->height_mm = edid->height_cm * 10; | 1457 | info->height_mm = edid->height_cm * 10; |
1444 | 1458 | ||
@@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid, | |||
1483 | info->color_formats = DRM_COLOR_FORMAT_YCRCB444; | 1497 | info->color_formats = DRM_COLOR_FORMAT_YCRCB444; |
1484 | if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) | 1498 | if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) |
1485 | info->color_formats = DRM_COLOR_FORMAT_YCRCB422; | 1499 | info->color_formats = DRM_COLOR_FORMAT_YCRCB422; |
1500 | |||
1501 | /* Get data from CEA blocks if present */ | ||
1502 | edid_ext = drm_find_cea_extension(edid); | ||
1503 | if (!edid_ext) | ||
1504 | return; | ||
1505 | |||
1506 | info->cea_rev = edid_ext[1]; | ||
1486 | } | 1507 | } |
1487 | 1508 | ||
1488 | /** | 1509 | /** |
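drm_edid_header_is_valid() turns header validation into a byte-match score: 8 means a perfect header, and the caller in drm_edid_block_valid() keeps treating 6 or 7 as a fixable corruption. A standalone C version of the same scoring idea is below; the sample buffers are made up for the demonstration.

    #include <stdio.h>

    static const unsigned char edid_header[] = {
            0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
    };

    /* Count how many of the first 8 bytes match the canonical EDID header. */
    static int edid_header_score(const unsigned char *raw)
    {
            int score = 0;

            for (unsigned int i = 0; i < sizeof(edid_header); i++)
                    if (raw[i] == edid_header[i])
                            score++;
            return score;
    }

    int main(void)
    {
            unsigned char good[8]    = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
            unsigned char bitflip[8] = { 0x00, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x00 };

            printf("good header: %d/8, corrupted header: %d/8\n",
                   edid_header_score(good), edid_header_score(bitflip));
            return 0;
    }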
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2022a5c966bb..3830e9e478c0 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state) | |||
291 | if (!dev->irq_enabled) | 291 | if (!dev->irq_enabled) |
292 | return; | 292 | return; |
293 | 293 | ||
294 | if (state) | 294 | if (state) { |
295 | dev->driver->irq_uninstall(dev); | 295 | if (dev->driver->irq_uninstall) |
296 | else { | 296 | dev->driver->irq_uninstall(dev); |
297 | dev->driver->irq_preinstall(dev); | 297 | } else { |
298 | dev->driver->irq_postinstall(dev); | 298 | if (dev->driver->irq_preinstall) |
299 | dev->driver->irq_preinstall(dev); | ||
300 | if (dev->driver->irq_postinstall) | ||
301 | dev->driver->irq_postinstall(dev); | ||
299 | } | 302 | } |
300 | } | 303 | } |
301 | 304 | ||
@@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev) | |||
338 | DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); | 341 | DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); |
339 | 342 | ||
340 | /* Before installing handler */ | 343 | /* Before installing handler */ |
341 | dev->driver->irq_preinstall(dev); | 344 | if (dev->driver->irq_preinstall) |
345 | dev->driver->irq_preinstall(dev); | ||
342 | 346 | ||
343 | /* Install handler */ | 347 | /* Install handler */ |
344 | if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) | 348 | if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) |
@@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev) | |||
363 | vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); | 367 | vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); |
364 | 368 | ||
365 | /* After installing handler */ | 369 | /* After installing handler */ |
366 | ret = dev->driver->irq_postinstall(dev); | 370 | if (dev->driver->irq_postinstall) |
371 | ret = dev->driver->irq_postinstall(dev); | ||
372 | |||
367 | if (ret < 0) { | 373 | if (ret < 0) { |
368 | mutex_lock(&dev->struct_mutex); | 374 | mutex_lock(&dev->struct_mutex); |
369 | dev->irq_enabled = 0; | 375 | dev->irq_enabled = 0; |
370 | mutex_unlock(&dev->struct_mutex); | 376 | mutex_unlock(&dev->struct_mutex); |
377 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
378 | vga_client_register(dev->pdev, NULL, NULL, NULL); | ||
379 | free_irq(drm_dev_to_irq(dev), dev); | ||
371 | } | 380 | } |
372 | 381 | ||
373 | return ret; | 382 | return ret; |
@@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev) | |||
413 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 422 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
414 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 423 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
415 | 424 | ||
416 | dev->driver->irq_uninstall(dev); | 425 | if (dev->driver->irq_uninstall) |
426 | dev->driver->irq_uninstall(dev); | ||
417 | 427 | ||
418 | free_irq(drm_dev_to_irq(dev), dev); | 428 | free_irq(drm_dev_to_irq(dev), dev); |
419 | 429 | ||
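The drm_irq.c changes make all three irq_* driver hooks optional and unwind the VGA client registration and the requested interrupt when irq_postinstall() fails. The sketch below models the optional-hook calling convention with plain function pointers; the struct, names and rollback printout are invented for the example, and ret simply stays 0 when a hook is absent.

    #include <stdio.h>

    struct drv_irq_ops {
            /* every hook may be NULL, mirroring the relaxed driver contract */
            void (*preinstall)(void);
            int  (*postinstall)(void);
            void (*uninstall)(void);
    };

    static int irq_install(const struct drv_irq_ops *ops)
    {
            int ret = 0;

            if (ops->preinstall)
                    ops->preinstall();
            /* ... request the interrupt line here ... */
            if (ops->postinstall)
                    ret = ops->postinstall();
            if (ret < 0)
                    printf("postinstall failed, rolling back\n");
            return ret;
    }

    static void pre_hook(void) { printf("preinstall\n"); }

    int main(void)
    {
            /* a driver that only implements preinstall */
            struct drv_irq_ops partial = { .preinstall = pre_hook };

            return irq_install(&partial);
    }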
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e2662497d50f..a8ab6263e0d7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1338,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = { | |||
1338 | .llseek = default_llseek, | 1338 | .llseek = default_llseek, |
1339 | }; | 1339 | }; |
1340 | 1340 | ||
1341 | static int | ||
1342 | i915_max_freq_open(struct inode *inode, | ||
1343 | struct file *filp) | ||
1344 | { | ||
1345 | filp->private_data = inode->i_private; | ||
1346 | return 0; | ||
1347 | } | ||
1348 | |||
1349 | static ssize_t | ||
1350 | i915_max_freq_read(struct file *filp, | ||
1351 | char __user *ubuf, | ||
1352 | size_t max, | ||
1353 | loff_t *ppos) | ||
1354 | { | ||
1355 | struct drm_device *dev = filp->private_data; | ||
1356 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1357 | char buf[80]; | ||
1358 | int len; | ||
1359 | |||
1360 | len = snprintf(buf, sizeof (buf), | ||
1361 | "max freq: %d\n", dev_priv->max_delay * 50); | ||
1362 | |||
1363 | if (len > sizeof (buf)) | ||
1364 | len = sizeof (buf); | ||
1365 | |||
1366 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | ||
1367 | } | ||
1368 | |||
1369 | static ssize_t | ||
1370 | i915_max_freq_write(struct file *filp, | ||
1371 | const char __user *ubuf, | ||
1372 | size_t cnt, | ||
1373 | loff_t *ppos) | ||
1374 | { | ||
1375 | struct drm_device *dev = filp->private_data; | ||
1376 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1377 | char buf[20]; | ||
1378 | int val = 1; | ||
1379 | |||
1380 | if (cnt > 0) { | ||
1381 | if (cnt > sizeof (buf) - 1) | ||
1382 | return -EINVAL; | ||
1383 | |||
1384 | if (copy_from_user(buf, ubuf, cnt)) | ||
1385 | return -EFAULT; | ||
1386 | buf[cnt] = 0; | ||
1387 | |||
1388 | val = simple_strtoul(buf, NULL, 0); | ||
1389 | } | ||
1390 | |||
1391 | DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); | ||
1392 | |||
1393 | /* | ||
1394 | * Turbo will still be enabled, but won't go above the set value. | ||
1395 | */ | ||
1396 | dev_priv->max_delay = val / 50; | ||
1397 | |||
1398 | gen6_set_rps(dev, val / 50); | ||
1399 | |||
1400 | return cnt; | ||
1401 | } | ||
1402 | |||
1403 | static const struct file_operations i915_max_freq_fops = { | ||
1404 | .owner = THIS_MODULE, | ||
1405 | .open = i915_max_freq_open, | ||
1406 | .read = i915_max_freq_read, | ||
1407 | .write = i915_max_freq_write, | ||
1408 | .llseek = default_llseek, | ||
1409 | }; | ||
1410 | |||
1411 | static int | ||
1412 | i915_cache_sharing_open(struct inode *inode, | ||
1413 | struct file *filp) | ||
1414 | { | ||
1415 | filp->private_data = inode->i_private; | ||
1416 | return 0; | ||
1417 | } | ||
1418 | |||
1419 | static ssize_t | ||
1420 | i915_cache_sharing_read(struct file *filp, | ||
1421 | char __user *ubuf, | ||
1422 | size_t max, | ||
1423 | loff_t *ppos) | ||
1424 | { | ||
1425 | struct drm_device *dev = filp->private_data; | ||
1426 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1427 | char buf[80]; | ||
1428 | u32 snpcr; | ||
1429 | int len; | ||
1430 | |||
1431 | mutex_lock(&dev_priv->dev->struct_mutex); | ||
1432 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | ||
1433 | mutex_unlock(&dev_priv->dev->struct_mutex); | ||
1434 | |||
1435 | len = snprintf(buf, sizeof (buf), | ||
1436 | "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> | ||
1437 | GEN6_MBC_SNPCR_SHIFT); | ||
1438 | |||
1439 | if (len > sizeof (buf)) | ||
1440 | len = sizeof (buf); | ||
1441 | |||
1442 | return simple_read_from_buffer(ubuf, max, ppos, buf, len); | ||
1443 | } | ||
1444 | |||
1445 | static ssize_t | ||
1446 | i915_cache_sharing_write(struct file *filp, | ||
1447 | const char __user *ubuf, | ||
1448 | size_t cnt, | ||
1449 | loff_t *ppos) | ||
1450 | { | ||
1451 | struct drm_device *dev = filp->private_data; | ||
1452 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1453 | char buf[20]; | ||
1454 | u32 snpcr; | ||
1455 | int val = 1; | ||
1456 | |||
1457 | if (cnt > 0) { | ||
1458 | if (cnt > sizeof (buf) - 1) | ||
1459 | return -EINVAL; | ||
1460 | |||
1461 | if (copy_from_user(buf, ubuf, cnt)) | ||
1462 | return -EFAULT; | ||
1463 | buf[cnt] = 0; | ||
1464 | |||
1465 | val = simple_strtoul(buf, NULL, 0); | ||
1466 | } | ||
1467 | |||
1468 | if (val < 0 || val > 3) | ||
1469 | return -EINVAL; | ||
1470 | |||
1471 | DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); | ||
1472 | |||
1473 | /* Update the cache sharing policy here as well */ | ||
1474 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | ||
1475 | snpcr &= ~GEN6_MBC_SNPCR_MASK; | ||
1476 | snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); | ||
1477 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | ||
1478 | |||
1479 | return cnt; | ||
1480 | } | ||
1481 | |||
1482 | static const struct file_operations i915_cache_sharing_fops = { | ||
1483 | .owner = THIS_MODULE, | ||
1484 | .open = i915_cache_sharing_open, | ||
1485 | .read = i915_cache_sharing_read, | ||
1486 | .write = i915_cache_sharing_write, | ||
1487 | .llseek = default_llseek, | ||
1488 | }; | ||
1489 | |||
1341 | /* As the drm_debugfs_init() routines are called before dev->dev_private is | 1490 | /* As the drm_debugfs_init() routines are called before dev->dev_private is |
1342 | * allocated we need to hook into the minor for release. */ | 1491 | * allocated we need to hook into the minor for release. */ |
1343 | static int | 1492 | static int |
@@ -1437,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) | |||
1437 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); | 1586 | return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); |
1438 | } | 1587 | } |
1439 | 1588 | ||
1589 | static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor) | ||
1590 | { | ||
1591 | struct drm_device *dev = minor->dev; | ||
1592 | struct dentry *ent; | ||
1593 | |||
1594 | ent = debugfs_create_file("i915_max_freq", | ||
1595 | S_IRUGO | S_IWUSR, | ||
1596 | root, dev, | ||
1597 | &i915_max_freq_fops); | ||
1598 | if (IS_ERR(ent)) | ||
1599 | return PTR_ERR(ent); | ||
1600 | |||
1601 | return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops); | ||
1602 | } | ||
1603 | |||
1604 | static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor) | ||
1605 | { | ||
1606 | struct drm_device *dev = minor->dev; | ||
1607 | struct dentry *ent; | ||
1608 | |||
1609 | ent = debugfs_create_file("i915_cache_sharing", | ||
1610 | S_IRUGO | S_IWUSR, | ||
1611 | root, dev, | ||
1612 | &i915_cache_sharing_fops); | ||
1613 | if (IS_ERR(ent)) | ||
1614 | return PTR_ERR(ent); | ||
1615 | |||
1616 | return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops); | ||
1617 | } | ||
1618 | |||
1440 | static struct drm_info_list i915_debugfs_list[] = { | 1619 | static struct drm_info_list i915_debugfs_list[] = { |
1441 | {"i915_capabilities", i915_capabilities, 0}, | 1620 | {"i915_capabilities", i915_capabilities, 0}, |
1442 | {"i915_gem_objects", i915_gem_object_info, 0}, | 1621 | {"i915_gem_objects", i915_gem_object_info, 0}, |
@@ -1490,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor) | |||
1490 | ret = i915_forcewake_create(minor->debugfs_root, minor); | 1669 | ret = i915_forcewake_create(minor->debugfs_root, minor); |
1491 | if (ret) | 1670 | if (ret) |
1492 | return ret; | 1671 | return ret; |
1672 | ret = i915_max_freq_create(minor->debugfs_root, minor); | ||
1673 | if (ret) | ||
1674 | return ret; | ||
1675 | ret = i915_cache_sharing_create(minor->debugfs_root, minor); | ||
1676 | if (ret) | ||
1677 | return ret; | ||
1493 | 1678 | ||
1494 | return drm_debugfs_create_files(i915_debugfs_list, | 1679 | return drm_debugfs_create_files(i915_debugfs_list, |
1495 | I915_DEBUGFS_ENTRIES, | 1680 | I915_DEBUGFS_ENTRIES, |
@@ -1504,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor) | |||
1504 | 1, minor); | 1689 | 1, minor); |
1505 | drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, | 1690 | drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, |
1506 | 1, minor); | 1691 | 1, minor); |
1692 | drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, | ||
1693 | 1, minor); | ||
1694 | drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, | ||
1695 | 1, minor); | ||
1507 | } | 1696 | } |
1508 | 1697 | ||
1509 | #endif /* CONFIG_DEBUG_FS */ | 1698 | #endif /* CONFIG_DEBUG_FS */ |
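The i915_cache_sharing write handler updates a two-bit field in GEN6_MBCUNIT_SNPCR with the usual clear-then-insert read-modify-write. The standalone model below uses the mask and shift values from the hunk; the starting register value is arbitrary and the register read/write is replaced by a local variable.

    #include <stdint.h>
    #include <stdio.h>

    #define GEN6_MBC_SNPCR_SHIFT 21
    #define GEN6_MBC_SNPCR_MASK  (3u << 21)

    int main(void)
    {
            uint32_t snpcr = 0x00a01234;   /* pretend I915_READ() result */
            uint32_t val = 2;              /* new policy, validated to 0..3 */

            snpcr &= ~GEN6_MBC_SNPCR_MASK;          /* clear the field */
            snpcr |= val << GEN6_MBC_SNPCR_SHIFT;   /* insert the new value */

            printf("snpcr = 0x%08x, field now %u\n", snpcr,
                   (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT);
            return 0;
    }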
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 12712824a6d2..8a3942c4f099 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev) | |||
61 | static int i915_init_phys_hws(struct drm_device *dev) | 61 | static int i915_init_phys_hws(struct drm_device *dev) |
62 | { | 62 | { |
63 | drm_i915_private_t *dev_priv = dev->dev_private; | 63 | drm_i915_private_t *dev_priv = dev->dev_private; |
64 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
65 | 64 | ||
66 | /* Program Hardware Status Page */ | 65 | /* Program Hardware Status Page */ |
67 | dev_priv->status_page_dmah = | 66 | dev_priv->status_page_dmah = |
@@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
71 | DRM_ERROR("Can not allocate hardware status page\n"); | 70 | DRM_ERROR("Can not allocate hardware status page\n"); |
72 | return -ENOMEM; | 71 | return -ENOMEM; |
73 | } | 72 | } |
74 | ring->status_page.page_addr = | ||
75 | (void __force __iomem *)dev_priv->status_page_dmah->vaddr; | ||
76 | 73 | ||
77 | memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); | 74 | memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, |
75 | 0, PAGE_SIZE); | ||
78 | 76 | ||
79 | i915_write_hws_pga(dev); | 77 | i915_write_hws_pga(dev); |
80 | 78 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6867e193d85e..feb4f164fd1b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -544,6 +544,7 @@ typedef struct drm_i915_private { | |||
544 | u32 savePIPEB_LINK_M1; | 544 | u32 savePIPEB_LINK_M1; |
545 | u32 savePIPEB_LINK_N1; | 545 | u32 savePIPEB_LINK_N1; |
546 | u32 saveMCHBAR_RENDER_STANDBY; | 546 | u32 saveMCHBAR_RENDER_STANDBY; |
547 | u32 savePCH_PORT_HOTPLUG; | ||
547 | 548 | ||
548 | struct { | 549 | struct { |
549 | /** Bridge to intel-gtt-ko */ | 550 | /** Bridge to intel-gtt-ko */ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d1cd8b89f47d..a546a71fb060 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -3112,7 +3112,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3112 | 3112 | ||
3113 | if (pipelined != obj->ring) { | 3113 | if (pipelined != obj->ring) { |
3114 | ret = i915_gem_object_wait_rendering(obj); | 3114 | ret = i915_gem_object_wait_rendering(obj); |
3115 | if (ret) | 3115 | if (ret == -ERESTARTSYS) |
3116 | return ret; | 3116 | return ret; |
3117 | } | 3117 | } |
3118 | 3118 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 23d1ae67d279..02f96fd0d52d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
306 | struct drm_mode_config *mode_config = &dev->mode_config; | 306 | struct drm_mode_config *mode_config = &dev->mode_config; |
307 | struct intel_encoder *encoder; | 307 | struct intel_encoder *encoder; |
308 | 308 | ||
309 | mutex_lock(&mode_config->mutex); | ||
309 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | 310 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
310 | 311 | ||
311 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 312 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
312 | if (encoder->hot_plug) | 313 | if (encoder->hot_plug) |
313 | encoder->hot_plug(encoder); | 314 | encoder->hot_plug(encoder); |
314 | 315 | ||
316 | mutex_unlock(&mode_config->mutex); | ||
317 | |||
315 | /* Just fire off a uevent and let userspace tell us what to do */ | 318 | /* Just fire off a uevent and let userspace tell us what to do */ |
316 | drm_helper_hpd_irq_event(dev); | 319 | drm_helper_hpd_irq_event(dev); |
317 | } | 320 | } |
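Taking mode_config->mutex for the whole encoder walk keeps the hot_plug() callbacks from racing with a concurrent modeset that is editing the encoder list. The userspace analogue below uses a pthread mutex and a fixed hook table; all names and the table itself are invented, and it only illustrates the lock-around-the-iteration shape.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
    static void (*hot_plug_hooks[2])(void);

    static void hdmi_hot_plug(void) { printf("hdmi hotplug handled\n"); }

    /* Hold the configuration lock across the whole walk so hooks never see
     * a half-updated hook list. */
    static void hotplug_work(void)
    {
            pthread_mutex_lock(&config_lock);
            for (unsigned int i = 0; i < 2; i++)
                    if (hot_plug_hooks[i])
                            hot_plug_hooks[i]();
            pthread_mutex_unlock(&config_lock);
    }

    int main(void)
    {
            hot_plug_hooks[0] = hdmi_hot_plug;
            hotplug_work();
            return 0;
    }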
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 02db299f621a..d1331f771e2f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -78,6 +78,14 @@ | |||
78 | #define GRDOM_RENDER (1<<2) | 78 | #define GRDOM_RENDER (1<<2) |
79 | #define GRDOM_MEDIA (3<<2) | 79 | #define GRDOM_MEDIA (3<<2) |
80 | 80 | ||
81 | #define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ | ||
82 | #define GEN6_MBC_SNPCR_SHIFT 21 | ||
83 | #define GEN6_MBC_SNPCR_MASK (3<<21) | ||
84 | #define GEN6_MBC_SNPCR_MAX (0<<21) | ||
85 | #define GEN6_MBC_SNPCR_MED (1<<21) | ||
86 | #define GEN6_MBC_SNPCR_LOW (2<<21) | ||
87 | #define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ | ||
88 | |||
81 | #define GEN6_GDRST 0x941c | 89 | #define GEN6_GDRST 0x941c |
82 | #define GEN6_GRDOM_FULL (1 << 0) | 90 | #define GEN6_GRDOM_FULL (1 << 0) |
83 | #define GEN6_GRDOM_RENDER (1 << 1) | 91 | #define GEN6_GRDOM_RENDER (1 << 1) |
@@ -1506,6 +1514,7 @@ | |||
1506 | #define VIDEO_DIP_SELECT_AVI (0 << 19) | 1514 | #define VIDEO_DIP_SELECT_AVI (0 << 19) |
1507 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) | 1515 | #define VIDEO_DIP_SELECT_VENDOR (1 << 19) |
1508 | #define VIDEO_DIP_SELECT_SPD (3 << 19) | 1516 | #define VIDEO_DIP_SELECT_SPD (3 << 19) |
1517 | #define VIDEO_DIP_SELECT_MASK (3 << 19) | ||
1509 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) | 1518 | #define VIDEO_DIP_FREQ_ONCE (0 << 16) |
1510 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) | 1519 | #define VIDEO_DIP_FREQ_VSYNC (1 << 16) |
1511 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) | 1520 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) |
@@ -2084,9 +2093,6 @@ | |||
2084 | #define DP_PIPEB_SELECT (1 << 30) | 2093 | #define DP_PIPEB_SELECT (1 << 30) |
2085 | #define DP_PIPE_MASK (1 << 30) | 2094 | #define DP_PIPE_MASK (1 << 30) |
2086 | 2095 | ||
2087 | #define DP_PIPE_ENABLED(V, P) \ | ||
2088 | (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN)) | ||
2089 | |||
2090 | /* Link training mode - select a suitable mode for each stage */ | 2096 | /* Link training mode - select a suitable mode for each stage */ |
2091 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) | 2097 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) |
2092 | #define DP_LINK_TRAIN_PAT_2 (1 << 28) | 2098 | #define DP_LINK_TRAIN_PAT_2 (1 << 28) |
@@ -3024,6 +3030,20 @@ | |||
3024 | #define _TRANSA_DP_LINK_M2 0xe0048 | 3030 | #define _TRANSA_DP_LINK_M2 0xe0048 |
3025 | #define _TRANSA_DP_LINK_N2 0xe004c | 3031 | #define _TRANSA_DP_LINK_N2 0xe004c |
3026 | 3032 | ||
3033 | /* Per-transcoder DIP controls */ | ||
3034 | |||
3035 | #define _VIDEO_DIP_CTL_A 0xe0200 | ||
3036 | #define _VIDEO_DIP_DATA_A 0xe0208 | ||
3037 | #define _VIDEO_DIP_GCP_A 0xe0210 | ||
3038 | |||
3039 | #define _VIDEO_DIP_CTL_B 0xe1200 | ||
3040 | #define _VIDEO_DIP_DATA_B 0xe1208 | ||
3041 | #define _VIDEO_DIP_GCP_B 0xe1210 | ||
3042 | |||
3043 | #define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) | ||
3044 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) | ||
3045 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) | ||
3046 | |||
3027 | #define _TRANS_HTOTAL_B 0xe1000 | 3047 | #define _TRANS_HTOTAL_B 0xe1000 |
3028 | #define _TRANS_HBLANK_B 0xe1004 | 3048 | #define _TRANS_HBLANK_B 0xe1004 |
3029 | #define _TRANS_HSYNC_B 0xe1008 | 3049 | #define _TRANS_HSYNC_B 0xe1008 |
@@ -3076,6 +3096,16 @@ | |||
3076 | #define TRANS_6BPC (2<<5) | 3096 | #define TRANS_6BPC (2<<5) |
3077 | #define TRANS_12BPC (3<<5) | 3097 | #define TRANS_12BPC (3<<5) |
3078 | 3098 | ||
3099 | #define _TRANSA_CHICKEN2 0xf0064 | ||
3100 | #define _TRANSB_CHICKEN2 0xf1064 | ||
3101 | #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) | ||
3102 | #define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) | ||
3103 | |||
3104 | #define SOUTH_CHICKEN1 0xc2000 | ||
3105 | #define FDIA_PHASE_SYNC_SHIFT_OVR 19 | ||
3106 | #define FDIA_PHASE_SYNC_SHIFT_EN 18 | ||
3107 | #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) | ||
3108 | #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) | ||
3079 | #define SOUTH_CHICKEN2 0xc2004 | 3109 | #define SOUTH_CHICKEN2 0xc2004 |
3080 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) | 3110 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) |
3081 | 3111 | ||
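The new SOUTH_CHICKEN1 macros pack an override/enable bit pair per pipe into descending positions (pipe A at bits 19/18, B at 17/16, C at 15/14). The quick check below just expands the macros as defined in the hunk so the encoding is easy to see.

    #include <stdio.h>

    #define FDIA_PHASE_SYNC_SHIFT_OVR 19
    #define FDIA_PHASE_SYNC_SHIFT_EN  18
    #define FDI_PHASE_SYNC_OVR(pipe) (1u << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
    #define FDI_PHASE_SYNC_EN(pipe)  (1u << (FDIA_PHASE_SYNC_SHIFT_EN  - ((pipe) * 2)))

    int main(void)
    {
            for (int pipe = 0; pipe < 3; pipe++)
                    printf("pipe %c: OVR 0x%08x, EN 0x%08x\n", 'A' + pipe,
                           FDI_PHASE_SYNC_OVR(pipe), FDI_PHASE_SYNC_EN(pipe));
            return 0;
    }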
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 285758603ac8..87677d60d0df 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -812,6 +812,7 @@ int i915_save_state(struct drm_device *dev) | |||
812 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); | 812 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); |
813 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 813 | dev_priv->saveMCHBAR_RENDER_STANDBY = |
814 | I915_READ(RSTDBYCTL); | 814 | I915_READ(RSTDBYCTL); |
815 | dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); | ||
815 | } else { | 816 | } else { |
816 | dev_priv->saveIER = I915_READ(IER); | 817 | dev_priv->saveIER = I915_READ(IER); |
817 | dev_priv->saveIMR = I915_READ(IMR); | 818 | dev_priv->saveIMR = I915_READ(IMR); |
@@ -863,6 +864,7 @@ int i915_restore_state(struct drm_device *dev) | |||
863 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); | 864 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); |
864 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); | 865 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); |
865 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); | 866 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); |
867 | I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); | ||
866 | } else { | 868 | } else { |
867 | I915_WRITE(IER, dev_priv->saveIER); | 869 | I915_WRITE(IER, dev_priv->saveIER); |
868 | I915_WRITE(IMR, dev_priv->saveIMR); | 870 | I915_WRITE(IMR, dev_priv->saveIMR); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 393a39922e53..35364e68a091 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -980,11 +980,29 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | |||
980 | pipe_name(pipe)); | 980 | pipe_name(pipe)); |
981 | } | 981 | } |
982 | 982 | ||
983 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, | ||
984 | int reg, u32 port_sel, u32 val) | ||
985 | { | ||
986 | if ((val & DP_PORT_EN) == 0) | ||
987 | return false; | ||
988 | |||
989 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
990 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); | ||
991 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); | ||
992 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) | ||
993 | return false; | ||
994 | } else { | ||
995 | if ((val & DP_PIPE_MASK) != (pipe << 30)) | ||
996 | return false; | ||
997 | } | ||
998 | return true; | ||
999 | } | ||
1000 | |||
983 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | 1001 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, |
984 | enum pipe pipe, int reg) | 1002 | enum pipe pipe, int reg, u32 port_sel) |
985 | { | 1003 | { |
986 | u32 val = I915_READ(reg); | 1004 | u32 val = I915_READ(reg); |
987 | WARN(DP_PIPE_ENABLED(val, pipe), | 1005 | WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), |
988 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1006 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
989 | reg, pipe_name(pipe)); | 1007 | reg, pipe_name(pipe)); |
990 | } | 1008 | } |
@@ -1004,9 +1022,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1004 | int reg; | 1022 | int reg; |
1005 | u32 val; | 1023 | u32 val; |
1006 | 1024 | ||
1007 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); | 1025 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1008 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); | 1026 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1009 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); | 1027 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1010 | 1028 | ||
1011 | reg = PCH_ADPA; | 1029 | reg = PCH_ADPA; |
1012 | val = I915_READ(reg); | 1030 | val = I915_READ(reg); |
@@ -1276,6 +1294,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |||
1276 | intel_wait_for_pipe_off(dev_priv->dev, pipe); | 1294 | intel_wait_for_pipe_off(dev_priv->dev, pipe); |
1277 | } | 1295 | } |
1278 | 1296 | ||
1297 | /* | ||
1298 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1299 | * trigger in order to latch. The display address reg provides this. | ||
1300 | */ | ||
1301 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1302 | enum plane plane) | ||
1303 | { | ||
1304 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); | ||
1305 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); | ||
1306 | } | ||
1307 | |||
1279 | /** | 1308 | /** |
1280 | * intel_enable_plane - enable a display plane on a given pipe | 1309 | * intel_enable_plane - enable a display plane on a given pipe |
1281 | * @dev_priv: i915 private structure | 1310 | * @dev_priv: i915 private structure |
@@ -1299,20 +1328,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv, | |||
1299 | return; | 1328 | return; |
1300 | 1329 | ||
1301 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | 1330 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); |
1331 | intel_flush_display_plane(dev_priv, plane); | ||
1302 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1332 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1303 | } | 1333 | } |
1304 | 1334 | ||
1305 | /* | ||
1306 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1307 | * trigger in order to latch. The display address reg provides this. | ||
1308 | */ | ||
1309 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1310 | enum plane plane) | ||
1311 | { | ||
1312 | u32 reg = DSPADDR(plane); | ||
1313 | I915_WRITE(reg, I915_READ(reg)); | ||
1314 | } | ||
1315 | |||
1316 | /** | 1335 | /** |
1317 | * intel_disable_plane - disable a display plane | 1336 | * intel_disable_plane - disable a display plane |
1318 | * @dev_priv: i915 private structure | 1337 | * @dev_priv: i915 private structure |
@@ -1338,19 +1357,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv, | |||
1338 | } | 1357 | } |
1339 | 1358 | ||
1340 | static void disable_pch_dp(struct drm_i915_private *dev_priv, | 1359 | static void disable_pch_dp(struct drm_i915_private *dev_priv, |
1341 | enum pipe pipe, int reg) | 1360 | enum pipe pipe, int reg, u32 port_sel) |
1342 | { | 1361 | { |
1343 | u32 val = I915_READ(reg); | 1362 | u32 val = I915_READ(reg); |
1344 | if (DP_PIPE_ENABLED(val, pipe)) | 1363 | if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { |
1364 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); | ||
1345 | I915_WRITE(reg, val & ~DP_PORT_EN); | 1365 | I915_WRITE(reg, val & ~DP_PORT_EN); |
1366 | } | ||
1346 | } | 1367 | } |
1347 | 1368 | ||
1348 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | 1369 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, |
1349 | enum pipe pipe, int reg) | 1370 | enum pipe pipe, int reg) |
1350 | { | 1371 | { |
1351 | u32 val = I915_READ(reg); | 1372 | u32 val = I915_READ(reg); |
1352 | if (HDMI_PIPE_ENABLED(val, pipe)) | 1373 | if (HDMI_PIPE_ENABLED(val, pipe)) { |
1374 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", | ||
1375 | reg, pipe); | ||
1353 | I915_WRITE(reg, val & ~PORT_ENABLE); | 1376 | I915_WRITE(reg, val & ~PORT_ENABLE); |
1377 | } | ||
1354 | } | 1378 | } |
1355 | 1379 | ||
1356 | /* Disable any ports connected to this transcoder */ | 1380 | /* Disable any ports connected to this transcoder */ |
@@ -1362,9 +1386,9 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1362 | val = I915_READ(PCH_PP_CONTROL); | 1386 | val = I915_READ(PCH_PP_CONTROL); |
1363 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); | 1387 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); |
1364 | 1388 | ||
1365 | disable_pch_dp(dev_priv, pipe, PCH_DP_B); | 1389 | disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1366 | disable_pch_dp(dev_priv, pipe, PCH_DP_C); | 1390 | disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1367 | disable_pch_dp(dev_priv, pipe, PCH_DP_D); | 1391 | disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1368 | 1392 | ||
1369 | reg = PCH_ADPA; | 1393 | reg = PCH_ADPA; |
1370 | val = I915_READ(reg); | 1394 | val = I915_READ(reg); |
@@ -2096,7 +2120,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2096 | 2120 | ||
2097 | /* no fb bound */ | 2121 | /* no fb bound */ |
2098 | if (!crtc->fb) { | 2122 | if (!crtc->fb) { |
2099 | DRM_DEBUG_KMS("No FB bound\n"); | 2123 | DRM_ERROR("No FB bound\n"); |
2100 | return 0; | 2124 | return 0; |
2101 | } | 2125 | } |
2102 | 2126 | ||
@@ -2105,6 +2129,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2105 | case 1: | 2129 | case 1: |
2106 | break; | 2130 | break; |
2107 | default: | 2131 | default: |
2132 | DRM_ERROR("no plane for crtc\n"); | ||
2108 | return -EINVAL; | 2133 | return -EINVAL; |
2109 | } | 2134 | } |
2110 | 2135 | ||
@@ -2114,6 +2139,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2114 | NULL); | 2139 | NULL); |
2115 | if (ret != 0) { | 2140 | if (ret != 0) { |
2116 | mutex_unlock(&dev->struct_mutex); | 2141 | mutex_unlock(&dev->struct_mutex); |
2142 | DRM_ERROR("pin & fence failed\n"); | ||
2117 | return ret; | 2143 | return ret; |
2118 | } | 2144 | } |
2119 | 2145 | ||
@@ -2142,6 +2168,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2142 | if (ret) { | 2168 | if (ret) { |
2143 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 2169 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
2144 | mutex_unlock(&dev->struct_mutex); | 2170 | mutex_unlock(&dev->struct_mutex); |
2171 | DRM_ERROR("failed to update base address\n"); | ||
2145 | return ret; | 2172 | return ret; |
2146 | } | 2173 | } |
2147 | 2174 | ||
@@ -2248,6 +2275,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) | |||
2248 | FDI_FE_ERRC_ENABLE); | 2275 | FDI_FE_ERRC_ENABLE); |
2249 | } | 2276 | } |
2250 | 2277 | ||
2278 | static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) | ||
2279 | { | ||
2280 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2281 | u32 flags = I915_READ(SOUTH_CHICKEN1); | ||
2282 | |||
2283 | flags |= FDI_PHASE_SYNC_OVR(pipe); | ||
2284 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ | ||
2285 | flags |= FDI_PHASE_SYNC_EN(pipe); | ||
2286 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ | ||
2287 | POSTING_READ(SOUTH_CHICKEN1); | ||
2288 | } | ||
2289 | |||
2251 | /* The FDI link training functions for ILK/Ibexpeak. */ | 2290 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2252 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 2291 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2253 | { | 2292 | { |
@@ -2398,6 +2437,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
2398 | POSTING_READ(reg); | 2437 | POSTING_READ(reg); |
2399 | udelay(150); | 2438 | udelay(150); |
2400 | 2439 | ||
2440 | if (HAS_PCH_CPT(dev)) | ||
2441 | cpt_phase_pointer_enable(dev, pipe); | ||
2442 | |||
2401 | for (i = 0; i < 4; i++ ) { | 2443 | for (i = 0; i < 4; i++ ) { |
2402 | reg = FDI_TX_CTL(pipe); | 2444 | reg = FDI_TX_CTL(pipe); |
2403 | temp = I915_READ(reg); | 2445 | temp = I915_READ(reg); |
@@ -2514,6 +2556,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2514 | POSTING_READ(reg); | 2556 | POSTING_READ(reg); |
2515 | udelay(150); | 2557 | udelay(150); |
2516 | 2558 | ||
2559 | if (HAS_PCH_CPT(dev)) | ||
2560 | cpt_phase_pointer_enable(dev, pipe); | ||
2561 | |||
2517 | for (i = 0; i < 4; i++ ) { | 2562 | for (i = 0; i < 4; i++ ) { |
2518 | reg = FDI_TX_CTL(pipe); | 2563 | reg = FDI_TX_CTL(pipe); |
2519 | temp = I915_READ(reg); | 2564 | temp = I915_READ(reg); |
@@ -2623,6 +2668,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) | |||
2623 | } | 2668 | } |
2624 | } | 2669 | } |
2625 | 2670 | ||
2671 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) | ||
2672 | { | ||
2673 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2674 | u32 flags = I915_READ(SOUTH_CHICKEN1); | ||
2675 | |||
2676 | flags &= ~(FDI_PHASE_SYNC_EN(pipe)); | ||
2677 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ | ||
2678 | flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); | ||
2679 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ | ||
2680 | POSTING_READ(SOUTH_CHICKEN1); | ||
2681 | } | ||
2626 | static void ironlake_fdi_disable(struct drm_crtc *crtc) | 2682 | static void ironlake_fdi_disable(struct drm_crtc *crtc) |
2627 | { | 2683 | { |
2628 | struct drm_device *dev = crtc->dev; | 2684 | struct drm_device *dev = crtc->dev; |
@@ -2652,6 +2708,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) | |||
2652 | I915_WRITE(FDI_RX_CHICKEN(pipe), | 2708 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2653 | I915_READ(FDI_RX_CHICKEN(pipe) & | 2709 | I915_READ(FDI_RX_CHICKEN(pipe) & |
2654 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); | 2710 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); |
2711 | } else if (HAS_PCH_CPT(dev)) { | ||
2712 | cpt_phase_pointer_disable(dev, pipe); | ||
2655 | } | 2713 | } |
2656 | 2714 | ||
2657 | /* still set train pattern 1 */ | 2715 | /* still set train pattern 1 */ |
@@ -2862,14 +2920,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2862 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | 2920 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
2863 | } | 2921 | } |
2864 | 2922 | ||
2923 | /* | ||
2924 | * On ILK+ LUT must be loaded before the pipe is running but with | ||
2925 | * clocks enabled | ||
2926 | */ | ||
2927 | intel_crtc_load_lut(crtc); | ||
2928 | |||
2865 | intel_enable_pipe(dev_priv, pipe, is_pch_port); | 2929 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
2866 | intel_enable_plane(dev_priv, plane, pipe); | 2930 | intel_enable_plane(dev_priv, plane, pipe); |
2867 | 2931 | ||
2868 | if (is_pch_port) | 2932 | if (is_pch_port) |
2869 | ironlake_pch_enable(crtc); | 2933 | ironlake_pch_enable(crtc); |
2870 | 2934 | ||
2871 | intel_crtc_load_lut(crtc); | ||
2872 | |||
2873 | mutex_lock(&dev->struct_mutex); | 2935 | mutex_lock(&dev->struct_mutex); |
2874 | intel_update_fbc(dev); | 2936 | intel_update_fbc(dev); |
2875 | mutex_unlock(&dev->struct_mutex); | 2937 | mutex_unlock(&dev->struct_mutex); |
@@ -4538,7 +4600,9 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4538 | if (connector->encoder != encoder) | 4600 | if (connector->encoder != encoder) |
4539 | continue; | 4601 | continue; |
4540 | 4602 | ||
4541 | if (connector->display_info.bpc < display_bpc) { | 4603 | /* Don't use an invalid EDID bpc value */ |
4604 | if (connector->display_info.bpc && | ||
4605 | connector->display_info.bpc < display_bpc) { | ||
4542 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); | 4606 | DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); |
4543 | display_bpc = connector->display_info.bpc; | 4607 | display_bpc = connector->display_info.bpc; |
4544 | } | 4608 | } |
@@ -5153,7 +5217,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5153 | temp |= PIPE_12BPC; | 5217 | temp |= PIPE_12BPC; |
5154 | break; | 5218 | break; |
5155 | default: | 5219 | default: |
5156 | WARN(1, "intel_choose_pipe_bpp returned invalid value\n"); | 5220 | WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", |
5221 | pipe_bpp); | ||
5157 | temp |= PIPE_8BPC; | 5222 | temp |= PIPE_8BPC; |
5158 | pipe_bpp = 24; | 5223 | pipe_bpp = 24; |
5159 | break; | 5224 | break; |
@@ -5238,7 +5303,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5238 | } else if (is_sdvo && is_tv) | 5303 | } else if (is_sdvo && is_tv) |
5239 | factor = 20; | 5304 | factor = 20; |
5240 | 5305 | ||
5241 | if (clock.m1 < factor * clock.n) | 5306 | if (clock.m < factor * clock.n) |
5242 | fp |= FP_CB_TUNE; | 5307 | fp |= FP_CB_TUNE; |
5243 | 5308 | ||
5244 | dpll = 0; | 5309 | dpll = 0; |
@@ -5516,6 +5581,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5516 | 5581 | ||
5517 | drm_vblank_post_modeset(dev, pipe); | 5582 | drm_vblank_post_modeset(dev, pipe); |
5518 | 5583 | ||
5584 | intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; | ||
5585 | |||
5519 | return ret; | 5586 | return ret; |
5520 | } | 5587 | } |
5521 | 5588 | ||
@@ -7714,10 +7781,12 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
7714 | ILK_DPARB_CLK_GATE | | 7781 | ILK_DPARB_CLK_GATE | |
7715 | ILK_DPFD_CLK_GATE); | 7782 | ILK_DPFD_CLK_GATE); |
7716 | 7783 | ||
7717 | for_each_pipe(pipe) | 7784 | for_each_pipe(pipe) { |
7718 | I915_WRITE(DSPCNTR(pipe), | 7785 | I915_WRITE(DSPCNTR(pipe), |
7719 | I915_READ(DSPCNTR(pipe)) | | 7786 | I915_READ(DSPCNTR(pipe)) | |
7720 | DISPPLANE_TRICKLE_FEED_DISABLE); | 7787 | DISPPLANE_TRICKLE_FEED_DISABLE); |
7788 | intel_flush_display_plane(dev_priv, pipe); | ||
7789 | } | ||
7721 | } | 7790 | } |
7722 | 7791 | ||
7723 | static void ivybridge_init_clock_gating(struct drm_device *dev) | 7792 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
@@ -7734,10 +7803,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
7734 | 7803 | ||
7735 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | 7804 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
7736 | 7805 | ||
7737 | for_each_pipe(pipe) | 7806 | for_each_pipe(pipe) { |
7738 | I915_WRITE(DSPCNTR(pipe), | 7807 | I915_WRITE(DSPCNTR(pipe), |
7739 | I915_READ(DSPCNTR(pipe)) | | 7808 | I915_READ(DSPCNTR(pipe)) | |
7740 | DISPPLANE_TRICKLE_FEED_DISABLE); | 7809 | DISPPLANE_TRICKLE_FEED_DISABLE); |
7810 | intel_flush_display_plane(dev_priv, pipe); | ||
7811 | } | ||
7741 | } | 7812 | } |
7742 | 7813 | ||
7743 | static void g4x_init_clock_gating(struct drm_device *dev) | 7814 | static void g4x_init_clock_gating(struct drm_device *dev) |
@@ -7820,6 +7891,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) | |||
7820 | static void cpt_init_clock_gating(struct drm_device *dev) | 7891 | static void cpt_init_clock_gating(struct drm_device *dev) |
7821 | { | 7892 | { |
7822 | struct drm_i915_private *dev_priv = dev->dev_private; | 7893 | struct drm_i915_private *dev_priv = dev->dev_private; |
7894 | int pipe; | ||
7823 | 7895 | ||
7824 | /* | 7896 | /* |
7825 | * On Ibex Peak and Cougar Point, we need to disable clock | 7897 | * On Ibex Peak and Cougar Point, we need to disable clock |
@@ -7829,6 +7901,9 @@ static void cpt_init_clock_gating(struct drm_device *dev) | |||
7829 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | 7901 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); |
7830 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | 7902 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | |
7831 | DPLS_EDP_PPS_FIX_DIS); | 7903 | DPLS_EDP_PPS_FIX_DIS); |
7904 | /* Without this, mode sets may fail silently on FDI */ | ||
7905 | for_each_pipe(pipe) | ||
7906 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
7832 | } | 7907 | } |
7833 | 7908 | ||
7834 | static void ironlake_teardown_rc6(struct drm_device *dev) | 7909 | static void ironlake_teardown_rc6(struct drm_device *dev) |
@@ -8178,6 +8253,9 @@ struct intel_quirk intel_quirks[] = { | |||
8178 | 8253 | ||
8179 | /* Lenovo U160 cannot use SSC on LVDS */ | 8254 | /* Lenovo U160 cannot use SSC on LVDS */ |
8180 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | 8255 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
8256 | |||
8257 | /* Sony Vaio Y cannot use SSC on LVDS */ | ||
8258 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | ||
8181 | }; | 8259 | }; |
8182 | 8260 | ||
8183 | static void intel_init_quirks(struct drm_device *dev) | 8261 | static void intel_init_quirks(struct drm_device *dev) |
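Among the intel_display.c changes, intel_choose_pipe_bpp_dither() now skips connectors whose EDID-reported bpc is zero, so an unknown value cannot clamp the pipe depth. The loop below models just that clamping decision; the connector values are invented for the example.

    #include <stdio.h>

    int main(void)
    {
            /* EDID-reported bits per channel; 0 means "not reported" and,
             * with the added check, must not drag the pipe depth down. */
            unsigned int connector_bpc[] = { 0, 8, 6 };
            unsigned int display_bpc = 12;    /* start from the pipe maximum */

            for (unsigned int i = 0; i < sizeof(connector_bpc) / sizeof(connector_bpc[0]); i++)
                    if (connector_bpc[i] && connector_bpc[i] < display_bpc)
                            display_bpc = connector_bpc[i];

            printf("clamped display bpc: %u\n", display_bpc);  /* 6, not 0 */
            return 0;
    }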
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f797fb58ba9c..0feae908bb37 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -50,9 +50,10 @@ struct intel_dp { | |||
50 | bool has_audio; | 50 | bool has_audio; |
51 | int force_audio; | 51 | int force_audio; |
52 | uint32_t color_range; | 52 | uint32_t color_range; |
53 | int dpms_mode; | ||
53 | uint8_t link_bw; | 54 | uint8_t link_bw; |
54 | uint8_t lane_count; | 55 | uint8_t lane_count; |
55 | uint8_t dpcd[4]; | 56 | uint8_t dpcd[8]; |
56 | struct i2c_adapter adapter; | 57 | struct i2c_adapter adapter; |
57 | struct i2c_algo_dp_aux_data algo; | 58 | struct i2c_algo_dp_aux_data algo; |
58 | bool is_pch_edp; | 59 | bool is_pch_edp; |
@@ -316,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
316 | else | 317 | else |
317 | precharge = 5; | 318 | precharge = 5; |
318 | 319 | ||
319 | if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { | 320 | /* Try to wait for any previous AUX channel activity */ |
320 | DRM_ERROR("dp_aux_ch not started status 0x%08x\n", | 321 | for (try = 0; try < 3; try++) { |
321 | I915_READ(ch_ctl)); | 322 | status = I915_READ(ch_ctl); |
323 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) | ||
324 | break; | ||
325 | msleep(1); | ||
326 | } | ||
327 | |||
328 | if (try == 3) { | ||
329 | WARN(1, "dp_aux_ch not started status 0x%08x\n", | ||
330 | I915_READ(ch_ctl)); | ||
322 | return -EBUSY; | 331 | return -EBUSY; |
323 | } | 332 | } |
324 | 333 | ||
@@ -770,6 +779,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
770 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 779 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
771 | intel_dp->link_configuration[0] = intel_dp->link_bw; | 780 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
772 | intel_dp->link_configuration[1] = intel_dp->lane_count; | 781 | intel_dp->link_configuration[1] = intel_dp->lane_count; |
782 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; | ||
773 | 783 | ||
774 | /* | 784 | /* |
775 | * Check for DPCD version > 1.1 and enhanced framing support | 785 | * Check for DPCD version > 1.1 and enhanced framing support |
@@ -1011,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder) | |||
1011 | 1021 | ||
1012 | if (is_edp(intel_dp)) | 1022 | if (is_edp(intel_dp)) |
1013 | ironlake_edp_backlight_on(dev); | 1023 | ironlake_edp_backlight_on(dev); |
1024 | |||
1025 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | ||
1014 | } | 1026 | } |
1015 | 1027 | ||
1016 | static void | 1028 | static void |
@@ -1045,6 +1057,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
1045 | if (is_edp(intel_dp)) | 1057 | if (is_edp(intel_dp)) |
1046 | ironlake_edp_backlight_on(dev); | 1058 | ironlake_edp_backlight_on(dev); |
1047 | } | 1059 | } |
1060 | intel_dp->dpms_mode = mode; | ||
1048 | } | 1061 | } |
1049 | 1062 | ||
1050 | /* | 1063 | /* |
@@ -1334,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1334 | u32 reg; | 1347 | u32 reg; |
1335 | uint32_t DP = intel_dp->DP; | 1348 | uint32_t DP = intel_dp->DP; |
1336 | 1349 | ||
1337 | /* Enable output, wait for it to become active */ | 1350 | /* |
1338 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | 1351 | * On CPT we have to enable the port in training pattern 1, which |
1339 | POSTING_READ(intel_dp->output_reg); | 1352 | * will happen below in intel_dp_set_link_train. Otherwise, enable |
1340 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1353 | * the port and wait for it to become active. |
1354 | */ | ||
1355 | if (!HAS_PCH_CPT(dev)) { | ||
1356 | I915_WRITE(intel_dp->output_reg, intel_dp->DP); | ||
1357 | POSTING_READ(intel_dp->output_reg); | ||
1358 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1359 | } | ||
1341 | 1360 | ||
1342 | /* Write the link configuration data */ | 1361 | /* Write the link configuration data */ |
1343 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1362 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
@@ -1370,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1370 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1389 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1371 | 1390 | ||
1372 | if (!intel_dp_set_link_train(intel_dp, reg, | 1391 | if (!intel_dp_set_link_train(intel_dp, reg, |
1373 | DP_TRAINING_PATTERN_1)) | 1392 | DP_TRAINING_PATTERN_1 | |
1393 | DP_LINK_SCRAMBLING_DISABLE)) | ||
1374 | break; | 1394 | break; |
1375 | /* Set training pattern 1 */ | 1395 | /* Set training pattern 1 */ |
1376 | 1396 | ||
@@ -1445,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1445 | 1465 | ||
1446 | /* channel eq pattern */ | 1466 | /* channel eq pattern */ |
1447 | if (!intel_dp_set_link_train(intel_dp, reg, | 1467 | if (!intel_dp_set_link_train(intel_dp, reg, |
1448 | DP_TRAINING_PATTERN_2)) | 1468 | DP_TRAINING_PATTERN_2 | |
1469 | DP_LINK_SCRAMBLING_DISABLE)) | ||
1449 | break; | 1470 | break; |
1450 | 1471 | ||
1451 | udelay(400); | 1472 | udelay(400); |
@@ -1559,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1559 | POSTING_READ(intel_dp->output_reg); | 1580 | POSTING_READ(intel_dp->output_reg); |
1560 | } | 1581 | } |
1561 | 1582 | ||
1583 | static bool | ||
1584 | intel_dp_get_dpcd(struct intel_dp *intel_dp) | ||
1585 | { | ||
1586 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, | ||
1587 | sizeof (intel_dp->dpcd)) && | ||
1588 | (intel_dp->dpcd[DP_DPCD_REV] != 0)) { | ||
1589 | return true; | ||
1590 | } | ||
1591 | |||
1592 | return false; | ||
1593 | } | ||
1594 | |||
1562 | /* | 1595 | /* |
1563 | * According to DP spec | 1596 | * According to DP spec |
1564 | * 5.1.2: | 1597 | * 5.1.2: |
@@ -1571,36 +1604,44 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1571 | static void | 1604 | static void |
1572 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 1605 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1573 | { | 1606 | { |
1574 | int ret; | 1607 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) |
1608 | return; | ||
1575 | 1609 | ||
1576 | if (!intel_dp->base.base.crtc) | 1610 | if (!intel_dp->base.base.crtc) |
1577 | return; | 1611 | return; |
1578 | 1612 | ||
1613 | /* Try to read receiver status if the link appears to be up */ | ||
1579 | if (!intel_dp_get_link_status(intel_dp)) { | 1614 | if (!intel_dp_get_link_status(intel_dp)) { |
1580 | intel_dp_link_down(intel_dp); | 1615 | intel_dp_link_down(intel_dp); |
1581 | return; | 1616 | return; |
1582 | } | 1617 | } |
1583 | 1618 | ||
1584 | /* Try to read receiver status if the link appears to be up */ | 1619 | /* Now read the DPCD to see if it's actually running */ |
1585 | ret = intel_dp_aux_native_read(intel_dp, | 1620 | if (!intel_dp_get_dpcd(intel_dp)) { |
1586 | 0x000, intel_dp->dpcd, | ||
1587 | sizeof (intel_dp->dpcd)); | ||
1588 | if (ret != sizeof(intel_dp->dpcd)) { | ||
1589 | intel_dp_link_down(intel_dp); | 1621 | intel_dp_link_down(intel_dp); |
1590 | return; | 1622 | return; |
1591 | } | 1623 | } |
1592 | 1624 | ||
1593 | if (!intel_channel_eq_ok(intel_dp)) { | 1625 | if (!intel_channel_eq_ok(intel_dp)) { |
1626 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | ||
1627 | drm_get_encoder_name(&intel_dp->base.base)); | ||
1594 | intel_dp_start_link_train(intel_dp); | 1628 | intel_dp_start_link_train(intel_dp); |
1595 | intel_dp_complete_link_train(intel_dp); | 1629 | intel_dp_complete_link_train(intel_dp); |
1596 | } | 1630 | } |
1597 | } | 1631 | } |
1598 | 1632 | ||
1599 | static enum drm_connector_status | 1633 | static enum drm_connector_status |
1634 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) | ||
1635 | { | ||
1636 | if (intel_dp_get_dpcd(intel_dp)) | ||
1637 | return connector_status_connected; | ||
1638 | return connector_status_disconnected; | ||
1639 | } | ||
1640 | |||
1641 | static enum drm_connector_status | ||
1600 | ironlake_dp_detect(struct intel_dp *intel_dp) | 1642 | ironlake_dp_detect(struct intel_dp *intel_dp) |
1601 | { | 1643 | { |
1602 | enum drm_connector_status status; | 1644 | enum drm_connector_status status; |
1603 | bool ret; | ||
1604 | 1645 | ||
1605 | /* Can't disconnect eDP, but you can close the lid... */ | 1646 | /* Can't disconnect eDP, but you can close the lid... */ |
1606 | if (is_edp(intel_dp)) { | 1647 | if (is_edp(intel_dp)) { |
@@ -1610,15 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp) | |||
1610 | return status; | 1651 | return status; |
1611 | } | 1652 | } |
1612 | 1653 | ||
1613 | status = connector_status_disconnected; | 1654 | return intel_dp_detect_dpcd(intel_dp); |
1614 | ret = intel_dp_aux_native_read_retry(intel_dp, | ||
1615 | 0x000, intel_dp->dpcd, | ||
1616 | sizeof (intel_dp->dpcd)); | ||
1617 | if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0) | ||
1618 | status = connector_status_connected; | ||
1619 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], | ||
1620 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); | ||
1621 | return status; | ||
1622 | } | 1655 | } |
1623 | 1656 | ||
1624 | static enum drm_connector_status | 1657 | static enum drm_connector_status |
@@ -1626,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1626 | { | 1659 | { |
1627 | struct drm_device *dev = intel_dp->base.base.dev; | 1660 | struct drm_device *dev = intel_dp->base.base.dev; |
1628 | struct drm_i915_private *dev_priv = dev->dev_private; | 1661 | struct drm_i915_private *dev_priv = dev->dev_private; |
1629 | enum drm_connector_status status; | ||
1630 | uint32_t temp, bit; | 1662 | uint32_t temp, bit; |
1631 | 1663 | ||
1632 | switch (intel_dp->output_reg) { | 1664 | switch (intel_dp->output_reg) { |
@@ -1648,15 +1680,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1648 | if ((temp & bit) == 0) | 1680 | if ((temp & bit) == 0) |
1649 | return connector_status_disconnected; | 1681 | return connector_status_disconnected; |
1650 | 1682 | ||
1651 | status = connector_status_disconnected; | 1683 | return intel_dp_detect_dpcd(intel_dp); |
1652 | if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, | ||
1653 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) | ||
1654 | { | ||
1655 | if (intel_dp->dpcd[DP_DPCD_REV] != 0) | ||
1656 | status = connector_status_connected; | ||
1657 | } | ||
1658 | |||
1659 | return status; | ||
1660 | } | 1684 | } |
1661 | 1685 | ||
1662 | /** | 1686 | /** |
@@ -1679,6 +1703,12 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
1679 | status = ironlake_dp_detect(intel_dp); | 1703 | status = ironlake_dp_detect(intel_dp); |
1680 | else | 1704 | else |
1681 | status = g4x_dp_detect(intel_dp); | 1705 | status = g4x_dp_detect(intel_dp); |
1706 | |||
1707 | DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", | ||
1708 | intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], | ||
1709 | intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], | ||
1710 | intel_dp->dpcd[6], intel_dp->dpcd[7]); | ||
1711 | |||
1682 | if (status != connector_status_connected) | 1712 | if (status != connector_status_connected) |
1683 | return status; | 1713 | return status; |
1684 | 1714 | ||
@@ -1924,6 +1954,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1924 | return; | 1954 | return; |
1925 | 1955 | ||
1926 | intel_dp->output_reg = output_reg; | 1956 | intel_dp->output_reg = output_reg; |
1957 | intel_dp->dpms_mode = -1; | ||
1927 | 1958 | ||
1928 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 1959 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1929 | if (!intel_connector) { | 1960 | if (!intel_connector) { |
@@ -2000,7 +2031,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2000 | 2031 | ||
2001 | /* Cache some DPCD data in the eDP case */ | 2032 | /* Cache some DPCD data in the eDP case */ |
2002 | if (is_edp(intel_dp)) { | 2033 | if (is_edp(intel_dp)) { |
2003 | int ret; | 2034 | bool ret; |
2004 | u32 pp_on, pp_div; | 2035 | u32 pp_on, pp_div; |
2005 | 2036 | ||
2006 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | 2037 | pp_on = I915_READ(PCH_PP_ON_DELAYS); |
@@ -2013,11 +2044,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2013 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ | 2044 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ |
2014 | 2045 | ||
2015 | ironlake_edp_panel_vdd_on(intel_dp); | 2046 | ironlake_edp_panel_vdd_on(intel_dp); |
2016 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, | 2047 | ret = intel_dp_get_dpcd(intel_dp); |
2017 | intel_dp->dpcd, | ||
2018 | sizeof(intel_dp->dpcd)); | ||
2019 | ironlake_edp_panel_vdd_off(intel_dp); | 2048 | ironlake_edp_panel_vdd_off(intel_dp); |
2020 | if (ret == sizeof(intel_dp->dpcd)) { | 2049 | if (ret) { |
2021 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 2050 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
2022 | dev_priv->no_aux_handshake = | 2051 | dev_priv->no_aux_handshake = |
2023 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & | 2052 | intel_dp->dpcd[DP_MAX_DOWNSPREAD] & |
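The hunks above replace an open-coded DPCD read in g4x_dp_detect() and in the eDP init path with a shared helper. A minimal sketch of the underlying test, mirroring the removed code rather than the new intel_dp_get_dpcd() itself (the helper name and return convention here are only illustrative assumptions):

	/* Sketch: read the receiver capability bytes over AUX starting at
	 * DP_DPCD_REV (address 0x000) and treat a non-zero revision as
	 * "sink present". */
	static bool sketch_dp_sink_present(struct intel_dp *intel_dp)
	{
		if (intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, intel_dp->dpcd,
					     sizeof(intel_dp->dpcd)) != sizeof(intel_dp->dpcd))
			return false;	/* short or failed AUX transaction */

		return intel_dp->dpcd[DP_DPCD_REV] != 0;
	}
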
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6e990f9760ef..7b330e76a435 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -178,10 +178,28 @@ struct intel_crtc { | |||
178 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) | 178 | #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
179 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 179 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
180 | 180 | ||
181 | #define DIP_HEADER_SIZE 5 | ||
182 | |||
181 | #define DIP_TYPE_AVI 0x82 | 183 | #define DIP_TYPE_AVI 0x82 |
182 | #define DIP_VERSION_AVI 0x2 | 184 | #define DIP_VERSION_AVI 0x2 |
183 | #define DIP_LEN_AVI 13 | 185 | #define DIP_LEN_AVI 13 |
184 | 186 | ||
187 | #define DIP_TYPE_SPD 0x3 | ||
188 | #define DIP_VERSION_SPD 0x1 | ||
189 | #define DIP_LEN_SPD 25 | ||
190 | #define DIP_SPD_UNKNOWN 0 | ||
191 | #define DIP_SPD_DSTB 0x1 | ||
192 | #define DIP_SPD_DVDP 0x2 | ||
193 | #define DIP_SPD_DVHS 0x3 | ||
194 | #define DIP_SPD_HDDVR 0x4 | ||
195 | #define DIP_SPD_DVC 0x5 | ||
196 | #define DIP_SPD_DSC 0x6 | ||
197 | #define DIP_SPD_VCD 0x7 | ||
198 | #define DIP_SPD_GAME 0x8 | ||
199 | #define DIP_SPD_PC 0x9 | ||
200 | #define DIP_SPD_BD 0xa | ||
201 | #define DIP_SPD_SCD 0xb | ||
202 | |||
185 | struct dip_infoframe { | 203 | struct dip_infoframe { |
186 | uint8_t type; /* HB0 */ | 204 | uint8_t type; /* HB0 */ |
187 | uint8_t ver; /* HB1 */ | 205 | uint8_t ver; /* HB1 */ |
@@ -206,6 +224,11 @@ struct dip_infoframe { | |||
206 | uint16_t left_bar_end; | 224 | uint16_t left_bar_end; |
207 | uint16_t right_bar_start; | 225 | uint16_t right_bar_start; |
208 | } avi; | 226 | } avi; |
227 | struct { | ||
228 | uint8_t vn[8]; | ||
229 | uint8_t pd[16]; | ||
230 | uint8_t sdi; | ||
231 | } spd; | ||
209 | uint8_t payload[27]; | 232 | uint8_t payload[27]; |
210 | } __attribute__ ((packed)) body; | 233 | } __attribute__ ((packed)) body; |
211 | } __attribute__((packed)); | 234 | } __attribute__((packed)); |
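The new spd member packs an 8-byte vendor name, a 16-byte product description and a 1-byte source device information field, which is exactly the DIP_LEN_SPD payload declared above. A compile-time sanity check along these lines could sit next to the defines; it is only an illustrative sketch, not part of the patch:

	#include <linux/kernel.h>

	static inline void dip_spd_layout_check(void)
	{
		BUILD_BUG_ON(8 + 16 + 1 != DIP_LEN_SPD);	/* vn[] + pd[] + sdi */
		BUILD_BUG_ON(DIP_HEADER_SIZE != 5);	/* type, ver, len plus ecc and checksum */
	}
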
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1ed8e6903915..226ba830f383 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -45,6 +45,8 @@ struct intel_hdmi { | |||
45 | bool has_hdmi_sink; | 45 | bool has_hdmi_sink; |
46 | bool has_audio; | 46 | bool has_audio; |
47 | int force_audio; | 47 | int force_audio; |
48 | void (*write_infoframe)(struct drm_encoder *encoder, | ||
49 | struct dip_infoframe *frame); | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | 52 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
@@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) | |||
58 | struct intel_hdmi, base); | 60 | struct intel_hdmi, base); |
59 | } | 61 | } |
60 | 62 | ||
61 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) | 63 | void intel_dip_infoframe_csum(struct dip_infoframe *frame) |
62 | { | 64 | { |
63 | uint8_t *data = (uint8_t *)avi_if; | 65 | uint8_t *data = (uint8_t *)frame; |
64 | uint8_t sum = 0; | 66 | uint8_t sum = 0; |
65 | unsigned i; | 67 | unsigned i; |
66 | 68 | ||
67 | avi_if->checksum = 0; | 69 | frame->checksum = 0; |
68 | avi_if->ecc = 0; | 70 | frame->ecc = 0; |
69 | 71 | ||
70 | for (i = 0; i < sizeof(*avi_if); i++) | 72 | /* Header isn't part of the checksum */ |
73 | for (i = 5; i < frame->len; i++) | ||
71 | sum += data[i]; | 74 | sum += data[i]; |
72 | 75 | ||
73 | avi_if->checksum = 0x100 - sum; | 76 | frame->checksum = 0x100 - sum; |
74 | } | 77 | } |
75 | 78 | ||
76 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | 79 | static u32 intel_infoframe_index(struct dip_infoframe *frame) |
77 | { | 80 | { |
78 | struct dip_infoframe avi_if = { | 81 | u32 flags = 0; |
79 | .type = DIP_TYPE_AVI, | 82 | |
80 | .ver = DIP_VERSION_AVI, | 83 | switch (frame->type) { |
81 | .len = DIP_LEN_AVI, | 84 | case DIP_TYPE_AVI: |
82 | }; | 85 | flags |= VIDEO_DIP_SELECT_AVI; |
83 | uint32_t *data = (uint32_t *)&avi_if; | 86 | break; |
87 | case DIP_TYPE_SPD: | ||
88 | flags |= VIDEO_DIP_SELECT_SPD; | ||
89 | break; | ||
90 | default: | ||
91 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); | ||
92 | break; | ||
93 | } | ||
94 | |||
95 | return flags; | ||
96 | } | ||
97 | |||
98 | static u32 intel_infoframe_flags(struct dip_infoframe *frame) | ||
99 | { | ||
100 | u32 flags = 0; | ||
101 | |||
102 | switch (frame->type) { | ||
103 | case DIP_TYPE_AVI: | ||
104 | flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; | ||
105 | break; | ||
106 | case DIP_TYPE_SPD: | ||
107 | flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC; | ||
108 | break; | ||
109 | default: | ||
110 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); | ||
111 | break; | ||
112 | } | ||
113 | |||
114 | return flags; | ||
115 | } | ||
116 | |||
117 | static void i9xx_write_infoframe(struct drm_encoder *encoder, | ||
118 | struct dip_infoframe *frame) | ||
119 | { | ||
120 | uint32_t *data = (uint32_t *)frame; | ||
84 | struct drm_device *dev = encoder->dev; | 121 | struct drm_device *dev = encoder->dev; |
85 | struct drm_i915_private *dev_priv = dev->dev_private; | 122 | struct drm_i915_private *dev_priv = dev->dev_private; |
86 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 123 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
87 | u32 port; | 124 | u32 port, flags, val = I915_READ(VIDEO_DIP_CTL); |
88 | unsigned i; | 125 | unsigned i, len = DIP_HEADER_SIZE + frame->len; |
89 | 126 | ||
90 | if (!intel_hdmi->has_hdmi_sink) | ||
91 | return; | ||
92 | 127 | ||
93 | /* XXX first guess at handling video port, is this correct? */ | 128 | /* XXX first guess at handling video port, is this correct? */ |
94 | if (intel_hdmi->sdvox_reg == SDVOB) | 129 | if (intel_hdmi->sdvox_reg == SDVOB) |
@@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | |||
98 | else | 133 | else |
99 | return; | 134 | return; |
100 | 135 | ||
101 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | 136 | flags = intel_infoframe_index(frame); |
102 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); | 137 | |
138 | val &= ~VIDEO_DIP_SELECT_MASK; | ||
103 | 139 | ||
104 | intel_dip_infoframe_csum(&avi_if); | 140 | I915_WRITE(VIDEO_DIP_CTL, val | port | flags); |
105 | for (i = 0; i < sizeof(avi_if); i += 4) { | 141 | |
142 | for (i = 0; i < len; i += 4) { | ||
106 | I915_WRITE(VIDEO_DIP_DATA, *data); | 143 | I915_WRITE(VIDEO_DIP_DATA, *data); |
107 | data++; | 144 | data++; |
108 | } | 145 | } |
109 | 146 | ||
110 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | | 147 | flags |= intel_infoframe_flags(frame); |
111 | VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | | 148 | |
112 | VIDEO_DIP_ENABLE_AVI); | 149 | I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); |
150 | } | ||
151 | |||
152 | static void ironlake_write_infoframe(struct drm_encoder *encoder, | ||
153 | struct dip_infoframe *frame) | ||
154 | { | ||
155 | uint32_t *data = (uint32_t *)frame; | ||
156 | struct drm_device *dev = encoder->dev; | ||
157 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
158 | struct drm_crtc *crtc = encoder->crtc; | ||
159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
160 | int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | ||
161 | unsigned i, len = DIP_HEADER_SIZE + frame->len; | ||
162 | u32 flags, val = I915_READ(reg); | ||
163 | |||
164 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
165 | |||
166 | flags = intel_infoframe_index(frame); | ||
167 | |||
168 | val &= ~VIDEO_DIP_SELECT_MASK; | ||
169 | |||
170 | I915_WRITE(reg, val | flags); | ||
171 | |||
172 | for (i = 0; i < len; i += 4) { | ||
173 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | ||
174 | data++; | ||
175 | } | ||
176 | |||
177 | flags |= intel_infoframe_flags(frame); | ||
178 | |||
179 | I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); | ||
180 | } | ||
181 | static void intel_set_infoframe(struct drm_encoder *encoder, | ||
182 | struct dip_infoframe *frame) | ||
183 | { | ||
184 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
185 | |||
186 | if (!intel_hdmi->has_hdmi_sink) | ||
187 | return; | ||
188 | |||
189 | intel_dip_infoframe_csum(frame); | ||
190 | intel_hdmi->write_infoframe(encoder, frame); | ||
191 | } | ||
192 | |||
193 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) | ||
194 | { | ||
195 | struct dip_infoframe avi_if = { | ||
196 | .type = DIP_TYPE_AVI, | ||
197 | .ver = DIP_VERSION_AVI, | ||
198 | .len = DIP_LEN_AVI, | ||
199 | }; | ||
200 | |||
201 | intel_set_infoframe(encoder, &avi_if); | ||
202 | } | ||
203 | |||
204 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) | ||
205 | { | ||
206 | struct dip_infoframe spd_if; | ||
207 | |||
208 | memset(&spd_if, 0, sizeof(spd_if)); | ||
209 | spd_if.type = DIP_TYPE_SPD; | ||
210 | spd_if.ver = DIP_VERSION_SPD; | ||
211 | spd_if.len = DIP_LEN_SPD; | ||
212 | strcpy(spd_if.body.spd.vn, "Intel"); | ||
213 | strcpy(spd_if.body.spd.pd, "Integrated gfx"); | ||
214 | spd_if.body.spd.sdi = DIP_SPD_PC; | ||
215 | |||
216 | intel_set_infoframe(encoder, &spd_if); | ||
113 | } | 217 | } |
114 | 218 | ||
115 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | 219 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
@@ -156,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
156 | POSTING_READ(intel_hdmi->sdvox_reg); | 260 | POSTING_READ(intel_hdmi->sdvox_reg); |
157 | 261 | ||
158 | intel_hdmi_set_avi_infoframe(encoder); | 262 | intel_hdmi_set_avi_infoframe(encoder); |
263 | intel_hdmi_set_spd_infoframe(encoder); | ||
159 | } | 264 | } |
160 | 265 | ||
161 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | 266 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) |
@@ -433,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
433 | 538 | ||
434 | intel_hdmi->sdvox_reg = sdvox_reg; | 539 | intel_hdmi->sdvox_reg = sdvox_reg; |
435 | 540 | ||
541 | if (!HAS_PCH_SPLIT(dev)) | ||
542 | intel_hdmi->write_infoframe = i9xx_write_infoframe; | ||
543 | else | ||
544 | intel_hdmi->write_infoframe = ironlake_write_infoframe; | ||
545 | |||
436 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); | 546 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
437 | 547 | ||
438 | intel_hdmi_add_properties(intel_hdmi, connector); | 548 | intel_hdmi_add_properties(intel_hdmi, connector); |
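intel_dip_infoframe_csum() above now skips the header bytes and chooses the checksum byte so that the covered bytes sum to zero modulo 256. A generic, self-contained sketch of that rule (buffer layout and the exact range covered are assumptions for illustration, not the i915 code):

	static u8 dip_checksum(const u8 *bytes, unsigned int count)
	{
		u8 sum = 0;
		unsigned int i;

		for (i = 0; i < count; i++)
			sum += bytes[i];

		return 0x100 - sum;	/* adding this byte makes the total 0 (mod 256) */
	}
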
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b28f7bd9f88a..2e8ddfcba40c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -690,6 +690,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
690 | }, | 690 | }, |
691 | { | 691 | { |
692 | .callback = intel_no_lvds_dmi_callback, | 692 | .callback = intel_no_lvds_dmi_callback, |
693 | .ident = "Dell OptiPlex FX170", | ||
694 | .matches = { | ||
695 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
696 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"), | ||
697 | }, | ||
698 | }, | ||
699 | { | ||
700 | .callback = intel_no_lvds_dmi_callback, | ||
693 | .ident = "AOpen Mini PC", | 701 | .ident = "AOpen Mini PC", |
694 | .matches = { | 702 | .matches = { |
695 | DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), | 703 | DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a06ff07a4d3b..05f500cd9c24 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev, | |||
83 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | 83 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; |
84 | if (scaled_width > scaled_height) { /* pillar */ | 84 | if (scaled_width > scaled_height) { /* pillar */ |
85 | width = scaled_height / mode->vdisplay; | 85 | width = scaled_height / mode->vdisplay; |
86 | if (width & 1) | ||
87 | width++; | ||
86 | x = (adjusted_mode->hdisplay - width + 1) / 2; | 88 | x = (adjusted_mode->hdisplay - width + 1) / 2; |
87 | y = 0; | 89 | y = 0; |
88 | height = adjusted_mode->vdisplay; | 90 | height = adjusted_mode->vdisplay; |
89 | } else if (scaled_width < scaled_height) { /* letter */ | 91 | } else if (scaled_width < scaled_height) { /* letter */ |
90 | height = scaled_width / mode->hdisplay; | 92 | height = scaled_width / mode->hdisplay; |
93 | if (height & 1) | ||
94 | height++; | ||
91 | y = (adjusted_mode->vdisplay - height + 1) / 2; | 95 | y = (adjusted_mode->vdisplay - height + 1) / 2; |
92 | x = 0; | 96 | x = 0; |
93 | width = adjusted_mode->hdisplay; | 97 | width = adjusted_mode->hdisplay; |
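The two added lines bump an odd scaled width or height up to the next even value before the centering offset is computed, keeping the fitted size even. An equivalent one-liner, shown only to illustrate the arithmetic:

	static inline u32 round_up_to_even(u32 pixels)
	{
		return (pixels + 1) & ~1u;	/* 1023 -> 1024, 1024 -> 1024 */
	}
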
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e9615685a39c..47b9b2777038 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1321,6 +1321,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
1321 | ring->get_seqno = pc_render_get_seqno; | 1321 | ring->get_seqno = pc_render_get_seqno; |
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | if (!I915_NEED_GFX_HWS(dev)) | ||
1325 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | ||
1326 | |||
1324 | ring->dev = dev; | 1327 | ring->dev = dev; |
1325 | INIT_LIST_HEAD(&ring->active_list); | 1328 | INIT_LIST_HEAD(&ring->active_list); |
1326 | INIT_LIST_HEAD(&ring->request_list); | 1329 | INIT_LIST_HEAD(&ring->request_list); |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 3896ef811102..9f363e0c4b60 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | ccflags-y := -Iinclude/drm | 5 | ccflags-y := -Iinclude/drm |
6 | 6 | ||
7 | hostprogs-y := mkregtable | 7 | hostprogs-y := mkregtable |
8 | clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h | ||
8 | 9 | ||
9 | quiet_cmd_mkregtable = MKREGTABLE $@ | 10 | quiet_cmd_mkregtable = MKREGTABLE $@ |
10 | cmd_mkregtable = $(obj)/mkregtable $< > $@ | 11 | cmd_mkregtable = $(obj)/mkregtable $< > $@ |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index ebdb0fdb8348..e88c64417a8a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -1245,6 +1245,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) | |||
1245 | char name[512]; | 1245 | char name[512]; |
1246 | int i; | 1246 | int i; |
1247 | 1247 | ||
1248 | if (!ctx) | ||
1249 | return NULL; | ||
1250 | |||
1248 | ctx->card = card; | 1251 | ctx->card = card; |
1249 | ctx->bios = bios; | 1252 | ctx->bios = bios; |
1250 | 1253 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 189e86522b5b..a134790903d3 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 | |||
428 | last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | 428 | last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); |
429 | 429 | ||
430 | i = (reg >> 7); | 430 | i = (reg >> 7); |
431 | if (i > last_reg) { | 431 | if (i >= last_reg) { |
432 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 432 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
433 | return -EINVAL; | 433 | return -EINVAL; |
434 | } | 434 | } |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index db8ef1905d5f..cf83aa05a684 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
915 | { | 915 | { |
916 | struct r600_cs_track *track = (struct r600_cs_track *)p->track; | 916 | struct r600_cs_track *track = (struct r600_cs_track *)p->track; |
917 | struct radeon_cs_reloc *reloc; | 917 | struct radeon_cs_reloc *reloc; |
918 | u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm); | ||
919 | u32 m, i, tmp, *ib; | 918 | u32 m, i, tmp, *ib; |
920 | int r; | 919 | int r; |
921 | 920 | ||
922 | i = (reg >> 7); | 921 | i = (reg >> 7); |
923 | if (i > last_reg) { | 922 | if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { |
924 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | 923 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
925 | return -EINVAL; | 924 | return -EINVAL; |
926 | } | 925 | } |
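Both hunks above fix the same off-by-one: the register index selects a word in a *_reg_safe_bm table of ARRAY_SIZE() words, so an index equal to the array size is already out of range and must be rejected with ">=", not ">". A trivial standalone restatement with placeholder names, not the radeon tables themselves:

	static bool reg_word_index_ok(unsigned int i, unsigned int table_words)
	{
		return i < table_words;	/* i == table_words would read past the end */
	}
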
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index a74217cd192f..e0138b674aca 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -2557,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2557 | u16 offset, misc, misc2 = 0; | 2557 | u16 offset, misc, misc2 = 0; |
2558 | u8 rev, blocks, tmp; | 2558 | u8 rev, blocks, tmp; |
2559 | int state_index = 0; | 2559 | int state_index = 0; |
2560 | struct radeon_i2c_bus_rec i2c_bus; | ||
2560 | 2561 | ||
2561 | rdev->pm.default_power_state_index = -1; | 2562 | rdev->pm.default_power_state_index = -1; |
2562 | 2563 | ||
@@ -2575,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2575 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); | 2576 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); |
2576 | if (offset) { | 2577 | if (offset) { |
2577 | u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; | 2578 | u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; |
2578 | struct radeon_i2c_bus_rec i2c_bus; | ||
2579 | 2579 | ||
2580 | rev = RBIOS8(offset); | 2580 | rev = RBIOS8(offset); |
2581 | 2581 | ||
@@ -2617,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2617 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | 2617 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); |
2618 | } | 2618 | } |
2619 | } | 2619 | } |
2620 | } else { | ||
2621 | /* boards with a thermal chip, but no overdrive table */ | ||
2622 | |||
2623 | /* Asus 9600xt has an f75375 on the monid bus */ | ||
2624 | if ((dev->pdev->device == 0x4152) && | ||
2625 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
2626 | (dev->pdev->subsystem_device == 0xc002)) { | ||
2627 | i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | ||
2628 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | ||
2629 | if (rdev->pm.i2c_bus) { | ||
2630 | struct i2c_board_info info = { }; | ||
2631 | const char *name = "f75375"; | ||
2632 | info.addr = 0x28; | ||
2633 | strlcpy(info.type, name, sizeof(info.type)); | ||
2634 | i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); | ||
2635 | DRM_INFO("Possible %s thermal controller at 0x%02x\n", | ||
2636 | name, info.addr); | ||
2637 | } | ||
2638 | } | ||
2620 | } | 2639 | } |
2621 | 2640 | ||
2622 | if (rdev->flags & RADEON_IS_MOBILITY) { | 2641 | if (rdev->flags & RADEON_IS_MOBILITY) { |
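The added branch instantiates the f75375 sensor by hand on the MONID i2c bus when the BIOS provides no overdrive table. The registration pattern it relies on looks roughly like the sketch below; the adapter argument and the error handling are placeholders for illustration:

	#include <linux/kernel.h>
	#include <linux/i2c.h>

	static void register_f75375(struct i2c_adapter *adapter)
	{
		struct i2c_board_info info = {
			I2C_BOARD_INFO("f75375", 0x28),	/* driver name and 7-bit address */
		};

		if (!i2c_new_device(adapter, &info))
			pr_warn("f75375 instantiation failed\n");
	}
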
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 9792d4ffdc86..6d6b5f16bc09 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -430,6 +430,45 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
430 | return 0; | 430 | return 0; |
431 | } | 431 | } |
432 | 432 | ||
433 | /* | ||
434 | * Some integrated ATI Radeon chipset implementations (e.g. the | ||
435 | * Asus M2A-VM HDMI) may indicate the availability of a DDC even | ||
436 | * when there is no monitor connected. For these connectors the | ||
437 | * following DDC probe extension is applied: also check for the | ||
438 | * availability of an EDID with at least a correct EDID header. Only | ||
439 | * then is DDC assumed to be available. This prevents drm_get_edid() and | ||
440 | * drm_edid_block_valid() from periodically dumping data and kernel | ||
441 | * errors into the logs and onto the terminal. | ||
442 | */ | ||
443 | static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, | ||
444 | uint32_t supported_device, | ||
445 | int connector_type) | ||
446 | { | ||
447 | /* The Asus M2A-VM HDMI board sends data on the i2c bus even | ||
448 | * if the HDMI add-on card is not plugged in or HDMI is disabled in | ||
449 | * the BIOS. Valid DDC can only be assumed if a valid EDID header | ||
450 | * can also be retrieved via the i2c bus during the DDC probe */ | ||
451 | if ((dev->pdev->device == 0x791e) && | ||
452 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
453 | (dev->pdev->subsystem_device == 0x826d)) { | ||
454 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
455 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
456 | return true; | ||
457 | } | ||
458 | /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus | ||
459 | * for a DVI connector that is not implemented */ | ||
460 | if ((dev->pdev->device == 0x796e) && | ||
461 | (dev->pdev->subsystem_vendor == 0x1019) && | ||
462 | (dev->pdev->subsystem_device == 0x2615)) { | ||
463 | if ((connector_type == DRM_MODE_CONNECTOR_DVID) && | ||
464 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
465 | return true; | ||
466 | } | ||
467 | |||
468 | /* Default: no EDID header probe required for DDC probing */ | ||
469 | return false; | ||
470 | } | ||
471 | |||
433 | static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | 472 | static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, |
434 | struct drm_connector *connector) | 473 | struct drm_connector *connector) |
435 | { | 474 | { |
@@ -661,7 +700,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
661 | ret = connector_status_disconnected; | 700 | ret = connector_status_disconnected; |
662 | 701 | ||
663 | if (radeon_connector->ddc_bus) | 702 | if (radeon_connector->ddc_bus) |
664 | dret = radeon_ddc_probe(radeon_connector); | 703 | dret = radeon_ddc_probe(radeon_connector, |
704 | radeon_connector->requires_extended_probe); | ||
665 | if (dret) { | 705 | if (dret) { |
666 | if (radeon_connector->edid) { | 706 | if (radeon_connector->edid) { |
667 | kfree(radeon_connector->edid); | 707 | kfree(radeon_connector->edid); |
@@ -833,7 +873,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
833 | bool dret = false; | 873 | bool dret = false; |
834 | 874 | ||
835 | if (radeon_connector->ddc_bus) | 875 | if (radeon_connector->ddc_bus) |
836 | dret = radeon_ddc_probe(radeon_connector); | 876 | dret = radeon_ddc_probe(radeon_connector, |
877 | radeon_connector->requires_extended_probe); | ||
837 | if (dret) { | 878 | if (dret) { |
838 | if (radeon_connector->edid) { | 879 | if (radeon_connector->edid) { |
839 | kfree(radeon_connector->edid); | 880 | kfree(radeon_connector->edid); |
@@ -1251,7 +1292,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1251 | if (radeon_dp_getdpcd(radeon_connector)) | 1292 | if (radeon_dp_getdpcd(radeon_connector)) |
1252 | ret = connector_status_connected; | 1293 | ret = connector_status_connected; |
1253 | } else { | 1294 | } else { |
1254 | if (radeon_ddc_probe(radeon_connector)) | 1295 | if (radeon_ddc_probe(radeon_connector, |
1296 | radeon_connector->requires_extended_probe)) | ||
1255 | ret = connector_status_connected; | 1297 | ret = connector_status_connected; |
1256 | } | 1298 | } |
1257 | } | 1299 | } |
@@ -1406,6 +1448,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1406 | radeon_connector->shared_ddc = shared_ddc; | 1448 | radeon_connector->shared_ddc = shared_ddc; |
1407 | radeon_connector->connector_object_id = connector_object_id; | 1449 | radeon_connector->connector_object_id = connector_object_id; |
1408 | radeon_connector->hpd = *hpd; | 1450 | radeon_connector->hpd = *hpd; |
1451 | radeon_connector->requires_extended_probe = | ||
1452 | radeon_connector_needs_extended_probe(rdev, supported_device, | ||
1453 | connector_type); | ||
1409 | radeon_connector->router = *router; | 1454 | radeon_connector->router = *router; |
1410 | if (router->ddc_valid || router->cd_valid) { | 1455 | if (router->ddc_valid || router->cd_valid) { |
1411 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1456 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
@@ -1752,6 +1797,9 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1752 | radeon_connector->devices = supported_device; | 1797 | radeon_connector->devices = supported_device; |
1753 | radeon_connector->connector_object_id = connector_object_id; | 1798 | radeon_connector->connector_object_id = connector_object_id; |
1754 | radeon_connector->hpd = *hpd; | 1799 | radeon_connector->hpd = *hpd; |
1800 | radeon_connector->requires_extended_probe = | ||
1801 | radeon_connector_needs_extended_probe(rdev, supported_device, | ||
1802 | connector_type); | ||
1755 | switch (connector_type) { | 1803 | switch (connector_type) { |
1756 | case DRM_MODE_CONNECTOR_VGA: | 1804 | case DRM_MODE_CONNECTOR_VGA: |
1757 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1805 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 7cfaa7e2f3b5..440e6ecccc40 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -704,8 +704,9 @@ int radeon_device_init(struct radeon_device *rdev, | |||
704 | rdev->gpu_lockup = false; | 704 | rdev->gpu_lockup = false; |
705 | rdev->accel_working = false; | 705 | rdev->accel_working = false; |
706 | 706 | ||
707 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", | 707 | DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", |
708 | radeon_family_name[rdev->family], pdev->vendor, pdev->device); | 708 | radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
709 | pdev->subsystem_vendor, pdev->subsystem_device); | ||
709 | 710 | ||
710 | /* mutex initialization are all done here so we | 711 | /* mutex initialization are all done here so we |
711 | * can recall function without having locking issues */ | 712 | * can recall function without having locking issues */ |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 28f4655905bc..1a858944e4f3 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -751,8 +751,17 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
751 | if (!radeon_connector->ddc_bus) | 751 | if (!radeon_connector->ddc_bus) |
752 | return -1; | 752 | return -1; |
753 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); | 753 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); |
754 | /* Log the EDID retrieval status here. This matters in particular for | ||
755 | * connectors with the requires_extended_probe flag set, which prevents | ||
756 | * radeon_dvi_detect() from fetching an EDID on this connector | ||
757 | * as long as no valid EDID header is found */ | ||
754 | if (edid) { | 758 | if (edid) { |
759 | DRM_INFO("Radeon display connector %s: Found valid EDID", | ||
760 | drm_get_connector_name(connector)); | ||
755 | kfree(edid); | 761 | kfree(edid); |
762 | } else { | ||
763 | DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID", | ||
764 | drm_get_connector_name(connector)); | ||
756 | } | 765 | } |
757 | return ret; | 766 | return ret; |
758 | } | 767 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 85f033f19a8a..e71d2ed7fa11 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -50,8 +50,8 @@ | |||
50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs | 50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs |
51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query | 51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query |
52 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query | 52 | * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query |
53 | * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker | 53 | * 2.10.0 - fusion 2D tiling |
54 | * 2.11.0 - backend map | 54 | * 2.11.0 - backend map, initial compute support for the CS checker |
55 | */ | 55 | */ |
56 | #define KMS_DRIVER_MAJOR 2 | 56 | #define KMS_DRIVER_MAJOR 2 |
57 | #define KMS_DRIVER_MINOR 11 | 57 | #define KMS_DRIVER_MINOR 11 |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 781196db792f..6c111c1fa3f9 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -32,17 +32,17 @@ | |||
32 | * radeon_ddc_probe | 32 | * radeon_ddc_probe |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | 35 | bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe) |
36 | { | 36 | { |
37 | u8 out_buf[] = { 0x0, 0x0}; | 37 | u8 out = 0x0; |
38 | u8 buf[2]; | 38 | u8 buf[8]; |
39 | int ret; | 39 | int ret; |
40 | struct i2c_msg msgs[] = { | 40 | struct i2c_msg msgs[] = { |
41 | { | 41 | { |
42 | .addr = 0x50, | 42 | .addr = 0x50, |
43 | .flags = 0, | 43 | .flags = 0, |
44 | .len = 1, | 44 | .len = 1, |
45 | .buf = out_buf, | 45 | .buf = &out, |
46 | }, | 46 | }, |
47 | { | 47 | { |
48 | .addr = 0x50, | 48 | .addr = 0x50, |
@@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
52 | } | 52 | } |
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* Read 8 bytes from i2c for extended probe of EDID header */ | ||
56 | if (requires_extended_probe) | ||
57 | msgs[1].len = 8; | ||
58 | |||
55 | /* on hw with routers, select right port */ | 59 | /* on hw with routers, select right port */ |
56 | if (radeon_connector->router.ddc_valid) | 60 | if (radeon_connector->router.ddc_valid) |
57 | radeon_router_select_ddc_port(radeon_connector); | 61 | radeon_router_select_ddc_port(radeon_connector); |
58 | 62 | ||
59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); | 63 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
60 | if (ret == 2) | 64 | if (ret != 2) |
61 | return true; | 65 | /* Couldn't find an accessible DDC on this connector */ |
62 | 66 | return false; | |
63 | return false; | 67 | if (requires_extended_probe) { |
68 | /* Probe also for valid EDID header | ||
69 | * EDID header starts with: | ||
70 | * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. | ||
71 | * Only the first 6 bytes must be valid as | ||
72 | * drm_edid_block_valid() can fix the last 2 bytes */ | ||
73 | if (drm_edid_header_is_valid(buf) < 6) { | ||
74 | /* Couldn't find an accessible EDID on this | ||
75 | * connector */ | ||
76 | return false; | ||
77 | } | ||
78 | } | ||
79 | return true; | ||
64 | } | 80 | } |
65 | 81 | ||
66 | /* bit banging i2c */ | 82 | /* bit banging i2c */ |
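requires_extended_probe makes the probe read eight bytes and then lean on drm_edid_header_is_valid(), which scores how many bytes match the fixed EDID signature 00 FF FF FF FF FF FF 00. A rough local equivalent of that score, shown only as a sketch (the real helper lives in the drm core):

	static int edid_header_score(const u8 buf[8])
	{
		static const u8 header[8] = {
			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
		};
		int i, score = 0;

		for (i = 0; i < 8; i++)
			if (buf[i] == header[i])
				score++;

		return score;	/* the caller above treats a score below 6 as "no EDID" */
	}
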
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 6df4e3cec0c2..d09031c03e26 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -438,6 +438,9 @@ struct radeon_connector { | |||
438 | struct radeon_i2c_chan *ddc_bus; | 438 | struct radeon_i2c_chan *ddc_bus; |
439 | /* some systems have an hdmi and vga port with a shared ddc line */ | 439 | /* some systems have an hdmi and vga port with a shared ddc line */ |
440 | bool shared_ddc; | 440 | bool shared_ddc; |
441 | /* for some Radeon chip families we apply an additional EDID header | ||
442 | check as part of the DDC probe */ | ||
443 | bool requires_extended_probe; | ||
441 | bool use_digital; | 444 | bool use_digital; |
442 | /* we need to mind the EDID between detect | 445 | /* we need to mind the EDID between detect |
443 | and get modes due to analog/digital/tvencoder */ | 446 | and get modes due to analog/digital/tvencoder */ |
@@ -514,7 +517,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
514 | u8 val); | 517 | u8 val); |
515 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); | 518 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
516 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | 519 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); |
517 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 520 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, |
521 | bool requires_extended_probe); | ||
518 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 522 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
519 | 523 | ||
520 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); | 524 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c index 3be60da52123..67cbcfa35122 100644 --- a/drivers/ide/cy82c693.c +++ b/drivers/ide/cy82c693.c | |||
@@ -141,6 +141,8 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) | |||
141 | pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); | 141 | pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); |
142 | pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); | 142 | pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); |
143 | } | 143 | } |
144 | if (hwif->index > 0) | ||
145 | pci_dev_put(dev); | ||
144 | } | 146 | } |
145 | 147 | ||
146 | static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) | 148 | static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) |
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c index 542603b394e4..962693b10a1c 100644 --- a/drivers/ide/ide_platform.c +++ b/drivers/ide/ide_platform.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/ata_platform.h> | 20 | #include <linux/ata_platform.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/interrupt.h> | ||
22 | #include <linux/io.h> | 23 | #include <linux/io.h> |
23 | 24 | ||
24 | static void __devinit plat_ide_setup_ports(struct ide_hw *hw, | 25 | static void __devinit plat_ide_setup_ports(struct ide_hw *hw, |
@@ -95,7 +96,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) | |||
95 | plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); | 96 | plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); |
96 | hw.dev = &pdev->dev; | 97 | hw.dev = &pdev->dev; |
97 | 98 | ||
98 | d.irq_flags = res_irq->flags; | 99 | d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; |
100 | if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) | ||
101 | d.irq_flags |= IRQF_SHARED; | ||
102 | |||
99 | if (mmio) | 103 | if (mmio) |
100 | d.host_flags |= IDE_HFLAG_MMIO; | 104 | d.host_flags |= IDE_HFLAG_MMIO; |
101 | 105 | ||
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index ce281d152275..67df91af8424 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c | |||
@@ -483,7 +483,7 @@ static int gpio_keys_get_devtree_pdata(struct device *dev, | |||
483 | 483 | ||
484 | buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL); | 484 | buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL); |
485 | if (!buttons) | 485 | if (!buttons) |
486 | return -ENODEV; | 486 | return -ENOMEM; |
487 | 487 | ||
488 | pp = NULL; | 488 | pp = NULL; |
489 | i = 0; | 489 | i = 0; |
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index ab0acaf7fe8f..756348a7f93a 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c | |||
@@ -754,8 +754,11 @@ fail3: | |||
754 | device_remove_file(&client->dev, &dev_attr_disable_kp); | 754 | device_remove_file(&client->dev, &dev_attr_disable_kp); |
755 | fail2: | 755 | fail2: |
756 | while (--pwm >= 0) | 756 | while (--pwm >= 0) |
757 | if (lm->pwm[pwm].enabled) | 757 | if (lm->pwm[pwm].enabled) { |
758 | device_remove_file(lm->pwm[pwm].cdev.dev, | ||
759 | &dev_attr_time); | ||
758 | led_classdev_unregister(&lm->pwm[pwm].cdev); | 760 | led_classdev_unregister(&lm->pwm[pwm].cdev); |
761 | } | ||
759 | fail1: | 762 | fail1: |
760 | input_free_device(idev); | 763 | input_free_device(idev); |
761 | kfree(lm); | 764 | kfree(lm); |
@@ -775,8 +778,10 @@ static int __devexit lm8323_remove(struct i2c_client *client) | |||
775 | device_remove_file(&lm->client->dev, &dev_attr_disable_kp); | 778 | device_remove_file(&lm->client->dev, &dev_attr_disable_kp); |
776 | 779 | ||
777 | for (i = 0; i < 3; i++) | 780 | for (i = 0; i < 3; i++) |
778 | if (lm->pwm[i].enabled) | 781 | if (lm->pwm[i].enabled) { |
782 | device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time); | ||
779 | led_classdev_unregister(&lm->pwm[i].cdev); | 783 | led_classdev_unregister(&lm->pwm[i].cdev); |
784 | } | ||
780 | 785 | ||
781 | kfree(lm); | 786 | kfree(lm); |
782 | 787 | ||
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index da3828fc2c09..f270447ba951 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -19,6 +19,7 @@ | |||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
23 | #include <linux/input.h> | 24 | #include <linux/input.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
@@ -37,7 +38,7 @@ | |||
37 | #define KBC_ROW_SCAN_DLY 5 | 38 | #define KBC_ROW_SCAN_DLY 5 |
38 | 39 | ||
39 | /* KBC uses a 32KHz clock so a cycle = 1/32Khz */ | 40 | /* KBC uses a 32KHz clock so a cycle = 1/32Khz */ |
40 | #define KBC_CYCLE_USEC 32 | 41 | #define KBC_CYCLE_MS 32 |
41 | 42 | ||
42 | /* KBC Registers */ | 43 | /* KBC Registers */ |
43 | 44 | ||
@@ -647,7 +648,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev) | |||
647 | debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT); | 648 | debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT); |
648 | scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows; | 649 | scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows; |
649 | kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt; | 650 | kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt; |
650 | kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000; | 651 | kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS); |
651 | 652 | ||
652 | input_dev->name = pdev->name; | 653 | input_dev->name = pdev->name; |
653 | input_dev->id.bustype = BUS_HOST; | 654 | input_dev->id.bustype = BUS_HOST; |
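With the keyboard controller's 32 kHz scan clock, 32 cycles take roughly one millisecond, so the repoll delay in cycles converts to milliseconds by rounding the division by 32 upwards, which is exactly what DIV_ROUND_UP() expresses: for example, 100 scan cycles become (100 + 31) / 32 = 4 ms. A one-line restatement (the helper name is just for illustration):

	static unsigned int kbc_cycles_to_ms(unsigned int cycles)
	{
		return DIV_ROUND_UP(cycles, 32);	/* 100 cycles -> 4 ms */
	}
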
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c index c456f63b6bae..783597a9a64a 100644 --- a/drivers/input/misc/kxtj9.c +++ b/drivers/input/misc/kxtj9.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/i2c.h> | 21 | #include <linux/i2c.h> |
22 | #include <linux/input.h> | 22 | #include <linux/input.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/module.h> | ||
24 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
25 | #include <linux/input/kxtj9.h> | 26 | #include <linux/input/kxtj9.h> |
26 | #include <linux/input-polldev.h> | 27 | #include <linux/input-polldev.h> |
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 20f8f9284f02..6c76cf792991 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/i2c.h> | 25 | #include <linux/i2c.h> |
26 | #include <linux/input-polldev.h> | 26 | #include <linux/input-polldev.h> |
27 | #include <linux/of_device.h> | ||
27 | 28 | ||
28 | #define MMA8450_DRV_NAME "mma8450" | 29 | #define MMA8450_DRV_NAME "mma8450" |
29 | 30 | ||
@@ -229,10 +230,17 @@ static const struct i2c_device_id mma8450_id[] = { | |||
229 | }; | 230 | }; |
230 | MODULE_DEVICE_TABLE(i2c, mma8450_id); | 231 | MODULE_DEVICE_TABLE(i2c, mma8450_id); |
231 | 232 | ||
233 | static const struct of_device_id mma8450_dt_ids[] = { | ||
234 | { .compatible = "fsl,mma8450", }, | ||
235 | { /* sentinel */ } | ||
236 | }; | ||
237 | MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); | ||
238 | |||
232 | static struct i2c_driver mma8450_driver = { | 239 | static struct i2c_driver mma8450_driver = { |
233 | .driver = { | 240 | .driver = { |
234 | .name = MMA8450_DRV_NAME, | 241 | .name = MMA8450_DRV_NAME, |
235 | .owner = THIS_MODULE, | 242 | .owner = THIS_MODULE, |
243 | .of_match_table = mma8450_dt_ids, | ||
236 | }, | 244 | }, |
237 | .probe = mma8450_probe, | 245 | .probe = mma8450_probe, |
238 | .remove = __devexit_p(mma8450_remove), | 246 | .remove = __devexit_p(mma8450_remove), |
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index 95577c15ae56..4d17d9f3320b 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #define DEBUG | 32 | #define DEBUG |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/input.h> | 34 | #include <linux/input.h> |
35 | #include <linux/module.h> | ||
35 | #include <linux/serio.h> | 36 | #include <linux/serio.h> |
36 | #include <linux/libps2.h> | 37 | #include <linux/libps2.h> |
37 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index 80baa53da5b1..d64c5a43aaad 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | 26 | #include <linux/of_address.h> | |
27 | #include <linux/of_device.h> | 27 | #include <linux/of_device.h> |
28 | #include <linux/of_platform.h> | 28 | #include <linux/of_platform.h> |
29 | 29 | ||
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c index bc3b5187f3a3..131f9d1c921b 100644 --- a/drivers/input/touchscreen/ad7879.c +++ b/drivers/input/touchscreen/ad7879.c | |||
@@ -249,12 +249,14 @@ static void __ad7879_enable(struct ad7879 *ts) | |||
249 | 249 | ||
250 | static void __ad7879_disable(struct ad7879 *ts) | 250 | static void __ad7879_disable(struct ad7879 *ts) |
251 | { | 251 | { |
252 | u16 reg = (ts->cmd_crtl2 & ~AD7879_PM(-1)) | | ||
253 | AD7879_PM(AD7879_PM_SHUTDOWN); | ||
252 | disable_irq(ts->irq); | 254 | disable_irq(ts->irq); |
253 | 255 | ||
254 | if (del_timer_sync(&ts->timer)) | 256 | if (del_timer_sync(&ts->timer)) |
255 | ad7879_ts_event_release(ts); | 257 | ad7879_ts_event_release(ts); |
256 | 258 | ||
257 | ad7879_write(ts, AD7879_REG_CTRL2, AD7879_PM(AD7879_PM_SHUTDOWN)); | 259 | ad7879_write(ts, AD7879_REG_CTRL2, reg); |
258 | } | 260 | } |
259 | 261 | ||
260 | 262 | ||
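The fix above switches from writing a bare power-down value to a read-modify-write that clears only the AD7879_PM() field of the cached CTRL2 value before inserting the shutdown mode, so the register's other configuration bits survive the write. The general pattern, with placeholder names:

	static u16 set_field(u16 reg, u16 field_mask, u16 field_value)
	{
		return (reg & ~field_mask) | (field_value & field_mask);
	}
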
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 8420129fc5ee..f75a66e7d312 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -241,12 +241,13 @@ config DM_MIRROR | |||
241 | needed for live data migration tools such as 'pvmove'. | 241 | needed for live data migration tools such as 'pvmove'. |
242 | 242 | ||
243 | config DM_RAID | 243 | config DM_RAID |
244 | tristate "RAID 4/5/6 target (EXPERIMENTAL)" | 244 | tristate "RAID 1/4/5/6 target (EXPERIMENTAL)" |
245 | depends on BLK_DEV_DM && EXPERIMENTAL | 245 | depends on BLK_DEV_DM && EXPERIMENTAL |
246 | select MD_RAID1 | ||
246 | select MD_RAID456 | 247 | select MD_RAID456 |
247 | select BLK_DEV_MD | 248 | select BLK_DEV_MD |
248 | ---help--- | 249 | ---help--- |
249 | A dm target that supports RAID4, RAID5 and RAID6 mappings | 250 | A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings |
250 | 251 | ||
251 | A RAID-5 set of N drives with a capacity of C MB per drive provides | 252 | A RAID-5 set of N drives with a capacity of C MB per drive provides |
252 | the capacity of C * (N - 1) MB, and protects against a failure | 253 | the capacity of C * (N - 1) MB, and protects against a failure |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index bae6c4e23d3f..49da55c1528a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/device-mapper.h> | 30 | #include <linux/device-mapper.h> |
31 | 31 | ||
32 | #define DM_MSG_PREFIX "crypt" | 32 | #define DM_MSG_PREFIX "crypt" |
33 | #define MESG_STR(x) x, sizeof(x) | ||
34 | 33 | ||
35 | /* | 34 | /* |
36 | * context holding the current state of a multi-part conversion | 35 | * context holding the current state of a multi-part conversion |
@@ -239,7 +238,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, | |||
239 | struct dm_crypt_request *dmreq) | 238 | struct dm_crypt_request *dmreq) |
240 | { | 239 | { |
241 | memset(iv, 0, cc->iv_size); | 240 | memset(iv, 0, cc->iv_size); |
242 | *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); | 241 | *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); |
243 | 242 | ||
244 | return 0; | 243 | return 0; |
245 | } | 244 | } |
@@ -248,7 +247,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, | |||
248 | struct dm_crypt_request *dmreq) | 247 | struct dm_crypt_request *dmreq) |
249 | { | 248 | { |
250 | memset(iv, 0, cc->iv_size); | 249 | memset(iv, 0, cc->iv_size); |
251 | *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); | 250 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
252 | 251 | ||
253 | return 0; | 252 | return 0; |
254 | } | 253 | } |
@@ -415,7 +414,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, | |||
415 | struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; | 414 | struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; |
416 | 415 | ||
417 | memset(iv, 0, cc->iv_size); | 416 | memset(iv, 0, cc->iv_size); |
418 | *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); | 417 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
419 | crypto_cipher_encrypt_one(essiv_tfm, iv, iv); | 418 | crypto_cipher_encrypt_one(essiv_tfm, iv, iv); |
420 | 419 | ||
421 | return 0; | 420 | return 0; |
@@ -1575,11 +1574,17 @@ bad_mem: | |||
1575 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | 1574 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1576 | { | 1575 | { |
1577 | struct crypt_config *cc; | 1576 | struct crypt_config *cc; |
1578 | unsigned int key_size; | 1577 | unsigned int key_size, opt_params; |
1579 | unsigned long long tmpll; | 1578 | unsigned long long tmpll; |
1580 | int ret; | 1579 | int ret; |
1580 | struct dm_arg_set as; | ||
1581 | const char *opt_string; | ||
1582 | |||
1583 | static struct dm_arg _args[] = { | ||
1584 | {0, 1, "Invalid number of feature args"}, | ||
1585 | }; | ||
1581 | 1586 | ||
1582 | if (argc != 5) { | 1587 | if (argc < 5) { |
1583 | ti->error = "Not enough arguments"; | 1588 | ti->error = "Not enough arguments"; |
1584 | return -EINVAL; | 1589 | return -EINVAL; |
1585 | } | 1590 | } |
@@ -1648,6 +1653,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1648 | } | 1653 | } |
1649 | cc->start = tmpll; | 1654 | cc->start = tmpll; |
1650 | 1655 | ||
1656 | argv += 5; | ||
1657 | argc -= 5; | ||
1658 | |||
1659 | /* Optional parameters */ | ||
1660 | if (argc) { | ||
1661 | as.argc = argc; | ||
1662 | as.argv = argv; | ||
1663 | |||
1664 | ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); | ||
1665 | if (ret) | ||
1666 | goto bad; | ||
1667 | |||
1668 | opt_string = dm_shift_arg(&as); | ||
1669 | |||
1670 | if (opt_params == 1 && opt_string && | ||
1671 | !strcasecmp(opt_string, "allow_discards")) | ||
1672 | ti->num_discard_requests = 1; | ||
1673 | else if (opt_params) { | ||
1674 | ret = -EINVAL; | ||
1675 | ti->error = "Invalid feature arguments"; | ||
1676 | goto bad; | ||
1677 | } | ||
1678 | } | ||
1679 | |||
1651 | ret = -ENOMEM; | 1680 | ret = -ENOMEM; |
1652 | cc->io_queue = alloc_workqueue("kcryptd_io", | 1681 | cc->io_queue = alloc_workqueue("kcryptd_io", |
1653 | WQ_NON_REENTRANT| | 1682 | WQ_NON_REENTRANT| |
@@ -1682,9 +1711,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, | |||
1682 | struct dm_crypt_io *io; | 1711 | struct dm_crypt_io *io; |
1683 | struct crypt_config *cc; | 1712 | struct crypt_config *cc; |
1684 | 1713 | ||
1685 | if (bio->bi_rw & REQ_FLUSH) { | 1714 | /* |
1715 | * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. | ||
1716 | * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight | ||
1717 | * - for REQ_DISCARD caller must use flush if IO ordering matters | ||
1718 | */ | ||
1719 | if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { | ||
1686 | cc = ti->private; | 1720 | cc = ti->private; |
1687 | bio->bi_bdev = cc->dev->bdev; | 1721 | bio->bi_bdev = cc->dev->bdev; |
1722 | if (bio_sectors(bio)) | ||
1723 | bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); | ||
1688 | return DM_MAPIO_REMAPPED; | 1724 | return DM_MAPIO_REMAPPED; |
1689 | } | 1725 | } |
1690 | 1726 | ||
@@ -1727,6 +1763,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type, | |||
1727 | 1763 | ||
1728 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, | 1764 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
1729 | cc->dev->name, (unsigned long long)cc->start); | 1765 | cc->dev->name, (unsigned long long)cc->start); |
1766 | |||
1767 | if (ti->num_discard_requests) | ||
1768 | DMEMIT(" 1 allow_discards"); | ||
1769 | |||
1730 | break; | 1770 | break; |
1731 | } | 1771 | } |
1732 | return 0; | 1772 | return 0; |
@@ -1770,12 +1810,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | |||
1770 | if (argc < 2) | 1810 | if (argc < 2) |
1771 | goto error; | 1811 | goto error; |
1772 | 1812 | ||
1773 | if (!strnicmp(argv[0], MESG_STR("key"))) { | 1813 | if (!strcasecmp(argv[0], "key")) { |
1774 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { | 1814 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { |
1775 | DMWARN("not suspended during key manipulation."); | 1815 | DMWARN("not suspended during key manipulation."); |
1776 | return -EINVAL; | 1816 | return -EINVAL; |
1777 | } | 1817 | } |
1778 | if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { | 1818 | if (argc == 3 && !strcasecmp(argv[1], "set")) { |
1779 | ret = crypt_set_key(cc, argv[2]); | 1819 | ret = crypt_set_key(cc, argv[2]); |
1780 | if (ret) | 1820 | if (ret) |
1781 | return ret; | 1821 | return ret; |
@@ -1783,7 +1823,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | |||
1783 | ret = cc->iv_gen_ops->init(cc); | 1823 | ret = cc->iv_gen_ops->init(cc); |
1784 | return ret; | 1824 | return ret; |
1785 | } | 1825 | } |
1786 | if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { | 1826 | if (argc == 2 && !strcasecmp(argv[1], "wipe")) { |
1787 | if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { | 1827 | if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { |
1788 | ret = cc->iv_gen_ops->wipe(cc); | 1828 | ret = cc->iv_gen_ops->wipe(cc); |
1789 | if (ret) | 1829 | if (ret) |
@@ -1823,7 +1863,7 @@ static int crypt_iterate_devices(struct dm_target *ti, | |||
1823 | 1863 | ||
1824 | static struct target_type crypt_target = { | 1864 | static struct target_type crypt_target = { |
1825 | .name = "crypt", | 1865 | .name = "crypt", |
1826 | .version = {1, 10, 0}, | 1866 | .version = {1, 11, 0}, |
1827 | .module = THIS_MODULE, | 1867 | .module = THIS_MODULE, |
1828 | .ctr = crypt_ctr, | 1868 | .ctr = crypt_ctr, |
1829 | .dtr = crypt_dtr, | 1869 | .dtr = crypt_dtr, |
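With the new optional argument group, a crypt table line may append "<#opt_params> <opt_params>" after the usual five arguments; the only feature accepted here is allow_discards. A hypothetical invocation, where the volume name, device, key and sector count are placeholders rather than values from this patch:

	dmsetup create cryptvol --table "0 2097152 crypt aes-cbc-essiv:sha256 0123456789abcdef0123456789abcdef 0 /dev/sdb 0 1 allow_discards"
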
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index ea790623c30b..89f73ca22cfa 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2003 Sistina Software (UK) Limited. | 2 | * Copyright (C) 2003 Sistina Software (UK) Limited. |
3 | * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
6 | */ | 6 | */ |
@@ -15,6 +15,9 @@ | |||
15 | 15 | ||
16 | #define DM_MSG_PREFIX "flakey" | 16 | #define DM_MSG_PREFIX "flakey" |
17 | 17 | ||
18 | #define all_corrupt_bio_flags_match(bio, fc) \ | ||
19 | (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags) | ||
20 | |||
18 | /* | 21 | /* |
19 | * Flakey: Used for testing only, simulates intermittent, | 22 | * Flakey: Used for testing only, simulates intermittent, |
20 | * catastrophic device failure. | 23 | * catastrophic device failure. |
@@ -25,60 +28,189 @@ struct flakey_c { | |||
25 | sector_t start; | 28 | sector_t start; |
26 | unsigned up_interval; | 29 | unsigned up_interval; |
27 | unsigned down_interval; | 30 | unsigned down_interval; |
31 | unsigned long flags; | ||
32 | unsigned corrupt_bio_byte; | ||
33 | unsigned corrupt_bio_rw; | ||
34 | unsigned corrupt_bio_value; | ||
35 | unsigned corrupt_bio_flags; | ||
36 | }; | ||
37 | |||
38 | enum feature_flag_bits { | ||
39 | DROP_WRITES | ||
28 | }; | 40 | }; |
29 | 41 | ||
42 | static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, | ||
43 | struct dm_target *ti) | ||
44 | { | ||
45 | int r; | ||
46 | unsigned argc; | ||
47 | const char *arg_name; | ||
48 | |||
49 | static struct dm_arg _args[] = { | ||
50 | {0, 6, "Invalid number of feature args"}, | ||
51 | {1, UINT_MAX, "Invalid corrupt bio byte"}, | ||
52 | {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, | ||
53 | {0, UINT_MAX, "Invalid corrupt bio flags mask"}, | ||
54 | }; | ||
55 | |||
56 | /* No feature arguments supplied. */ | ||
57 | if (!as->argc) | ||
58 | return 0; | ||
59 | |||
60 | r = dm_read_arg_group(_args, as, &argc, &ti->error); | ||
61 | if (r) | ||
62 | return r; | ||
63 | |||
64 | while (argc) { | ||
65 | arg_name = dm_shift_arg(as); | ||
66 | argc--; | ||
67 | |||
68 | /* | ||
69 | * drop_writes | ||
70 | */ | ||
71 | if (!strcasecmp(arg_name, "drop_writes")) { | ||
72 | if (test_and_set_bit(DROP_WRITES, &fc->flags)) { | ||
73 | ti->error = "Feature drop_writes duplicated"; | ||
74 | return -EINVAL; | ||
75 | } | ||
76 | |||
77 | continue; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> | ||
82 | */ | ||
83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { | ||
84 | if (!argc) | ||
85 | ti->error = "Feature corrupt_bio_byte requires parameters"; | ||
86 | |||
87 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); | ||
88 | if (r) | ||
89 | return r; | ||
90 | argc--; | ||
91 | |||
92 | /* | ||
93 | * Direction r or w? | ||
94 | */ | ||
95 | arg_name = dm_shift_arg(as); | ||
96 | if (!strcasecmp(arg_name, "w")) | ||
97 | fc->corrupt_bio_rw = WRITE; | ||
98 | else if (!strcasecmp(arg_name, "r")) | ||
99 | fc->corrupt_bio_rw = READ; | ||
100 | else { | ||
101 | ti->error = "Invalid corrupt bio direction (r or w)"; | ||
102 | return -EINVAL; | ||
103 | } | ||
104 | argc--; | ||
105 | |||
106 | /* | ||
107 | * Value of byte (0-255) to write in place of correct one. | ||
108 | */ | ||
109 | r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error); | ||
110 | if (r) | ||
111 | return r; | ||
112 | argc--; | ||
113 | |||
114 | /* | ||
115 | * Only corrupt bios with these flags set. | ||
116 | */ | ||
117 | r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error); | ||
118 | if (r) | ||
119 | return r; | ||
120 | argc--; | ||
121 | |||
122 | continue; | ||
123 | } | ||
124 | |||
125 | ti->error = "Unrecognised flakey feature requested"; | ||
126 | return -EINVAL; | ||
127 | } | ||
128 | |||
129 | if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) { | ||
130 | ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; | ||
131 | return -EINVAL; | ||
132 | } | ||
133 | |||
134 | return 0; | ||
135 | } | ||
136 | |||
30 | /* | 137 | /* |
31 | * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval> | 138 | * Construct a flakey mapping: |
139 | * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*] | ||
140 | * | ||
141 | * Feature args: | ||
142 | * [drop_writes] | ||
143 | * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>] | ||
144 | * | ||
145 | * Nth_byte starts from 1 for the first byte. | ||
146 | * Direction is r for READ or w for WRITE. | ||
147 | * bio_flags is ignored if 0. | ||
32 | */ | 148 | */ |
33 | static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) | 149 | static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
34 | { | 150 | { |
151 | static struct dm_arg _args[] = { | ||
152 | {0, UINT_MAX, "Invalid up interval"}, | ||
153 | {0, UINT_MAX, "Invalid down interval"}, | ||
154 | }; | ||
155 | |||
156 | int r; | ||
35 | struct flakey_c *fc; | 157 | struct flakey_c *fc; |
36 | unsigned long long tmp; | 158 | unsigned long long tmpll; |
159 | struct dm_arg_set as; | ||
160 | const char *devname; | ||
37 | 161 | ||
38 | if (argc != 4) { | 162 | as.argc = argc; |
39 | ti->error = "dm-flakey: Invalid argument count"; | 163 | as.argv = argv; |
164 | |||
165 | if (argc < 4) { | ||
166 | ti->error = "Invalid argument count"; | ||
40 | return -EINVAL; | 167 | return -EINVAL; |
41 | } | 168 | } |
42 | 169 | ||
43 | fc = kmalloc(sizeof(*fc), GFP_KERNEL); | 170 | fc = kzalloc(sizeof(*fc), GFP_KERNEL); |
44 | if (!fc) { | 171 | if (!fc) { |
45 | ti->error = "dm-flakey: Cannot allocate linear context"; | 172 | ti->error = "Cannot allocate linear context"; |
46 | return -ENOMEM; | 173 | return -ENOMEM; |
47 | } | 174 | } |
48 | fc->start_time = jiffies; | 175 | fc->start_time = jiffies; |
49 | 176 | ||
50 | if (sscanf(argv[1], "%llu", &tmp) != 1) { | 177 | devname = dm_shift_arg(&as); |
51 | ti->error = "dm-flakey: Invalid device sector"; | 178 | |
179 | if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) { | ||
180 | ti->error = "Invalid device sector"; | ||
52 | goto bad; | 181 | goto bad; |
53 | } | 182 | } |
54 | fc->start = tmp; | 183 | fc->start = tmpll; |
55 | 184 | ||
56 | if (sscanf(argv[2], "%u", &fc->up_interval) != 1) { | 185 | r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error); |
57 | ti->error = "dm-flakey: Invalid up interval"; | 186 | if (r) |
58 | goto bad; | 187 | goto bad; |
59 | } | ||
60 | 188 | ||
61 | if (sscanf(argv[3], "%u", &fc->down_interval) != 1) { | 189 | r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error); |
62 | ti->error = "dm-flakey: Invalid down interval"; | 190 | if (r) |
63 | goto bad; | 191 | goto bad; |
64 | } | ||
65 | 192 | ||
66 | if (!(fc->up_interval + fc->down_interval)) { | 193 | if (!(fc->up_interval + fc->down_interval)) { |
67 | ti->error = "dm-flakey: Total (up + down) interval is zero"; | 194 | ti->error = "Total (up + down) interval is zero"; |
68 | goto bad; | 195 | goto bad; |
69 | } | 196 | } |
70 | 197 | ||
71 | if (fc->up_interval + fc->down_interval < fc->up_interval) { | 198 | if (fc->up_interval + fc->down_interval < fc->up_interval) { |
72 | ti->error = "dm-flakey: Interval overflow"; | 199 | ti->error = "Interval overflow"; |
73 | goto bad; | 200 | goto bad; |
74 | } | 201 | } |
75 | 202 | ||
76 | if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) { | 203 | r = parse_features(&as, fc, ti); |
77 | ti->error = "dm-flakey: Device lookup failed"; | 204 | if (r) |
205 | goto bad; | ||
206 | |||
207 | if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) { | ||
208 | ti->error = "Device lookup failed"; | ||
78 | goto bad; | 209 | goto bad; |
79 | } | 210 | } |
80 | 211 | ||
81 | ti->num_flush_requests = 1; | 212 | ti->num_flush_requests = 1; |
213 | ti->num_discard_requests = 1; | ||
82 | ti->private = fc; | 214 | ti->private = fc; |
83 | return 0; | 215 | return 0; |
84 | 216 | ||
@@ -99,7 +231,7 @@ static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) | |||
99 | { | 231 | { |
100 | struct flakey_c *fc = ti->private; | 232 | struct flakey_c *fc = ti->private; |
101 | 233 | ||
102 | return fc->start + (bi_sector - ti->begin); | 234 | return fc->start + dm_target_offset(ti, bi_sector); |
103 | } | 235 | } |
104 | 236 | ||
105 | static void flakey_map_bio(struct dm_target *ti, struct bio *bio) | 237 | static void flakey_map_bio(struct dm_target *ti, struct bio *bio) |
@@ -111,6 +243,25 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) | |||
111 | bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); | 243 | bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); |
112 | } | 244 | } |
113 | 245 | ||
246 | static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) | ||
247 | { | ||
248 | unsigned bio_bytes = bio_cur_bytes(bio); | ||
249 | char *data = bio_data(bio); | ||
250 | |||
251 | /* | ||
252 | * Overwrite the Nth byte of the data returned. | ||
253 | */ | ||
254 | if (data && bio_bytes >= fc->corrupt_bio_byte) { | ||
255 | data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; | ||
256 | |||
257 | DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " | ||
258 | "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", | ||
259 | bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, | ||
260 | (bio_data_dir(bio) == WRITE) ? 'w' : 'r', | ||
261 | bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); | ||
262 | } | ||
263 | } | ||
264 | |||
114 | static int flakey_map(struct dm_target *ti, struct bio *bio, | 265 | static int flakey_map(struct dm_target *ti, struct bio *bio, |
115 | union map_info *map_context) | 266 | union map_info *map_context) |
116 | { | 267 | { |
@@ -119,18 +270,71 @@ static int flakey_map(struct dm_target *ti, struct bio *bio, | |||
119 | 270 | ||
120 | /* Are we alive ? */ | 271 | /* Are we alive ? */ |
121 | elapsed = (jiffies - fc->start_time) / HZ; | 272 | elapsed = (jiffies - fc->start_time) / HZ; |
122 | if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) | 273 | if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) { |
274 | /* | ||
275 | * Flag this bio as submitted while down. | ||
276 | */ | ||
277 | map_context->ll = 1; | ||
278 | |||
279 | /* | ||
280 | * Map reads as normal. | ||
281 | */ | ||
282 | if (bio_data_dir(bio) == READ) | ||
283 | goto map_bio; | ||
284 | |||
285 | /* | ||
286 | * Drop writes? | ||
287 | */ | ||
288 | if (test_bit(DROP_WRITES, &fc->flags)) { | ||
289 | bio_endio(bio, 0); | ||
290 | return DM_MAPIO_SUBMITTED; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * Corrupt matching writes. | ||
295 | */ | ||
296 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) { | ||
297 | if (all_corrupt_bio_flags_match(bio, fc)) | ||
298 | corrupt_bio_data(bio, fc); | ||
299 | goto map_bio; | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * By default, error all I/O. | ||
304 | */ | ||
123 | return -EIO; | 305 | return -EIO; |
306 | } | ||
124 | 307 | ||
308 | map_bio: | ||
125 | flakey_map_bio(ti, bio); | 309 | flakey_map_bio(ti, bio); |
126 | 310 | ||
127 | return DM_MAPIO_REMAPPED; | 311 | return DM_MAPIO_REMAPPED; |
128 | } | 312 | } |
129 | 313 | ||
314 | static int flakey_end_io(struct dm_target *ti, struct bio *bio, | ||
315 | int error, union map_info *map_context) | ||
316 | { | ||
317 | struct flakey_c *fc = ti->private; | ||
318 | unsigned bio_submitted_while_down = map_context->ll; | ||
319 | |||
320 | /* | ||
321 | * Corrupt successful READs while in down state. | ||
322 | * If flags were specified, only corrupt those that match. | ||
323 | */ | ||
324 | if (!error && bio_submitted_while_down && | ||
325 | (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && | ||
326 | all_corrupt_bio_flags_match(bio, fc)) | ||
327 | corrupt_bio_data(bio, fc); | ||
328 | |||
329 | return error; | ||
330 | } | ||
331 | |||
130 | static int flakey_status(struct dm_target *ti, status_type_t type, | 332 | static int flakey_status(struct dm_target *ti, status_type_t type, |
131 | char *result, unsigned int maxlen) | 333 | char *result, unsigned int maxlen) |
132 | { | 334 | { |
335 | unsigned sz = 0; | ||
133 | struct flakey_c *fc = ti->private; | 336 | struct flakey_c *fc = ti->private; |
337 | unsigned drop_writes; | ||
134 | 338 | ||
135 | switch (type) { | 339 | switch (type) { |
136 | case STATUSTYPE_INFO: | 340 | case STATUSTYPE_INFO: |
@@ -138,9 +342,22 @@ static int flakey_status(struct dm_target *ti, status_type_t type, | |||
138 | break; | 342 | break; |
139 | 343 | ||
140 | case STATUSTYPE_TABLE: | 344 | case STATUSTYPE_TABLE: |
141 | snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name, | 345 | DMEMIT("%s %llu %u %u ", fc->dev->name, |
142 | (unsigned long long)fc->start, fc->up_interval, | 346 | (unsigned long long)fc->start, fc->up_interval, |
143 | fc->down_interval); | 347 | fc->down_interval); |
348 | |||
349 | drop_writes = test_bit(DROP_WRITES, &fc->flags); | ||
350 | DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5); | ||
351 | |||
352 | if (drop_writes) | ||
353 | DMEMIT("drop_writes "); | ||
354 | |||
355 | if (fc->corrupt_bio_byte) | ||
356 | DMEMIT("corrupt_bio_byte %u %c %u %u ", | ||
357 | fc->corrupt_bio_byte, | ||
358 | (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r', | ||
359 | fc->corrupt_bio_value, fc->corrupt_bio_flags); | ||
360 | |||
144 | break; | 361 | break; |
145 | } | 362 | } |
146 | return 0; | 363 | return 0; |
@@ -177,11 +394,12 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_ | |||
177 | 394 | ||
178 | static struct target_type flakey_target = { | 395 | static struct target_type flakey_target = { |
179 | .name = "flakey", | 396 | .name = "flakey", |
180 | .version = {1, 1, 0}, | 397 | .version = {1, 2, 0}, |
181 | .module = THIS_MODULE, | 398 | .module = THIS_MODULE, |
182 | .ctr = flakey_ctr, | 399 | .ctr = flakey_ctr, |
183 | .dtr = flakey_dtr, | 400 | .dtr = flakey_dtr, |
184 | .map = flakey_map, | 401 | .map = flakey_map, |
402 | .end_io = flakey_end_io, | ||
185 | .status = flakey_status, | 403 | .status = flakey_status, |
186 | .ioctl = flakey_ioctl, | 404 | .ioctl = flakey_ioctl, |
187 | .merge = flakey_merge, | 405 | .merge = flakey_merge, |
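A minimal usage sketch for the new flakey feature arguments, following the table syntax documented in the constructor comment above. The device numbers, sizes and intervals below are illustrative assumptions, not values from the patch; the leading "<start> <length> flakey" framing is the usual device-mapper table form. Note that the status path counts drop_writes as one feature word and corrupt_bio_byte as five (the keyword plus its four parameters), which is what its feature-count field emits.

	/* Hypothetical dm-flakey table lines (shown as C string literals):
	 * <dev_path> <offset> <up interval> <down interval> [<#features> [<arg>]*]
	 */
	static const char *flakey_drop_writes =
		"0 409600 flakey 8:16 0 30 5 1 drop_writes";
	static const char *flakey_corrupt_reads =
		/* while down: corrupt byte 1 of successful READs with 255, any bio flags */
		"0 409600 flakey 8:16 0 30 5 5 corrupt_bio_byte 1 r 255 0";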
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2067288f61f9..ad2eba40e319 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -38,6 +38,8 @@ struct io { | |||
38 | struct dm_io_client *client; | 38 | struct dm_io_client *client; |
39 | io_notify_fn callback; | 39 | io_notify_fn callback; |
40 | void *context; | 40 | void *context; |
41 | void *vma_invalidate_address; | ||
42 | unsigned long vma_invalidate_size; | ||
41 | } __attribute__((aligned(DM_IO_MAX_REGIONS))); | 43 | } __attribute__((aligned(DM_IO_MAX_REGIONS))); |
42 | 44 | ||
43 | static struct kmem_cache *_dm_io_cache; | 45 | static struct kmem_cache *_dm_io_cache; |
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error) | |||
116 | set_bit(region, &io->error_bits); | 118 | set_bit(region, &io->error_bits); |
117 | 119 | ||
118 | if (atomic_dec_and_test(&io->count)) { | 120 | if (atomic_dec_and_test(&io->count)) { |
121 | if (io->vma_invalidate_size) | ||
122 | invalidate_kernel_vmap_range(io->vma_invalidate_address, | ||
123 | io->vma_invalidate_size); | ||
124 | |||
119 | if (io->sleeper) | 125 | if (io->sleeper) |
120 | wake_up_process(io->sleeper); | 126 | wake_up_process(io->sleeper); |
121 | 127 | ||
@@ -159,6 +165,9 @@ struct dpages { | |||
159 | 165 | ||
160 | unsigned context_u; | 166 | unsigned context_u; |
161 | void *context_ptr; | 167 | void *context_ptr; |
168 | |||
169 | void *vma_invalidate_address; | ||
170 | unsigned long vma_invalidate_size; | ||
162 | }; | 171 | }; |
163 | 172 | ||
164 | /* | 173 | /* |
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
377 | io->sleeper = current; | 386 | io->sleeper = current; |
378 | io->client = client; | 387 | io->client = client; |
379 | 388 | ||
389 | io->vma_invalidate_address = dp->vma_invalidate_address; | ||
390 | io->vma_invalidate_size = dp->vma_invalidate_size; | ||
391 | |||
380 | dispatch_io(rw, num_regions, where, dp, io, 1); | 392 | dispatch_io(rw, num_regions, where, dp, io, 1); |
381 | 393 | ||
382 | while (1) { | 394 | while (1) { |
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, | |||
415 | io->callback = fn; | 427 | io->callback = fn; |
416 | io->context = context; | 428 | io->context = context; |
417 | 429 | ||
430 | io->vma_invalidate_address = dp->vma_invalidate_address; | ||
431 | io->vma_invalidate_size = dp->vma_invalidate_size; | ||
432 | |||
418 | dispatch_io(rw, num_regions, where, dp, io, 0); | 433 | dispatch_io(rw, num_regions, where, dp, io, 0); |
419 | return 0; | 434 | return 0; |
420 | } | 435 | } |
421 | 436 | ||
422 | static int dp_init(struct dm_io_request *io_req, struct dpages *dp) | 437 | static int dp_init(struct dm_io_request *io_req, struct dpages *dp, |
438 | unsigned long size) | ||
423 | { | 439 | { |
424 | /* Set up dpages based on memory type */ | 440 | /* Set up dpages based on memory type */ |
441 | |||
442 | dp->vma_invalidate_address = NULL; | ||
443 | dp->vma_invalidate_size = 0; | ||
444 | |||
425 | switch (io_req->mem.type) { | 445 | switch (io_req->mem.type) { |
426 | case DM_IO_PAGE_LIST: | 446 | case DM_IO_PAGE_LIST: |
427 | list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); | 447 | list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); |
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp) | |||
432 | break; | 452 | break; |
433 | 453 | ||
434 | case DM_IO_VMA: | 454 | case DM_IO_VMA: |
455 | flush_kernel_vmap_range(io_req->mem.ptr.vma, size); | ||
456 | if ((io_req->bi_rw & RW_MASK) == READ) { | ||
457 | dp->vma_invalidate_address = io_req->mem.ptr.vma; | ||
458 | dp->vma_invalidate_size = size; | ||
459 | } | ||
435 | vm_dp_init(dp, io_req->mem.ptr.vma); | 460 | vm_dp_init(dp, io_req->mem.ptr.vma); |
436 | break; | 461 | break; |
437 | 462 | ||
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, | |||
460 | int r; | 485 | int r; |
461 | struct dpages dp; | 486 | struct dpages dp; |
462 | 487 | ||
463 | r = dp_init(io_req, &dp); | 488 | r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT); |
464 | if (r) | 489 | if (r) |
465 | return r; | 490 | return r; |
466 | 491 | ||
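The flush_kernel_vmap_range()/invalidate_kernel_vmap_range() calls matter on architectures with aliasing caches, where a vmalloc mapping can alias the pages the I/O actually touches; without the invalidate, a READ into DM_IO_VMA memory could leave stale data visible through the vmalloc alias. A hedged sketch of the caller-side pattern this protects, assuming the dm_io_client and block device already exist (nothing in this sketch is added by the patch itself):

	#include <linux/device-mapper.h>
	#include <linux/dm-io.h>

	/* Synchronous read into vmalloc()ed memory via dm-io. With this patch,
	 * dp_init() flushes the vmalloc alias before dispatch and dec_count()
	 * invalidates it on completion, so the caller sees the data just read. */
	static int read_into_vmalloc(struct dm_io_client *client,
				     struct block_device *bdev,
				     sector_t sector, sector_t nr_sectors,
				     void *vma)
	{
		unsigned long error_bits = 0;
		struct dm_io_region where = {
			.bdev   = bdev,
			.sector = sector,
			.count  = nr_sectors,
		};
		struct dm_io_request req = {
			.bi_rw       = READ,
			.mem.type    = DM_IO_VMA,
			.mem.ptr.vma = vma,
			.notify.fn   = NULL,	/* synchronous */
			.client      = client,
		};

		return dm_io(&req, 1, &where, &error_bits);
	}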
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 4cacdad2270a..2e9a3ca37bdd 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -128,6 +128,24 @@ static struct hash_cell *__get_uuid_cell(const char *str) | |||
128 | return NULL; | 128 | return NULL; |
129 | } | 129 | } |
130 | 130 | ||
131 | static struct hash_cell *__get_dev_cell(uint64_t dev) | ||
132 | { | ||
133 | struct mapped_device *md; | ||
134 | struct hash_cell *hc; | ||
135 | |||
136 | md = dm_get_md(huge_decode_dev(dev)); | ||
137 | if (!md) | ||
138 | return NULL; | ||
139 | |||
140 | hc = dm_get_mdptr(md); | ||
141 | if (!hc) { | ||
142 | dm_put(md); | ||
143 | return NULL; | ||
144 | } | ||
145 | |||
146 | return hc; | ||
147 | } | ||
148 | |||
131 | /*----------------------------------------------------------------- | 149 | /*----------------------------------------------------------------- |
132 | * Inserting, removing and renaming a device. | 150 | * Inserting, removing and renaming a device. |
133 | *---------------------------------------------------------------*/ | 151 | *---------------------------------------------------------------*/ |
@@ -718,25 +736,45 @@ static int dev_create(struct dm_ioctl *param, size_t param_size) | |||
718 | */ | 736 | */ |
719 | static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) | 737 | static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) |
720 | { | 738 | { |
721 | struct mapped_device *md; | 739 | struct hash_cell *hc = NULL; |
722 | void *mdptr = NULL; | ||
723 | 740 | ||
724 | if (*param->uuid) | 741 | if (*param->uuid) { |
725 | return __get_uuid_cell(param->uuid); | 742 | if (*param->name || param->dev) |
743 | return NULL; | ||
726 | 744 | ||
727 | if (*param->name) | 745 | hc = __get_uuid_cell(param->uuid); |
728 | return __get_name_cell(param->name); | 746 | if (!hc) |
747 | return NULL; | ||
748 | } else if (*param->name) { | ||
749 | if (param->dev) | ||
750 | return NULL; | ||
729 | 751 | ||
730 | md = dm_get_md(huge_decode_dev(param->dev)); | 752 | hc = __get_name_cell(param->name); |
731 | if (!md) | 753 | if (!hc) |
732 | goto out; | 754 | return NULL; |
755 | } else if (param->dev) { | ||
756 | hc = __get_dev_cell(param->dev); | ||
757 | if (!hc) | ||
758 | return NULL; | ||
759 | } else | ||
760 | return NULL; | ||
733 | 761 | ||
734 | mdptr = dm_get_mdptr(md); | 762 | /* |
735 | if (!mdptr) | 763 | * Sneakily write in both the name and the uuid |
736 | dm_put(md); | 764 | * while we have the cell. |
765 | */ | ||
766 | strlcpy(param->name, hc->name, sizeof(param->name)); | ||
767 | if (hc->uuid) | ||
768 | strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); | ||
769 | else | ||
770 | param->uuid[0] = '\0'; | ||
737 | 771 | ||
738 | out: | 772 | if (hc->new_map) |
739 | return mdptr; | 773 | param->flags |= DM_INACTIVE_PRESENT_FLAG; |
774 | else | ||
775 | param->flags &= ~DM_INACTIVE_PRESENT_FLAG; | ||
776 | |||
777 | return hc; | ||
740 | } | 778 | } |
741 | 779 | ||
742 | static struct mapped_device *find_device(struct dm_ioctl *param) | 780 | static struct mapped_device *find_device(struct dm_ioctl *param) |
@@ -746,24 +784,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param) | |||
746 | 784 | ||
747 | down_read(&_hash_lock); | 785 | down_read(&_hash_lock); |
748 | hc = __find_device_hash_cell(param); | 786 | hc = __find_device_hash_cell(param); |
749 | if (hc) { | 787 | if (hc) |
750 | md = hc->md; | 788 | md = hc->md; |
751 | |||
752 | /* | ||
753 | * Sneakily write in both the name and the uuid | ||
754 | * while we have the cell. | ||
755 | */ | ||
756 | strlcpy(param->name, hc->name, sizeof(param->name)); | ||
757 | if (hc->uuid) | ||
758 | strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); | ||
759 | else | ||
760 | param->uuid[0] = '\0'; | ||
761 | |||
762 | if (hc->new_map) | ||
763 | param->flags |= DM_INACTIVE_PRESENT_FLAG; | ||
764 | else | ||
765 | param->flags &= ~DM_INACTIVE_PRESENT_FLAG; | ||
766 | } | ||
767 | up_read(&_hash_lock); | 789 | up_read(&_hash_lock); |
768 | 790 | ||
769 | return md; | 791 | return md; |
@@ -1402,6 +1424,11 @@ static int target_message(struct dm_ioctl *param, size_t param_size) | |||
1402 | goto out; | 1424 | goto out; |
1403 | } | 1425 | } |
1404 | 1426 | ||
1427 | if (!argc) { | ||
1428 | DMWARN("Empty message received."); | ||
1429 | goto out; | ||
1430 | } | ||
1431 | |||
1405 | table = dm_get_live_table(md); | 1432 | table = dm_get_live_table(md); |
1406 | if (!table) | 1433 | if (!table) |
1407 | goto out_argv; | 1434 | goto out_argv; |
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 320401dec104..f82147029636 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -224,8 +224,6 @@ struct kcopyd_job { | |||
224 | unsigned int num_dests; | 224 | unsigned int num_dests; |
225 | struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS]; | 225 | struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS]; |
226 | 226 | ||
227 | sector_t offset; | ||
228 | unsigned int nr_pages; | ||
229 | struct page_list *pages; | 227 | struct page_list *pages; |
230 | 228 | ||
231 | /* | 229 | /* |
@@ -380,7 +378,7 @@ static int run_io_job(struct kcopyd_job *job) | |||
380 | .bi_rw = job->rw, | 378 | .bi_rw = job->rw, |
381 | .mem.type = DM_IO_PAGE_LIST, | 379 | .mem.type = DM_IO_PAGE_LIST, |
382 | .mem.ptr.pl = job->pages, | 380 | .mem.ptr.pl = job->pages, |
383 | .mem.offset = job->offset, | 381 | .mem.offset = 0, |
384 | .notify.fn = complete_io, | 382 | .notify.fn = complete_io, |
385 | .notify.context = job, | 383 | .notify.context = job, |
386 | .client = job->kc->io_client, | 384 | .client = job->kc->io_client, |
@@ -397,10 +395,9 @@ static int run_io_job(struct kcopyd_job *job) | |||
397 | static int run_pages_job(struct kcopyd_job *job) | 395 | static int run_pages_job(struct kcopyd_job *job) |
398 | { | 396 | { |
399 | int r; | 397 | int r; |
398 | unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9); | ||
400 | 399 | ||
401 | job->nr_pages = dm_div_up(job->dests[0].count + job->offset, | 400 | r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); |
402 | PAGE_SIZE >> 9); | ||
403 | r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages); | ||
404 | if (!r) { | 401 | if (!r) { |
405 | /* this job is ready for io */ | 402 | /* this job is ready for io */ |
406 | push(&job->kc->io_jobs, job); | 403 | push(&job->kc->io_jobs, job); |
@@ -602,8 +599,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, | |||
602 | job->num_dests = num_dests; | 599 | job->num_dests = num_dests; |
603 | memcpy(&job->dests, dests, sizeof(*dests) * num_dests); | 600 | memcpy(&job->dests, dests, sizeof(*dests) * num_dests); |
604 | 601 | ||
605 | job->offset = 0; | ||
606 | job->nr_pages = 0; | ||
607 | job->pages = NULL; | 602 | job->pages = NULL; |
608 | 603 | ||
609 | job->fn = fn; | 604 | job->fn = fn; |
@@ -622,6 +617,37 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, | |||
622 | } | 617 | } |
623 | EXPORT_SYMBOL(dm_kcopyd_copy); | 618 | EXPORT_SYMBOL(dm_kcopyd_copy); |
624 | 619 | ||
620 | void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, | ||
621 | dm_kcopyd_notify_fn fn, void *context) | ||
622 | { | ||
623 | struct kcopyd_job *job; | ||
624 | |||
625 | job = mempool_alloc(kc->job_pool, GFP_NOIO); | ||
626 | |||
627 | memset(job, 0, sizeof(struct kcopyd_job)); | ||
628 | job->kc = kc; | ||
629 | job->fn = fn; | ||
630 | job->context = context; | ||
631 | |||
632 | atomic_inc(&kc->nr_jobs); | ||
633 | |||
634 | return job; | ||
635 | } | ||
636 | EXPORT_SYMBOL(dm_kcopyd_prepare_callback); | ||
637 | |||
638 | void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err) | ||
639 | { | ||
640 | struct kcopyd_job *job = j; | ||
641 | struct dm_kcopyd_client *kc = job->kc; | ||
642 | |||
643 | job->read_err = read_err; | ||
644 | job->write_err = write_err; | ||
645 | |||
646 | push(&kc->complete_jobs, job); | ||
647 | wake(kc); | ||
648 | } | ||
649 | EXPORT_SYMBOL(dm_kcopyd_do_callback); | ||
650 | |||
625 | /* | 651 | /* |
626 | * Cancels a kcopyd job, eg. someone might be deactivating a | 652 | * Cancels a kcopyd job, eg. someone might be deactivating a |
627 | * mirror. | 653 | * mirror. |
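The exported pair dm_kcopyd_prepare_callback()/dm_kcopyd_do_callback() lets a kcopyd client push a completion through kcopyd's own callback path without doing any copying, keeping it ordered with real dm_kcopyd_copy() jobs. A sketch of how a client might use it, assuming the callback takes the same (read_err, write_err, context) arguments as the existing dm_kcopyd_copy() notify function; the no-copy scenario itself is hypothetical:

	#include <linux/completion.h>
	#include <linux/dm-kcopyd.h>

	static void no_copy_done(int read_err, unsigned long write_err, void *context)
	{
		struct completion *done = context;

		/* a real client would record read_err/write_err here */
		complete(done);
	}

	static void complete_without_copy(struct dm_kcopyd_client *kc)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		void *job = dm_kcopyd_prepare_callback(kc, no_copy_done, &done);

		/* no data movement needed: hand the job straight to the callback path */
		dm_kcopyd_do_callback(job, 0, 0);
		wait_for_completion(&done);
	}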
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index aa2e0c374ab3..1021c8986011 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c | |||
@@ -394,8 +394,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list) | |||
394 | group[count] = fe->region; | 394 | group[count] = fe->region; |
395 | count++; | 395 | count++; |
396 | 396 | ||
397 | list_del(&fe->list); | 397 | list_move(&fe->list, &tmp_list); |
398 | list_add(&fe->list, &tmp_list); | ||
399 | 398 | ||
400 | type = fe->type; | 399 | type = fe->type; |
401 | if (count >= MAX_FLUSH_GROUP_COUNT) | 400 | if (count >= MAX_FLUSH_GROUP_COUNT) |
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 948e3f4925bf..3b52bb72bd1f 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy); | |||
197 | #define MIRROR_DISK_VERSION 2 | 197 | #define MIRROR_DISK_VERSION 2 |
198 | #define LOG_OFFSET 2 | 198 | #define LOG_OFFSET 2 |
199 | 199 | ||
200 | struct log_header { | 200 | struct log_header_disk { |
201 | uint32_t magic; | 201 | __le32 magic; |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * Simple, incrementing version. no backward | 204 | * Simple, incrementing version. no backward |
205 | * compatibility. | 205 | * compatibility. |
206 | */ | 206 | */ |
207 | __le32 version; | ||
208 | __le64 nr_regions; | ||
209 | } __packed; | ||
210 | |||
211 | struct log_header_core { | ||
212 | uint32_t magic; | ||
207 | uint32_t version; | 213 | uint32_t version; |
208 | sector_t nr_regions; | 214 | uint64_t nr_regions; |
209 | }; | 215 | }; |
210 | 216 | ||
211 | struct log_c { | 217 | struct log_c { |
@@ -239,10 +245,10 @@ struct log_c { | |||
239 | int log_dev_failed; | 245 | int log_dev_failed; |
240 | int log_dev_flush_failed; | 246 | int log_dev_flush_failed; |
241 | struct dm_dev *log_dev; | 247 | struct dm_dev *log_dev; |
242 | struct log_header header; | 248 | struct log_header_core header; |
243 | 249 | ||
244 | struct dm_io_region header_location; | 250 | struct dm_io_region header_location; |
245 | struct log_header *disk_header; | 251 | struct log_header_disk *disk_header; |
246 | }; | 252 | }; |
247 | 253 | ||
248 | /* | 254 | /* |
@@ -251,34 +257,34 @@ struct log_c { | |||
251 | */ | 257 | */ |
252 | static inline int log_test_bit(uint32_t *bs, unsigned bit) | 258 | static inline int log_test_bit(uint32_t *bs, unsigned bit) |
253 | { | 259 | { |
254 | return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0; | 260 | return test_bit_le(bit, bs) ? 1 : 0; |
255 | } | 261 | } |
256 | 262 | ||
257 | static inline void log_set_bit(struct log_c *l, | 263 | static inline void log_set_bit(struct log_c *l, |
258 | uint32_t *bs, unsigned bit) | 264 | uint32_t *bs, unsigned bit) |
259 | { | 265 | { |
260 | __test_and_set_bit_le(bit, (unsigned long *) bs); | 266 | __set_bit_le(bit, bs); |
261 | l->touched_cleaned = 1; | 267 | l->touched_cleaned = 1; |
262 | } | 268 | } |
263 | 269 | ||
264 | static inline void log_clear_bit(struct log_c *l, | 270 | static inline void log_clear_bit(struct log_c *l, |
265 | uint32_t *bs, unsigned bit) | 271 | uint32_t *bs, unsigned bit) |
266 | { | 272 | { |
267 | __test_and_clear_bit_le(bit, (unsigned long *) bs); | 273 | __clear_bit_le(bit, bs); |
268 | l->touched_dirtied = 1; | 274 | l->touched_dirtied = 1; |
269 | } | 275 | } |
270 | 276 | ||
271 | /*---------------------------------------------------------------- | 277 | /*---------------------------------------------------------------- |
272 | * Header IO | 278 | * Header IO |
273 | *--------------------------------------------------------------*/ | 279 | *--------------------------------------------------------------*/ |
274 | static void header_to_disk(struct log_header *core, struct log_header *disk) | 280 | static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk) |
275 | { | 281 | { |
276 | disk->magic = cpu_to_le32(core->magic); | 282 | disk->magic = cpu_to_le32(core->magic); |
277 | disk->version = cpu_to_le32(core->version); | 283 | disk->version = cpu_to_le32(core->version); |
278 | disk->nr_regions = cpu_to_le64(core->nr_regions); | 284 | disk->nr_regions = cpu_to_le64(core->nr_regions); |
279 | } | 285 | } |
280 | 286 | ||
281 | static void header_from_disk(struct log_header *core, struct log_header *disk) | 287 | static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk) |
282 | { | 288 | { |
283 | core->magic = le32_to_cpu(disk->magic); | 289 | core->magic = le32_to_cpu(disk->magic); |
284 | core->version = le32_to_cpu(disk->version); | 290 | core->version = le32_to_cpu(disk->version); |
@@ -486,7 +492,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, | |||
486 | memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); | 492 | memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); |
487 | lc->sync_count = (sync == NOSYNC) ? region_count : 0; | 493 | lc->sync_count = (sync == NOSYNC) ? region_count : 0; |
488 | 494 | ||
489 | lc->recovering_bits = vmalloc(bitset_size); | 495 | lc->recovering_bits = vzalloc(bitset_size); |
490 | if (!lc->recovering_bits) { | 496 | if (!lc->recovering_bits) { |
491 | DMWARN("couldn't allocate sync bitset"); | 497 | DMWARN("couldn't allocate sync bitset"); |
492 | vfree(lc->sync_bits); | 498 | vfree(lc->sync_bits); |
@@ -498,7 +504,6 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, | |||
498 | kfree(lc); | 504 | kfree(lc); |
499 | return -ENOMEM; | 505 | return -ENOMEM; |
500 | } | 506 | } |
501 | memset(lc->recovering_bits, 0, bitset_size); | ||
502 | lc->sync_search = 0; | 507 | lc->sync_search = 0; |
503 | log->context = lc; | 508 | log->context = lc; |
504 | 509 | ||
@@ -739,8 +744,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) | |||
739 | return 0; | 744 | return 0; |
740 | 745 | ||
741 | do { | 746 | do { |
742 | *region = find_next_zero_bit_le( | 747 | *region = find_next_zero_bit_le(lc->sync_bits, |
743 | (unsigned long *) lc->sync_bits, | ||
744 | lc->region_count, | 748 | lc->region_count, |
745 | lc->sync_search); | 749 | lc->sync_search); |
746 | lc->sync_search = *region + 1; | 750 | lc->sync_search = *region + 1; |
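Splitting log_header into log_header_disk (fixed-width little-endian, __packed) and log_header_core (CPU-native) pins the on-disk layout: the old single struct carried nr_regions as a sector_t, whose width can vary with the kernel configuration on 32-bit builds. A small illustrative check, not part of the patch, of the invariant the packed on-disk struct now guarantees:

	#include <linux/bug.h>
	#include <linux/stddef.h>

	/* 4-byte magic + 4-byte version + 8-byte nr_regions, no padding */
	static inline void check_log_header_disk_layout(void)
	{
		BUILD_BUG_ON(offsetof(struct log_header_disk, version) != 4);
		BUILD_BUG_ON(offsetof(struct log_header_disk, nr_regions) != 8);
		BUILD_BUG_ON(sizeof(struct log_header_disk) != 16);
	}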
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c3547016f0f1..5e0090ef4182 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
23 | 23 | ||
24 | #define DM_MSG_PREFIX "multipath" | 24 | #define DM_MSG_PREFIX "multipath" |
25 | #define MESG_STR(x) x, sizeof(x) | ||
26 | #define DM_PG_INIT_DELAY_MSECS 2000 | 25 | #define DM_PG_INIT_DELAY_MSECS 2000 |
27 | #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) | 26 | #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) |
28 | 27 | ||
@@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work) | |||
505 | * <#paths> <#per-path selector args> | 504 | * <#paths> <#per-path selector args> |
506 | * [<path> [<arg>]* ]+ ]+ | 505 | * [<path> [<arg>]* ]+ ]+ |
507 | *---------------------------------------------------------------*/ | 506 | *---------------------------------------------------------------*/ |
508 | struct param { | 507 | static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, |
509 | unsigned min; | ||
510 | unsigned max; | ||
511 | char *error; | ||
512 | }; | ||
513 | |||
514 | static int read_param(struct param *param, char *str, unsigned *v, char **error) | ||
515 | { | ||
516 | if (!str || | ||
517 | (sscanf(str, "%u", v) != 1) || | ||
518 | (*v < param->min) || | ||
519 | (*v > param->max)) { | ||
520 | *error = param->error; | ||
521 | return -EINVAL; | ||
522 | } | ||
523 | |||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | struct arg_set { | ||
528 | unsigned argc; | ||
529 | char **argv; | ||
530 | }; | ||
531 | |||
532 | static char *shift(struct arg_set *as) | ||
533 | { | ||
534 | char *r; | ||
535 | |||
536 | if (as->argc) { | ||
537 | as->argc--; | ||
538 | r = *as->argv; | ||
539 | as->argv++; | ||
540 | return r; | ||
541 | } | ||
542 | |||
543 | return NULL; | ||
544 | } | ||
545 | |||
546 | static void consume(struct arg_set *as, unsigned n) | ||
547 | { | ||
548 | BUG_ON (as->argc < n); | ||
549 | as->argc -= n; | ||
550 | as->argv += n; | ||
551 | } | ||
552 | |||
553 | static int parse_path_selector(struct arg_set *as, struct priority_group *pg, | ||
554 | struct dm_target *ti) | 508 | struct dm_target *ti) |
555 | { | 509 | { |
556 | int r; | 510 | int r; |
557 | struct path_selector_type *pst; | 511 | struct path_selector_type *pst; |
558 | unsigned ps_argc; | 512 | unsigned ps_argc; |
559 | 513 | ||
560 | static struct param _params[] = { | 514 | static struct dm_arg _args[] = { |
561 | {0, 1024, "invalid number of path selector args"}, | 515 | {0, 1024, "invalid number of path selector args"}, |
562 | }; | 516 | }; |
563 | 517 | ||
564 | pst = dm_get_path_selector(shift(as)); | 518 | pst = dm_get_path_selector(dm_shift_arg(as)); |
565 | if (!pst) { | 519 | if (!pst) { |
566 | ti->error = "unknown path selector type"; | 520 | ti->error = "unknown path selector type"; |
567 | return -EINVAL; | 521 | return -EINVAL; |
568 | } | 522 | } |
569 | 523 | ||
570 | r = read_param(_params, shift(as), &ps_argc, &ti->error); | 524 | r = dm_read_arg_group(_args, as, &ps_argc, &ti->error); |
571 | if (r) { | 525 | if (r) { |
572 | dm_put_path_selector(pst); | 526 | dm_put_path_selector(pst); |
573 | return -EINVAL; | 527 | return -EINVAL; |
574 | } | 528 | } |
575 | 529 | ||
576 | if (ps_argc > as->argc) { | ||
577 | dm_put_path_selector(pst); | ||
578 | ti->error = "not enough arguments for path selector"; | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | |||
582 | r = pst->create(&pg->ps, ps_argc, as->argv); | 530 | r = pst->create(&pg->ps, ps_argc, as->argv); |
583 | if (r) { | 531 | if (r) { |
584 | dm_put_path_selector(pst); | 532 | dm_put_path_selector(pst); |
@@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg, | |||
587 | } | 535 | } |
588 | 536 | ||
589 | pg->ps.type = pst; | 537 | pg->ps.type = pst; |
590 | consume(as, ps_argc); | 538 | dm_consume_args(as, ps_argc); |
591 | 539 | ||
592 | return 0; | 540 | return 0; |
593 | } | 541 | } |
594 | 542 | ||
595 | static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | 543 | static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, |
596 | struct dm_target *ti) | 544 | struct dm_target *ti) |
597 | { | 545 | { |
598 | int r; | 546 | int r; |
@@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |||
609 | if (!p) | 557 | if (!p) |
610 | return ERR_PTR(-ENOMEM); | 558 | return ERR_PTR(-ENOMEM); |
611 | 559 | ||
612 | r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table), | 560 | r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), |
613 | &p->path.dev); | 561 | &p->path.dev); |
614 | if (r) { | 562 | if (r) { |
615 | ti->error = "error getting device"; | 563 | ti->error = "error getting device"; |
@@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, | |||
660 | return ERR_PTR(r); | 608 | return ERR_PTR(r); |
661 | } | 609 | } |
662 | 610 | ||
663 | static struct priority_group *parse_priority_group(struct arg_set *as, | 611 | static struct priority_group *parse_priority_group(struct dm_arg_set *as, |
664 | struct multipath *m) | 612 | struct multipath *m) |
665 | { | 613 | { |
666 | static struct param _params[] = { | 614 | static struct dm_arg _args[] = { |
667 | {1, 1024, "invalid number of paths"}, | 615 | {1, 1024, "invalid number of paths"}, |
668 | {0, 1024, "invalid number of selector args"} | 616 | {0, 1024, "invalid number of selector args"} |
669 | }; | 617 | }; |
670 | 618 | ||
671 | int r; | 619 | int r; |
672 | unsigned i, nr_selector_args, nr_params; | 620 | unsigned i, nr_selector_args, nr_args; |
673 | struct priority_group *pg; | 621 | struct priority_group *pg; |
674 | struct dm_target *ti = m->ti; | 622 | struct dm_target *ti = m->ti; |
675 | 623 | ||
@@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as, | |||
693 | /* | 641 | /* |
694 | * read the paths | 642 | * read the paths |
695 | */ | 643 | */ |
696 | r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); | 644 | r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error); |
697 | if (r) | 645 | if (r) |
698 | goto bad; | 646 | goto bad; |
699 | 647 | ||
700 | r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); | 648 | r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error); |
701 | if (r) | 649 | if (r) |
702 | goto bad; | 650 | goto bad; |
703 | 651 | ||
704 | nr_params = 1 + nr_selector_args; | 652 | nr_args = 1 + nr_selector_args; |
705 | for (i = 0; i < pg->nr_pgpaths; i++) { | 653 | for (i = 0; i < pg->nr_pgpaths; i++) { |
706 | struct pgpath *pgpath; | 654 | struct pgpath *pgpath; |
707 | struct arg_set path_args; | 655 | struct dm_arg_set path_args; |
708 | 656 | ||
709 | if (as->argc < nr_params) { | 657 | if (as->argc < nr_args) { |
710 | ti->error = "not enough path parameters"; | 658 | ti->error = "not enough path parameters"; |
711 | r = -EINVAL; | 659 | r = -EINVAL; |
712 | goto bad; | 660 | goto bad; |
713 | } | 661 | } |
714 | 662 | ||
715 | path_args.argc = nr_params; | 663 | path_args.argc = nr_args; |
716 | path_args.argv = as->argv; | 664 | path_args.argv = as->argv; |
717 | 665 | ||
718 | pgpath = parse_path(&path_args, &pg->ps, ti); | 666 | pgpath = parse_path(&path_args, &pg->ps, ti); |
@@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as, | |||
723 | 671 | ||
724 | pgpath->pg = pg; | 672 | pgpath->pg = pg; |
725 | list_add_tail(&pgpath->list, &pg->pgpaths); | 673 | list_add_tail(&pgpath->list, &pg->pgpaths); |
726 | consume(as, nr_params); | 674 | dm_consume_args(as, nr_args); |
727 | } | 675 | } |
728 | 676 | ||
729 | return pg; | 677 | return pg; |
@@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as, | |||
733 | return ERR_PTR(r); | 681 | return ERR_PTR(r); |
734 | } | 682 | } |
735 | 683 | ||
736 | static int parse_hw_handler(struct arg_set *as, struct multipath *m) | 684 | static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) |
737 | { | 685 | { |
738 | unsigned hw_argc; | 686 | unsigned hw_argc; |
739 | int ret; | 687 | int ret; |
740 | struct dm_target *ti = m->ti; | 688 | struct dm_target *ti = m->ti; |
741 | 689 | ||
742 | static struct param _params[] = { | 690 | static struct dm_arg _args[] = { |
743 | {0, 1024, "invalid number of hardware handler args"}, | 691 | {0, 1024, "invalid number of hardware handler args"}, |
744 | }; | 692 | }; |
745 | 693 | ||
746 | if (read_param(_params, shift(as), &hw_argc, &ti->error)) | 694 | if (dm_read_arg_group(_args, as, &hw_argc, &ti->error)) |
747 | return -EINVAL; | 695 | return -EINVAL; |
748 | 696 | ||
749 | if (!hw_argc) | 697 | if (!hw_argc) |
750 | return 0; | 698 | return 0; |
751 | 699 | ||
752 | if (hw_argc > as->argc) { | 700 | m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); |
753 | ti->error = "not enough arguments for hardware handler"; | ||
754 | return -EINVAL; | ||
755 | } | ||
756 | |||
757 | m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); | ||
758 | request_module("scsi_dh_%s", m->hw_handler_name); | 701 | request_module("scsi_dh_%s", m->hw_handler_name); |
759 | if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { | 702 | if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { |
760 | ti->error = "unknown hardware handler type"; | 703 | ti->error = "unknown hardware handler type"; |
@@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) | |||
778 | for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) | 721 | for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) |
779 | j = sprintf(p, "%s", as->argv[i]); | 722 | j = sprintf(p, "%s", as->argv[i]); |
780 | } | 723 | } |
781 | consume(as, hw_argc - 1); | 724 | dm_consume_args(as, hw_argc - 1); |
782 | 725 | ||
783 | return 0; | 726 | return 0; |
784 | fail: | 727 | fail: |
@@ -787,20 +730,20 @@ fail: | |||
787 | return ret; | 730 | return ret; |
788 | } | 731 | } |
789 | 732 | ||
790 | static int parse_features(struct arg_set *as, struct multipath *m) | 733 | static int parse_features(struct dm_arg_set *as, struct multipath *m) |
791 | { | 734 | { |
792 | int r; | 735 | int r; |
793 | unsigned argc; | 736 | unsigned argc; |
794 | struct dm_target *ti = m->ti; | 737 | struct dm_target *ti = m->ti; |
795 | const char *param_name; | 738 | const char *arg_name; |
796 | 739 | ||
797 | static struct param _params[] = { | 740 | static struct dm_arg _args[] = { |
798 | {0, 5, "invalid number of feature args"}, | 741 | {0, 5, "invalid number of feature args"}, |
799 | {1, 50, "pg_init_retries must be between 1 and 50"}, | 742 | {1, 50, "pg_init_retries must be between 1 and 50"}, |
800 | {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, | 743 | {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, |
801 | }; | 744 | }; |
802 | 745 | ||
803 | r = read_param(_params, shift(as), &argc, &ti->error); | 746 | r = dm_read_arg_group(_args, as, &argc, &ti->error); |
804 | if (r) | 747 | if (r) |
805 | return -EINVAL; | 748 | return -EINVAL; |
806 | 749 | ||
@@ -808,26 +751,24 @@ static int parse_features(struct arg_set *as, struct multipath *m) | |||
808 | return 0; | 751 | return 0; |
809 | 752 | ||
810 | do { | 753 | do { |
811 | param_name = shift(as); | 754 | arg_name = dm_shift_arg(as); |
812 | argc--; | 755 | argc--; |
813 | 756 | ||
814 | if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) { | 757 | if (!strcasecmp(arg_name, "queue_if_no_path")) { |
815 | r = queue_if_no_path(m, 1, 0); | 758 | r = queue_if_no_path(m, 1, 0); |
816 | continue; | 759 | continue; |
817 | } | 760 | } |
818 | 761 | ||
819 | if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && | 762 | if (!strcasecmp(arg_name, "pg_init_retries") && |
820 | (argc >= 1)) { | 763 | (argc >= 1)) { |
821 | r = read_param(_params + 1, shift(as), | 764 | r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); |
822 | &m->pg_init_retries, &ti->error); | ||
823 | argc--; | 765 | argc--; |
824 | continue; | 766 | continue; |
825 | } | 767 | } |
826 | 768 | ||
827 | if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) && | 769 | if (!strcasecmp(arg_name, "pg_init_delay_msecs") && |
828 | (argc >= 1)) { | 770 | (argc >= 1)) { |
829 | r = read_param(_params + 2, shift(as), | 771 | r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); |
830 | &m->pg_init_delay_msecs, &ti->error); | ||
831 | argc--; | 772 | argc--; |
832 | continue; | 773 | continue; |
833 | } | 774 | } |
@@ -842,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m) | |||
842 | static int multipath_ctr(struct dm_target *ti, unsigned int argc, | 783 | static int multipath_ctr(struct dm_target *ti, unsigned int argc, |
843 | char **argv) | 784 | char **argv) |
844 | { | 785 | { |
845 | /* target parameters */ | 786 | /* target arguments */ |
846 | static struct param _params[] = { | 787 | static struct dm_arg _args[] = { |
847 | {0, 1024, "invalid number of priority groups"}, | 788 | {0, 1024, "invalid number of priority groups"}, |
848 | {0, 1024, "invalid initial priority group number"}, | 789 | {0, 1024, "invalid initial priority group number"}, |
849 | }; | 790 | }; |
850 | 791 | ||
851 | int r; | 792 | int r; |
852 | struct multipath *m; | 793 | struct multipath *m; |
853 | struct arg_set as; | 794 | struct dm_arg_set as; |
854 | unsigned pg_count = 0; | 795 | unsigned pg_count = 0; |
855 | unsigned next_pg_num; | 796 | unsigned next_pg_num; |
856 | 797 | ||
@@ -871,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, | |||
871 | if (r) | 812 | if (r) |
872 | goto bad; | 813 | goto bad; |
873 | 814 | ||
874 | r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); | 815 | r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); |
875 | if (r) | 816 | if (r) |
876 | goto bad; | 817 | goto bad; |
877 | 818 | ||
878 | r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); | 819 | r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error); |
879 | if (r) | 820 | if (r) |
880 | goto bad; | 821 | goto bad; |
881 | 822 | ||
@@ -1505,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) | |||
1505 | } | 1446 | } |
1506 | 1447 | ||
1507 | if (argc == 1) { | 1448 | if (argc == 1) { |
1508 | if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { | 1449 | if (!strcasecmp(argv[0], "queue_if_no_path")) { |
1509 | r = queue_if_no_path(m, 1, 0); | 1450 | r = queue_if_no_path(m, 1, 0); |
1510 | goto out; | 1451 | goto out; |
1511 | } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { | 1452 | } else if (!strcasecmp(argv[0], "fail_if_no_path")) { |
1512 | r = queue_if_no_path(m, 0, 0); | 1453 | r = queue_if_no_path(m, 0, 0); |
1513 | goto out; | 1454 | goto out; |
1514 | } | 1455 | } |
@@ -1519,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) | |||
1519 | goto out; | 1460 | goto out; |
1520 | } | 1461 | } |
1521 | 1462 | ||
1522 | if (!strnicmp(argv[0], MESG_STR("disable_group"))) { | 1463 | if (!strcasecmp(argv[0], "disable_group")) { |
1523 | r = bypass_pg_num(m, argv[1], 1); | 1464 | r = bypass_pg_num(m, argv[1], 1); |
1524 | goto out; | 1465 | goto out; |
1525 | } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { | 1466 | } else if (!strcasecmp(argv[0], "enable_group")) { |
1526 | r = bypass_pg_num(m, argv[1], 0); | 1467 | r = bypass_pg_num(m, argv[1], 0); |
1527 | goto out; | 1468 | goto out; |
1528 | } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { | 1469 | } else if (!strcasecmp(argv[0], "switch_group")) { |
1529 | r = switch_pg_num(m, argv[1]); | 1470 | r = switch_pg_num(m, argv[1]); |
1530 | goto out; | 1471 | goto out; |
1531 | } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) | 1472 | } else if (!strcasecmp(argv[0], "reinstate_path")) |
1532 | action = reinstate_path; | 1473 | action = reinstate_path; |
1533 | else if (!strnicmp(argv[0], MESG_STR("fail_path"))) | 1474 | else if (!strcasecmp(argv[0], "fail_path")) |
1534 | action = fail_path; | 1475 | action = fail_path; |
1535 | else { | 1476 | else { |
1536 | DMWARN("Unrecognised multipath message received."); | 1477 | DMWARN("Unrecognised multipath message received."); |
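The open-coded param/arg_set helpers removed above are replaced by the shared dm_arg_set API that the new dm-flakey code earlier in this series also uses; the dropped "not enough arguments" checks suggest dm_read_arg_group() now verifies that the declared group fits within the remaining arguments. A condensed sketch of the resulting parsing pattern, using a hypothetical feature name and assuming only the helper behaviour implied by these call sites:

	#include <linux/device-mapper.h>
	#include <linux/string.h>

	/* Hypothetical feature parser built on the shared helpers:
	 * dm_read_arg_group() reads and bounds-checks the feature count,
	 * dm_shift_arg() pops one word, dm_read_arg() reads one bounded value. */
	static int example_parse_features(struct dm_arg_set *as, struct dm_target *ti,
					  unsigned *retries)
	{
		static struct dm_arg _args[] = {
			{0, 2, "invalid number of example feature args"},
			{1, 50, "example retries must be between 1 and 50"},
		};
		unsigned argc;
		const char *arg_name;
		int r;

		r = dm_read_arg_group(_args, as, &argc, &ti->error);
		if (r)
			return r;

		while (argc) {
			arg_name = dm_shift_arg(as);
			argc--;

			if (!strcasecmp(arg_name, "retries") && argc >= 1) {
				r = dm_read_arg(_args + 1, as, retries, &ti->error);
				if (r)
					return r;
				argc--;
				continue;
			}

			ti->error = "unrecognised example feature";
			return -EINVAL;
		}

		return 0;
	}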
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index e5d8904fc8f6..a002dd85db1e 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -8,19 +8,19 @@ | |||
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | 9 | ||
10 | #include "md.h" | 10 | #include "md.h" |
11 | #include "raid1.h" | ||
11 | #include "raid5.h" | 12 | #include "raid5.h" |
12 | #include "dm.h" | ||
13 | #include "bitmap.h" | 13 | #include "bitmap.h" |
14 | 14 | ||
15 | #include <linux/device-mapper.h> | ||
16 | |||
15 | #define DM_MSG_PREFIX "raid" | 17 | #define DM_MSG_PREFIX "raid" |
16 | 18 | ||
17 | /* | 19 | /* |
18 | * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then | 20 | * The following flags are used by dm-raid.c to set up the array state. |
19 | * make it so the flag doesn't set anything. | 21 | * They must be cleared before md_run is called. |
20 | */ | 22 | */ |
21 | #ifndef MD_SYNC_STATE_FORCED | 23 | #define FirstUse 10 /* rdev flag */ |
22 | #define MD_SYNC_STATE_FORCED 0 | ||
23 | #endif | ||
24 | 24 | ||
25 | struct raid_dev { | 25 | struct raid_dev { |
26 | /* | 26 | /* |
@@ -43,14 +43,15 @@ struct raid_dev { | |||
43 | /* | 43 | /* |
44 | * Flags for rs->print_flags field. | 44 | * Flags for rs->print_flags field. |
45 | */ | 45 | */ |
46 | #define DMPF_DAEMON_SLEEP 0x1 | 46 | #define DMPF_SYNC 0x1 |
47 | #define DMPF_MAX_WRITE_BEHIND 0x2 | 47 | #define DMPF_NOSYNC 0x2 |
48 | #define DMPF_SYNC 0x4 | 48 | #define DMPF_REBUILD 0x4 |
49 | #define DMPF_NOSYNC 0x8 | 49 | #define DMPF_DAEMON_SLEEP 0x8 |
50 | #define DMPF_STRIPE_CACHE 0x10 | 50 | #define DMPF_MIN_RECOVERY_RATE 0x10 |
51 | #define DMPF_MIN_RECOVERY_RATE 0x20 | 51 | #define DMPF_MAX_RECOVERY_RATE 0x20 |
52 | #define DMPF_MAX_RECOVERY_RATE 0x40 | 52 | #define DMPF_MAX_WRITE_BEHIND 0x40 |
53 | 53 | #define DMPF_STRIPE_CACHE 0x80 | |
54 | #define DMPF_REGION_SIZE 0X100 | ||
54 | struct raid_set { | 55 | struct raid_set { |
55 | struct dm_target *ti; | 56 | struct dm_target *ti; |
56 | 57 | ||
@@ -72,6 +73,7 @@ static struct raid_type { | |||
72 | const unsigned level; /* RAID level. */ | 73 | const unsigned level; /* RAID level. */ |
73 | const unsigned algorithm; /* RAID algorithm. */ | 74 | const unsigned algorithm; /* RAID algorithm. */ |
74 | } raid_types[] = { | 75 | } raid_types[] = { |
76 | {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, | ||
75 | {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, | 77 | {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, |
76 | {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, | 78 | {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, |
77 | {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, | 79 | {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, |
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra | |||
105 | } | 107 | } |
106 | 108 | ||
107 | sectors_per_dev = ti->len; | 109 | sectors_per_dev = ti->len; |
108 | if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { | 110 | if ((raid_type->level > 1) && |
111 | sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { | ||
109 | ti->error = "Target length not divisible by number of data devices"; | 112 | ti->error = "Target length not divisible by number of data devices"; |
110 | return ERR_PTR(-EINVAL); | 113 | return ERR_PTR(-EINVAL); |
111 | } | 114 | } |
@@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs) | |||
147 | { | 150 | { |
148 | int i; | 151 | int i; |
149 | 152 | ||
150 | for (i = 0; i < rs->md.raid_disks; i++) | 153 | for (i = 0; i < rs->md.raid_disks; i++) { |
154 | if (rs->dev[i].meta_dev) | ||
155 | dm_put_device(rs->ti, rs->dev[i].meta_dev); | ||
156 | if (rs->dev[i].rdev.sb_page) | ||
157 | put_page(rs->dev[i].rdev.sb_page); | ||
158 | rs->dev[i].rdev.sb_page = NULL; | ||
159 | rs->dev[i].rdev.sb_loaded = 0; | ||
151 | if (rs->dev[i].data_dev) | 160 | if (rs->dev[i].data_dev) |
152 | dm_put_device(rs->ti, rs->dev[i].data_dev); | 161 | dm_put_device(rs->ti, rs->dev[i].data_dev); |
162 | } | ||
153 | 163 | ||
154 | kfree(rs); | 164 | kfree(rs); |
155 | } | 165 | } |
@@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs) | |||
159 | * <meta_dev>: meta device name or '-' if missing | 169 | * <meta_dev>: meta device name or '-' if missing |
160 | * <data_dev>: data device name or '-' if missing | 170 | * <data_dev>: data device name or '-' if missing |
161 | * | 171 | * |
162 | * This code parses those words. | 172 | * The following are permitted: |
173 | * - - | ||
174 | * - <data_dev> | ||
175 | * <meta_dev> <data_dev> | ||
176 | * | ||
177 | * The following is not allowed: | ||
178 | * <meta_dev> - | ||
179 | * | ||
180 | * This code parses those words. If there is a failure, | ||
181 | * the caller must use context_free to unwind the operations. | ||
163 | */ | 182 | */ |
164 | static int dev_parms(struct raid_set *rs, char **argv) | 183 | static int dev_parms(struct raid_set *rs, char **argv) |
165 | { | 184 | { |
@@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv) | |||
182 | rs->dev[i].rdev.mddev = &rs->md; | 201 | rs->dev[i].rdev.mddev = &rs->md; |
183 | 202 | ||
184 | if (strcmp(argv[0], "-")) { | 203 | if (strcmp(argv[0], "-")) { |
185 | rs->ti->error = "Metadata devices not supported"; | 204 | ret = dm_get_device(rs->ti, argv[0], |
186 | return -EINVAL; | 205 | dm_table_get_mode(rs->ti->table), |
206 | &rs->dev[i].meta_dev); | ||
207 | rs->ti->error = "RAID metadata device lookup failure"; | ||
208 | if (ret) | ||
209 | return ret; | ||
210 | |||
211 | rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); | ||
212 | if (!rs->dev[i].rdev.sb_page) | ||
213 | return -ENOMEM; | ||
187 | } | 214 | } |
188 | 215 | ||
189 | if (!strcmp(argv[1], "-")) { | 216 | if (!strcmp(argv[1], "-")) { |
@@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv) | |||
193 | return -EINVAL; | 220 | return -EINVAL; |
194 | } | 221 | } |
195 | 222 | ||
223 | rs->ti->error = "No data device supplied with metadata device"; | ||
224 | if (rs->dev[i].meta_dev) | ||
225 | return -EINVAL; | ||
226 | |||
196 | continue; | 227 | continue; |
197 | } | 228 | } |
198 | 229 | ||
@@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv) | |||
204 | return ret; | 235 | return ret; |
205 | } | 236 | } |
206 | 237 | ||
238 | if (rs->dev[i].meta_dev) { | ||
239 | metadata_available = 1; | ||
240 | rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; | ||
241 | } | ||
207 | rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; | 242 | rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; |
208 | list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); | 243 | list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); |
209 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) | 244 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) |
@@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv) | |||
235 | } | 270 | } |
236 | 271 | ||
237 | /* | 272 | /* |
273 | * validate_region_size | ||
274 | * @rs | ||
275 | * @region_size: region size in sectors. If 0, pick a size (4MiB default). | ||
276 | * | ||
277 | * Set rs->md.bitmap_info.chunksize (which really refers to 'region size'). | ||
278 | * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap. | ||
279 | * | ||
280 | * Returns: 0 on success, -EINVAL on failure. | ||
281 | */ | ||
282 | static int validate_region_size(struct raid_set *rs, unsigned long region_size) | ||
283 | { | ||
284 | unsigned long min_region_size = rs->ti->len / (1 << 21); | ||
285 | |||
286 | if (!region_size) { | ||
287 | /* | ||
288 | * Choose a reasonable default. All figures in sectors. | ||
289 | */ | ||
290 | if (min_region_size > (1 << 13)) { | ||
291 | DMINFO("Choosing default region size of %lu sectors", | ||
292 | region_size); | ||
293 | region_size = min_region_size; | ||
294 | } else { | ||
295 | DMINFO("Choosing default region size of 4MiB"); | ||
296 | region_size = 1 << 13; /* sectors */ | ||
297 | } | ||
298 | } else { | ||
299 | /* | ||
300 | * Validate user-supplied value. | ||
301 | */ | ||
302 | if (region_size > rs->ti->len) { | ||
303 | rs->ti->error = "Supplied region size is too large"; | ||
304 | return -EINVAL; | ||
305 | } | ||
306 | |||
307 | if (region_size < min_region_size) { | ||
308 | DMERR("Supplied region_size (%lu sectors) below minimum (%lu)", | ||
309 | region_size, min_region_size); | ||
310 | rs->ti->error = "Supplied region size is too small"; | ||
311 | return -EINVAL; | ||
312 | } | ||
313 | |||
314 | if (!is_power_of_2(region_size)) { | ||
315 | rs->ti->error = "Region size is not a power of 2"; | ||
316 | return -EINVAL; | ||
317 | } | ||
318 | |||
319 | if (region_size < rs->md.chunk_sectors) { | ||
320 | rs->ti->error = "Region size is smaller than the chunk size"; | ||
321 | return -EINVAL; | ||
322 | } | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * Convert sectors to bytes. | ||
327 | */ | ||
328 | rs->md.bitmap_info.chunksize = (region_size << 9); | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | /* | ||
238 | * Possible arguments are... | 334 | * Possible arguments are... |
239 | * RAID456: | ||
240 | * <chunk_size> [optional_args] | 335 | * <chunk_size> [optional_args] |
241 | * | 336 | * |
242 | * Optional args: | 337 | * Argument definitions |
243 | * [[no]sync] Force or prevent recovery of the entire array | 338 | * <chunk_size> The number of sectors per disk that |
339 | * will form the "stripe" | ||
340 | * [[no]sync] Force or prevent recovery of the | ||
341 | * entire array | ||
244 | * [rebuild <idx>] Rebuild the drive indicated by the index | 342 | * [rebuild <idx>] Rebuild the drive indicated by the index |
245 | * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits | 343 | * [daemon_sleep <ms>] Time between bitmap daemon work to |
344 | * clear bits | ||
246 | * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization | 345 | * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization |
247 | * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization | 346 | * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization |
347 | * [write_mostly <idx>] Indicate a write mostly drive via index | ||
248 | * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) | 348 | * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) |
249 | * [stripe_cache <sectors>] Stripe cache size for higher RAIDs | 349 | * [stripe_cache <sectors>] Stripe cache size for higher RAIDs |
350 | * [region_size <sectors>] Defines granularity of bitmap | ||
250 | */ | 351 | */ |
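Taken together, these arguments form the raid target's dm table line. A hypothetical example (start, length and device numbers are invented for illustration, and the line is split only for readability) for a RAID4 set of four data drives plus parity, a 1 MiB chunk and an explicit region size:

    0 1960893648 raid raid4 3 2048 region_size 8192 \
        5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81

Here "3" counts the raid parameter tokens (the chunk size plus the region_size key/value pair), "5" counts the device pairs, and each "-" slot may instead name a metadata device now that those are supported by this patch.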
251 | static int parse_raid_params(struct raid_set *rs, char **argv, | 352 | static int parse_raid_params(struct raid_set *rs, char **argv, |
252 | unsigned num_raid_params) | 353 | unsigned num_raid_params) |
253 | { | 354 | { |
254 | unsigned i, rebuild_cnt = 0; | 355 | unsigned i, rebuild_cnt = 0; |
255 | unsigned long value; | 356 | unsigned long value, region_size = 0; |
256 | char *key; | 357 | char *key; |
257 | 358 | ||
258 | /* | 359 | /* |
259 | * First, parse the in-order required arguments | 360 | * First, parse the in-order required arguments |
361 | * "chunk_size" is the only argument of this type. | ||
260 | */ | 362 | */ |
261 | if ((strict_strtoul(argv[0], 10, &value) < 0) || | 363 | if ((strict_strtoul(argv[0], 10, &value) < 0)) { |
262 | !is_power_of_2(value) || (value < 8)) { | ||
263 | rs->ti->error = "Bad chunk size"; | 364 | rs->ti->error = "Bad chunk size"; |
264 | return -EINVAL; | 365 | return -EINVAL; |
366 | } else if (rs->raid_type->level == 1) { | ||
367 | if (value) | ||
368 | DMERR("Ignoring chunk size parameter for RAID 1"); | ||
369 | value = 0; | ||
370 | } else if (!is_power_of_2(value)) { | ||
371 | rs->ti->error = "Chunk size must be a power of 2"; | ||
372 | return -EINVAL; | ||
373 | } else if (value < 8) { | ||
374 | rs->ti->error = "Chunk size value is too small"; | ||
375 | return -EINVAL; | ||
265 | } | 376 | } |
266 | 377 | ||
267 | rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; | 378 | rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; |
@@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
269 | num_raid_params--; | 380 | num_raid_params--; |
270 | 381 | ||
271 | /* | 382 | /* |
272 | * Second, parse the unordered optional arguments | 383 | * We set each individual device as In_sync with a completed |
384 | * 'recovery_offset'. If there has been a device failure or | ||
385 | * replacement then one of the following cases applies: | ||
386 | * | ||
387 | * 1) User specifies 'rebuild'. | ||
388 | * - Device is reset when param is read. | ||
389 | * 2) A new device is supplied. | ||
390 | * - No matching superblock found, resets device. | ||
391 | * 3) Device failure was transient and returns on reload. | ||
392 | * - Failure noticed, resets device for bitmap replay. | ||
393 | * 4) Device hadn't completed recovery after previous failure. | ||
394 | * - Superblock is read and overrides recovery_offset. | ||
395 | * | ||
396 | * What is found in the superblocks of the devices is always | ||
397 | * authoritative, unless 'rebuild' or '[no]sync' was specified. | ||
273 | */ | 398 | */ |
274 | for (i = 0; i < rs->md.raid_disks; i++) | 399 | for (i = 0; i < rs->md.raid_disks; i++) { |
275 | set_bit(In_sync, &rs->dev[i].rdev.flags); | 400 | set_bit(In_sync, &rs->dev[i].rdev.flags); |
401 | rs->dev[i].rdev.recovery_offset = MaxSector; | ||
402 | } | ||
276 | 403 | ||
404 | /* | ||
405 | * Second, parse the unordered optional arguments | ||
406 | */ | ||
277 | for (i = 0; i < num_raid_params; i++) { | 407 | for (i = 0; i < num_raid_params; i++) { |
278 | if (!strcmp(argv[i], "nosync")) { | 408 | if (!strcasecmp(argv[i], "nosync")) { |
279 | rs->md.recovery_cp = MaxSector; | 409 | rs->md.recovery_cp = MaxSector; |
280 | rs->print_flags |= DMPF_NOSYNC; | 410 | rs->print_flags |= DMPF_NOSYNC; |
281 | rs->md.flags |= MD_SYNC_STATE_FORCED; | ||
282 | continue; | 411 | continue; |
283 | } | 412 | } |
284 | if (!strcmp(argv[i], "sync")) { | 413 | if (!strcasecmp(argv[i], "sync")) { |
285 | rs->md.recovery_cp = 0; | 414 | rs->md.recovery_cp = 0; |
286 | rs->print_flags |= DMPF_SYNC; | 415 | rs->print_flags |= DMPF_SYNC; |
287 | rs->md.flags |= MD_SYNC_STATE_FORCED; | ||
288 | continue; | 416 | continue; |
289 | } | 417 | } |
290 | 418 | ||
@@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
300 | return -EINVAL; | 428 | return -EINVAL; |
301 | } | 429 | } |
302 | 430 | ||
303 | if (!strcmp(key, "rebuild")) { | 431 | if (!strcasecmp(key, "rebuild")) { |
304 | if (++rebuild_cnt > rs->raid_type->parity_devs) { | 432 | rebuild_cnt++; |
305 | rs->ti->error = "Too many rebuild drives given"; | 433 | if (((rs->raid_type->level != 1) && |
434 | (rebuild_cnt > rs->raid_type->parity_devs)) || | ||
435 | ((rs->raid_type->level == 1) && | ||
436 | (rebuild_cnt > (rs->md.raid_disks - 1)))) { | ||
437 | rs->ti->error = "Too many rebuild devices specified for given RAID type"; | ||
306 | return -EINVAL; | 438 | return -EINVAL; |
307 | } | 439 | } |
308 | if (value > rs->md.raid_disks) { | 440 | if (value > rs->md.raid_disks) { |
@@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
311 | } | 443 | } |
312 | clear_bit(In_sync, &rs->dev[value].rdev.flags); | 444 | clear_bit(In_sync, &rs->dev[value].rdev.flags); |
313 | rs->dev[value].rdev.recovery_offset = 0; | 445 | rs->dev[value].rdev.recovery_offset = 0; |
314 | } else if (!strcmp(key, "max_write_behind")) { | 446 | rs->print_flags |= DMPF_REBUILD; |
447 | } else if (!strcasecmp(key, "write_mostly")) { | ||
448 | if (rs->raid_type->level != 1) { | ||
449 | rs->ti->error = "write_mostly option is only valid for RAID1"; | ||
450 | return -EINVAL; | ||
451 | } | ||
452 | if (value > rs->md.raid_disks) { | ||
453 | rs->ti->error = "Invalid write_mostly drive index given"; | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | set_bit(WriteMostly, &rs->dev[value].rdev.flags); | ||
457 | } else if (!strcasecmp(key, "max_write_behind")) { | ||
458 | if (rs->raid_type->level != 1) { | ||
459 | rs->ti->error = "max_write_behind option is only valid for RAID1"; | ||
460 | return -EINVAL; | ||
461 | } | ||
315 | rs->print_flags |= DMPF_MAX_WRITE_BEHIND; | 462 | rs->print_flags |= DMPF_MAX_WRITE_BEHIND; |
316 | 463 | ||
317 | /* | 464 | /* |
@@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
324 | return -EINVAL; | 471 | return -EINVAL; |
325 | } | 472 | } |
326 | rs->md.bitmap_info.max_write_behind = value; | 473 | rs->md.bitmap_info.max_write_behind = value; |
327 | } else if (!strcmp(key, "daemon_sleep")) { | 474 | } else if (!strcasecmp(key, "daemon_sleep")) { |
328 | rs->print_flags |= DMPF_DAEMON_SLEEP; | 475 | rs->print_flags |= DMPF_DAEMON_SLEEP; |
329 | if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { | 476 | if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { |
330 | rs->ti->error = "daemon sleep period out of range"; | 477 | rs->ti->error = "daemon sleep period out of range"; |
331 | return -EINVAL; | 478 | return -EINVAL; |
332 | } | 479 | } |
333 | rs->md.bitmap_info.daemon_sleep = value; | 480 | rs->md.bitmap_info.daemon_sleep = value; |
334 | } else if (!strcmp(key, "stripe_cache")) { | 481 | } else if (!strcasecmp(key, "stripe_cache")) { |
335 | rs->print_flags |= DMPF_STRIPE_CACHE; | 482 | rs->print_flags |= DMPF_STRIPE_CACHE; |
336 | 483 | ||
337 | /* | 484 | /* |
@@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
348 | rs->ti->error = "Bad stripe_cache size"; | 495 | rs->ti->error = "Bad stripe_cache size"; |
349 | return -EINVAL; | 496 | return -EINVAL; |
350 | } | 497 | } |
351 | } else if (!strcmp(key, "min_recovery_rate")) { | 498 | } else if (!strcasecmp(key, "min_recovery_rate")) { |
352 | rs->print_flags |= DMPF_MIN_RECOVERY_RATE; | 499 | rs->print_flags |= DMPF_MIN_RECOVERY_RATE; |
353 | if (value > INT_MAX) { | 500 | if (value > INT_MAX) { |
354 | rs->ti->error = "min_recovery_rate out of range"; | 501 | rs->ti->error = "min_recovery_rate out of range"; |
355 | return -EINVAL; | 502 | return -EINVAL; |
356 | } | 503 | } |
357 | rs->md.sync_speed_min = (int)value; | 504 | rs->md.sync_speed_min = (int)value; |
358 | } else if (!strcmp(key, "max_recovery_rate")) { | 505 | } else if (!strcasecmp(key, "max_recovery_rate")) { |
359 | rs->print_flags |= DMPF_MAX_RECOVERY_RATE; | 506 | rs->print_flags |= DMPF_MAX_RECOVERY_RATE; |
360 | if (value > INT_MAX) { | 507 | if (value > INT_MAX) { |
361 | rs->ti->error = "max_recovery_rate out of range"; | 508 | rs->ti->error = "max_recovery_rate out of range"; |
362 | return -EINVAL; | 509 | return -EINVAL; |
363 | } | 510 | } |
364 | rs->md.sync_speed_max = (int)value; | 511 | rs->md.sync_speed_max = (int)value; |
512 | } else if (!strcasecmp(key, "region_size")) { | ||
513 | rs->print_flags |= DMPF_REGION_SIZE; | ||
514 | region_size = value; | ||
365 | } else { | 515 | } else { |
366 | DMERR("Unable to parse RAID parameter: %s", key); | 516 | DMERR("Unable to parse RAID parameter: %s", key); |
367 | rs->ti->error = "Unable to parse RAID parameters"; | 517 | rs->ti->error = "Unable to parse RAID parameters"; |
@@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
369 | } | 519 | } |
370 | } | 520 | } |
371 | 521 | ||
522 | if (validate_region_size(rs, region_size)) | ||
523 | return -EINVAL; | ||
524 | |||
525 | if (rs->md.chunk_sectors) | ||
526 | rs->ti->split_io = rs->md.chunk_sectors; | ||
527 | else | ||
528 | rs->ti->split_io = region_size; | ||
529 | |||
372 | /* Assume there are no metadata devices until the drives are parsed */ | 535 | /* Assume there are no metadata devices until the drives are parsed */ |
373 | rs->md.persistent = 0; | 536 | rs->md.persistent = 0; |
374 | rs->md.external = 1; | 537 | rs->md.external = 1; |
@@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) | |||
387 | { | 550 | { |
388 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); | 551 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); |
389 | 552 | ||
553 | if (rs->raid_type->level == 1) | ||
554 | return md_raid1_congested(&rs->md, bits); | ||
555 | |||
390 | return md_raid5_congested(&rs->md, bits); | 556 | return md_raid5_congested(&rs->md, bits); |
391 | } | 557 | } |
392 | 558 | ||
393 | /* | 559 | /* |
560 | * This structure is never routinely used by userspace, unlike md superblocks. | ||
561 | * Devices with this superblock should only ever be accessed via device-mapper. | ||
562 | */ | ||
563 | #define DM_RAID_MAGIC 0x64526D44 | ||
564 | struct dm_raid_superblock { | ||
565 | __le32 magic; /* "DmRd" */ | ||
566 | __le32 features; /* Used to indicate possible future changes */ | ||
567 | |||
568 | __le32 num_devices; /* Number of devices in this array. (Max 64) */ | ||
569 | __le32 array_position; /* The position of this drive in the array */ | ||
570 | |||
571 | __le64 events; /* Incremented by md when superblock updated */ | ||
572 | __le64 failed_devices; /* Bit field of devices to indicate failures */ | ||
573 | |||
574 | /* | ||
575 | * This offset tracks the progress of the repair or replacement of | ||
576 | * an individual drive. | ||
577 | */ | ||
578 | __le64 disk_recovery_offset; | ||
579 | |||
580 | /* | ||
581 | * This offset tracks the progress of the initial array | ||
582 | * synchronisation/parity calculation. | ||
583 | */ | ||
584 | __le64 array_resync_offset; | ||
585 | |||
586 | /* | ||
587 | * RAID characteristics | ||
588 | */ | ||
589 | __le32 level; | ||
590 | __le32 layout; | ||
591 | __le32 stripe_sectors; | ||
592 | |||
593 | __u8 pad[452]; /* Round struct to 512 bytes. */ | ||
594 | /* Always set to 0 when writing. */ | ||
595 | } __packed; | ||
596 | |||
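The pad[452] field is what rounds the structure up to a single 512-byte sector, matching the rdev->sb_size = sizeof(*sb) assignment in super_load() below. A userspace mock-up of the same layout (stdint types standing in for the __le32/__le64 fields; illustrative only, not part of the patch) lets that arithmetic be checked at compile time:

    /* Userspace mock-up of the on-disk layout above; illustrative only. */
    #include <stdint.h>

    struct mock_dm_raid_sb {
            uint32_t magic, features;
            uint32_t num_devices, array_position;
            uint64_t events, failed_devices;
            uint64_t disk_recovery_offset;
            uint64_t array_resync_offset;
            uint32_t level, layout, stripe_sectors;
            uint8_t  pad[452];
    } __attribute__((packed));

    _Static_assert(sizeof(struct mock_dm_raid_sb) == 512,
                   "superblock must fill exactly one 512-byte sector");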
597 | static int read_disk_sb(mdk_rdev_t *rdev, int size) | ||
598 | { | ||
599 | BUG_ON(!rdev->sb_page); | ||
600 | |||
601 | if (rdev->sb_loaded) | ||
602 | return 0; | ||
603 | |||
604 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { | ||
605 | DMERR("Failed to read device superblock"); | ||
606 | return -EINVAL; | ||
607 | } | ||
608 | |||
609 | rdev->sb_loaded = 1; | ||
610 | |||
611 | return 0; | ||
612 | } | ||
613 | |||
614 | static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev) | ||
615 | { | ||
616 | mdk_rdev_t *r, *t; | ||
617 | uint64_t failed_devices; | ||
618 | struct dm_raid_superblock *sb; | ||
619 | |||
620 | sb = page_address(rdev->sb_page); | ||
621 | failed_devices = le64_to_cpu(sb->failed_devices); | ||
622 | |||
623 | rdev_for_each(r, t, mddev) | ||
624 | if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags)) | ||
625 | failed_devices |= (1ULL << r->raid_disk); | ||
626 | |||
627 | memset(sb, 0, sizeof(*sb)); | ||
628 | |||
629 | sb->magic = cpu_to_le32(DM_RAID_MAGIC); | ||
630 | sb->features = cpu_to_le32(0); /* No features yet */ | ||
631 | |||
632 | sb->num_devices = cpu_to_le32(mddev->raid_disks); | ||
633 | sb->array_position = cpu_to_le32(rdev->raid_disk); | ||
634 | |||
635 | sb->events = cpu_to_le64(mddev->events); | ||
636 | sb->failed_devices = cpu_to_le64(failed_devices); | ||
637 | |||
638 | sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); | ||
639 | sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); | ||
640 | |||
641 | sb->level = cpu_to_le32(mddev->level); | ||
642 | sb->layout = cpu_to_le32(mddev->layout); | ||
643 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); | ||
644 | } | ||
645 | |||
646 | /* | ||
647 | * super_load | ||
648 | * | ||
649 | * This function creates a superblock if one is not found on the device | ||
650 | * and will decide which superblock to use if there's a choice. | ||
651 | * | ||
652 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise | ||
653 | */ | ||
654 | static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev) | ||
655 | { | ||
656 | int ret; | ||
657 | struct dm_raid_superblock *sb; | ||
658 | struct dm_raid_superblock *refsb; | ||
659 | uint64_t events_sb, events_refsb; | ||
660 | |||
661 | rdev->sb_start = 0; | ||
662 | rdev->sb_size = sizeof(*sb); | ||
663 | |||
664 | ret = read_disk_sb(rdev, rdev->sb_size); | ||
665 | if (ret) | ||
666 | return ret; | ||
667 | |||
668 | sb = page_address(rdev->sb_page); | ||
669 | if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) { | ||
670 | super_sync(rdev->mddev, rdev); | ||
671 | |||
672 | set_bit(FirstUse, &rdev->flags); | ||
673 | |||
674 | /* Force writing of superblocks to disk */ | ||
675 | set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); | ||
676 | |||
677 | /* Any superblock is better than none, choose that if given */ | ||
678 | return refdev ? 0 : 1; | ||
679 | } | ||
680 | |||
681 | if (!refdev) | ||
682 | return 1; | ||
683 | |||
684 | events_sb = le64_to_cpu(sb->events); | ||
685 | |||
686 | refsb = page_address(refdev->sb_page); | ||
687 | events_refsb = le64_to_cpu(refsb->events); | ||
688 | |||
689 | return (events_sb > events_refsb) ? 1 : 0; | ||
690 | } | ||
691 | |||
692 | static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev) | ||
693 | { | ||
694 | int role; | ||
695 | struct raid_set *rs = container_of(mddev, struct raid_set, md); | ||
696 | uint64_t events_sb; | ||
697 | uint64_t failed_devices; | ||
698 | struct dm_raid_superblock *sb; | ||
699 | uint32_t new_devs = 0; | ||
700 | uint32_t rebuilds = 0; | ||
701 | mdk_rdev_t *r, *t; | ||
702 | struct dm_raid_superblock *sb2; | ||
703 | |||
704 | sb = page_address(rdev->sb_page); | ||
705 | events_sb = le64_to_cpu(sb->events); | ||
706 | failed_devices = le64_to_cpu(sb->failed_devices); | ||
707 | |||
708 | /* | ||
709 | * Initialise to 1 if this is a new superblock. | ||
710 | */ | ||
711 | mddev->events = events_sb ? : 1; | ||
712 | |||
713 | /* | ||
714 | * Reshaping is not currently allowed | ||
715 | */ | ||
716 | if ((le32_to_cpu(sb->level) != mddev->level) || | ||
717 | (le32_to_cpu(sb->layout) != mddev->layout) || | ||
718 | (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) { | ||
719 | DMERR("Reshaping arrays not yet supported."); | ||
720 | return -EINVAL; | ||
721 | } | ||
722 | |||
723 | /* We can only change the number of devices in RAID1 right now */ | ||
724 | if ((rs->raid_type->level != 1) && | ||
725 | (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { | ||
726 | DMERR("Reshaping arrays not yet supported."); | ||
727 | return -EINVAL; | ||
728 | } | ||
729 | |||
730 | if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))) | ||
731 | mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); | ||
732 | |||
733 | /* | ||
734 | * During load, we set FirstUse if a new superblock was written. | ||
735 | * There are two reasons we might not have a superblock: | ||
736 | * 1) The array is brand new - in which case, all of the | ||
737 | * devices must have their In_sync bit set. Also, | ||
738 | * recovery_cp must be 0, unless forced. | ||
739 | * 2) This is a new device being added to an old array | ||
740 | * and the new device needs to be rebuilt - in which | ||
741 | * case the In_sync bit will /not/ be set and | ||
742 | * recovery_cp must be MaxSector. | ||
743 | */ | ||
744 | rdev_for_each(r, t, mddev) { | ||
745 | if (!test_bit(In_sync, &r->flags)) { | ||
746 | if (!test_bit(FirstUse, &r->flags)) | ||
747 | DMERR("Superblock area of " | ||
748 | "rebuild device %d should have been " | ||
749 | "cleared.", r->raid_disk); | ||
750 | set_bit(FirstUse, &r->flags); | ||
751 | rebuilds++; | ||
752 | } else if (test_bit(FirstUse, &r->flags)) | ||
753 | new_devs++; | ||
754 | } | ||
755 | |||
756 | if (!rebuilds) { | ||
757 | if (new_devs == mddev->raid_disks) { | ||
758 | DMINFO("Superblocks created for new array"); | ||
759 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | ||
760 | } else if (new_devs) { | ||
761 | DMERR("New device injected " | ||
762 | "into existing array without 'rebuild' " | ||
763 | "parameter specified"); | ||
764 | return -EINVAL; | ||
765 | } | ||
766 | } else if (new_devs) { | ||
767 | DMERR("'rebuild' devices cannot be " | ||
768 | "injected into an array with other first-time devices"); | ||
769 | return -EINVAL; | ||
770 | } else if (mddev->recovery_cp != MaxSector) { | ||
771 | DMERR("'rebuild' specified while array is not in-sync"); | ||
772 | return -EINVAL; | ||
773 | } | ||
774 | |||
775 | /* | ||
776 | * Now we set the Faulty bit for those devices that are | ||
777 | * recorded in the superblock as failed. | ||
778 | */ | ||
779 | rdev_for_each(r, t, mddev) { | ||
780 | if (!r->sb_page) | ||
781 | continue; | ||
782 | sb2 = page_address(r->sb_page); | ||
783 | sb2->failed_devices = 0; | ||
784 | |||
785 | /* | ||
786 | * Check for any device re-ordering. | ||
787 | */ | ||
788 | if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { | ||
789 | role = le32_to_cpu(sb2->array_position); | ||
790 | if (role != r->raid_disk) { | ||
791 | if (rs->raid_type->level != 1) { | ||
792 | rs->ti->error = "Cannot change device " | ||
793 | "positions in RAID array"; | ||
794 | return -EINVAL; | ||
795 | } | ||
796 | DMINFO("RAID1 device #%d now at position #%d", | ||
797 | role, r->raid_disk); | ||
798 | } | ||
799 | |||
800 | /* | ||
801 | * Partial recovery is performed on | ||
802 | * returning failed devices. | ||
803 | */ | ||
804 | if (failed_devices & (1ULL << role)) | ||
805 | set_bit(Faulty, &r->flags); | ||
806 | } | ||
807 | } | ||
808 | |||
809 | return 0; | ||
810 | } | ||
811 | |||
812 | static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev) | ||
813 | { | ||
814 | struct dm_raid_superblock *sb = page_address(rdev->sb_page); | ||
815 | |||
816 | /* | ||
817 | * If mddev->events is not set, we know we have not yet initialized | ||
818 | * the array. | ||
819 | */ | ||
820 | if (!mddev->events && super_init_validation(mddev, rdev)) | ||
821 | return -EINVAL; | ||
822 | |||
823 | mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */ | ||
824 | rdev->mddev->bitmap_info.default_offset = 4096 >> 9; | ||
825 | if (!test_bit(FirstUse, &rdev->flags)) { | ||
826 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); | ||
827 | if (rdev->recovery_offset != MaxSector) | ||
828 | clear_bit(In_sync, &rdev->flags); | ||
829 | } | ||
830 | |||
831 | /* | ||
832 | * If a device comes back, set it as not In_sync and no longer faulty. | ||
833 | */ | ||
834 | if (test_bit(Faulty, &rdev->flags)) { | ||
835 | clear_bit(Faulty, &rdev->flags); | ||
836 | clear_bit(In_sync, &rdev->flags); | ||
837 | rdev->saved_raid_disk = rdev->raid_disk; | ||
838 | rdev->recovery_offset = 0; | ||
839 | } | ||
840 | |||
841 | clear_bit(FirstUse, &rdev->flags); | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | /* | ||
847 | * Analyse superblocks and select the freshest. | ||
848 | */ | ||
849 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | ||
850 | { | ||
851 | int ret; | ||
852 | mdk_rdev_t *rdev, *freshest, *tmp; | ||
853 | mddev_t *mddev = &rs->md; | ||
854 | |||
855 | freshest = NULL; | ||
856 | rdev_for_each(rdev, tmp, mddev) { | ||
857 | if (!rdev->meta_bdev) | ||
858 | continue; | ||
859 | |||
860 | ret = super_load(rdev, freshest); | ||
861 | |||
862 | switch (ret) { | ||
863 | case 1: | ||
864 | freshest = rdev; | ||
865 | break; | ||
866 | case 0: | ||
867 | break; | ||
868 | default: | ||
869 | ti->error = "Failed to load superblock"; | ||
870 | return ret; | ||
871 | } | ||
872 | } | ||
873 | |||
874 | if (!freshest) | ||
875 | return 0; | ||
876 | |||
877 | /* | ||
878 | * Validation of the freshest device provides the source of | ||
879 | * validation for the remaining devices. | ||
880 | */ | ||
881 | ti->error = "Unable to assemble array: Invalid superblocks"; | ||
882 | if (super_validate(mddev, freshest)) | ||
883 | return -EINVAL; | ||
884 | |||
885 | rdev_for_each(rdev, tmp, mddev) | ||
886 | if ((rdev != freshest) && super_validate(mddev, rdev)) | ||
887 | return -EINVAL; | ||
888 | |||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | /* | ||
394 | * Construct a RAID4/5/6 mapping: | 893 | * Construct a RAID4/5/6 mapping: |
395 | * Args: | 894 | * Args: |
396 | * <raid_type> <#raid_params> <raid_params> \ | 895 | * <raid_type> <#raid_params> <raid_params> \ |
397 | * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> } | 896 | * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> } |
398 | * | 897 | * |
399 | * ** metadata devices are not supported yet, use '-' instead ** | ||
400 | * | ||
401 | * <raid_params> varies by <raid_type>. See 'parse_raid_params' for | 898 | * <raid_params> varies by <raid_type>. See 'parse_raid_params' for |
402 | * details on possible <raid_params>. | 899 | * details on possible <raid_params>. |
403 | */ | 900 | */ |
@@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
465 | if (ret) | 962 | if (ret) |
466 | goto bad; | 963 | goto bad; |
467 | 964 | ||
965 | rs->md.sync_super = super_sync; | ||
966 | ret = analyse_superblocks(ti, rs); | ||
967 | if (ret) | ||
968 | goto bad; | ||
969 | |||
468 | INIT_WORK(&rs->md.event_work, do_table_event); | 970 | INIT_WORK(&rs->md.event_work, do_table_event); |
469 | ti->split_io = rs->md.chunk_sectors; | ||
470 | ti->private = rs; | 971 | ti->private = rs; |
471 | 972 | ||
472 | mutex_lock(&rs->md.reconfig_mutex); | 973 | mutex_lock(&rs->md.reconfig_mutex); |
@@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
482 | rs->callbacks.congested_fn = raid_is_congested; | 983 | rs->callbacks.congested_fn = raid_is_congested; |
483 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); | 984 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
484 | 985 | ||
986 | mddev_suspend(&rs->md); | ||
485 | return 0; | 987 | return 0; |
486 | 988 | ||
487 | bad: | 989 | bad: |
@@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type, | |||
546 | break; | 1048 | break; |
547 | case STATUSTYPE_TABLE: | 1049 | case STATUSTYPE_TABLE: |
548 | /* The string you would use to construct this array */ | 1050 | /* The string you would use to construct this array */ |
549 | for (i = 0; i < rs->md.raid_disks; i++) | 1051 | for (i = 0; i < rs->md.raid_disks; i++) { |
550 | if (rs->dev[i].data_dev && | 1052 | if ((rs->print_flags & DMPF_REBUILD) && |
1053 | rs->dev[i].data_dev && | ||
551 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) | 1054 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) |
552 | raid_param_cnt++; /* for rebuilds */ | 1055 | raid_param_cnt += 2; /* for rebuilds */ |
1056 | if (rs->dev[i].data_dev && | ||
1057 | test_bit(WriteMostly, &rs->dev[i].rdev.flags)) | ||
1058 | raid_param_cnt += 2; | ||
1059 | } | ||
553 | 1060 | ||
554 | raid_param_cnt += (hweight64(rs->print_flags) * 2); | 1061 | raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2); |
555 | if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) | 1062 | if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) |
556 | raid_param_cnt--; | 1063 | raid_param_cnt--; |
557 | 1064 | ||
@@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type, | |||
565 | DMEMIT(" nosync"); | 1072 | DMEMIT(" nosync"); |
566 | 1073 | ||
567 | for (i = 0; i < rs->md.raid_disks; i++) | 1074 | for (i = 0; i < rs->md.raid_disks; i++) |
568 | if (rs->dev[i].data_dev && | 1075 | if ((rs->print_flags & DMPF_REBUILD) && |
1076 | rs->dev[i].data_dev && | ||
569 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) | 1077 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) |
570 | DMEMIT(" rebuild %u", i); | 1078 | DMEMIT(" rebuild %u", i); |
571 | 1079 | ||
@@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type, | |||
579 | if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) | 1087 | if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) |
580 | DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); | 1088 | DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); |
581 | 1089 | ||
1090 | for (i = 0; i < rs->md.raid_disks; i++) | ||
1091 | if (rs->dev[i].data_dev && | ||
1092 | test_bit(WriteMostly, &rs->dev[i].rdev.flags)) | ||
1093 | DMEMIT(" write_mostly %u", i); | ||
1094 | |||
582 | if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) | 1095 | if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) |
583 | DMEMIT(" max_write_behind %lu", | 1096 | DMEMIT(" max_write_behind %lu", |
584 | rs->md.bitmap_info.max_write_behind); | 1097 | rs->md.bitmap_info.max_write_behind); |
@@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type, | |||
591 | conf ? conf->max_nr_stripes * 2 : 0); | 1104 | conf ? conf->max_nr_stripes * 2 : 0); |
592 | } | 1105 | } |
593 | 1106 | ||
1107 | if (rs->print_flags & DMPF_REGION_SIZE) | ||
1108 | DMEMIT(" region_size %lu", | ||
1109 | rs->md.bitmap_info.chunksize >> 9); | ||
1110 | |||
594 | DMEMIT(" %d", rs->md.raid_disks); | 1111 | DMEMIT(" %d", rs->md.raid_disks); |
595 | for (i = 0; i < rs->md.raid_disks; i++) { | 1112 | for (i = 0; i < rs->md.raid_disks; i++) { |
596 | DMEMIT(" -"); /* metadata device */ | 1113 | if (rs->dev[i].meta_dev) |
1114 | DMEMIT(" %s", rs->dev[i].meta_dev->name); | ||
1115 | else | ||
1116 | DMEMIT(" -"); | ||
597 | 1117 | ||
598 | if (rs->dev[i].data_dev) | 1118 | if (rs->dev[i].data_dev) |
599 | DMEMIT(" %s", rs->dev[i].data_dev->name); | 1119 | DMEMIT(" %s", rs->dev[i].data_dev->name); |
@@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti) | |||
650 | { | 1170 | { |
651 | struct raid_set *rs = ti->private; | 1171 | struct raid_set *rs = ti->private; |
652 | 1172 | ||
1173 | bitmap_load(&rs->md); | ||
653 | mddev_resume(&rs->md); | 1174 | mddev_resume(&rs->md); |
654 | } | 1175 | } |
655 | 1176 | ||
656 | static struct target_type raid_target = { | 1177 | static struct target_type raid_target = { |
657 | .name = "raid", | 1178 | .name = "raid", |
658 | .version = {1, 0, 0}, | 1179 | .version = {1, 1, 0}, |
659 | .module = THIS_MODULE, | 1180 | .module = THIS_MODULE, |
660 | .ctr = raid_ctr, | 1181 | .ctr = raid_ctr, |
661 | .dtr = raid_dtr, | 1182 | .dtr = raid_dtr, |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 135c2f1fdbfc..d1f1d7017103 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -58,25 +58,30 @@ | |||
58 | #define NUM_SNAPSHOT_HDR_CHUNKS 1 | 58 | #define NUM_SNAPSHOT_HDR_CHUNKS 1 |
59 | 59 | ||
60 | struct disk_header { | 60 | struct disk_header { |
61 | uint32_t magic; | 61 | __le32 magic; |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Is this snapshot valid. There is no way of recovering | 64 | * Is this snapshot valid. There is no way of recovering |
65 | * an invalid snapshot. | 65 | * an invalid snapshot. |
66 | */ | 66 | */ |
67 | uint32_t valid; | 67 | __le32 valid; |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * Simple, incrementing version. no backward | 70 | * Simple, incrementing version. no backward |
71 | * compatibility. | 71 | * compatibility. |
72 | */ | 72 | */ |
73 | uint32_t version; | 73 | __le32 version; |
74 | 74 | ||
75 | /* In sectors */ | 75 | /* In sectors */ |
76 | uint32_t chunk_size; | 76 | __le32 chunk_size; |
77 | }; | 77 | } __packed; |
78 | 78 | ||
79 | struct disk_exception { | 79 | struct disk_exception { |
80 | __le64 old_chunk; | ||
81 | __le64 new_chunk; | ||
82 | } __packed; | ||
83 | |||
84 | struct core_exception { | ||
80 | uint64_t old_chunk; | 85 | uint64_t old_chunk; |
81 | uint64_t new_chunk; | 86 | uint64_t new_chunk; |
82 | }; | 87 | }; |
@@ -169,10 +174,9 @@ static int alloc_area(struct pstore *ps) | |||
169 | if (!ps->area) | 174 | if (!ps->area) |
170 | goto err_area; | 175 | goto err_area; |
171 | 176 | ||
172 | ps->zero_area = vmalloc(len); | 177 | ps->zero_area = vzalloc(len); |
173 | if (!ps->zero_area) | 178 | if (!ps->zero_area) |
174 | goto err_zero_area; | 179 | goto err_zero_area; |
175 | memset(ps->zero_area, 0, len); | ||
176 | 180 | ||
177 | ps->header_area = vmalloc(len); | 181 | ps->header_area = vmalloc(len); |
178 | if (!ps->header_area) | 182 | if (!ps->header_area) |
@@ -396,32 +400,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) | |||
396 | } | 400 | } |
397 | 401 | ||
398 | static void read_exception(struct pstore *ps, | 402 | static void read_exception(struct pstore *ps, |
399 | uint32_t index, struct disk_exception *result) | 403 | uint32_t index, struct core_exception *result) |
400 | { | 404 | { |
401 | struct disk_exception *e = get_exception(ps, index); | 405 | struct disk_exception *de = get_exception(ps, index); |
402 | 406 | ||
403 | /* copy it */ | 407 | /* copy it */ |
404 | result->old_chunk = le64_to_cpu(e->old_chunk); | 408 | result->old_chunk = le64_to_cpu(de->old_chunk); |
405 | result->new_chunk = le64_to_cpu(e->new_chunk); | 409 | result->new_chunk = le64_to_cpu(de->new_chunk); |
406 | } | 410 | } |
407 | 411 | ||
408 | static void write_exception(struct pstore *ps, | 412 | static void write_exception(struct pstore *ps, |
409 | uint32_t index, struct disk_exception *de) | 413 | uint32_t index, struct core_exception *e) |
410 | { | 414 | { |
411 | struct disk_exception *e = get_exception(ps, index); | 415 | struct disk_exception *de = get_exception(ps, index); |
412 | 416 | ||
413 | /* copy it */ | 417 | /* copy it */ |
414 | e->old_chunk = cpu_to_le64(de->old_chunk); | 418 | de->old_chunk = cpu_to_le64(e->old_chunk); |
415 | e->new_chunk = cpu_to_le64(de->new_chunk); | 419 | de->new_chunk = cpu_to_le64(e->new_chunk); |
416 | } | 420 | } |
417 | 421 | ||
418 | static void clear_exception(struct pstore *ps, uint32_t index) | 422 | static void clear_exception(struct pstore *ps, uint32_t index) |
419 | { | 423 | { |
420 | struct disk_exception *e = get_exception(ps, index); | 424 | struct disk_exception *de = get_exception(ps, index); |
421 | 425 | ||
422 | /* clear it */ | 426 | /* clear it */ |
423 | e->old_chunk = 0; | 427 | de->old_chunk = 0; |
424 | e->new_chunk = 0; | 428 | de->new_chunk = 0; |
425 | } | 429 | } |
426 | 430 | ||
427 | /* | 431 | /* |
@@ -437,13 +441,13 @@ static int insert_exceptions(struct pstore *ps, | |||
437 | { | 441 | { |
438 | int r; | 442 | int r; |
439 | unsigned int i; | 443 | unsigned int i; |
440 | struct disk_exception de; | 444 | struct core_exception e; |
441 | 445 | ||
442 | /* presume the area is full */ | 446 | /* presume the area is full */ |
443 | *full = 1; | 447 | *full = 1; |
444 | 448 | ||
445 | for (i = 0; i < ps->exceptions_per_area; i++) { | 449 | for (i = 0; i < ps->exceptions_per_area; i++) { |
446 | read_exception(ps, i, &de); | 450 | read_exception(ps, i, &e); |
447 | 451 | ||
448 | /* | 452 | /* |
449 | * If the new_chunk is pointing at the start of | 453 | * If the new_chunk is pointing at the start of |
@@ -451,7 +455,7 @@ static int insert_exceptions(struct pstore *ps, | |||
451 | * is we know that we've hit the end of the | 455 | * is we know that we've hit the end of the |
452 | * exceptions. Therefore the area is not full. | 456 | * exceptions. Therefore the area is not full. |
453 | */ | 457 | */ |
454 | if (de.new_chunk == 0LL) { | 458 | if (e.new_chunk == 0LL) { |
455 | ps->current_committed = i; | 459 | ps->current_committed = i; |
456 | *full = 0; | 460 | *full = 0; |
457 | break; | 461 | break; |
@@ -460,13 +464,13 @@ static int insert_exceptions(struct pstore *ps, | |||
460 | /* | 464 | /* |
461 | * Keep track of the start of the free chunks. | 465 | * Keep track of the start of the free chunks. |
462 | */ | 466 | */ |
463 | if (ps->next_free <= de.new_chunk) | 467 | if (ps->next_free <= e.new_chunk) |
464 | ps->next_free = de.new_chunk + 1; | 468 | ps->next_free = e.new_chunk + 1; |
465 | 469 | ||
466 | /* | 470 | /* |
467 | * Otherwise we add the exception to the snapshot. | 471 | * Otherwise we add the exception to the snapshot. |
468 | */ | 472 | */ |
469 | r = callback(callback_context, de.old_chunk, de.new_chunk); | 473 | r = callback(callback_context, e.old_chunk, e.new_chunk); |
470 | if (r) | 474 | if (r) |
471 | return r; | 475 | return r; |
472 | } | 476 | } |
@@ -563,7 +567,7 @@ static int persistent_read_metadata(struct dm_exception_store *store, | |||
563 | ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / | 567 | ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / |
564 | sizeof(struct disk_exception); | 568 | sizeof(struct disk_exception); |
565 | ps->callbacks = dm_vcalloc(ps->exceptions_per_area, | 569 | ps->callbacks = dm_vcalloc(ps->exceptions_per_area, |
566 | sizeof(*ps->callbacks)); | 570 | sizeof(*ps->callbacks)); |
567 | if (!ps->callbacks) | 571 | if (!ps->callbacks) |
568 | return -ENOMEM; | 572 | return -ENOMEM; |
569 | 573 | ||
@@ -641,12 +645,12 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
641 | { | 645 | { |
642 | unsigned int i; | 646 | unsigned int i; |
643 | struct pstore *ps = get_info(store); | 647 | struct pstore *ps = get_info(store); |
644 | struct disk_exception de; | 648 | struct core_exception ce; |
645 | struct commit_callback *cb; | 649 | struct commit_callback *cb; |
646 | 650 | ||
647 | de.old_chunk = e->old_chunk; | 651 | ce.old_chunk = e->old_chunk; |
648 | de.new_chunk = e->new_chunk; | 652 | ce.new_chunk = e->new_chunk; |
649 | write_exception(ps, ps->current_committed++, &de); | 653 | write_exception(ps, ps->current_committed++, &ce); |
650 | 654 | ||
651 | /* | 655 | /* |
652 | * Add the callback to the back of the array. This code | 656 | * Add the callback to the back of the array. This code |
@@ -670,7 +674,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
670 | * If we completely filled the current area, then wipe the next one. | 674 | * If we completely filled the current area, then wipe the next one. |
671 | */ | 675 | */ |
672 | if ((ps->current_committed == ps->exceptions_per_area) && | 676 | if ((ps->current_committed == ps->exceptions_per_area) && |
673 | zero_disk_area(ps, ps->current_area + 1)) | 677 | zero_disk_area(ps, ps->current_area + 1)) |
674 | ps->valid = 0; | 678 | ps->valid = 0; |
675 | 679 | ||
676 | /* | 680 | /* |
@@ -701,7 +705,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
701 | chunk_t *last_new_chunk) | 705 | chunk_t *last_new_chunk) |
702 | { | 706 | { |
703 | struct pstore *ps = get_info(store); | 707 | struct pstore *ps = get_info(store); |
704 | struct disk_exception de; | 708 | struct core_exception ce; |
705 | int nr_consecutive; | 709 | int nr_consecutive; |
706 | int r; | 710 | int r; |
707 | 711 | ||
@@ -722,9 +726,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
722 | ps->current_committed = ps->exceptions_per_area; | 726 | ps->current_committed = ps->exceptions_per_area; |
723 | } | 727 | } |
724 | 728 | ||
725 | read_exception(ps, ps->current_committed - 1, &de); | 729 | read_exception(ps, ps->current_committed - 1, &ce); |
726 | *last_old_chunk = de.old_chunk; | 730 | *last_old_chunk = ce.old_chunk; |
727 | *last_new_chunk = de.new_chunk; | 731 | *last_new_chunk = ce.new_chunk; |
728 | 732 | ||
729 | /* | 733 | /* |
730 | * Find number of consecutive chunks within the current area, | 734 | * Find number of consecutive chunks within the current area, |
@@ -733,9 +737,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
733 | for (nr_consecutive = 1; nr_consecutive < ps->current_committed; | 737 | for (nr_consecutive = 1; nr_consecutive < ps->current_committed; |
734 | nr_consecutive++) { | 738 | nr_consecutive++) { |
735 | read_exception(ps, ps->current_committed - 1 - nr_consecutive, | 739 | read_exception(ps, ps->current_committed - 1 - nr_consecutive, |
736 | &de); | 740 | &ce); |
737 | if (de.old_chunk != *last_old_chunk - nr_consecutive || | 741 | if (ce.old_chunk != *last_old_chunk - nr_consecutive || |
738 | de.new_chunk != *last_new_chunk - nr_consecutive) | 742 | ce.new_chunk != *last_new_chunk - nr_consecutive) |
739 | break; | 743 | break; |
740 | } | 744 | } |
741 | 745 | ||
@@ -753,7 +757,7 @@ static int persistent_commit_merge(struct dm_exception_store *store, | |||
753 | for (i = 0; i < nr_merged; i++) | 757 | for (i = 0; i < nr_merged; i++) |
754 | clear_exception(ps, ps->current_committed - 1 - i); | 758 | clear_exception(ps, ps->current_committed - 1 - i); |
755 | 759 | ||
756 | r = area_io(ps, WRITE); | 760 | r = area_io(ps, WRITE_FLUSH_FUA); |
757 | if (r < 0) | 761 | if (r < 0) |
758 | return r; | 762 | return r; |
759 | 763 | ||
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 9ecff5f3023a..6f758870fc19 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -30,16 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; | |||
30 | ((ti)->type->name == dm_snapshot_merge_target_name) | 30 | ((ti)->type->name == dm_snapshot_merge_target_name) |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * The percentage increment we will wake up users at | ||
34 | */ | ||
35 | #define WAKE_UP_PERCENT 5 | ||
36 | |||
37 | /* | ||
38 | * kcopyd priority of snapshot operations | ||
39 | */ | ||
40 | #define SNAPSHOT_COPY_PRIORITY 2 | ||
41 | |||
42 | /* | ||
43 | * The size of the mempool used to track chunks in use. | 33 | * The size of the mempool used to track chunks in use. |
44 | */ | 34 | */ |
45 | #define MIN_IOS 256 | 35 | #define MIN_IOS 256 |
@@ -180,6 +170,13 @@ struct dm_snap_pending_exception { | |||
180 | * kcopyd. | 170 | * kcopyd. |
181 | */ | 171 | */ |
182 | int started; | 172 | int started; |
173 | |||
174 | /* | ||
175 | * For writing a complete chunk, bypassing the copy. | ||
176 | */ | ||
177 | struct bio *full_bio; | ||
178 | bio_end_io_t *full_bio_end_io; | ||
179 | void *full_bio_private; | ||
183 | }; | 180 | }; |
184 | 181 | ||
185 | /* | 182 | /* |
@@ -1055,8 +1052,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1055 | 1052 | ||
1056 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 1053 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1057 | if (!s) { | 1054 | if (!s) { |
1058 | ti->error = "Cannot allocate snapshot context private " | 1055 | ti->error = "Cannot allocate private snapshot structure"; |
1059 | "structure"; | ||
1060 | r = -ENOMEM; | 1056 | r = -ENOMEM; |
1061 | goto bad; | 1057 | goto bad; |
1062 | } | 1058 | } |
@@ -1380,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) | |||
1380 | struct dm_snapshot *s = pe->snap; | 1376 | struct dm_snapshot *s = pe->snap; |
1381 | struct bio *origin_bios = NULL; | 1377 | struct bio *origin_bios = NULL; |
1382 | struct bio *snapshot_bios = NULL; | 1378 | struct bio *snapshot_bios = NULL; |
1379 | struct bio *full_bio = NULL; | ||
1383 | int error = 0; | 1380 | int error = 0; |
1384 | 1381 | ||
1385 | if (!success) { | 1382 | if (!success) { |
@@ -1415,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) | |||
1415 | */ | 1412 | */ |
1416 | dm_insert_exception(&s->complete, e); | 1413 | dm_insert_exception(&s->complete, e); |
1417 | 1414 | ||
1418 | out: | 1415 | out: |
1419 | dm_remove_exception(&pe->e); | 1416 | dm_remove_exception(&pe->e); |
1420 | snapshot_bios = bio_list_get(&pe->snapshot_bios); | 1417 | snapshot_bios = bio_list_get(&pe->snapshot_bios); |
1421 | origin_bios = bio_list_get(&pe->origin_bios); | 1418 | origin_bios = bio_list_get(&pe->origin_bios); |
1419 | full_bio = pe->full_bio; | ||
1420 | if (full_bio) { | ||
1421 | full_bio->bi_end_io = pe->full_bio_end_io; | ||
1422 | full_bio->bi_private = pe->full_bio_private; | ||
1423 | } | ||
1422 | free_pending_exception(pe); | 1424 | free_pending_exception(pe); |
1423 | 1425 | ||
1424 | increment_pending_exceptions_done_count(); | 1426 | increment_pending_exceptions_done_count(); |
@@ -1426,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) | |||
1426 | up_write(&s->lock); | 1428 | up_write(&s->lock); |
1427 | 1429 | ||
1428 | /* Submit any pending write bios */ | 1430 | /* Submit any pending write bios */ |
1429 | if (error) | 1431 | if (error) { |
1432 | if (full_bio) | ||
1433 | bio_io_error(full_bio); | ||
1430 | error_bios(snapshot_bios); | 1434 | error_bios(snapshot_bios); |
1431 | else | 1435 | } else { |
1436 | if (full_bio) | ||
1437 | bio_endio(full_bio, 0); | ||
1432 | flush_bios(snapshot_bios); | 1438 | flush_bios(snapshot_bios); |
1439 | } | ||
1433 | 1440 | ||
1434 | retry_origin_bios(s, origin_bios); | 1441 | retry_origin_bios(s, origin_bios); |
1435 | } | 1442 | } |
@@ -1480,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe) | |||
1480 | dest.count = src.count; | 1487 | dest.count = src.count; |
1481 | 1488 | ||
1482 | /* Hand over to kcopyd */ | 1489 | /* Hand over to kcopyd */ |
1483 | dm_kcopyd_copy(s->kcopyd_client, | 1490 | dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); |
1484 | &src, 1, &dest, 0, copy_callback, pe); | 1491 | } |
1492 | |||
1493 | static void full_bio_end_io(struct bio *bio, int error) | ||
1494 | { | ||
1495 | void *callback_data = bio->bi_private; | ||
1496 | |||
1497 | dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0); | ||
1498 | } | ||
1499 | |||
1500 | static void start_full_bio(struct dm_snap_pending_exception *pe, | ||
1501 | struct bio *bio) | ||
1502 | { | ||
1503 | struct dm_snapshot *s = pe->snap; | ||
1504 | void *callback_data; | ||
1505 | |||
1506 | pe->full_bio = bio; | ||
1507 | pe->full_bio_end_io = bio->bi_end_io; | ||
1508 | pe->full_bio_private = bio->bi_private; | ||
1509 | |||
1510 | callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, | ||
1511 | copy_callback, pe); | ||
1512 | |||
1513 | bio->bi_end_io = full_bio_end_io; | ||
1514 | bio->bi_private = callback_data; | ||
1515 | |||
1516 | generic_make_request(bio); | ||
1485 | } | 1517 | } |
1486 | 1518 | ||
1487 | static struct dm_snap_pending_exception * | 1519 | static struct dm_snap_pending_exception * |
@@ -1519,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s, | |||
1519 | bio_list_init(&pe->origin_bios); | 1551 | bio_list_init(&pe->origin_bios); |
1520 | bio_list_init(&pe->snapshot_bios); | 1552 | bio_list_init(&pe->snapshot_bios); |
1521 | pe->started = 0; | 1553 | pe->started = 0; |
1554 | pe->full_bio = NULL; | ||
1522 | 1555 | ||
1523 | if (s->store->type->prepare_exception(s->store, &pe->e)) { | 1556 | if (s->store->type->prepare_exception(s->store, &pe->e)) { |
1524 | free_pending_exception(pe); | 1557 | free_pending_exception(pe); |
@@ -1612,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, | |||
1612 | } | 1645 | } |
1613 | 1646 | ||
1614 | remap_exception(s, &pe->e, bio, chunk); | 1647 | remap_exception(s, &pe->e, bio, chunk); |
1615 | bio_list_add(&pe->snapshot_bios, bio); | ||
1616 | 1648 | ||
1617 | r = DM_MAPIO_SUBMITTED; | 1649 | r = DM_MAPIO_SUBMITTED; |
1618 | 1650 | ||
1651 | if (!pe->started && | ||
1652 | bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { | ||
1653 | pe->started = 1; | ||
1654 | up_write(&s->lock); | ||
1655 | start_full_bio(pe, bio); | ||
1656 | goto out; | ||
1657 | } | ||
1658 | |||
1659 | bio_list_add(&pe->snapshot_bios, bio); | ||
1660 | |||
1619 | if (!pe->started) { | 1661 | if (!pe->started) { |
1620 | /* this is protected by snap->lock */ | 1662 | /* this is protected by snap->lock */ |
1621 | pe->started = 1; | 1663 | pe->started = 1; |
@@ -1628,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, | |||
1628 | map_context->ptr = track_chunk(s, chunk); | 1670 | map_context->ptr = track_chunk(s, chunk); |
1629 | } | 1671 | } |
1630 | 1672 | ||
1631 | out_unlock: | 1673 | out_unlock: |
1632 | up_write(&s->lock); | 1674 | up_write(&s->lock); |
1633 | out: | 1675 | out: |
1634 | return r; | 1676 | return r; |
1635 | } | 1677 | } |
1636 | 1678 | ||
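The full-chunk bypass added to snapshot_map() above only fires when a bio covers exactly one chunk; anything smaller still goes through the kcopyd copy path. A throwaway check of the byte arithmetic (the 16-sector chunk size and bio size are invented figures):

    /* Byte arithmetic behind the full-chunk test; not part of the patch. */
    #include <assert.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
            unsigned chunk_size = 16;       /* store->chunk_size in sectors: an 8KiB chunk */
            unsigned bi_size = 8192;        /* bio payload in bytes */

            /* Exactly one chunk: eligible for the start_full_bio() bypass. */
            assert(bi_size == (chunk_size << SECTOR_SHIFT));
            /* A 4KiB write is not, and is still copied via kcopyd. */
            assert(4096 != (chunk_size << SECTOR_SHIFT));
            return 0;
    }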
@@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, | |||
1974 | pe_to_start_now = pe; | 2016 | pe_to_start_now = pe; |
1975 | } | 2017 | } |
1976 | 2018 | ||
1977 | next_snapshot: | 2019 | next_snapshot: |
1978 | up_write(&snap->lock); | 2020 | up_write(&snap->lock); |
1979 | 2021 | ||
1980 | if (pe_to_start_now) { | 2022 | if (pe_to_start_now) { |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index bfe9c2333cea..986b8754bb08 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -54,7 +54,6 @@ struct dm_table { | |||
54 | sector_t *highs; | 54 | sector_t *highs; |
55 | struct dm_target *targets; | 55 | struct dm_target *targets; |
56 | 56 | ||
57 | unsigned discards_supported:1; | ||
58 | unsigned integrity_supported:1; | 57 | unsigned integrity_supported:1; |
59 | 58 | ||
60 | /* | 59 | /* |
@@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) | |||
154 | return NULL; | 153 | return NULL; |
155 | 154 | ||
156 | size = nmemb * elem_size; | 155 | size = nmemb * elem_size; |
157 | addr = vmalloc(size); | 156 | addr = vzalloc(size); |
158 | if (addr) | ||
159 | memset(addr, 0, size); | ||
160 | 157 | ||
161 | return addr; | 158 | return addr; |
162 | } | 159 | } |
160 | EXPORT_SYMBOL(dm_vcalloc); | ||
163 | 161 | ||
164 | /* | 162 | /* |
165 | * highs, and targets are managed as dynamic arrays during a | 163 | * highs, and targets are managed as dynamic arrays during a |
@@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, | |||
209 | INIT_LIST_HEAD(&t->devices); | 207 | INIT_LIST_HEAD(&t->devices); |
210 | INIT_LIST_HEAD(&t->target_callbacks); | 208 | INIT_LIST_HEAD(&t->target_callbacks); |
211 | atomic_set(&t->holders, 0); | 209 | atomic_set(&t->holders, 0); |
212 | t->discards_supported = 1; | ||
213 | 210 | ||
214 | if (!num_targets) | 211 | if (!num_targets) |
215 | num_targets = KEYS_PER_NODE; | 212 | num_targets = KEYS_PER_NODE; |
@@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t) | |||
281 | { | 278 | { |
282 | atomic_inc(&t->holders); | 279 | atomic_inc(&t->holders); |
283 | } | 280 | } |
281 | EXPORT_SYMBOL(dm_table_get); | ||
284 | 282 | ||
285 | void dm_table_put(struct dm_table *t) | 283 | void dm_table_put(struct dm_table *t) |
286 | { | 284 | { |
@@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t) | |||
290 | smp_mb__before_atomic_dec(); | 288 | smp_mb__before_atomic_dec(); |
291 | atomic_dec(&t->holders); | 289 | atomic_dec(&t->holders); |
292 | } | 290 | } |
291 | EXPORT_SYMBOL(dm_table_put); | ||
293 | 292 | ||
294 | /* | 293 | /* |
295 | * Checks to see if we need to extend highs or targets. | 294 | * Checks to see if we need to extend highs or targets. |
@@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, | |||
455 | * Add a device to the list, or just increment the usage count if | 454 | * Add a device to the list, or just increment the usage count if |
456 | * it's already present. | 455 | * it's already present. |
457 | */ | 456 | */ |
458 | static int __table_get_device(struct dm_table *t, struct dm_target *ti, | 457 | int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, |
459 | const char *path, fmode_t mode, struct dm_dev **result) | 458 | struct dm_dev **result) |
460 | { | 459 | { |
461 | int r; | 460 | int r; |
462 | dev_t uninitialized_var(dev); | 461 | dev_t uninitialized_var(dev); |
463 | struct dm_dev_internal *dd; | 462 | struct dm_dev_internal *dd; |
464 | unsigned int major, minor; | 463 | unsigned int major, minor; |
464 | struct dm_table *t = ti->table; | ||
465 | 465 | ||
466 | BUG_ON(!t); | 466 | BUG_ON(!t); |
467 | 467 | ||
@@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, | |||
509 | *result = &dd->dm_dev; | 509 | *result = &dd->dm_dev; |
510 | return 0; | 510 | return 0; |
511 | } | 511 | } |
512 | EXPORT_SYMBOL(dm_get_device); | ||
512 | 513 | ||
513 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | 514 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, |
514 | sector_t start, sector_t len, void *data) | 515 | sector_t start, sector_t len, void *data) |
@@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | |||
539 | * If not we'll force DM to use PAGE_SIZE or | 540 | * If not we'll force DM to use PAGE_SIZE or |
540 | * smaller I/O, just to be safe. | 541 | * smaller I/O, just to be safe. |
541 | */ | 542 | */ |
542 | 543 | if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) | |
543 | if (q->merge_bvec_fn && !ti->type->merge) | ||
544 | blk_limits_max_hw_sectors(limits, | 544 | blk_limits_max_hw_sectors(limits, |
545 | (unsigned int) (PAGE_SIZE >> 9)); | 545 | (unsigned int) (PAGE_SIZE >> 9)); |
546 | return 0; | 546 | return 0; |
547 | } | 547 | } |
548 | EXPORT_SYMBOL_GPL(dm_set_device_limits); | 548 | EXPORT_SYMBOL_GPL(dm_set_device_limits); |
549 | 549 | ||
550 | int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, | ||
551 | struct dm_dev **result) | ||
552 | { | ||
553 | return __table_get_device(ti->table, ti, path, mode, result); | ||
554 | } | ||
555 | |||
556 | |||
557 | /* | 550 | /* |
558 | * Decrement a devices use count and remove it if necessary. | 551 | * Decrement a device's use count and remove it if necessary. |
559 | */ | 552 | */ |
560 | void dm_put_device(struct dm_target *ti, struct dm_dev *d) | 553 | void dm_put_device(struct dm_target *ti, struct dm_dev *d) |
561 | { | 554 | { |
@@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) | |||
568 | kfree(dd); | 561 | kfree(dd); |
569 | } | 562 | } |
570 | } | 563 | } |
564 | EXPORT_SYMBOL(dm_put_device); | ||
571 | 565 | ||
572 | /* | 566 | /* |
573 | * Checks to see if the target joins onto the end of the table. | 567 | * Checks to see if the target joins onto the end of the table. |
@@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
791 | 785 | ||
792 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; | 786 | t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; |
793 | 787 | ||
794 | if (!tgt->num_discard_requests) | 788 | if (!tgt->num_discard_requests && tgt->discards_supported) |
795 | t->discards_supported = 0; | 789 | DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.", |
790 | dm_device_name(t->md), type); | ||
796 | 791 | ||
797 | return 0; | 792 | return 0; |
798 | 793 | ||
@@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type, | |||
802 | return r; | 797 | return r; |
803 | } | 798 | } |
804 | 799 | ||
800 | /* | ||
801 | * Target argument parsing helpers. | ||
802 | */ | ||
803 | static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, | ||
804 | unsigned *value, char **error, unsigned grouped) | ||
805 | { | ||
806 | const char *arg_str = dm_shift_arg(arg_set); | ||
807 | |||
808 | if (!arg_str || | ||
809 | (sscanf(arg_str, "%u", value) != 1) || | ||
810 | (*value < arg->min) || | ||
811 | (*value > arg->max) || | ||
812 | (grouped && arg_set->argc < *value)) { | ||
813 | *error = arg->error; | ||
814 | return -EINVAL; | ||
815 | } | ||
816 | |||
817 | return 0; | ||
818 | } | ||
819 | |||
820 | int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, | ||
821 | unsigned *value, char **error) | ||
822 | { | ||
823 | return validate_next_arg(arg, arg_set, value, error, 0); | ||
824 | } | ||
825 | EXPORT_SYMBOL(dm_read_arg); | ||
826 | |||
827 | int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, | ||
828 | unsigned *value, char **error) | ||
829 | { | ||
830 | return validate_next_arg(arg, arg_set, value, error, 1); | ||
831 | } | ||
832 | EXPORT_SYMBOL(dm_read_arg_group); | ||
833 | |||
834 | const char *dm_shift_arg(struct dm_arg_set *as) | ||
835 | { | ||
836 | char *r; | ||
837 | |||
838 | if (as->argc) { | ||
839 | as->argc--; | ||
840 | r = *as->argv; | ||
841 | as->argv++; | ||
842 | return r; | ||
843 | } | ||
844 | |||
845 | return NULL; | ||
846 | } | ||
847 | EXPORT_SYMBOL(dm_shift_arg); | ||
848 | |||
849 | void dm_consume_args(struct dm_arg_set *as, unsigned num_args) | ||
850 | { | ||
851 | BUG_ON(as->argc < num_args); | ||
852 | as->argc -= num_args; | ||
853 | as->argv += num_args; | ||
854 | } | ||
855 | EXPORT_SYMBOL(dm_consume_args); | ||
856 | |||
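These helpers give targets a uniform way to consume their constructor arguments. Below is a minimal sketch of how a hypothetical target constructor might use them; the option name, its bounds and the parsing policy are invented for illustration, and struct dm_arg's min/max/error fields are assumed from their use in validate_next_arg() above.

    /* Hypothetical helper for a target ctr using the new argument API. */
    static int example_parse_features(struct dm_target *ti, struct dm_arg_set *as)
    {
            static struct dm_arg num_features_arg = {
                    .min = 0,
                    .max = 16,
                    .error = "Invalid number of feature arguments",
            };
            unsigned num_features;
            const char *feature;
            char *err;

            /* The count is validated against [min, max] and the remaining argc. */
            if (dm_read_arg_group(&num_features_arg, as, &num_features, &err)) {
                    ti->error = err;
                    return -EINVAL;
            }

            /* Walk the grouped arguments one by one. */
            while (num_features--) {
                    feature = dm_shift_arg(as);
                    if (!feature)
                            return -EINVAL;
                    /* ... interpret the feature string here ... */
            }

            return 0;
    }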
805 | static int dm_table_set_type(struct dm_table *t) | 857 | static int dm_table_set_type(struct dm_table *t) |
806 | { | 858 | { |
807 | unsigned i; | 859 | unsigned i; |
@@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t) | |||
1077 | t->event_fn(t->event_context); | 1129 | t->event_fn(t->event_context); |
1078 | mutex_unlock(&_event_lock); | 1130 | mutex_unlock(&_event_lock); |
1079 | } | 1131 | } |
1132 | EXPORT_SYMBOL(dm_table_event); | ||
1080 | 1133 | ||
1081 | sector_t dm_table_get_size(struct dm_table *t) | 1134 | sector_t dm_table_get_size(struct dm_table *t) |
1082 | { | 1135 | { |
1083 | return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; | 1136 | return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; |
1084 | } | 1137 | } |
1138 | EXPORT_SYMBOL(dm_table_get_size); | ||
1085 | 1139 | ||
1086 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) | 1140 | struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) |
1087 | { | 1141 | { |
@@ -1194,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t) | |||
1194 | blk_get_integrity(template_disk)); | 1248 | blk_get_integrity(template_disk)); |
1195 | } | 1249 | } |
1196 | 1250 | ||
1251 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, | ||
1252 | sector_t start, sector_t len, void *data) | ||
1253 | { | ||
1254 | unsigned flush = (*(unsigned *)data); | ||
1255 | struct request_queue *q = bdev_get_queue(dev->bdev); | ||
1256 | |||
1257 | return q && (q->flush_flags & flush); | ||
1258 | } | ||
1259 | |||
1260 | static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) | ||
1261 | { | ||
1262 | struct dm_target *ti; | ||
1263 | unsigned i = 0; | ||
1264 | |||
1265 | /* | ||
1266 | * Require at least one underlying device to support flushes. | ||
1267 | * t->devices includes internal dm devices such as mirror logs | ||
1268 | * so we need to use iterate_devices here, which targets | ||
1269 | * supporting flushes must provide. | ||
1270 | */ | ||
1271 | while (i < dm_table_get_num_targets(t)) { | ||
1272 | ti = dm_table_get_target(t, i++); | ||
1273 | |||
1274 | if (!ti->num_flush_requests) | ||
1275 | continue; | ||
1276 | |||
1277 | if (ti->type->iterate_devices && | ||
1278 | ti->type->iterate_devices(ti, device_flush_capable, &flush)) | ||
1279 | return 1; | ||
1280 | } | ||
1281 | |||
1282 | return 0; | ||
1283 | } | ||
1284 | |||
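dm_table_supports_flush() relies on each target's iterate_devices method to reach the real underlying block devices. A rough sketch of how a single-device target (in the spirit of dm-linear) would provide that hook, so callouts such as device_flush_capable() above get invoked with its one struct dm_dev; the context structure and its fields are assumptions for illustration.

```c
#include <linux/device-mapper.h>

/*
 * Illustrative only: a one-device target's .iterate_devices implementation.
 * Table-wide checks like dm_table_supports_flush() call this with their own
 * callout (e.g. device_flush_capable) and private data.
 */
struct example_target_ctx {
	struct dm_dev *dev;	/* acquired with dm_get_device() in .ctr */
	sector_t start;
};

static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_target_ctx *ec = ti->private;

	/* Hand the single underlying device to the supplied callout. */
	return fn(ti, ec->dev, ec->start, ti->len, data);
}
```

Targets that never forward flushes can simply leave num_flush_requests at zero, which the loop above skips.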
1197 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | 1285 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1198 | struct queue_limits *limits) | 1286 | struct queue_limits *limits) |
1199 | { | 1287 | { |
1288 | unsigned flush = 0; | ||
1289 | |||
1200 | /* | 1290 | /* |
1201 | * Copy table's limits to the DM device's request_queue | 1291 | * Copy table's limits to the DM device's request_queue |
1202 | */ | 1292 | */ |
@@ -1207,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
1207 | else | 1297 | else |
1208 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | 1298 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); |
1209 | 1299 | ||
1300 | if (dm_table_supports_flush(t, REQ_FLUSH)) { | ||
1301 | flush |= REQ_FLUSH; | ||
1302 | if (dm_table_supports_flush(t, REQ_FUA)) | ||
1303 | flush |= REQ_FUA; | ||
1304 | } | ||
1305 | blk_queue_flush(q, flush); | ||
1306 | |||
1210 | dm_table_set_integrity(t); | 1307 | dm_table_set_integrity(t); |
1211 | 1308 | ||
1212 | /* | 1309 | /* |
@@ -1237,6 +1334,7 @@ fmode_t dm_table_get_mode(struct dm_table *t) | |||
1237 | { | 1334 | { |
1238 | return t->mode; | 1335 | return t->mode; |
1239 | } | 1336 | } |
1337 | EXPORT_SYMBOL(dm_table_get_mode); | ||
1240 | 1338 | ||
1241 | static void suspend_targets(struct dm_table *t, unsigned postsuspend) | 1339 | static void suspend_targets(struct dm_table *t, unsigned postsuspend) |
1242 | { | 1340 | { |
@@ -1345,6 +1443,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) | |||
1345 | { | 1443 | { |
1346 | return t->md; | 1444 | return t->md; |
1347 | } | 1445 | } |
1446 | EXPORT_SYMBOL(dm_table_get_md); | ||
1348 | 1447 | ||
1349 | static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, | 1448 | static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, |
1350 | sector_t start, sector_t len, void *data) | 1449 | sector_t start, sector_t len, void *data) |
@@ -1359,19 +1458,19 @@ bool dm_table_supports_discards(struct dm_table *t) | |||
1359 | struct dm_target *ti; | 1458 | struct dm_target *ti; |
1360 | unsigned i = 0; | 1459 | unsigned i = 0; |
1361 | 1460 | ||
1362 | if (!t->discards_supported) | ||
1363 | return 0; | ||
1364 | |||
1365 | /* | 1461 | /* |
1366 | * Unless any target used by the table set discards_supported, | 1462 | * Unless any target used by the table set discards_supported, |
1367 | * require at least one underlying device to support discards. | 1463 | * require at least one underlying device to support discards. |
1368 | * t->devices includes internal dm devices such as mirror logs | 1464 | * t->devices includes internal dm devices such as mirror logs |
1369 | * so we need to use iterate_devices here, which targets | 1465 | * so we need to use iterate_devices here, which targets |
1370 | * supporting discard must provide. | 1466 | * supporting discard selectively must provide. |
1371 | */ | 1467 | */ |
1372 | while (i < dm_table_get_num_targets(t)) { | 1468 | while (i < dm_table_get_num_targets(t)) { |
1373 | ti = dm_table_get_target(t, i++); | 1469 | ti = dm_table_get_target(t, i++); |
1374 | 1470 | ||
1471 | if (!ti->num_discard_requests) | ||
1472 | continue; | ||
1473 | |||
1375 | if (ti->discards_supported) | 1474 | if (ti->discards_supported) |
1376 | return 1; | 1475 | return 1; |
1377 | 1476 | ||
@@ -1382,13 +1481,3 @@ bool dm_table_supports_discards(struct dm_table *t) | |||
1382 | 1481 | ||
1383 | return 0; | 1482 | return 0; |
1384 | } | 1483 | } |
1385 | |||
1386 | EXPORT_SYMBOL(dm_vcalloc); | ||
1387 | EXPORT_SYMBOL(dm_get_device); | ||
1388 | EXPORT_SYMBOL(dm_put_device); | ||
1389 | EXPORT_SYMBOL(dm_table_event); | ||
1390 | EXPORT_SYMBOL(dm_table_get_size); | ||
1391 | EXPORT_SYMBOL(dm_table_get_mode); | ||
1392 | EXPORT_SYMBOL(dm_table_get_md); | ||
1393 | EXPORT_SYMBOL(dm_table_put); | ||
1394 | EXPORT_SYMBOL(dm_table_get); | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0cf68b478878..52b39f335bb3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -37,6 +37,8 @@ static const char *_name = DM_NAME; | |||
37 | static unsigned int major = 0; | 37 | static unsigned int major = 0; |
38 | static unsigned int _major = 0; | 38 | static unsigned int _major = 0; |
39 | 39 | ||
40 | static DEFINE_IDR(_minor_idr); | ||
41 | |||
40 | static DEFINE_SPINLOCK(_minor_lock); | 42 | static DEFINE_SPINLOCK(_minor_lock); |
41 | /* | 43 | /* |
42 | * For bio-based dm. | 44 | * For bio-based dm. |
@@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); | |||
109 | #define DMF_FREEING 3 | 111 | #define DMF_FREEING 3 |
110 | #define DMF_DELETING 4 | 112 | #define DMF_DELETING 4 |
111 | #define DMF_NOFLUSH_SUSPENDING 5 | 113 | #define DMF_NOFLUSH_SUSPENDING 5 |
114 | #define DMF_MERGE_IS_OPTIONAL 6 | ||
112 | 115 | ||
113 | /* | 116 | /* |
114 | * Work processed by per-device workqueue. | 117 | * Work processed by per-device workqueue. |
@@ -313,6 +316,12 @@ static void __exit dm_exit(void) | |||
313 | 316 | ||
314 | while (i--) | 317 | while (i--) |
315 | _exits[i](); | 318 | _exits[i](); |
319 | |||
320 | /* | ||
321 | * Should be empty by this point. | ||
322 | */ | ||
323 | idr_remove_all(&_minor_idr); | ||
324 | idr_destroy(&_minor_idr); | ||
316 | } | 325 | } |
317 | 326 | ||
318 | /* | 327 | /* |
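Making _minor_idr file-global (the hunk further down removes the old local definition next to dm_any_congested()) is what lets dm_exit() tear it down here. A compact sketch of the lifecycle this implies; the lock and function names are illustrative, while the idr_* calls are the actual API used in this file.

```c
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/init.h>

static DEFINE_IDR(example_minor_idr);
static DEFINE_SPINLOCK(example_minor_lock);

/* Per-device teardown: release one minor number. */
static void example_free_minor(int minor)
{
	spin_lock(&example_minor_lock);
	idr_remove(&example_minor_idr, minor);
	spin_unlock(&example_minor_lock);
}

/* Module exit: the IDR should already be empty, then free its layers. */
static void __exit example_exit(void)
{
	idr_remove_all(&example_minor_idr);
	idr_destroy(&example_minor_idr);
}
```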
@@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci) | |||
1171 | 1180 | ||
1172 | /* | 1181 | /* |
1173 | * Even though the device advertised discard support, | 1182 | * Even though the device advertised discard support, |
1174 | * reconfiguration might have changed that since the | 1183 | * that does not mean every target supports it, and |
1184 | * reconfiguration might also have changed that since the | ||
1175 | * check was performed. | 1185 | * check was performed. |
1176 | */ | 1186 | */ |
1177 | if (!ti->num_discard_requests) | 1187 | if (!ti->num_discard_requests) |
@@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits) | |||
1705 | /*----------------------------------------------------------------- | 1715 | /*----------------------------------------------------------------- |
1706 | * An IDR is used to keep track of allocated minor numbers. | 1716 | * An IDR is used to keep track of allocated minor numbers. |
1707 | *---------------------------------------------------------------*/ | 1717 | *---------------------------------------------------------------*/ |
1708 | static DEFINE_IDR(_minor_idr); | ||
1709 | |||
1710 | static void free_minor(int minor) | 1718 | static void free_minor(int minor) |
1711 | { | 1719 | { |
1712 | spin_lock(&_minor_lock); | 1720 | spin_lock(&_minor_lock); |
@@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md) | |||
1800 | blk_queue_make_request(md->queue, dm_request); | 1808 | blk_queue_make_request(md->queue, dm_request); |
1801 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | 1809 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); |
1802 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | 1810 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); |
1803 | blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); | ||
1804 | } | 1811 | } |
1805 | 1812 | ||
1806 | /* | 1813 | /* |
@@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size) | |||
1986 | } | 1993 | } |
1987 | 1994 | ||
1988 | /* | 1995 | /* |
1996 | * Return 1 if the queue has a compulsory merge_bvec_fn function. | ||
1997 | * | ||
1998 | * If this function returns 0, then the device is either a non-dm | ||
1999 | * device without a merge_bvec_fn, or it is a dm device that is | ||
2000 | * able to split any bios it receives that are too big. | ||
2001 | */ | ||
2002 | int dm_queue_merge_is_compulsory(struct request_queue *q) | ||
2003 | { | ||
2004 | struct mapped_device *dev_md; | ||
2005 | |||
2006 | if (!q->merge_bvec_fn) | ||
2007 | return 0; | ||
2008 | |||
2009 | if (q->make_request_fn == dm_request) { | ||
2010 | dev_md = q->queuedata; | ||
2011 | if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) | ||
2012 | return 0; | ||
2013 | } | ||
2014 | |||
2015 | return 1; | ||
2016 | } | ||
2017 | |||
2018 | static int dm_device_merge_is_compulsory(struct dm_target *ti, | ||
2019 | struct dm_dev *dev, sector_t start, | ||
2020 | sector_t len, void *data) | ||
2021 | { | ||
2022 | struct block_device *bdev = dev->bdev; | ||
2023 | struct request_queue *q = bdev_get_queue(bdev); | ||
2024 | |||
2025 | return dm_queue_merge_is_compulsory(q); | ||
2026 | } | ||
2027 | |||
2028 | /* | ||
2029 | * Return 1 if it is acceptable to ignore merge_bvec_fn based | ||
2030 | * on the properties of the underlying devices. | ||
2031 | */ | ||
2032 | static int dm_table_merge_is_optional(struct dm_table *table) | ||
2033 | { | ||
2034 | unsigned i = 0; | ||
2035 | struct dm_target *ti; | ||
2036 | |||
2037 | while (i < dm_table_get_num_targets(table)) { | ||
2038 | ti = dm_table_get_target(table, i++); | ||
2039 | |||
2040 | if (ti->type->iterate_devices && | ||
2041 | ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) | ||
2042 | return 0; | ||
2043 | } | ||
2044 | |||
2045 | return 1; | ||
2046 | } | ||
2047 | |||
2048 | /* | ||
1989 | * Returns old map, which caller must destroy. | 2049 | * Returns old map, which caller must destroy. |
1990 | */ | 2050 | */ |
1991 | static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, | 2051 | static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, |
@@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, | |||
1995 | struct request_queue *q = md->queue; | 2055 | struct request_queue *q = md->queue; |
1996 | sector_t size; | 2056 | sector_t size; |
1997 | unsigned long flags; | 2057 | unsigned long flags; |
2058 | int merge_is_optional; | ||
1998 | 2059 | ||
1999 | size = dm_table_get_size(t); | 2060 | size = dm_table_get_size(t); |
2000 | 2061 | ||
@@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, | |||
2020 | 2081 | ||
2021 | __bind_mempools(md, t); | 2082 | __bind_mempools(md, t); |
2022 | 2083 | ||
2084 | merge_is_optional = dm_table_merge_is_optional(t); | ||
2085 | |||
2023 | write_lock_irqsave(&md->map_lock, flags); | 2086 | write_lock_irqsave(&md->map_lock, flags); |
2024 | old_map = md->map; | 2087 | old_map = md->map; |
2025 | md->map = t; | 2088 | md->map = t; |
2026 | dm_table_set_restrictions(t, q, limits); | 2089 | dm_table_set_restrictions(t, q, limits); |
2090 | if (merge_is_optional) | ||
2091 | set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); | ||
2092 | else | ||
2093 | clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); | ||
2027 | write_unlock_irqrestore(&md->map_lock, flags); | 2094 | write_unlock_irqrestore(&md->map_lock, flags); |
2028 | 2095 | ||
2029 | return old_map; | 2096 | return old_map; |
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 1aaf16746da8..6745dbd278a4 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t); | |||
66 | void dm_table_free_md_mempools(struct dm_table *t); | 66 | void dm_table_free_md_mempools(struct dm_table *t); |
67 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); | 67 | struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); |
68 | 68 | ||
69 | int dm_queue_merge_is_compulsory(struct request_queue *q); | ||
70 | |||
69 | void dm_lock_md_type(struct mapped_device *md); | 71 | void dm_lock_md_type(struct mapped_device *md); |
70 | void dm_unlock_md_type(struct mapped_device *md); | 72 | void dm_unlock_md_type(struct mapped_device *md); |
71 | void dm_set_md_type(struct mapped_device *md, unsigned type); | 73 | void dm_set_md_type(struct mapped_device *md, unsigned type); |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 5b0dba6d4efa..d724a18b5285 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -1989,14 +1989,20 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1989 | return -EINVAL; | 1989 | return -EINVAL; |
1990 | } | 1990 | } |
1991 | 1991 | ||
1992 | /* | ||
1993 | * It's important to set the bp->state to the value different from | ||
1994 | * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() | ||
1995 | * may restart the Tx from the NAPI context (see bnx2x_tx_int()). | ||
1996 | */ | ||
1997 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
1998 | smp_mb(); | ||
1999 | |||
1992 | /* Stop Tx */ | 2000 | /* Stop Tx */ |
1993 | bnx2x_tx_disable(bp); | 2001 | bnx2x_tx_disable(bp); |
1994 | 2002 | ||
1995 | #ifdef BCM_CNIC | 2003 | #ifdef BCM_CNIC |
1996 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | 2004 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); |
1997 | #endif | 2005 | #endif |
1998 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
1999 | smp_mb(); | ||
2000 | 2006 | ||
2001 | bp->rx_mode = BNX2X_RX_MODE_NONE; | 2007 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
2002 | 2008 | ||
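The new comment explains why bp->state must change before the Tx queues are stopped: the NAPI completion path re-checks the state before waking the queues. A generic, kernel-context sketch of this publish-then-act pattern follows; the types and helper names are illustrative, only smp_mb() is the real primitive.

```c
/*
 * Generic sketch of the ordering the hunk above establishes: the shutdown
 * path publishes the "closing" state and issues a full barrier before
 * stopping Tx; the completion path re-checks the state (after its own
 * barrier) before restarting Tx, so it can never undo the shutdown.
 */
enum example_state { EXAMPLE_STATE_OPEN, EXAMPLE_STATE_CLOSING };

struct example_dev {
	enum example_state state;
};

static void example_stop_tx(struct example_dev *ed)
{
	ed->state = EXAMPLE_STATE_CLOSING;	/* 1. publish the new state */
	smp_mb();				/* 2. make it globally visible */
	/* 3. ...now actually disable the Tx queues... */
}

static int example_may_restart_tx(struct example_dev *ed)
{
	smp_mb();				/* pairs with the barrier above */
	return ed->state == EXAMPLE_STATE_OPEN;
}
```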
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index 06727f32e505..dc24de40e336 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h | |||
@@ -1204,6 +1204,8 @@ struct drv_port_mb { | |||
1204 | 1204 | ||
1205 | #define LINK_STATUS_PFC_ENABLED 0x20000000 | 1205 | #define LINK_STATUS_PFC_ENABLED 0x20000000 |
1206 | 1206 | ||
1207 | #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 | ||
1208 | |||
1207 | u32 port_stx; | 1209 | u32 port_stx; |
1208 | 1210 | ||
1209 | u32 stat_nig_timer; | 1211 | u32 stat_nig_timer; |
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index bcd8f0038628..d45b1555a602 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c | |||
@@ -1546,6 +1546,12 @@ static void bnx2x_umac_enable(struct link_params *params, | |||
1546 | vars->line_speed); | 1546 | vars->line_speed); |
1547 | break; | 1547 | break; |
1548 | } | 1548 | } |
1549 | if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) | ||
1550 | val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; | ||
1551 | |||
1552 | if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) | ||
1553 | val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; | ||
1554 | |||
1549 | REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); | 1555 | REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); |
1550 | udelay(50); | 1556 | udelay(50); |
1551 | 1557 | ||
@@ -1661,10 +1667,20 @@ static void bnx2x_xmac_disable(struct link_params *params) | |||
1661 | { | 1667 | { |
1662 | u8 port = params->port; | 1668 | u8 port = params->port; |
1663 | struct bnx2x *bp = params->bp; | 1669 | struct bnx2x *bp = params->bp; |
1664 | u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; | 1670 | u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; |
1665 | 1671 | ||
1666 | if (REG_RD(bp, MISC_REG_RESET_REG_2) & | 1672 | if (REG_RD(bp, MISC_REG_RESET_REG_2) & |
1667 | MISC_REGISTERS_RESET_REG_2_XMAC) { | 1673 | MISC_REGISTERS_RESET_REG_2_XMAC) { |
1674 | /* | ||
1675 | * Send an indication to change the state in the NIG back to XON | ||
1676 | * Clearing this bit enables the next set of this bit to get | ||
1677 | * rising edge | ||
1678 | */ | ||
1679 | pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); | ||
1680 | REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, | ||
1681 | (pfc_ctrl & ~(1<<1))); | ||
1682 | REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, | ||
1683 | (pfc_ctrl | (1<<1))); | ||
1668 | DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); | 1684 | DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); |
1669 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); | 1685 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); |
1670 | usleep_range(1000, 1000); | 1686 | usleep_range(1000, 1000); |
@@ -1729,6 +1745,10 @@ static int bnx2x_emac_enable(struct link_params *params, | |||
1729 | 1745 | ||
1730 | DP(NETIF_MSG_LINK, "enabling EMAC\n"); | 1746 | DP(NETIF_MSG_LINK, "enabling EMAC\n"); |
1731 | 1747 | ||
1748 | /* Disable BMAC */ | ||
1749 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
1750 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | ||
1751 | |||
1732 | /* enable emac and not bmac */ | 1752 | /* enable emac and not bmac */ |
1733 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); | 1753 | REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); |
1734 | 1754 | ||
@@ -2583,12 +2603,6 @@ static int bnx2x_bmac1_enable(struct link_params *params, | |||
2583 | REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, | 2603 | REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, |
2584 | wb_data, 2); | 2604 | wb_data, 2); |
2585 | 2605 | ||
2586 | if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { | ||
2587 | REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS, | ||
2588 | wb_data, 2); | ||
2589 | if (wb_data[0] > 0) | ||
2590 | return -ESRCH; | ||
2591 | } | ||
2592 | return 0; | 2606 | return 0; |
2593 | } | 2607 | } |
2594 | 2608 | ||
@@ -2654,16 +2668,6 @@ static int bnx2x_bmac2_enable(struct link_params *params, | |||
2654 | udelay(30); | 2668 | udelay(30); |
2655 | bnx2x_update_pfc_bmac2(params, vars, is_lb); | 2669 | bnx2x_update_pfc_bmac2(params, vars, is_lb); |
2656 | 2670 | ||
2657 | if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { | ||
2658 | REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT, | ||
2659 | wb_data, 2); | ||
2660 | if (wb_data[0] > 0) { | ||
2661 | DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n", | ||
2662 | wb_data[0]); | ||
2663 | return -ESRCH; | ||
2664 | } | ||
2665 | } | ||
2666 | |||
2667 | return 0; | 2671 | return 0; |
2668 | } | 2672 | } |
2669 | 2673 | ||
@@ -2949,7 +2953,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
2949 | u32 val; | 2953 | u32 val; |
2950 | u16 i; | 2954 | u16 i; |
2951 | int rc = 0; | 2955 | int rc = 0; |
2952 | 2956 | if (phy->flags & FLAGS_MDC_MDIO_WA_B0) | |
2957 | bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, | ||
2958 | EMAC_MDIO_STATUS_10MB); | ||
2953 | /* address */ | 2959 | /* address */ |
2954 | val = ((phy->addr << 21) | (devad << 16) | reg | | 2960 | val = ((phy->addr << 21) | (devad << 16) | reg | |
2955 | EMAC_MDIO_COMM_COMMAND_ADDRESS | | 2961 | EMAC_MDIO_COMM_COMMAND_ADDRESS | |
@@ -3003,6 +3009,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
3003 | } | 3009 | } |
3004 | } | 3010 | } |
3005 | 3011 | ||
3012 | if (phy->flags & FLAGS_MDC_MDIO_WA_B0) | ||
3013 | bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, | ||
3014 | EMAC_MDIO_STATUS_10MB); | ||
3006 | return rc; | 3015 | return rc; |
3007 | } | 3016 | } |
3008 | 3017 | ||
@@ -3012,6 +3021,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
3012 | u32 tmp; | 3021 | u32 tmp; |
3013 | u8 i; | 3022 | u8 i; |
3014 | int rc = 0; | 3023 | int rc = 0; |
3024 | if (phy->flags & FLAGS_MDC_MDIO_WA_B0) | ||
3025 | bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, | ||
3026 | EMAC_MDIO_STATUS_10MB); | ||
3015 | 3027 | ||
3016 | /* address */ | 3028 | /* address */ |
3017 | 3029 | ||
@@ -3065,7 +3077,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, | |||
3065 | bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); | 3077 | bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); |
3066 | } | 3078 | } |
3067 | } | 3079 | } |
3068 | 3080 | if (phy->flags & FLAGS_MDC_MDIO_WA_B0) | |
3081 | bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, | ||
3082 | EMAC_MDIO_STATUS_10MB); | ||
3069 | return rc; | 3083 | return rc; |
3070 | } | 3084 | } |
3071 | 3085 | ||
@@ -4353,6 +4367,9 @@ void bnx2x_link_status_update(struct link_params *params, | |||
4353 | 4367 | ||
4354 | vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); | 4368 | vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); |
4355 | vars->phy_flags = PHY_XGXS_FLAG; | 4369 | vars->phy_flags = PHY_XGXS_FLAG; |
4370 | if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) | ||
4371 | vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; | ||
4372 | |||
4356 | if (vars->link_up) { | 4373 | if (vars->link_up) { |
4357 | DP(NETIF_MSG_LINK, "phy link up\n"); | 4374 | DP(NETIF_MSG_LINK, "phy link up\n"); |
4358 | 4375 | ||
@@ -4444,6 +4461,8 @@ void bnx2x_link_status_update(struct link_params *params, | |||
4444 | 4461 | ||
4445 | /* indicate no mac active */ | 4462 | /* indicate no mac active */ |
4446 | vars->mac_type = MAC_TYPE_NONE; | 4463 | vars->mac_type = MAC_TYPE_NONE; |
4464 | if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) | ||
4465 | vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; | ||
4447 | } | 4466 | } |
4448 | 4467 | ||
4449 | /* Sync media type */ | 4468 | /* Sync media type */ |
@@ -5903,20 +5922,30 @@ int bnx2x_set_led(struct link_params *params, | |||
5903 | tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); | 5922 | tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); |
5904 | EMAC_WR(bp, EMAC_REG_EMAC_LED, | 5923 | EMAC_WR(bp, EMAC_REG_EMAC_LED, |
5905 | (tmp | EMAC_LED_OVERRIDE)); | 5924 | (tmp | EMAC_LED_OVERRIDE)); |
5906 | return rc; | 5925 | /* |
5926 | * return here without enabling traffic | ||
5927 | * LED blink and setting rate in ON mode. | ||
5928 | * In oper mode, enabling LED blink | ||
5929 | * and setting rate is needed. | ||
5930 | */ | ||
5931 | if (mode == LED_MODE_ON) | ||
5932 | return rc; | ||
5907 | } | 5933 | } |
5908 | } else if (SINGLE_MEDIA_DIRECT(params) && | 5934 | } else if (SINGLE_MEDIA_DIRECT(params)) { |
5909 | (CHIP_IS_E1x(bp) || | ||
5910 | CHIP_IS_E2(bp))) { | ||
5911 | /* | 5935 | /* |
5912 | * This is a work-around for HW issue found when link | 5936 | * This is a work-around for HW issue found when link |
5913 | * is up in CL73 | 5937 | * is up in CL73 |
5914 | */ | 5938 | */ |
5915 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); | ||
5916 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | 5939 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); |
5917 | } else { | 5940 | if (CHIP_IS_E1x(bp) || |
5941 | CHIP_IS_E2(bp) || | ||
5942 | (mode == LED_MODE_ON)) | ||
5943 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); | ||
5944 | else | ||
5945 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, | ||
5946 | hw_led_mode); | ||
5947 | } else | ||
5918 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); | 5948 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); |
5919 | } | ||
5920 | 5949 | ||
5921 | REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); | 5950 | REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); |
5922 | /* Set blinking rate to ~15.9Hz */ | 5951 | /* Set blinking rate to ~15.9Hz */ |
@@ -6160,6 +6189,7 @@ static int bnx2x_update_link_down(struct link_params *params, | |||
6160 | /* update shared memory */ | 6189 | /* update shared memory */ |
6161 | vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | | 6190 | vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | |
6162 | LINK_STATUS_LINK_UP | | 6191 | LINK_STATUS_LINK_UP | |
6192 | LINK_STATUS_PHYSICAL_LINK_FLAG | | ||
6163 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | | 6193 | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | |
6164 | LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | | 6194 | LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | |
6165 | LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | | 6195 | LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | |
@@ -6197,7 +6227,8 @@ static int bnx2x_update_link_up(struct link_params *params, | |||
6197 | u8 port = params->port; | 6227 | u8 port = params->port; |
6198 | int rc = 0; | 6228 | int rc = 0; |
6199 | 6229 | ||
6200 | vars->link_status |= LINK_STATUS_LINK_UP; | 6230 | vars->link_status |= (LINK_STATUS_LINK_UP | |
6231 | LINK_STATUS_PHYSICAL_LINK_FLAG); | ||
6201 | vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; | 6232 | vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; |
6202 | 6233 | ||
6203 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) | 6234 | if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) |
@@ -7998,6 +8029,9 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, | |||
7998 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | 8029 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, |
7999 | MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); | 8030 | MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); |
8000 | 8031 | ||
8032 | /* Restart microcode to re-read the new mode */ | ||
8033 | bnx2x_warpcore_reset_lane(bp, phy, 1); | ||
8034 | bnx2x_warpcore_reset_lane(bp, phy, 0); | ||
8001 | 8035 | ||
8002 | } | 8036 | } |
8003 | 8037 | ||
@@ -8116,7 +8150,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params) | |||
8116 | offsetof(struct shmem_region, dev_info. | 8150 | offsetof(struct shmem_region, dev_info. |
8117 | port_feature_config[params->port]. | 8151 | port_feature_config[params->port]. |
8118 | config)); | 8152 | config)); |
8119 | |||
8120 | bnx2x_set_gpio_int(bp, gpio_num, | 8153 | bnx2x_set_gpio_int(bp, gpio_num, |
8121 | MISC_REGISTERS_GPIO_INT_OUTPUT_SET, | 8154 | MISC_REGISTERS_GPIO_INT_OUTPUT_SET, |
8122 | gpio_port); | 8155 | gpio_port); |
@@ -8125,8 +8158,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) | |||
8125 | * Disable transmit for this module | 8158 | * Disable transmit for this module |
8126 | */ | 8159 | */ |
8127 | phy->media_type = ETH_PHY_NOT_PRESENT; | 8160 | phy->media_type = ETH_PHY_NOT_PRESENT; |
8128 | if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == | 8161 | if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == |
8129 | PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) | 8162 | PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) || |
8163 | CHIP_IS_E3(bp)) | ||
8130 | bnx2x_sfp_set_transmitter(params, phy, 0); | 8164 | bnx2x_sfp_set_transmitter(params, phy, 0); |
8131 | } | 8165 | } |
8132 | } | 8166 | } |
@@ -8228,9 +8262,6 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, | |||
8228 | u16 cnt, val, tmp1; | 8262 | u16 cnt, val, tmp1; |
8229 | struct bnx2x *bp = params->bp; | 8263 | struct bnx2x *bp = params->bp; |
8230 | 8264 | ||
8231 | /* SPF+ PHY: Set flag to check for Tx error */ | ||
8232 | vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; | ||
8233 | |||
8234 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | 8265 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, |
8235 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); | 8266 | MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); |
8236 | /* HW reset */ | 8267 | /* HW reset */ |
@@ -8414,9 +8445,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, | |||
8414 | struct bnx2x *bp = params->bp; | 8445 | struct bnx2x *bp = params->bp; |
8415 | DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); | 8446 | DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); |
8416 | 8447 | ||
8417 | /* SPF+ PHY: Set flag to check for Tx error */ | ||
8418 | vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; | ||
8419 | |||
8420 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); | 8448 | bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); |
8421 | bnx2x_wait_reset_complete(bp, phy, params); | 8449 | bnx2x_wait_reset_complete(bp, phy, params); |
8422 | 8450 | ||
@@ -8585,9 +8613,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, | |||
8585 | struct bnx2x *bp = params->bp; | 8613 | struct bnx2x *bp = params->bp; |
8586 | /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ | 8614 | /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ |
8587 | 8615 | ||
8588 | /* SPF+ PHY: Set flag to check for Tx error */ | ||
8589 | vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; | ||
8590 | |||
8591 | bnx2x_wait_reset_complete(bp, phy, params); | 8616 | bnx2x_wait_reset_complete(bp, phy, params); |
8592 | rx_alarm_ctrl_val = (1<<2) | (1<<5) ; | 8617 | rx_alarm_ctrl_val = (1<<2) | (1<<5) ; |
8593 | /* Should be 0x6 to enable XS on Tx side. */ | 8618 | /* Should be 0x6 to enable XS on Tx side. */ |
@@ -9243,7 +9268,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
9243 | if (phy->req_duplex == DUPLEX_FULL) | 9268 | if (phy->req_duplex == DUPLEX_FULL) |
9244 | autoneg_val |= (1<<8); | 9269 | autoneg_val |= (1<<8); |
9245 | 9270 | ||
9246 | bnx2x_cl45_write(bp, phy, | 9271 | /* |
9272 | * Always write this if this is not 84833. | ||
9273 | * For 84833, write it only when it's a forced speed. | ||
9274 | */ | ||
9275 | if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || | ||
9276 | ((autoneg_val & (1<<12)) == 0)) | ||
9277 | bnx2x_cl45_write(bp, phy, | ||
9247 | MDIO_AN_DEVAD, | 9278 | MDIO_AN_DEVAD, |
9248 | MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); | 9279 | MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); |
9249 | 9280 | ||
@@ -9257,13 +9288,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
9257 | bnx2x_cl45_write(bp, phy, | 9288 | bnx2x_cl45_write(bp, phy, |
9258 | MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, | 9289 | MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, |
9259 | 0x3200); | 9290 | 0x3200); |
9260 | } else if (phy->req_line_speed != SPEED_10 && | 9291 | } else |
9261 | phy->req_line_speed != SPEED_100) { | ||
9262 | bnx2x_cl45_write(bp, phy, | 9292 | bnx2x_cl45_write(bp, phy, |
9263 | MDIO_AN_DEVAD, | 9293 | MDIO_AN_DEVAD, |
9264 | MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, | 9294 | MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, |
9265 | 1); | 9295 | 1); |
9266 | } | 9296 | |
9267 | /* Save spirom version */ | 9297 | /* Save spirom version */ |
9268 | bnx2x_save_848xx_spirom_version(phy, params); | 9298 | bnx2x_save_848xx_spirom_version(phy, params); |
9269 | 9299 | ||
@@ -9756,11 +9786,9 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, | |||
9756 | bnx2x_cl45_read(bp, phy, | 9786 | bnx2x_cl45_read(bp, phy, |
9757 | MDIO_CTL_DEVAD, | 9787 | MDIO_CTL_DEVAD, |
9758 | 0x400f, &val16); | 9788 | 0x400f, &val16); |
9759 | /* Put to low power mode on newer FW */ | 9789 | bnx2x_cl45_write(bp, phy, |
9760 | if ((val16 & 0x303f) > 0x1009) | 9790 | MDIO_PMA_DEVAD, |
9761 | bnx2x_cl45_write(bp, phy, | 9791 | MDIO_PMA_REG_CTRL, 0x800); |
9762 | MDIO_PMA_DEVAD, | ||
9763 | MDIO_PMA_REG_CTRL, 0x800); | ||
9764 | } | 9792 | } |
9765 | } | 9793 | } |
9766 | 9794 | ||
@@ -10191,8 +10219,15 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, | |||
10191 | u32 cfg_pin; | 10219 | u32 cfg_pin; |
10192 | u8 port; | 10220 | u8 port; |
10193 | 10221 | ||
10194 | /* This works with E3 only, no need to check the chip | 10222 | /* |
10195 | before determining the port. */ | 10223 | * In case of no EPIO routed to reset the GPHY, put it |
10224 | * in low power mode. | ||
10225 | */ | ||
10226 | bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); | ||
10227 | /* | ||
10228 | * This works with E3 only, no need to check the chip | ||
10229 | * before determining the port. | ||
10230 | */ | ||
10196 | port = params->port; | 10231 | port = params->port; |
10197 | cfg_pin = (REG_RD(bp, params->shmem_base + | 10232 | cfg_pin = (REG_RD(bp, params->shmem_base + |
10198 | offsetof(struct shmem_region, | 10233 | offsetof(struct shmem_region, |
@@ -10603,7 +10638,8 @@ static struct bnx2x_phy phy_warpcore = { | |||
10603 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, | 10638 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, |
10604 | .addr = 0xff, | 10639 | .addr = 0xff, |
10605 | .def_md_devad = 0, | 10640 | .def_md_devad = 0, |
10606 | .flags = FLAGS_HW_LOCK_REQUIRED, | 10641 | .flags = (FLAGS_HW_LOCK_REQUIRED | |
10642 | FLAGS_TX_ERROR_CHECK), | ||
10607 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10643 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10608 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10644 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10609 | .mdio_ctrl = 0, | 10645 | .mdio_ctrl = 0, |
@@ -10729,7 +10765,8 @@ static struct bnx2x_phy phy_8706 = { | |||
10729 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, | 10765 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, |
10730 | .addr = 0xff, | 10766 | .addr = 0xff, |
10731 | .def_md_devad = 0, | 10767 | .def_md_devad = 0, |
10732 | .flags = FLAGS_INIT_XGXS_FIRST, | 10768 | .flags = (FLAGS_INIT_XGXS_FIRST | |
10769 | FLAGS_TX_ERROR_CHECK), | ||
10733 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10770 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10734 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10771 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10735 | .mdio_ctrl = 0, | 10772 | .mdio_ctrl = 0, |
@@ -10760,7 +10797,8 @@ static struct bnx2x_phy phy_8726 = { | |||
10760 | .addr = 0xff, | 10797 | .addr = 0xff, |
10761 | .def_md_devad = 0, | 10798 | .def_md_devad = 0, |
10762 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10799 | .flags = (FLAGS_HW_LOCK_REQUIRED | |
10763 | FLAGS_INIT_XGXS_FIRST), | 10800 | FLAGS_INIT_XGXS_FIRST | |
10801 | FLAGS_TX_ERROR_CHECK), | ||
10764 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10802 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10765 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10803 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10766 | .mdio_ctrl = 0, | 10804 | .mdio_ctrl = 0, |
@@ -10791,7 +10829,8 @@ static struct bnx2x_phy phy_8727 = { | |||
10791 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, | 10829 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, |
10792 | .addr = 0xff, | 10830 | .addr = 0xff, |
10793 | .def_md_devad = 0, | 10831 | .def_md_devad = 0, |
10794 | .flags = FLAGS_FAN_FAILURE_DET_REQ, | 10832 | .flags = (FLAGS_FAN_FAILURE_DET_REQ | |
10833 | FLAGS_TX_ERROR_CHECK), | ||
10795 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10834 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10796 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10835 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10797 | .mdio_ctrl = 0, | 10836 | .mdio_ctrl = 0, |
@@ -11112,6 +11151,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, | |||
11112 | */ | 11151 | */ |
11113 | if (CHIP_REV(bp) == CHIP_REV_Ax) | 11152 | if (CHIP_REV(bp) == CHIP_REV_Ax) |
11114 | phy->flags |= FLAGS_MDC_MDIO_WA; | 11153 | phy->flags |= FLAGS_MDC_MDIO_WA; |
11154 | else | ||
11155 | phy->flags |= FLAGS_MDC_MDIO_WA_B0; | ||
11115 | } else { | 11156 | } else { |
11116 | switch (switch_cfg) { | 11157 | switch (switch_cfg) { |
11117 | case SWITCH_CFG_1G: | 11158 | case SWITCH_CFG_1G: |
@@ -11500,13 +11541,12 @@ void bnx2x_init_xmac_loopback(struct link_params *params, | |||
11500 | * Set WC to loopback mode since link is required to provide clock | 11541 | * Set WC to loopback mode since link is required to provide clock |
11501 | * to the XMAC in 20G mode | 11542 | * to the XMAC in 20G mode |
11502 | */ | 11543 | */ |
11503 | if (vars->line_speed == SPEED_20000) { | 11544 | bnx2x_set_aer_mmd(params, ¶ms->phy[0]); |
11504 | bnx2x_set_aer_mmd(params, ¶ms->phy[0]); | 11545 | bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); |
11505 | bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); | 11546 | params->phy[INT_PHY].config_loopback( |
11506 | params->phy[INT_PHY].config_loopback( | ||
11507 | ¶ms->phy[INT_PHY], | 11547 | ¶ms->phy[INT_PHY], |
11508 | params); | 11548 | params); |
11509 | } | 11549 | |
11510 | bnx2x_xmac_enable(params, vars, 1); | 11550 | bnx2x_xmac_enable(params, vars, 1); |
11511 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); | 11551 | REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); |
11512 | } | 11552 | } |
@@ -11684,12 +11724,16 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, | |||
11684 | bnx2x_set_led(params, vars, LED_MODE_OFF, 0); | 11724 | bnx2x_set_led(params, vars, LED_MODE_OFF, 0); |
11685 | 11725 | ||
11686 | if (reset_ext_phy) { | 11726 | if (reset_ext_phy) { |
11727 | bnx2x_set_mdio_clk(bp, params->chip_id, port); | ||
11687 | for (phy_index = EXT_PHY1; phy_index < params->num_phys; | 11728 | for (phy_index = EXT_PHY1; phy_index < params->num_phys; |
11688 | phy_index++) { | 11729 | phy_index++) { |
11689 | if (params->phy[phy_index].link_reset) | 11730 | if (params->phy[phy_index].link_reset) { |
11731 | bnx2x_set_aer_mmd(params, | ||
11732 | ¶ms->phy[phy_index]); | ||
11690 | params->phy[phy_index].link_reset( | 11733 | params->phy[phy_index].link_reset( |
11691 | ¶ms->phy[phy_index], | 11734 | ¶ms->phy[phy_index], |
11692 | params); | 11735 | params); |
11736 | } | ||
11693 | if (params->phy[phy_index].flags & | 11737 | if (params->phy[phy_index].flags & |
11694 | FLAGS_REARM_LATCH_SIGNAL) | 11738 | FLAGS_REARM_LATCH_SIGNAL) |
11695 | clear_latch_ind = 1; | 11739 | clear_latch_ind = 1; |
@@ -12178,10 +12222,6 @@ static void bnx2x_analyze_link_error(struct link_params *params, | |||
12178 | u8 led_mode; | 12222 | u8 led_mode; |
12179 | u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; | 12223 | u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; |
12180 | 12224 | ||
12181 | /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n", | ||
12182 | vars->link_up, | ||
12183 | half_open_conn, lss_status);*/ | ||
12184 | |||
12185 | if ((lss_status ^ half_open_conn) == 0) | 12225 | if ((lss_status ^ half_open_conn) == 0) |
12186 | return; | 12226 | return; |
12187 | 12227 | ||
@@ -12194,6 +12234,7 @@ static void bnx2x_analyze_link_error(struct link_params *params, | |||
12194 | * b. Update link_vars->link_up | 12234 | * b. Update link_vars->link_up |
12195 | */ | 12235 | */ |
12196 | if (lss_status) { | 12236 | if (lss_status) { |
12237 | DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); | ||
12197 | vars->link_status &= ~LINK_STATUS_LINK_UP; | 12238 | vars->link_status &= ~LINK_STATUS_LINK_UP; |
12198 | vars->link_up = 0; | 12239 | vars->link_up = 0; |
12199 | vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; | 12240 | vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; |
@@ -12203,6 +12244,7 @@ static void bnx2x_analyze_link_error(struct link_params *params, | |||
12203 | */ | 12244 | */ |
12204 | led_mode = LED_MODE_OFF; | 12245 | led_mode = LED_MODE_OFF; |
12205 | } else { | 12246 | } else { |
12247 | DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); | ||
12206 | vars->link_status |= LINK_STATUS_LINK_UP; | 12248 | vars->link_status |= LINK_STATUS_LINK_UP; |
12207 | vars->link_up = 1; | 12249 | vars->link_up = 1; |
12208 | vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; | 12250 | vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; |
@@ -12219,6 +12261,15 @@ static void bnx2x_analyze_link_error(struct link_params *params, | |||
12219 | bnx2x_notify_link_changed(bp); | 12261 | bnx2x_notify_link_changed(bp); |
12220 | } | 12262 | } |
12221 | 12263 | ||
12264 | /****************************************************************************** | ||
12265 | * Description: | ||
12266 | * This function checks for a change in the half-open connection | ||
12267 | * indication. When such a change occurs, it calls bnx2x_analyze_link_error | ||
12268 | * to check whether Remote Fault is set or cleared. Reception of a remote | ||
12269 | * fault status message in the MAC indicates that the peer's MAC has | ||
12270 | * detected a fault, for example due to a break in the Tx side of the fiber. | ||
12271 | * | ||
12272 | ******************************************************************************/ | ||
12222 | static void bnx2x_check_half_open_conn(struct link_params *params, | 12273 | static void bnx2x_check_half_open_conn(struct link_params *params, |
12223 | struct link_vars *vars) | 12274 | struct link_vars *vars) |
12224 | { | 12275 | { |
@@ -12229,9 +12280,28 @@ static void bnx2x_check_half_open_conn(struct link_params *params, | |||
12229 | if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) | 12280 | if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) |
12230 | return; | 12281 | return; |
12231 | 12282 | ||
12232 | if (!CHIP_IS_E3(bp) && | 12283 | if (CHIP_IS_E3(bp) && |
12233 | (REG_RD(bp, MISC_REG_RESET_REG_2) & | 12284 | (REG_RD(bp, MISC_REG_RESET_REG_2) & |
12234 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) { | 12285 | (MISC_REGISTERS_RESET_REG_2_XMAC))) { |
12286 | /* Check E3 XMAC */ | ||
12287 | /* | ||
12288 | * Note that link speed cannot be queried here, since it may be | ||
12289 | * zero while link is down. In case UMAC is active, LSS will | ||
12290 | * simply not be set | ||
12291 | */ | ||
12292 | mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; | ||
12293 | |||
12294 | /* Clear sticky bits (requires rising edge) */ | ||
12295 | REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); | ||
12296 | REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, | ||
12297 | XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | | ||
12298 | XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); | ||
12299 | if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) | ||
12300 | lss_status = 1; | ||
12301 | |||
12302 | bnx2x_analyze_link_error(params, vars, lss_status); | ||
12303 | } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & | ||
12304 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { | ||
12235 | /* Check E1X / E2 BMAC */ | 12305 | /* Check E1X / E2 BMAC */ |
12236 | u32 lss_status_reg; | 12306 | u32 lss_status_reg; |
12237 | u32 wb_data[2]; | 12307 | u32 wb_data[2]; |
@@ -12253,14 +12323,20 @@ static void bnx2x_check_half_open_conn(struct link_params *params, | |||
12253 | void bnx2x_period_func(struct link_params *params, struct link_vars *vars) | 12323 | void bnx2x_period_func(struct link_params *params, struct link_vars *vars) |
12254 | { | 12324 | { |
12255 | struct bnx2x *bp = params->bp; | 12325 | struct bnx2x *bp = params->bp; |
12326 | u16 phy_idx; | ||
12256 | if (!params) { | 12327 | if (!params) { |
12257 | DP(NETIF_MSG_LINK, "Ininitliazed params !\n"); | 12328 | DP(NETIF_MSG_LINK, "Uninitialized params !\n"); |
12258 | return; | 12329 | return; |
12259 | } | 12330 | } |
12260 | /* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x | 12331 | |
12261 | RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed, | 12332 | for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { |
12262 | REG_RD(bp, MISC_REG_RESET_REG_2)); */ | 12333 | if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { |
12263 | bnx2x_check_half_open_conn(params, vars); | 12334 | bnx2x_set_aer_mmd(params, ¶ms->phy[phy_idx]); |
12335 | bnx2x_check_half_open_conn(params, vars); | ||
12336 | break; | ||
12337 | } | ||
12338 | } | ||
12339 | |||
12264 | if (CHIP_IS_E3(bp)) | 12340 | if (CHIP_IS_E3(bp)) |
12265 | bnx2x_check_over_curr(params, vars); | 12341 | bnx2x_check_over_curr(params, vars); |
12266 | } | 12342 | } |
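Two hunks in this file rely on the same hardware idiom: the XMAC PFC XON indication in bnx2x_xmac_disable() and the Rx LSS sticky status in bnx2x_check_half_open_conn() are both cleared by a 0-to-1 transition, so the driver drives the controlling bit low and then high. A generic sketch of that clear-on-rising-edge idiom; the register macros are placeholders, not real bnx2x registers, and plain MMIO accessors stand in for REG_RD/REG_WR.

```c
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_REG_CLEAR_STATUS	0x100	/* placeholder offset */
#define EXAMPLE_CLEAR_BIT		(1 << 1)

/*
 * Latched status cleared by a rising edge: drive the clear bit low, then
 * high, so the hardware sees a 0 -> 1 transition on it.
 */
static void example_clear_latched_status(void __iomem *base)
{
	u32 val = readl(base + EXAMPLE_REG_CLEAR_STATUS);

	writel(val & ~EXAMPLE_CLEAR_BIT, base + EXAMPLE_REG_CLEAR_STATUS);
	writel(val | EXAMPLE_CLEAR_BIT, base + EXAMPLE_REG_CLEAR_STATUS);
}
```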
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h index 6a7708d5da37..c12db6da213e 100644 --- a/drivers/net/bnx2x/bnx2x_link.h +++ b/drivers/net/bnx2x/bnx2x_link.h | |||
@@ -145,6 +145,8 @@ struct bnx2x_phy { | |||
145 | #define FLAGS_SFP_NOT_APPROVED (1<<7) | 145 | #define FLAGS_SFP_NOT_APPROVED (1<<7) |
146 | #define FLAGS_MDC_MDIO_WA (1<<8) | 146 | #define FLAGS_MDC_MDIO_WA (1<<8) |
147 | #define FLAGS_DUMMY_READ (1<<9) | 147 | #define FLAGS_DUMMY_READ (1<<9) |
148 | #define FLAGS_MDC_MDIO_WA_B0 (1<<10) | ||
149 | #define FLAGS_TX_ERROR_CHECK (1<<12) | ||
148 | 150 | ||
149 | /* preemphasis values for the rx side */ | 151 | /* preemphasis values for the rx side */ |
150 | u16 rx_preemphasis[4]; | 152 | u16 rx_preemphasis[4]; |
@@ -276,7 +278,6 @@ struct link_vars { | |||
276 | #define PHY_PHYSICAL_LINK_FLAG (1<<2) | 278 | #define PHY_PHYSICAL_LINK_FLAG (1<<2) |
277 | #define PHY_HALF_OPEN_CONN_FLAG (1<<3) | 279 | #define PHY_HALF_OPEN_CONN_FLAG (1<<3) |
278 | #define PHY_OVER_CURRENT_FLAG (1<<4) | 280 | #define PHY_OVER_CURRENT_FLAG (1<<4) |
279 | #define PHY_TX_ERROR_CHECK_FLAG (1<<5) | ||
280 | 281 | ||
281 | u8 mac_type; | 282 | u8 mac_type; |
282 | #define MAC_TYPE_NONE 0 | 283 | #define MAC_TYPE_NONE 0 |
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 02461fef8751..27b5ecb11830 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -4771,9 +4771,11 @@ | |||
4771 | The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - | 4771 | The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - |
4772 | header pointer. */ | 4772 | header pointer. */ |
4773 | #define UCM_REG_XX_TABLE 0xe0300 | 4773 | #define UCM_REG_XX_TABLE 0xe0300 |
4774 | #define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) | ||
4774 | #define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) | 4775 | #define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) |
4775 | #define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) | 4776 | #define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) |
4776 | #define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) | 4777 | #define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) |
4778 | #define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8) | ||
4777 | #define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) | 4779 | #define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) |
4778 | #define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) | 4780 | #define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) |
4779 | #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) | 4781 | #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) |
@@ -5622,8 +5624,9 @@ | |||
5622 | #define EMAC_MDIO_COMM_START_BUSY (1L<<29) | 5624 | #define EMAC_MDIO_COMM_START_BUSY (1L<<29) |
5623 | #define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) | 5625 | #define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) |
5624 | #define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) | 5626 | #define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) |
5625 | #define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16) | 5627 | #define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) |
5626 | #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 | 5628 | #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 |
5629 | #define EMAC_MDIO_STATUS_10MB (1L<<1) | ||
5627 | #define EMAC_MODE_25G_MODE (1L<<5) | 5630 | #define EMAC_MODE_25G_MODE (1L<<5) |
5628 | #define EMAC_MODE_HALF_DUPLEX (1L<<1) | 5631 | #define EMAC_MODE_HALF_DUPLEX (1L<<1) |
5629 | #define EMAC_MODE_PORT_GMII (2L<<2) | 5632 | #define EMAC_MODE_PORT_GMII (2L<<2) |
@@ -5634,6 +5637,7 @@ | |||
5634 | #define EMAC_REG_EMAC_MAC_MATCH 0x10 | 5637 | #define EMAC_REG_EMAC_MAC_MATCH 0x10 |
5635 | #define EMAC_REG_EMAC_MDIO_COMM 0xac | 5638 | #define EMAC_REG_EMAC_MDIO_COMM 0xac |
5636 | #define EMAC_REG_EMAC_MDIO_MODE 0xb4 | 5639 | #define EMAC_REG_EMAC_MDIO_MODE 0xb4 |
5640 | #define EMAC_REG_EMAC_MDIO_STATUS 0xb0 | ||
5637 | #define EMAC_REG_EMAC_MODE 0x0 | 5641 | #define EMAC_REG_EMAC_MODE 0x0 |
5638 | #define EMAC_REG_EMAC_RX_MODE 0xc8 | 5642 | #define EMAC_REG_EMAC_RX_MODE 0xc8 |
5639 | #define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c | 5643 | #define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index c5f0f04219f3..5548d464261a 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -838,6 +838,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
838 | 838 | ||
839 | /* Disable all the interrupts */ | 839 | /* Disable all the interrupts */ |
840 | ew32(IMC, 0xFFFFFFFF); | 840 | ew32(IMC, 0xFFFFFFFF); |
841 | E1000_WRITE_FLUSH(); | ||
841 | msleep(10); | 842 | msleep(10); |
842 | 843 | ||
843 | /* Test each interrupt */ | 844 | /* Test each interrupt */ |
@@ -856,6 +857,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
856 | adapter->test_icr = 0; | 857 | adapter->test_icr = 0; |
857 | ew32(IMC, mask); | 858 | ew32(IMC, mask); |
858 | ew32(ICS, mask); | 859 | ew32(ICS, mask); |
860 | E1000_WRITE_FLUSH(); | ||
859 | msleep(10); | 861 | msleep(10); |
860 | 862 | ||
861 | if (adapter->test_icr & mask) { | 863 | if (adapter->test_icr & mask) { |
@@ -873,6 +875,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
873 | adapter->test_icr = 0; | 875 | adapter->test_icr = 0; |
874 | ew32(IMS, mask); | 876 | ew32(IMS, mask); |
875 | ew32(ICS, mask); | 877 | ew32(ICS, mask); |
878 | E1000_WRITE_FLUSH(); | ||
876 | msleep(10); | 879 | msleep(10); |
877 | 880 | ||
878 | if (!(adapter->test_icr & mask)) { | 881 | if (!(adapter->test_icr & mask)) { |
@@ -890,6 +893,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
890 | adapter->test_icr = 0; | 893 | adapter->test_icr = 0; |
891 | ew32(IMC, ~mask & 0x00007FFF); | 894 | ew32(IMC, ~mask & 0x00007FFF); |
892 | ew32(ICS, ~mask & 0x00007FFF); | 895 | ew32(ICS, ~mask & 0x00007FFF); |
896 | E1000_WRITE_FLUSH(); | ||
893 | msleep(10); | 897 | msleep(10); |
894 | 898 | ||
895 | if (adapter->test_icr) { | 899 | if (adapter->test_icr) { |
@@ -901,6 +905,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
901 | 905 | ||
902 | /* Disable all the interrupts */ | 906 | /* Disable all the interrupts */ |
903 | ew32(IMC, 0xFFFFFFFF); | 907 | ew32(IMC, 0xFFFFFFFF); |
908 | E1000_WRITE_FLUSH(); | ||
904 | msleep(10); | 909 | msleep(10); |
905 | 910 | ||
906 | /* Unhook test interrupt handler */ | 911 | /* Unhook test interrupt handler */ |
@@ -1394,6 +1399,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1394 | if (unlikely(++k == txdr->count)) k = 0; | 1399 | if (unlikely(++k == txdr->count)) k = 0; |
1395 | } | 1400 | } |
1396 | ew32(TDT, k); | 1401 | ew32(TDT, k); |
1402 | E1000_WRITE_FLUSH(); | ||
1397 | msleep(200); | 1403 | msleep(200); |
1398 | time = jiffies; /* set the start time for the receive */ | 1404 | time = jiffies; /* set the start time for the receive */ |
1399 | good_cnt = 0; | 1405 | good_cnt = 0; |
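Every E1000_WRITE_FLUSH()/e1e_flush() call added in these e1000/e1000e hunks serves the same purpose: MMIO writes over PCI are posted, so a dummy read from the device is used to push the write out before the driver starts its fixed delay (the ich8lan hunk below even notes the one spot where such a flush must be avoided). A generic sketch of the pattern, with placeholder register names and offsets.

```c
#include <linux/io.h>
#include <linux/delay.h>

#define EXAMPLE_REG_CTRL	0x0000	/* placeholder register offsets */
#define EXAMPLE_REG_STATUS	0x0008

/*
 * "Write, flush, then wait": read back from the device so the posted MMIO
 * write reaches the hardware before the required settling delay starts.
 */
static void example_mask_irqs_and_settle(void __iomem *hw_addr)
{
	writel(0xffffffff, hw_addr + EXAMPLE_REG_CTRL);

	(void)readl(hw_addr + EXAMPLE_REG_STATUS);	/* flush posted write */

	usleep_range(10000, 20000);
}
```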
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 1698622af434..8545c7aa93eb 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -446,6 +446,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
446 | /* Must reset the PHY before resetting the MAC */ | 446 | /* Must reset the PHY before resetting the MAC */ |
447 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 447 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
448 | ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); | 448 | ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); |
449 | E1000_WRITE_FLUSH(); | ||
449 | msleep(5); | 450 | msleep(5); |
450 | } | 451 | } |
451 | 452 | ||
@@ -3752,6 +3753,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw) | |||
3752 | /* Clear SK and CS */ | 3753 | /* Clear SK and CS */ |
3753 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 3754 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
3754 | ew32(EECD, eecd); | 3755 | ew32(EECD, eecd); |
3756 | E1000_WRITE_FLUSH(); | ||
3755 | udelay(1); | 3757 | udelay(1); |
3756 | } | 3758 | } |
3757 | 3759 | ||
@@ -3824,6 +3826,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw) | |||
3824 | eecd &= ~E1000_EECD_SK; /* Lower SCK */ | 3826 | eecd &= ~E1000_EECD_SK; /* Lower SCK */ |
3825 | 3827 | ||
3826 | ew32(EECD, eecd); | 3828 | ew32(EECD, eecd); |
3829 | E1000_WRITE_FLUSH(); | ||
3827 | 3830 | ||
3828 | udelay(hw->eeprom.delay_usec); | 3831 | udelay(hw->eeprom.delay_usec); |
3829 | } else if (hw->eeprom.type == e1000_eeprom_microwire) { | 3832 | } else if (hw->eeprom.type == e1000_eeprom_microwire) { |
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index c0ecb2d9fdb7..e4f42257c24c 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -1313,6 +1313,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
1313 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | 1313 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & |
1314 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | 1314 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; |
1315 | ew32(KMRNCTRLSTA, kmrnctrlsta); | 1315 | ew32(KMRNCTRLSTA, kmrnctrlsta); |
1316 | e1e_flush(); | ||
1316 | 1317 | ||
1317 | udelay(2); | 1318 | udelay(2); |
1318 | 1319 | ||
@@ -1347,6 +1348,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, | |||
1347 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | 1348 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & |
1348 | E1000_KMRNCTRLSTA_OFFSET) | data; | 1349 | E1000_KMRNCTRLSTA_OFFSET) | data; |
1349 | ew32(KMRNCTRLSTA, kmrnctrlsta); | 1350 | ew32(KMRNCTRLSTA, kmrnctrlsta); |
1351 | e1e_flush(); | ||
1350 | 1352 | ||
1351 | udelay(2); | 1353 | udelay(2); |
1352 | 1354 | ||
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index cb1a3623253e..06d88f316dce 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -28,8 +28,8 @@ | |||
28 | 28 | ||
29 | /* ethtool support for e1000 */ | 29 | /* ethtool support for e1000 */ |
30 | 30 | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/netdevice.h> | 31 | #include <linux/netdevice.h> |
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/ethtool.h> | 33 | #include <linux/ethtool.h> |
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
@@ -964,6 +964,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
964 | 964 | ||
965 | /* Disable all the interrupts */ | 965 | /* Disable all the interrupts */ |
966 | ew32(IMC, 0xFFFFFFFF); | 966 | ew32(IMC, 0xFFFFFFFF); |
967 | e1e_flush(); | ||
967 | usleep_range(10000, 20000); | 968 | usleep_range(10000, 20000); |
968 | 969 | ||
969 | /* Test each interrupt */ | 970 | /* Test each interrupt */ |
@@ -996,6 +997,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
996 | adapter->test_icr = 0; | 997 | adapter->test_icr = 0; |
997 | ew32(IMC, mask); | 998 | ew32(IMC, mask); |
998 | ew32(ICS, mask); | 999 | ew32(ICS, mask); |
1000 | e1e_flush(); | ||
999 | usleep_range(10000, 20000); | 1001 | usleep_range(10000, 20000); |
1000 | 1002 | ||
1001 | if (adapter->test_icr & mask) { | 1003 | if (adapter->test_icr & mask) { |
@@ -1014,6 +1016,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
1014 | adapter->test_icr = 0; | 1016 | adapter->test_icr = 0; |
1015 | ew32(IMS, mask); | 1017 | ew32(IMS, mask); |
1016 | ew32(ICS, mask); | 1018 | ew32(ICS, mask); |
1019 | e1e_flush(); | ||
1017 | usleep_range(10000, 20000); | 1020 | usleep_range(10000, 20000); |
1018 | 1021 | ||
1019 | if (!(adapter->test_icr & mask)) { | 1022 | if (!(adapter->test_icr & mask)) { |
@@ -1032,6 +1035,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
1032 | adapter->test_icr = 0; | 1035 | adapter->test_icr = 0; |
1033 | ew32(IMC, ~mask & 0x00007FFF); | 1036 | ew32(IMC, ~mask & 0x00007FFF); |
1034 | ew32(ICS, ~mask & 0x00007FFF); | 1037 | ew32(ICS, ~mask & 0x00007FFF); |
1038 | e1e_flush(); | ||
1035 | usleep_range(10000, 20000); | 1039 | usleep_range(10000, 20000); |
1036 | 1040 | ||
1037 | if (adapter->test_icr) { | 1041 | if (adapter->test_icr) { |
@@ -1043,6 +1047,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
1043 | 1047 | ||
1044 | /* Disable all the interrupts */ | 1048 | /* Disable all the interrupts */ |
1045 | ew32(IMC, 0xFFFFFFFF); | 1049 | ew32(IMC, 0xFFFFFFFF); |
1050 | e1e_flush(); | ||
1046 | usleep_range(10000, 20000); | 1051 | usleep_range(10000, 20000); |
1047 | 1052 | ||
1048 | /* Unhook test interrupt handler */ | 1053 | /* Unhook test interrupt handler */ |
@@ -1276,6 +1281,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1276 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1281 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1277 | 1282 | ||
1278 | ew32(CTRL, ctrl_reg); | 1283 | ew32(CTRL, ctrl_reg); |
1284 | e1e_flush(); | ||
1279 | udelay(500); | 1285 | udelay(500); |
1280 | 1286 | ||
1281 | return 0; | 1287 | return 0; |
@@ -1418,6 +1424,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) | |||
1418 | */ | 1424 | */ |
1419 | #define E1000_SERDES_LB_ON 0x410 | 1425 | #define E1000_SERDES_LB_ON 0x410 |
1420 | ew32(SCTL, E1000_SERDES_LB_ON); | 1426 | ew32(SCTL, E1000_SERDES_LB_ON); |
1427 | e1e_flush(); | ||
1421 | usleep_range(10000, 20000); | 1428 | usleep_range(10000, 20000); |
1422 | 1429 | ||
1423 | return 0; | 1430 | return 0; |
@@ -1513,6 +1520,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) | |||
1513 | hw->phy.media_type == e1000_media_type_internal_serdes) { | 1520 | hw->phy.media_type == e1000_media_type_internal_serdes) { |
1514 | #define E1000_SERDES_LB_OFF 0x400 | 1521 | #define E1000_SERDES_LB_OFF 0x400 |
1515 | ew32(SCTL, E1000_SERDES_LB_OFF); | 1522 | ew32(SCTL, E1000_SERDES_LB_OFF); |
1523 | e1e_flush(); | ||
1516 | usleep_range(10000, 20000); | 1524 | usleep_range(10000, 20000); |
1517 | break; | 1525 | break; |
1518 | } | 1526 | } |
@@ -1592,6 +1600,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1592 | k = 0; | 1600 | k = 0; |
1593 | } | 1601 | } |
1594 | ew32(TDT, k); | 1602 | ew32(TDT, k); |
1603 | e1e_flush(); | ||
1595 | msleep(200); | 1604 | msleep(200); |
1596 | time = jiffies; /* set the start time for the receive */ | 1605 | time = jiffies; /* set the start time for the receive */ |
1597 | good_cnt = 0; | 1606 | good_cnt = 0; |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index c1752124f3cd..4e36978b8fd8 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -283,6 +283,7 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) | |||
283 | ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; | 283 | ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; |
284 | ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; | 284 | ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; |
285 | ew32(CTRL, ctrl); | 285 | ew32(CTRL, ctrl); |
286 | e1e_flush(); | ||
286 | udelay(10); | 287 | udelay(10); |
287 | ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; | 288 | ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; |
288 | ew32(CTRL, ctrl); | 289 | ew32(CTRL, ctrl); |
@@ -1230,9 +1231,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) | |||
1230 | ew32(CTRL, reg); | 1231 | ew32(CTRL, reg); |
1231 | 1232 | ||
1232 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); | 1233 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); |
1234 | e1e_flush(); | ||
1233 | udelay(20); | 1235 | udelay(20); |
1234 | ew32(CTRL, ctrl_reg); | 1236 | ew32(CTRL, ctrl_reg); |
1235 | ew32(CTRL_EXT, ctrl_ext); | 1237 | ew32(CTRL_EXT, ctrl_ext); |
1238 | e1e_flush(); | ||
1236 | udelay(20); | 1239 | udelay(20); |
1237 | 1240 | ||
1238 | out: | 1241 | out: |
@@ -2134,8 +2137,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
2134 | 2137 | ||
2135 | ret_val = 0; | 2138 | ret_val = 0; |
2136 | for (i = 0; i < words; i++) { | 2139 | for (i = 0; i < words; i++) { |
2137 | if ((dev_spec->shadow_ram) && | 2140 | if (dev_spec->shadow_ram[offset+i].modified) { |
2138 | (dev_spec->shadow_ram[offset+i].modified)) { | ||
2139 | data[i] = dev_spec->shadow_ram[offset+i].value; | 2141 | data[i] = dev_spec->shadow_ram[offset+i].value; |
2140 | } else { | 2142 | } else { |
2141 | ret_val = e1000_read_flash_word_ich8lan(hw, | 2143 | ret_val = e1000_read_flash_word_ich8lan(hw, |
@@ -3090,6 +3092,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
3090 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 3092 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
3091 | e_dbg("Issuing a global reset to ich8lan\n"); | 3093 | e_dbg("Issuing a global reset to ich8lan\n"); |
3092 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 3094 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
3095 | /* cannot issue a flush here because it hangs the hardware */ | ||
3093 | msleep(20); | 3096 | msleep(20); |
3094 | 3097 | ||
3095 | if (!ret_val) | 3098 | if (!ret_val) |
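The read_nvm change above also drops a needless NULL test: shadow_ram is an embedded array in the device-specific structure, so only the per-word "modified" flag matters. A stripped-down sketch of that read path, with simplified stand-in types (read_flash_word here is a hypothetical accessor, not the driver's):

#include <linux/types.h>

struct shadow_word {
        bool modified;
        u16  value;
};

static int read_nvm_words(struct shadow_word *shadow, u16 offset,
                          u16 words, u16 *data,
                          int (*read_flash_word)(u16 off, u16 *val))
{
        int ret = 0;
        u16 i;

        for (i = 0; i < words; i++) {
                /* Prefer a word the driver has already updated in RAM. */
                if (shadow[offset + i].modified) {
                        data[i] = shadow[offset + i].value;
                } else {
                        ret = read_flash_word(offset + i, &data[i]);
                        if (ret)
                                break;
                }
        }
        return ret;
}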
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 65580b405942..7898a67d6505 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1986,6 +1986,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1986 | /* Clear SK and CS */ | 1986 | /* Clear SK and CS */ |
1987 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 1987 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
1988 | ew32(EECD, eecd); | 1988 | ew32(EECD, eecd); |
1989 | e1e_flush(); | ||
1989 | udelay(1); | 1990 | udelay(1); |
1990 | 1991 | ||
1991 | /* | 1992 | /* |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 4353ad56cf16..ab4be80f7ab5 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -31,12 +31,12 @@ | |||
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
36 | #include <linux/vmalloc.h> | 35 | #include <linux/vmalloc.h> |
37 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
38 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
39 | #include <linux/netdevice.h> | 38 | #include <linux/netdevice.h> |
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/tcp.h> | 40 | #include <linux/tcp.h> |
41 | #include <linux/ipv6.h> | 41 | #include <linux/ipv6.h> |
42 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 2a6ee13285b1..8666476cb9be 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -537,6 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, | |||
537 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | 537 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & |
538 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | 538 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; |
539 | ew32(KMRNCTRLSTA, kmrnctrlsta); | 539 | ew32(KMRNCTRLSTA, kmrnctrlsta); |
540 | e1e_flush(); | ||
540 | 541 | ||
541 | udelay(2); | 542 | udelay(2); |
542 | 543 | ||
@@ -609,6 +610,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, | |||
609 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | 610 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & |
610 | E1000_KMRNCTRLSTA_OFFSET) | data; | 611 | E1000_KMRNCTRLSTA_OFFSET) | data; |
611 | ew32(KMRNCTRLSTA, kmrnctrlsta); | 612 | ew32(KMRNCTRLSTA, kmrnctrlsta); |
613 | e1e_flush(); | ||
612 | 614 | ||
613 | udelay(2); | 615 | udelay(2); |
614 | 616 | ||
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c index 1c97861596f0..f67b8aebc89c 100644 --- a/drivers/net/gianfar_ptp.c +++ b/drivers/net/gianfar_ptp.c | |||
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects) | |||
193 | /* Caller must hold etsects->lock. */ | 193 | /* Caller must hold etsects->lock. */ |
194 | static void set_fipers(struct etsects *etsects) | 194 | static void set_fipers(struct etsects *etsects) |
195 | { | 195 | { |
196 | u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); | 196 | set_alarm(etsects); |
197 | |||
198 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE)); | ||
199 | gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); | ||
200 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); | 197 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); |
201 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); | 198 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); |
202 | set_alarm(etsects); | ||
203 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE); | ||
204 | } | 199 | } |
205 | 200 | ||
206 | /* | 201 | /* |
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) | |||
511 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); | 506 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); |
512 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); | 507 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); |
513 | set_alarm(etsects); | 508 | set_alarm(etsects); |
514 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); | 509 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD); |
515 | 510 | ||
516 | spin_unlock_irqrestore(&etsects->lock, flags); | 511 | spin_unlock_irqrestore(&etsects->lock, flags); |
517 | 512 | ||
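The gianfar_ptp change removes the TE (timer enable) toggling and the prescaler rewrite from set_fipers(); with the FRD bit now set in tmr_ctrl at probe time, realigning the alarm and rewriting the two FIPER registers is apparently sufficient. The resulting helper, reconstructed from the interleaved hunk above for readability:

/* Caller must hold etsects->lock. */
static void set_fipers(struct etsects *etsects)
{
        set_alarm(etsects);
        gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
        gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
}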
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index 7dcd65cede56..40407124e722 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c | |||
@@ -285,6 +285,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) | |||
285 | /* Clear SK and CS */ | 285 | /* Clear SK and CS */ |
286 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 286 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
287 | wr32(E1000_EECD, eecd); | 287 | wr32(E1000_EECD, eecd); |
288 | wrfl(); | ||
288 | udelay(1); | 289 | udelay(1); |
289 | timeout = NVM_MAX_RETRY_SPI; | 290 | timeout = NVM_MAX_RETRY_SPI; |
290 | 291 | ||
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index ff244ce803ce..414b0225be89 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -1225,6 +1225,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1225 | 1225 | ||
1226 | /* Disable all the interrupts */ | 1226 | /* Disable all the interrupts */ |
1227 | wr32(E1000_IMC, ~0); | 1227 | wr32(E1000_IMC, ~0); |
1228 | wrfl(); | ||
1228 | msleep(10); | 1229 | msleep(10); |
1229 | 1230 | ||
1230 | /* Define all writable bits for ICS */ | 1231 | /* Define all writable bits for ICS */ |
@@ -1268,6 +1269,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1268 | 1269 | ||
1269 | wr32(E1000_IMC, mask); | 1270 | wr32(E1000_IMC, mask); |
1270 | wr32(E1000_ICS, mask); | 1271 | wr32(E1000_ICS, mask); |
1272 | wrfl(); | ||
1271 | msleep(10); | 1273 | msleep(10); |
1272 | 1274 | ||
1273 | if (adapter->test_icr & mask) { | 1275 | if (adapter->test_icr & mask) { |
@@ -1289,6 +1291,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1289 | 1291 | ||
1290 | wr32(E1000_IMS, mask); | 1292 | wr32(E1000_IMS, mask); |
1291 | wr32(E1000_ICS, mask); | 1293 | wr32(E1000_ICS, mask); |
1294 | wrfl(); | ||
1292 | msleep(10); | 1295 | msleep(10); |
1293 | 1296 | ||
1294 | if (!(adapter->test_icr & mask)) { | 1297 | if (!(adapter->test_icr & mask)) { |
@@ -1310,6 +1313,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1310 | 1313 | ||
1311 | wr32(E1000_IMC, ~mask); | 1314 | wr32(E1000_IMC, ~mask); |
1312 | wr32(E1000_ICS, ~mask); | 1315 | wr32(E1000_ICS, ~mask); |
1316 | wrfl(); | ||
1313 | msleep(10); | 1317 | msleep(10); |
1314 | 1318 | ||
1315 | if (adapter->test_icr & mask) { | 1319 | if (adapter->test_icr & mask) { |
@@ -1321,6 +1325,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1321 | 1325 | ||
1322 | /* Disable all the interrupts */ | 1326 | /* Disable all the interrupts */ |
1323 | wr32(E1000_IMC, ~0); | 1327 | wr32(E1000_IMC, ~0); |
1328 | wrfl(); | ||
1324 | msleep(10); | 1329 | msleep(10); |
1325 | 1330 | ||
1326 | /* Unhook test interrupt handler */ | 1331 | /* Unhook test interrupt handler */ |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index dc599059512a..40d4c405fd7e 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -1052,6 +1052,7 @@ msi_only: | |||
1052 | kfree(adapter->vf_data); | 1052 | kfree(adapter->vf_data); |
1053 | adapter->vf_data = NULL; | 1053 | adapter->vf_data = NULL; |
1054 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); | 1054 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); |
1055 | wrfl(); | ||
1055 | msleep(100); | 1056 | msleep(100); |
1056 | dev_info(&adapter->pdev->dev, "IOV Disabled\n"); | 1057 | dev_info(&adapter->pdev->dev, "IOV Disabled\n"); |
1057 | } | 1058 | } |
@@ -2022,7 +2023,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
2022 | 2023 | ||
2023 | if (hw->bus.func == 0) | 2024 | if (hw->bus.func == 0) |
2024 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | 2025 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); |
2025 | else if (hw->mac.type == e1000_82580) | 2026 | else if (hw->mac.type >= e1000_82580) |
2026 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + | 2027 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + |
2027 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, | 2028 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, |
2028 | &eeprom_data); | 2029 | &eeprom_data); |
@@ -2198,6 +2199,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
2198 | kfree(adapter->vf_data); | 2199 | kfree(adapter->vf_data); |
2199 | adapter->vf_data = NULL; | 2200 | adapter->vf_data = NULL; |
2200 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); | 2201 | wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); |
2202 | wrfl(); | ||
2201 | msleep(100); | 2203 | msleep(100); |
2202 | dev_info(&pdev->dev, "IOV Disabled\n"); | 2204 | dev_info(&pdev->dev, "IOV Disabled\n"); |
2203 | } | 2205 | } |
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 1330c8e932da..40ed066e3ef4 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
@@ -1226,6 +1226,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) | |||
1226 | /* disable transmits */ | 1226 | /* disable transmits */ |
1227 | txdctl = er32(TXDCTL(0)); | 1227 | txdctl = er32(TXDCTL(0)); |
1228 | ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); | 1228 | ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); |
1229 | e1e_flush(); | ||
1229 | msleep(10); | 1230 | msleep(10); |
1230 | 1231 | ||
1231 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 1232 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
@@ -1306,6 +1307,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) | |||
1306 | /* disable receives */ | 1307 | /* disable receives */ |
1307 | rxdctl = er32(RXDCTL(0)); | 1308 | rxdctl = er32(RXDCTL(0)); |
1308 | ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); | 1309 | ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); |
1310 | e1e_flush(); | ||
1309 | msleep(10); | 1311 | msleep(10); |
1310 | 1312 | ||
1311 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); | 1313 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); |
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 954f6e938fb7..8b1c3484d271 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -2405,8 +2405,6 @@ static int __init smsc_superio_lpc(unsigned short cfg_base) | |||
2405 | * addresses making a subsystem device table necessary. | 2405 | * addresses making a subsystem device table necessary. |
2406 | */ | 2406 | */ |
2407 | #ifdef CONFIG_PCI | 2407 | #ifdef CONFIG_PCI |
2408 | #define PCIID_VENDOR_INTEL 0x8086 | ||
2409 | #define PCIID_VENDOR_ALI 0x10b9 | ||
2410 | static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { | 2408 | static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { |
2411 | /* | 2409 | /* |
2412 | * Subsystems needing entries: | 2410 | * Subsystems needing entries: |
@@ -2416,7 +2414,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2416 | */ | 2414 | */ |
2417 | { | 2415 | { |
2418 | /* Guessed entry */ | 2416 | /* Guessed entry */ |
2419 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ | 2417 | .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ |
2420 | .device = 0x24cc, | 2418 | .device = 0x24cc, |
2421 | .subvendor = 0x103c, | 2419 | .subvendor = 0x103c, |
2422 | .subdevice = 0x08bc, | 2420 | .subdevice = 0x08bc, |
@@ -2429,7 +2427,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2429 | .name = "HP nx5000 family", | 2427 | .name = "HP nx5000 family", |
2430 | }, | 2428 | }, |
2431 | { | 2429 | { |
2432 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ | 2430 | .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ |
2433 | .device = 0x24cc, | 2431 | .device = 0x24cc, |
2434 | .subvendor = 0x103c, | 2432 | .subvendor = 0x103c, |
2435 | .subdevice = 0x088c, | 2433 | .subdevice = 0x088c, |
@@ -2443,7 +2441,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2443 | .name = "HP nc8000 family", | 2441 | .name = "HP nc8000 family", |
2444 | }, | 2442 | }, |
2445 | { | 2443 | { |
2446 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ | 2444 | .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ |
2447 | .device = 0x24cc, | 2445 | .device = 0x24cc, |
2448 | .subvendor = 0x103c, | 2446 | .subvendor = 0x103c, |
2449 | .subdevice = 0x0890, | 2447 | .subdevice = 0x0890, |
@@ -2456,7 +2454,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2456 | .name = "HP nc6000 family", | 2454 | .name = "HP nc6000 family", |
2457 | }, | 2455 | }, |
2458 | { | 2456 | { |
2459 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ | 2457 | .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ |
2460 | .device = 0x24cc, | 2458 | .device = 0x24cc, |
2461 | .subvendor = 0x0e11, | 2459 | .subvendor = 0x0e11, |
2462 | .subdevice = 0x0860, | 2460 | .subdevice = 0x0860, |
@@ -2471,7 +2469,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2471 | }, | 2469 | }, |
2472 | { | 2470 | { |
2473 | /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */ | 2471 | /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */ |
2474 | .vendor = PCIID_VENDOR_INTEL, | 2472 | .vendor = PCI_VENDOR_ID_INTEL, |
2475 | .device = 0x24c0, | 2473 | .device = 0x24c0, |
2476 | .subvendor = 0x1179, | 2474 | .subvendor = 0x1179, |
2477 | .subdevice = 0xffff, /* 0xffff is "any" */ | 2475 | .subdevice = 0xffff, /* 0xffff is "any" */ |
@@ -2484,7 +2482,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2484 | .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge", | 2482 | .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge", |
2485 | }, | 2483 | }, |
2486 | { | 2484 | { |
2487 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */ | 2485 | .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */ |
2488 | .device = 0x248c, | 2486 | .device = 0x248c, |
2489 | .subvendor = 0x1179, | 2487 | .subvendor = 0x1179, |
2490 | .subdevice = 0xffff, /* 0xffff is "any" */ | 2488 | .subdevice = 0xffff, /* 0xffff is "any" */ |
@@ -2498,7 +2496,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2498 | }, | 2496 | }, |
2499 | { | 2497 | { |
2500 | /* 82801DBM (ICH4-M) LPC Interface Bridge */ | 2498 | /* 82801DBM (ICH4-M) LPC Interface Bridge */ |
2501 | .vendor = PCIID_VENDOR_INTEL, | 2499 | .vendor = PCI_VENDOR_ID_INTEL, |
2502 | .device = 0x24cc, | 2500 | .device = 0x24cc, |
2503 | .subvendor = 0x1179, | 2501 | .subvendor = 0x1179, |
2504 | .subdevice = 0xffff, /* 0xffff is "any" */ | 2502 | .subdevice = 0xffff, /* 0xffff is "any" */ |
@@ -2512,7 +2510,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini | |||
2512 | }, | 2510 | }, |
2513 | { | 2511 | { |
2514 | /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */ | 2512 | /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */ |
2515 | .vendor = PCIID_VENDOR_ALI, | 2513 | .vendor = PCI_VENDOR_ID_AL, |
2516 | .device = 0x1533, | 2514 | .device = 0x1533, |
2517 | .subvendor = 0x1179, | 2515 | .subvendor = 0x1179, |
2518 | .subdevice = 0xffff, /* 0xffff is "any" */ | 2516 | .subdevice = 0xffff, /* 0xffff is "any" */ |
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index c982ab9f9005..38b362b67857 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c | |||
@@ -57,6 +57,7 @@ ixgb_raise_clock(struct ixgb_hw *hw, | |||
57 | */ | 57 | */ |
58 | *eecd_reg = *eecd_reg | IXGB_EECD_SK; | 58 | *eecd_reg = *eecd_reg | IXGB_EECD_SK; |
59 | IXGB_WRITE_REG(hw, EECD, *eecd_reg); | 59 | IXGB_WRITE_REG(hw, EECD, *eecd_reg); |
60 | IXGB_WRITE_FLUSH(hw); | ||
60 | udelay(50); | 61 | udelay(50); |
61 | } | 62 | } |
62 | 63 | ||
@@ -75,6 +76,7 @@ ixgb_lower_clock(struct ixgb_hw *hw, | |||
75 | */ | 76 | */ |
76 | *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; | 77 | *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; |
77 | IXGB_WRITE_REG(hw, EECD, *eecd_reg); | 78 | IXGB_WRITE_REG(hw, EECD, *eecd_reg); |
79 | IXGB_WRITE_FLUSH(hw); | ||
78 | udelay(50); | 80 | udelay(50); |
79 | } | 81 | } |
80 | 82 | ||
@@ -112,6 +114,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, | |||
112 | eecd_reg |= IXGB_EECD_DI; | 114 | eecd_reg |= IXGB_EECD_DI; |
113 | 115 | ||
114 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 116 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
117 | IXGB_WRITE_FLUSH(hw); | ||
115 | 118 | ||
116 | udelay(50); | 119 | udelay(50); |
117 | 120 | ||
@@ -206,21 +209,25 @@ ixgb_standby_eeprom(struct ixgb_hw *hw) | |||
206 | /* Deselect EEPROM */ | 209 | /* Deselect EEPROM */ |
207 | eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); | 210 | eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); |
208 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 211 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
212 | IXGB_WRITE_FLUSH(hw); | ||
209 | udelay(50); | 213 | udelay(50); |
210 | 214 | ||
211 | /* Clock high */ | 215 | /* Clock high */ |
212 | eecd_reg |= IXGB_EECD_SK; | 216 | eecd_reg |= IXGB_EECD_SK; |
213 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 217 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
218 | IXGB_WRITE_FLUSH(hw); | ||
214 | udelay(50); | 219 | udelay(50); |
215 | 220 | ||
216 | /* Select EEPROM */ | 221 | /* Select EEPROM */ |
217 | eecd_reg |= IXGB_EECD_CS; | 222 | eecd_reg |= IXGB_EECD_CS; |
218 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 223 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
224 | IXGB_WRITE_FLUSH(hw); | ||
219 | udelay(50); | 225 | udelay(50); |
220 | 226 | ||
221 | /* Clock low */ | 227 | /* Clock low */ |
222 | eecd_reg &= ~IXGB_EECD_SK; | 228 | eecd_reg &= ~IXGB_EECD_SK; |
223 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 229 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
230 | IXGB_WRITE_FLUSH(hw); | ||
224 | udelay(50); | 231 | udelay(50); |
225 | } | 232 | } |
226 | 233 | ||
@@ -239,11 +246,13 @@ ixgb_clock_eeprom(struct ixgb_hw *hw) | |||
239 | /* Rising edge of clock */ | 246 | /* Rising edge of clock */ |
240 | eecd_reg |= IXGB_EECD_SK; | 247 | eecd_reg |= IXGB_EECD_SK; |
241 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 248 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
249 | IXGB_WRITE_FLUSH(hw); | ||
242 | udelay(50); | 250 | udelay(50); |
243 | 251 | ||
244 | /* Falling edge of clock */ | 252 | /* Falling edge of clock */ |
245 | eecd_reg &= ~IXGB_EECD_SK; | 253 | eecd_reg &= ~IXGB_EECD_SK; |
246 | IXGB_WRITE_REG(hw, EECD, eecd_reg); | 254 | IXGB_WRITE_REG(hw, EECD, eecd_reg); |
255 | IXGB_WRITE_FLUSH(hw); | ||
247 | udelay(50); | 256 | udelay(50); |
248 | } | 257 | } |
249 | 258 | ||
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 6cb2e42ff4c1..3d61a9e4faf7 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c | |||
@@ -149,6 +149,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw) | |||
149 | */ | 149 | */ |
150 | IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); | 150 | IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); |
151 | IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); | 151 | IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); |
152 | IXGB_WRITE_FLUSH(hw); | ||
152 | msleep(IXGB_DELAY_BEFORE_RESET); | 153 | msleep(IXGB_DELAY_BEFORE_RESET); |
153 | 154 | ||
154 | /* Issue a global reset to the MAC. This will reset the chip's | 155 | /* Issue a global reset to the MAC. This will reset the chip's |
@@ -1220,6 +1221,7 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw) | |||
1220 | ctrl &= ~IXGB_CTRL0_SDP2; | 1221 | ctrl &= ~IXGB_CTRL0_SDP2; |
1221 | ctrl |= IXGB_CTRL0_SDP3; | 1222 | ctrl |= IXGB_CTRL0_SDP3; |
1222 | IXGB_WRITE_REG(hw, CTRL0, ctrl); | 1223 | IXGB_WRITE_REG(hw, CTRL0, ctrl); |
1224 | IXGB_WRITE_FLUSH(hw); | ||
1223 | 1225 | ||
1224 | /* SerDes needs extra delay */ | 1226 | /* SerDes needs extra delay */ |
1225 | msleep(IXGB_SUN_PHY_RESET_DELAY); | 1227 | msleep(IXGB_SUN_PHY_RESET_DELAY); |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 3b3dd4df4c5c..34f30ec79c2e 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -213,6 +213,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) | |||
213 | switch (hw->phy.type) { | 213 | switch (hw->phy.type) { |
214 | case ixgbe_phy_tn: | 214 | case ixgbe_phy_tn: |
215 | phy->ops.check_link = &ixgbe_check_phy_link_tnx; | 215 | phy->ops.check_link = &ixgbe_check_phy_link_tnx; |
216 | phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; | ||
216 | phy->ops.get_firmware_version = | 217 | phy->ops.get_firmware_version = |
217 | &ixgbe_get_phy_firmware_version_tnx; | 218 | &ixgbe_get_phy_firmware_version_tnx; |
218 | break; | 219 | break; |
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 777051f54e53..fc1375f26fe5 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -2632,6 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) | |||
2632 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; | 2632 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; |
2633 | autoc_reg |= IXGBE_AUTOC_FLU; | 2633 | autoc_reg |= IXGBE_AUTOC_FLU; |
2634 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | 2634 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); |
2635 | IXGBE_WRITE_FLUSH(hw); | ||
2635 | usleep_range(10000, 20000); | 2636 | usleep_range(10000, 20000); |
2636 | } | 2637 | } |
2637 | 2638 | ||
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index dc649553a0a6..82d4244c6e10 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1378,6 +1378,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1378 | 1378 | ||
1379 | /* Disable all the interrupts */ | 1379 | /* Disable all the interrupts */ |
1380 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); | 1380 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); |
1381 | IXGBE_WRITE_FLUSH(&adapter->hw); | ||
1381 | usleep_range(10000, 20000); | 1382 | usleep_range(10000, 20000); |
1382 | 1383 | ||
1383 | /* Test each interrupt */ | 1384 | /* Test each interrupt */ |
@@ -1398,6 +1399,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1398 | ~mask & 0x00007FFF); | 1399 | ~mask & 0x00007FFF); |
1399 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, | 1400 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, |
1400 | ~mask & 0x00007FFF); | 1401 | ~mask & 0x00007FFF); |
1402 | IXGBE_WRITE_FLUSH(&adapter->hw); | ||
1401 | usleep_range(10000, 20000); | 1403 | usleep_range(10000, 20000); |
1402 | 1404 | ||
1403 | if (adapter->test_icr & mask) { | 1405 | if (adapter->test_icr & mask) { |
@@ -1415,6 +1417,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1415 | adapter->test_icr = 0; | 1417 | adapter->test_icr = 0; |
1416 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 1418 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
1417 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); | 1419 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); |
1420 | IXGBE_WRITE_FLUSH(&adapter->hw); | ||
1418 | usleep_range(10000, 20000); | 1421 | usleep_range(10000, 20000); |
1419 | 1422 | ||
1420 | if (!(adapter->test_icr &mask)) { | 1423 | if (!(adapter->test_icr &mask)) { |
@@ -1435,6 +1438,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1435 | ~mask & 0x00007FFF); | 1438 | ~mask & 0x00007FFF); |
1436 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, | 1439 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, |
1437 | ~mask & 0x00007FFF); | 1440 | ~mask & 0x00007FFF); |
1441 | IXGBE_WRITE_FLUSH(&adapter->hw); | ||
1438 | usleep_range(10000, 20000); | 1442 | usleep_range(10000, 20000); |
1439 | 1443 | ||
1440 | if (adapter->test_icr) { | 1444 | if (adapter->test_icr) { |
@@ -1446,6 +1450,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1446 | 1450 | ||
1447 | /* Disable all the interrupts */ | 1451 | /* Disable all the interrupts */ |
1448 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); | 1452 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); |
1453 | IXGBE_WRITE_FLUSH(&adapter->hw); | ||
1449 | usleep_range(10000, 20000); | 1454 | usleep_range(10000, 20000); |
1450 | 1455 | ||
1451 | /* Unhook test interrupt handler */ | 1456 | /* Unhook test interrupt handler */ |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 1be617545dc9..e86297b32733 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -184,6 +184,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) | |||
184 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); | 184 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); |
185 | vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; | 185 | vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; |
186 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); | 186 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); |
187 | IXGBE_WRITE_FLUSH(hw); | ||
187 | 188 | ||
188 | /* take a breather then clean up driver data */ | 189 | /* take a breather then clean up driver data */ |
189 | msleep(100); | 190 | msleep(100); |
@@ -1005,7 +1006,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
1005 | struct ixgbe_adapter *adapter = dev_get_drvdata(dev); | 1006 | struct ixgbe_adapter *adapter = dev_get_drvdata(dev); |
1006 | unsigned long event = *(unsigned long *)data; | 1007 | unsigned long event = *(unsigned long *)data; |
1007 | 1008 | ||
1008 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) | 1009 | if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) |
1009 | return 0; | 1010 | return 0; |
1010 | 1011 | ||
1011 | switch (event) { | 1012 | switch (event) { |
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 735f686c3b36..f7ca3511b9fe 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c | |||
@@ -1585,6 +1585,7 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) | |||
1585 | *i2cctl |= IXGBE_I2C_CLK_OUT; | 1585 | *i2cctl |= IXGBE_I2C_CLK_OUT; |
1586 | 1586 | ||
1587 | IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); | 1587 | IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); |
1588 | IXGBE_WRITE_FLUSH(hw); | ||
1588 | 1589 | ||
1589 | /* SCL rise time (1000ns) */ | 1590 | /* SCL rise time (1000ns) */ |
1590 | udelay(IXGBE_I2C_T_RISE); | 1591 | udelay(IXGBE_I2C_T_RISE); |
@@ -1605,6 +1606,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) | |||
1605 | *i2cctl &= ~IXGBE_I2C_CLK_OUT; | 1606 | *i2cctl &= ~IXGBE_I2C_CLK_OUT; |
1606 | 1607 | ||
1607 | IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); | 1608 | IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); |
1609 | IXGBE_WRITE_FLUSH(hw); | ||
1608 | 1610 | ||
1609 | /* SCL fall time (300ns) */ | 1611 | /* SCL fall time (300ns) */ |
1610 | udelay(IXGBE_I2C_T_FALL); | 1612 | udelay(IXGBE_I2C_T_FALL); |
@@ -1628,6 +1630,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) | |||
1628 | *i2cctl &= ~IXGBE_I2C_DATA_OUT; | 1630 | *i2cctl &= ~IXGBE_I2C_DATA_OUT; |
1629 | 1631 | ||
1630 | IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); | 1632 | IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); |
1633 | IXGBE_WRITE_FLUSH(hw); | ||
1631 | 1634 | ||
1632 | /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ | 1635 | /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ |
1633 | udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); | 1636 | udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); |
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index bec30ed91adc..2696c78e9f46 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c | |||
@@ -162,6 +162,7 @@ mac_reset_top: | |||
162 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | 162 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
163 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | 163 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; |
164 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | 164 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); |
165 | IXGBE_WRITE_FLUSH(hw); | ||
165 | 166 | ||
166 | msleep(50); | 167 | msleep(50); |
167 | 168 | ||
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 0fcdc25699d8..dc4e305a1087 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -322,6 +322,9 @@ static void macb_tx(struct macb *bp) | |||
322 | for (i = 0; i < TX_RING_SIZE; i++) | 322 | for (i = 0; i < TX_RING_SIZE; i++) |
323 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); | 323 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
324 | 324 | ||
325 | /* Add wrap bit */ | ||
326 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); | ||
327 | |||
325 | /* free transmit buffer in upper layer*/ | 328 | /* free transmit buffer in upper layer*/ |
326 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { | 329 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { |
327 | struct ring_info *rp = &bp->tx_skb[tail]; | 330 | struct ring_info *rp = &bp->tx_skb[tail]; |
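The macb fix re-applies the wrap bit after a TX-error reset: every descriptor is marked "used" again, and the last one must also carry the wrap flag or the controller's DMA pointer runs past the end of the ring. The idiom in generic form (bit positions and names are placeholders, not macb's):

#include <linux/types.h>

#define RING_SIZE	128

struct dma_desc {
        u32 addr;
        u32 ctrl;
};

#define DESC_USED	(1u << 31)	/* placeholder bit positions */
#define DESC_WRAP	(1u << 30)

static void reset_tx_ring(struct dma_desc *ring)
{
        int i;

        for (i = 0; i < RING_SIZE; i++)
                ring[i].ctrl = DESC_USED;

        /* The last descriptor must keep the wrap bit, otherwise the DMA
         * engine walks off the end of the ring after the reset. */
        ring[RING_SIZE - 1].ctrl |= DESC_WRAP;
}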
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c index 5e7109178061..5ada5b469112 100644 --- a/drivers/net/mlx4/en_port.c +++ b/drivers/net/mlx4/en_port.c | |||
@@ -128,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | |||
128 | memset(context, 0, sizeof *context); | 128 | memset(context, 0, sizeof *context); |
129 | 129 | ||
130 | context->base_qpn = cpu_to_be32(base_qpn); | 130 | context->base_qpn = cpu_to_be32(base_qpn); |
131 | context->n_mac = 0x7; | 131 | context->n_mac = 0x2; |
132 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | | 132 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | |
133 | base_qpn); | 133 | base_qpn); |
134 | context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | | 134 | context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index c94b3426d355..f0ee35df4dd7 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -1117,6 +1117,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
1117 | info->port = port; | 1117 | info->port = port; |
1118 | mlx4_init_mac_table(dev, &info->mac_table); | 1118 | mlx4_init_mac_table(dev, &info->mac_table); |
1119 | mlx4_init_vlan_table(dev, &info->vlan_table); | 1119 | mlx4_init_vlan_table(dev, &info->vlan_table); |
1120 | info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + | ||
1121 | (port - 1) * (1 << log_num_mac); | ||
1120 | 1122 | ||
1121 | sprintf(info->dev_name, "mlx4_port%d", port); | 1123 | sprintf(info->dev_name, "mlx4_port%d", port); |
1122 | info->port_attr.attr.name = info->dev_name; | 1124 | info->port_attr.attr.name = info->dev_name; |
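mlx4_init_port_info() now records the port's base QPN up front: each port owns a contiguous slice of 2^log_num_mac QPs from the Ethernet-address region, so port N starts at the region base plus (N - 1) * 2^log_num_mac. A worked example with made-up numbers (log_num_mac is a module parameter; the base below is not a real firmware value):

static u32 example_base_qpn(u32 reserved_base, int log_num_mac, int port)
{
        return reserved_base + (port - 1) * (1 << log_num_mac);
}

/* example_base_qpn(0x400, 7, 1) -> 0x400
 * example_base_qpn(0x400, 7, 2) -> 0x400 + 128 = 0x480 (assumed values) */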
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 1f95afda6841..609e0ec14cee 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c | |||
@@ -258,9 +258,12 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) | |||
258 | if (validate_index(dev, table, index)) | 258 | if (validate_index(dev, table, index)) |
259 | goto out; | 259 | goto out; |
260 | 260 | ||
261 | table->entries[index] = 0; | 261 | /* Check whether this address has reference count */ |
262 | mlx4_set_port_mac_table(dev, port, table->entries); | 262 | if (!(--table->refs[index])) { |
263 | --table->total; | 263 | table->entries[index] = 0; |
264 | mlx4_set_port_mac_table(dev, port, table->entries); | ||
265 | --table->total; | ||
266 | } | ||
264 | out: | 267 | out: |
265 | mutex_unlock(&table->mutex); | 268 | mutex_unlock(&table->mutex); |
266 | } | 269 | } |
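mlx4_unregister_mac() now honours the MAC table's per-entry reference count: the entry is cleared and the hardware table rewritten only when the last user drops its reference. The core of that pattern, with simplified stand-in types:

#include <linux/types.h>

struct mac_table_sketch {
        u64 entries[128];
        int refs[128];
        int total;
};

/* Caller holds the table mutex; returns true if the slot was freed. */
static bool mac_table_put(struct mac_table_sketch *t, int index)
{
        if (--t->refs[index])
                return false;		/* another registration still uses it */

        t->entries[index] = 0;		/* clear, then caller pushes to HW */
        t->total--;
        return true;
}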
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index cd6c2317e29e..ed47585a6862 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
@@ -9201,7 +9201,7 @@ static int __devinit niu_ldg_init(struct niu *np) | |||
9201 | 9201 | ||
9202 | first_chan = 0; | 9202 | first_chan = 0; |
9203 | for (i = 0; i < port; i++) | 9203 | for (i = 0; i < port; i++) |
9204 | first_chan += parent->rxchan_per_port[port]; | 9204 | first_chan += parent->rxchan_per_port[i]; |
9205 | num_chan = parent->rxchan_per_port[port]; | 9205 | num_chan = parent->rxchan_per_port[port]; |
9206 | 9206 | ||
9207 | for (i = first_chan; i < (first_chan + num_chan); i++) { | 9207 | for (i = first_chan; i < (first_chan + num_chan); i++) { |
@@ -9217,7 +9217,7 @@ static int __devinit niu_ldg_init(struct niu *np) | |||
9217 | 9217 | ||
9218 | first_chan = 0; | 9218 | first_chan = 0; |
9219 | for (i = 0; i < port; i++) | 9219 | for (i = 0; i < port; i++) |
9220 | first_chan += parent->txchan_per_port[port]; | 9220 | first_chan += parent->txchan_per_port[i]; |
9221 | num_chan = parent->txchan_per_port[port]; | 9221 | num_chan = parent->txchan_per_port[port]; |
9222 | for (i = first_chan; i < (first_chan + num_chan); i++) { | 9222 | for (i = first_chan; i < (first_chan + num_chan); i++) { |
9223 | err = niu_ldg_assign_ldn(np, parent, | 9223 | err = niu_ldg_assign_ldn(np, parent, |
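The niu change is a plain indexing bug: the loops that compute a port's first RX/TX channel are meant to sum the channel counts of all lower-numbered ports, but they kept adding the current port's own count. With assumed counts {4, 4, 2, 2}, port 2 previously got first_chan = 2 * 2 = 4; the corrected loop yields 4 + 4 = 8. The fixed computation in isolation:

static int first_channel(const int *chan_per_port, int port)
{
        int i, first = 0;

        for (i = 0; i < port; i++)
                first += chan_per_port[i];	/* was chan_per_port[port] */

        return first;
}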
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2cd8dc5847b4..cb6e0b486b1e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -34,8 +34,7 @@ | |||
34 | #define PAGESEL 0x13 | 34 | #define PAGESEL 0x13 |
35 | #define LAYER4 0x02 | 35 | #define LAYER4 0x02 |
36 | #define LAYER2 0x01 | 36 | #define LAYER2 0x01 |
37 | #define MAX_RXTS 4 | 37 | #define MAX_RXTS 64 |
38 | #define MAX_TXTS 4 | ||
39 | #define N_EXT_TS 1 | 38 | #define N_EXT_TS 1 |
40 | #define PSF_PTPVER 2 | 39 | #define PSF_PTPVER 2 |
41 | #define PSF_EVNT 0x4000 | 40 | #define PSF_EVNT 0x4000 |
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts) | |||
218 | rxts->seqid = p->seqid; | 217 | rxts->seqid = p->seqid; |
219 | rxts->msgtype = (p->msgtype >> 12) & 0xf; | 218 | rxts->msgtype = (p->msgtype >> 12) & 0xf; |
220 | rxts->hash = p->msgtype & 0x0fff; | 219 | rxts->hash = p->msgtype & 0x0fff; |
221 | rxts->tmo = jiffies + HZ; | 220 | rxts->tmo = jiffies + 2; |
222 | } | 221 | } |
223 | 222 | ||
224 | static u64 phy2txts(struct phy_txts *p) | 223 | static u64 phy2txts(struct phy_txts *p) |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c77286edba4d..02339b3352e7 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -1092,6 +1092,21 @@ rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) | |||
1092 | rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); | 1092 | rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | struct exgmac_reg { | ||
1096 | u16 addr; | ||
1097 | u16 mask; | ||
1098 | u32 val; | ||
1099 | }; | ||
1100 | |||
1101 | static void rtl_write_exgmac_batch(void __iomem *ioaddr, | ||
1102 | const struct exgmac_reg *r, int len) | ||
1103 | { | ||
1104 | while (len-- > 0) { | ||
1105 | rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); | ||
1106 | r++; | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1095 | static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) | 1110 | static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) |
1096 | { | 1111 | { |
1097 | u8 value = 0xff; | 1112 | u8 value = 0xff; |
@@ -3117,6 +3132,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | |||
3117 | RTL_W32(MAC0, low); | 3132 | RTL_W32(MAC0, low); |
3118 | RTL_R32(MAC0); | 3133 | RTL_R32(MAC0); |
3119 | 3134 | ||
3135 | if (tp->mac_version == RTL_GIGA_MAC_VER_34) { | ||
3136 | const struct exgmac_reg e[] = { | ||
3137 | { .addr = 0xe0, ERIAR_MASK_1111, .val = low }, | ||
3138 | { .addr = 0xe4, ERIAR_MASK_1111, .val = high }, | ||
3139 | { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 }, | ||
3140 | { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 | | ||
3141 | low >> 16 }, | ||
3142 | }; | ||
3143 | |||
3144 | rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); | ||
3145 | } | ||
3146 | |||
3120 | RTL_W8(Cfg9346, Cfg9346_Lock); | 3147 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3121 | 3148 | ||
3122 | spin_unlock_irq(&tp->lock); | 3149 | spin_unlock_irq(&tp->lock); |
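For RTL_GIGA_MAC_VER_34 the station address must also be mirrored into EXGMAC registers, so rtl_rar_set() now walks a small {addr, mask, value} table through the new rtl_write_exgmac_batch() helper. The low/high halves it distributes are the usual split of a 6-byte MAC into two 32-bit registers; a hedged illustration (a sketch, not driver code):

#include <linux/types.h>

/* How a 6-byte station address is typically packed into the two 32-bit
 * values the batch above distributes. */
static void mac_to_regs(const u8 *mac, u32 *low, u32 *high)
{
        *low  = mac[0] | (mac[1] << 8) | (mac[2] << 16) | ((u32)mac[3] << 24);
        *high = mac[4] | (mac[5] << 8);

        /* 0xe0/0xe4 get these as-is; the 0xf0/0xf4 entries carry the same
         * address shifted left by 16 bits, which is what the "low << 16"
         * and "high << 16 | low >> 16" table values above encode. */
}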
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index cbe8865e322a..ba08341fb92c 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl) | |||
367 | memcpy(skb_put(skb, count), sl->rbuff, count); | 367 | memcpy(skb_put(skb, count), sl->rbuff, count); |
368 | skb_reset_mac_header(skb); | 368 | skb_reset_mac_header(skb); |
369 | skb->protocol = htons(ETH_P_IP); | 369 | skb->protocol = htons(ETH_P_IP); |
370 | netif_rx(skb); | 370 | netif_rx_ni(skb); |
371 | dev->stats.rx_packets++; | 371 | dev->stats.rx_packets++; |
372 | } | 372 | } |
373 | 373 | ||
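The slip fix switches delivery to netif_rx_ni(): sl_bump() is reached here in process context via the tty receive path, and netif_rx_ni() raises and runs the RX softirq itself, whereas plain netif_rx() assumes interrupt context and would leave the packet sitting in the backlog. The general context rule, as a sketch:

#include <linux/netdevice.h>
#include <linux/hardirq.h>

static void deliver(struct sk_buff *skb)
{
        if (in_interrupt())
                netif_rx(skb);		/* hard/soft IRQ context */
        else
                netif_rx_ni(skb);	/* process context: also kicks softirqs */
}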
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index fd622a66ebbf..a03336e086d5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #include <linux/usb/usbnet.h> | 53 | #include <linux/usb/usbnet.h> |
54 | #include <linux/usb/cdc.h> | 54 | #include <linux/usb/cdc.h> |
55 | 55 | ||
56 | #define DRIVER_VERSION "01-June-2011" | 56 | #define DRIVER_VERSION "04-Aug-2011" |
57 | 57 | ||
58 | /* CDC NCM subclass 3.2.1 */ | 58 | /* CDC NCM subclass 3.2.1 */ |
59 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 | 59 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 |
@@ -163,35 +163,8 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) | |||
163 | usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); | 163 | usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); |
164 | } | 164 | } |
165 | 165 | ||
166 | static int | ||
167 | cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req, | ||
168 | void *data, u16 flags, u16 *actlen, u16 timeout) | ||
169 | { | ||
170 | int err; | ||
171 | |||
172 | err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ? | ||
173 | usb_rcvctrlpipe(ctx->udev, 0) : | ||
174 | usb_sndctrlpipe(ctx->udev, 0), | ||
175 | req->bNotificationType, req->bmRequestType, | ||
176 | req->wValue, | ||
177 | req->wIndex, data, | ||
178 | req->wLength, timeout); | ||
179 | |||
180 | if (err < 0) { | ||
181 | if (actlen) | ||
182 | *actlen = 0; | ||
183 | return err; | ||
184 | } | ||
185 | |||
186 | if (actlen) | ||
187 | *actlen = err; | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | 166 | static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) |
193 | { | 167 | { |
194 | struct usb_cdc_notification req; | ||
195 | u32 val; | 168 | u32 val; |
196 | u8 flags; | 169 | u8 flags; |
197 | u8 iface_no; | 170 | u8 iface_no; |
@@ -200,14 +173,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
200 | 173 | ||
201 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; | 174 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; |
202 | 175 | ||
203 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; | 176 | err = usb_control_msg(ctx->udev, |
204 | req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS; | 177 | usb_rcvctrlpipe(ctx->udev, 0), |
205 | req.wValue = 0; | 178 | USB_CDC_GET_NTB_PARAMETERS, |
206 | req.wIndex = cpu_to_le16(iface_no); | 179 | USB_TYPE_CLASS | USB_DIR_IN |
207 | req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm)); | 180 | | USB_RECIP_INTERFACE, |
208 | 181 | 0, iface_no, &ctx->ncm_parm, | |
209 | err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000); | 182 | sizeof(ctx->ncm_parm), 10000); |
210 | if (err) { | 183 | if (err < 0) { |
211 | pr_debug("failed GET_NTB_PARAMETERS\n"); | 184 | pr_debug("failed GET_NTB_PARAMETERS\n"); |
212 | return 1; | 185 | return 1; |
213 | } | 186 | } |
@@ -253,31 +226,26 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
253 | 226 | ||
254 | /* inform device about NTB input size changes */ | 227 | /* inform device about NTB input size changes */ |
255 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { | 228 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { |
256 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | ||
257 | USB_RECIP_INTERFACE; | ||
258 | req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; | ||
259 | req.wValue = 0; | ||
260 | req.wIndex = cpu_to_le16(iface_no); | ||
261 | 229 | ||
262 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { | 230 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { |
263 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; | 231 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; |
264 | 232 | err = usb_control_msg(ctx->udev, | |
265 | req.wLength = 8; | 233 | usb_sndctrlpipe(ctx->udev, 0), |
266 | ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 234 | USB_CDC_SET_NTB_INPUT_SIZE, |
267 | ndp_in_sz.wNtbInMaxDatagrams = | 235 | USB_TYPE_CLASS | USB_DIR_OUT |
268 | cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); | 236 | | USB_RECIP_INTERFACE, |
269 | ndp_in_sz.wReserved = 0; | 237 | 0, iface_no, &ndp_in_sz, 8, 1000); |
270 | err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, | ||
271 | 1000); | ||
272 | } else { | 238 | } else { |
273 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 239 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); |
274 | 240 | err = usb_control_msg(ctx->udev, | |
275 | req.wLength = 4; | 241 | usb_sndctrlpipe(ctx->udev, 0), |
276 | err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, | 242 | USB_CDC_SET_NTB_INPUT_SIZE, |
277 | NULL, 1000); | 243 | USB_TYPE_CLASS | USB_DIR_OUT |
244 | | USB_RECIP_INTERFACE, | ||
245 | 0, iface_no, &dwNtbInMaxSize, 4, 1000); | ||
278 | } | 246 | } |
279 | 247 | ||
280 | if (err) | 248 | if (err < 0) |
281 | pr_debug("Setting NTB Input Size failed\n"); | 249 | pr_debug("Setting NTB Input Size failed\n"); |
282 | } | 250 | } |
283 | 251 | ||
@@ -332,29 +300,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
332 | 300 | ||
333 | /* set CRC Mode */ | 301 | /* set CRC Mode */ |
334 | if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { | 302 | if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { |
335 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | 303 | err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), |
336 | USB_RECIP_INTERFACE; | 304 | USB_CDC_SET_CRC_MODE, |
337 | req.bNotificationType = USB_CDC_SET_CRC_MODE; | 305 | USB_TYPE_CLASS | USB_DIR_OUT |
338 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); | 306 | | USB_RECIP_INTERFACE, |
339 | req.wIndex = cpu_to_le16(iface_no); | 307 | USB_CDC_NCM_CRC_NOT_APPENDED, |
340 | req.wLength = 0; | 308 | iface_no, NULL, 0, 1000); |
341 | 309 | if (err < 0) | |
342 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | ||
343 | if (err) | ||
344 | pr_debug("Setting CRC mode off failed\n"); | 310 | pr_debug("Setting CRC mode off failed\n"); |
345 | } | 311 | } |
346 | 312 | ||
347 | /* set NTB format, if both formats are supported */ | 313 | /* set NTB format, if both formats are supported */ |
348 | if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { | 314 | if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { |
349 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | 315 | err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), |
350 | USB_RECIP_INTERFACE; | 316 | USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS |
351 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; | 317 | | USB_DIR_OUT | USB_RECIP_INTERFACE, |
352 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); | 318 | USB_CDC_NCM_NTB16_FORMAT, |
353 | req.wIndex = cpu_to_le16(iface_no); | 319 | iface_no, NULL, 0, 1000); |
354 | req.wLength = 0; | 320 | if (err < 0) |
355 | |||
356 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | ||
357 | if (err) | ||
358 | pr_debug("Setting NTB format to 16-bit failed\n"); | 321 | pr_debug("Setting NTB format to 16-bit failed\n"); |
359 | } | 322 | } |
360 | 323 | ||
@@ -364,17 +327,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
364 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { | 327 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { |
365 | __le16 max_datagram_size; | 328 | __le16 max_datagram_size; |
366 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); | 329 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
367 | 330 | err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), | |
368 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | | 331 | USB_CDC_GET_MAX_DATAGRAM_SIZE, |
369 | USB_RECIP_INTERFACE; | 332 | USB_TYPE_CLASS | USB_DIR_IN |
370 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | 333 | | USB_RECIP_INTERFACE, |
371 | req.wValue = 0; | 334 | 0, iface_no, &max_datagram_size, |
372 | req.wIndex = cpu_to_le16(iface_no); | 335 | 2, 1000); |
373 | req.wLength = cpu_to_le16(2); | 336 | if (err < 0) { |
374 | |||
375 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, | ||
376 | 1000); | ||
377 | if (err) { | ||
378 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", | 337 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", |
379 | CDC_NCM_MIN_DATAGRAM_SIZE); | 338 | CDC_NCM_MIN_DATAGRAM_SIZE); |
380 | } else { | 339 | } else { |
@@ -395,17 +354,15 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
395 | CDC_NCM_MIN_DATAGRAM_SIZE; | 354 | CDC_NCM_MIN_DATAGRAM_SIZE; |
396 | 355 | ||
397 | /* if value changed, update device */ | 356 | /* if value changed, update device */ |
398 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | 357 | err = usb_control_msg(ctx->udev, |
399 | USB_RECIP_INTERFACE; | 358 | usb_sndctrlpipe(ctx->udev, 0), |
400 | req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; | 359 | USB_CDC_SET_MAX_DATAGRAM_SIZE, |
401 | req.wValue = 0; | 360 | USB_TYPE_CLASS | USB_DIR_OUT |
402 | req.wIndex = cpu_to_le16(iface_no); | 361 | | USB_RECIP_INTERFACE, |
403 | req.wLength = 2; | 362 | 0, |
404 | max_datagram_size = cpu_to_le16(ctx->max_datagram_size); | 363 | iface_no, &max_datagram_size, |
405 | 364 | 2, 1000); | |
406 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, | 365 | if (err < 0) |
407 | 0, NULL, 1000); | ||
408 | if (err) | ||
409 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); | 366 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); |
410 | } | 367 | } |
411 | 368 | ||
@@ -671,7 +628,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
671 | u32 rem; | 628 | u32 rem; |
672 | u32 offset; | 629 | u32 offset; |
673 | u32 last_offset; | 630 | u32 last_offset; |
674 | u16 n = 0; | 631 | u16 n = 0, index; |
675 | u8 ready2send = 0; | 632 | u8 ready2send = 0; |
676 | 633 | ||
677 | /* if there is a remaining skb, it gets priority */ | 634 | /* if there is a remaining skb, it gets priority */ |
@@ -859,8 +816,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
859 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); | 816 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); |
860 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); | 817 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); |
861 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); | 818 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); |
862 | ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), | 819 | index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus); |
863 | ctx->tx_ndp_modulus); | 820 | ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index); |
864 | 821 | ||
865 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); | 822 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); |
866 | ctx->tx_seq++; | 823 | ctx->tx_seq++; |
@@ -873,12 +830,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
873 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); | 830 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); |
874 | ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ | 831 | ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ |
875 | 832 | ||
876 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, | 833 | memcpy(((u8 *)skb_out->data) + index, |
877 | &(ctx->tx_ncm.ndp16), | 834 | &(ctx->tx_ncm.ndp16), |
878 | sizeof(ctx->tx_ncm.ndp16)); | 835 | sizeof(ctx->tx_ncm.ndp16)); |
879 | 836 | ||
880 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + | 837 | memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16), |
881 | sizeof(ctx->tx_ncm.ndp16), | ||
882 | &(ctx->tx_ncm.dpe16), | 838 | &(ctx->tx_ncm.dpe16), |
883 | (ctx->tx_curr_frame_num + 1) * | 839 | (ctx->tx_curr_frame_num + 1) * |
884 | sizeof(struct usb_cdc_ncm_dpe16)); | 840 | sizeof(struct usb_cdc_ncm_dpe16)); |
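The cdc_ncm rework drops the driver's cdc_ncm_do_request() wrapper in favour of direct usb_control_msg() calls, which also straightens out the error handling: usb_control_msg() returns the number of bytes transferred on success and a negative errno on failure, hence the new "err < 0" tests. The GET_NTB_PARAMETERS request in that shape, reduced to a self-contained sketch:

#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Fetch the NTB parameter block from interface iface_no. */
static int get_ntb_parameters(struct usb_device *udev, u8 iface_no,
                              struct usb_cdc_ncm_ntb_parameters *parm)
{
        int err;

        err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              USB_CDC_GET_NTB_PARAMETERS,
                              USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
                              0, iface_no, parm, sizeof(*parm), 10000);

        return err < 0 ? err : 0;	/* >= 0 means "bytes transferred" */
}

The tail of the hunk also keeps the NDP offset in a CPU-order "index" variable and stores only the little-endian conversion in wNdpIndex, so the later memcpy() offsets are no longer computed from a __le16 value.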
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 041fb7d43c4f..ef3b236b5145 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c | |||
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf) | |||
977 | usb_set_intfdata(intf, NULL); | 977 | usb_set_intfdata(intf, NULL); |
978 | if (dev) { | 978 | if (dev) { |
979 | set_bit(RTL8150_UNPLUG, &dev->flags); | 979 | set_bit(RTL8150_UNPLUG, &dev->flags); |
980 | tasklet_disable(&dev->tl); | ||
981 | tasklet_kill(&dev->tl); | 980 | tasklet_kill(&dev->tl); |
982 | unregister_netdev(dev->netdev); | 981 | unregister_netdev(dev->netdev); |
983 | unlink_all_urbs(dev); | 982 | unlink_all_urbs(dev); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index 9ff7c30573b8..44d9d8d56490 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c | |||
@@ -309,11 +309,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, | |||
309 | u8 i; | 309 | u8 i; |
310 | u32 val; | 310 | u32 val; |
311 | 311 | ||
312 | if (ah->is_pciexpress != true) | 312 | if (ah->is_pciexpress != true || ah->aspm_enabled != true) |
313 | return; | ||
314 | |||
315 | /* Do not touch SerDes registers */ | ||
316 | if (ah->config.pcie_powersave_enable == 2) | ||
317 | return; | 313 | return; |
318 | 314 | ||
319 | /* Nothing to do on restore for 11N */ | 315 | /* Nothing to do on restore for 11N */ |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 8efdec247c02..ad2bb2bf4e8a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c | |||
@@ -519,11 +519,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah, | |||
519 | int restore, | 519 | int restore, |
520 | int power_off) | 520 | int power_off) |
521 | { | 521 | { |
522 | if (ah->is_pciexpress != true) | 522 | if (ah->is_pciexpress != true || ah->aspm_enabled != true) |
523 | return; | ||
524 | |||
525 | /* Do not touch SerDes registers */ | ||
526 | if (ah->config.pcie_powersave_enable == 2) | ||
527 | return; | 523 | return; |
528 | 524 | ||
529 | /* Nothing to do on restore for 11N */ | 525 | /* Nothing to do on restore for 11N */ |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8006ce0c7357..8dcefe74f4c3 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -318,6 +318,14 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah) | |||
318 | REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); | 318 | REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); |
319 | } | 319 | } |
320 | 320 | ||
321 | static void ath9k_hw_aspm_init(struct ath_hw *ah) | ||
322 | { | ||
323 | struct ath_common *common = ath9k_hw_common(ah); | ||
324 | |||
325 | if (common->bus_ops->aspm_init) | ||
326 | common->bus_ops->aspm_init(common); | ||
327 | } | ||
328 | |||
321 | /* This should work for all families including legacy */ | 329 | /* This should work for all families including legacy */ |
322 | static bool ath9k_hw_chip_test(struct ath_hw *ah) | 330 | static bool ath9k_hw_chip_test(struct ath_hw *ah) |
323 | { | 331 | { |
@@ -378,7 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) | |||
378 | ah->config.additional_swba_backoff = 0; | 386 | ah->config.additional_swba_backoff = 0; |
379 | ah->config.ack_6mb = 0x0; | 387 | ah->config.ack_6mb = 0x0; |
380 | ah->config.cwm_ignore_extcca = 0; | 388 | ah->config.cwm_ignore_extcca = 0; |
381 | ah->config.pcie_powersave_enable = 0; | ||
382 | ah->config.pcie_clock_req = 0; | 389 | ah->config.pcie_clock_req = 0; |
383 | ah->config.pcie_waen = 0; | 390 | ah->config.pcie_waen = 0; |
384 | ah->config.analog_shiftreg = 1; | 391 | ah->config.analog_shiftreg = 1; |
@@ -598,7 +605,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) | |||
598 | 605 | ||
599 | 606 | ||
600 | if (ah->is_pciexpress) | 607 | if (ah->is_pciexpress) |
601 | ath9k_hw_configpcipowersave(ah, 0, 0); | 608 | ath9k_hw_aspm_init(ah); |
602 | else | 609 | else |
603 | ath9k_hw_disablepcie(ah); | 610 | ath9k_hw_disablepcie(ah); |
604 | 611 | ||
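The hw.c hunks above stop calling ath9k_hw_configpcipowersave() directly and instead go through ath9k_hw_aspm_init(), which fires a bus-supplied hook only when the bus registered one. A minimal standalone sketch of that optional-hook pattern, assuming hypothetical names (bus_ops, hw_aspm_init) rather than the driver's real types:

    #include <stdio.h>

    /* Hypothetical stand-in for struct ath_bus_ops: an optional hook. */
    struct bus_ops {
        void (*aspm_init)(void *common);
    };

    static void pci_aspm_init(void *common)
    {
        printf("bus-specific ASPM init for %p\n", common);
    }

    /* Mirrors ath9k_hw_aspm_init(): call the hook only if the bus provides one. */
    static void hw_aspm_init(const struct bus_ops *ops, void *common)
    {
        if (ops->aspm_init)
            ops->aspm_init(common);
    }

    int main(void)
    {
        struct bus_ops pci = { .aspm_init = pci_aspm_init };
        struct bus_ops ahb = { .aspm_init = NULL };   /* no ASPM hook registered */
        int dummy;

        hw_aspm_init(&pci, &dummy);   /* runs the PCI hook */
        hw_aspm_init(&ahb, &dummy);   /* silently skipped */
        return 0;
    }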
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 6acd0f975ae1..c79889036ec4 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
@@ -219,7 +219,6 @@ struct ath9k_ops_config { | |||
219 | int additional_swba_backoff; | 219 | int additional_swba_backoff; |
220 | int ack_6mb; | 220 | int ack_6mb; |
221 | u32 cwm_ignore_extcca; | 221 | u32 cwm_ignore_extcca; |
222 | u8 pcie_powersave_enable; | ||
223 | bool pcieSerDesWrite; | 222 | bool pcieSerDesWrite; |
224 | u8 pcie_clock_req; | 223 | u8 pcie_clock_req; |
225 | u32 pcie_waen; | 224 | u32 pcie_waen; |
@@ -673,6 +672,7 @@ struct ath_hw { | |||
673 | 672 | ||
674 | bool sw_mgmt_crypto; | 673 | bool sw_mgmt_crypto; |
675 | bool is_pciexpress; | 674 | bool is_pciexpress; |
675 | bool aspm_enabled; | ||
676 | bool is_monitoring; | 676 | bool is_monitoring; |
677 | bool need_an_top2_fixup; | 677 | bool need_an_top2_fixup; |
678 | u16 tx_trig_level; | 678 | u16 tx_trig_level; |
@@ -874,6 +874,7 @@ struct ath_bus_ops { | |||
874 | bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); | 874 | bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); |
875 | void (*bt_coex_prep)(struct ath_common *common); | 875 | void (*bt_coex_prep)(struct ath_common *common); |
876 | void (*extn_synch_en)(struct ath_common *common); | 876 | void (*extn_synch_en)(struct ath_common *common); |
877 | void (*aspm_init)(struct ath_common *common); | ||
877 | }; | 878 | }; |
878 | 879 | ||
879 | static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) | 880 | static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index ac5107172f94..aa0ff7e2c922 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -670,8 +670,10 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band) | |||
670 | static void ath9k_init_txpower_limits(struct ath_softc *sc) | 670 | static void ath9k_init_txpower_limits(struct ath_softc *sc) |
671 | { | 671 | { |
672 | struct ath_hw *ah = sc->sc_ah; | 672 | struct ath_hw *ah = sc->sc_ah; |
673 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | ||
673 | struct ath9k_channel *curchan = ah->curchan; | 674 | struct ath9k_channel *curchan = ah->curchan; |
674 | 675 | ||
676 | ah->txchainmask = common->tx_chainmask; | ||
675 | if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) | 677 | if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) |
676 | ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); | 678 | ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); |
677 | if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) | 679 | if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) |
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 3bad0b2cf9a3..be4ea1329813 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/nl80211.h> | 17 | #include <linux/nl80211.h> |
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/pci-aspm.h> | ||
19 | #include <linux/ath9k_platform.h> | 20 | #include <linux/ath9k_platform.h> |
20 | #include "ath9k.h" | 21 | #include "ath9k.h" |
21 | 22 | ||
@@ -115,12 +116,38 @@ static void ath_pci_extn_synch_enable(struct ath_common *common) | |||
115 | pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl); | 116 | pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl); |
116 | } | 117 | } |
117 | 118 | ||
119 | static void ath_pci_aspm_init(struct ath_common *common) | ||
120 | { | ||
121 | struct ath_softc *sc = (struct ath_softc *) common->priv; | ||
122 | struct ath_hw *ah = sc->sc_ah; | ||
123 | struct pci_dev *pdev = to_pci_dev(sc->dev); | ||
124 | struct pci_dev *parent; | ||
125 | int pos; | ||
126 | u8 aspm; | ||
127 | |||
128 | if (!pci_is_pcie(pdev)) | ||
129 | return; | ||
130 | |||
131 | parent = pdev->bus->self; | ||
132 | if (WARN_ON(!parent)) | ||
133 | return; | ||
134 | |||
135 | pos = pci_pcie_cap(parent); | ||
136 | pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); | ||
137 | if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { | ||
138 | ah->aspm_enabled = true; | ||
139 | /* Initialize PCIe PM and SERDES registers. */ | ||
140 | ath9k_hw_configpcipowersave(ah, 0, 0); | ||
141 | } | ||
142 | } | ||
143 | |||
118 | static const struct ath_bus_ops ath_pci_bus_ops = { | 144 | static const struct ath_bus_ops ath_pci_bus_ops = { |
119 | .ath_bus_type = ATH_PCI, | 145 | .ath_bus_type = ATH_PCI, |
120 | .read_cachesize = ath_pci_read_cachesize, | 146 | .read_cachesize = ath_pci_read_cachesize, |
121 | .eeprom_read = ath_pci_eeprom_read, | 147 | .eeprom_read = ath_pci_eeprom_read, |
122 | .bt_coex_prep = ath_pci_bt_coex_prep, | 148 | .bt_coex_prep = ath_pci_bt_coex_prep, |
123 | .extn_synch_en = ath_pci_extn_synch_enable, | 149 | .extn_synch_en = ath_pci_extn_synch_enable, |
150 | .aspm_init = ath_pci_aspm_init, | ||
124 | }; | 151 | }; |
125 | 152 | ||
126 | static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 153 | static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
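The new ath_pci_aspm_init() above decides whether ASPM power save may touch the SerDes by reading the parent bridge's Link Control register and testing the L0s/L1 bits. A small runnable sketch of that test; the two #defines mirror the kernel's PCIE_LINK_STATE_L0S/PCIE_LINK_STATE_L1 values used in the hunk (assumed here to be 1 and 2, which line up with the ASPM Control field in the low two bits of LNKCTL):

    #include <stdio.h>

    /* Local copies of the ASPM bits; in the kernel they come from <linux/pci.h>. */
    #define PCIE_LINK_STATE_L0S 1
    #define PCIE_LINK_STATE_L1  2

    /* The low two bits of Link Control hold the ASPM Control field. */
    static int aspm_enabled(unsigned char lnkctl)
    {
        return (lnkctl & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) != 0;
    }

    int main(void)
    {
        unsigned char samples[] = { 0x00, 0x01, 0x02, 0x03, 0x40 };
        for (unsigned i = 0; i < sizeof(samples); i++)
            printf("LNKCTL=0x%02x -> ASPM %s\n", samples[i],
                   aspm_enabled(samples[i]) ? "enabled" : "disabled");
        return 0;
    }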
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c index dab67a12d73b..73fe3cdf796b 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945.c | |||
@@ -1746,7 +1746,11 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) | |||
1746 | } | 1746 | } |
1747 | 1747 | ||
1748 | memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); | 1748 | memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); |
1749 | 1749 | /* | |
1750 | * We do not commit tx power settings while channel changing; | ||
1751 | * do it now if the tx power changed. | ||
1752 | */ | ||
1753 | iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); | ||
1750 | return 0; | 1754 | return 0; |
1751 | } | 1755 | } |
1752 | 1756 | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c index bd4b000733f7..ecdc6e557428 100644 --- a/drivers/net/wireless/iwlegacy/iwl-4965.c +++ b/drivers/net/wireless/iwlegacy/iwl-4965.c | |||
@@ -1235,7 +1235,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c | |||
1235 | 1235 | ||
1236 | memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); | 1236 | memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); |
1237 | iwl_legacy_print_rx_config_cmd(priv, ctx); | 1237 | iwl_legacy_print_rx_config_cmd(priv, ctx); |
1238 | goto set_tx_power; | 1238 | /* |
1239 | * We do not commit tx power settings while channel changing; | ||
1240 | * do it now if the tx power changed. | ||
1241 | */ | ||
1242 | iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); | ||
1243 | return 0; | ||
1239 | } | 1244 | } |
1240 | 1245 | ||
1241 | /* If we are currently associated and the new config requires | 1246 | /* If we are currently associated and the new config requires |
@@ -1315,7 +1320,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c | |||
1315 | 1320 | ||
1316 | iwl4965_init_sensitivity(priv); | 1321 | iwl4965_init_sensitivity(priv); |
1317 | 1322 | ||
1318 | set_tx_power: | ||
1319 | /* If we issue a new RXON command which required a tune then we must | 1323 | /* If we issue a new RXON command which required a tune then we must |
1320 | * send a new TXPOWER command or we won't be able to Tx any frames */ | 1324 | * send a new TXPOWER command or we won't be able to Tx any frames */ |
1321 | ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); | 1325 | ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 3eeb12ebe6e9..c95cefd529dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -365,6 +365,7 @@ static struct iwl_base_params iwl5000_base_params = { | |||
365 | .chain_noise_scale = 1000, | 365 | .chain_noise_scale = 1000, |
366 | .wd_timeout = IWL_LONG_WD_TIMEOUT, | 366 | .wd_timeout = IWL_LONG_WD_TIMEOUT, |
367 | .max_event_log_size = 512, | 367 | .max_event_log_size = 512, |
368 | .no_idle_support = true, | ||
368 | }; | 369 | }; |
369 | static struct iwl_ht_params iwl5000_ht_params = { | 370 | static struct iwl_ht_params iwl5000_ht_params = { |
370 | .ht_greenfield_support = true, | 371 | .ht_greenfield_support = true, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 3e6bb734dcb7..02817a438550 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -135,6 +135,7 @@ struct iwl_mod_params { | |||
135 | * @temperature_kelvin: temperature reported by uCode in kelvin | 135 | * @temperature_kelvin: temperature reported by uCode in kelvin |
136 | * @max_event_log_size: size of the event log buffer for ucode event logging | 136 | * @max_event_log_size: size of the event log buffer for ucode event logging |
137 | * @shadow_reg_enable: HW shadow register bit | 137 | * @shadow_reg_enable: HW shadow register bit |
138 | * @no_idle_support: do not support idle mode | ||
138 | */ | 139 | */ |
139 | struct iwl_base_params { | 140 | struct iwl_base_params { |
140 | int eeprom_size; | 141 | int eeprom_size; |
@@ -156,6 +157,7 @@ struct iwl_base_params { | |||
156 | bool temperature_kelvin; | 157 | bool temperature_kelvin; |
157 | u32 max_event_log_size; | 158 | u32 max_event_log_size; |
158 | const bool shadow_reg_enable; | 159 | const bool shadow_reg_enable; |
160 | const bool no_idle_support; | ||
159 | }; | 161 | }; |
160 | /* | 162 | /* |
161 | * @advanced_bt_coexist: support advanced bt coexist | 163 | * @advanced_bt_coexist: support advanced bt coexist |
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index fb7e436b40c7..69d4ec467dca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c | |||
@@ -134,6 +134,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus) | |||
134 | static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) | 134 | static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) |
135 | { | 135 | { |
136 | bus->drv_data = drv_data; | 136 | bus->drv_data = drv_data; |
137 | pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data); | ||
137 | } | 138 | } |
138 | 139 | ||
139 | static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], | 140 | static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], |
@@ -454,8 +455,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
454 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | 455 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); |
455 | } | 456 | } |
456 | 457 | ||
457 | pci_set_drvdata(pdev, bus); | ||
458 | |||
459 | bus->dev = &pdev->dev; | 458 | bus->dev = &pdev->dev; |
460 | bus->irq = pdev->irq; | 459 | bus->irq = pdev->irq; |
461 | bus->ops = &pci_ops; | 460 | bus->ops = &pci_ops; |
@@ -494,11 +493,12 @@ static void iwl_pci_down(struct iwl_bus *bus) | |||
494 | 493 | ||
495 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) | 494 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) |
496 | { | 495 | { |
497 | struct iwl_bus *bus = pci_get_drvdata(pdev); | 496 | struct iwl_priv *priv = pci_get_drvdata(pdev); |
497 | void *bus_specific = priv->bus->bus_specific; | ||
498 | 498 | ||
499 | iwl_remove(bus->drv_data); | 499 | iwl_remove(priv); |
500 | 500 | ||
501 | iwl_pci_down(bus); | 501 | iwl_pci_down(bus_specific); |
502 | } | 502 | } |
503 | 503 | ||
504 | #ifdef CONFIG_PM | 504 | #ifdef CONFIG_PM |
@@ -506,20 +506,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) | |||
506 | static int iwl_pci_suspend(struct device *device) | 506 | static int iwl_pci_suspend(struct device *device) |
507 | { | 507 | { |
508 | struct pci_dev *pdev = to_pci_dev(device); | 508 | struct pci_dev *pdev = to_pci_dev(device); |
509 | struct iwl_bus *bus = pci_get_drvdata(pdev); | 509 | struct iwl_priv *priv = pci_get_drvdata(pdev); |
510 | 510 | ||
511 | /* Before you put code here, think about WoWLAN. You cannot check here | 511 | /* Before you put code here, think about WoWLAN. You cannot check here |
512 | * whether WoWLAN is enabled or not, and your code will run even if | 512 | * whether WoWLAN is enabled or not, and your code will run even if |
513 | * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. | 513 | * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. |
514 | */ | 514 | */ |
515 | 515 | ||
516 | return iwl_suspend(bus->drv_data); | 516 | return iwl_suspend(priv); |
517 | } | 517 | } |
518 | 518 | ||
519 | static int iwl_pci_resume(struct device *device) | 519 | static int iwl_pci_resume(struct device *device) |
520 | { | 520 | { |
521 | struct pci_dev *pdev = to_pci_dev(device); | 521 | struct pci_dev *pdev = to_pci_dev(device); |
522 | struct iwl_bus *bus = pci_get_drvdata(pdev); | 522 | struct iwl_priv *priv = pci_get_drvdata(pdev); |
523 | 523 | ||
524 | /* Before you put code here, think about WoWLAN. You cannot check here | 524 | /* Before you put code here, think about WoWLAN. You cannot check here |
525 | * whether WoWLAN is enabled or not, and your code will run even if | 525 | * whether WoWLAN is enabled or not, and your code will run even if |
@@ -532,7 +532,7 @@ static int iwl_pci_resume(struct device *device) | |||
532 | */ | 532 | */ |
533 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | 533 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); |
534 | 534 | ||
535 | return iwl_resume(bus->drv_data); | 535 | return iwl_resume(priv); |
536 | } | 536 | } |
537 | 537 | ||
538 | static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); | 538 | static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); |
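The iwl-pci.c hunks above change what the PCI driver data holds: it is now the iwl_priv handle stored by iwl_pci_set_drv_data(), and remove/suspend/resume reach the bus through priv->bus->bus_specific instead of keeping a separate iwl_bus pointer in drvdata. A toy sketch of that layering with mocked-up structs and a stand-in for pci_set_drvdata()/pci_get_drvdata():

    #include <stdio.h>

    /* Hypothetical mock-ups of the layering used after this patch. */
    struct iwl_bus  { void *bus_specific; };
    struct iwl_priv { struct iwl_bus *bus; };

    /* Stand-ins for pci_set_drvdata()/pci_get_drvdata(). */
    static void *drvdata;
    static void set_drvdata(void *p) { drvdata = p; }
    static void *get_drvdata(void)   { return drvdata; }

    int main(void)
    {
        int pci_private;                       /* pretend PCI-specific state */
        struct iwl_bus bus = { .bus_specific = &pci_private };
        struct iwl_priv priv = { .bus = &bus };

        /* probe: the upper layer's handle goes into drvdata */
        set_drvdata(&priv);

        /* remove/suspend/resume: fetch priv, then walk down to the bus */
        struct iwl_priv *p = get_drvdata();
        printf("bus_specific recovered: %s\n",
               p->bus->bus_specific == &pci_private ? "yes" : "no");
        return 0;
    }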
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 3ec619c6881c..cd64df05f9ed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c | |||
@@ -349,7 +349,8 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, | |||
349 | 349 | ||
350 | if (priv->wowlan) | 350 | if (priv->wowlan) |
351 | iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); | 351 | iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); |
352 | else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE) | 352 | else if (!priv->cfg->base_params->no_idle_support && |
353 | priv->hw->conf.flags & IEEE80211_CONF_IDLE) | ||
353 | iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); | 354 | iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); |
354 | else if (iwl_tt_is_low_power_state(priv)) { | 355 | else if (iwl_tt_is_low_power_state(priv)) { |
355 | /* in thermal throttling low power state */ | 356 | /* in thermal throttling low power state */ |
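The new condition in iwl_power_build_cmd() only takes the idle power level when the hardware does not set no_idle_support and mac80211 reports IEEE80211_CONF_IDLE. Because '&' binds tighter than '&&', the flag test needs no extra parentheses; a tiny runnable check of that logic with placeholder values:

    #include <stdio.h>
    #include <stdbool.h>

    #define CONF_IDLE 0x4   /* stand-in for IEEE80211_CONF_IDLE */

    static bool use_idle_power(bool no_idle_support, unsigned flags)
    {
        /* '&' binds tighter than '&&', so this reads as !a && (flags & CONF_IDLE) */
        return !no_idle_support && flags & CONF_IDLE;
    }

    int main(void)
    {
        printf("%d\n", use_idle_power(false, CONF_IDLE));  /* 1: idle level used */
        printf("%d\n", use_idle_power(true,  CONF_IDLE));  /* 0: 5000-series opts out */
        printf("%d\n", use_idle_power(false, 0));          /* 0: not idle */
        return 0;
    }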
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 84ab7d1acb6a..ef67f6786a84 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -703,8 +703,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) | |||
703 | /* | 703 | /* |
704 | * Add space for the TXWI in front of the skb. | 704 | * Add space for the TXWI in front of the skb. |
705 | */ | 705 | */ |
706 | skb_push(entry->skb, TXWI_DESC_SIZE); | 706 | memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE); |
707 | memset(entry->skb, 0, TXWI_DESC_SIZE); | ||
708 | 707 | ||
709 | /* | 708 | /* |
710 | * Register descriptor details in skb frame descriptor. | 709 | * Register descriptor details in skb frame descriptor. |
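The rt2800 fix above matters because the old memset() zeroed the first TXWI_DESC_SIZE bytes of the struct sk_buff itself; the replacement zeroes the descriptor space that skb_push() just exposed, via the pointer skb_push() returns. A userspace analogy with a toy buffer type (toy_skb and toy_push are made-up stand-ins, not kernel API):

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-in for an skb: 'data' points into 'buf' with headroom in front. */
    struct toy_skb {
        unsigned char buf[64];
        unsigned char *data;
        size_t len;
    };

    /* Like skb_push(): grow the frame at the front, return the new start. */
    static unsigned char *toy_push(struct toy_skb *skb, size_t n)
    {
        skb->data -= n;
        skb->len  += n;
        return skb->data;
    }

    int main(void)
    {
        struct toy_skb skb = { .len = 0 };
        skb.data = skb.buf + 32;              /* 32 bytes of headroom */

        /* Correct pattern: zero exactly the bytes the push exposed. */
        memset(toy_push(&skb, 8), 0, 8);

        /* memset(&skb, 0, 8) would instead clobber the struct itself -
         * the bug the patch removes. */
        printf("frame starts at offset %td, len %zu\n",
               skb.data - skb.buf, skb.len);
        return 0;
    }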
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index 15cdc7e57fc4..4cdf247a870d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h | |||
@@ -355,7 +355,8 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf * | |||
355 | return CIPHER_NONE; | 355 | return CIPHER_NONE; |
356 | } | 356 | } |
357 | 357 | ||
358 | static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, | 358 | static inline void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, |
359 | struct sk_buff *skb, | ||
359 | struct txentry_desc *txdesc) | 360 | struct txentry_desc *txdesc) |
360 | { | 361 | { |
361 | } | 362 | } |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 8efab3983528..4ccf23805973 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
@@ -113,7 +113,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
113 | * due to possible race conditions in mac80211. | 113 | * due to possible race conditions in mac80211. |
114 | */ | 114 | */ |
115 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | 115 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) |
116 | goto exit_fail; | 116 | goto exit_free_skb; |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * Use the ATIM queue if appropriate and present. | 119 | * Use the ATIM queue if appropriate and present. |
@@ -127,7 +127,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
127 | ERROR(rt2x00dev, | 127 | ERROR(rt2x00dev, |
128 | "Attempt to send packet over invalid queue %d.\n" | 128 | "Attempt to send packet over invalid queue %d.\n" |
129 | "Please file bug report to %s.\n", qid, DRV_PROJECT); | 129 | "Please file bug report to %s.\n", qid, DRV_PROJECT); |
130 | goto exit_fail; | 130 | goto exit_free_skb; |
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
@@ -159,6 +159,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
159 | 159 | ||
160 | exit_fail: | 160 | exit_fail: |
161 | rt2x00queue_pause_queue(queue); | 161 | rt2x00queue_pause_queue(queue); |
162 | exit_free_skb: | ||
162 | dev_kfree_skb_any(skb); | 163 | dev_kfree_skb_any(skb); |
163 | } | 164 | } |
164 | EXPORT_SYMBOL_GPL(rt2x00mac_tx); | 165 | EXPORT_SYMBOL_GPL(rt2x00mac_tx); |
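The new exit_free_skb label splits the error path: failures that happen before a valid queue is resolved only free the frame, while later failures still pause the queue and then fall through to the free. A compact standalone illustration of that two-stage goto cleanup (tx() and its flags are invented for the example):

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy version of the two-stage error path added to rt2x00mac_tx(). */
    static int tx(bool device_present, bool queue_full)
    {
        int queue_paused = 0, skb_freed = 0;

        if (!device_present)
            goto exit_free_skb;   /* no usable queue yet: just drop the frame */

        if (queue_full)
            goto exit_fail;       /* queue exists: pause it, then drop */

        printf("queued\n");
        return 0;

     exit_fail:
        queue_paused = 1;
     exit_free_skb:
        skb_freed = 1;
        printf("paused=%d freed=%d\n", queue_paused, skb_freed);
        return -1;
    }

    int main(void)
    {
        tx(true, false);   /* normal case: queued */
        tx(false, false);  /* device gone before a queue exists: freed, not paused */
        tx(true, true);    /* failure after the queue is known: paused, then freed */
        return 0;
    }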
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 5efd57833489..56f12358389d 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
@@ -1696,15 +1696,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, | |||
1696 | pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); | 1696 | pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); |
1697 | pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); | 1697 | pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); |
1698 | 1698 | ||
1699 | /*find bridge info */ | 1699 | if (bridge_pdev) { |
1700 | pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; | 1700 | /*find bridge info if available */ |
1701 | for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { | 1701 | pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; |
1702 | if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { | 1702 | for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { |
1703 | pcipriv->ndis_adapter.pcibridge_vendor = tmp; | 1703 | if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { |
1704 | RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, | 1704 | pcipriv->ndis_adapter.pcibridge_vendor = tmp; |
1705 | ("Pci Bridge Vendor is found index: %d\n", | 1705 | RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, |
1706 | tmp)); | 1706 | ("Pci Bridge Vendor is found index:" |
1707 | break; | 1707 | " %d\n", tmp)); |
1708 | break; | ||
1709 | } | ||
1708 | } | 1710 | } |
1709 | } | 1711 | } |
1710 | 1712 | ||
diff --git a/drivers/of/address.c b/drivers/of/address.c index da1f4b9605df..72c33fbe451d 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
@@ -610,6 +610,6 @@ void __iomem *of_iomap(struct device_node *np, int index) | |||
610 | if (of_address_to_resource(np, index, &res)) | 610 | if (of_address_to_resource(np, index, &res)) |
611 | return NULL; | 611 | return NULL; |
612 | 612 | ||
613 | return ioremap(res.start, 1 + res.end - res.start); | 613 | return ioremap(res.start, resource_size(&res)); |
614 | } | 614 | } |
615 | EXPORT_SYMBOL(of_iomap); | 615 | EXPORT_SYMBOL(of_iomap); |
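of_iomap() now maps resource_size(&res) bytes rather than open-coding 1 + res.end - res.start; both expressions are equal because a struct resource describes an inclusive [start, end] range. A minimal demonstration with a local copy of the helper:

    #include <stdio.h>

    /* Minimal stand-in: a resource is an inclusive [start, end] range. */
    struct resource { unsigned long start, end; };

    static unsigned long resource_size(const struct resource *res)
    {
        return res->end - res->start + 1;
    }

    int main(void)
    {
        struct resource res = { .start = 0x1000, .end = 0x1fff };
        /* 0x1000..0x1fff covers 0x1000 bytes */
        printf("size = 0x%lx\n", resource_size(&res));
        return 0;
    }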
diff --git a/drivers/of/base.c b/drivers/of/base.c index 02ed36719def..3ff22e32b602 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -610,8 +610,9 @@ EXPORT_SYMBOL(of_find_node_by_phandle); | |||
610 | * | 610 | * |
611 | * The out_value is modified only if a valid u32 value can be decoded. | 611 | * The out_value is modified only if a valid u32 value can be decoded. |
612 | */ | 612 | */ |
613 | int of_property_read_u32_array(const struct device_node *np, char *propname, | 613 | int of_property_read_u32_array(const struct device_node *np, |
614 | u32 *out_values, size_t sz) | 614 | const char *propname, u32 *out_values, |
615 | size_t sz) | ||
615 | { | 616 | { |
616 | struct property *prop = of_find_property(np, propname, NULL); | 617 | struct property *prop = of_find_property(np, propname, NULL); |
617 | const __be32 *val; | 618 | const __be32 *val; |
@@ -645,7 +646,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_array); | |||
645 | * | 646 | * |
646 | * The out_string pointer is modified only if a valid string can be decoded. | 647 | * The out_string pointer is modified only if a valid string can be decoded. |
647 | */ | 648 | */ |
648 | int of_property_read_string(struct device_node *np, char *propname, | 649 | int of_property_read_string(struct device_node *np, const char *propname, |
649 | const char **out_string) | 650 | const char **out_string) |
650 | { | 651 | { |
651 | struct property *prop = of_find_property(np, propname, NULL); | 652 | struct property *prop = of_find_property(np, propname, NULL); |
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c index 3007662ac614..ef0105fa52b1 100644 --- a/drivers/of/gpio.c +++ b/drivers/of/gpio.c | |||
@@ -127,8 +127,8 @@ EXPORT_SYMBOL(of_gpio_count); | |||
127 | * gpio chips. This function performs only one sanity check: whether gpio | 127 | * gpio chips. This function performs only one sanity check: whether gpio |
128 | * is less than ngpios (that is specified in the gpio_chip). | 128 | * is less than ngpios (that is specified in the gpio_chip). |
129 | */ | 129 | */ |
130 | static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, | 130 | int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, |
131 | const void *gpio_spec, u32 *flags) | 131 | const void *gpio_spec, u32 *flags) |
132 | { | 132 | { |
133 | const __be32 *gpio = gpio_spec; | 133 | const __be32 *gpio = gpio_spec; |
134 | const u32 n = be32_to_cpup(gpio); | 134 | const u32 n = be32_to_cpup(gpio); |
@@ -152,6 +152,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, | |||
152 | 152 | ||
153 | return n; | 153 | return n; |
154 | } | 154 | } |
155 | EXPORT_SYMBOL(of_gpio_simple_xlate); | ||
155 | 156 | ||
156 | /** | 157 | /** |
157 | * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) | 158 | * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) |
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a70fa89f76fd..220285760b68 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, | |||
110 | } | 110 | } |
111 | 111 | ||
112 | 112 | ||
113 | static struct acpi_dock_ops acpiphp_dock_ops = { | 113 | static const struct acpi_dock_ops acpiphp_dock_ops = { |
114 | .handler = handle_hotplug_event_func, | 114 | .handler = handle_hotplug_event_func, |
115 | }; | 115 | }; |
116 | 116 | ||
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 45e0191c35dd..1e88d4785321 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -769,4 +769,12 @@ config INTEL_OAKTRAIL | |||
769 | enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y | 769 | enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y |
770 | here; it will only load on supported platforms. | 770 | here; it will only load on supported platforms. |
771 | 771 | ||
772 | config SAMSUNG_Q10 | ||
773 | tristate "Samsung Q10 Extras" | ||
774 | depends on SERIO_I8042 | ||
775 | select BACKLIGHT_CLASS_DEVICE | ||
776 | ---help--- | ||
777 | This driver provides support for backlight control on Samsung Q10 | ||
778 | and related laptops, including Dell Latitude X200. | ||
779 | |||
772 | endif # X86_PLATFORM_DEVICES | 780 | endif # X86_PLATFORM_DEVICES |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index afc1f832aa67..293a320d9faa 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -44,3 +44,4 @@ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o | |||
44 | obj-$(CONFIG_MXM_WMI) += mxm-wmi.o | 44 | obj-$(CONFIG_MXM_WMI) += mxm-wmi.o |
45 | obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o | 45 | obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o |
46 | obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o | 46 | obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o |
47 | obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o | ||
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index e1c4938b301b..af2bb20cb2fb 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -99,6 +99,7 @@ enum acer_wmi_event_ids { | |||
99 | static const struct key_entry acer_wmi_keymap[] = { | 99 | static const struct key_entry acer_wmi_keymap[] = { |
100 | {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ | 100 | {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ |
101 | {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ | 101 | {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ |
102 | {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */ | ||
102 | {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ | 103 | {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ |
103 | {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ | 104 | {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ |
104 | {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ | 105 | {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ |
@@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { | |||
304 | .wireless = 2, | 305 | .wireless = 2, |
305 | }; | 306 | }; |
306 | 307 | ||
308 | static struct quirk_entry quirk_lenovo_ideapad_s205 = { | ||
309 | .wireless = 3, | ||
310 | }; | ||
311 | |||
307 | /* The Aspire One has a dummy ACPI-WMI interface - disable it */ | 312 | /* The Aspire One has a dummy ACPI-WMI interface - disable it */ |
308 | static struct dmi_system_id __devinitdata acer_blacklist[] = { | 313 | static struct dmi_system_id __devinitdata acer_blacklist[] = { |
309 | { | 314 | { |
@@ -450,6 +455,15 @@ static struct dmi_system_id acer_quirks[] = { | |||
450 | }, | 455 | }, |
451 | .driver_data = &quirk_medion_md_98300, | 456 | .driver_data = &quirk_medion_md_98300, |
452 | }, | 457 | }, |
458 | { | ||
459 | .callback = dmi_matched, | ||
460 | .ident = "Lenovo Ideapad S205", | ||
461 | .matches = { | ||
462 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
463 | DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"), | ||
464 | }, | ||
465 | .driver_data = &quirk_lenovo_ideapad_s205, | ||
466 | }, | ||
453 | {} | 467 | {} |
454 | }; | 468 | }; |
455 | 469 | ||
@@ -542,6 +556,12 @@ struct wmi_interface *iface) | |||
542 | return AE_ERROR; | 556 | return AE_ERROR; |
543 | *value = result & 0x1; | 557 | *value = result & 0x1; |
544 | return AE_OK; | 558 | return AE_OK; |
559 | case 3: | ||
560 | err = ec_read(0x78, &result); | ||
561 | if (err) | ||
562 | return AE_ERROR; | ||
563 | *value = result & 0x1; | ||
564 | return AE_OK; | ||
545 | default: | 565 | default: |
546 | err = ec_read(0xA, &result); | 566 | err = ec_read(0xA, &result); |
547 | if (err) | 567 | if (err) |
@@ -1266,8 +1286,13 @@ static void acer_rfkill_update(struct work_struct *ignored) | |||
1266 | acpi_status status; | 1286 | acpi_status status; |
1267 | 1287 | ||
1268 | status = get_u32(&state, ACER_CAP_WIRELESS); | 1288 | status = get_u32(&state, ACER_CAP_WIRELESS); |
1269 | if (ACPI_SUCCESS(status)) | 1289 | if (ACPI_SUCCESS(status)) { |
1270 | rfkill_set_sw_state(wireless_rfkill, !state); | 1290 | if (quirks->wireless == 3) { |
1291 | rfkill_set_hw_state(wireless_rfkill, !state); | ||
1292 | } else { | ||
1293 | rfkill_set_sw_state(wireless_rfkill, !state); | ||
1294 | } | ||
1295 | } | ||
1271 | 1296 | ||
1272 | if (has_cap(ACER_CAP_BLUETOOTH)) { | 1297 | if (has_cap(ACER_CAP_BLUETOOTH)) { |
1273 | status = get_u32(&state, ACER_CAP_BLUETOOTH); | 1298 | status = get_u32(&state, ACER_CAP_BLUETOOTH); |
@@ -1400,6 +1425,9 @@ static ssize_t show_bool_threeg(struct device *dev, | |||
1400 | { | 1425 | { |
1401 | u32 result; \ | 1426 | u32 result; \ |
1402 | acpi_status status; | 1427 | acpi_status status; |
1428 | |||
1429 | pr_info("This threeg sysfs will be removed in 2012" | ||
1430 | " - used by: %s\n", current->comm); | ||
1403 | if (wmi_has_guid(WMID_GUID3)) | 1431 | if (wmi_has_guid(WMID_GUID3)) |
1404 | status = wmid3_get_device_status(&result, | 1432 | status = wmid3_get_device_status(&result, |
1405 | ACER_WMID3_GDS_THREEG); | 1433 | ACER_WMID3_GDS_THREEG); |
@@ -1415,8 +1443,10 @@ static ssize_t set_bool_threeg(struct device *dev, | |||
1415 | { | 1443 | { |
1416 | u32 tmp = simple_strtoul(buf, NULL, 10); | 1444 | u32 tmp = simple_strtoul(buf, NULL, 10); |
1417 | acpi_status status = set_u32(tmp, ACER_CAP_THREEG); | 1445 | acpi_status status = set_u32(tmp, ACER_CAP_THREEG); |
1418 | if (ACPI_FAILURE(status)) | 1446 | pr_info("This threeg sysfs will be removed in 2012" |
1419 | return -EINVAL; | 1447 | " - used by: %s\n", current->comm); |
1448 | if (ACPI_FAILURE(status)) | ||
1449 | return -EINVAL; | ||
1420 | return count; | 1450 | return count; |
1421 | } | 1451 | } |
1422 | static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, | 1452 | static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, |
@@ -1425,6 +1455,8 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, | |||
1425 | static ssize_t show_interface(struct device *dev, struct device_attribute *attr, | 1455 | static ssize_t show_interface(struct device *dev, struct device_attribute *attr, |
1426 | char *buf) | 1456 | char *buf) |
1427 | { | 1457 | { |
1458 | pr_info("This interface sysfs will be removed in 2012" | ||
1459 | " - used by: %s\n", current->comm); | ||
1428 | switch (interface->type) { | 1460 | switch (interface->type) { |
1429 | case ACER_AMW0: | 1461 | case ACER_AMW0: |
1430 | return sprintf(buf, "AMW0\n"); | 1462 | return sprintf(buf, "AMW0\n"); |
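For the Ideapad S205 quirk (wireless == 3) added above, the driver reads EC register 0x78 and takes bit 0 as the radio state, and acer_rfkill_update() reports it through rfkill_set_hw_state() instead of the soft-block path. A small sketch of the bit handling; the sample EC values are made up:

    #include <stdio.h>

    /* The S205 quirk reads EC offset 0x78 and uses bit 0 as "radio on". */
    static int radio_on(unsigned char ec_byte)
    {
        return ec_byte & 0x1;
    }

    int main(void)
    {
        unsigned char samples[] = { 0x00, 0x01, 0xfe, 0xff };
        for (unsigned i = 0; i < sizeof(samples); i++) {
            int state = radio_on(samples[i]);
            /* rfkill wants "blocked", i.e. the inverse of "radio on" */
            printf("EC 0x78=0x%02x -> hw_blocked=%d\n", samples[i], !state);
        }
        return 0;
    }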
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index fca3489218b7..760c6d7624fe 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c | |||
@@ -182,6 +182,7 @@ static const struct bios_settings_t bios_tbl[] = { | |||
182 | {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, | 182 | {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, |
183 | {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, | 183 | {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, |
184 | {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, | 184 | {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, |
185 | {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, | ||
185 | /* Acer 531 */ | 186 | /* Acer 531 */ |
186 | {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, | 187 | {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, |
187 | /* Gateway */ | 188 | /* Gateway */ |
@@ -703,15 +704,15 @@ MODULE_LICENSE("GPL"); | |||
703 | MODULE_AUTHOR("Peter Feuerer"); | 704 | MODULE_AUTHOR("Peter Feuerer"); |
704 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); | 705 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); |
705 | MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); | 706 | MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); |
706 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:"); | 707 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:"); |
707 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:"); | 708 | MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:"); |
708 | MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); | 709 | MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); |
709 | MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); | 710 | MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); |
710 | MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); | 711 | MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); |
711 | MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:"); | 712 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:"); |
712 | MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:"); | 713 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:"); |
713 | MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:"); | 714 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:"); |
714 | MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:"); | 715 | MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:"); |
715 | 716 | ||
716 | module_init(acerhdf_init); | 717 | module_init(acerhdf_init); |
717 | module_exit(acerhdf_exit); | 718 | module_exit(acerhdf_exit); |
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index d65df92e2acc..fa6d7ec68b26 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -70,11 +70,10 @@ MODULE_LICENSE("GPL"); | |||
70 | * WAPF defines the behavior of the Fn+Fx wlan key | 70 | * WAPF defines the behavior of the Fn+Fx wlan key |
71 | * The significance of values is yet to be found, but | 71 | * The significance of values is yet to be found, but |
72 | * most of the time: | 72 | * most of the time: |
73 | * 0x0 will do nothing | 73 | * Bit | Bluetooth | WLAN |
74 | * 0x1 will allow to control the device with Fn+Fx key. | 74 | * 0 | Hardware | Hardware |
75 | * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key | 75 | * 1 | Hardware | Software |
76 | * 0x5 like 0x1 or 0x4 | 76 | * 4 | Software | Software |
77 | * So, if something doesn't work as you want, just try other values =) | ||
78 | */ | 77 | */ |
79 | static uint wapf = 1; | 78 | static uint wapf = 1; |
80 | module_param(wapf, uint, 0444); | 79 | module_param(wapf, uint, 0444); |
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 0580d99b0798..b0859d4183e8 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c | |||
@@ -38,6 +38,24 @@ MODULE_LICENSE("GPL"); | |||
38 | 38 | ||
39 | MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID); | 39 | MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID); |
40 | 40 | ||
41 | /* | ||
42 | * WAPF defines the behavior of the Fn+Fx wlan key | ||
43 | * The significance of values is yet to be found, but | ||
44 | * most of the time: | ||
45 | * Bit | Bluetooth | WLAN | ||
46 | * 0 | Hardware | Hardware | ||
47 | * 1 | Hardware | Software | ||
48 | * 4 | Software | Software | ||
49 | */ | ||
50 | static uint wapf; | ||
51 | module_param(wapf, uint, 0444); | ||
52 | MODULE_PARM_DESC(wapf, "WAPF value"); | ||
53 | |||
54 | static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) | ||
55 | { | ||
56 | driver->wapf = wapf; | ||
57 | } | ||
58 | |||
41 | static const struct key_entry asus_nb_wmi_keymap[] = { | 59 | static const struct key_entry asus_nb_wmi_keymap[] = { |
42 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | 60 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, |
43 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, | 61 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, |
@@ -53,16 +71,16 @@ static const struct key_entry asus_nb_wmi_keymap[] = { | |||
53 | { KE_KEY, 0x51, { KEY_WWW } }, | 71 | { KE_KEY, 0x51, { KEY_WWW } }, |
54 | { KE_KEY, 0x55, { KEY_CALC } }, | 72 | { KE_KEY, 0x55, { KEY_CALC } }, |
55 | { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */ | 73 | { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */ |
56 | { KE_KEY, 0x5D, { KEY_WLAN } }, | 74 | { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */ |
57 | { KE_KEY, 0x5E, { KEY_WLAN } }, | 75 | { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */ |
58 | { KE_KEY, 0x5F, { KEY_WLAN } }, | 76 | { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */ |
59 | { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, | 77 | { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, |
60 | { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, | 78 | { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, |
61 | { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, | 79 | { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, |
62 | { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, | 80 | { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, |
63 | { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, | 81 | { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, |
64 | { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, | ||
65 | { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, | 82 | { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, |
83 | { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, | ||
66 | { KE_KEY, 0x82, { KEY_CAMERA } }, | 84 | { KE_KEY, 0x82, { KEY_CAMERA } }, |
67 | { KE_KEY, 0x88, { KEY_RFKILL } }, | 85 | { KE_KEY, 0x88, { KEY_RFKILL } }, |
68 | { KE_KEY, 0x8A, { KEY_PROG1 } }, | 86 | { KE_KEY, 0x8A, { KEY_PROG1 } }, |
@@ -81,6 +99,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { | |||
81 | .keymap = asus_nb_wmi_keymap, | 99 | .keymap = asus_nb_wmi_keymap, |
82 | .input_name = "Asus WMI hotkeys", | 100 | .input_name = "Asus WMI hotkeys", |
83 | .input_phys = ASUS_NB_WMI_FILE "/input0", | 101 | .input_phys = ASUS_NB_WMI_FILE "/input0", |
102 | .quirks = asus_nb_wmi_quirks, | ||
84 | }; | 103 | }; |
85 | 104 | ||
86 | 105 | ||
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 65b66aa44c78..95cba9ebf6c0 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/debugfs.h> | 44 | #include <linux/debugfs.h> |
45 | #include <linux/seq_file.h> | 45 | #include <linux/seq_file.h> |
46 | #include <linux/platform_device.h> | 46 | #include <linux/platform_device.h> |
47 | #include <linux/thermal.h> | ||
47 | #include <acpi/acpi_bus.h> | 48 | #include <acpi/acpi_bus.h> |
48 | #include <acpi/acpi_drivers.h> | 49 | #include <acpi/acpi_drivers.h> |
49 | 50 | ||
@@ -66,6 +67,8 @@ MODULE_LICENSE("GPL"); | |||
66 | #define NOTIFY_BRNUP_MAX 0x1f | 67 | #define NOTIFY_BRNUP_MAX 0x1f |
67 | #define NOTIFY_BRNDOWN_MIN 0x20 | 68 | #define NOTIFY_BRNDOWN_MIN 0x20 |
68 | #define NOTIFY_BRNDOWN_MAX 0x2e | 69 | #define NOTIFY_BRNDOWN_MAX 0x2e |
70 | #define NOTIFY_KBD_BRTUP 0xc4 | ||
71 | #define NOTIFY_KBD_BRTDWN 0xc5 | ||
69 | 72 | ||
70 | /* WMI Methods */ | 73 | /* WMI Methods */ |
71 | #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ | 74 | #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ |
@@ -93,6 +96,7 @@ MODULE_LICENSE("GPL"); | |||
93 | /* Wireless */ | 96 | /* Wireless */ |
94 | #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 | 97 | #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 |
95 | #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 | 98 | #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 |
99 | #define ASUS_WMI_DEVID_CWAP 0x00010003 | ||
96 | #define ASUS_WMI_DEVID_WLAN 0x00010011 | 100 | #define ASUS_WMI_DEVID_WLAN 0x00010011 |
97 | #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 | 101 | #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 |
98 | #define ASUS_WMI_DEVID_GPS 0x00010015 | 102 | #define ASUS_WMI_DEVID_GPS 0x00010015 |
@@ -102,6 +106,12 @@ MODULE_LICENSE("GPL"); | |||
102 | 106 | ||
103 | /* Leds */ | 107 | /* Leds */ |
104 | /* 0x000200XX and 0x000400XX */ | 108 | /* 0x000200XX and 0x000400XX */ |
109 | #define ASUS_WMI_DEVID_LED1 0x00020011 | ||
110 | #define ASUS_WMI_DEVID_LED2 0x00020012 | ||
111 | #define ASUS_WMI_DEVID_LED3 0x00020013 | ||
112 | #define ASUS_WMI_DEVID_LED4 0x00020014 | ||
113 | #define ASUS_WMI_DEVID_LED5 0x00020015 | ||
114 | #define ASUS_WMI_DEVID_LED6 0x00020016 | ||
105 | 115 | ||
106 | /* Backlight and Brightness */ | 116 | /* Backlight and Brightness */ |
107 | #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 | 117 | #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 |
@@ -174,13 +184,18 @@ struct asus_wmi { | |||
174 | 184 | ||
175 | struct led_classdev tpd_led; | 185 | struct led_classdev tpd_led; |
176 | int tpd_led_wk; | 186 | int tpd_led_wk; |
187 | struct led_classdev kbd_led; | ||
188 | int kbd_led_wk; | ||
177 | struct workqueue_struct *led_workqueue; | 189 | struct workqueue_struct *led_workqueue; |
178 | struct work_struct tpd_led_work; | 190 | struct work_struct tpd_led_work; |
191 | struct work_struct kbd_led_work; | ||
179 | 192 | ||
180 | struct asus_rfkill wlan; | 193 | struct asus_rfkill wlan; |
181 | struct asus_rfkill bluetooth; | 194 | struct asus_rfkill bluetooth; |
182 | struct asus_rfkill wimax; | 195 | struct asus_rfkill wimax; |
183 | struct asus_rfkill wwan3g; | 196 | struct asus_rfkill wwan3g; |
197 | struct asus_rfkill gps; | ||
198 | struct asus_rfkill uwb; | ||
184 | 199 | ||
185 | struct hotplug_slot *hotplug_slot; | 200 | struct hotplug_slot *hotplug_slot; |
186 | struct mutex hotplug_lock; | 201 | struct mutex hotplug_lock; |
@@ -205,6 +220,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus) | |||
205 | asus->inputdev->phys = asus->driver->input_phys; | 220 | asus->inputdev->phys = asus->driver->input_phys; |
206 | asus->inputdev->id.bustype = BUS_HOST; | 221 | asus->inputdev->id.bustype = BUS_HOST; |
207 | asus->inputdev->dev.parent = &asus->platform_device->dev; | 222 | asus->inputdev->dev.parent = &asus->platform_device->dev; |
223 | set_bit(EV_REP, asus->inputdev->evbit); | ||
208 | 224 | ||
209 | err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); | 225 | err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); |
210 | if (err) | 226 | if (err) |
@@ -359,30 +375,80 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) | |||
359 | return read_tpd_led_state(asus); | 375 | return read_tpd_led_state(asus); |
360 | } | 376 | } |
361 | 377 | ||
362 | static int asus_wmi_led_init(struct asus_wmi *asus) | 378 | static void kbd_led_update(struct work_struct *work) |
363 | { | 379 | { |
364 | int rv; | 380 | int ctrl_param = 0; |
381 | struct asus_wmi *asus; | ||
365 | 382 | ||
366 | if (read_tpd_led_state(asus) < 0) | 383 | asus = container_of(work, struct asus_wmi, kbd_led_work); |
367 | return 0; | ||
368 | 384 | ||
369 | asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); | 385 | /* |
370 | if (!asus->led_workqueue) | 386 | * bits 0-2: level |
371 | return -ENOMEM; | 387 | * bit 7: light on/off |
372 | INIT_WORK(&asus->tpd_led_work, tpd_led_update); | 388 | */ |
389 | if (asus->kbd_led_wk > 0) | ||
390 | ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); | ||
373 | 391 | ||
374 | asus->tpd_led.name = "asus::touchpad"; | 392 | asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); |
375 | asus->tpd_led.brightness_set = tpd_led_set; | 393 | } |
376 | asus->tpd_led.brightness_get = tpd_led_get; | ||
377 | asus->tpd_led.max_brightness = 1; | ||
378 | 394 | ||
379 | rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led); | 395 | static int kbd_led_read(struct asus_wmi *asus, int *level, int *env) |
380 | if (rv) { | 396 | { |
381 | destroy_workqueue(asus->led_workqueue); | 397 | int retval; |
382 | return rv; | 398 | |
399 | /* | ||
400 | * bits 0-2: level | ||
401 | * bit 7: light on/off | ||
402 | * bit 8-10: environment (0: dark, 1: normal, 2: light) | ||
403 | * bit 17: status unknown | ||
404 | */ | ||
405 | retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT, | ||
406 | 0xFFFF); | ||
407 | |||
408 | /* Unknown status is considered as off */ | ||
409 | if (retval == 0x8000) | ||
410 | retval = 0; | ||
411 | |||
412 | if (retval >= 0) { | ||
413 | if (level) | ||
414 | *level = retval & 0x80 ? retval & 0x7F : 0; | ||
415 | if (env) | ||
416 | *env = (retval >> 8) & 0x7F; | ||
417 | retval = 0; | ||
383 | } | 418 | } |
384 | 419 | ||
385 | return 0; | 420 | return retval; |
421 | } | ||
422 | |||
423 | static void kbd_led_set(struct led_classdev *led_cdev, | ||
424 | enum led_brightness value) | ||
425 | { | ||
426 | struct asus_wmi *asus; | ||
427 | |||
428 | asus = container_of(led_cdev, struct asus_wmi, kbd_led); | ||
429 | |||
430 | if (value > asus->kbd_led.max_brightness) | ||
431 | value = asus->kbd_led.max_brightness; | ||
432 | else if (value < 0) | ||
433 | value = 0; | ||
434 | |||
435 | asus->kbd_led_wk = value; | ||
436 | queue_work(asus->led_workqueue, &asus->kbd_led_work); | ||
437 | } | ||
438 | |||
439 | static enum led_brightness kbd_led_get(struct led_classdev *led_cdev) | ||
440 | { | ||
441 | struct asus_wmi *asus; | ||
442 | int retval, value; | ||
443 | |||
444 | asus = container_of(led_cdev, struct asus_wmi, kbd_led); | ||
445 | |||
446 | retval = kbd_led_read(asus, &value, NULL); | ||
447 | |||
448 | if (retval < 0) | ||
449 | return retval; | ||
450 | |||
451 | return value; | ||
386 | } | 452 | } |
387 | 453 | ||
388 | static void asus_wmi_led_exit(struct asus_wmi *asus) | 454 | static void asus_wmi_led_exit(struct asus_wmi *asus) |
@@ -393,6 +459,48 @@ static void asus_wmi_led_exit(struct asus_wmi *asus) | |||
393 | destroy_workqueue(asus->led_workqueue); | 459 | destroy_workqueue(asus->led_workqueue); |
394 | } | 460 | } |
395 | 461 | ||
462 | static int asus_wmi_led_init(struct asus_wmi *asus) | ||
463 | { | ||
464 | int rv = 0; | ||
465 | |||
466 | asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); | ||
467 | if (!asus->led_workqueue) | ||
468 | return -ENOMEM; | ||
469 | |||
470 | if (read_tpd_led_state(asus) >= 0) { | ||
471 | INIT_WORK(&asus->tpd_led_work, tpd_led_update); | ||
472 | |||
473 | asus->tpd_led.name = "asus::touchpad"; | ||
474 | asus->tpd_led.brightness_set = tpd_led_set; | ||
475 | asus->tpd_led.brightness_get = tpd_led_get; | ||
476 | asus->tpd_led.max_brightness = 1; | ||
477 | |||
478 | rv = led_classdev_register(&asus->platform_device->dev, | ||
479 | &asus->tpd_led); | ||
480 | if (rv) | ||
481 | goto error; | ||
482 | } | ||
483 | |||
484 | if (kbd_led_read(asus, NULL, NULL) >= 0) { | ||
485 | INIT_WORK(&asus->kbd_led_work, kbd_led_update); | ||
486 | |||
487 | asus->kbd_led.name = "asus::kbd_backlight"; | ||
488 | asus->kbd_led.brightness_set = kbd_led_set; | ||
489 | asus->kbd_led.brightness_get = kbd_led_get; | ||
490 | asus->kbd_led.max_brightness = 3; | ||
491 | |||
492 | rv = led_classdev_register(&asus->platform_device->dev, | ||
493 | &asus->kbd_led); | ||
494 | } | ||
495 | |||
496 | error: | ||
497 | if (rv) | ||
498 | asus_wmi_led_exit(asus); | ||
499 | |||
500 | return rv; | ||
501 | } | ||
502 | |||
503 | |||
396 | /* | 504 | /* |
397 | * PCI hotplug (for wlan rfkill) | 505 | * PCI hotplug (for wlan rfkill) |
398 | */ | 506 | */ |
@@ -729,6 +837,16 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus) | |||
729 | rfkill_destroy(asus->wwan3g.rfkill); | 837 | rfkill_destroy(asus->wwan3g.rfkill); |
730 | asus->wwan3g.rfkill = NULL; | 838 | asus->wwan3g.rfkill = NULL; |
731 | } | 839 | } |
840 | if (asus->gps.rfkill) { | ||
841 | rfkill_unregister(asus->gps.rfkill); | ||
842 | rfkill_destroy(asus->gps.rfkill); | ||
843 | asus->gps.rfkill = NULL; | ||
844 | } | ||
845 | if (asus->uwb.rfkill) { | ||
846 | rfkill_unregister(asus->uwb.rfkill); | ||
847 | rfkill_destroy(asus->uwb.rfkill); | ||
848 | asus->uwb.rfkill = NULL; | ||
849 | } | ||
732 | } | 850 | } |
733 | 851 | ||
734 | static int asus_wmi_rfkill_init(struct asus_wmi *asus) | 852 | static int asus_wmi_rfkill_init(struct asus_wmi *asus) |
@@ -763,6 +881,18 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus) | |||
763 | if (result && result != -ENODEV) | 881 | if (result && result != -ENODEV) |
764 | goto exit; | 882 | goto exit; |
765 | 883 | ||
884 | result = asus_new_rfkill(asus, &asus->gps, "asus-gps", | ||
885 | RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS); | ||
886 | |||
887 | if (result && result != -ENODEV) | ||
888 | goto exit; | ||
889 | |||
890 | result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb", | ||
891 | RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB); | ||
892 | |||
893 | if (result && result != -ENODEV) | ||
894 | goto exit; | ||
895 | |||
766 | if (!asus->driver->hotplug_wireless) | 896 | if (!asus->driver->hotplug_wireless) |
767 | goto exit; | 897 | goto exit; |
768 | 898 | ||
@@ -797,8 +927,8 @@ exit: | |||
797 | * Hwmon device | 927 | * Hwmon device |
798 | */ | 928 | */ |
799 | static ssize_t asus_hwmon_pwm1(struct device *dev, | 929 | static ssize_t asus_hwmon_pwm1(struct device *dev, |
800 | struct device_attribute *attr, | 930 | struct device_attribute *attr, |
801 | char *buf) | 931 | char *buf) |
802 | { | 932 | { |
803 | struct asus_wmi *asus = dev_get_drvdata(dev); | 933 | struct asus_wmi *asus = dev_get_drvdata(dev); |
804 | u32 value; | 934 | u32 value; |
@@ -809,7 +939,7 @@ static ssize_t asus_hwmon_pwm1(struct device *dev, | |||
809 | if (err < 0) | 939 | if (err < 0) |
810 | return err; | 940 | return err; |
811 | 941 | ||
812 | value |= 0xFF; | 942 | value &= 0xFF; |
813 | 943 | ||
814 | if (value == 1) /* Low Speed */ | 944 | if (value == 1) /* Low Speed */ |
815 | value = 85; | 945 | value = 85; |
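The one-character fix above matters: value |= 0xFF forces the low byte to all ones, so the fan-speed comparisons that follow (1, 2, 3) could never match, whereas value &= 0xFF extracts just the low byte of the DSTS word. A quick standalone comparison with a hypothetical raw value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int raw = 0x00010002;        /* hypothetical DSTS word: low byte = 2 */

        unsigned int ored  = raw | 0xFF;      /* old code: low byte forced to 0xFF */
        unsigned int anded = raw & 0xFF;      /* fixed code: keep only the low byte */

        printf("raw=0x%08x  |=0xFF -> 0x%08x  &=0xFF -> 0x%08x\n",
               raw, ored, anded);
        /* only 'anded' can ever equal the 1/2/3 speed codes the function tests */
        return 0;
    }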
@@ -825,7 +955,26 @@ static ssize_t asus_hwmon_pwm1(struct device *dev, | |||
825 | return sprintf(buf, "%d\n", value); | 955 | return sprintf(buf, "%d\n", value); |
826 | } | 956 | } |
827 | 957 | ||
958 | static ssize_t asus_hwmon_temp1(struct device *dev, | ||
959 | struct device_attribute *attr, | ||
960 | char *buf) | ||
961 | { | ||
962 | struct asus_wmi *asus = dev_get_drvdata(dev); | ||
963 | u32 value; | ||
964 | int err; | ||
965 | |||
966 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value); | ||
967 | |||
968 | if (err < 0) | ||
969 | return err; | ||
970 | |||
971 | value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000; | ||
972 | |||
973 | return sprintf(buf, "%d\n", value); | ||
974 | } | ||
975 | |||
828 | static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0); | 976 | static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0); |
977 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0); | ||
829 | 978 | ||
830 | static ssize_t | 979 | static ssize_t |
831 | show_name(struct device *dev, struct device_attribute *attr, char *buf) | 980 | show_name(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -836,12 +985,13 @@ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); | |||
836 | 985 | ||
837 | static struct attribute *hwmon_attributes[] = { | 986 | static struct attribute *hwmon_attributes[] = { |
838 | &sensor_dev_attr_pwm1.dev_attr.attr, | 987 | &sensor_dev_attr_pwm1.dev_attr.attr, |
988 | &sensor_dev_attr_temp1_input.dev_attr.attr, | ||
839 | &sensor_dev_attr_name.dev_attr.attr, | 989 | &sensor_dev_attr_name.dev_attr.attr, |
840 | NULL | 990 | NULL |
841 | }; | 991 | }; |
842 | 992 | ||
843 | static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, | 993 | static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, |
844 | struct attribute *attr, int idx) | 994 | struct attribute *attr, int idx) |
845 | { | 995 | { |
846 | struct device *dev = container_of(kobj, struct device, kobj); | 996 | struct device *dev = container_of(kobj, struct device, kobj); |
847 | struct platform_device *pdev = to_platform_device(dev->parent); | 997 | struct platform_device *pdev = to_platform_device(dev->parent); |
@@ -852,6 +1002,8 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, | |||
852 | 1002 | ||
853 | if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) | 1003 | if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) |
854 | dev_id = ASUS_WMI_DEVID_FAN_CTRL; | 1004 | dev_id = ASUS_WMI_DEVID_FAN_CTRL; |
1005 | else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr) | ||
1006 | dev_id = ASUS_WMI_DEVID_THERMAL_CTRL; | ||
855 | 1007 | ||
856 | if (dev_id != -1) { | 1008 | if (dev_id != -1) { |
857 | int err = asus_wmi_get_devstate(asus, dev_id, &value); | 1009 | int err = asus_wmi_get_devstate(asus, dev_id, &value); |
@@ -869,9 +1021,13 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, | |||
869 | * - reserved bits are non-zero | 1021 | * - reserved bits are non-zero |
870 | * - sfun and presence bit are not set | 1022 | * - sfun and presence bit are not set |
871 | */ | 1023 | */ |
872 | if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 | 1024 | if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 |
873 | || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) | 1025 | || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) |
874 | ok = false; | 1026 | ok = false; |
1027 | } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) { | ||
1028 | /* If value is zero, something is clearly wrong */ | ||
1029 | if (value == 0) | ||
1030 | ok = false; | ||
875 | } | 1031 | } |
876 | 1032 | ||
877 | return ok ? attr->mode : 0; | 1033 | return ok ? attr->mode : 0; |
@@ -904,6 +1060,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus) | |||
904 | pr_err("Could not register asus hwmon device\n"); | 1060 | pr_err("Could not register asus hwmon device\n"); |
905 | return PTR_ERR(hwmon); | 1061 | return PTR_ERR(hwmon); |
906 | } | 1062 | } |
1063 | dev_set_drvdata(hwmon, asus); | ||
907 | asus->hwmon_device = hwmon; | 1064 | asus->hwmon_device = hwmon; |
908 | result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); | 1065 | result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); |
909 | if (result) | 1066 | if (result) |
@@ -1060,6 +1217,8 @@ static void asus_wmi_notify(u32 value, void *context) | |||
1060 | acpi_status status; | 1217 | acpi_status status; |
1061 | int code; | 1218 | int code; |
1062 | int orig_code; | 1219 | int orig_code; |
1220 | unsigned int key_value = 1; | ||
1221 | bool autorelease = 1; | ||
1063 | 1222 | ||
1064 | status = wmi_get_event_data(value, &response); | 1223 | status = wmi_get_event_data(value, &response); |
1065 | if (status != AE_OK) { | 1224 | if (status != AE_OK) { |
@@ -1075,6 +1234,13 @@ static void asus_wmi_notify(u32 value, void *context) | |||
1075 | code = obj->integer.value; | 1234 | code = obj->integer.value; |
1076 | orig_code = code; | 1235 | orig_code = code; |
1077 | 1236 | ||
1237 | if (asus->driver->key_filter) { | ||
1238 | asus->driver->key_filter(asus->driver, &code, &key_value, | ||
1239 | &autorelease); | ||
1240 | if (code == ASUS_WMI_KEY_IGNORE) | ||
1241 | goto exit; | ||
1242 | } | ||
1243 | |||
1078 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | 1244 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) |
1079 | code = NOTIFY_BRNUP_MIN; | 1245 | code = NOTIFY_BRNUP_MIN; |
1080 | else if (code >= NOTIFY_BRNDOWN_MIN && | 1246 | else if (code >= NOTIFY_BRNDOWN_MIN && |
@@ -1084,7 +1250,8 @@ static void asus_wmi_notify(u32 value, void *context) | |||
1084 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { | 1250 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { |
1085 | if (!acpi_video_backlight_support()) | 1251 | if (!acpi_video_backlight_support()) |
1086 | asus_wmi_backlight_notify(asus, orig_code); | 1252 | asus_wmi_backlight_notify(asus, orig_code); |
1087 | } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true)) | 1253 | } else if (!sparse_keymap_report_event(asus->inputdev, code, |
1254 | key_value, autorelease)) | ||
1088 | pr_info("Unknown key %x pressed\n", code); | 1255 | pr_info("Unknown key %x pressed\n", code); |
1089 | 1256 | ||
1090 | exit: | 1257 | exit: |
@@ -1164,14 +1331,18 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); | |||
1164 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, | 1331 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, |
1165 | const char *buf, size_t count) | 1332 | const char *buf, size_t count) |
1166 | { | 1333 | { |
1167 | int value; | 1334 | int value, rv; |
1168 | 1335 | ||
1169 | if (!count || sscanf(buf, "%i", &value) != 1) | 1336 | if (!count || sscanf(buf, "%i", &value) != 1) |
1170 | return -EINVAL; | 1337 | return -EINVAL; |
1171 | if (value < 0 || value > 2) | 1338 | if (value < 0 || value > 2) |
1172 | return -EINVAL; | 1339 | return -EINVAL; |
1173 | 1340 | ||
1174 | return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); | 1341 | rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); |
1342 | if (rv < 0) | ||
1343 | return rv; | ||
1344 | |||
1345 | return count; | ||
1175 | } | 1346 | } |
1176 | 1347 | ||
1177 | static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); | 1348 | static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); |
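The store_cpufv() change fixes a common sysfs pitfall: a store handler must return the number of bytes consumed (or a negative errno), not whatever the underlying call happened to return. A hedged sketch of the pattern, where my_apply_value() is a placeholder for the real hardware call:

/* Sysfs store pattern: validate, apply, then report the whole write
 * as consumed. my_apply_value() is a placeholder. */
static ssize_t store_example(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int value, err;

	if (!count || sscanf(buf, "%i", &value) != 1)
		return -EINVAL;
	if (value < 0 || value > 2)
		return -EINVAL;

	err = my_apply_value(value);	/* may legitimately return 0 */
	if (err < 0)
		return err;

	return count;			/* success: the write was consumed */
}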
@@ -1234,7 +1405,7 @@ static int asus_wmi_platform_init(struct asus_wmi *asus) | |||
1234 | 1405 | ||
1235 | /* We don't know yet what to do with this version... */ | 1406 | /* We don't know yet what to do with this version... */ |
1236 | if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) { | 1407 | if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) { |
1237 | pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF); | 1408 | pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF); |
1238 | asus->spec = rv; | 1409 | asus->spec = rv; |
1239 | } | 1410 | } |
1240 | 1411 | ||
@@ -1266,6 +1437,12 @@ static int asus_wmi_platform_init(struct asus_wmi *asus) | |||
1266 | return -ENODEV; | 1437 | return -ENODEV; |
1267 | } | 1438 | } |
1268 | 1439 | ||
1440 | /* CWAP allows defining the behavior of the Fn+F2 key; | ||
1441 | * this method doesn't seem to be present on Eee PCs */ | ||
Wait, placeholder to be replaced by the paired replace below.
1442 | if (asus->driver->wapf >= 0) | ||
1443 | asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP, | ||
1444 | asus->driver->wapf, NULL); | ||
1445 | |||
1269 | return asus_wmi_sysfs_init(asus->platform_device); | 1446 | return asus_wmi_sysfs_init(asus->platform_device); |
1270 | } | 1447 | } |
1271 | 1448 | ||
@@ -1568,6 +1745,14 @@ static int asus_hotk_restore(struct device *device) | |||
1568 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); | 1745 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); |
1569 | rfkill_set_sw_state(asus->wwan3g.rfkill, bl); | 1746 | rfkill_set_sw_state(asus->wwan3g.rfkill, bl); |
1570 | } | 1747 | } |
1748 | if (asus->gps.rfkill) { | ||
1749 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS); | ||
1750 | rfkill_set_sw_state(asus->gps.rfkill, bl); | ||
1751 | } | ||
1752 | if (asus->uwb.rfkill) { | ||
1753 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB); | ||
1754 | rfkill_set_sw_state(asus->uwb.rfkill, bl); | ||
1755 | } | ||
1571 | 1756 | ||
1572 | return 0; | 1757 | return 0; |
1573 | } | 1758 | } |
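The restore handler now covers the GPS and UWB radios as well; the per-radio pattern is always the same, sketched here with hedging (the blocked flag is simply the inverse of the reported device state):

/* Restore pattern used above for each radio: device state 1 means the
 * radio is on, so the rfkill soft-block flag is its inverse. */
static void example_restore_radio(struct asus_wmi *asus, struct rfkill *rfk,
				  u32 dev_id)
{
	if (!rfk)
		return;
	rfkill_set_sw_state(rfk, !asus_wmi_get_devstate_simple(asus, dev_id));
}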
@@ -1604,7 +1789,7 @@ static int asus_wmi_probe(struct platform_device *pdev) | |||
1604 | 1789 | ||
1605 | static bool used; | 1790 | static bool used; |
1606 | 1791 | ||
1607 | int asus_wmi_register_driver(struct asus_wmi_driver *driver) | 1792 | int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver) |
1608 | { | 1793 | { |
1609 | struct platform_driver *platform_driver; | 1794 | struct platform_driver *platform_driver; |
1610 | struct platform_device *platform_device; | 1795 | struct platform_device *platform_device; |
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index c044522c8766..8147c10161cc 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h | |||
@@ -29,12 +29,15 @@ | |||
29 | 29 | ||
30 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
31 | 31 | ||
32 | #define ASUS_WMI_KEY_IGNORE (-1) | ||
33 | |||
32 | struct module; | 34 | struct module; |
33 | struct key_entry; | 35 | struct key_entry; |
34 | struct asus_wmi; | 36 | struct asus_wmi; |
35 | 37 | ||
36 | struct asus_wmi_driver { | 38 | struct asus_wmi_driver { |
37 | bool hotplug_wireless; | 39 | bool hotplug_wireless; |
40 | int wapf; | ||
38 | 41 | ||
39 | const char *name; | 42 | const char *name; |
40 | struct module *owner; | 43 | struct module *owner; |
@@ -44,6 +47,10 @@ struct asus_wmi_driver { | |||
44 | const struct key_entry *keymap; | 47 | const struct key_entry *keymap; |
45 | const char *input_name; | 48 | const char *input_name; |
46 | const char *input_phys; | 49 | const char *input_phys; |
50 | /* Returns new code, value, and autorelease values in arguments. | ||
51 | * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */ | ||
52 | void (*key_filter) (struct asus_wmi_driver *driver, int *code, | ||
53 | unsigned int *value, bool *autorelease); | ||
47 | 54 | ||
48 | int (*probe) (struct platform_device *device); | 55 | int (*probe) (struct platform_device *device); |
49 | void (*quirks) (struct asus_wmi_driver *driver); | 56 | void (*quirks) (struct asus_wmi_driver *driver); |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index e39ab1d3ed87..f31fa4efa725 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
@@ -612,7 +612,6 @@ static int __init dell_init(void) | |||
612 | if (!bufferpage) | 612 | if (!bufferpage) |
613 | goto fail_buffer; | 613 | goto fail_buffer; |
614 | buffer = page_address(bufferpage); | 614 | buffer = page_address(bufferpage); |
615 | mutex_init(&buffer_mutex); | ||
616 | 615 | ||
617 | ret = dell_setup_rfkill(); | 616 | ret = dell_setup_rfkill(); |
618 | 617 | ||
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index ce790827e199..fa9a2171cc13 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -54,6 +54,8 @@ MODULE_ALIAS("wmi:"DELL_EVENT_GUID); | |||
54 | */ | 54 | */ |
55 | 55 | ||
56 | static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { | 56 | static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { |
57 | { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } }, | ||
58 | |||
57 | { KE_KEY, 0xe045, { KEY_PROG1 } }, | 59 | { KE_KEY, 0xe045, { KEY_PROG1 } }, |
58 | { KE_KEY, 0xe009, { KEY_EJECTCD } }, | 60 | { KE_KEY, 0xe009, { KEY_EJECTCD } }, |
59 | 61 | ||
@@ -85,6 +87,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { | |||
85 | { KE_IGNORE, 0xe013, { KEY_RESERVED } }, | 87 | { KE_IGNORE, 0xe013, { KEY_RESERVED } }, |
86 | 88 | ||
87 | { KE_IGNORE, 0xe020, { KEY_MUTE } }, | 89 | { KE_IGNORE, 0xe020, { KEY_MUTE } }, |
90 | |||
91 | /* Shortcut and audio panel keys */ | ||
92 | { KE_IGNORE, 0xe025, { KEY_RESERVED } }, | ||
93 | { KE_IGNORE, 0xe026, { KEY_RESERVED } }, | ||
94 | |||
88 | { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } }, | 95 | { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } }, |
89 | { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } }, | 96 | { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } }, |
90 | { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } }, | 97 | { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } }, |
@@ -92,6 +99,9 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { | |||
92 | { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } }, | 99 | { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } }, |
93 | { KE_IGNORE, 0xe045, { KEY_NUMLOCK } }, | 100 | { KE_IGNORE, 0xe045, { KEY_NUMLOCK } }, |
94 | { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } }, | 101 | { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } }, |
102 | { KE_IGNORE, 0xe0f7, { KEY_MUTE } }, | ||
103 | { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } }, | ||
104 | { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } }, | ||
95 | { KE_END, 0 } | 105 | { KE_END, 0 } |
96 | }; | 106 | }; |
97 | 107 | ||
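All of the dell-wmi additions are sparse-keymap entries: KE_KEY scancodes are reported to userspace, while KE_IGNORE scancodes are silenced without producing "unknown key" log noise. A hedged sketch of how such a table is typically wired into a driver (error handling trimmed; names are illustrative):

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry example_keymap[] = {
	{ KE_KEY,    0xe045, { KEY_PROG1 } },	/* forwarded to userspace */
	{ KE_IGNORE, 0xe020, { KEY_MUTE } },	/* firmware-handled, swallowed */
	{ KE_END, 0 }
};

/* during probe, after input_allocate_device(): */
err = sparse_keymap_setup(input_dev, example_keymap, NULL);
if (!err)
	err = input_register_device(input_dev);

/* in the WMI/ACPI event handler: */
if (!sparse_keymap_report_event(input_dev, scancode, 1, true))
	pr_info("Unknown key %x pressed\n", scancode);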
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 4aa867a9b88b..9f6e64302b45 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
@@ -56,6 +56,11 @@ MODULE_PARM_DESC(hotplug_wireless, | |||
56 | "If your laptop needs that, please report to " | 56 | "If your laptop needs that, please report to " |
57 | "acpi4asus-user@lists.sourceforge.net."); | 57 | "acpi4asus-user@lists.sourceforge.net."); |
58 | 58 | ||
59 | /* Values for T101MT "Home" key */ | ||
60 | #define HOME_PRESS 0xe4 | ||
61 | #define HOME_HOLD 0xea | ||
62 | #define HOME_RELEASE 0xe5 | ||
63 | |||
59 | static const struct key_entry eeepc_wmi_keymap[] = { | 64 | static const struct key_entry eeepc_wmi_keymap[] = { |
60 | /* Sleep already handled via generic ACPI code */ | 65 | /* Sleep already handled via generic ACPI code */ |
61 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | 66 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, |
@@ -71,6 +76,7 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
71 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, | 76 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, |
72 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ | 77 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ |
73 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ | 78 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ |
79 | { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */ | ||
74 | { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, | 80 | { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, |
75 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, | 81 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, |
76 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, | 82 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, |
@@ -81,6 +87,25 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
81 | { KE_END, 0}, | 87 | { KE_END, 0}, |
82 | }; | 88 | }; |
83 | 89 | ||
90 | static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code, | ||
91 | unsigned int *value, bool *autorelease) | ||
92 | { | ||
93 | switch (*code) { | ||
94 | case HOME_PRESS: | ||
95 | *value = 1; | ||
96 | *autorelease = 0; | ||
97 | break; | ||
98 | case HOME_HOLD: | ||
99 | *code = ASUS_WMI_KEY_IGNORE; | ||
100 | break; | ||
101 | case HOME_RELEASE: | ||
102 | *code = HOME_PRESS; | ||
103 | *value = 0; | ||
104 | *autorelease = 0; | ||
105 | break; | ||
106 | } | ||
107 | } | ||
108 | |||
84 | static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, | 109 | static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, |
85 | void *context, void **retval) | 110 | void *context, void **retval) |
86 | { | 111 | { |
@@ -141,6 +166,7 @@ static void eeepc_dmi_check(struct asus_wmi_driver *driver) | |||
141 | static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) | 166 | static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) |
142 | { | 167 | { |
143 | driver->hotplug_wireless = hotplug_wireless; | 168 | driver->hotplug_wireless = hotplug_wireless; |
169 | driver->wapf = -1; | ||
144 | eeepc_dmi_check(driver); | 170 | eeepc_dmi_check(driver); |
145 | } | 171 | } |
146 | 172 | ||
@@ -151,6 +177,7 @@ static struct asus_wmi_driver asus_wmi_driver = { | |||
151 | .keymap = eeepc_wmi_keymap, | 177 | .keymap = eeepc_wmi_keymap, |
152 | .input_name = "Eee PC WMI hotkeys", | 178 | .input_name = "Eee PC WMI hotkeys", |
153 | .input_phys = EEEPC_WMI_FILE "/input0", | 179 | .input_phys = EEEPC_WMI_FILE "/input0", |
180 | .key_filter = eeepc_wmi_key_filter, | ||
154 | .probe = eeepc_wmi_probe, | 181 | .probe = eeepc_wmi_probe, |
155 | .quirks = eeepc_wmi_quirks, | 182 | .quirks = eeepc_wmi_quirks, |
156 | }; | 183 | }; |
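The filter above turns the T101MT Home key's three scancodes into an explicit press/release pair: the press is reported with value 1 and autorelease off, the hold code is dropped, and the release code is rewritten to the press code with value 0. With autorelease disabled, sparse_keymap_report_event() no longer synthesizes the key-up itself; a hedged sketch of the calls the generic notify path ends up making:

/* Net effect of eeepc_wmi_key_filter() in the notify path (sketch). */
switch (scancode) {
case HOME_PRESS:	/* 0xe4: key down, no synthetic key up */
	sparse_keymap_report_event(inputdev, HOME_PRESS, 1, false);
	break;
case HOME_HOLD:		/* 0xea: dropped via ASUS_WMI_KEY_IGNORE */
	break;
case HOME_RELEASE:	/* 0xe5: reported as key up of the press code */
	sparse_keymap_report_event(inputdev, HOME_PRESS, 0, false);
	break;
}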
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index bfdda33feb26..0c595410e788 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -32,13 +32,22 @@ | |||
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/input.h> | 33 | #include <linux/input.h> |
34 | #include <linux/input/sparse-keymap.h> | 34 | #include <linux/input/sparse-keymap.h> |
35 | #include <linux/backlight.h> | ||
36 | #include <linux/fb.h> | ||
35 | 37 | ||
36 | #define IDEAPAD_RFKILL_DEV_NUM (3) | 38 | #define IDEAPAD_RFKILL_DEV_NUM (3) |
37 | 39 | ||
40 | #define CFG_BT_BIT (16) | ||
41 | #define CFG_3G_BIT (17) | ||
42 | #define CFG_WIFI_BIT (18) | ||
43 | #define CFG_CAMERA_BIT (19) | ||
44 | |||
38 | struct ideapad_private { | 45 | struct ideapad_private { |
39 | struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; | 46 | struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; |
40 | struct platform_device *platform_device; | 47 | struct platform_device *platform_device; |
41 | struct input_dev *inputdev; | 48 | struct input_dev *inputdev; |
49 | struct backlight_device *blightdev; | ||
50 | unsigned long cfg; | ||
42 | }; | 51 | }; |
43 | 52 | ||
44 | static acpi_handle ideapad_handle; | 53 | static acpi_handle ideapad_handle; |
@@ -155,7 +164,7 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data) | |||
155 | } | 164 | } |
156 | 165 | ||
157 | /* | 166 | /* |
158 | * camera power | 167 | * sysfs |
159 | */ | 168 | */ |
160 | static ssize_t show_ideapad_cam(struct device *dev, | 169 | static ssize_t show_ideapad_cam(struct device *dev, |
161 | struct device_attribute *attr, | 170 | struct device_attribute *attr, |
@@ -186,6 +195,44 @@ static ssize_t store_ideapad_cam(struct device *dev, | |||
186 | 195 | ||
187 | static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); | 196 | static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); |
188 | 197 | ||
198 | static ssize_t show_ideapad_cfg(struct device *dev, | ||
199 | struct device_attribute *attr, | ||
200 | char *buf) | ||
201 | { | ||
202 | struct ideapad_private *priv = dev_get_drvdata(dev); | ||
203 | |||
204 | return sprintf(buf, "0x%.8lX\n", priv->cfg); | ||
205 | } | ||
206 | |||
207 | static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL); | ||
208 | |||
209 | static struct attribute *ideapad_attributes[] = { | ||
210 | &dev_attr_camera_power.attr, | ||
211 | &dev_attr_cfg.attr, | ||
212 | NULL | ||
213 | }; | ||
214 | |||
215 | static mode_t ideapad_is_visible(struct kobject *kobj, | ||
216 | struct attribute *attr, | ||
217 | int idx) | ||
218 | { | ||
219 | struct device *dev = container_of(kobj, struct device, kobj); | ||
220 | struct ideapad_private *priv = dev_get_drvdata(dev); | ||
221 | bool supported; | ||
222 | |||
223 | if (attr == &dev_attr_camera_power.attr) | ||
224 | supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); | ||
225 | else | ||
226 | supported = true; | ||
227 | |||
228 | return supported ? attr->mode : 0; | ||
229 | } | ||
230 | |||
231 | static struct attribute_group ideapad_attribute_group = { | ||
232 | .is_visible = ideapad_is_visible, | ||
233 | .attrs = ideapad_attributes | ||
234 | }; | ||
235 | |||
189 | /* | 236 | /* |
190 | * Rfkill | 237 | * Rfkill |
191 | */ | 238 | */ |
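The new ideapad sysfs block relies on the attribute_group .is_visible hook: every attribute is listed unconditionally, and the callback decides at sysfs_create_group() time which ones actually appear, based on the _CFG capability bits. A hedged sketch of the registration side (anything beyond the names in the hunk is illustrative):

/* The group above is registered once; ideapad_is_visible() is called
 * for each attribute at creation time and returns 0 to hide it. */
ret = sysfs_create_group(&priv->platform_device->dev.kobj,
			 &ideapad_attribute_group);
if (ret)
	dev_err(&priv->platform_device->dev,
		"could not create sysfs group: %d\n", ret);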
@@ -197,9 +244,9 @@ struct ideapad_rfk_data { | |||
197 | }; | 244 | }; |
198 | 245 | ||
199 | const struct ideapad_rfk_data ideapad_rfk_data[] = { | 246 | const struct ideapad_rfk_data ideapad_rfk_data[] = { |
200 | { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN }, | 247 | { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN }, |
201 | { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH }, | 248 | { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH }, |
202 | { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN }, | 249 | { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN }, |
203 | }; | 250 | }; |
204 | 251 | ||
205 | static int ideapad_rfk_set(void *data, bool blocked) | 252 | static int ideapad_rfk_set(void *data, bool blocked) |
@@ -265,8 +312,7 @@ static int __devinit ideapad_register_rfkill(struct acpi_device *adevice, | |||
265 | return 0; | 312 | return 0; |
266 | } | 313 | } |
267 | 314 | ||
268 | static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice, | 315 | static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev) |
269 | int dev) | ||
270 | { | 316 | { |
271 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | 317 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); |
272 | 318 | ||
@@ -280,15 +326,6 @@ static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice, | |||
280 | /* | 326 | /* |
281 | * Platform device | 327 | * Platform device |
282 | */ | 328 | */ |
283 | static struct attribute *ideapad_attributes[] = { | ||
284 | &dev_attr_camera_power.attr, | ||
285 | NULL | ||
286 | }; | ||
287 | |||
288 | static struct attribute_group ideapad_attribute_group = { | ||
289 | .attrs = ideapad_attributes | ||
290 | }; | ||
291 | |||
292 | static int __devinit ideapad_platform_init(struct ideapad_private *priv) | 329 | static int __devinit ideapad_platform_init(struct ideapad_private *priv) |
293 | { | 330 | { |
294 | int result; | 331 | int result; |
@@ -369,7 +406,7 @@ err_free_dev: | |||
369 | return error; | 406 | return error; |
370 | } | 407 | } |
371 | 408 | ||
372 | static void __devexit ideapad_input_exit(struct ideapad_private *priv) | 409 | static void ideapad_input_exit(struct ideapad_private *priv) |
373 | { | 410 | { |
374 | sparse_keymap_free(priv->inputdev); | 411 | sparse_keymap_free(priv->inputdev); |
375 | input_unregister_device(priv->inputdev); | 412 | input_unregister_device(priv->inputdev); |
@@ -383,6 +420,98 @@ static void ideapad_input_report(struct ideapad_private *priv, | |||
383 | } | 420 | } |
384 | 421 | ||
385 | /* | 422 | /* |
423 | * backlight | ||
424 | */ | ||
425 | static int ideapad_backlight_get_brightness(struct backlight_device *blightdev) | ||
426 | { | ||
427 | unsigned long now; | ||
428 | |||
429 | if (read_ec_data(ideapad_handle, 0x12, &now)) | ||
430 | return -EIO; | ||
431 | return now; | ||
432 | } | ||
433 | |||
434 | static int ideapad_backlight_update_status(struct backlight_device *blightdev) | ||
435 | { | ||
436 | if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness)) | ||
437 | return -EIO; | ||
438 | if (write_ec_cmd(ideapad_handle, 0x33, | ||
439 | blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1)) | ||
440 | return -EIO; | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | static const struct backlight_ops ideapad_backlight_ops = { | ||
446 | .get_brightness = ideapad_backlight_get_brightness, | ||
447 | .update_status = ideapad_backlight_update_status, | ||
448 | }; | ||
449 | |||
450 | static int ideapad_backlight_init(struct ideapad_private *priv) | ||
451 | { | ||
452 | struct backlight_device *blightdev; | ||
453 | struct backlight_properties props; | ||
454 | unsigned long max, now, power; | ||
455 | |||
456 | if (read_ec_data(ideapad_handle, 0x11, &max)) | ||
457 | return -EIO; | ||
458 | if (read_ec_data(ideapad_handle, 0x12, &now)) | ||
459 | return -EIO; | ||
460 | if (read_ec_data(ideapad_handle, 0x18, &power)) | ||
461 | return -EIO; | ||
462 | |||
463 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
464 | props.max_brightness = max; | ||
465 | props.type = BACKLIGHT_PLATFORM; | ||
466 | blightdev = backlight_device_register("ideapad", | ||
467 | &priv->platform_device->dev, | ||
468 | priv, | ||
469 | &ideapad_backlight_ops, | ||
470 | &props); | ||
471 | if (IS_ERR(blightdev)) { | ||
472 | pr_err("Could not register backlight device\n"); | ||
473 | return PTR_ERR(blightdev); | ||
474 | } | ||
475 | |||
476 | priv->blightdev = blightdev; | ||
477 | blightdev->props.brightness = now; | ||
478 | blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; | ||
479 | backlight_update_status(blightdev); | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | static void ideapad_backlight_exit(struct ideapad_private *priv) | ||
485 | { | ||
486 | if (priv->blightdev) | ||
487 | backlight_device_unregister(priv->blightdev); | ||
488 | priv->blightdev = NULL; | ||
489 | } | ||
490 | |||
491 | static void ideapad_backlight_notify_power(struct ideapad_private *priv) | ||
492 | { | ||
493 | unsigned long power; | ||
494 | struct backlight_device *blightdev = priv->blightdev; | ||
495 | |||
496 | if (read_ec_data(ideapad_handle, 0x18, &power)) | ||
497 | return; | ||
498 | blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; | ||
499 | } | ||
500 | |||
501 | static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) | ||
502 | { | ||
503 | unsigned long now; | ||
504 | |||
505 | /* if we control brightness via acpi video driver */ | ||
506 | if (priv->blightdev == NULL) { | ||
507 | read_ec_data(ideapad_handle, 0x12, &now); | ||
508 | return; | ||
509 | } | ||
510 | |||
511 | backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY); | ||
512 | } | ||
513 | |||
514 | /* | ||
386 | * module init/exit | 515 | * module init/exit |
387 | */ | 516 | */ |
388 | static const struct acpi_device_id ideapad_device_ids[] = { | 517 | static const struct acpi_device_id ideapad_device_ids[] = { |
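The backlight code above drives the ideapad EC through a handful of magic VPC registers. For readability only, here is a hedged sketch giving those numbers names; the names are invented for this note and do not appear in the driver:

/* Hypothetical names for the EC/VPC registers used by the backlight
 * code above (the driver itself uses the raw numbers). */
#define EXAMPLE_VPC_BL_MAX	0x11	/* read: maximum brightness */
#define EXAMPLE_VPC_BL_LEVEL	0x12	/* read: current brightness */
#define EXAMPLE_VPC_BL_SET	0x13	/* write: set brightness */
#define EXAMPLE_VPC_BL_POWER	0x18	/* read: backlight power state */
#define EXAMPLE_VPC_BL_SWITCH	0x33	/* write: 1 = unblank, 0 = power down */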
@@ -393,10 +522,11 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); | |||
393 | 522 | ||
394 | static int __devinit ideapad_acpi_add(struct acpi_device *adevice) | 523 | static int __devinit ideapad_acpi_add(struct acpi_device *adevice) |
395 | { | 524 | { |
396 | int ret, i, cfg; | 525 | int ret, i; |
526 | unsigned long cfg; | ||
397 | struct ideapad_private *priv; | 527 | struct ideapad_private *priv; |
398 | 528 | ||
399 | if (read_method_int(adevice->handle, "_CFG", &cfg)) | 529 | if (read_method_int(adevice->handle, "_CFG", (int *)&cfg)) |
400 | return -ENODEV; | 530 | return -ENODEV; |
401 | 531 | ||
402 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 532 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
@@ -404,6 +534,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) | |||
404 | return -ENOMEM; | 534 | return -ENOMEM; |
405 | dev_set_drvdata(&adevice->dev, priv); | 535 | dev_set_drvdata(&adevice->dev, priv); |
406 | ideapad_handle = adevice->handle; | 536 | ideapad_handle = adevice->handle; |
537 | priv->cfg = cfg; | ||
407 | 538 | ||
408 | ret = ideapad_platform_init(priv); | 539 | ret = ideapad_platform_init(priv); |
409 | if (ret) | 540 | if (ret) |
@@ -414,15 +545,25 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) | |||
414 | goto input_failed; | 545 | goto input_failed; |
415 | 546 | ||
416 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { | 547 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { |
417 | if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg)) | 548 | if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg)) |
418 | ideapad_register_rfkill(adevice, i); | 549 | ideapad_register_rfkill(adevice, i); |
419 | else | 550 | else |
420 | priv->rfk[i] = NULL; | 551 | priv->rfk[i] = NULL; |
421 | } | 552 | } |
422 | ideapad_sync_rfk_state(adevice); | 553 | ideapad_sync_rfk_state(adevice); |
423 | 554 | ||
555 | if (!acpi_video_backlight_support()) { | ||
556 | ret = ideapad_backlight_init(priv); | ||
557 | if (ret && ret != -ENODEV) | ||
558 | goto backlight_failed; | ||
559 | } | ||
560 | |||
424 | return 0; | 561 | return 0; |
425 | 562 | ||
563 | backlight_failed: | ||
564 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) | ||
565 | ideapad_unregister_rfkill(adevice, i); | ||
566 | ideapad_input_exit(priv); | ||
426 | input_failed: | 567 | input_failed: |
427 | ideapad_platform_exit(priv); | 568 | ideapad_platform_exit(priv); |
428 | platform_failed: | 569 | platform_failed: |
@@ -435,6 +576,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type) | |||
435 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); | 576 | struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); |
436 | int i; | 577 | int i; |
437 | 578 | ||
579 | ideapad_backlight_exit(priv); | ||
438 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) | 580 | for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) |
439 | ideapad_unregister_rfkill(adevice, i); | 581 | ideapad_unregister_rfkill(adevice, i); |
440 | ideapad_input_exit(priv); | 582 | ideapad_input_exit(priv); |
@@ -459,12 +601,19 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) | |||
459 | vpc1 = (vpc2 << 8) | vpc1; | 601 | vpc1 = (vpc2 << 8) | vpc1; |
460 | for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { | 602 | for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { |
461 | if (test_bit(vpc_bit, &vpc1)) { | 603 | if (test_bit(vpc_bit, &vpc1)) { |
462 | if (vpc_bit == 9) | 604 | switch (vpc_bit) { |
605 | case 9: | ||
463 | ideapad_sync_rfk_state(adevice); | 606 | ideapad_sync_rfk_state(adevice); |
464 | else if (vpc_bit == 4) | 607 | break; |
465 | read_ec_data(handle, 0x12, &vpc2); | 608 | case 4: |
466 | else | 609 | ideapad_backlight_notify_brightness(priv); |
610 | break; | ||
611 | case 2: | ||
612 | ideapad_backlight_notify_power(priv); | ||
613 | break; | ||
614 | default: | ||
467 | ideapad_input_report(priv, vpc_bit); | 615 | ideapad_input_report(priv, vpc_bit); |
616 | } | ||
468 | } | 617 | } |
469 | } | 618 | } |
470 | } | 619 | } |
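The notify handler now dispatches on individual VPC event bits instead of a chain of if/else. An equivalent hedged sketch using for_each_set_bit(), which makes the "walk every set bit" intent explicit (the bit meanings are taken from the switch above):

/* Equivalent dispatch, sketched with for_each_set_bit(). */
unsigned long vpc = vpc1;
int bit;

for_each_set_bit(bit, &vpc, 16) {
	switch (bit) {
	case 9:				/* rfkill state changed */
		ideapad_sync_rfk_state(adevice);
		break;
	case 4:				/* brightness hotkey */
		ideapad_backlight_notify_brightness(priv);
		break;
	case 2:				/* backlight power toggled */
		ideapad_backlight_notify_power(priv);
		break;
	default:			/* everything else is a hotkey */
		ideapad_input_report(priv, bit);
	}
}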
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 5ffe7c398148..809a3ae943c6 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c | |||
@@ -403,7 +403,7 @@ static void ips_cpu_raise(struct ips_driver *ips) | |||
403 | 403 | ||
404 | thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); | 404 | thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); |
405 | 405 | ||
406 | turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; | 406 | turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; |
407 | wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); | 407 | wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); |
408 | 408 | ||
409 | turbo_override &= ~TURBO_TDP_MASK; | 409 | turbo_override &= ~TURBO_TDP_MASK; |
@@ -438,7 +438,7 @@ static void ips_cpu_lower(struct ips_driver *ips) | |||
438 | 438 | ||
439 | thm_writew(THM_MPCPC, (new_limit * 10) / 8); | 439 | thm_writew(THM_MPCPC, (new_limit * 10) / 8); |
440 | 440 | ||
441 | turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; | 441 | turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; |
442 | wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); | 442 | wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); |
443 | 443 | ||
444 | turbo_override &= ~TURBO_TDP_MASK; | 444 | turbo_override &= ~TURBO_TDP_MASK; |
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 809adea4965f..abddc83e9fd7 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c | |||
@@ -477,6 +477,8 @@ static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl, | |||
477 | return AE_ERROR; | 477 | return AE_ERROR; |
478 | } | 478 | } |
479 | 479 | ||
480 | return AE_OK; | ||
481 | |||
480 | aux1_not_found: | 482 | aux1_not_found: |
481 | if (status == AE_NOT_FOUND) | 483 | if (status == AE_NOT_FOUND) |
482 | return AE_OK; | 484 | return AE_OK; |
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 3a578323122b..ccd7b1f83519 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c | |||
@@ -493,20 +493,30 @@ static int mid_thermal_probe(struct platform_device *pdev) | |||
493 | 493 | ||
494 | /* Register each sensor with the generic thermal framework*/ | 494 | /* Register each sensor with the generic thermal framework*/ |
495 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { | 495 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { |
496 | struct thermal_device_info *td_info = initialize_sensor(i); | ||
497 | |||
498 | if (!td_info) { | ||
499 | ret = -ENOMEM; | ||
500 | goto err; | ||
501 | } | ||
496 | pinfo->tzd[i] = thermal_zone_device_register(name[i], | 502 | pinfo->tzd[i] = thermal_zone_device_register(name[i], |
497 | 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0); | 503 | 0, td_info, &tzd_ops, 0, 0, 0, 0); |
498 | if (IS_ERR(pinfo->tzd[i])) | 504 | if (IS_ERR(pinfo->tzd[i])) { |
499 | goto reg_fail; | 505 | kfree(td_info); |
506 | ret = PTR_ERR(pinfo->tzd[i]); | ||
507 | goto err; | ||
508 | } | ||
500 | } | 509 | } |
501 | 510 | ||
502 | pinfo->pdev = pdev; | 511 | pinfo->pdev = pdev; |
503 | platform_set_drvdata(pdev, pinfo); | 512 | platform_set_drvdata(pdev, pinfo); |
504 | return 0; | 513 | return 0; |
505 | 514 | ||
506 | reg_fail: | 515 | err: |
507 | ret = PTR_ERR(pinfo->tzd[i]); | 516 | while (--i >= 0) { |
508 | while (--i >= 0) | 517 | kfree(pinfo->tzd[i]->devdata); |
509 | thermal_zone_device_unregister(pinfo->tzd[i]); | 518 | thermal_zone_device_unregister(pinfo->tzd[i]); |
519 | } | ||
510 | configure_adc(0); | 520 | configure_adc(0); |
511 | kfree(pinfo); | 521 | kfree(pinfo); |
512 | return ret; | 522 | return ret; |
@@ -524,8 +534,10 @@ static int mid_thermal_remove(struct platform_device *pdev) | |||
524 | int i; | 534 | int i; |
525 | struct platform_info *pinfo = platform_get_drvdata(pdev); | 535 | struct platform_info *pinfo = platform_get_drvdata(pdev); |
526 | 536 | ||
527 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) | 537 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { |
538 | kfree(pinfo->tzd[i]->devdata); | ||
528 | thermal_zone_device_unregister(pinfo->tzd[i]); | 539 | thermal_zone_device_unregister(pinfo->tzd[i]); |
540 | } | ||
529 | 541 | ||
530 | kfree(pinfo); | 542 | kfree(pinfo); |
531 | platform_set_drvdata(pdev, NULL); | 543 | platform_set_drvdata(pdev, NULL); |
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c index bde47e9080cd..c8a6aed45277 100644 --- a/drivers/platform/x86/intel_rar_register.c +++ b/drivers/platform/x86/intel_rar_register.c | |||
@@ -637,15 +637,13 @@ end_function: | |||
637 | return error; | 637 | return error; |
638 | } | 638 | } |
639 | 639 | ||
640 | const struct pci_device_id rar_pci_id_tbl[] = { | 640 | static DEFINE_PCI_DEVICE_TABLE(rar_pci_id_tbl) = { |
641 | { PCI_VDEVICE(INTEL, 0x4110) }, | 641 | { PCI_VDEVICE(INTEL, 0x4110) }, |
642 | { 0 } | 642 | { 0 } |
643 | }; | 643 | }; |
644 | 644 | ||
645 | MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl); | 645 | MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl); |
646 | 646 | ||
647 | const struct pci_device_id *my_id_table = rar_pci_id_tbl; | ||
648 | |||
649 | /* field for registering driver to PCI device */ | 647 | /* field for registering driver to PCI device */ |
650 | static struct pci_driver rar_pci_driver = { | 648 | static struct pci_driver rar_pci_driver = { |
651 | .name = "rar_register_driver", | 649 | .name = "rar_register_driver", |
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 940accbe28d3..c86665369a22 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c | |||
@@ -725,7 +725,7 @@ static void ipc_remove(struct pci_dev *pdev) | |||
725 | intel_scu_devices_destroy(); | 725 | intel_scu_devices_destroy(); |
726 | } | 726 | } |
727 | 727 | ||
728 | static const struct pci_device_id pci_ids[] = { | 728 | static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { |
729 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}, | 729 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}, |
730 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, | 730 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, |
731 | { 0,} | 731 | { 0,} |
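Both intel_rar_register and intel_scu_ipc switch their ID arrays to DEFINE_PCI_DEVICE_TABLE, which makes the table const and, at this point in the kernel's history, places it in the __devinitconst section. A hedged sketch of the usual idiom; the probe/remove names are placeholders:

/* DEFINE_PCI_DEVICE_TABLE expands (roughly) to
 * "const struct pci_device_id name[] __devinitconst". */
static DEFINE_PCI_DEVICE_TABLE(example_pci_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_ids,
	.probe		= example_probe,	/* placeholder */
	.remove		= example_remove,	/* placeholder */
};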
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 3ff629df9f01..f204643c5052 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c | |||
@@ -538,6 +538,15 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = { | |||
538 | }, | 538 | }, |
539 | .callback = dmi_check_cb | 539 | .callback = dmi_check_cb |
540 | }, | 540 | }, |
541 | { | ||
542 | .ident = "MSI U270", | ||
543 | .matches = { | ||
544 | DMI_MATCH(DMI_SYS_VENDOR, | ||
545 | "Micro-Star International Co., Ltd."), | ||
546 | DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"), | ||
547 | }, | ||
548 | .callback = dmi_check_cb | ||
549 | }, | ||
541 | { } | 550 | { } |
542 | }; | 551 | }; |
543 | 552 | ||
@@ -996,3 +1005,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*"); | |||
996 | MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*"); | 1005 | MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*"); |
997 | MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*"); | 1006 | MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*"); |
998 | MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*"); | 1007 | MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*"); |
1008 | MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*"); | ||
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index c832e3356cd6..6f40bf202dc7 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c | |||
@@ -272,6 +272,7 @@ static int __init msi_wmi_init(void) | |||
272 | err_free_backlight: | 272 | err_free_backlight: |
273 | backlight_device_unregister(backlight); | 273 | backlight_device_unregister(backlight); |
274 | err_free_input: | 274 | err_free_input: |
275 | sparse_keymap_free(msi_wmi_input_dev); | ||
275 | input_unregister_device(msi_wmi_input_dev); | 276 | input_unregister_device(msi_wmi_input_dev); |
276 | err_uninstall_notifier: | 277 | err_uninstall_notifier: |
277 | wmi_remove_notify_handler(MSIWMI_EVENT_GUID); | 278 | wmi_remove_notify_handler(MSIWMI_EVENT_GUID); |
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index d347116d150e..359163011044 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c | |||
@@ -521,6 +521,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { | |||
521 | .callback = dmi_check_cb, | 521 | .callback = dmi_check_cb, |
522 | }, | 522 | }, |
523 | { | 523 | { |
524 | .ident = "N510", | ||
525 | .matches = { | ||
526 | DMI_MATCH(DMI_SYS_VENDOR, | ||
527 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
528 | DMI_MATCH(DMI_PRODUCT_NAME, "N510"), | ||
529 | DMI_MATCH(DMI_BOARD_NAME, "N510"), | ||
530 | }, | ||
531 | .callback = dmi_check_cb, | ||
532 | }, | ||
533 | { | ||
524 | .ident = "X125", | 534 | .ident = "X125", |
525 | .matches = { | 535 | .matches = { |
526 | DMI_MATCH(DMI_SYS_VENDOR, | 536 | DMI_MATCH(DMI_SYS_VENDOR, |
@@ -601,6 +611,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { | |||
601 | .callback = dmi_check_cb, | 611 | .callback = dmi_check_cb, |
602 | }, | 612 | }, |
603 | { | 613 | { |
614 | .ident = "N150/N210/N220", | ||
615 | .matches = { | ||
616 | DMI_MATCH(DMI_SYS_VENDOR, | ||
617 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
618 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), | ||
619 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), | ||
620 | }, | ||
621 | .callback = dmi_check_cb, | ||
622 | }, | ||
623 | { | ||
604 | .ident = "N150/N210/N220/N230", | 624 | .ident = "N150/N210/N220/N230", |
605 | .matches = { | 625 | .matches = { |
606 | DMI_MATCH(DMI_SYS_VENDOR, | 626 | DMI_MATCH(DMI_SYS_VENDOR, |
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c new file mode 100644 index 000000000000..1e54ae74274c --- /dev/null +++ b/drivers/platform/x86/samsung-q10.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * Driver for Samsung Q10 and related laptops: controls the backlight | ||
3 | * | ||
4 | * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/backlight.h> | ||
17 | #include <linux/i8042.h> | ||
18 | #include <linux/dmi.h> | ||
19 | |||
20 | #define SAMSUNGQ10_BL_MAX_INTENSITY 255 | ||
21 | #define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185 | ||
22 | |||
23 | #define SAMSUNGQ10_BL_8042_CMD 0xbe | ||
24 | #define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 } | ||
25 | |||
26 | static int samsungq10_bl_brightness; | ||
27 | |||
28 | static bool force; | ||
29 | module_param(force, bool, 0); | ||
30 | MODULE_PARM_DESC(force, | ||
31 | "Disable the DMI check and force the driver to be loaded"); | ||
32 | |||
33 | static int samsungq10_bl_set_intensity(struct backlight_device *bd) | ||
34 | { | ||
35 | |||
36 | int brightness = bd->props.brightness; | ||
37 | unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA; | ||
38 | |||
39 | c[2] = (unsigned char)brightness; | ||
40 | i8042_lock_chip(); | ||
41 | i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD); | ||
42 | i8042_unlock_chip(); | ||
43 | samsungq10_bl_brightness = brightness; | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | static int samsungq10_bl_get_intensity(struct backlight_device *bd) | ||
49 | { | ||
50 | return samsungq10_bl_brightness; | ||
51 | } | ||
52 | |||
53 | static const struct backlight_ops samsungq10_bl_ops = { | ||
54 | .get_brightness = samsungq10_bl_get_intensity, | ||
55 | .update_status = samsungq10_bl_set_intensity, | ||
56 | }; | ||
57 | |||
58 | #ifdef CONFIG_PM_SLEEP | ||
59 | static int samsungq10_suspend(struct device *dev) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int samsungq10_resume(struct device *dev) | ||
65 | { | ||
66 | |||
67 | struct backlight_device *bd = dev_get_drvdata(dev); | ||
68 | |||
69 | samsungq10_bl_set_intensity(bd); | ||
70 | return 0; | ||
71 | } | ||
72 | #else | ||
73 | #define samsungq10_suspend NULL | ||
74 | #define samsungq10_resume NULL | ||
75 | #endif | ||
76 | |||
77 | static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops, | ||
78 | samsungq10_suspend, samsungq10_resume); | ||
79 | |||
80 | static int __devinit samsungq10_probe(struct platform_device *pdev) | ||
81 | { | ||
82 | |||
83 | struct backlight_properties props; | ||
84 | struct backlight_device *bd; | ||
85 | |||
86 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
87 | props.type = BACKLIGHT_PLATFORM; | ||
88 | props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY; | ||
89 | bd = backlight_device_register("samsung", &pdev->dev, NULL, | ||
90 | &samsungq10_bl_ops, &props); | ||
91 | if (IS_ERR(bd)) | ||
92 | return PTR_ERR(bd); | ||
93 | |||
94 | platform_set_drvdata(pdev, bd); | ||
95 | |||
96 | bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY; | ||
97 | samsungq10_bl_set_intensity(bd); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static int __devexit samsungq10_remove(struct platform_device *pdev) | ||
103 | { | ||
104 | |||
105 | struct backlight_device *bd = platform_get_drvdata(pdev); | ||
106 | |||
107 | bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY; | ||
108 | samsungq10_bl_set_intensity(bd); | ||
109 | |||
110 | backlight_device_unregister(bd); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static struct platform_driver samsungq10_driver = { | ||
116 | .driver = { | ||
117 | .name = KBUILD_MODNAME, | ||
118 | .owner = THIS_MODULE, | ||
119 | .pm = &samsungq10_pm_ops, | ||
120 | }, | ||
121 | .probe = samsungq10_probe, | ||
122 | .remove = __devexit_p(samsungq10_remove), | ||
123 | }; | ||
124 | |||
125 | static struct platform_device *samsungq10_device; | ||
126 | |||
127 | static int __init dmi_check_callback(const struct dmi_system_id *id) | ||
128 | { | ||
129 | printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident); | ||
130 | return 1; | ||
131 | } | ||
132 | |||
133 | static struct dmi_system_id __initdata samsungq10_dmi_table[] = { | ||
134 | { | ||
135 | .ident = "Samsung Q10", | ||
136 | .matches = { | ||
137 | DMI_MATCH(DMI_SYS_VENDOR, "Samsung"), | ||
138 | DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"), | ||
139 | }, | ||
140 | .callback = dmi_check_callback, | ||
141 | }, | ||
142 | { | ||
143 | .ident = "Samsung Q20", | ||
144 | .matches = { | ||
145 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"), | ||
146 | DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"), | ||
147 | }, | ||
148 | .callback = dmi_check_callback, | ||
149 | }, | ||
150 | { | ||
151 | .ident = "Samsung Q25", | ||
152 | .matches = { | ||
153 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"), | ||
154 | DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"), | ||
155 | }, | ||
156 | .callback = dmi_check_callback, | ||
157 | }, | ||
158 | { | ||
159 | .ident = "Dell Latitude X200", | ||
160 | .matches = { | ||
161 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), | ||
162 | DMI_MATCH(DMI_PRODUCT_NAME, "X200"), | ||
163 | }, | ||
164 | .callback = dmi_check_callback, | ||
165 | }, | ||
166 | { }, | ||
167 | }; | ||
168 | MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table); | ||
169 | |||
170 | static int __init samsungq10_init(void) | ||
171 | { | ||
172 | if (!force && !dmi_check_system(samsungq10_dmi_table)) | ||
173 | return -ENODEV; | ||
174 | |||
175 | samsungq10_device = platform_create_bundle(&samsungq10_driver, | ||
176 | samsungq10_probe, | ||
177 | NULL, 0, NULL, 0); | ||
178 | |||
179 | if (IS_ERR(samsungq10_device)) | ||
180 | return PTR_ERR(samsungq10_device); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static void __exit samsungq10_exit(void) | ||
186 | { | ||
187 | platform_device_unregister(samsungq10_device); | ||
188 | platform_driver_unregister(&samsungq10_driver); | ||
189 | } | ||
190 | |||
191 | module_init(samsungq10_init); | ||
192 | module_exit(samsungq10_exit); | ||
193 | |||
194 | MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>"); | ||
195 | MODULE_DESCRIPTION("Samsung Q10 Driver"); | ||
196 | MODULE_LICENSE("GPL"); | ||
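For reference on the new samsung-q10 driver: i8042_command() encodes the parameter byte counts in the command word, so the backlight write sends three data bytes and expects no reply. A hedged sketch of that decoding (a best-effort reading of the i8042 command convention, not taken from the driver's comments):

/* (0x30 << 8) | 0xbe == 0x30be
 *   bits 12-15 = 3    -> send three parameter bytes (c[0..2])
 *   bits  8-11 = 0    -> expect no reply bytes
 *   bits  0- 7 = 0xbe -> the 8042 command itself
 */
unsigned char c[3] = { 0x89, 0x91, brightness };

i8042_lock_chip();
i8042_command(c, (3 << 12) | (0 << 8) | SAMSUNGQ10_BL_8042_CMD);
i8042_unlock_chip();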
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 26c5b117df22..7bd829f247eb 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -3186,8 +3186,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3186 | KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */ | 3186 | KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */ |
3187 | 3187 | ||
3188 | /* (assignments unknown, please report if found) */ | 3188 | /* (assignments unknown, please report if found) */ |
3189 | KEY_UNKNOWN, KEY_UNKNOWN, | ||
3190 | |||
3191 | /* | ||
3192 | * The mic mute button only sends 0x1a. It does not | ||
3193 | * automatically mute the mic or change the mute light. | ||
3194 | */ | ||
3195 | KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */ | ||
3196 | |||
3197 | /* (assignments unknown, please report if found) */ | ||
3189 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, | 3198 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, |
3190 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, | 3199 | KEY_UNKNOWN, |
3191 | }, | 3200 | }, |
3192 | }; | 3201 | }; |
3193 | 3202 | ||
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d3e38790906e..d8e6a429e8ba 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/debugfs.h> | 20 | #include <linux/debugfs.h> |
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/async.h> | ||
23 | #include <linux/err.h> | 24 | #include <linux/err.h> |
24 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
25 | #include <linux/suspend.h> | 26 | #include <linux/suspend.h> |
@@ -33,6 +34,8 @@ | |||
33 | 34 | ||
34 | #include "dummy.h" | 35 | #include "dummy.h" |
35 | 36 | ||
37 | #define rdev_crit(rdev, fmt, ...) \ | ||
38 | pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) | ||
36 | #define rdev_err(rdev, fmt, ...) \ | 39 | #define rdev_err(rdev, fmt, ...) \ |
37 | pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) | 40 | pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) |
38 | #define rdev_warn(rdev, fmt, ...) \ | 41 | #define rdev_warn(rdev, fmt, ...) \ |
@@ -78,11 +81,13 @@ struct regulator { | |||
78 | char *supply_name; | 81 | char *supply_name; |
79 | struct device_attribute dev_attr; | 82 | struct device_attribute dev_attr; |
80 | struct regulator_dev *rdev; | 83 | struct regulator_dev *rdev; |
84 | #ifdef CONFIG_DEBUG_FS | ||
85 | struct dentry *debugfs; | ||
86 | #endif | ||
81 | }; | 87 | }; |
82 | 88 | ||
83 | static int _regulator_is_enabled(struct regulator_dev *rdev); | 89 | static int _regulator_is_enabled(struct regulator_dev *rdev); |
84 | static int _regulator_disable(struct regulator_dev *rdev, | 90 | static int _regulator_disable(struct regulator_dev *rdev); |
85 | struct regulator_dev **supply_rdev_ptr); | ||
86 | static int _regulator_get_voltage(struct regulator_dev *rdev); | 91 | static int _regulator_get_voltage(struct regulator_dev *rdev); |
87 | static int _regulator_get_current_limit(struct regulator_dev *rdev); | 92 | static int _regulator_get_current_limit(struct regulator_dev *rdev); |
88 | static unsigned int _regulator_get_mode(struct regulator_dev *rdev); | 93 | static unsigned int _regulator_get_mode(struct regulator_dev *rdev); |
@@ -90,6 +95,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev, | |||
90 | unsigned long event, void *data); | 95 | unsigned long event, void *data); |
91 | static int _regulator_do_set_voltage(struct regulator_dev *rdev, | 96 | static int _regulator_do_set_voltage(struct regulator_dev *rdev, |
92 | int min_uV, int max_uV); | 97 | int min_uV, int max_uV); |
98 | static struct regulator *create_regulator(struct regulator_dev *rdev, | ||
99 | struct device *dev, | ||
100 | const char *supply_name); | ||
93 | 101 | ||
94 | static const char *rdev_get_name(struct regulator_dev *rdev) | 102 | static const char *rdev_get_name(struct regulator_dev *rdev) |
95 | { | 103 | { |
@@ -143,8 +151,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev, | |||
143 | if (*min_uV < rdev->constraints->min_uV) | 151 | if (*min_uV < rdev->constraints->min_uV) |
144 | *min_uV = rdev->constraints->min_uV; | 152 | *min_uV = rdev->constraints->min_uV; |
145 | 153 | ||
146 | if (*min_uV > *max_uV) | 154 | if (*min_uV > *max_uV) { |
155 | rdev_err(rdev, "unsupportable voltage range: %d-%duV\n", | ||
156 | *min_uV, *max_uV); | ||
147 | return -EINVAL; | 157 | return -EINVAL; |
158 | } | ||
148 | 159 | ||
149 | return 0; | 160 | return 0; |
150 | } | 161 | } |
@@ -197,8 +208,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, | |||
197 | if (*min_uA < rdev->constraints->min_uA) | 208 | if (*min_uA < rdev->constraints->min_uA) |
198 | *min_uA = rdev->constraints->min_uA; | 209 | *min_uA = rdev->constraints->min_uA; |
199 | 210 | ||
200 | if (*min_uA > *max_uA) | 211 | if (*min_uA > *max_uA) { |
212 | rdev_err(rdev, "unsupportable current range: %d-%duA\n", | ||
213 | *min_uA, *max_uA); | ||
201 | return -EINVAL; | 214 | return -EINVAL; |
215 | } | ||
202 | 216 | ||
203 | return 0; | 217 | return 0; |
204 | } | 218 | } |
@@ -213,6 +227,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode) | |||
213 | case REGULATOR_MODE_STANDBY: | 227 | case REGULATOR_MODE_STANDBY: |
214 | break; | 228 | break; |
215 | default: | 229 | default: |
230 | rdev_err(rdev, "invalid mode %x specified\n", *mode); | ||
216 | return -EINVAL; | 231 | return -EINVAL; |
217 | } | 232 | } |
218 | 233 | ||
@@ -779,7 +794,6 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, | |||
779 | if (ret < 0) { | 794 | if (ret < 0) { |
780 | rdev_err(rdev, "failed to apply %duV constraint\n", | 795 | rdev_err(rdev, "failed to apply %duV constraint\n", |
781 | rdev->constraints->min_uV); | 796 | rdev->constraints->min_uV); |
782 | rdev->constraints = NULL; | ||
783 | return ret; | 797 | return ret; |
784 | } | 798 | } |
785 | } | 799 | } |
@@ -882,7 +896,6 @@ static int set_machine_constraints(struct regulator_dev *rdev, | |||
882 | ret = suspend_prepare(rdev, rdev->constraints->initial_state); | 896 | ret = suspend_prepare(rdev, rdev->constraints->initial_state); |
883 | if (ret < 0) { | 897 | if (ret < 0) { |
884 | rdev_err(rdev, "failed to set suspend state\n"); | 898 | rdev_err(rdev, "failed to set suspend state\n"); |
885 | rdev->constraints = NULL; | ||
886 | goto out; | 899 | goto out; |
887 | } | 900 | } |
888 | } | 901 | } |
@@ -909,13 +922,15 @@ static int set_machine_constraints(struct regulator_dev *rdev, | |||
909 | ret = ops->enable(rdev); | 922 | ret = ops->enable(rdev); |
910 | if (ret < 0) { | 923 | if (ret < 0) { |
911 | rdev_err(rdev, "failed to enable\n"); | 924 | rdev_err(rdev, "failed to enable\n"); |
912 | rdev->constraints = NULL; | ||
913 | goto out; | 925 | goto out; |
914 | } | 926 | } |
915 | } | 927 | } |
916 | 928 | ||
917 | print_constraints(rdev); | 929 | print_constraints(rdev); |
930 | return 0; | ||
918 | out: | 931 | out: |
932 | kfree(rdev->constraints); | ||
933 | rdev->constraints = NULL; | ||
919 | return ret; | 934 | return ret; |
920 | } | 935 | } |
921 | 936 | ||
@@ -929,21 +944,20 @@ out: | |||
929 | * core if it's child is enabled. | 944 | * core if it's child is enabled. |
930 | */ | 945 | */ |
931 | static int set_supply(struct regulator_dev *rdev, | 946 | static int set_supply(struct regulator_dev *rdev, |
932 | struct regulator_dev *supply_rdev) | 947 | struct regulator_dev *supply_rdev) |
933 | { | 948 | { |
934 | int err; | 949 | int err; |
935 | 950 | ||
936 | err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj, | 951 | rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); |
937 | "supply"); | 952 | |
938 | if (err) { | 953 | rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); |
939 | rdev_err(rdev, "could not add device link %s err %d\n", | 954 | if (IS_ERR(rdev->supply)) { |
940 | supply_rdev->dev.kobj.name, err); | 955 | err = PTR_ERR(rdev->supply); |
941 | goto out; | 956 | rdev->supply = NULL; |
957 | return err; | ||
942 | } | 958 | } |
943 | rdev->supply = supply_rdev; | 959 | |
944 | list_add(&rdev->slist, &supply_rdev->supply_list); | 960 | return 0; |
945 | out: | ||
946 | return err; | ||
947 | } | 961 | } |
948 | 962 | ||
949 | /** | 963 | /** |
@@ -1032,7 +1046,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev) | |||
1032 | } | 1046 | } |
1033 | } | 1047 | } |
1034 | 1048 | ||
1035 | #define REG_STR_SIZE 32 | 1049 | #define REG_STR_SIZE 64 |
1036 | 1050 | ||
1037 | static struct regulator *create_regulator(struct regulator_dev *rdev, | 1051 | static struct regulator *create_regulator(struct regulator_dev *rdev, |
1038 | struct device *dev, | 1052 | struct device *dev, |
@@ -1052,8 +1066,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, | |||
1052 | 1066 | ||
1053 | if (dev) { | 1067 | if (dev) { |
1054 | /* create a 'requested_microamps_name' sysfs entry */ | 1068 | /* create a 'requested_microamps_name' sysfs entry */ |
1055 | size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s", | 1069 | size = scnprintf(buf, REG_STR_SIZE, |
1056 | supply_name); | 1070 | "microamps_requested_%s-%s", |
1071 | dev_name(dev), supply_name); | ||
1057 | if (size >= REG_STR_SIZE) | 1072 | if (size >= REG_STR_SIZE) |
1058 | goto overflow_err; | 1073 | goto overflow_err; |
1059 | 1074 | ||
@@ -1088,7 +1103,28 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, | |||
1088 | dev->kobj.name, err); | 1103 | dev->kobj.name, err); |
1089 | goto link_name_err; | 1104 | goto link_name_err; |
1090 | } | 1105 | } |
1106 | } else { | ||
1107 | regulator->supply_name = kstrdup(supply_name, GFP_KERNEL); | ||
1108 | if (regulator->supply_name == NULL) | ||
1109 | goto attr_err; | ||
1110 | } | ||
1111 | |||
1112 | #ifdef CONFIG_DEBUG_FS | ||
1113 | regulator->debugfs = debugfs_create_dir(regulator->supply_name, | ||
1114 | rdev->debugfs); | ||
1115 | if (IS_ERR_OR_NULL(regulator->debugfs)) { | ||
1116 | rdev_warn(rdev, "Failed to create debugfs directory\n"); | ||
1117 | regulator->debugfs = NULL; | ||
1118 | } else { | ||
1119 | debugfs_create_u32("uA_load", 0444, regulator->debugfs, | ||
1120 | ®ulator->uA_load); | ||
1121 | debugfs_create_u32("min_uV", 0444, regulator->debugfs, | ||
1122 | ®ulator->min_uV); | ||
1123 | debugfs_create_u32("max_uV", 0444, regulator->debugfs, | ||
1124 | ®ulator->max_uV); | ||
1091 | } | 1125 | } |
1126 | #endif | ||
1127 | |||
1092 | mutex_unlock(&rdev->mutex); | 1128 | mutex_unlock(&rdev->mutex); |
1093 | return regulator; | 1129 | return regulator; |
1094 | link_name_err: | 1130 | link_name_err: |
@@ -1267,13 +1303,17 @@ void regulator_put(struct regulator *regulator) | |||
1267 | mutex_lock(®ulator_list_mutex); | 1303 | mutex_lock(®ulator_list_mutex); |
1268 | rdev = regulator->rdev; | 1304 | rdev = regulator->rdev; |
1269 | 1305 | ||
1306 | #ifdef CONFIG_DEBUG_FS | ||
1307 | debugfs_remove_recursive(regulator->debugfs); | ||
1308 | #endif | ||
1309 | |||
1270 | /* remove any sysfs entries */ | 1310 | /* remove any sysfs entries */ |
1271 | if (regulator->dev) { | 1311 | if (regulator->dev) { |
1272 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); | 1312 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); |
1273 | kfree(regulator->supply_name); | ||
1274 | device_remove_file(regulator->dev, ®ulator->dev_attr); | 1313 | device_remove_file(regulator->dev, ®ulator->dev_attr); |
1275 | kfree(regulator->dev_attr.attr.name); | 1314 | kfree(regulator->dev_attr.attr.name); |
1276 | } | 1315 | } |
1316 | kfree(regulator->supply_name); | ||
1277 | list_del(®ulator->list); | 1317 | list_del(®ulator->list); |
1278 | kfree(regulator); | 1318 | kfree(regulator); |
1279 | 1319 | ||
@@ -1301,19 +1341,6 @@ static int _regulator_enable(struct regulator_dev *rdev) | |||
1301 | { | 1341 | { |
1302 | int ret, delay; | 1342 | int ret, delay; |
1303 | 1343 | ||
1304 | if (rdev->use_count == 0) { | ||
1305 | /* do we need to enable the supply regulator first */ | ||
1306 | if (rdev->supply) { | ||
1307 | mutex_lock(&rdev->supply->mutex); | ||
1308 | ret = _regulator_enable(rdev->supply); | ||
1309 | mutex_unlock(&rdev->supply->mutex); | ||
1310 | if (ret < 0) { | ||
1311 | rdev_err(rdev, "failed to enable: %d\n", ret); | ||
1312 | return ret; | ||
1313 | } | ||
1314 | } | ||
1315 | } | ||
1316 | |||
1317 | /* check voltage and requested load before enabling */ | 1344 | /* check voltage and requested load before enabling */ |
1318 | if (rdev->constraints && | 1345 | if (rdev->constraints && |
1319 | (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) | 1346 | (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) |
@@ -1388,19 +1415,27 @@ int regulator_enable(struct regulator *regulator) | |||
1388 | struct regulator_dev *rdev = regulator->rdev; | 1415 | struct regulator_dev *rdev = regulator->rdev; |
1389 | int ret = 0; | 1416 | int ret = 0; |
1390 | 1417 | ||
1418 | if (rdev->supply) { | ||
1419 | ret = regulator_enable(rdev->supply); | ||
1420 | if (ret != 0) | ||
1421 | return ret; | ||
1422 | } | ||
1423 | |||
1391 | mutex_lock(&rdev->mutex); | 1424 | mutex_lock(&rdev->mutex); |
1392 | ret = _regulator_enable(rdev); | 1425 | ret = _regulator_enable(rdev); |
1393 | mutex_unlock(&rdev->mutex); | 1426 | mutex_unlock(&rdev->mutex); |
1427 | |||
1428 | if (ret != 0) | ||
1429 | regulator_disable(rdev->supply); | ||
1430 | |||
1394 | return ret; | 1431 | return ret; |
1395 | } | 1432 | } |
1396 | EXPORT_SYMBOL_GPL(regulator_enable); | 1433 | EXPORT_SYMBOL_GPL(regulator_enable); |
1397 | 1434 | ||
1398 | /* locks held by regulator_disable() */ | 1435 | /* locks held by regulator_disable() */ |
1399 | static int _regulator_disable(struct regulator_dev *rdev, | 1436 | static int _regulator_disable(struct regulator_dev *rdev) |
1400 | struct regulator_dev **supply_rdev_ptr) | ||
1401 | { | 1437 | { |
1402 | int ret = 0; | 1438 | int ret = 0; |
1403 | *supply_rdev_ptr = NULL; | ||
1404 | 1439 | ||
1405 | if (WARN(rdev->use_count <= 0, | 1440 | if (WARN(rdev->use_count <= 0, |
1406 | "unbalanced disables for %s\n", rdev_get_name(rdev))) | 1441 | "unbalanced disables for %s\n", rdev_get_name(rdev))) |
@@ -1427,9 +1462,6 @@ static int _regulator_disable(struct regulator_dev *rdev, | |||
1427 | NULL); | 1462 | NULL); |
1428 | } | 1463 | } |
1429 | 1464 | ||
1430 | /* decrease our supplies ref count and disable if required */ | ||
1431 | *supply_rdev_ptr = rdev->supply; | ||
1432 | |||
1433 | rdev->use_count = 0; | 1465 | rdev->use_count = 0; |
1434 | } else if (rdev->use_count > 1) { | 1466 | } else if (rdev->use_count > 1) { |
1435 | 1467 | ||
@@ -1440,6 +1472,7 @@ static int _regulator_disable(struct regulator_dev *rdev, | |||
1440 | 1472 | ||
1441 | rdev->use_count--; | 1473 | rdev->use_count--; |
1442 | } | 1474 | } |
1475 | |||
1443 | return ret; | 1476 | return ret; |
1444 | } | 1477 | } |
1445 | 1478 | ||
@@ -1458,29 +1491,21 @@ static int _regulator_disable(struct regulator_dev *rdev, | |||
1458 | int regulator_disable(struct regulator *regulator) | 1491 | int regulator_disable(struct regulator *regulator) |
1459 | { | 1492 | { |
1460 | struct regulator_dev *rdev = regulator->rdev; | 1493 | struct regulator_dev *rdev = regulator->rdev; |
1461 | struct regulator_dev *supply_rdev = NULL; | ||
1462 | int ret = 0; | 1494 | int ret = 0; |
1463 | 1495 | ||
1464 | mutex_lock(&rdev->mutex); | 1496 | mutex_lock(&rdev->mutex); |
1465 | ret = _regulator_disable(rdev, &supply_rdev); | 1497 | ret = _regulator_disable(rdev); |
1466 | mutex_unlock(&rdev->mutex); | 1498 | mutex_unlock(&rdev->mutex); |
1467 | 1499 | ||
1468 | /* decrease our supplies ref count and disable if required */ | 1500 | if (ret == 0 && rdev->supply) |
1469 | while (supply_rdev != NULL) { | 1501 | regulator_disable(rdev->supply); |
1470 | rdev = supply_rdev; | ||
1471 | |||
1472 | mutex_lock(&rdev->mutex); | ||
1473 | _regulator_disable(rdev, &supply_rdev); | ||
1474 | mutex_unlock(&rdev->mutex); | ||
1475 | } | ||
1476 | 1502 | ||
1477 | return ret; | 1503 | return ret; |
1478 | } | 1504 | } |
1479 | EXPORT_SYMBOL_GPL(regulator_disable); | 1505 | EXPORT_SYMBOL_GPL(regulator_disable); |
1480 | 1506 | ||
1481 | /* locks held by regulator_force_disable() */ | 1507 | /* locks held by regulator_force_disable() */ |
1482 | static int _regulator_force_disable(struct regulator_dev *rdev, | 1508 | static int _regulator_force_disable(struct regulator_dev *rdev) |
1483 | struct regulator_dev **supply_rdev_ptr) | ||
1484 | { | 1509 | { |
1485 | int ret = 0; | 1510 | int ret = 0; |
1486 | 1511 | ||
@@ -1497,10 +1522,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev, | |||
1497 | REGULATOR_EVENT_DISABLE, NULL); | 1522 | REGULATOR_EVENT_DISABLE, NULL); |
1498 | } | 1523 | } |
1499 | 1524 | ||
1500 | /* decrease our supplies ref count and disable if required */ | ||
1501 | *supply_rdev_ptr = rdev->supply; | ||
1502 | |||
1503 | rdev->use_count = 0; | ||
1504 | return ret; | 1525 | return ret; |
1505 | } | 1526 | } |
1506 | 1527 | ||
@@ -1516,16 +1537,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev, | |||
1516 | int regulator_force_disable(struct regulator *regulator) | 1537 | int regulator_force_disable(struct regulator *regulator) |
1517 | { | 1538 | { |
1518 | struct regulator_dev *rdev = regulator->rdev; | 1539 | struct regulator_dev *rdev = regulator->rdev; |
1519 | struct regulator_dev *supply_rdev = NULL; | ||
1520 | int ret; | 1540 | int ret; |
1521 | 1541 | ||
1522 | mutex_lock(&rdev->mutex); | 1542 | mutex_lock(&rdev->mutex); |
1523 | regulator->uA_load = 0; | 1543 | regulator->uA_load = 0; |
1524 | ret = _regulator_force_disable(rdev, &supply_rdev); | 1544 | ret = _regulator_force_disable(regulator->rdev); |
1525 | mutex_unlock(&rdev->mutex); | 1545 | mutex_unlock(&rdev->mutex); |
1526 | 1546 | ||
1527 | if (supply_rdev) | 1547 | if (rdev->supply) |
1528 | regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); | 1548 | while (rdev->open_count--) |
1549 | regulator_disable(rdev->supply); | ||
1529 | 1550 | ||
1530 | return ret; | 1551 | return ret; |
1531 | } | 1552 | } |
@@ -2136,7 +2157,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) | |||
2136 | /* get input voltage */ | 2157 | /* get input voltage */ |
2137 | input_uV = 0; | 2158 | input_uV = 0; |
2138 | if (rdev->supply) | 2159 | if (rdev->supply) |
2139 | input_uV = _regulator_get_voltage(rdev->supply); | 2160 | input_uV = regulator_get_voltage(rdev->supply); |
2140 | if (input_uV <= 0) | 2161 | if (input_uV <= 0) |
2141 | input_uV = rdev->constraints->input_uV; | 2162 | input_uV = rdev->constraints->input_uV; |
2142 | if (input_uV <= 0) { | 2163 | if (input_uV <= 0) { |
@@ -2206,17 +2227,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier); | |||
2206 | static void _notifier_call_chain(struct regulator_dev *rdev, | 2227 | static void _notifier_call_chain(struct regulator_dev *rdev, |
2207 | unsigned long event, void *data) | 2228 | unsigned long event, void *data) |
2208 | { | 2229 | { |
2209 | struct regulator_dev *_rdev; | ||
2210 | |||
2211 | /* call rdev chain first */ | 2230 | /* call rdev chain first */ |
2212 | blocking_notifier_call_chain(&rdev->notifier, event, NULL); | 2231 | blocking_notifier_call_chain(&rdev->notifier, event, NULL); |
2213 | |||
2214 | /* now notify regulator we supply */ | ||
2215 | list_for_each_entry(_rdev, &rdev->supply_list, slist) { | ||
2216 | mutex_lock(&_rdev->mutex); | ||
2217 | _notifier_call_chain(_rdev, event, data); | ||
2218 | mutex_unlock(&_rdev->mutex); | ||
2219 | } | ||
2220 | } | 2232 | } |
2221 | 2233 | ||
2222 | /** | 2234 | /** |
@@ -2264,6 +2276,13 @@ err: | |||
2264 | } | 2276 | } |
2265 | EXPORT_SYMBOL_GPL(regulator_bulk_get); | 2277 | EXPORT_SYMBOL_GPL(regulator_bulk_get); |
2266 | 2278 | ||
2279 | static void regulator_bulk_enable_async(void *data, async_cookie_t cookie) | ||
2280 | { | ||
2281 | struct regulator_bulk_data *bulk = data; | ||
2282 | |||
2283 | bulk->ret = regulator_enable(bulk->consumer); | ||
2284 | } | ||
2285 | |||
2267 | /** | 2286 | /** |
2268 | * regulator_bulk_enable - enable multiple regulator consumers | 2287 | * regulator_bulk_enable - enable multiple regulator consumers |
2269 | * | 2288 | * |
@@ -2279,21 +2298,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get); | |||
2279 | int regulator_bulk_enable(int num_consumers, | 2298 | int regulator_bulk_enable(int num_consumers, |
2280 | struct regulator_bulk_data *consumers) | 2299 | struct regulator_bulk_data *consumers) |
2281 | { | 2300 | { |
2301 | LIST_HEAD(async_domain); | ||
2282 | int i; | 2302 | int i; |
2283 | int ret; | 2303 | int ret = 0; |
2304 | |||
2305 | for (i = 0; i < num_consumers; i++) | ||
2306 | async_schedule_domain(regulator_bulk_enable_async, | ||
2307 | &consumers[i], &async_domain); | ||
2308 | |||
2309 | async_synchronize_full_domain(&async_domain); | ||
2284 | 2310 | ||
2311 | /* If any consumer failed we need to unwind any that succeeded */ | ||
2285 | for (i = 0; i < num_consumers; i++) { | 2312 | for (i = 0; i < num_consumers; i++) { |
2286 | ret = regulator_enable(consumers[i].consumer); | 2313 | if (consumers[i].ret != 0) { |
2287 | if (ret != 0) | 2314 | ret = consumers[i].ret; |
2288 | goto err; | 2315 | goto err; |
2316 | } | ||
2289 | } | 2317 | } |
2290 | 2318 | ||
2291 | return 0; | 2319 | return 0; |
2292 | 2320 | ||
2293 | err: | 2321 | err: |
2294 | pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); | 2322 | for (i = 0; i < num_consumers; i++) |
2295 | for (--i; i >= 0; --i) | 2323 | if (consumers[i].ret == 0) |
2296 | regulator_disable(consumers[i].consumer); | 2324 | regulator_disable(consumers[i].consumer); |
2325 | else | ||
2326 | pr_err("Failed to enable %s: %d\n", | ||
2327 | consumers[i].supply, consumers[i].ret); | ||
2297 | 2328 | ||
2298 | return ret; | 2329 | return ret; |
2299 | } | 2330 | } |
@@ -2589,9 +2620,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
2589 | rdev->owner = regulator_desc->owner; | 2620 | rdev->owner = regulator_desc->owner; |
2590 | rdev->desc = regulator_desc; | 2621 | rdev->desc = regulator_desc; |
2591 | INIT_LIST_HEAD(&rdev->consumer_list); | 2622 | INIT_LIST_HEAD(&rdev->consumer_list); |
2592 | INIT_LIST_HEAD(&rdev->supply_list); | ||
2593 | INIT_LIST_HEAD(&rdev->list); | 2623 | INIT_LIST_HEAD(&rdev->list); |
2594 | INIT_LIST_HEAD(&rdev->slist); | ||
2595 | BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); | 2624 | BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); |
2596 | 2625 | ||
2597 | /* preform any regulator specific init */ | 2626 | /* preform any regulator specific init */ |
@@ -2672,6 +2701,7 @@ unset_supplies: | |||
2672 | unset_regulator_supplies(rdev); | 2701 | unset_regulator_supplies(rdev); |
2673 | 2702 | ||
2674 | scrub: | 2703 | scrub: |
2704 | kfree(rdev->constraints); | ||
2675 | device_unregister(&rdev->dev); | 2705 | device_unregister(&rdev->dev); |
2676 | /* device core frees rdev */ | 2706 | /* device core frees rdev */ |
2677 | rdev = ERR_PTR(ret); | 2707 | rdev = ERR_PTR(ret); |
@@ -2703,7 +2733,7 @@ void regulator_unregister(struct regulator_dev *rdev) | |||
2703 | unset_regulator_supplies(rdev); | 2733 | unset_regulator_supplies(rdev); |
2704 | list_del(&rdev->list); | 2734 | list_del(&rdev->list); |
2705 | if (rdev->supply) | 2735 | if (rdev->supply) |
2706 | sysfs_remove_link(&rdev->dev.kobj, "supply"); | 2736 | regulator_put(rdev->supply); |
2707 | device_unregister(&rdev->dev); | 2737 | device_unregister(&rdev->dev); |
2708 | kfree(rdev->constraints); | 2738 | kfree(rdev->constraints); |
2709 | mutex_unlock(®ulator_list_mutex); | 2739 | mutex_unlock(®ulator_list_mutex); |
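A note on the regulator core changes above: supply handling moves into regulator_enable()/regulator_disable() themselves, and regulator_bulk_enable() now fires its enables through the async framework, unwinding any consumer that did come up when another one fails. The consumer-facing bulk API is unchanged; the sketch below shows how a driver typically uses it. The supply names "vdda" and "vddio" and the function names are purely illustrative, not part of this patch.

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer with two supplies that are brought up together. */
static struct regulator_bulk_data example_supplies[] = {
        { .supply = "vdda" },           /* example supply names only */
        { .supply = "vddio" },
};

static int example_power_on(struct device *dev)
{
        int ret;

        ret = regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
                                 example_supplies);
        if (ret)
                return ret;

        /*
         * With this patch the individual enables run concurrently; if any
         * of them fails, the ones that succeeded are disabled again and
         * the first error is returned.
         */
        ret = regulator_bulk_enable(ARRAY_SIZE(example_supplies),
                                    example_supplies);
        if (ret)
                regulator_bulk_free(ARRAY_SIZE(example_supplies),
                                    example_supplies);
        return ret;
}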
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c index c7410bde7b5d..f6ef6694ab98 100644 --- a/drivers/regulator/dummy.c +++ b/drivers/regulator/dummy.c | |||
@@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = { | |||
36 | .ops = &dummy_ops, | 36 | .ops = &dummy_ops, |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static int __devinit dummy_regulator_probe(struct platform_device *pdev) | ||
40 | { | ||
41 | int ret; | ||
42 | |||
43 | dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, | ||
44 | &dummy_initdata, NULL); | ||
45 | if (IS_ERR(dummy_regulator_rdev)) { | ||
46 | ret = PTR_ERR(dummy_regulator_rdev); | ||
47 | pr_err("Failed to register regulator: %d\n", ret); | ||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | static struct platform_driver dummy_regulator_driver = { | ||
55 | .probe = dummy_regulator_probe, | ||
56 | .driver = { | ||
57 | .name = "reg-dummy", | ||
58 | .owner = THIS_MODULE, | ||
59 | }, | ||
60 | }; | ||
61 | |||
39 | static struct platform_device *dummy_pdev; | 62 | static struct platform_device *dummy_pdev; |
40 | 63 | ||
41 | void __init regulator_dummy_init(void) | 64 | void __init regulator_dummy_init(void) |
@@ -55,12 +78,9 @@ void __init regulator_dummy_init(void) | |||
55 | return; | 78 | return; |
56 | } | 79 | } |
57 | 80 | ||
58 | dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, | 81 | ret = platform_driver_register(&dummy_regulator_driver); |
59 | &dummy_initdata, NULL); | 82 | if (ret != 0) { |
60 | if (IS_ERR(dummy_regulator_rdev)) { | 83 | pr_err("Failed to register dummy regulator driver: %d\n", ret); |
61 | ret = PTR_ERR(dummy_regulator_rdev); | ||
62 | pr_err("Failed to register regulator: %d\n", ret); | ||
63 | platform_device_unregister(dummy_pdev); | 84 | platform_device_unregister(dummy_pdev); |
64 | return; | ||
65 | } | 85 | } |
66 | } | 86 | } |
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 55dd4e6650db..66d2d60b436a 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #define TPS65911_REG_LDO7 11 | 49 | #define TPS65911_REG_LDO7 11 |
50 | #define TPS65911_REG_LDO8 12 | 50 | #define TPS65911_REG_LDO8 12 |
51 | 51 | ||
52 | #define TPS65910_NUM_REGULATOR 13 | ||
53 | #define TPS65910_SUPPLY_STATE_ENABLED 0x1 | 52 | #define TPS65910_SUPPLY_STATE_ENABLED 0x1 |
54 | 53 | ||
55 | /* supported VIO voltages in milivolts */ | 54 | /* supported VIO voltages in milivolts */ |
@@ -264,11 +263,12 @@ static struct tps_info tps65911_regs[] = { | |||
264 | }; | 263 | }; |
265 | 264 | ||
266 | struct tps65910_reg { | 265 | struct tps65910_reg { |
267 | struct regulator_desc desc[TPS65910_NUM_REGULATOR]; | 266 | struct regulator_desc *desc; |
268 | struct tps65910 *mfd; | 267 | struct tps65910 *mfd; |
269 | struct regulator_dev *rdev[TPS65910_NUM_REGULATOR]; | 268 | struct regulator_dev **rdev; |
270 | struct tps_info *info[TPS65910_NUM_REGULATOR]; | 269 | struct tps_info **info; |
271 | struct mutex mutex; | 270 | struct mutex mutex; |
271 | int num_regulators; | ||
272 | int mode; | 272 | int mode; |
273 | int (*get_ctrl_reg)(int); | 273 | int (*get_ctrl_reg)(int); |
274 | }; | 274 | }; |
@@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev, | |||
759 | mult = (selector / VDD1_2_NUM_VOLTS) + 1; | 759 | mult = (selector / VDD1_2_NUM_VOLTS) + 1; |
760 | volt = VDD1_2_MIN_VOLT + | 760 | volt = VDD1_2_MIN_VOLT + |
761 | (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; | 761 | (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; |
762 | break; | ||
762 | case TPS65911_REG_VDDCTRL: | 763 | case TPS65911_REG_VDDCTRL: |
763 | volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); | 764 | volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); |
765 | break; | ||
766 | default: | ||
767 | BUG(); | ||
768 | return -EINVAL; | ||
764 | } | 769 | } |
765 | 770 | ||
766 | return volt * 100 * mult; | 771 | return volt * 100 * mult; |
@@ -897,16 +902,42 @@ static __devinit int tps65910_probe(struct platform_device *pdev) | |||
897 | switch(tps65910_chip_id(tps65910)) { | 902 | switch(tps65910_chip_id(tps65910)) { |
898 | case TPS65910: | 903 | case TPS65910: |
899 | pmic->get_ctrl_reg = &tps65910_get_ctrl_register; | 904 | pmic->get_ctrl_reg = &tps65910_get_ctrl_register; |
905 | pmic->num_regulators = ARRAY_SIZE(tps65910_regs); | ||
900 | info = tps65910_regs; | 906 | info = tps65910_regs; |
907 | break; | ||
901 | case TPS65911: | 908 | case TPS65911: |
902 | pmic->get_ctrl_reg = &tps65911_get_ctrl_register; | 909 | pmic->get_ctrl_reg = &tps65911_get_ctrl_register; |
910 | pmic->num_regulators = ARRAY_SIZE(tps65911_regs); | ||
903 | info = tps65911_regs; | 911 | info = tps65911_regs; |
912 | break; | ||
904 | default: | 913 | default: |
905 | pr_err("Invalid tps chip version\n"); | 914 | pr_err("Invalid tps chip version\n"); |
915 | kfree(pmic); | ||
906 | return -ENODEV; | 916 | return -ENODEV; |
907 | } | 917 | } |
908 | 918 | ||
909 | for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) { | 919 | pmic->desc = kcalloc(pmic->num_regulators, |
920 | sizeof(struct regulator_desc), GFP_KERNEL); | ||
921 | if (!pmic->desc) { | ||
922 | err = -ENOMEM; | ||
923 | goto err_free_pmic; | ||
924 | } | ||
925 | |||
926 | pmic->info = kcalloc(pmic->num_regulators, | ||
927 | sizeof(struct tps_info *), GFP_KERNEL); | ||
928 | if (!pmic->info) { | ||
929 | err = -ENOMEM; | ||
930 | goto err_free_desc; | ||
931 | } | ||
932 | |||
933 | pmic->rdev = kcalloc(pmic->num_regulators, | ||
934 | sizeof(struct regulator_dev *), GFP_KERNEL); | ||
935 | if (!pmic->rdev) { | ||
936 | err = -ENOMEM; | ||
937 | goto err_free_info; | ||
938 | } | ||
939 | |||
940 | for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) { | ||
910 | /* Register the regulators */ | 941 | /* Register the regulators */ |
911 | pmic->info[i] = info; | 942 | pmic->info[i] = info; |
912 | 943 | ||
@@ -938,7 +969,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev) | |||
938 | "failed to register %s regulator\n", | 969 | "failed to register %s regulator\n", |
939 | pdev->name); | 970 | pdev->name); |
940 | err = PTR_ERR(rdev); | 971 | err = PTR_ERR(rdev); |
941 | goto err; | 972 | goto err_unregister_regulator; |
942 | } | 973 | } |
943 | 974 | ||
944 | /* Save regulator for cleanup */ | 975 | /* Save regulator for cleanup */ |
@@ -946,23 +977,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev) | |||
946 | } | 977 | } |
947 | return 0; | 978 | return 0; |
948 | 979 | ||
949 | err: | 980 | err_unregister_regulator: |
950 | while (--i >= 0) | 981 | while (--i >= 0) |
951 | regulator_unregister(pmic->rdev[i]); | 982 | regulator_unregister(pmic->rdev[i]); |
952 | 983 | kfree(pmic->rdev); | |
984 | err_free_info: | ||
985 | kfree(pmic->info); | ||
986 | err_free_desc: | ||
987 | kfree(pmic->desc); | ||
988 | err_free_pmic: | ||
953 | kfree(pmic); | 989 | kfree(pmic); |
954 | return err; | 990 | return err; |
955 | } | 991 | } |
956 | 992 | ||
957 | static int __devexit tps65910_remove(struct platform_device *pdev) | 993 | static int __devexit tps65910_remove(struct platform_device *pdev) |
958 | { | 994 | { |
959 | struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev); | 995 | struct tps65910_reg *pmic = platform_get_drvdata(pdev); |
960 | int i; | 996 | int i; |
961 | 997 | ||
962 | for (i = 0; i < TPS65910_NUM_REGULATOR; i++) | 998 | for (i = 0; i < pmic->num_regulators; i++) |
963 | regulator_unregister(tps65910_reg->rdev[i]); | 999 | regulator_unregister(pmic->rdev[i]); |
964 | 1000 | ||
965 | kfree(tps65910_reg); | 1001 | kfree(pmic->rdev); |
1002 | kfree(pmic->info); | ||
1003 | kfree(pmic->desc); | ||
1004 | kfree(pmic); | ||
966 | return 0; | 1005 | return 0; |
967 | } | 1006 | } |
968 | 1007 | ||
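The tps65910 rework above drops the fixed TPS65910_NUM_REGULATOR sizing in favour of a per-chip num_regulators plus kcalloc()'d arrays, with each allocation unwound on the error path. A minimal sketch of that allocate-and-unwind shape, assuming a trimmed-down state struct rather than the driver's real one:

#include <linux/slab.h>
#include <linux/regulator/driver.h>

struct example_pmic {
        int num_regulators;
        struct regulator_desc *desc;
        struct regulator_dev **rdev;
};

static int example_pmic_alloc(struct example_pmic *pmic, int num)
{
        pmic->num_regulators = num;

        pmic->desc = kcalloc(num, sizeof(*pmic->desc), GFP_KERNEL);
        if (!pmic->desc)
                return -ENOMEM;

        pmic->rdev = kcalloc(num, sizeof(*pmic->rdev), GFP_KERNEL);
        if (!pmic->rdev)
                goto err_free_desc;

        return 0;

err_free_desc:
        kfree(pmic->desc);
        return -ENOMEM;
}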
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 87fe0f75a56e..ee8747f4fa08 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c | |||
@@ -835,8 +835,8 @@ static struct regulator_ops twlsmps_ops = { | |||
835 | remap_conf) \ | 835 | remap_conf) \ |
836 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ | 836 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ |
837 | remap_conf, TWL4030, twl4030fixed_ops) | 837 | remap_conf, TWL4030, twl4030fixed_ops) |
838 | #define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \ | 838 | #define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \ |
839 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ | 839 | TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \ |
840 | 0x0, TWL6030, twl6030fixed_ops) | 840 | 0x0, TWL6030, twl6030fixed_ops) |
841 | 841 | ||
842 | #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ | 842 | #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ |
@@ -856,24 +856,22 @@ static struct regulator_ops twlsmps_ops = { | |||
856 | }, \ | 856 | }, \ |
857 | } | 857 | } |
858 | 858 | ||
859 | #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ | 859 | #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ |
860 | .base = offset, \ | 860 | .base = offset, \ |
861 | .id = num, \ | ||
862 | .min_mV = min_mVolts, \ | 861 | .min_mV = min_mVolts, \ |
863 | .max_mV = max_mVolts, \ | 862 | .max_mV = max_mVolts, \ |
864 | .desc = { \ | 863 | .desc = { \ |
865 | .name = #label, \ | 864 | .name = #label, \ |
866 | .id = TWL6030_REG_##label, \ | 865 | .id = TWL6030_REG_##label, \ |
867 | .n_voltages = (max_mVolts - min_mVolts)/100, \ | 866 | .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \ |
868 | .ops = &twl6030ldo_ops, \ | 867 | .ops = &twl6030ldo_ops, \ |
869 | .type = REGULATOR_VOLTAGE, \ | 868 | .type = REGULATOR_VOLTAGE, \ |
870 | .owner = THIS_MODULE, \ | 869 | .owner = THIS_MODULE, \ |
871 | }, \ | 870 | }, \ |
872 | } | 871 | } |
873 | 872 | ||
874 | #define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ | 873 | #define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ |
875 | .base = offset, \ | 874 | .base = offset, \ |
876 | .id = num, \ | ||
877 | .min_mV = min_mVolts, \ | 875 | .min_mV = min_mVolts, \ |
878 | .max_mV = max_mVolts, \ | 876 | .max_mV = max_mVolts, \ |
879 | .desc = { \ | 877 | .desc = { \ |
@@ -903,9 +901,8 @@ static struct regulator_ops twlsmps_ops = { | |||
903 | }, \ | 901 | }, \ |
904 | } | 902 | } |
905 | 903 | ||
906 | #define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \ | 904 | #define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \ |
907 | .base = offset, \ | 905 | .base = offset, \ |
908 | .id = num, \ | ||
909 | .delay = turnon_delay, \ | 906 | .delay = turnon_delay, \ |
910 | .desc = { \ | 907 | .desc = { \ |
911 | .name = #label, \ | 908 | .name = #label, \ |
@@ -916,9 +913,8 @@ static struct regulator_ops twlsmps_ops = { | |||
916 | }, \ | 913 | }, \ |
917 | } | 914 | } |
918 | 915 | ||
919 | #define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \ | 916 | #define TWL6025_ADJUSTABLE_SMPS(label, offset) { \ |
920 | .base = offset, \ | 917 | .base = offset, \ |
921 | .id = num, \ | ||
922 | .min_mV = 600, \ | 918 | .min_mV = 600, \ |
923 | .max_mV = 2100, \ | 919 | .max_mV = 2100, \ |
924 | .desc = { \ | 920 | .desc = { \ |
@@ -961,32 +957,32 @@ static struct twlreg_info twl_regs[] = { | |||
961 | /* 6030 REG with base as PMC Slave Misc : 0x0030 */ | 957 | /* 6030 REG with base as PMC Slave Misc : 0x0030 */ |
962 | /* Turnon-delay and remap configuration values for 6030 are not | 958 | /* Turnon-delay and remap configuration values for 6030 are not |
963 | verified since the specification is not public */ | 959 | verified since the specification is not public */ |
964 | TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1), | 960 | TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300), |
965 | TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2), | 961 | TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300), |
966 | TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3), | 962 | TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300), |
967 | TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4), | 963 | TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300), |
968 | TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5), | 964 | TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300), |
969 | TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7), | 965 | TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300), |
970 | TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0), | 966 | TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0), |
971 | TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0), | 967 | TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0), |
972 | TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0), | 968 | TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0), |
973 | TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0), | 969 | TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0), |
974 | TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0), | 970 | TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0), |
975 | 971 | ||
976 | /* 6025 are renamed compared to 6030 versions */ | 972 | /* 6025 are renamed compared to 6030 versions */ |
977 | TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1), | 973 | TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300), |
978 | TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2), | 974 | TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300), |
979 | TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3), | 975 | TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300), |
980 | TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4), | 976 | TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300), |
981 | TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5), | 977 | TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300), |
982 | TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7), | 978 | TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300), |
983 | TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16), | 979 | TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300), |
984 | TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17), | 980 | TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300), |
985 | TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18), | 981 | TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300), |
986 | 982 | ||
987 | TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1), | 983 | TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34), |
988 | TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2), | 984 | TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10), |
989 | TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3), | 985 | TWL6025_ADJUSTABLE_SMPS(VIO, 0x16), |
990 | }; | 986 | }; |
991 | 987 | ||
992 | static u8 twl_get_smps_offset(void) | 988 | static u8 twl_get_smps_offset(void) |
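Besides dropping the unused .id fields, the TWL6030 LDO macro above fixes a fencepost error in n_voltages: a range stepped in 100 mV increments contains (max_mVolts - min_mVolts)/100 + 1 selectable values, because both endpoints count. Worked example: a 1000-3300 mV LDO offers (3300 - 1000)/100 + 1 = 24 voltages (1000, 1100, ..., 3300 mV); the old formula reported 23 and left one step (the 3300 mV endpoint, if selector 0 maps to the minimum) out of the advertised range.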
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index a0982e809851..bd3531d8b2ac 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
@@ -267,23 +267,6 @@ static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev, | |||
267 | return vsel; | 267 | return vsel; |
268 | } | 268 | } |
269 | 269 | ||
270 | static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev, | ||
271 | int min_uV, int max_uV) | ||
272 | { | ||
273 | u16 vsel; | ||
274 | |||
275 | if (max_uV < 600000 || max_uV > 1800000) | ||
276 | return -EINVAL; | ||
277 | |||
278 | vsel = ((max_uV - 600000) / 12500) + 8; | ||
279 | |||
280 | if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV || | ||
281 | wm831x_buckv_list_voltage(rdev, vsel) < max_uV) | ||
282 | return -EINVAL; | ||
283 | |||
284 | return vsel; | ||
285 | } | ||
286 | |||
287 | static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) | 270 | static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) |
288 | { | 271 | { |
289 | struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); | 272 | struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); |
@@ -338,28 +321,23 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, | |||
338 | if (ret < 0) | 321 | if (ret < 0) |
339 | return ret; | 322 | return ret; |
340 | 323 | ||
341 | /* Set the high voltage as the DVS voltage. This is optimised | 324 | /* |
342 | * for CPUfreq usage, most processors will keep the maximum | 325 | * If this VSEL is higher than the last one we've seen then |
343 | * voltage constant and lower the minimum with the frequency. */ | 326 | * remember it as the DVS VSEL. This is optimised for CPUfreq |
344 | vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV); | 327 | * usage where we want to get to the highest voltage very |
345 | if (vsel < 0) { | 328 | * quickly. |
346 | /* This should never happen - at worst the same vsel | 329 | */ |
347 | * should be chosen */ | 330 | if (vsel > dcdc->dvs_vsel) { |
348 | WARN_ON(vsel < 0); | 331 | ret = wm831x_set_bits(wm831x, dvs_reg, |
349 | return 0; | 332 | WM831X_DC1_DVS_VSEL_MASK, |
333 | dcdc->dvs_vsel); | ||
334 | if (ret == 0) | ||
335 | dcdc->dvs_vsel = vsel; | ||
336 | else | ||
337 | dev_warn(wm831x->dev, | ||
338 | "Failed to set DCDC DVS VSEL: %d\n", ret); | ||
350 | } | 339 | } |
351 | 340 | ||
352 | /* Don't bother if it's the same VSEL we're already using */ | ||
353 | if (vsel == dcdc->on_vsel) | ||
354 | return 0; | ||
355 | |||
356 | ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel); | ||
357 | if (ret == 0) | ||
358 | dcdc->dvs_vsel = vsel; | ||
359 | else | ||
360 | dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n", | ||
361 | ret); | ||
362 | |||
363 | return 0; | 341 | return 0; |
364 | } | 342 | } |
365 | 343 | ||
@@ -456,27 +434,6 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc, | |||
456 | if (!pdata || !pdata->dvs_gpio) | 434 | if (!pdata || !pdata->dvs_gpio) |
457 | return; | 435 | return; |
458 | 436 | ||
459 | switch (pdata->dvs_control_src) { | ||
460 | case 1: | ||
461 | ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT; | ||
462 | break; | ||
463 | case 2: | ||
464 | ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT; | ||
465 | break; | ||
466 | default: | ||
467 | dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n", | ||
468 | pdata->dvs_control_src, dcdc->name); | ||
469 | return; | ||
470 | } | ||
471 | |||
472 | ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL, | ||
473 | WM831X_DC1_DVS_SRC_MASK, ctrl); | ||
474 | if (ret < 0) { | ||
475 | dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n", | ||
476 | dcdc->name, ret); | ||
477 | return; | ||
478 | } | ||
479 | |||
480 | ret = gpio_request(pdata->dvs_gpio, "DCDC DVS"); | 437 | ret = gpio_request(pdata->dvs_gpio, "DCDC DVS"); |
481 | if (ret < 0) { | 438 | if (ret < 0) { |
482 | dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n", | 439 | dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n", |
@@ -498,17 +455,57 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc, | |||
498 | } | 455 | } |
499 | 456 | ||
500 | dcdc->dvs_gpio = pdata->dvs_gpio; | 457 | dcdc->dvs_gpio = pdata->dvs_gpio; |
458 | |||
459 | switch (pdata->dvs_control_src) { | ||
460 | case 1: | ||
461 | ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT; | ||
462 | break; | ||
463 | case 2: | ||
464 | ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT; | ||
465 | break; | ||
466 | default: | ||
467 | dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n", | ||
468 | pdata->dvs_control_src, dcdc->name); | ||
469 | return; | ||
470 | } | ||
471 | |||
472 | /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL | ||
473 | * to make bootstrapping a bit smoother. | ||
474 | */ | ||
475 | if (!dcdc->dvs_vsel) { | ||
476 | ret = wm831x_set_bits(wm831x, | ||
477 | dcdc->base + WM831X_DCDC_DVS_CONTROL, | ||
478 | WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel); | ||
479 | if (ret == 0) | ||
480 | dcdc->dvs_vsel = dcdc->on_vsel; | ||
481 | else | ||
482 | dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n", | ||
483 | ret); | ||
484 | } | ||
485 | |||
486 | ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL, | ||
487 | WM831X_DC1_DVS_SRC_MASK, ctrl); | ||
488 | if (ret < 0) { | ||
489 | dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n", | ||
490 | dcdc->name, ret); | ||
491 | } | ||
501 | } | 492 | } |
502 | 493 | ||
503 | static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | 494 | static __devinit int wm831x_buckv_probe(struct platform_device *pdev) |
504 | { | 495 | { |
505 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); | 496 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); |
506 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; | 497 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; |
507 | int id = pdev->id % ARRAY_SIZE(pdata->dcdc); | 498 | int id; |
508 | struct wm831x_dcdc *dcdc; | 499 | struct wm831x_dcdc *dcdc; |
509 | struct resource *res; | 500 | struct resource *res; |
510 | int ret, irq; | 501 | int ret, irq; |
511 | 502 | ||
503 | if (pdata && pdata->wm831x_num) | ||
504 | id = (pdata->wm831x_num * 10) + 1; | ||
505 | else | ||
506 | id = 0; | ||
507 | id = pdev->id - id; | ||
508 | |||
512 | dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); | 509 | dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); |
513 | 510 | ||
514 | if (pdata == NULL || pdata->dcdc[id] == NULL) | 511 | if (pdata == NULL || pdata->dcdc[id] == NULL) |
@@ -545,7 +542,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) | |||
545 | } | 542 | } |
546 | dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK; | 543 | dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK; |
547 | 544 | ||
548 | ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); | 545 | ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL); |
549 | if (ret < 0) { | 546 | if (ret < 0) { |
550 | dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret); | 547 | dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret); |
551 | goto err; | 548 | goto err; |
@@ -709,11 +706,17 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev) | |||
709 | { | 706 | { |
710 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); | 707 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); |
711 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; | 708 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; |
712 | int id = pdev->id % ARRAY_SIZE(pdata->dcdc); | 709 | int id; |
713 | struct wm831x_dcdc *dcdc; | 710 | struct wm831x_dcdc *dcdc; |
714 | struct resource *res; | 711 | struct resource *res; |
715 | int ret, irq; | 712 | int ret, irq; |
716 | 713 | ||
714 | if (pdata && pdata->wm831x_num) | ||
715 | id = (pdata->wm831x_num * 10) + 1; | ||
716 | else | ||
717 | id = 0; | ||
718 | id = pdev->id - id; | ||
719 | |||
717 | dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); | 720 | dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); |
718 | 721 | ||
719 | if (pdata == NULL || pdata->dcdc[id] == NULL) | 722 | if (pdata == NULL || pdata->dcdc[id] == NULL) |
@@ -1046,3 +1049,4 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver"); | |||
1046 | MODULE_LICENSE("GPL"); | 1049 | MODULE_LICENSE("GPL"); |
1047 | MODULE_ALIAS("platform:wm831x-buckv"); | 1050 | MODULE_ALIAS("platform:wm831x-buckv"); |
1048 | MODULE_ALIAS("platform:wm831x-buckp"); | 1051 | MODULE_ALIAS("platform:wm831x-buckp"); |
1052 | MODULE_ALIAS("platform:wm831x-epe"); | ||
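In the wm831x-dcdc probe above, the regulator index is no longer pdev->id % ARRAY_SIZE(pdata->dcdc) but pdev->id minus a per-chip base of wm831x_num * 10 + 1, so several WM831x PMICs on one system get distinct, non-overlapping platform-device ids. Worked example, assuming the MFD core registers a second chip (wm831x_num = 2) with child ids starting at 21: pdev->id 23 gives id = 23 - (2 * 10 + 1) = 2, i.e. DCDC3 of that chip, while a board that leaves wm831x_num at 0 keeps the old direct mapping. The same computation is repeated for the wm831x LDO variants below.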
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index 2220cf8defb1..6709710a059e 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c | |||
@@ -310,11 +310,17 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev) | |||
310 | { | 310 | { |
311 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); | 311 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); |
312 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; | 312 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; |
313 | int id = pdev->id % ARRAY_SIZE(pdata->ldo); | 313 | int id; |
314 | struct wm831x_ldo *ldo; | 314 | struct wm831x_ldo *ldo; |
315 | struct resource *res; | 315 | struct resource *res; |
316 | int ret, irq; | 316 | int ret, irq; |
317 | 317 | ||
318 | if (pdata && pdata->wm831x_num) | ||
319 | id = (pdata->wm831x_num * 10) + 1; | ||
320 | else | ||
321 | id = 0; | ||
322 | id = pdev->id - id; | ||
323 | |||
318 | dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); | 324 | dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); |
319 | 325 | ||
320 | if (pdata == NULL || pdata->ldo[id] == NULL) | 326 | if (pdata == NULL || pdata->ldo[id] == NULL) |
@@ -574,11 +580,17 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev) | |||
574 | { | 580 | { |
575 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); | 581 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); |
576 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; | 582 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; |
577 | int id = pdev->id % ARRAY_SIZE(pdata->ldo); | 583 | int id; |
578 | struct wm831x_ldo *ldo; | 584 | struct wm831x_ldo *ldo; |
579 | struct resource *res; | 585 | struct resource *res; |
580 | int ret, irq; | 586 | int ret, irq; |
581 | 587 | ||
588 | if (pdata && pdata->wm831x_num) | ||
589 | id = (pdata->wm831x_num * 10) + 1; | ||
590 | else | ||
591 | id = 0; | ||
592 | id = pdev->id - id; | ||
593 | |||
582 | dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); | 594 | dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); |
583 | 595 | ||
584 | if (pdata == NULL || pdata->ldo[id] == NULL) | 596 | if (pdata == NULL || pdata->ldo[id] == NULL) |
@@ -764,11 +776,18 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev) | |||
764 | { | 776 | { |
765 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); | 777 | struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); |
766 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; | 778 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; |
767 | int id = pdev->id % ARRAY_SIZE(pdata->ldo); | 779 | int id; |
768 | struct wm831x_ldo *ldo; | 780 | struct wm831x_ldo *ldo; |
769 | struct resource *res; | 781 | struct resource *res; |
770 | int ret; | 782 | int ret; |
771 | 783 | ||
784 | if (pdata && pdata->wm831x_num) | ||
785 | id = (pdata->wm831x_num * 10) + 1; | ||
786 | else | ||
787 | id = 0; | ||
788 | id = pdev->id - id; | ||
789 | |||
790 | |||
772 | dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); | 791 | dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); |
773 | 792 | ||
774 | if (pdata == NULL || pdata->ldo[id] == NULL) | 793 | if (pdata == NULL || pdata->ldo[id] == NULL) |
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index 35b2958d5106..1a6a690f24db 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c | |||
@@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev) | |||
43 | if (!ldo->enable) | 43 | if (!ldo->enable) |
44 | return 0; | 44 | return 0; |
45 | 45 | ||
46 | gpio_set_value(ldo->enable, 1); | 46 | gpio_set_value_cansleep(ldo->enable, 1); |
47 | ldo->is_enabled = true; | 47 | ldo->is_enabled = true; |
48 | 48 | ||
49 | return 0; | 49 | return 0; |
@@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev) | |||
57 | if (!ldo->enable) | 57 | if (!ldo->enable) |
58 | return -EINVAL; | 58 | return -EINVAL; |
59 | 59 | ||
60 | gpio_set_value(ldo->enable, 0); | 60 | gpio_set_value_cansleep(ldo->enable, 0); |
61 | ldo->is_enabled = false; | 61 | ldo->is_enabled = false; |
62 | 62 | ||
63 | return 0; | 63 | return 0; |
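The wm8994 enable line may sit behind a GPIO expander on a sleeping bus, so the driver switches to gpio_set_value_cansleep(), the variant that is allowed to sleep and must only be called from process context, which the regulator enable/disable paths are. A hedged sketch of the distinction, with a made-up helper name:

#include <linux/types.h>
#include <linux/gpio.h>

/*
 * Illustrative only: drive an enable line whose GPIO controller may
 * sleep (for example an I2C expander).  Callers must not hold spinlocks
 * or be in atomic context.
 */
static void example_set_enable(unsigned int gpio, bool on)
{
        gpio_set_value_cansleep(gpio, on ? 1 : 0);
}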
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index bcae8dd41496..7789002bdd5c 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -368,7 +368,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev) | |||
368 | pr_info("%s: already running\n", pdev->name); | 368 | pr_info("%s: already running\n", pdev->name); |
369 | 369 | ||
370 | /* force to 24 hour mode */ | 370 | /* force to 24 hour mode */ |
371 | new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); | 371 | new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); |
372 | new_ctrl |= OMAP_RTC_CTRL_STOP; | 372 | new_ctrl |= OMAP_RTC_CTRL_STOP; |
373 | 373 | ||
374 | /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE: | 374 | /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE: |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 432444af7ee4..a1d3ddba99cc 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
27 | #include <linux/vmalloc.h> | ||
27 | 28 | ||
28 | #include <asm/ccwdev.h> | 29 | #include <asm/ccwdev.h> |
29 | #include <asm/ebcdic.h> | 30 | #include <asm/ebcdic.h> |
@@ -888,11 +889,11 @@ char *dasd_get_user_string(const char __user *user_buf, size_t user_len) | |||
888 | { | 889 | { |
889 | char *buffer; | 890 | char *buffer; |
890 | 891 | ||
891 | buffer = kmalloc(user_len + 1, GFP_KERNEL); | 892 | buffer = vmalloc(user_len + 1); |
892 | if (buffer == NULL) | 893 | if (buffer == NULL) |
893 | return ERR_PTR(-ENOMEM); | 894 | return ERR_PTR(-ENOMEM); |
894 | if (copy_from_user(buffer, user_buf, user_len) != 0) { | 895 | if (copy_from_user(buffer, user_buf, user_len) != 0) { |
895 | kfree(buffer); | 896 | vfree(buffer); |
896 | return ERR_PTR(-EFAULT); | 897 | return ERR_PTR(-EFAULT); |
897 | } | 898 | } |
898 | /* got the string, now strip linefeed. */ | 899 | /* got the string, now strip linefeed. */ |
@@ -930,7 +931,7 @@ static ssize_t dasd_stats_write(struct file *file, | |||
930 | dasd_profile_off(prof); | 931 | dasd_profile_off(prof); |
931 | } else | 932 | } else |
932 | rc = -EINVAL; | 933 | rc = -EINVAL; |
933 | kfree(buffer); | 934 | vfree(buffer); |
934 | return rc; | 935 | return rc; |
935 | } | 936 | } |
936 | 937 | ||
@@ -1042,7 +1043,7 @@ static ssize_t dasd_stats_global_write(struct file *file, | |||
1042 | dasd_global_profile_level = DASD_PROFILE_OFF; | 1043 | dasd_global_profile_level = DASD_PROFILE_OFF; |
1043 | } else | 1044 | } else |
1044 | rc = -EINVAL; | 1045 | rc = -EINVAL; |
1045 | kfree(buffer); | 1046 | vfree(buffer); |
1046 | return rc; | 1047 | return rc; |
1047 | } | 1048 | } |
1048 | 1049 | ||
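dasd_get_user_string() above switches from kmalloc() to vmalloc() because user_len is user-controlled: a large write could demand a physically contiguous allocation that kmalloc cannot satisfy, while vmalloc only needs virtually contiguous pages. Every free of that buffer has to follow suit (vfree, as in the hunks above and in dasd_proc.c below). A minimal sketch of the pattern, not the driver's exact code:

#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/err.h>

/*
 * Copy a user-supplied, user-sized string; the size is untrusted, so
 * avoid demanding physically contiguous memory for it.
 */
static char *example_copy_user_string(const char __user *ubuf, size_t len)
{
        char *buf = vmalloc(len + 1);

        if (!buf)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(buf, ubuf, len)) {
                vfree(buf);
                return ERR_PTR(-EFAULT);
        }
        buf[len] = '\0';
        return buf;
}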
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 30fb979d684d..6e835c9fdfcb 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1461,6 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
1461 | "Read device characteristic failed, rc=%d", rc); | 1461 | "Read device characteristic failed, rc=%d", rc); |
1462 | goto out_err3; | 1462 | goto out_err3; |
1463 | } | 1463 | } |
1464 | |||
1465 | if ((device->features & DASD_FEATURE_USERAW) && | ||
1466 | !(private->rdc_data.facilities.RT_in_LR)) { | ||
1467 | dev_err(&device->cdev->dev, "The storage server does not " | ||
1468 | "support raw-track access\n"); | ||
1469 | rc = -EINVAL; | ||
1470 | goto out_err3; | ||
1471 | } | ||
1472 | |||
1464 | /* find the valid cylinder size */ | 1473 | /* find the valid cylinder size */ |
1465 | if (private->rdc_data.no_cyl == LV_COMPAT_CYL && | 1474 | if (private->rdc_data.no_cyl == LV_COMPAT_CYL && |
1466 | private->rdc_data.long_no_cyl) | 1475 | private->rdc_data.long_no_cyl) |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 6c3c5364d082..e12989fff4ff 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -312,14 +312,14 @@ static ssize_t dasd_stats_proc_write(struct file *file, | |||
312 | pr_info("The statistics have been reset\n"); | 312 | pr_info("The statistics have been reset\n"); |
313 | } else | 313 | } else |
314 | goto out_parse_error; | 314 | goto out_parse_error; |
315 | kfree(buffer); | 315 | vfree(buffer); |
316 | return user_len; | 316 | return user_len; |
317 | out_parse_error: | 317 | out_parse_error: |
318 | rc = -EINVAL; | 318 | rc = -EINVAL; |
319 | pr_warning("%s is not a supported value for /proc/dasd/statistics\n", | 319 | pr_warning("%s is not a supported value for /proc/dasd/statistics\n", |
320 | str); | 320 | str); |
321 | out_error: | 321 | out_error: |
322 | kfree(buffer); | 322 | vfree(buffer); |
323 | return rc; | 323 | return rc; |
324 | #else | 324 | #else |
325 | pr_warning("/proc/dasd/statistics: is not activated in this kernel\n"); | 325 | pr_warning("/proc/dasd/statistics: is not activated in this kernel\n"); |
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 7ad30e72f868..5f9f929e891c 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c | |||
@@ -82,12 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, | |||
82 | return -EFAULT; | 82 | return -EFAULT; |
83 | } else { | 83 | } else { |
84 | len = *count; | 84 | len = *count; |
85 | rc = copy_from_user(buf, buffer, sizeof(buf)); | 85 | rc = kstrtoul_from_user(buffer, len, 0, &val); |
86 | if (rc != 0) | 86 | if (rc) |
87 | return -EFAULT; | 87 | return rc; |
88 | buf[sizeof(buf) - 1] = '\0'; | ||
89 | if (strict_strtoul(buf, 0, &val) != 0) | ||
90 | return -EINVAL; | ||
91 | if (val != 0 && val != 1) | 88 | if (val != 0 && val != 1) |
92 | return -EINVAL; | 89 | return -EINVAL; |
93 | callhome_enabled = val; | 90 | callhome_enabled = val; |
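The sysctl handler above (and the qdio debugfs handler below) replace the open-coded copy_from_user() plus strict_strtoul() sequence with kstrtoul_from_user(), which bounds-checks the user buffer and parses it in one call. A minimal sketch of a write handler using it; the function name and the 0/1 policy check are just illustrations of this caller's usage:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_write(const char __user *ubuf, size_t count)
{
        unsigned long val;
        int rc;

        /* base 0 accepts decimal, octal (0...) and hex (0x...) input */
        rc = kstrtoul_from_user(ubuf, count, 0, &val);
        if (rc)
                return rc;
        if (val != 0 && val != 1)
                return -EINVAL;
        /* ... act on val ... */
        return count;
}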
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 7bc643f3f5ab..e5c966462c5a 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #include "chsc.h" | 14 | #include "chsc.h" |
15 | 15 | ||
16 | #define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ | 16 | #define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ |
17 | #define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */ | ||
18 | #define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ | ||
17 | #define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ | 19 | #define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ |
18 | 20 | ||
19 | /* | 21 | /* |
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f8b03a636e49..0e615cb912d0 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -188,19 +188,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, | |||
188 | struct qdio_irq *irq_ptr = seq->private; | 188 | struct qdio_irq *irq_ptr = seq->private; |
189 | struct qdio_q *q; | 189 | struct qdio_q *q; |
190 | unsigned long val; | 190 | unsigned long val; |
191 | char buf[8]; | ||
192 | int ret, i; | 191 | int ret, i; |
193 | 192 | ||
194 | if (!irq_ptr) | 193 | if (!irq_ptr) |
195 | return 0; | 194 | return 0; |
196 | if (count >= sizeof(buf)) | 195 | |
197 | return -EINVAL; | 196 | ret = kstrtoul_from_user(ubuf, count, 10, &val); |
198 | if (copy_from_user(&buf, ubuf, count)) | 197 | if (ret) |
199 | return -EFAULT; | ||
200 | buf[count] = 0; | ||
201 | |||
202 | ret = strict_strtoul(buf, 10, &val); | ||
203 | if (ret < 0) | ||
204 | return ret; | 198 | return ret; |
205 | 199 | ||
206 | switch (val) { | 200 | switch (val) { |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index e58169c32474..288c9140290e 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -313,7 +313,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) | |||
313 | unsigned long schid = *((u32 *) &q->irq_ptr->schid); | 313 | unsigned long schid = *((u32 *) &q->irq_ptr->schid); |
314 | unsigned int fc = QDIO_SIGA_WRITE; | 314 | unsigned int fc = QDIO_SIGA_WRITE; |
315 | u64 start_time = 0; | 315 | u64 start_time = 0; |
316 | int cc; | 316 | int retries = 0, cc; |
317 | 317 | ||
318 | if (is_qebsm(q)) { | 318 | if (is_qebsm(q)) { |
319 | schid = q->irq_ptr->sch_token; | 319 | schid = q->irq_ptr->sch_token; |
@@ -325,6 +325,7 @@ again: | |||
325 | /* hipersocket busy condition */ | 325 | /* hipersocket busy condition */ |
326 | if (unlikely(*busy_bit)) { | 326 | if (unlikely(*busy_bit)) { |
327 | WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); | 327 | WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); |
328 | retries++; | ||
328 | 329 | ||
329 | if (!start_time) { | 330 | if (!start_time) { |
330 | start_time = get_clock(); | 331 | start_time = get_clock(); |
@@ -333,6 +334,11 @@ again: | |||
333 | if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) | 334 | if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) |
334 | goto again; | 335 | goto again; |
335 | } | 336 | } |
337 | if (retries) { | ||
338 | DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, | ||
339 | "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); | ||
340 | DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); | ||
341 | } | ||
336 | return cc; | 342 | return cc; |
337 | } | 343 | } |
338 | 344 | ||
@@ -728,13 +734,14 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) | |||
728 | 734 | ||
729 | static int qdio_kick_outbound_q(struct qdio_q *q) | 735 | static int qdio_kick_outbound_q(struct qdio_q *q) |
730 | { | 736 | { |
737 | int retries = 0, cc; | ||
731 | unsigned int busy_bit; | 738 | unsigned int busy_bit; |
732 | int cc; | ||
733 | 739 | ||
734 | if (!need_siga_out(q)) | 740 | if (!need_siga_out(q)) |
735 | return 0; | 741 | return 0; |
736 | 742 | ||
737 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); | 743 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); |
744 | retry: | ||
738 | qperf_inc(q, siga_write); | 745 | qperf_inc(q, siga_write); |
739 | 746 | ||
740 | cc = qdio_siga_output(q, &busy_bit); | 747 | cc = qdio_siga_output(q, &busy_bit); |
@@ -743,7 +750,11 @@ static int qdio_kick_outbound_q(struct qdio_q *q) | |||
743 | break; | 750 | break; |
744 | case 2: | 751 | case 2: |
745 | if (busy_bit) { | 752 | if (busy_bit) { |
746 | DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); | 753 | while (++retries < QDIO_BUSY_BIT_RETRIES) { |
754 | mdelay(QDIO_BUSY_BIT_RETRY_DELAY); | ||
755 | goto retry; | ||
756 | } | ||
757 | DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); | ||
747 | cc |= QDIO_ERROR_SIGA_BUSY; | 758 | cc |= QDIO_ERROR_SIGA_BUSY; |
748 | } else | 759 | } else |
749 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); | 760 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); |
@@ -753,6 +764,10 @@ static int qdio_kick_outbound_q(struct qdio_q *q) | |||
753 | DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); | 764 | DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); |
754 | break; | 765 | break; |
755 | } | 766 | } |
767 | if (retries) { | ||
768 | DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); | ||
769 | DBF_ERROR("count:%u", retries); | ||
770 | } | ||
756 | return cc; | 771 | return cc; |
757 | } | 772 | } |
758 | 773 | ||
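The SIGA busy handling above now retries instead of failing immediately: after the short QDIO_BUSY_BIT_PATIENCE busy-wait, qdio_kick_outbound_q() delays QDIO_BUSY_BIT_RETRY_DELAY (10 ms) between attempts and gives up after QDIO_BUSY_BIT_RETRIES (1000) tries, i.e. 1000 x 10 ms = 10 s of total retry time, matching the comment added to qdio.h. A generic sketch of that bounded-retry shape; issue_op() is a stand-in for the real SIGA call:

#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_RETRY_DELAY_MS  10
#define EXAMPLE_MAX_RETRIES     1000    /* 1000 * 10 ms = 10 s total */

/*
 * Illustrative only: retry a busy-returning operation with a fixed
 * delay and an upper bound on the total time spent retrying.
 */
static int example_kick_with_retry(int (*issue_op)(void))
{
        int retries = 0, rc;

        do {
                rc = issue_op();
                if (rc != -EBUSY)
                        return rc;
                mdelay(EXAMPLE_RETRY_DELAY_MS);
        } while (++retries < EXAMPLE_MAX_RETRIES);

        return -EBUSY;
}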
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index d6702e57d428..dc8d022c07a1 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c | |||
@@ -34,6 +34,9 @@ static LIST_HEAD(clock_list); | |||
34 | static DEFINE_SPINLOCK(clock_lock); | 34 | static DEFINE_SPINLOCK(clock_lock); |
35 | static DEFINE_MUTEX(clock_list_sem); | 35 | static DEFINE_MUTEX(clock_list_sem); |
36 | 36 | ||
37 | /* clock disable operations are not passed on to hardware during boot */ | ||
38 | static int allow_disable; | ||
39 | |||
37 | void clk_rate_table_build(struct clk *clk, | 40 | void clk_rate_table_build(struct clk *clk, |
38 | struct cpufreq_frequency_table *freq_table, | 41 | struct cpufreq_frequency_table *freq_table, |
39 | int nr_freqs, | 42 | int nr_freqs, |
@@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk) | |||
228 | return; | 231 | return; |
229 | 232 | ||
230 | if (!(--clk->usecount)) { | 233 | if (!(--clk->usecount)) { |
231 | if (likely(clk->ops && clk->ops->disable)) | 234 | if (likely(allow_disable && clk->ops && clk->ops->disable)) |
232 | clk->ops->disable(clk); | 235 | clk->ops->disable(clk); |
233 | if (likely(clk->parent)) | 236 | if (likely(clk->parent)) |
234 | __clk_disable(clk->parent); | 237 | __clk_disable(clk->parent); |
@@ -393,7 +396,7 @@ int clk_register(struct clk *clk) | |||
393 | { | 396 | { |
394 | int ret; | 397 | int ret; |
395 | 398 | ||
396 | if (clk == NULL || IS_ERR(clk)) | 399 | if (IS_ERR_OR_NULL(clk)) |
397 | return -EINVAL; | 400 | return -EINVAL; |
398 | 401 | ||
399 | /* | 402 | /* |
@@ -744,3 +747,25 @@ err_out: | |||
744 | return err; | 747 | return err; |
745 | } | 748 | } |
746 | late_initcall(clk_debugfs_init); | 749 | late_initcall(clk_debugfs_init); |
750 | |||
751 | static int __init clk_late_init(void) | ||
752 | { | ||
753 | unsigned long flags; | ||
754 | struct clk *clk; | ||
755 | |||
756 | /* disable all clocks with zero use count */ | ||
757 | mutex_lock(&clock_list_sem); | ||
758 | spin_lock_irqsave(&clock_lock, flags); | ||
759 | |||
760 | list_for_each_entry(clk, &clock_list, node) | ||
761 | if (!clk->usecount && clk->ops && clk->ops->disable) | ||
762 | clk->ops->disable(clk); | ||
763 | |||
764 | /* from now on allow clock disable operations */ | ||
765 | allow_disable = 1; | ||
766 | |||
767 | spin_unlock_irqrestore(&clock_lock, flags); | ||
768 | mutex_unlock(&clock_list_sem); | ||
769 | return 0; | ||
770 | } | ||
771 | late_initcall(clk_late_init); | ||
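The sh clock framework change above defers real hardware gating until late in boot: __clk_disable() only calls clk->ops->disable() once allow_disable is set, and clk_late_init(), a late_initcall, first switches off every clock whose use count is still zero and only then permits normal disables. This keeps clocks the bootloader left running (an early console, for instance) alive until drivers have had a chance to claim them. A hedged sketch of the same gating idea in isolation, with made-up names:

#include <linux/init.h>

/* 0 until late_initcall time: hardware gating is skipped during boot. */
static int example_allow_disable;

static void example_disable_hw(void)
{
        /* Leave bootloader-enabled clocks running until late in boot. */
        if (!example_allow_disable)
                return;

        /* ... write the gate bit in hardware ... */
}

static int __init example_clk_late_init(void)
{
        /* ... gate every clock nobody claimed, then allow disables ... */
        example_allow_disable = 1;
        return 0;
}
late_initcall(example_clk_late_init);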
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index eba88c749fb1..730b4a37b823 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -2267,17 +2267,13 @@ static int __devexit | |||
2267 | pl022_remove(struct amba_device *adev) | 2267 | pl022_remove(struct amba_device *adev) |
2268 | { | 2268 | { |
2269 | struct pl022 *pl022 = amba_get_drvdata(adev); | 2269 | struct pl022 *pl022 = amba_get_drvdata(adev); |
2270 | int status = 0; | 2270 | |
2271 | if (!pl022) | 2271 | if (!pl022) |
2272 | return 0; | 2272 | return 0; |
2273 | 2273 | ||
2274 | /* Remove the queue */ | 2274 | /* Remove the queue */ |
2275 | status = destroy_queue(pl022); | 2275 | if (destroy_queue(pl022) != 0) |
2276 | if (status != 0) { | 2276 | dev_err(&adev->dev, "queue remove failed\n"); |
2277 | dev_err(&adev->dev, | ||
2278 | "queue remove failed (%d)\n", status); | ||
2279 | return status; | ||
2280 | } | ||
2281 | load_ssp_default_config(pl022); | 2277 | load_ssp_default_config(pl022); |
2282 | pl022_dma_remove(pl022); | 2278 | pl022_dma_remove(pl022); |
2283 | free_irq(adev->irq[0], pl022); | 2279 | free_irq(adev->irq[0], pl022); |
@@ -2289,7 +2285,6 @@ pl022_remove(struct amba_device *adev) | |||
2289 | spi_unregister_master(pl022->master); | 2285 | spi_unregister_master(pl022->master); |
2290 | spi_master_put(pl022->master); | 2286 | spi_master_put(pl022->master); |
2291 | amba_set_drvdata(adev, NULL); | 2287 | amba_set_drvdata(adev, NULL); |
2292 | dev_dbg(&adev->dev, "remove succeeded\n"); | ||
2293 | return 0; | 2288 | return 0; |
2294 | } | 2289 | } |
2295 | 2290 | ||
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig index 564ff4e0dbc4..8345fb457a40 100644 --- a/drivers/target/iscsi/Kconfig +++ b/drivers/target/iscsi/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config ISCSI_TARGET | 1 | config ISCSI_TARGET |
2 | tristate "Linux-iSCSI.org iSCSI Target Mode Stack" | 2 | tristate "Linux-iSCSI.org iSCSI Target Mode Stack" |
3 | depends on NET | ||
3 | select CRYPTO | 4 | select CRYPTO |
4 | select CRYPTO_CRC32C | 5 | select CRYPTO_CRC32C |
5 | select CRYPTO_CRC32C_INTEL if X86 | 6 | select CRYPTO_CRC32C_INTEL if X86 |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 14c81c4265bd..c24fb10de60b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -120,7 +120,7 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf) | |||
120 | struct iscsi_tiqn *tiqn = NULL; | 120 | struct iscsi_tiqn *tiqn = NULL; |
121 | int ret; | 121 | int ret; |
122 | 122 | ||
123 | if (strlen(buf) > ISCSI_IQN_LEN) { | 123 | if (strlen(buf) >= ISCSI_IQN_LEN) { |
124 | pr_err("Target IQN exceeds %d bytes\n", | 124 | pr_err("Target IQN exceeds %d bytes\n", |
125 | ISCSI_IQN_LEN); | 125 | ISCSI_IQN_LEN); |
126 | return ERR_PTR(-EINVAL); | 126 | return ERR_PTR(-EINVAL); |
@@ -1857,7 +1857,7 @@ static int iscsit_handle_text_cmd( | |||
1857 | char *text_ptr, *text_in; | 1857 | char *text_ptr, *text_in; |
1858 | int cmdsn_ret, niov = 0, rx_got, rx_size; | 1858 | int cmdsn_ret, niov = 0, rx_got, rx_size; |
1859 | u32 checksum = 0, data_crc = 0, payload_length; | 1859 | u32 checksum = 0, data_crc = 0, payload_length; |
1860 | u32 padding = 0, text_length = 0; | 1860 | u32 padding = 0, pad_bytes = 0, text_length = 0; |
1861 | struct iscsi_cmd *cmd; | 1861 | struct iscsi_cmd *cmd; |
1862 | struct kvec iov[3]; | 1862 | struct kvec iov[3]; |
1863 | struct iscsi_text *hdr; | 1863 | struct iscsi_text *hdr; |
@@ -1896,7 +1896,7 @@ static int iscsit_handle_text_cmd( | |||
1896 | 1896 | ||
1897 | padding = ((-payload_length) & 3); | 1897 | padding = ((-payload_length) & 3); |
1898 | if (padding != 0) { | 1898 | if (padding != 0) { |
1899 | iov[niov].iov_base = cmd->pad_bytes; | 1899 | iov[niov].iov_base = &pad_bytes; |
1900 | iov[niov++].iov_len = padding; | 1900 | iov[niov++].iov_len = padding; |
1901 | rx_size += padding; | 1901 | rx_size += padding; |
1902 | pr_debug("Receiving %u additional bytes" | 1902 | pr_debug("Receiving %u additional bytes" |
@@ -1917,7 +1917,7 @@ static int iscsit_handle_text_cmd( | |||
1917 | if (conn->conn_ops->DataDigest) { | 1917 | if (conn->conn_ops->DataDigest) { |
1918 | iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, | 1918 | iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, |
1919 | text_in, text_length, | 1919 | text_in, text_length, |
1920 | padding, cmd->pad_bytes, | 1920 | padding, (u8 *)&pad_bytes, |
1921 | (u8 *)&data_crc); | 1921 | (u8 *)&data_crc); |
1922 | 1922 | ||
1923 | if (checksum != data_crc) { | 1923 | if (checksum != data_crc) { |
@@ -3468,7 +3468,12 @@ static inline void iscsit_thread_check_cpumask( | |||
3468 | } | 3468 | } |
3469 | 3469 | ||
3470 | #else | 3470 | #else |
3471 | #define iscsit_thread_get_cpumask(X) ({}) | 3471 | |
3472 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) | ||
3473 | { | ||
3474 | return; | ||
3475 | } | ||
3476 | |||
3472 | #define iscsit_thread_check_cpumask(X, Y, Z) ({}) | 3477 | #define iscsit_thread_check_cpumask(X, Y, Z) ({}) |
3473 | #endif /* CONFIG_SMP */ | 3478 | #endif /* CONFIG_SMP */ |
3474 | 3479 | ||
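Two small correctness fixes in iscsi_target.c above: the IQN length test becomes >= because ISCSI_IQN_LEN is the size of the destination buffer, so a name of exactly that many characters leaves no room for the NUL terminator; and iscsit_handle_text_cmd() now pads the received payload into an on-stack pad_bytes rather than cmd->pad_bytes. A tiny sketch of the length rule, using a stand-in constant rather than the real ISCSI_IQN_LEN value:

#include <linux/types.h>
#include <linux/string.h>

#define EXAMPLE_NAME_LEN 224    /* stand-in buffer size, NUL included */

/* A name fits only if strlen(name) <= EXAMPLE_NAME_LEN - 1. */
static bool example_name_fits(const char *name)
{
        return strlen(name) < EXAMPLE_NAME_LEN;
}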
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 32bb92c44450..f095e65b1ccf 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -181,7 +181,7 @@ struct se_tpg_np *lio_target_call_addnptotpg( | |||
181 | return ERR_PTR(-EOVERFLOW); | 181 | return ERR_PTR(-EOVERFLOW); |
182 | } | 182 | } |
183 | memset(buf, 0, MAX_PORTAL_LEN + 1); | 183 | memset(buf, 0, MAX_PORTAL_LEN + 1); |
184 | snprintf(buf, MAX_PORTAL_LEN, "%s", name); | 184 | snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name); |
185 | 185 | ||
186 | memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage)); | 186 | memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage)); |
187 | 187 | ||
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 713a4d23557a..4d087ac11067 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
@@ -978,7 +978,7 @@ struct iscsi_login *iscsi_target_init_negotiation( | |||
978 | pr_err("Unable to allocate memory for struct iscsi_login.\n"); | 978 | pr_err("Unable to allocate memory for struct iscsi_login.\n"); |
979 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | 979 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
980 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | 980 | ISCSI_LOGIN_STATUS_NO_RESOURCES); |
981 | goto out; | 981 | return NULL; |
982 | } | 982 | } |
983 | 983 | ||
984 | login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); | 984 | login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c75a01a1c475..89760329d5d0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -1747,6 +1747,8 @@ int transport_generic_handle_cdb( | |||
1747 | } | 1747 | } |
1748 | EXPORT_SYMBOL(transport_generic_handle_cdb); | 1748 | EXPORT_SYMBOL(transport_generic_handle_cdb); |
1749 | 1749 | ||
1750 | static void transport_generic_request_failure(struct se_cmd *, | ||
1751 | struct se_device *, int, int); | ||
1750 | /* | 1752 | /* |
1751 | * Used by fabric module frontends to queue tasks directly. | 1753 | * Used by fabric module frontends to queue tasks directly. |
1752 | * Many only be used from process context only | 1754 | * Many only be used from process context only |
@@ -1754,6 +1756,8 @@ EXPORT_SYMBOL(transport_generic_handle_cdb); | |||
1754 | int transport_handle_cdb_direct( | 1756 | int transport_handle_cdb_direct( |
1755 | struct se_cmd *cmd) | 1757 | struct se_cmd *cmd) |
1756 | { | 1758 | { |
1759 | int ret; | ||
1760 | |||
1757 | if (!cmd->se_lun) { | 1761 | if (!cmd->se_lun) { |
1758 | dump_stack(); | 1762 | dump_stack(); |
1759 | pr_err("cmd->se_lun is NULL\n"); | 1763 | pr_err("cmd->se_lun is NULL\n"); |
@@ -1765,8 +1769,31 @@ int transport_handle_cdb_direct( | |||
1765 | " from interrupt context\n"); | 1769 | " from interrupt context\n"); |
1766 | return -EINVAL; | 1770 | return -EINVAL; |
1767 | } | 1771 | } |
1768 | 1772 | /* | |
1769 | return transport_generic_new_cmd(cmd); | 1773 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following |
1774 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | ||
1775 | * in existing usage to ensure that outstanding descriptors are handled | ||
1776 | * correctly during shutdown via transport_generic_wait_for_tasks() | ||
1777 | * | ||
1778 | * Also, we don't take cmd->t_state_lock here as we only expect | ||
1779 | * this to be called for initial descriptor submission. | ||
1780 | */ | ||
1781 | cmd->t_state = TRANSPORT_NEW_CMD; | ||
1782 | atomic_set(&cmd->t_transport_active, 1); | ||
1783 | /* | ||
1784 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | ||
1785 | * so follow TRANSPORT_NEW_CMD processing thread context usage | ||
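1786 | * and call transport_generic_request_failure() if necessary. | ||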
1787 | */ | ||
1788 | ret = transport_generic_new_cmd(cmd); | ||
1789 | if (ret == -EAGAIN) | ||
1790 | return 0; | ||
1791 | else if (ret < 0) { | ||
1792 | cmd->transport_error_status = ret; | ||
1793 | transport_generic_request_failure(cmd, NULL, 0, | ||
1794 | (cmd->data_direction != DMA_TO_DEVICE)); | ||
1795 | } | ||
1796 | return 0; | ||
1770 | } | 1797 | } |
1771 | EXPORT_SYMBOL(transport_handle_cdb_direct); | 1798 | EXPORT_SYMBOL(transport_handle_cdb_direct); |
1772 | 1799 | ||
@@ -3324,7 +3351,7 @@ static int transport_generic_cmd_sequencer( | |||
3324 | goto out_invalid_cdb_field; | 3351 | goto out_invalid_cdb_field; |
3325 | } | 3352 | } |
3326 | 3353 | ||
3327 | cmd->t_task_lba = get_unaligned_be16(&cdb[2]); | 3354 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
3328 | passthrough = (dev->transport->transport_type == | 3355 | passthrough = (dev->transport->transport_type == |
3329 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3356 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3330 | /* | 3357 | /* |
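Among the target_core_transport.c hunks above, the last one widens the LBA decode from get_unaligned_be16() to get_unaligned_be64(), so all eight big-endian bytes starting at cdb[2] contribute to t_task_lba. A standalone sketch of an unaligned big-endian 64-bit read; the 16-byte CDB layout below is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for get_unaligned_be64(): assemble a 64-bit big-endian
 * value byte by byte, with no alignment requirement on the source. */
static uint64_t be64_from(const uint8_t *p)
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | p[i];
        return v;
}

int main(void)
{
        uint8_t cdb[16] = { 0 };        /* hypothetical 16-byte CDB */
        uint64_t lba = 0x0000000123456789ULL;
        int i;

        /* Store the LBA big-endian at byte 2, then decode it back. */
        for (i = 0; i < 8; i++)
                cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));

        printf("decoded lba = 0x%llx\n",
               (unsigned long long)be64_from(&cdb[2]));
        return 0;
}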
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index f7fff7ed63c3..bd4fe21a23b8 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
@@ -187,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller); | |||
187 | 187 | ||
188 | ssize_t ft_format_wwn(char *, size_t, u64); | 188 | ssize_t ft_format_wwn(char *, size_t, u64); |
189 | 189 | ||
190 | /* | ||
191 | * Underlying HW specific helper function | ||
192 | */ | ||
193 | void ft_invl_hw_context(struct ft_cmd *); | ||
194 | |||
190 | #endif /* __TCM_FC_H__ */ | 195 | #endif /* __TCM_FC_H__ */ |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 09df38b4610c..5654dc22f7ae 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -320,6 +320,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
320 | default: | 320 | default: |
321 | pr_debug("%s: unhandled frame r_ctl %x\n", | 321 | pr_debug("%s: unhandled frame r_ctl %x\n", |
322 | __func__, fh->fh_r_ctl); | 322 | __func__, fh->fh_r_ctl); |
323 | ft_invl_hw_context(cmd); | ||
323 | fc_frame_free(fp); | 324 | fc_frame_free(fp); |
324 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); | 325 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); |
325 | break; | 326 | break; |
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8e2a46ddcccb..c37f4cd96452 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -213,62 +213,49 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) | |||
213 | if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) | 213 | if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) |
214 | goto drop; | 214 | goto drop; |
215 | 215 | ||
216 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
217 | ep = fc_seq_exch(seq); | ||
218 | lport = ep->lp; | ||
219 | if (cmd->was_ddp_setup) { | ||
220 | BUG_ON(!ep); | ||
221 | BUG_ON(!lport); | ||
222 | } | ||
223 | |||
216 | /* | 224 | /* |
217 | * Doesn't expect even single byte of payload. Payload | 225 | * Doesn't expect payload if DDP is setup. Payload |
218 | * is expected to be copied directly to user buffers | 226 | * is expected to be copied directly to user buffers |
219 | * due to DDP (Large Rx offload) feature, hence | 227 | * due to DDP (Large Rx offload), |
220 | * BUG_ON if BUF is non-NULL | ||
221 | */ | 228 | */ |
222 | buf = fc_frame_payload_get(fp, 1); | 229 | buf = fc_frame_payload_get(fp, 1); |
223 | if (cmd->was_ddp_setup && buf) { | 230 | if (buf) |
224 | pr_debug("%s: When DDP was setup, not expected to" | 231 | pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " |
225 | "receive frame with payload, Payload shall be" | 232 | "cmd->sg_cnt 0x%x. DDP was setup" |
226 | "copied directly to buffer instead of coming " | 233 | " hence not expected to receive frame with " |
227 | "via. legacy receive queues\n", __func__); | 234 | "payload, Frame will be dropped if " |
228 | BUG_ON(buf); | 235 | "'Sequence Initiative' bit in f_ctl is " |
229 | } | 236 | "not set\n", __func__, ep->xid, f_ctl, |
237 | cmd->sg, cmd->sg_cnt); | ||
238 | /* | ||
239 | * Invalidate the HW DDP context if it was set up for the respective | ||
240 | * command. Invalidation of the HW DDP context is required in both | ||
241 | * situations (success and error). | ||
242 | */ | ||
243 | ft_invl_hw_context(cmd); | ||
230 | 244 | ||
231 | /* | 245 | /* |
232 | * If ft_cmd indicated 'ddp_setup', in that case only the last frame | 246 | * If "Sequence Initiative (TSI)" bit set in f_ctl, means last |
233 | * should come with 'TSI bit being set'. If 'TSI bit is not set and if | 247 | * write data frame is received successfully where payload is |
234 | * data frame appears here, means error condition. In both the cases | 248 | * posted directly to user buffer and only the last frame's |
235 | * release the DDP context (ddp_put) and in error case, as well | 249 | * header is posted in receive queue. |
236 | * initiate error recovery mechanism. | 250 | * |
251 | * If the "Sequence Initiative (TSI)" bit is not set, it means an error | ||
252 | * condition w.r.t. DDP, hence drop the packet and let explicit | ||
253 | * ABORTS from the other end of the exchange timer trigger the recovery. | ||
237 | */ | 254 | */ |
238 | ep = fc_seq_exch(seq); | 255 | if (f_ctl & FC_FC_SEQ_INIT) |
239 | if (cmd->was_ddp_setup) { | 256 | goto last_frame; |
240 | BUG_ON(!ep); | 257 | else |
241 | lport = ep->lp; | 258 | goto drop; |
242 | BUG_ON(!lport); | ||
243 | } | ||
244 | if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) { | ||
245 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
246 | /* | ||
247 | * If TSI bit set in f_ctl, means last write data frame is | ||
248 | * received successfully where payload is posted directly | ||
249 | * to user buffer and only the last frame's header is posted | ||
250 | * in legacy receive queue | ||
251 | */ | ||
252 | if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */ | ||
253 | cmd->write_data_len = lport->tt.ddp_done(lport, | ||
254 | ep->xid); | ||
255 | goto last_frame; | ||
256 | } else { | ||
257 | /* | ||
258 | * Updating the write_data_len may be meaningless at | ||
259 | * this point, but just in case if required in future | ||
260 | * for debugging or any other purpose | ||
261 | */ | ||
262 | pr_err("%s: Received frame with TSI bit not" | ||
263 | " being SET, dropping the frame, " | ||
264 | "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", | ||
265 | __func__, cmd->sg, cmd->sg_cnt); | ||
266 | cmd->write_data_len = lport->tt.ddp_done(lport, | ||
267 | ep->xid); | ||
268 | lport->tt.seq_exch_abort(cmd->seq, 0); | ||
269 | goto drop; | ||
270 | } | ||
271 | } | ||
272 | 259 | ||
273 | rel_off = ntohl(fh->fh_parm_offset); | 260 | rel_off = ntohl(fh->fh_parm_offset); |
274 | frame_len = fr_len(fp); | 261 | frame_len = fr_len(fp); |
@@ -331,3 +318,39 @@ last_frame: | |||
331 | drop: | 318 | drop: |
332 | fc_frame_free(fp); | 319 | fc_frame_free(fp); |
333 | } | 320 | } |
321 | |||
322 | /* | ||
323 | * Handle and clean up any HW-specific resources when | ||
324 | * ABORTS, errors, or timeouts are received. | ||
325 | */ | ||
326 | void ft_invl_hw_context(struct ft_cmd *cmd) | ||
327 | { | ||
328 | struct fc_seq *seq = cmd->seq; | ||
329 | struct fc_exch *ep = NULL; | ||
330 | struct fc_lport *lport = NULL; | ||
331 | |||
332 | BUG_ON(!cmd); | ||
333 | |||
334 | /* Cleanup the DDP context in HW if DDP was setup */ | ||
335 | if (cmd->was_ddp_setup && seq) { | ||
336 | ep = fc_seq_exch(seq); | ||
337 | if (ep) { | ||
338 | lport = ep->lp; | ||
339 | if (lport && (ep->xid <= lport->lro_xid)) | ||
340 | /* | ||
341 | * "ddp_done" triggers invalidation of the | ||
342 | * HW-specific DDP context | ||
343 | */ | ||
344 | cmd->write_data_len = lport->tt.ddp_done(lport, | ||
345 | ep->xid); | ||
346 | |||
347 | /* | ||
348 | * Reset the same variable to indicate that the HW's | ||
349 | * DDP context has been invalidated, to avoid | ||
350 | * re-invalidation of the same context (the context is | ||
351 | * identified using ep->xid) | ||
352 | */ | ||
353 | cmd->was_ddp_setup = 0; | ||
354 | } | ||
355 | } | ||
356 | } | ||
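ft_invl_hw_context() above clears was_ddp_setup once the DDP context has been torn down, so the helper can safely be called from both the normal completion path and the error/unsolicited-frame path without invalidating the same exchange twice. A minimal sketch of that idempotence pattern (names are placeholders, not the libfc/fcoe API):

#include <stdio.h>

struct ctx {
        int was_setup;          /* set when the offload context exists */
        int teardown_calls;     /* counts real teardowns */
};

static void invalidate(struct ctx *c)
{
        if (!c->was_setup)
                return;                 /* already invalidated, no-op */

        c->teardown_calls++;            /* stand-in for lport->tt.ddp_done() */
        c->was_setup = 0;               /* a second call is now harmless */
}

int main(void)
{
        struct ctx c = { .was_setup = 1 };

        invalidate(&c);                 /* real teardown */
        invalidate(&c);                 /* duplicate call is absorbed */
        printf("teardown ran %d time(s)\n", c.teardown_calls);
        return 0;
}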
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index bf7c687519ef..f7f71b2d3101 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -14,11 +14,7 @@ menuconfig THERMAL | |||
14 | If you want this support, you should say Y or M here. | 14 | If you want this support, you should say Y or M here. |
15 | 15 | ||
16 | config THERMAL_HWMON | 16 | config THERMAL_HWMON |
17 | bool "Hardware monitoring support" | 17 | bool |
18 | depends on THERMAL | 18 | depends on THERMAL |
19 | depends on HWMON=y || HWMON=THERMAL | 19 | depends on HWMON=y || HWMON=THERMAL |
20 | help | 20 | default y |
21 | The generic thermal sysfs driver's hardware monitoring support | ||
22 | requires a 2.10.7/3.0.2 or later lm-sensors userspace. | ||
23 | |||
24 | Say Y if your user-space is new enough. | ||
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 0b1c82ad6805..708f8e92771a 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
@@ -420,6 +420,29 @@ thermal_cooling_device_trip_point_show(struct device *dev, | |||
420 | 420 | ||
421 | /* hwmon sys I/F */ | 421 | /* hwmon sys I/F */ |
422 | #include <linux/hwmon.h> | 422 | #include <linux/hwmon.h> |
423 | |||
424 | /* thermal zone devices with the same type share one hwmon device */ | ||
425 | struct thermal_hwmon_device { | ||
426 | char type[THERMAL_NAME_LENGTH]; | ||
427 | struct device *device; | ||
428 | int count; | ||
429 | struct list_head tz_list; | ||
430 | struct list_head node; | ||
431 | }; | ||
432 | |||
433 | struct thermal_hwmon_attr { | ||
434 | struct device_attribute attr; | ||
435 | char name[16]; | ||
436 | }; | ||
437 | |||
438 | /* one temperature input for each thermal zone */ | ||
439 | struct thermal_hwmon_temp { | ||
440 | struct list_head hwmon_node; | ||
441 | struct thermal_zone_device *tz; | ||
442 | struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ | ||
443 | struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ | ||
444 | }; | ||
445 | |||
423 | static LIST_HEAD(thermal_hwmon_list); | 446 | static LIST_HEAD(thermal_hwmon_list); |
424 | 447 | ||
425 | static ssize_t | 448 | static ssize_t |
@@ -437,9 +460,10 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
437 | int ret; | 460 | int ret; |
438 | struct thermal_hwmon_attr *hwmon_attr | 461 | struct thermal_hwmon_attr *hwmon_attr |
439 | = container_of(attr, struct thermal_hwmon_attr, attr); | 462 | = container_of(attr, struct thermal_hwmon_attr, attr); |
440 | struct thermal_zone_device *tz | 463 | struct thermal_hwmon_temp *temp |
441 | = container_of(hwmon_attr, struct thermal_zone_device, | 464 | = container_of(hwmon_attr, struct thermal_hwmon_temp, |
442 | temp_input); | 465 | temp_input); |
466 | struct thermal_zone_device *tz = temp->tz; | ||
443 | 467 | ||
444 | ret = tz->ops->get_temp(tz, &temperature); | 468 | ret = tz->ops->get_temp(tz, &temperature); |
445 | 469 | ||
@@ -455,9 +479,10 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, | |||
455 | { | 479 | { |
456 | struct thermal_hwmon_attr *hwmon_attr | 480 | struct thermal_hwmon_attr *hwmon_attr |
457 | = container_of(attr, struct thermal_hwmon_attr, attr); | 481 | = container_of(attr, struct thermal_hwmon_attr, attr); |
458 | struct thermal_zone_device *tz | 482 | struct thermal_hwmon_temp *temp |
459 | = container_of(hwmon_attr, struct thermal_zone_device, | 483 | = container_of(hwmon_attr, struct thermal_hwmon_temp, |
460 | temp_crit); | 484 | temp_crit); |
485 | struct thermal_zone_device *tz = temp->tz; | ||
461 | long temperature; | 486 | long temperature; |
462 | int ret; | 487 | int ret; |
463 | 488 | ||
@@ -469,22 +494,54 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, | |||
469 | } | 494 | } |
470 | 495 | ||
471 | 496 | ||
472 | static int | 497 | static struct thermal_hwmon_device * |
473 | thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | 498 | thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz) |
474 | { | 499 | { |
475 | struct thermal_hwmon_device *hwmon; | 500 | struct thermal_hwmon_device *hwmon; |
476 | int new_hwmon_device = 1; | ||
477 | int result; | ||
478 | 501 | ||
479 | mutex_lock(&thermal_list_lock); | 502 | mutex_lock(&thermal_list_lock); |
480 | list_for_each_entry(hwmon, &thermal_hwmon_list, node) | 503 | list_for_each_entry(hwmon, &thermal_hwmon_list, node) |
481 | if (!strcmp(hwmon->type, tz->type)) { | 504 | if (!strcmp(hwmon->type, tz->type)) { |
482 | new_hwmon_device = 0; | ||
483 | mutex_unlock(&thermal_list_lock); | 505 | mutex_unlock(&thermal_list_lock); |
484 | goto register_sys_interface; | 506 | return hwmon; |
507 | } | ||
508 | mutex_unlock(&thermal_list_lock); | ||
509 | |||
510 | return NULL; | ||
511 | } | ||
512 | |||
513 | /* Find the temperature input matching a given thermal zone */ | ||
514 | static struct thermal_hwmon_temp * | ||
515 | thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, | ||
516 | const struct thermal_zone_device *tz) | ||
517 | { | ||
518 | struct thermal_hwmon_temp *temp; | ||
519 | |||
520 | mutex_lock(&thermal_list_lock); | ||
521 | list_for_each_entry(temp, &hwmon->tz_list, hwmon_node) | ||
522 | if (temp->tz == tz) { | ||
523 | mutex_unlock(&thermal_list_lock); | ||
524 | return temp; | ||
485 | } | 525 | } |
486 | mutex_unlock(&thermal_list_lock); | 526 | mutex_unlock(&thermal_list_lock); |
487 | 527 | ||
528 | return NULL; | ||
529 | } | ||
530 | |||
531 | static int | ||
532 | thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | ||
533 | { | ||
534 | struct thermal_hwmon_device *hwmon; | ||
535 | struct thermal_hwmon_temp *temp; | ||
536 | int new_hwmon_device = 1; | ||
537 | int result; | ||
538 | |||
539 | hwmon = thermal_hwmon_lookup_by_type(tz); | ||
540 | if (hwmon) { | ||
541 | new_hwmon_device = 0; | ||
542 | goto register_sys_interface; | ||
543 | } | ||
544 | |||
488 | hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); | 545 | hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); |
489 | if (!hwmon) | 546 | if (!hwmon) |
490 | return -ENOMEM; | 547 | return -ENOMEM; |
@@ -502,30 +559,36 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
502 | goto free_mem; | 559 | goto free_mem; |
503 | 560 | ||
504 | register_sys_interface: | 561 | register_sys_interface: |
505 | tz->hwmon = hwmon; | 562 | temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL); |
563 | if (!temp) { | ||
564 | result = -ENOMEM; | ||
565 | goto unregister_name; | ||
566 | } | ||
567 | |||
568 | temp->tz = tz; | ||
506 | hwmon->count++; | 569 | hwmon->count++; |
507 | 570 | ||
508 | snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH, | 571 | snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH, |
509 | "temp%d_input", hwmon->count); | 572 | "temp%d_input", hwmon->count); |
510 | tz->temp_input.attr.attr.name = tz->temp_input.name; | 573 | temp->temp_input.attr.attr.name = temp->temp_input.name; |
511 | tz->temp_input.attr.attr.mode = 0444; | 574 | temp->temp_input.attr.attr.mode = 0444; |
512 | tz->temp_input.attr.show = temp_input_show; | 575 | temp->temp_input.attr.show = temp_input_show; |
513 | sysfs_attr_init(&tz->temp_input.attr.attr); | 576 | sysfs_attr_init(&temp->temp_input.attr.attr); |
514 | result = device_create_file(hwmon->device, &tz->temp_input.attr); | 577 | result = device_create_file(hwmon->device, &temp->temp_input.attr); |
515 | if (result) | 578 | if (result) |
516 | goto unregister_name; | 579 | goto free_temp_mem; |
517 | 580 | ||
518 | if (tz->ops->get_crit_temp) { | 581 | if (tz->ops->get_crit_temp) { |
519 | unsigned long temperature; | 582 | unsigned long temperature; |
520 | if (!tz->ops->get_crit_temp(tz, &temperature)) { | 583 | if (!tz->ops->get_crit_temp(tz, &temperature)) { |
521 | snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH, | 584 | snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH, |
522 | "temp%d_crit", hwmon->count); | 585 | "temp%d_crit", hwmon->count); |
523 | tz->temp_crit.attr.attr.name = tz->temp_crit.name; | 586 | temp->temp_crit.attr.attr.name = temp->temp_crit.name; |
524 | tz->temp_crit.attr.attr.mode = 0444; | 587 | temp->temp_crit.attr.attr.mode = 0444; |
525 | tz->temp_crit.attr.show = temp_crit_show; | 588 | temp->temp_crit.attr.show = temp_crit_show; |
526 | sysfs_attr_init(&tz->temp_crit.attr.attr); | 589 | sysfs_attr_init(&temp->temp_crit.attr.attr); |
527 | result = device_create_file(hwmon->device, | 590 | result = device_create_file(hwmon->device, |
528 | &tz->temp_crit.attr); | 591 | &temp->temp_crit.attr); |
529 | if (result) | 592 | if (result) |
530 | goto unregister_input; | 593 | goto unregister_input; |
531 | } | 594 | } |
@@ -534,13 +597,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
534 | mutex_lock(&thermal_list_lock); | 597 | mutex_lock(&thermal_list_lock); |
535 | if (new_hwmon_device) | 598 | if (new_hwmon_device) |
536 | list_add_tail(&hwmon->node, &thermal_hwmon_list); | 599 | list_add_tail(&hwmon->node, &thermal_hwmon_list); |
537 | list_add_tail(&tz->hwmon_node, &hwmon->tz_list); | 600 | list_add_tail(&temp->hwmon_node, &hwmon->tz_list); |
538 | mutex_unlock(&thermal_list_lock); | 601 | mutex_unlock(&thermal_list_lock); |
539 | 602 | ||
540 | return 0; | 603 | return 0; |
541 | 604 | ||
542 | unregister_input: | 605 | unregister_input: |
543 | device_remove_file(hwmon->device, &tz->temp_input.attr); | 606 | device_remove_file(hwmon->device, &temp->temp_input.attr); |
607 | free_temp_mem: | ||
608 | kfree(temp); | ||
544 | unregister_name: | 609 | unregister_name: |
545 | if (new_hwmon_device) { | 610 | if (new_hwmon_device) { |
546 | device_remove_file(hwmon->device, &dev_attr_name); | 611 | device_remove_file(hwmon->device, &dev_attr_name); |
@@ -556,15 +621,30 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
556 | static void | 621 | static void |
557 | thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) | 622 | thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) |
558 | { | 623 | { |
559 | struct thermal_hwmon_device *hwmon = tz->hwmon; | 624 | struct thermal_hwmon_device *hwmon; |
625 | struct thermal_hwmon_temp *temp; | ||
626 | |||
627 | hwmon = thermal_hwmon_lookup_by_type(tz); | ||
628 | if (unlikely(!hwmon)) { | ||
629 | /* Should never happen... */ | ||
630 | dev_dbg(&tz->device, "hwmon device lookup failed!\n"); | ||
631 | return; | ||
632 | } | ||
633 | |||
634 | temp = thermal_hwmon_lookup_temp(hwmon, tz); | ||
635 | if (unlikely(!temp)) { | ||
636 | /* Should never happen... */ | ||
637 | dev_dbg(&tz->device, "temperature input lookup failed!\n"); | ||
638 | return; | ||
639 | } | ||
560 | 640 | ||
561 | tz->hwmon = NULL; | 641 | device_remove_file(hwmon->device, &temp->temp_input.attr); |
562 | device_remove_file(hwmon->device, &tz->temp_input.attr); | ||
563 | if (tz->ops->get_crit_temp) | 642 | if (tz->ops->get_crit_temp) |
564 | device_remove_file(hwmon->device, &tz->temp_crit.attr); | 643 | device_remove_file(hwmon->device, &temp->temp_crit.attr); |
565 | 644 | ||
566 | mutex_lock(&thermal_list_lock); | 645 | mutex_lock(&thermal_list_lock); |
567 | list_del(&tz->hwmon_node); | 646 | list_del(&temp->hwmon_node); |
647 | kfree(temp); | ||
568 | if (!list_empty(&hwmon->tz_list)) { | 648 | if (!list_empty(&hwmon->tz_list)) { |
569 | mutex_unlock(&thermal_list_lock); | 649 | mutex_unlock(&thermal_list_lock); |
570 | return; | 650 | return; |
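The thermal_sys.c changes above move the hwmon attributes out of struct thermal_zone_device and into a per-zone struct thermal_hwmon_temp, so the show() handlers now take two container_of() steps to get back to the zone. A userspace sketch of that double container_of() walk (structure names simplified, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct attr       { const char *name; };
struct hwmon_attr { struct attr attr; char name[16]; };
struct hwmon_temp { int zone_id; struct hwmon_attr temp_input; };

/* Mirrors the shape of temp_input_show(): walk from the generic attribute
 * back to the per-zone wrapper, then to the zone itself. */
static int zone_from_attr(struct attr *a)
{
        struct hwmon_attr *ha = container_of(a, struct hwmon_attr, attr);
        struct hwmon_temp *t  = container_of(ha, struct hwmon_temp, temp_input);

        return t->zone_id;
}

int main(void)
{
        struct hwmon_temp t = { .zone_id = 3 };

        t.temp_input.attr.name = "temp1_input";
        printf("attribute \"%s\" belongs to zone %d\n",
               t.temp_input.attr.name, zone_from_attr(&t.temp_input.attr));
        return 0;
}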
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index cb40b82daf36..4dcb37bbdf92 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -959,7 +959,7 @@ config SERIAL_IP22_ZILOG_CONSOLE | |||
959 | 959 | ||
960 | config SERIAL_SH_SCI | 960 | config SERIAL_SH_SCI |
961 | tristate "SuperH SCI(F) serial port support" | 961 | tristate "SuperH SCI(F) serial port support" |
962 | depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE) | 962 | depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE) |
963 | select SERIAL_CORE | 963 | select SERIAL_CORE |
964 | 964 | ||
965 | config SERIAL_SH_SCI_NR_UARTS | 965 | config SERIAL_SH_SCI_NR_UARTS |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 827db7654594..7e91b3d368cd 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -1286,22 +1286,17 @@ static int serial_imx_resume(struct platform_device *dev) | |||
1286 | static int serial_imx_probe_dt(struct imx_port *sport, | 1286 | static int serial_imx_probe_dt(struct imx_port *sport, |
1287 | struct platform_device *pdev) | 1287 | struct platform_device *pdev) |
1288 | { | 1288 | { |
1289 | static int portnum = 0; | ||
1289 | struct device_node *np = pdev->dev.of_node; | 1290 | struct device_node *np = pdev->dev.of_node; |
1290 | const struct of_device_id *of_id = | 1291 | const struct of_device_id *of_id = |
1291 | of_match_device(imx_uart_dt_ids, &pdev->dev); | 1292 | of_match_device(imx_uart_dt_ids, &pdev->dev); |
1292 | int ret; | ||
1293 | 1293 | ||
1294 | if (!np) | 1294 | if (!np) |
1295 | return -ENODEV; | 1295 | return -ENODEV; |
1296 | 1296 | ||
1297 | ret = of_alias_get_id(np, "serial"); | 1297 | sport->port.line = portnum++; |
1298 | if (ret < 0) { | 1298 | if (sport->port.line >= UART_NR) |
1299 | pr_err("%s: failed to get alias id, errno %d\n", | 1299 | return -EINVAL; |
1300 | __func__, ret); | ||
1301 | return -ENODEV; | ||
1302 | } else { | ||
1303 | sport->port.line = ret; | ||
1304 | } | ||
1305 | 1300 | ||
1306 | if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) | 1301 | if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) |
1307 | sport->have_rtscts = 1; | 1302 | sport->have_rtscts = 1; |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ebd8629c108d..2ec57b2fb278 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -54,10 +54,6 @@ | |||
54 | #include <asm/sh_bios.h> | 54 | #include <asm/sh_bios.h> |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | #ifdef CONFIG_H8300 | ||
58 | #include <asm/gpio.h> | ||
59 | #endif | ||
60 | |||
61 | #include "sh-sci.h" | 57 | #include "sh-sci.h" |
62 | 58 | ||
63 | struct sci_port { | 59 | struct sci_port { |
@@ -66,12 +62,6 @@ struct sci_port { | |||
66 | /* Platform configuration */ | 62 | /* Platform configuration */ |
67 | struct plat_sci_port *cfg; | 63 | struct plat_sci_port *cfg; |
68 | 64 | ||
69 | /* Port enable callback */ | ||
70 | void (*enable)(struct uart_port *port); | ||
71 | |||
72 | /* Port disable callback */ | ||
73 | void (*disable)(struct uart_port *port); | ||
74 | |||
75 | /* Break timer */ | 65 | /* Break timer */ |
76 | struct timer_list break_timer; | 66 | struct timer_list break_timer; |
77 | int break_flag; | 67 | int break_flag; |
@@ -81,6 +71,8 @@ struct sci_port { | |||
81 | /* Function clock */ | 71 | /* Function clock */ |
82 | struct clk *fclk; | 72 | struct clk *fclk; |
83 | 73 | ||
74 | char *irqstr[SCIx_NR_IRQS]; | ||
75 | |||
84 | struct dma_chan *chan_tx; | 76 | struct dma_chan *chan_tx; |
85 | struct dma_chan *chan_rx; | 77 | struct dma_chan *chan_rx; |
86 | 78 | ||
@@ -121,6 +113,278 @@ to_sci_port(struct uart_port *uart) | |||
121 | return container_of(uart, struct sci_port, port); | 113 | return container_of(uart, struct sci_port, port); |
122 | } | 114 | } |
123 | 115 | ||
116 | struct plat_sci_reg { | ||
117 | u8 offset, size; | ||
118 | }; | ||
119 | |||
120 | /* Helper for invalidating specific entries of an inherited map. */ | ||
121 | #define sci_reg_invalid { .offset = 0, .size = 0 } | ||
122 | |||
123 | static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { | ||
124 | [SCIx_PROBE_REGTYPE] = { | ||
125 | [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid, | ||
126 | }, | ||
127 | |||
128 | /* | ||
129 | * Common SCI definitions, dependent on the port's regshift | ||
130 | * value. | ||
131 | */ | ||
132 | [SCIx_SCI_REGTYPE] = { | ||
133 | [SCSMR] = { 0x00, 8 }, | ||
134 | [SCBRR] = { 0x01, 8 }, | ||
135 | [SCSCR] = { 0x02, 8 }, | ||
136 | [SCxTDR] = { 0x03, 8 }, | ||
137 | [SCxSR] = { 0x04, 8 }, | ||
138 | [SCxRDR] = { 0x05, 8 }, | ||
139 | [SCFCR] = sci_reg_invalid, | ||
140 | [SCFDR] = sci_reg_invalid, | ||
141 | [SCTFDR] = sci_reg_invalid, | ||
142 | [SCRFDR] = sci_reg_invalid, | ||
143 | [SCSPTR] = sci_reg_invalid, | ||
144 | [SCLSR] = sci_reg_invalid, | ||
145 | }, | ||
146 | |||
147 | /* | ||
148 | * Common definitions for legacy IrDA ports, dependent on | ||
149 | * regshift value. | ||
150 | */ | ||
151 | [SCIx_IRDA_REGTYPE] = { | ||
152 | [SCSMR] = { 0x00, 8 }, | ||
153 | [SCBRR] = { 0x01, 8 }, | ||
154 | [SCSCR] = { 0x02, 8 }, | ||
155 | [SCxTDR] = { 0x03, 8 }, | ||
156 | [SCxSR] = { 0x04, 8 }, | ||
157 | [SCxRDR] = { 0x05, 8 }, | ||
158 | [SCFCR] = { 0x06, 8 }, | ||
159 | [SCFDR] = { 0x07, 16 }, | ||
160 | [SCTFDR] = sci_reg_invalid, | ||
161 | [SCRFDR] = sci_reg_invalid, | ||
162 | [SCSPTR] = sci_reg_invalid, | ||
163 | [SCLSR] = sci_reg_invalid, | ||
164 | }, | ||
165 | |||
166 | /* | ||
167 | * Common SCIFA definitions. | ||
168 | */ | ||
169 | [SCIx_SCIFA_REGTYPE] = { | ||
170 | [SCSMR] = { 0x00, 16 }, | ||
171 | [SCBRR] = { 0x04, 8 }, | ||
172 | [SCSCR] = { 0x08, 16 }, | ||
173 | [SCxTDR] = { 0x20, 8 }, | ||
174 | [SCxSR] = { 0x14, 16 }, | ||
175 | [SCxRDR] = { 0x24, 8 }, | ||
176 | [SCFCR] = { 0x18, 16 }, | ||
177 | [SCFDR] = { 0x1c, 16 }, | ||
178 | [SCTFDR] = sci_reg_invalid, | ||
179 | [SCRFDR] = sci_reg_invalid, | ||
180 | [SCSPTR] = sci_reg_invalid, | ||
181 | [SCLSR] = sci_reg_invalid, | ||
182 | }, | ||
183 | |||
184 | /* | ||
185 | * Common SCIFB definitions. | ||
186 | */ | ||
187 | [SCIx_SCIFB_REGTYPE] = { | ||
188 | [SCSMR] = { 0x00, 16 }, | ||
189 | [SCBRR] = { 0x04, 8 }, | ||
190 | [SCSCR] = { 0x08, 16 }, | ||
191 | [SCxTDR] = { 0x40, 8 }, | ||
192 | [SCxSR] = { 0x14, 16 }, | ||
193 | [SCxRDR] = { 0x60, 8 }, | ||
194 | [SCFCR] = { 0x18, 16 }, | ||
195 | [SCFDR] = { 0x1c, 16 }, | ||
196 | [SCTFDR] = sci_reg_invalid, | ||
197 | [SCRFDR] = sci_reg_invalid, | ||
198 | [SCSPTR] = sci_reg_invalid, | ||
199 | [SCLSR] = sci_reg_invalid, | ||
200 | }, | ||
201 | |||
202 | /* | ||
203 | * Common SH-3 SCIF definitions. | ||
204 | */ | ||
205 | [SCIx_SH3_SCIF_REGTYPE] = { | ||
206 | [SCSMR] = { 0x00, 8 }, | ||
207 | [SCBRR] = { 0x02, 8 }, | ||
208 | [SCSCR] = { 0x04, 8 }, | ||
209 | [SCxTDR] = { 0x06, 8 }, | ||
210 | [SCxSR] = { 0x08, 16 }, | ||
211 | [SCxRDR] = { 0x0a, 8 }, | ||
212 | [SCFCR] = { 0x0c, 8 }, | ||
213 | [SCFDR] = { 0x0e, 16 }, | ||
214 | [SCTFDR] = sci_reg_invalid, | ||
215 | [SCRFDR] = sci_reg_invalid, | ||
216 | [SCSPTR] = sci_reg_invalid, | ||
217 | [SCLSR] = sci_reg_invalid, | ||
218 | }, | ||
219 | |||
220 | /* | ||
221 | * Common SH-4(A) SCIF(B) definitions. | ||
222 | */ | ||
223 | [SCIx_SH4_SCIF_REGTYPE] = { | ||
224 | [SCSMR] = { 0x00, 16 }, | ||
225 | [SCBRR] = { 0x04, 8 }, | ||
226 | [SCSCR] = { 0x08, 16 }, | ||
227 | [SCxTDR] = { 0x0c, 8 }, | ||
228 | [SCxSR] = { 0x10, 16 }, | ||
229 | [SCxRDR] = { 0x14, 8 }, | ||
230 | [SCFCR] = { 0x18, 16 }, | ||
231 | [SCFDR] = { 0x1c, 16 }, | ||
232 | [SCTFDR] = sci_reg_invalid, | ||
233 | [SCRFDR] = sci_reg_invalid, | ||
234 | [SCSPTR] = { 0x20, 16 }, | ||
235 | [SCLSR] = { 0x24, 16 }, | ||
236 | }, | ||
237 | |||
238 | /* | ||
239 | * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR | ||
240 | * register. | ||
241 | */ | ||
242 | [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = { | ||
243 | [SCSMR] = { 0x00, 16 }, | ||
244 | [SCBRR] = { 0x04, 8 }, | ||
245 | [SCSCR] = { 0x08, 16 }, | ||
246 | [SCxTDR] = { 0x0c, 8 }, | ||
247 | [SCxSR] = { 0x10, 16 }, | ||
248 | [SCxRDR] = { 0x14, 8 }, | ||
249 | [SCFCR] = { 0x18, 16 }, | ||
250 | [SCFDR] = { 0x1c, 16 }, | ||
251 | [SCTFDR] = sci_reg_invalid, | ||
252 | [SCRFDR] = sci_reg_invalid, | ||
253 | [SCSPTR] = sci_reg_invalid, | ||
254 | [SCLSR] = { 0x24, 16 }, | ||
255 | }, | ||
256 | |||
257 | /* | ||
258 | * Common SH-4(A) SCIF(B) definitions for ports with FIFO data | ||
259 | * count registers. | ||
260 | */ | ||
261 | [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = { | ||
262 | [SCSMR] = { 0x00, 16 }, | ||
263 | [SCBRR] = { 0x04, 8 }, | ||
264 | [SCSCR] = { 0x08, 16 }, | ||
265 | [SCxTDR] = { 0x0c, 8 }, | ||
266 | [SCxSR] = { 0x10, 16 }, | ||
267 | [SCxRDR] = { 0x14, 8 }, | ||
268 | [SCFCR] = { 0x18, 16 }, | ||
269 | [SCFDR] = { 0x1c, 16 }, | ||
270 | [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */ | ||
271 | [SCRFDR] = { 0x20, 16 }, | ||
272 | [SCSPTR] = { 0x24, 16 }, | ||
273 | [SCLSR] = { 0x28, 16 }, | ||
274 | }, | ||
275 | |||
276 | /* | ||
277 | * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR | ||
278 | * registers. | ||
279 | */ | ||
280 | [SCIx_SH7705_SCIF_REGTYPE] = { | ||
281 | [SCSMR] = { 0x00, 16 }, | ||
282 | [SCBRR] = { 0x04, 8 }, | ||
283 | [SCSCR] = { 0x08, 16 }, | ||
284 | [SCxTDR] = { 0x20, 8 }, | ||
285 | [SCxSR] = { 0x14, 16 }, | ||
286 | [SCxRDR] = { 0x24, 8 }, | ||
287 | [SCFCR] = { 0x18, 16 }, | ||
288 | [SCFDR] = { 0x1c, 16 }, | ||
289 | [SCTFDR] = sci_reg_invalid, | ||
290 | [SCRFDR] = sci_reg_invalid, | ||
291 | [SCSPTR] = sci_reg_invalid, | ||
292 | [SCLSR] = sci_reg_invalid, | ||
293 | }, | ||
294 | }; | ||
295 | |||
296 | #define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset) | ||
297 | |||
298 | /* | ||
299 | * The "offset" here is rather misleading, in that it refers to an enum | ||
300 | * value relative to the port mapping rather than the fixed offset | ||
301 | * itself, which needs to be manually retrieved from the platform's | ||
302 | * register map for the given port. | ||
303 | */ | ||
304 | static unsigned int sci_serial_in(struct uart_port *p, int offset) | ||
305 | { | ||
306 | struct plat_sci_reg *reg = sci_getreg(p, offset); | ||
307 | |||
308 | if (reg->size == 8) | ||
309 | return ioread8(p->membase + (reg->offset << p->regshift)); | ||
310 | else if (reg->size == 16) | ||
311 | return ioread16(p->membase + (reg->offset << p->regshift)); | ||
312 | else | ||
313 | WARN(1, "Invalid register access\n"); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static void sci_serial_out(struct uart_port *p, int offset, int value) | ||
319 | { | ||
320 | struct plat_sci_reg *reg = sci_getreg(p, offset); | ||
321 | |||
322 | if (reg->size == 8) | ||
323 | iowrite8(value, p->membase + (reg->offset << p->regshift)); | ||
324 | else if (reg->size == 16) | ||
325 | iowrite16(value, p->membase + (reg->offset << p->regshift)); | ||
326 | else | ||
327 | WARN(1, "Invalid register access\n"); | ||
328 | } | ||
329 | |||
330 | #define sci_in(up, offset) (up->serial_in(up, offset)) | ||
331 | #define sci_out(up, offset, value) (up->serial_out(up, offset, value)) | ||
332 | |||
333 | static int sci_probe_regmap(struct plat_sci_port *cfg) | ||
334 | { | ||
335 | switch (cfg->type) { | ||
336 | case PORT_SCI: | ||
337 | cfg->regtype = SCIx_SCI_REGTYPE; | ||
338 | break; | ||
339 | case PORT_IRDA: | ||
340 | cfg->regtype = SCIx_IRDA_REGTYPE; | ||
341 | break; | ||
342 | case PORT_SCIFA: | ||
343 | cfg->regtype = SCIx_SCIFA_REGTYPE; | ||
344 | break; | ||
345 | case PORT_SCIFB: | ||
346 | cfg->regtype = SCIx_SCIFB_REGTYPE; | ||
347 | break; | ||
348 | case PORT_SCIF: | ||
349 | /* | ||
350 | * The SH-4 is a bit of a misnomer here, although that's | ||
351 | * where this particular port layout originated. This | ||
352 | * configuration (or some slight variation thereof) | ||
353 | * remains the dominant model for all SCIFs. | ||
354 | */ | ||
355 | cfg->regtype = SCIx_SH4_SCIF_REGTYPE; | ||
356 | break; | ||
357 | default: | ||
358 | printk(KERN_ERR "Can't probe register map for given port\n"); | ||
359 | return -EINVAL; | ||
360 | } | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static void sci_port_enable(struct sci_port *sci_port) | ||
366 | { | ||
367 | if (!sci_port->port.dev) | ||
368 | return; | ||
369 | |||
370 | pm_runtime_get_sync(sci_port->port.dev); | ||
371 | |||
372 | clk_enable(sci_port->iclk); | ||
373 | sci_port->port.uartclk = clk_get_rate(sci_port->iclk); | ||
374 | clk_enable(sci_port->fclk); | ||
375 | } | ||
376 | |||
377 | static void sci_port_disable(struct sci_port *sci_port) | ||
378 | { | ||
379 | if (!sci_port->port.dev) | ||
380 | return; | ||
381 | |||
382 | clk_disable(sci_port->fclk); | ||
383 | clk_disable(sci_port->iclk); | ||
384 | |||
385 | pm_runtime_put_sync(sci_port->port.dev); | ||
386 | } | ||
387 | |||
124 | #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) | 388 | #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) |
125 | 389 | ||
126 | #ifdef CONFIG_CONSOLE_POLL | 390 | #ifdef CONFIG_CONSOLE_POLL |
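The hunk above introduces a register map indexed by port layout, with sci_reg_invalid (a zero size) marking registers a given part does not implement; sci_serial_in()/sci_serial_out() then dispatch on the table instead of compile-time #ifdefs. A compact sketch of the same table-driven lookup, with invented types and offsets (not taken from any real SCI(F) variant):

#include <stdio.h>

enum regtype { TYPE_SCI, TYPE_SCIF, NR_TYPES };
enum regname { R_SMR, R_BRR, R_LSR, NR_REGS };

struct reg { unsigned char offset, size; };

/* One row per port layout; a zero size marks an absent register. */
static const struct reg regmap[NR_TYPES][NR_REGS] = {
        [TYPE_SCI]  = { [R_SMR] = { 0x00, 8 },  [R_BRR] = { 0x01, 8 },
                        [R_LSR] = { 0, 0 } },   /* no status register here */
        [TYPE_SCIF] = { [R_SMR] = { 0x00, 16 }, [R_BRR] = { 0x04, 8 },
                        [R_LSR] = { 0x24, 16 } },
};

static void describe(enum regtype t, enum regname r)
{
        const struct reg *reg = &regmap[t][r];

        if (!reg->size)
                printf("type %d: register %d not implemented\n", t, r);
        else
                printf("type %d: register %d at +0x%02x, %u bits\n",
                       t, r, (unsigned)reg->offset, (unsigned)reg->size);
}

int main(void)
{
        describe(TYPE_SCI, R_LSR);      /* absent: zero size acts as guard */
        describe(TYPE_SCIF, R_LSR);
        return 0;
}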
@@ -164,223 +428,76 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) | |||
164 | } | 428 | } |
165 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ | 429 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ |
166 | 430 | ||
167 | #if defined(__H8300H__) || defined(__H8300S__) | ||
168 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) | 431 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) |
169 | { | 432 | { |
170 | int ch = (port->mapbase - SMR0) >> 3; | 433 | struct sci_port *s = to_sci_port(port); |
171 | 434 | struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR; | |
172 | /* set DDR regs */ | ||
173 | H8300_GPIO_DDR(h8300_sci_pins[ch].port, | ||
174 | h8300_sci_pins[ch].rx, | ||
175 | H8300_GPIO_INPUT); | ||
176 | H8300_GPIO_DDR(h8300_sci_pins[ch].port, | ||
177 | h8300_sci_pins[ch].tx, | ||
178 | H8300_GPIO_OUTPUT); | ||
179 | |||
180 | /* tx mark output*/ | ||
181 | H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; | ||
182 | } | ||
183 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | ||
184 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
185 | { | ||
186 | if (port->mapbase == 0xA4400000) { | ||
187 | __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); | ||
188 | __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); | ||
189 | } else if (port->mapbase == 0xA4410000) | ||
190 | __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); | ||
191 | } | ||
192 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) | ||
193 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
194 | { | ||
195 | unsigned short data; | ||
196 | |||
197 | if (cflag & CRTSCTS) { | ||
198 | /* enable RTS/CTS */ | ||
199 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ | ||
200 | /* Clear PTCR bit 9-2; enable all scif pins but sck */ | ||
201 | data = __raw_readw(PORT_PTCR); | ||
202 | __raw_writew((data & 0xfc03), PORT_PTCR); | ||
203 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ | ||
204 | /* Clear PVCR bit 9-2 */ | ||
205 | data = __raw_readw(PORT_PVCR); | ||
206 | __raw_writew((data & 0xfc03), PORT_PVCR); | ||
207 | } | ||
208 | } else { | ||
209 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ | ||
210 | /* Clear PTCR bit 5-2; enable only tx and rx */ | ||
211 | data = __raw_readw(PORT_PTCR); | ||
212 | __raw_writew((data & 0xffc3), PORT_PTCR); | ||
213 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ | ||
214 | /* Clear PVCR bit 5-2 */ | ||
215 | data = __raw_readw(PORT_PVCR); | ||
216 | __raw_writew((data & 0xffc3), PORT_PVCR); | ||
217 | } | ||
218 | } | ||
219 | } | ||
220 | #elif defined(CONFIG_CPU_SH3) | ||
221 | /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ | ||
222 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
223 | { | ||
224 | unsigned short data; | ||
225 | |||
226 | /* We need to set SCPCR to enable RTS/CTS */ | ||
227 | data = __raw_readw(SCPCR); | ||
228 | /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ | ||
229 | __raw_writew(data & 0x0fcf, SCPCR); | ||
230 | |||
231 | if (!(cflag & CRTSCTS)) { | ||
232 | /* We need to set SCPCR to enable RTS/CTS */ | ||
233 | data = __raw_readw(SCPCR); | ||
234 | /* Clear out SCP7MD1,0, SCP4MD1,0, | ||
235 | Set SCP6MD1,0 = {01} (output) */ | ||
236 | __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); | ||
237 | 435 | ||
238 | data = __raw_readb(SCPDR); | 436 | /* |
239 | /* Set /RTS2 (bit6) = 0 */ | 437 | * Use port-specific handler if provided. |
240 | __raw_writeb(data & 0xbf, SCPDR); | 438 | */ |
439 | if (s->cfg->ops && s->cfg->ops->init_pins) { | ||
440 | s->cfg->ops->init_pins(port, cflag); | ||
441 | return; | ||
241 | } | 442 | } |
242 | } | ||
243 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) | ||
244 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
245 | { | ||
246 | unsigned short data; | ||
247 | 443 | ||
248 | if (port->mapbase == 0xffe00000) { | 444 | /* |
249 | data = __raw_readw(PSCR); | 445 | * For the generic path SCSPTR is necessary. Bail out if that's |
250 | data &= ~0x03cf; | 446 | * unavailable, too. |
251 | if (!(cflag & CRTSCTS)) | 447 | */ |
252 | data |= 0x0340; | 448 | if (!reg->size) |
449 | return; | ||
253 | 450 | ||
254 | __raw_writew(data, PSCR); | ||
255 | } | ||
256 | } | ||
257 | #elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \ | ||
258 | defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | ||
259 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | ||
260 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | ||
261 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ | ||
262 | defined(CONFIG_CPU_SUBTYPE_SHX3) | ||
263 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
264 | { | ||
265 | if (!(cflag & CRTSCTS)) | 451 | if (!(cflag & CRTSCTS)) |
266 | __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ | 452 | sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */ |
267 | } | 453 | } |
268 | #elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) | ||
269 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
270 | { | ||
271 | if (!(cflag & CRTSCTS)) | ||
272 | __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ | ||
273 | } | ||
274 | #else | ||
275 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | ||
276 | { | ||
277 | /* Nothing to do */ | ||
278 | } | ||
279 | #endif | ||
280 | 454 | ||
281 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ | 455 | static int sci_txfill(struct uart_port *port) |
282 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | ||
283 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | ||
284 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
285 | static int scif_txfill(struct uart_port *port) | ||
286 | { | ||
287 | return sci_in(port, SCTFDR) & 0xff; | ||
288 | } | ||
289 | |||
290 | static int scif_txroom(struct uart_port *port) | ||
291 | { | 456 | { |
292 | return SCIF_TXROOM_MAX - scif_txfill(port); | 457 | struct plat_sci_reg *reg; |
293 | } | ||
294 | 458 | ||
295 | static int scif_rxfill(struct uart_port *port) | 459 | reg = sci_getreg(port, SCTFDR); |
296 | { | 460 | if (reg->size) |
297 | return sci_in(port, SCRFDR) & 0xff; | ||
298 | } | ||
299 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | ||
300 | static int scif_txfill(struct uart_port *port) | ||
301 | { | ||
302 | if (port->mapbase == 0xffe00000 || | ||
303 | port->mapbase == 0xffe08000) | ||
304 | /* SCIF0/1*/ | ||
305 | return sci_in(port, SCTFDR) & 0xff; | 461 | return sci_in(port, SCTFDR) & 0xff; |
306 | else | 462 | |
307 | /* SCIF2 */ | 463 | reg = sci_getreg(port, SCFDR); |
464 | if (reg->size) | ||
308 | return sci_in(port, SCFDR) >> 8; | 465 | return sci_in(port, SCFDR) >> 8; |
309 | } | ||
310 | 466 | ||
311 | static int scif_txroom(struct uart_port *port) | 467 | return !(sci_in(port, SCxSR) & SCI_TDRE); |
312 | { | ||
313 | if (port->mapbase == 0xffe00000 || | ||
314 | port->mapbase == 0xffe08000) | ||
315 | /* SCIF0/1*/ | ||
316 | return SCIF_TXROOM_MAX - scif_txfill(port); | ||
317 | else | ||
318 | /* SCIF2 */ | ||
319 | return SCIF2_TXROOM_MAX - scif_txfill(port); | ||
320 | } | 468 | } |
321 | 469 | ||
322 | static int scif_rxfill(struct uart_port *port) | 470 | static int sci_txroom(struct uart_port *port) |
323 | { | ||
324 | if ((port->mapbase == 0xffe00000) || | ||
325 | (port->mapbase == 0xffe08000)) { | ||
326 | /* SCIF0/1*/ | ||
327 | return sci_in(port, SCRFDR) & 0xff; | ||
328 | } else { | ||
329 | /* SCIF2 */ | ||
330 | return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; | ||
331 | } | ||
332 | } | ||
333 | #elif defined(CONFIG_ARCH_SH7372) | ||
334 | static int scif_txfill(struct uart_port *port) | ||
335 | { | 471 | { |
336 | if (port->type == PORT_SCIFA) | 472 | return port->fifosize - sci_txfill(port); |
337 | return sci_in(port, SCFDR) >> 8; | ||
338 | else | ||
339 | return sci_in(port, SCTFDR); | ||
340 | } | 473 | } |
341 | 474 | ||
342 | static int scif_txroom(struct uart_port *port) | 475 | static int sci_rxfill(struct uart_port *port) |
343 | { | 476 | { |
344 | return port->fifosize - scif_txfill(port); | 477 | struct plat_sci_reg *reg; |
345 | } | ||
346 | 478 | ||
347 | static int scif_rxfill(struct uart_port *port) | 479 | reg = sci_getreg(port, SCRFDR); |
348 | { | 480 | if (reg->size) |
349 | if (port->type == PORT_SCIFA) | 481 | return sci_in(port, SCRFDR) & 0xff; |
350 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; | ||
351 | else | ||
352 | return sci_in(port, SCRFDR); | ||
353 | } | ||
354 | #else | ||
355 | static int scif_txfill(struct uart_port *port) | ||
356 | { | ||
357 | return sci_in(port, SCFDR) >> 8; | ||
358 | } | ||
359 | 482 | ||
360 | static int scif_txroom(struct uart_port *port) | 483 | reg = sci_getreg(port, SCFDR); |
361 | { | 484 | if (reg->size) |
362 | return SCIF_TXROOM_MAX - scif_txfill(port); | 485 | return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1); |
363 | } | ||
364 | 486 | ||
365 | static int scif_rxfill(struct uart_port *port) | 487 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; |
366 | { | ||
367 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; | ||
368 | } | 488 | } |
369 | #endif | ||
370 | 489 | ||
371 | static int sci_txfill(struct uart_port *port) | 490 | /* |
491 | * SCI helper for checking the state of the muxed port/RXD pins. | ||
492 | */ | ||
493 | static inline int sci_rxd_in(struct uart_port *port) | ||
372 | { | 494 | { |
373 | return !(sci_in(port, SCxSR) & SCI_TDRE); | 495 | struct sci_port *s = to_sci_port(port); |
374 | } | ||
375 | 496 | ||
376 | static int sci_txroom(struct uart_port *port) | 497 | if (s->cfg->port_reg <= 0) |
377 | { | 498 | return 1; |
378 | return !sci_txfill(port); | ||
379 | } | ||
380 | 499 | ||
381 | static int sci_rxfill(struct uart_port *port) | 500 | return !!__raw_readb(s->cfg->port_reg); |
382 | { | ||
383 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; | ||
384 | } | 501 | } |
385 | 502 | ||
386 | /* ********************************************************************** * | 503 | /* ********************************************************************** * |
@@ -406,10 +523,7 @@ static void sci_transmit_chars(struct uart_port *port) | |||
406 | return; | 523 | return; |
407 | } | 524 | } |
408 | 525 | ||
409 | if (port->type == PORT_SCI) | 526 | count = sci_txroom(port); |
410 | count = sci_txroom(port); | ||
411 | else | ||
412 | count = scif_txroom(port); | ||
413 | 527 | ||
414 | do { | 528 | do { |
415 | unsigned char c; | 529 | unsigned char c; |
@@ -464,13 +578,8 @@ static void sci_receive_chars(struct uart_port *port) | |||
464 | return; | 578 | return; |
465 | 579 | ||
466 | while (1) { | 580 | while (1) { |
467 | if (port->type == PORT_SCI) | ||
468 | count = sci_rxfill(port); | ||
469 | else | ||
470 | count = scif_rxfill(port); | ||
471 | |||
472 | /* Don't copy more bytes than there is room for in the buffer */ | 581 | /* Don't copy more bytes than there is room for in the buffer */ |
473 | count = tty_buffer_request_room(tty, count); | 582 | count = tty_buffer_request_room(tty, sci_rxfill(port)); |
474 | 583 | ||
475 | /* If for any reason we can't copy more data, we're done! */ | 584 | /* If for any reason we can't copy more data, we're done! */ |
476 | if (count == 0) | 585 | if (count == 0) |
@@ -561,8 +670,7 @@ static void sci_break_timer(unsigned long data) | |||
561 | { | 670 | { |
562 | struct sci_port *port = (struct sci_port *)data; | 671 | struct sci_port *port = (struct sci_port *)data; |
563 | 672 | ||
564 | if (port->enable) | 673 | sci_port_enable(port); |
565 | port->enable(&port->port); | ||
566 | 674 | ||
567 | if (sci_rxd_in(&port->port) == 0) { | 675 | if (sci_rxd_in(&port->port) == 0) { |
568 | port->break_flag = 1; | 676 | port->break_flag = 1; |
@@ -574,8 +682,7 @@ static void sci_break_timer(unsigned long data) | |||
574 | } else | 682 | } else |
575 | port->break_flag = 0; | 683 | port->break_flag = 0; |
576 | 684 | ||
577 | if (port->disable) | 685 | sci_port_disable(port); |
578 | port->disable(&port->port); | ||
579 | } | 686 | } |
580 | 687 | ||
581 | static int sci_handle_errors(struct uart_port *port) | 688 | static int sci_handle_errors(struct uart_port *port) |
@@ -583,13 +690,19 @@ static int sci_handle_errors(struct uart_port *port) | |||
583 | int copied = 0; | 690 | int copied = 0; |
584 | unsigned short status = sci_in(port, SCxSR); | 691 | unsigned short status = sci_in(port, SCxSR); |
585 | struct tty_struct *tty = port->state->port.tty; | 692 | struct tty_struct *tty = port->state->port.tty; |
693 | struct sci_port *s = to_sci_port(port); | ||
586 | 694 | ||
587 | if (status & SCxSR_ORER(port)) { | 695 | /* |
588 | /* overrun error */ | 696 | * Handle overruns, if supported. |
589 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) | 697 | */ |
590 | copied++; | 698 | if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) { |
699 | if (status & (1 << s->cfg->overrun_bit)) { | ||
700 | /* overrun error */ | ||
701 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) | ||
702 | copied++; | ||
591 | 703 | ||
592 | dev_notice(port->dev, "overrun error"); | 704 | dev_notice(port->dev, "overrun error"); |
705 | } | ||
593 | } | 706 | } |
594 | 707 | ||
595 | if (status & SCxSR_FER(port)) { | 708 | if (status & SCxSR_FER(port)) { |
@@ -637,12 +750,15 @@ static int sci_handle_errors(struct uart_port *port) | |||
637 | static int sci_handle_fifo_overrun(struct uart_port *port) | 750 | static int sci_handle_fifo_overrun(struct uart_port *port) |
638 | { | 751 | { |
639 | struct tty_struct *tty = port->state->port.tty; | 752 | struct tty_struct *tty = port->state->port.tty; |
753 | struct sci_port *s = to_sci_port(port); | ||
754 | struct plat_sci_reg *reg; | ||
640 | int copied = 0; | 755 | int copied = 0; |
641 | 756 | ||
642 | if (port->type != PORT_SCIF) | 757 | reg = sci_getreg(port, SCLSR); |
758 | if (!reg->size) | ||
643 | return 0; | 759 | return 0; |
644 | 760 | ||
645 | if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { | 761 | if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) { |
646 | sci_out(port, SCLSR, 0); | 762 | sci_out(port, SCLSR, 0); |
647 | 763 | ||
648 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | 764 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); |
@@ -840,74 +956,102 @@ static int sci_notifier(struct notifier_block *self, | |||
840 | return NOTIFY_OK; | 956 | return NOTIFY_OK; |
841 | } | 957 | } |
842 | 958 | ||
843 | static void sci_clk_enable(struct uart_port *port) | 959 | static struct sci_irq_desc { |
844 | { | 960 | const char *desc; |
845 | struct sci_port *sci_port = to_sci_port(port); | 961 | irq_handler_t handler; |
846 | 962 | } sci_irq_desc[] = { | |
847 | pm_runtime_get_sync(port->dev); | 963 | /* |
964 | * Split out handlers, the default case. | ||
965 | */ | ||
966 | [SCIx_ERI_IRQ] = { | ||
967 | .desc = "rx err", | ||
968 | .handler = sci_er_interrupt, | ||
969 | }, | ||
848 | 970 | ||
849 | clk_enable(sci_port->iclk); | 971 | [SCIx_RXI_IRQ] = { |
850 | sci_port->port.uartclk = clk_get_rate(sci_port->iclk); | 972 | .desc = "rx full", |
851 | clk_enable(sci_port->fclk); | 973 | .handler = sci_rx_interrupt, |
852 | } | 974 | }, |
853 | 975 | ||
854 | static void sci_clk_disable(struct uart_port *port) | 976 | [SCIx_TXI_IRQ] = { |
855 | { | 977 | .desc = "tx empty", |
856 | struct sci_port *sci_port = to_sci_port(port); | 978 | .handler = sci_tx_interrupt, |
979 | }, | ||
857 | 980 | ||
858 | clk_disable(sci_port->fclk); | 981 | [SCIx_BRI_IRQ] = { |
859 | clk_disable(sci_port->iclk); | 982 | .desc = "break", |
983 | .handler = sci_br_interrupt, | ||
984 | }, | ||
860 | 985 | ||
861 | pm_runtime_put_sync(port->dev); | 986 | /* |
862 | } | 987 | * Special muxed handler. |
988 | */ | ||
989 | [SCIx_MUX_IRQ] = { | ||
990 | .desc = "mux", | ||
991 | .handler = sci_mpxed_interrupt, | ||
992 | }, | ||
993 | }; | ||
863 | 994 | ||
864 | static int sci_request_irq(struct sci_port *port) | 995 | static int sci_request_irq(struct sci_port *port) |
865 | { | 996 | { |
866 | int i; | 997 | struct uart_port *up = &port->port; |
867 | irqreturn_t (*handlers[4])(int irq, void *ptr) = { | 998 | int i, j, ret = 0; |
868 | sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, | 999 | |
869 | sci_br_interrupt, | 1000 | for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) { |
870 | }; | 1001 | struct sci_irq_desc *desc; |
871 | const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", | 1002 | unsigned int irq; |
872 | "SCI Transmit Data Empty", "SCI Break" }; | 1003 | |
873 | 1004 | if (SCIx_IRQ_IS_MUXED(port)) { | |
874 | if (port->cfg->irqs[0] == port->cfg->irqs[1]) { | 1005 | i = SCIx_MUX_IRQ; |
875 | if (unlikely(!port->cfg->irqs[0])) | 1006 | irq = up->irq; |
876 | return -ENODEV; | 1007 | } else |
877 | 1008 | irq = port->cfg->irqs[i]; | |
878 | if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt, | 1009 | |
879 | IRQF_DISABLED, "sci", port)) { | 1010 | desc = sci_irq_desc + i; |
880 | dev_err(port->port.dev, "Can't allocate IRQ\n"); | 1011 | port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s", |
881 | return -ENODEV; | 1012 | dev_name(up->dev), desc->desc); |
1013 | if (!port->irqstr[j]) { | ||
1014 | dev_err(up->dev, "Failed to allocate %s IRQ string\n", | ||
1015 | desc->desc); | ||
1016 | goto out_nomem; | ||
882 | } | 1017 | } |
883 | } else { | 1018 | |
884 | for (i = 0; i < ARRAY_SIZE(handlers); i++) { | 1019 | ret = request_irq(irq, desc->handler, up->irqflags, |
885 | if (unlikely(!port->cfg->irqs[i])) | 1020 | port->irqstr[j], port); |
886 | continue; | 1021 | if (unlikely(ret)) { |
887 | 1022 | dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc); | |
888 | if (request_irq(port->cfg->irqs[i], handlers[i], | 1023 | goto out_noirq; |
889 | IRQF_DISABLED, desc[i], port)) { | ||
890 | dev_err(port->port.dev, "Can't allocate IRQ\n"); | ||
891 | return -ENODEV; | ||
892 | } | ||
893 | } | 1024 | } |
894 | } | 1025 | } |
895 | 1026 | ||
896 | return 0; | 1027 | return 0; |
1028 | |||
1029 | out_noirq: | ||
1030 | while (--i >= 0) | ||
1031 | free_irq(port->cfg->irqs[i], port); | ||
1032 | |||
1033 | out_nomem: | ||
1034 | while (--j >= 0) | ||
1035 | kfree(port->irqstr[j]); | ||
1036 | |||
1037 | return ret; | ||
897 | } | 1038 | } |
898 | 1039 | ||
899 | static void sci_free_irq(struct sci_port *port) | 1040 | static void sci_free_irq(struct sci_port *port) |
900 | { | 1041 | { |
901 | int i; | 1042 | int i; |
902 | 1043 | ||
903 | if (port->cfg->irqs[0] == port->cfg->irqs[1]) | 1044 | /* |
904 | free_irq(port->cfg->irqs[0], port); | 1045 | * Intentionally in reverse order so we iterate over the muxed |
905 | else { | 1046 | * IRQ first. |
906 | for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) { | 1047 | */ |
907 | if (!port->cfg->irqs[i]) | 1048 | for (i = 0; i < SCIx_NR_IRQS; i++) { |
908 | continue; | 1049 | free_irq(port->cfg->irqs[i], port); |
1050 | kfree(port->irqstr[i]); | ||
909 | 1051 | ||
910 | free_irq(port->cfg->irqs[i], port); | 1052 | if (SCIx_IRQ_IS_MUXED(port)) { |
1053 | /* If there's only one IRQ, we're done. */ | ||
1054 | return; | ||
911 | } | 1055 | } |
912 | } | 1056 | } |
913 | } | 1057 | } |
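sci_request_irq() above now builds a kasprintf()'d name per IRQ and unwinds with `while (--i >= 0)` on failure. A plain-C sketch of that allocate/request/unwind shape (malloc/snprintf stand in for kasprintf, and the "request" step is faked so the unwind path actually runs):

#include <stdio.h>
#include <stdlib.h>

#define NR_IRQS 4

/* Pretend IRQ request that fails for the third line, to exercise unwind. */
static int fake_request(int idx)
{
        return idx == 2 ? -1 : 0;
}

int main(void)
{
        char *names[NR_IRQS] = { NULL };
        int i, ret = 0;

        for (i = 0; i < NR_IRQS; i++) {
                names[i] = malloc(32);          /* stands in for kasprintf() */
                if (!names[i]) {
                        ret = -1;
                        break;
                }
                snprintf(names[i], 32, "dev0:irq%d", i);

                if (fake_request(i)) {          /* stands in for request_irq() */
                        free(names[i]);
                        ret = -1;
                        break;
                }
                printf("requested %s\n", names[i]);
        }

        if (ret)                                /* unwind whatever succeeded */
                while (--i >= 0) {
                        printf("releasing %s\n", names[i]);
                        free(names[i]);
                }

        return ret ? 1 : 0;
}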
@@ -915,7 +1059,7 @@ static void sci_free_irq(struct sci_port *port) | |||
915 | static unsigned int sci_tx_empty(struct uart_port *port) | 1059 | static unsigned int sci_tx_empty(struct uart_port *port) |
916 | { | 1060 | { |
917 | unsigned short status = sci_in(port, SCxSR); | 1061 | unsigned short status = sci_in(port, SCxSR); |
918 | unsigned short in_tx_fifo = scif_txfill(port); | 1062 | unsigned short in_tx_fifo = sci_txfill(port); |
919 | 1063 | ||
920 | return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; | 1064 | return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; |
921 | } | 1065 | } |
@@ -1438,8 +1582,7 @@ static int sci_startup(struct uart_port *port) | |||
1438 | 1582 | ||
1439 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | 1583 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); |
1440 | 1584 | ||
1441 | if (s->enable) | 1585 | sci_port_enable(s); |
1442 | s->enable(port); | ||
1443 | 1586 | ||
1444 | ret = sci_request_irq(s); | 1587 | ret = sci_request_irq(s); |
1445 | if (unlikely(ret < 0)) | 1588 | if (unlikely(ret < 0)) |
@@ -1465,8 +1608,7 @@ static void sci_shutdown(struct uart_port *port) | |||
1465 | sci_free_dma(port); | 1608 | sci_free_dma(port); |
1466 | sci_free_irq(s); | 1609 | sci_free_irq(s); |
1467 | 1610 | ||
1468 | if (s->disable) | 1611 | sci_port_disable(s); |
1469 | s->disable(port); | ||
1470 | } | 1612 | } |
1471 | 1613 | ||
1472 | static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, | 1614 | static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, |
@@ -1513,8 +1655,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1513 | if (likely(baud && port->uartclk)) | 1655 | if (likely(baud && port->uartclk)) |
1514 | t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); | 1656 | t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); |
1515 | 1657 | ||
1516 | if (s->enable) | 1658 | sci_port_enable(s); |
1517 | s->enable(port); | ||
1518 | 1659 | ||
1519 | do { | 1660 | do { |
1520 | status = sci_in(port, SCxSR); | 1661 | status = sci_in(port, SCxSR); |
@@ -1584,8 +1725,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1584 | if ((termios->c_cflag & CREAD) != 0) | 1725 | if ((termios->c_cflag & CREAD) != 0) |
1585 | sci_start_rx(port); | 1726 | sci_start_rx(port); |
1586 | 1727 | ||
1587 | if (s->disable) | 1728 | sci_port_disable(s); |
1588 | s->disable(port); | ||
1589 | } | 1729 | } |
1590 | 1730 | ||
1591 | static const char *sci_type(struct uart_port *port) | 1731 | static const char *sci_type(struct uart_port *port) |
@@ -1726,6 +1866,7 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
1726 | struct plat_sci_port *p) | 1866 | struct plat_sci_port *p) |
1727 | { | 1867 | { |
1728 | struct uart_port *port = &sci_port->port; | 1868 | struct uart_port *port = &sci_port->port; |
1869 | int ret; | ||
1729 | 1870 | ||
1730 | port->ops = &sci_uart_ops; | 1871 | port->ops = &sci_uart_ops; |
1731 | port->iotype = UPIO_MEM; | 1872 | port->iotype = UPIO_MEM; |
@@ -1746,6 +1887,12 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
1746 | break; | 1887 | break; |
1747 | } | 1888 | } |
1748 | 1889 | ||
1890 | if (p->regtype == SCIx_PROBE_REGTYPE) { | ||
1891 | ret = sci_probe_regmap(p); | ||
1892 | if (unlikely(ret)) | ||
1893 | return ret; | ||
1894 | } | ||
1895 | |||
1749 | if (dev) { | 1896 | if (dev) { |
1750 | sci_port->iclk = clk_get(&dev->dev, "sci_ick"); | 1897 | sci_port->iclk = clk_get(&dev->dev, "sci_ick"); |
1751 | if (IS_ERR(sci_port->iclk)) { | 1898 | if (IS_ERR(sci_port->iclk)) { |
@@ -1764,8 +1911,6 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
1764 | if (IS_ERR(sci_port->fclk)) | 1911 | if (IS_ERR(sci_port->fclk)) |
1765 | sci_port->fclk = NULL; | 1912 | sci_port->fclk = NULL; |
1766 | 1913 | ||
1767 | sci_port->enable = sci_clk_enable; | ||
1768 | sci_port->disable = sci_clk_disable; | ||
1769 | port->dev = &dev->dev; | 1914 | port->dev = &dev->dev; |
1770 | 1915 | ||
1771 | pm_runtime_enable(&dev->dev); | 1916 | pm_runtime_enable(&dev->dev); |
@@ -1775,20 +1920,51 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
1775 | sci_port->break_timer.function = sci_break_timer; | 1920 | sci_port->break_timer.function = sci_break_timer; |
1776 | init_timer(&sci_port->break_timer); | 1921 | init_timer(&sci_port->break_timer); |
1777 | 1922 | ||
1923 | /* | ||
1924 | * Establish some sensible defaults for the error detection. | ||
1925 | */ | ||
1926 | if (!p->error_mask) | ||
1927 | p->error_mask = (p->type == PORT_SCI) ? | ||
1928 | SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK; | ||
1929 | |||
1930 | /* | ||
1931 | * Establish sensible defaults for the overrun detection, unless | ||
1932 | * the part has explicitly disabled support for it. | ||
1933 | */ | ||
1934 | if (p->overrun_bit != SCIx_NOT_SUPPORTED) { | ||
1935 | if (p->type == PORT_SCI) | ||
1936 | p->overrun_bit = 5; | ||
1937 | else if (p->scbrr_algo_id == SCBRR_ALGO_4) | ||
1938 | p->overrun_bit = 9; | ||
1939 | else | ||
1940 | p->overrun_bit = 0; | ||
1941 | |||
1942 | /* | ||
1943 | * Make the error mask inclusive of overrun detection, if | ||
1944 | * supported. | ||
1945 | */ | ||
1946 | p->error_mask |= (1 << p->overrun_bit); | ||
1947 | } | ||
1948 | |||
1778 | sci_port->cfg = p; | 1949 | sci_port->cfg = p; |
1779 | 1950 | ||
1780 | port->mapbase = p->mapbase; | 1951 | port->mapbase = p->mapbase; |
1781 | port->type = p->type; | 1952 | port->type = p->type; |
1782 | port->flags = p->flags; | 1953 | port->flags = p->flags; |
1954 | port->regshift = p->regshift; | ||
1783 | 1955 | ||
1784 | /* | 1956 | /* |
1785 | * The UART port needs an IRQ value, so we peg this to the TX IRQ | 1957 | * The UART port needs an IRQ value, so we peg this to the RX IRQ |
1786 | * for the multi-IRQ ports, which is where we are primarily | 1958 | * for the multi-IRQ ports, which is where we are primarily |
1787 | * concerned with the shutdown path synchronization. | 1959 | * concerned with the shutdown path synchronization. |
1788 | * | 1960 | * |
1789 | * For the muxed case there's nothing more to do. | 1961 | * For the muxed case there's nothing more to do. |
1790 | */ | 1962 | */ |
1791 | port->irq = p->irqs[SCIx_RXI_IRQ]; | 1963 | port->irq = p->irqs[SCIx_RXI_IRQ]; |
1964 | port->irqflags = IRQF_DISABLED; | ||
1965 | |||
1966 | port->serial_in = sci_serial_in; | ||
1967 | port->serial_out = sci_serial_out; | ||
1792 | 1968 | ||
1793 | if (p->dma_dev) | 1969 | if (p->dma_dev) |
1794 | dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", | 1970 | dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", |
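Editor's note: sci_init_single() now computes a per-port error mask at init time (a default chosen by port type, with the overrun bit folded in unless the part marks it SCIx_NOT_SUPPORTED), and the header change later in this diff makes SCxSR_ERRORS() read that mask instead of the old compile-time SCIF_ORER selection. A small worked example of the composition, using the SCIF_* bit values being removed from sh-sci.h below (ER 0x0080, BRK 0x0010, FER 0x0008, PER 0x0004) and overrun bit 9 for the SCBRR_ALGO_4 case:

    #include <stdio.h>

    /* SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK, per the deleted sh-sci.h bits */
    #define SCIF_DEFAULT_ERROR_MASK 0x009c

    int main(void)
    {
        unsigned int error_mask = 0;    /* platform data left it unset */
        int overrun_bit = 9;            /* SCBRR_ALGO_4 parts: SCIF_ORER 0x0200 */

        if (!error_mask)
            error_mask = SCIF_DEFAULT_ERROR_MASK;

        /* fold overrun detection in, as the hunk above does */
        error_mask |= 1u << overrun_bit;

        printf("error_mask = 0x%04x\n", error_mask);    /* 0x029c */
        return 0;
    }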
@@ -1814,8 +1990,7 @@ static void serial_console_write(struct console *co, const char *s, | |||
1814 | struct uart_port *port = &sci_port->port; | 1990 | struct uart_port *port = &sci_port->port; |
1815 | unsigned short bits; | 1991 | unsigned short bits; |
1816 | 1992 | ||
1817 | if (sci_port->enable) | 1993 | sci_port_enable(sci_port); |
1818 | sci_port->enable(port); | ||
1819 | 1994 | ||
1820 | uart_console_write(port, s, count, serial_console_putchar); | 1995 | uart_console_write(port, s, count, serial_console_putchar); |
1821 | 1996 | ||
@@ -1824,8 +1999,7 @@ static void serial_console_write(struct console *co, const char *s, | |||
1824 | while ((sci_in(port, SCxSR) & bits) != bits) | 1999 | while ((sci_in(port, SCxSR) & bits) != bits) |
1825 | cpu_relax(); | 2000 | cpu_relax(); |
1826 | 2001 | ||
1827 | if (sci_port->disable) | 2002 | sci_port_disable(sci_port); |
1828 | sci_port->disable(port); | ||
1829 | } | 2003 | } |
1830 | 2004 | ||
1831 | static int __devinit serial_console_setup(struct console *co, char *options) | 2005 | static int __devinit serial_console_setup(struct console *co, char *options) |
@@ -1857,20 +2031,13 @@ static int __devinit serial_console_setup(struct console *co, char *options) | |||
1857 | if (unlikely(ret != 0)) | 2031 | if (unlikely(ret != 0)) |
1858 | return ret; | 2032 | return ret; |
1859 | 2033 | ||
1860 | if (sci_port->enable) | 2034 | sci_port_enable(sci_port); |
1861 | sci_port->enable(port); | ||
1862 | 2035 | ||
1863 | if (options) | 2036 | if (options) |
1864 | uart_parse_options(options, &baud, &parity, &bits, &flow); | 2037 | uart_parse_options(options, &baud, &parity, &bits, &flow); |
1865 | 2038 | ||
1866 | ret = uart_set_options(port, co, baud, parity, bits, flow); | ||
1867 | #if defined(__H8300H__) || defined(__H8300S__) | ||
1868 | /* disable rx interrupt */ | ||
1869 | if (ret == 0) | ||
1870 | sci_stop_rx(port); | ||
1871 | #endif | ||
1872 | /* TODO: disable clock */ | 2039 | /* TODO: disable clock */ |
1873 | return ret; | 2040 | return uart_set_options(port, co, baud, parity, bits, flow); |
1874 | } | 2041 | } |
1875 | 2042 | ||
1876 | static struct console serial_console = { | 2043 | static struct console serial_console = { |
@@ -2081,3 +2248,5 @@ module_exit(sci_exit); | |||
2081 | 2248 | ||
2082 | MODULE_LICENSE("GPL"); | 2249 | MODULE_LICENSE("GPL"); |
2083 | MODULE_ALIAS("platform:sh-sci"); | 2250 | MODULE_ALIAS("platform:sh-sci"); |
2251 | MODULE_AUTHOR("Paul Mundt"); | ||
2252 | MODULE_DESCRIPTION("SuperH SCI(F) serial driver"); | ||
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index b04d937c9110..e9bed038aa1f 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h | |||
@@ -2,169 +2,14 @@ | |||
2 | #include <linux/io.h> | 2 | #include <linux/io.h> |
3 | #include <linux/gpio.h> | 3 | #include <linux/gpio.h> |
4 | 4 | ||
5 | #if defined(CONFIG_H83007) || defined(CONFIG_H83068) | ||
6 | #include <asm/regs306x.h> | ||
7 | #endif | ||
8 | #if defined(CONFIG_H8S2678) | ||
9 | #include <asm/regs267x.h> | ||
10 | #endif | ||
11 | |||
12 | #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ | ||
13 | defined(CONFIG_CPU_SUBTYPE_SH7707) || \ | ||
14 | defined(CONFIG_CPU_SUBTYPE_SH7708) || \ | ||
15 | defined(CONFIG_CPU_SUBTYPE_SH7709) | ||
16 | # define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ | ||
17 | # define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ | ||
18 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) | ||
19 | # define SCIF0 0xA4400000 | ||
20 | # define SCIF2 0xA4410000 | ||
21 | # define SCPCR 0xA4000116 | ||
22 | # define SCPDR 0xA4000136 | ||
23 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | ||
24 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | ||
25 | defined(CONFIG_ARCH_SH73A0) || \ | ||
26 | defined(CONFIG_ARCH_SH7367) || \ | ||
27 | defined(CONFIG_ARCH_SH7377) || \ | ||
28 | defined(CONFIG_ARCH_SH7372) | ||
29 | # define PORT_PTCR 0xA405011EUL | ||
30 | # define PORT_PVCR 0xA4050122UL | ||
31 | # define SCIF_ORER 0x0200 /* overrun error bit */ | ||
32 | #elif defined(CONFIG_SH_RTS7751R2D) | ||
33 | # define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ | ||
34 | # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ | ||
35 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
36 | #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ | ||
37 | defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ | ||
38 | defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ | ||
39 | defined(CONFIG_CPU_SUBTYPE_SH7091) || \ | ||
40 | defined(CONFIG_CPU_SUBTYPE_SH7751) || \ | ||
41 | defined(CONFIG_CPU_SUBTYPE_SH7751R) | ||
42 | # define SCSPTR1 0xffe0001c /* 8 bit SCI */ | ||
43 | # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ | ||
44 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
45 | #elif defined(CONFIG_CPU_SUBTYPE_SH7760) | ||
46 | # define SCSPTR0 0xfe600024 /* 16 bit SCIF */ | ||
47 | # define SCSPTR1 0xfe610024 /* 16 bit SCIF */ | ||
48 | # define SCSPTR2 0xfe620024 /* 16 bit SCIF */ | ||
49 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
50 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | ||
51 | # define SCSPTR0 0xA4400000 /* 16 bit SCIF */ | ||
52 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
53 | # define PACR 0xa4050100 | ||
54 | # define PBCR 0xa4050102 | ||
55 | #elif defined(CONFIG_CPU_SUBTYPE_SH7343) | ||
56 | # define SCSPTR0 0xffe00010 /* 16 bit SCIF */ | ||
57 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) | ||
58 | # define PADR 0xA4050120 | ||
59 | # define PSDR 0xA405013e | ||
60 | # define PWDR 0xA4050166 | ||
61 | # define PSCR 0xA405011E | ||
62 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
63 | #elif defined(CONFIG_CPU_SUBTYPE_SH7366) | ||
64 | # define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ | ||
65 | # define SCSPTR0 SCPDR0 | ||
66 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
67 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) | ||
68 | # define SCSPTR0 0xa4050160 | ||
69 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
70 | #elif defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
71 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
72 | #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) | ||
73 | # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ | ||
74 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
75 | #elif defined(CONFIG_H83007) || defined(CONFIG_H83068) | ||
76 | # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) | ||
77 | #elif defined(CONFIG_H8S2678) | ||
78 | # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) | ||
79 | #elif defined(CONFIG_CPU_SUBTYPE_SH7757) | ||
80 | # define SCSPTR0 0xfe4b0020 | ||
81 | # define SCIF_ORER 0x0001 | ||
82 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | ||
83 | # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ | ||
84 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
85 | #elif defined(CONFIG_CPU_SUBTYPE_SH7770) | ||
86 | # define SCSPTR0 0xff923020 /* 16 bit SCIF */ | ||
87 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
88 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
89 | # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ | ||
90 | # define SCIF_ORER 0x0001 /* Overrun error bit */ | ||
91 | #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | ||
92 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
93 | # define SCSPTR0 0xffea0024 /* 16 bit SCIF */ | ||
94 | # define SCIF_ORER 0x0001 /* Overrun error bit */ | ||
95 | #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ | ||
96 | defined(CONFIG_CPU_SUBTYPE_SH7203) || \ | ||
97 | defined(CONFIG_CPU_SUBTYPE_SH7206) || \ | ||
98 | defined(CONFIG_CPU_SUBTYPE_SH7263) | ||
99 | # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ | ||
100 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
101 | # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ | ||
102 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
103 | #elif defined(CONFIG_CPU_SUBTYPE_SHX3) | ||
104 | # define SCSPTR0 0xffc30020 /* 16 bit SCIF */ | ||
105 | # define SCIF_ORER 0x0001 /* Overrun error bit */ | ||
106 | #else | ||
107 | # error CPU subtype not defined | ||
108 | #endif | ||
109 | |||
110 | /* SCxSR SCI */ | ||
111 | #define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
112 | #define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
113 | #define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
114 | #define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
115 | #define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
116 | #define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
117 | /* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
118 | /* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ | ||
119 | |||
120 | #define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER) | ||
121 | |||
122 | /* SCxSR SCIF */ | ||
123 | #define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
124 | #define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
125 | #define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
126 | #define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
127 | #define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
128 | #define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
129 | #define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
130 | #define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ | ||
131 | |||
132 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | ||
133 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | ||
134 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | ||
135 | defined(CONFIG_ARCH_SH73A0) || \ | ||
136 | defined(CONFIG_ARCH_SH7367) || \ | ||
137 | defined(CONFIG_ARCH_SH7377) || \ | ||
138 | defined(CONFIG_ARCH_SH7372) | ||
139 | # define SCIF_ORER 0x0200 | ||
140 | # define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER) | ||
141 | # define SCIF_RFDC_MASK 0x007f | ||
142 | # define SCIF_TXROOM_MAX 64 | ||
143 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | ||
144 | # define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK ) | ||
145 | # define SCIF_RFDC_MASK 0x007f | ||
146 | # define SCIF_TXROOM_MAX 64 | ||
147 | /* SH7763 SCIF2 support */ | ||
148 | # define SCIF2_RFDC_MASK 0x001f | ||
149 | # define SCIF2_TXROOM_MAX 16 | ||
150 | #else | ||
151 | # define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK) | ||
152 | # define SCIF_RFDC_MASK 0x001f | ||
153 | # define SCIF_TXROOM_MAX 16 | ||
154 | #endif | ||
155 | |||
156 | #ifndef SCIF_ORER | ||
157 | #define SCIF_ORER 0x0000 | ||
158 | #endif | ||
159 | |||
160 | #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) | 5 | #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) |
161 | #define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS) | ||
162 | #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) | 6 | #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) |
163 | #define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) | 7 | #define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) |
164 | #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) | 8 | #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) |
165 | #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) | 9 | #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) |
166 | #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) | 10 | #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) |
167 | #define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) | 11 | |
12 | #define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask) | ||
168 | 13 | ||
169 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | 14 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ |
170 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | 15 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ |
@@ -191,278 +36,3 @@ | |||
191 | 36 | ||
192 | #define SCI_MAJOR 204 | 37 | #define SCI_MAJOR 204 |
193 | #define SCI_MINOR_START 8 | 38 | #define SCI_MINOR_START 8 |
194 | |||
195 | #define SCI_IN(size, offset) \ | ||
196 | if ((size) == 8) { \ | ||
197 | return ioread8(port->membase + (offset)); \ | ||
198 | } else { \ | ||
199 | return ioread16(port->membase + (offset)); \ | ||
200 | } | ||
201 | #define SCI_OUT(size, offset, value) \ | ||
202 | if ((size) == 8) { \ | ||
203 | iowrite8(value, port->membase + (offset)); \ | ||
204 | } else if ((size) == 16) { \ | ||
205 | iowrite16(value, port->membase + (offset)); \ | ||
206 | } | ||
207 | |||
208 | #define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ | ||
209 | static inline unsigned int sci_##name##_in(struct uart_port *port) \ | ||
210 | { \ | ||
211 | if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ | ||
212 | SCI_IN(scif_size, scif_offset) \ | ||
213 | } else { /* PORT_SCI or PORT_SCIFA */ \ | ||
214 | SCI_IN(sci_size, sci_offset); \ | ||
215 | } \ | ||
216 | } \ | ||
217 | static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ | ||
218 | { \ | ||
219 | if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ | ||
220 | SCI_OUT(scif_size, scif_offset, value) \ | ||
221 | } else { /* PORT_SCI or PORT_SCIFA */ \ | ||
222 | SCI_OUT(sci_size, sci_offset, value); \ | ||
223 | } \ | ||
224 | } | ||
225 | |||
226 | #ifdef CONFIG_H8300 | ||
227 | /* h8300 don't have SCIF */ | ||
228 | #define CPU_SCIF_FNS(name) \ | ||
229 | static inline unsigned int sci_##name##_in(struct uart_port *port) \ | ||
230 | { \ | ||
231 | return 0; \ | ||
232 | } \ | ||
233 | static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ | ||
234 | { \ | ||
235 | } | ||
236 | #else | ||
237 | #define CPU_SCIF_FNS(name, scif_offset, scif_size) \ | ||
238 | static inline unsigned int sci_##name##_in(struct uart_port *port) \ | ||
239 | { \ | ||
240 | SCI_IN(scif_size, scif_offset); \ | ||
241 | } \ | ||
242 | static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ | ||
243 | { \ | ||
244 | SCI_OUT(scif_size, scif_offset, value); \ | ||
245 | } | ||
246 | #endif | ||
247 | |||
248 | #define CPU_SCI_FNS(name, sci_offset, sci_size) \ | ||
249 | static inline unsigned int sci_##name##_in(struct uart_port* port) \ | ||
250 | { \ | ||
251 | SCI_IN(sci_size, sci_offset); \ | ||
252 | } \ | ||
253 | static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \ | ||
254 | { \ | ||
255 | SCI_OUT(sci_size, sci_offset, value); \ | ||
256 | } | ||
257 | |||
258 | #if defined(CONFIG_CPU_SH3) || \ | ||
259 | defined(CONFIG_ARCH_SH73A0) || \ | ||
260 | defined(CONFIG_ARCH_SH7367) || \ | ||
261 | defined(CONFIG_ARCH_SH7377) || \ | ||
262 | defined(CONFIG_ARCH_SH7372) | ||
263 | #if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | ||
264 | #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ | ||
265 | sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ | ||
266 | h8_sci_offset, h8_sci_size) \ | ||
267 | CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) | ||
268 | #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ | ||
269 | CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) | ||
270 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | ||
271 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | ||
272 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | ||
273 | defined(CONFIG_ARCH_SH7367) | ||
274 | #define SCIF_FNS(name, scif_offset, scif_size) \ | ||
275 | CPU_SCIF_FNS(name, scif_offset, scif_size) | ||
276 | #elif defined(CONFIG_ARCH_SH7377) || \ | ||
277 | defined(CONFIG_ARCH_SH7372) || \ | ||
278 | defined(CONFIG_ARCH_SH73A0) | ||
279 | #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \ | ||
280 | CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) | ||
281 | #define SCIF_FNS(name, scif_offset, scif_size) \ | ||
282 | CPU_SCIF_FNS(name, scif_offset, scif_size) | ||
283 | #else | ||
284 | #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ | ||
285 | sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ | ||
286 | h8_sci_offset, h8_sci_size) \ | ||
287 | CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size) | ||
288 | #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ | ||
289 | CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size) | ||
290 | #endif | ||
291 | #elif defined(__H8300H__) || defined(__H8300S__) | ||
292 | #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ | ||
293 | sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ | ||
294 | h8_sci_offset, h8_sci_size) \ | ||
295 | CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size) | ||
296 | #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ | ||
297 | CPU_SCIF_FNS(name) | ||
298 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ | ||
299 | defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
300 | #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \ | ||
301 | CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) | ||
302 | #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \ | ||
303 | CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) | ||
304 | #else | ||
305 | #define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ | ||
306 | sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ | ||
307 | h8_sci_offset, h8_sci_size) \ | ||
308 | CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) | ||
309 | #define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ | ||
310 | CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) | ||
311 | #endif | ||
312 | |||
313 | #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | ||
314 | defined(CONFIG_CPU_SUBTYPE_SH7720) || \ | ||
315 | defined(CONFIG_CPU_SUBTYPE_SH7721) || \ | ||
316 | defined(CONFIG_ARCH_SH7367) | ||
317 | |||
318 | SCIF_FNS(SCSMR, 0x00, 16) | ||
319 | SCIF_FNS(SCBRR, 0x04, 8) | ||
320 | SCIF_FNS(SCSCR, 0x08, 16) | ||
321 | SCIF_FNS(SCxSR, 0x14, 16) | ||
322 | SCIF_FNS(SCFCR, 0x18, 16) | ||
323 | SCIF_FNS(SCFDR, 0x1c, 16) | ||
324 | SCIF_FNS(SCxTDR, 0x20, 8) | ||
325 | SCIF_FNS(SCxRDR, 0x24, 8) | ||
326 | SCIF_FNS(SCLSR, 0x00, 0) | ||
327 | #elif defined(CONFIG_ARCH_SH7377) || \ | ||
328 | defined(CONFIG_ARCH_SH7372) || \ | ||
329 | defined(CONFIG_ARCH_SH73A0) | ||
330 | SCIF_FNS(SCSMR, 0x00, 16) | ||
331 | SCIF_FNS(SCBRR, 0x04, 8) | ||
332 | SCIF_FNS(SCSCR, 0x08, 16) | ||
333 | SCIF_FNS(SCTDSR, 0x0c, 16) | ||
334 | SCIF_FNS(SCFER, 0x10, 16) | ||
335 | SCIF_FNS(SCxSR, 0x14, 16) | ||
336 | SCIF_FNS(SCFCR, 0x18, 16) | ||
337 | SCIF_FNS(SCFDR, 0x1c, 16) | ||
338 | SCIF_FNS(SCTFDR, 0x38, 16) | ||
339 | SCIF_FNS(SCRFDR, 0x3c, 16) | ||
340 | SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8) | ||
341 | SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8) | ||
342 | SCIF_FNS(SCLSR, 0x00, 0) | ||
343 | #elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ | ||
344 | defined(CONFIG_CPU_SUBTYPE_SH7724) | ||
345 | SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16) | ||
346 | SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8) | ||
347 | SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16) | ||
348 | SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8) | ||
349 | SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16) | ||
350 | SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8) | ||
351 | SCIx_FNS(SCSPTR, 0, 0, 0, 0) | ||
352 | SCIF_FNS(SCFCR, 0x18, 16) | ||
353 | SCIF_FNS(SCFDR, 0x1c, 16) | ||
354 | SCIF_FNS(SCLSR, 0x24, 16) | ||
355 | #else | ||
356 | /* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/ | ||
357 | /* name off sz off sz off sz off sz off sz*/ | ||
358 | SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8) | ||
359 | SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8) | ||
360 | SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8) | ||
361 | SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8) | ||
362 | SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8) | ||
363 | SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) | ||
364 | SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) | ||
365 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ | ||
366 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | ||
367 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | ||
368 | defined(CONFIG_CPU_SUBTYPE_SH7786) | ||
369 | SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) | ||
370 | SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) | ||
371 | SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) | ||
372 | SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) | ||
373 | SCIF_FNS(SCLSR, 0, 0, 0x28, 16) | ||
374 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | ||
375 | SCIF_FNS(SCFDR, 0, 0, 0x1C, 16) | ||
376 | SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) | ||
377 | SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) | ||
378 | SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) | ||
379 | SCIF_FNS(SCLSR, 0, 0, 0x28, 16) | ||
380 | #else | ||
381 | SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) | ||
382 | #if defined(CONFIG_CPU_SUBTYPE_SH7722) | ||
383 | SCIF_FNS(SCSPTR, 0, 0, 0, 0) | ||
384 | #else | ||
385 | SCIF_FNS(SCSPTR, 0, 0, 0x20, 16) | ||
386 | #endif | ||
387 | SCIF_FNS(SCLSR, 0, 0, 0x24, 16) | ||
388 | #endif | ||
389 | #endif | ||
390 | #define sci_in(port, reg) sci_##reg##_in(port) | ||
391 | #define sci_out(port, reg, value) sci_##reg##_out(port, value) | ||
392 | |||
393 | /* H8/300 series SCI pins assignment */ | ||
394 | #if defined(__H8300H__) || defined(__H8300S__) | ||
395 | static const struct __attribute__((packed)) { | ||
396 | int port; /* GPIO port no */ | ||
397 | unsigned short rx,tx; /* GPIO bit no */ | ||
398 | } h8300_sci_pins[] = { | ||
399 | #if defined(CONFIG_H83007) || defined(CONFIG_H83068) | ||
400 | { /* SCI0 */ | ||
401 | .port = H8300_GPIO_P9, | ||
402 | .rx = H8300_GPIO_B2, | ||
403 | .tx = H8300_GPIO_B0, | ||
404 | }, | ||
405 | { /* SCI1 */ | ||
406 | .port = H8300_GPIO_P9, | ||
407 | .rx = H8300_GPIO_B3, | ||
408 | .tx = H8300_GPIO_B1, | ||
409 | }, | ||
410 | { /* SCI2 */ | ||
411 | .port = H8300_GPIO_PB, | ||
412 | .rx = H8300_GPIO_B7, | ||
413 | .tx = H8300_GPIO_B6, | ||
414 | } | ||
415 | #elif defined(CONFIG_H8S2678) | ||
416 | { /* SCI0 */ | ||
417 | .port = H8300_GPIO_P3, | ||
418 | .rx = H8300_GPIO_B2, | ||
419 | .tx = H8300_GPIO_B0, | ||
420 | }, | ||
421 | { /* SCI1 */ | ||
422 | .port = H8300_GPIO_P3, | ||
423 | .rx = H8300_GPIO_B3, | ||
424 | .tx = H8300_GPIO_B1, | ||
425 | }, | ||
426 | { /* SCI2 */ | ||
427 | .port = H8300_GPIO_P5, | ||
428 | .rx = H8300_GPIO_B1, | ||
429 | .tx = H8300_GPIO_B0, | ||
430 | } | ||
431 | #endif | ||
432 | }; | ||
433 | #endif | ||
434 | |||
435 | #if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ | ||
436 | defined(CONFIG_CPU_SUBTYPE_SH7707) || \ | ||
437 | defined(CONFIG_CPU_SUBTYPE_SH7708) || \ | ||
438 | defined(CONFIG_CPU_SUBTYPE_SH7709) | ||
439 | static inline int sci_rxd_in(struct uart_port *port) | ||
440 | { | ||
441 | if (port->mapbase == 0xfffffe80) | ||
442 | return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */ | ||
443 | return 1; | ||
444 | } | ||
445 | #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ | ||
446 | defined(CONFIG_CPU_SUBTYPE_SH7751) || \ | ||
447 | defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ | ||
448 | defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ | ||
449 | defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ | ||
450 | defined(CONFIG_CPU_SUBTYPE_SH7091) | ||
451 | static inline int sci_rxd_in(struct uart_port *port) | ||
452 | { | ||
453 | if (port->mapbase == 0xffe00000) | ||
454 | return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ | ||
455 | return 1; | ||
456 | } | ||
457 | #elif defined(__H8300H__) || defined(__H8300S__) | ||
458 | static inline int sci_rxd_in(struct uart_port *port) | ||
459 | { | ||
460 | int ch = (port->mapbase - SMR0) >> 3; | ||
461 | return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0; | ||
462 | } | ||
463 | #else /* default case for non-SCI processors */ | ||
464 | static inline int sci_rxd_in(struct uart_port *port) | ||
465 | { | ||
466 | return 1; | ||
467 | } | ||
468 | #endif | ||
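Editor's note on the sh-sci.h deletion above: the per-CPU #ifdef ladder of SCSPTR addresses, SCIF_ORER values and the SCI_IN/SCI_OUT macro generators is gone. Together with the sci_probe_regmap() call and the port->serial_in/serial_out assignments added to sci_init_single() earlier in this diff, register access moves from compile-time macros to a runtime-selected map. The accessor below is only an illustrative shape under the assumption that such a map records an offset and a width per register; the real table and helper live in sh-sci.c and are not shown in this section.

    /* Illustrative only; would need <linux/io.h> and <linux/serial_core.h>. */
    struct example_sci_reg {
        u8 offset;                  /* byte offset from port->membase */
        u8 size;                    /* register width in bits */
    };

    static unsigned int example_serial_in(struct uart_port *p,
                                          const struct example_sci_reg *reg)
    {
        switch (reg->size) {
        case 8:
            return ioread8(p->membase + (reg->offset << p->regshift));
        case 16:
            return ioread16(p->membase + (reg->offset << p->regshift));
        default:
            return ioread32(p->membase + (reg->offset << p->regshift));
        }
    }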
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 69407e72aac1..278aeaa92505 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig | |||
@@ -336,7 +336,7 @@ config BACKLIGHT_PCF50633 | |||
336 | enable its driver. | 336 | enable its driver. |
337 | 337 | ||
338 | config BACKLIGHT_AAT2870 | 338 | config BACKLIGHT_AAT2870 |
339 | bool "AnalogicTech AAT2870 Backlight" | 339 | tristate "AnalogicTech AAT2870 Backlight" |
340 | depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE | 340 | depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE |
341 | help | 341 | help |
342 | If you have a AnalogicTech AAT2870 say Y to enable the | 342 | If you have a AnalogicTech AAT2870 say Y to enable the |
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c index 4952a617563d..331f1ef1dad5 100644 --- a/drivers/video/backlight/aat2870_bl.c +++ b/drivers/video/backlight/aat2870_bl.c | |||
@@ -44,7 +44,7 @@ static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl, | |||
44 | struct backlight_device *bd = aat2870_bl->bd; | 44 | struct backlight_device *bd = aat2870_bl->bd; |
45 | int val; | 45 | int val; |
46 | 46 | ||
47 | val = brightness * aat2870_bl->max_current; | 47 | val = brightness * (aat2870_bl->max_current - 1); |
48 | val /= bd->props.max_brightness; | 48 | val /= bd->props.max_brightness; |
49 | 49 | ||
50 | return val; | 50 | return val; |
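Editor's note: the aat2870_brightness() change above scales against max_current - 1 instead of max_current, presumably because the programmable current codes run from 0 to max_current - 1; with the old formula, full brightness produced a value one past the top of that range. A worked example with illustrative numbers (the real max_current comes from the AAT2870 MFD enum):

    #include <stdio.h>

    int main(void)
    {
        int max_brightness = 255;   /* bd->props.max_brightness */
        int max_current = 16;       /* illustrative level count */
        int brightness = 255;       /* full scale */

        int before = brightness * max_current / max_brightness;        /* 16 */
        int after  = brightness * (max_current - 1) / max_brightness;  /* 15 */

        printf("before=%d after=%d\n", before, after);
        return 0;
    }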
@@ -158,10 +158,10 @@ static int aat2870_bl_probe(struct platform_device *pdev) | |||
158 | props.type = BACKLIGHT_RAW; | 158 | props.type = BACKLIGHT_RAW; |
159 | bd = backlight_device_register("aat2870-backlight", &pdev->dev, | 159 | bd = backlight_device_register("aat2870-backlight", &pdev->dev, |
160 | aat2870_bl, &aat2870_bl_ops, &props); | 160 | aat2870_bl, &aat2870_bl_ops, &props); |
161 | if (!bd) { | 161 | if (IS_ERR(bd)) { |
162 | dev_err(&pdev->dev, | 162 | dev_err(&pdev->dev, |
163 | "Failed allocate memory for backlight device\n"); | 163 | "Failed allocate memory for backlight device\n"); |
164 | ret = -ENOMEM; | 164 | ret = PTR_ERR(bd); |
165 | goto out_kfree; | 165 | goto out_kfree; |
166 | } | 166 | } |
167 | 167 | ||
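Editor's note: backlight_device_register() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the old !bd test could not fire and -ENOMEM was returned regardless of the real cause; the hunk above switches to the IS_ERR()/PTR_ERR() pattern. The userspace model below uses simplified local versions of the linux/err.h macros to show why a NULL check misses encoded errors:

    #include <stdio.h>

    /* Simplified restatement of the linux/err.h convention. */
    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define PTR_ERR(ptr)    ((long)(ptr))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *bd = ERR_PTR(-12);    /* e.g. -ENOMEM from a failed register */

        printf("NULL check fires: %s\n", bd == NULL ? "yes" : "no");    /* no  */
        printf("IS_ERR fires:     %s\n", IS_ERR(bd) ? "yes" : "no");    /* yes */
        printf("PTR_ERR:          %ld\n", PTR_ERR(bd));                 /* -12 */
        return 0;
    }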
@@ -175,7 +175,7 @@ static int aat2870_bl_probe(struct platform_device *pdev) | |||
175 | else | 175 | else |
176 | aat2870_bl->channels = AAT2870_BL_CH_ALL; | 176 | aat2870_bl->channels = AAT2870_BL_CH_ALL; |
177 | 177 | ||
178 | if (pdata->max_brightness > 0) | 178 | if (pdata->max_current > 0) |
179 | aat2870_bl->max_current = pdata->max_current; | 179 | aat2870_bl->max_current = pdata->max_current; |
180 | else | 180 | else |
181 | aat2870_bl->max_current = AAT2870_CURRENT_27_9; | 181 | aat2870_bl->max_current = AAT2870_CURRENT_27_9; |
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index fdd5d4ae437d..4e888ac09b3f 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c | |||
@@ -504,14 +504,18 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev) | |||
504 | return 0; | 504 | return 0; |
505 | 505 | ||
506 | r = omapdss_dsi_display_enable(dssdev); | 506 | r = omapdss_dsi_display_enable(dssdev); |
507 | if (r) | 507 | if (r) { |
508 | goto err; | 508 | dev_err(&dssdev->dev, "failed to enable DSI\n"); |
509 | goto err1; | ||
510 | } | ||
509 | 511 | ||
510 | omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); | 512 | omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); |
511 | 513 | ||
512 | r = _taal_enable_te(dssdev, true); | 514 | r = _taal_enable_te(dssdev, true); |
513 | if (r) | 515 | if (r) { |
514 | goto err; | 516 | dev_err(&dssdev->dev, "failed to re-enable TE"); |
517 | goto err2; | ||
518 | } | ||
515 | 519 | ||
516 | enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); | 520 | enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); |
517 | 521 | ||
@@ -521,13 +525,15 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev) | |||
521 | 525 | ||
522 | return 0; | 526 | return 0; |
523 | 527 | ||
524 | err: | 528 | err2: |
525 | dev_err(&dssdev->dev, "exit ULPS failed"); | 529 | dev_err(&dssdev->dev, "failed to exit ULPS"); |
526 | r = taal_panel_reset(dssdev); | ||
527 | |||
528 | enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); | ||
529 | td->ulps_enabled = false; | ||
530 | 530 | ||
531 | r = taal_panel_reset(dssdev); | ||
532 | if (!r) { | ||
533 | enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); | ||
534 | td->ulps_enabled = false; | ||
535 | } | ||
536 | err1: | ||
531 | taal_queue_ulps_work(dssdev); | 537 | taal_queue_ulps_work(dssdev); |
532 | 538 | ||
533 | return r; | 539 | return r; |
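Editor's note: the taal_exit_ulps() rework above splits the single err: label into err1/err2 so that each failure point only unwinds what had already been set up, and re-arming the TE interrupt and clearing ulps_enabled now happen only if the recovery taal_panel_reset() succeeds. The userspace model below traces that control flow; every result here is a stand-in for the driver call named in the comment.

    #include <stdbool.h>
    #include <stdio.h>

    static int display_ok, te_ok, reset_ok;
    static bool ulps_enabled;

    static int model_exit_ulps(void)
    {
        int r;

        r = display_ok ? 0 : -5;    /* omapdss_dsi_display_enable() */
        if (r)
            goto err1;              /* nothing to unwind yet */

        r = te_ok ? 0 : -5;         /* _taal_enable_te() */
        if (r)
            goto err2;              /* display is up: attempt recovery */

        ulps_enabled = false;       /* success path (abridged) */
        return 0;

    err2:
        r = reset_ok ? 0 : -5;      /* taal_panel_reset() */
        if (!r) {
            /* reset worked: only now re-arm TE and drop the ULPS flag */
            ulps_enabled = false;
        }
    err1:
        /* taal_queue_ulps_work(): retry later in every error case */
        return r;
    }

    int main(void)
    {
        ulps_enabled = true; display_ok = 1; te_ok = 0; reset_ok = 1;
        printf("r=%d ulps=%d\n", model_exit_ulps(), ulps_enabled);  /* r=0  ulps=0 */

        ulps_enabled = true; reset_ok = 0;
        printf("r=%d ulps=%d\n", model_exit_ulps(), ulps_enabled);  /* r=-5 ulps=1 */
        return 0;
    }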
@@ -1241,11 +1247,8 @@ static void taal_power_off(struct omap_dss_device *dssdev) | |||
1241 | int r; | 1247 | int r; |
1242 | 1248 | ||
1243 | r = taal_dcs_write_0(td, DCS_DISPLAY_OFF); | 1249 | r = taal_dcs_write_0(td, DCS_DISPLAY_OFF); |
1244 | if (!r) { | 1250 | if (!r) |
1245 | r = taal_sleep_in(td); | 1251 | r = taal_sleep_in(td); |
1246 | /* HACK: wait a bit so that the message goes through */ | ||
1247 | msleep(10); | ||
1248 | } | ||
1249 | 1252 | ||
1250 | if (r) { | 1253 | if (r) { |
1251 | dev_err(&dssdev->dev, | 1254 | dev_err(&dssdev->dev, |
@@ -1317,8 +1320,11 @@ static void taal_disable(struct omap_dss_device *dssdev) | |||
1317 | dsi_bus_lock(dssdev); | 1320 | dsi_bus_lock(dssdev); |
1318 | 1321 | ||
1319 | if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { | 1322 | if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { |
1320 | taal_wake_up(dssdev); | 1323 | int r; |
1321 | taal_power_off(dssdev); | 1324 | |
1325 | r = taal_wake_up(dssdev); | ||
1326 | if (!r) | ||
1327 | taal_power_off(dssdev); | ||
1322 | } | 1328 | } |
1323 | 1329 | ||
1324 | dsi_bus_unlock(dssdev); | 1330 | dsi_bus_unlock(dssdev); |
@@ -1897,20 +1903,6 @@ err: | |||
1897 | mutex_unlock(&td->lock); | 1903 | mutex_unlock(&td->lock); |
1898 | } | 1904 | } |
1899 | 1905 | ||
1900 | static int taal_set_update_mode(struct omap_dss_device *dssdev, | ||
1901 | enum omap_dss_update_mode mode) | ||
1902 | { | ||
1903 | if (mode != OMAP_DSS_UPDATE_MANUAL) | ||
1904 | return -EINVAL; | ||
1905 | return 0; | ||
1906 | } | ||
1907 | |||
1908 | static enum omap_dss_update_mode taal_get_update_mode( | ||
1909 | struct omap_dss_device *dssdev) | ||
1910 | { | ||
1911 | return OMAP_DSS_UPDATE_MANUAL; | ||
1912 | } | ||
1913 | |||
1914 | static struct omap_dss_driver taal_driver = { | 1906 | static struct omap_dss_driver taal_driver = { |
1915 | .probe = taal_probe, | 1907 | .probe = taal_probe, |
1916 | .remove = __exit_p(taal_remove), | 1908 | .remove = __exit_p(taal_remove), |
@@ -1920,9 +1912,6 @@ static struct omap_dss_driver taal_driver = { | |||
1920 | .suspend = taal_suspend, | 1912 | .suspend = taal_suspend, |
1921 | .resume = taal_resume, | 1913 | .resume = taal_resume, |
1922 | 1914 | ||
1923 | .set_update_mode = taal_set_update_mode, | ||
1924 | .get_update_mode = taal_get_update_mode, | ||
1925 | |||
1926 | .update = taal_update, | 1915 | .update = taal_update, |
1927 | .sync = taal_sync, | 1916 | .sync = taal_sync, |
1928 | 1917 | ||
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 6b3e2da11419..0d12524db14b 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig | |||
@@ -117,18 +117,6 @@ config OMAP2_DSS_MIN_FCK_PER_PCK | |||
117 | Max FCK is 173MHz, so this doesn't work if your PCK | 117 | Max FCK is 173MHz, so this doesn't work if your PCK |
118 | is very high. | 118 | is very high. |
119 | 119 | ||
120 | config OMAP2_DSS_SLEEP_BEFORE_RESET | ||
121 | bool "Sleep 50ms before DSS reset" | ||
122 | default y | ||
123 | help | ||
124 | For some unknown reason we may get SYNC_LOST errors from the display | ||
125 | subsystem at initialization time if we don't sleep before resetting | ||
126 | the DSS. See the source (dss.c) for more comments. | ||
127 | |||
128 | However, 50ms is quite long time to sleep, and with some | ||
129 | configurations the SYNC_LOST may never happen, so the sleep can | ||
130 | be disabled here. | ||
131 | |||
132 | config OMAP2_DSS_SLEEP_AFTER_VENC_RESET | 120 | config OMAP2_DSS_SLEEP_AFTER_VENC_RESET |
133 | bool "Sleep 20ms after VENC reset" | 121 | bool "Sleep 20ms after VENC reset" |
134 | default y | 122 | default y |
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 3da426719dd6..76821fefce9a 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c | |||
@@ -183,8 +183,11 @@ static int omap_dss_probe(struct platform_device *pdev) | |||
183 | goto err_dss; | 183 | goto err_dss; |
184 | } | 184 | } |
185 | 185 | ||
186 | /* keep clocks enabled to prevent context saves/restores during init */ | 186 | r = dispc_init_platform_driver(); |
187 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 187 | if (r) { |
188 | DSSERR("Failed to initialize dispc platform driver\n"); | ||
189 | goto err_dispc; | ||
190 | } | ||
188 | 191 | ||
189 | r = rfbi_init_platform_driver(); | 192 | r = rfbi_init_platform_driver(); |
190 | if (r) { | 193 | if (r) { |
@@ -192,12 +195,6 @@ static int omap_dss_probe(struct platform_device *pdev) | |||
192 | goto err_rfbi; | 195 | goto err_rfbi; |
193 | } | 196 | } |
194 | 197 | ||
195 | r = dispc_init_platform_driver(); | ||
196 | if (r) { | ||
197 | DSSERR("Failed to initialize dispc platform driver\n"); | ||
198 | goto err_dispc; | ||
199 | } | ||
200 | |||
201 | r = venc_init_platform_driver(); | 198 | r = venc_init_platform_driver(); |
202 | if (r) { | 199 | if (r) { |
203 | DSSERR("Failed to initialize venc platform driver\n"); | 200 | DSSERR("Failed to initialize venc platform driver\n"); |
@@ -238,8 +235,6 @@ static int omap_dss_probe(struct platform_device *pdev) | |||
238 | pdata->default_device = dssdev; | 235 | pdata->default_device = dssdev; |
239 | } | 236 | } |
240 | 237 | ||
241 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
242 | |||
243 | return 0; | 238 | return 0; |
244 | 239 | ||
245 | err_register: | 240 | err_register: |
@@ -268,11 +263,11 @@ static int omap_dss_remove(struct platform_device *pdev) | |||
268 | 263 | ||
269 | dss_uninitialize_debugfs(); | 264 | dss_uninitialize_debugfs(); |
270 | 265 | ||
266 | hdmi_uninit_platform_driver(); | ||
267 | dsi_uninit_platform_driver(); | ||
271 | venc_uninit_platform_driver(); | 268 | venc_uninit_platform_driver(); |
272 | dispc_uninit_platform_driver(); | ||
273 | rfbi_uninit_platform_driver(); | 269 | rfbi_uninit_platform_driver(); |
274 | dsi_uninit_platform_driver(); | 270 | dispc_uninit_platform_driver(); |
275 | hdmi_uninit_platform_driver(); | ||
276 | dss_uninit_platform_driver(); | 271 | dss_uninit_platform_driver(); |
277 | 272 | ||
278 | dss_uninit_overlays(pdev); | 273 | dss_uninit_overlays(pdev); |
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 7a9a2e7d9685..0f3961a1ce26 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
34 | #include <linux/hardirq.h> | 34 | #include <linux/hardirq.h> |
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
36 | #include <linux/platform_device.h> | ||
37 | #include <linux/pm_runtime.h> | ||
36 | 38 | ||
37 | #include <plat/sram.h> | 39 | #include <plat/sram.h> |
38 | #include <plat/clock.h> | 40 | #include <plat/clock.h> |
@@ -77,6 +79,12 @@ struct dispc_v_coef { | |||
77 | s8 vc00; | 79 | s8 vc00; |
78 | }; | 80 | }; |
79 | 81 | ||
82 | enum omap_burst_size { | ||
83 | BURST_SIZE_X2 = 0, | ||
84 | BURST_SIZE_X4 = 1, | ||
85 | BURST_SIZE_X8 = 2, | ||
86 | }; | ||
87 | |||
80 | #define REG_GET(idx, start, end) \ | 88 | #define REG_GET(idx, start, end) \ |
81 | FLD_GET(dispc_read_reg(idx), start, end) | 89 | FLD_GET(dispc_read_reg(idx), start, end) |
82 | 90 | ||
@@ -92,7 +100,11 @@ struct dispc_irq_stats { | |||
92 | static struct { | 100 | static struct { |
93 | struct platform_device *pdev; | 101 | struct platform_device *pdev; |
94 | void __iomem *base; | 102 | void __iomem *base; |
103 | |||
104 | int ctx_loss_cnt; | ||
105 | |||
95 | int irq; | 106 | int irq; |
107 | struct clk *dss_clk; | ||
96 | 108 | ||
97 | u32 fifo_size[3]; | 109 | u32 fifo_size[3]; |
98 | 110 | ||
@@ -102,6 +114,7 @@ static struct { | |||
102 | u32 error_irqs; | 114 | u32 error_irqs; |
103 | struct work_struct error_work; | 115 | struct work_struct error_work; |
104 | 116 | ||
117 | bool ctx_valid; | ||
105 | u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; | 118 | u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; |
106 | 119 | ||
107 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 120 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
@@ -134,18 +147,34 @@ static inline u32 dispc_read_reg(const u16 idx) | |||
134 | return __raw_readl(dispc.base + idx); | 147 | return __raw_readl(dispc.base + idx); |
135 | } | 148 | } |
136 | 149 | ||
150 | static int dispc_get_ctx_loss_count(void) | ||
151 | { | ||
152 | struct device *dev = &dispc.pdev->dev; | ||
153 | struct omap_display_platform_data *pdata = dev->platform_data; | ||
154 | struct omap_dss_board_info *board_data = pdata->board_data; | ||
155 | int cnt; | ||
156 | |||
157 | if (!board_data->get_context_loss_count) | ||
158 | return -ENOENT; | ||
159 | |||
160 | cnt = board_data->get_context_loss_count(dev); | ||
161 | |||
162 | WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt); | ||
163 | |||
164 | return cnt; | ||
165 | } | ||
166 | |||
137 | #define SR(reg) \ | 167 | #define SR(reg) \ |
138 | dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) | 168 | dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) |
139 | #define RR(reg) \ | 169 | #define RR(reg) \ |
140 | dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)]) | 170 | dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)]) |
141 | 171 | ||
142 | void dispc_save_context(void) | 172 | static void dispc_save_context(void) |
143 | { | 173 | { |
144 | int i; | 174 | int i; |
145 | if (cpu_is_omap24xx()) | ||
146 | return; | ||
147 | 175 | ||
148 | SR(SYSCONFIG); | 176 | DSSDBG("dispc_save_context\n"); |
177 | |||
149 | SR(IRQENABLE); | 178 | SR(IRQENABLE); |
150 | SR(CONTROL); | 179 | SR(CONTROL); |
151 | SR(CONFIG); | 180 | SR(CONFIG); |
@@ -158,7 +187,8 @@ void dispc_save_context(void) | |||
158 | SR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); | 187 | SR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); |
159 | SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); | 188 | SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); |
160 | SR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); | 189 | SR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); |
161 | SR(GLOBAL_ALPHA); | 190 | if (dss_has_feature(FEAT_GLOBAL_ALPHA)) |
191 | SR(GLOBAL_ALPHA); | ||
162 | SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); | 192 | SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); |
163 | SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); | 193 | SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); |
164 | if (dss_has_feature(FEAT_MGR_LCD2)) { | 194 | if (dss_has_feature(FEAT_MGR_LCD2)) { |
@@ -188,20 +218,25 @@ void dispc_save_context(void) | |||
188 | SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); | 218 | SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); |
189 | SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); | 219 | SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); |
190 | 220 | ||
191 | SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); | 221 | if (dss_has_feature(FEAT_CPR)) { |
192 | SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); | 222 | SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); |
193 | SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); | 223 | SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); |
224 | SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); | ||
225 | } | ||
194 | if (dss_has_feature(FEAT_MGR_LCD2)) { | 226 | if (dss_has_feature(FEAT_MGR_LCD2)) { |
195 | SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); | 227 | if (dss_has_feature(FEAT_CPR)) { |
196 | SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); | 228 | SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); |
197 | SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); | 229 | SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); |
230 | SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); | ||
231 | } | ||
198 | 232 | ||
199 | SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); | 233 | SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); |
200 | SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); | 234 | SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); |
201 | SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); | 235 | SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); |
202 | } | 236 | } |
203 | 237 | ||
204 | SR(OVL_PRELOAD(OMAP_DSS_GFX)); | 238 | if (dss_has_feature(FEAT_PRELOAD)) |
239 | SR(OVL_PRELOAD(OMAP_DSS_GFX)); | ||
205 | 240 | ||
206 | /* VID1 */ | 241 | /* VID1 */ |
207 | SR(OVL_BA0(OMAP_DSS_VIDEO1)); | 242 | SR(OVL_BA0(OMAP_DSS_VIDEO1)); |
@@ -226,8 +261,10 @@ void dispc_save_context(void) | |||
226 | for (i = 0; i < 5; i++) | 261 | for (i = 0; i < 5; i++) |
227 | SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); | 262 | SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); |
228 | 263 | ||
229 | for (i = 0; i < 8; i++) | 264 | if (dss_has_feature(FEAT_FIR_COEF_V)) { |
230 | SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); | 265 | for (i = 0; i < 8; i++) |
266 | SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); | ||
267 | } | ||
231 | 268 | ||
232 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { | 269 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { |
233 | SR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); | 270 | SR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); |
@@ -248,7 +285,8 @@ void dispc_save_context(void) | |||
248 | if (dss_has_feature(FEAT_ATTR2)) | 285 | if (dss_has_feature(FEAT_ATTR2)) |
249 | SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); | 286 | SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); |
250 | 287 | ||
251 | SR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); | 288 | if (dss_has_feature(FEAT_PRELOAD)) |
289 | SR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); | ||
252 | 290 | ||
253 | /* VID2 */ | 291 | /* VID2 */ |
254 | SR(OVL_BA0(OMAP_DSS_VIDEO2)); | 292 | SR(OVL_BA0(OMAP_DSS_VIDEO2)); |
@@ -273,8 +311,10 @@ void dispc_save_context(void) | |||
273 | for (i = 0; i < 5; i++) | 311 | for (i = 0; i < 5; i++) |
274 | SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); | 312 | SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); |
275 | 313 | ||
276 | for (i = 0; i < 8; i++) | 314 | if (dss_has_feature(FEAT_FIR_COEF_V)) { |
277 | SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); | 315 | for (i = 0; i < 8; i++) |
316 | SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); | ||
317 | } | ||
278 | 318 | ||
279 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { | 319 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { |
280 | SR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); | 320 | SR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); |
@@ -295,16 +335,35 @@ void dispc_save_context(void) | |||
295 | if (dss_has_feature(FEAT_ATTR2)) | 335 | if (dss_has_feature(FEAT_ATTR2)) |
296 | SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); | 336 | SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); |
297 | 337 | ||
298 | SR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); | 338 | if (dss_has_feature(FEAT_PRELOAD)) |
339 | SR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); | ||
299 | 340 | ||
300 | if (dss_has_feature(FEAT_CORE_CLK_DIV)) | 341 | if (dss_has_feature(FEAT_CORE_CLK_DIV)) |
301 | SR(DIVISOR); | 342 | SR(DIVISOR); |
343 | |||
344 | dispc.ctx_loss_cnt = dispc_get_ctx_loss_count(); | ||
345 | dispc.ctx_valid = true; | ||
346 | |||
347 | DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt); | ||
302 | } | 348 | } |
303 | 349 | ||
304 | void dispc_restore_context(void) | 350 | static void dispc_restore_context(void) |
305 | { | 351 | { |
306 | int i; | 352 | int i, ctx; |
307 | RR(SYSCONFIG); | 353 | |
354 | DSSDBG("dispc_restore_context\n"); | ||
355 | |||
356 | if (!dispc.ctx_valid) | ||
357 | return; | ||
358 | |||
359 | ctx = dispc_get_ctx_loss_count(); | ||
360 | |||
361 | if (ctx >= 0 && ctx == dispc.ctx_loss_cnt) | ||
362 | return; | ||
363 | |||
364 | DSSDBG("ctx_loss_count: saved %d, current %d\n", | ||
365 | dispc.ctx_loss_cnt, ctx); | ||
366 | |||
308 | /*RR(IRQENABLE);*/ | 367 | /*RR(IRQENABLE);*/ |
309 | /*RR(CONTROL);*/ | 368 | /*RR(CONTROL);*/ |
310 | RR(CONFIG); | 369 | RR(CONFIG); |
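Editor's note: dispc_save_context() now stamps the snapshot with the platform's context-loss counter (dispc_get_ctx_loss_count() above) and marks it valid; dispc_restore_context() bails out early when nothing was ever saved, or when the counter has not advanced and the DISPC registers therefore survived. If the counter cannot be read (negative return), the restore proceeds to stay on the safe side. A minimal userspace model of that decision:

    #include <stdbool.h>
    #include <stdio.h>

    static int saved_cnt;
    static bool ctx_valid;

    static void model_save(int current_cnt)
    {
        /* ...register snapshot elided... */
        saved_cnt = current_cnt;
        ctx_valid = true;
    }

    static bool model_need_restore(int current_cnt)
    {
        if (!ctx_valid)
            return false;                           /* nothing ever saved */
        if (current_cnt >= 0 && current_cnt == saved_cnt)
            return false;                           /* no context loss since save */
        return true;                                /* lost (or unknown): restore */
    }

    int main(void)
    {
        model_save(4);
        printf("%d\n", model_need_restore(4));      /* 0: registers intact  */
        printf("%d\n", model_need_restore(5));      /* 1: counter advanced  */
        printf("%d\n", model_need_restore(-19));    /* 1: counter unreadable */
        return 0;
    }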
@@ -317,7 +376,8 @@ void dispc_restore_context(void) | |||
317 | RR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); | 376 | RR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); |
318 | RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); | 377 | RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); |
319 | RR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); | 378 | RR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); |
320 | RR(GLOBAL_ALPHA); | 379 | if (dss_has_feature(FEAT_GLOBAL_ALPHA)) |
380 | RR(GLOBAL_ALPHA); | ||
321 | RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); | 381 | RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); |
322 | RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); | 382 | RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); |
323 | if (dss_has_feature(FEAT_MGR_LCD2)) { | 383 | if (dss_has_feature(FEAT_MGR_LCD2)) { |
@@ -347,20 +407,25 @@ void dispc_restore_context(void) | |||
347 | RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); | 407 | RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); |
348 | RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); | 408 | RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); |
349 | 409 | ||
350 | RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); | 410 | if (dss_has_feature(FEAT_CPR)) { |
351 | RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); | 411 | RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); |
352 | RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); | 412 | RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); |
413 | RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); | ||
414 | } | ||
353 | if (dss_has_feature(FEAT_MGR_LCD2)) { | 415 | if (dss_has_feature(FEAT_MGR_LCD2)) { |
354 | RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); | 416 | RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); |
355 | RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); | 417 | RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); |
356 | RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); | 418 | RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); |
357 | 419 | ||
358 | RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); | 420 | if (dss_has_feature(FEAT_CPR)) { |
359 | RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); | 421 | RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); |
360 | RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); | 422 | RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); |
423 | RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); | ||
424 | } | ||
361 | } | 425 | } |
362 | 426 | ||
363 | RR(OVL_PRELOAD(OMAP_DSS_GFX)); | 427 | if (dss_has_feature(FEAT_PRELOAD)) |
428 | RR(OVL_PRELOAD(OMAP_DSS_GFX)); | ||
364 | 429 | ||
365 | /* VID1 */ | 430 | /* VID1 */ |
366 | RR(OVL_BA0(OMAP_DSS_VIDEO1)); | 431 | RR(OVL_BA0(OMAP_DSS_VIDEO1)); |
@@ -385,8 +450,10 @@ void dispc_restore_context(void) | |||
385 | for (i = 0; i < 5; i++) | 450 | for (i = 0; i < 5; i++) |
386 | RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); | 451 | RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); |
387 | 452 | ||
388 | for (i = 0; i < 8; i++) | 453 | if (dss_has_feature(FEAT_FIR_COEF_V)) { |
389 | RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); | 454 | for (i = 0; i < 8; i++) |
455 | RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); | ||
456 | } | ||
390 | 457 | ||
391 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { | 458 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { |
392 | RR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); | 459 | RR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); |
@@ -407,7 +474,8 @@ void dispc_restore_context(void) | |||
407 | if (dss_has_feature(FEAT_ATTR2)) | 474 | if (dss_has_feature(FEAT_ATTR2)) |
408 | RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); | 475 | RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); |
409 | 476 | ||
410 | RR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); | 477 | if (dss_has_feature(FEAT_PRELOAD)) |
478 | RR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); | ||
411 | 479 | ||
412 | /* VID2 */ | 480 | /* VID2 */ |
413 | RR(OVL_BA0(OMAP_DSS_VIDEO2)); | 481 | RR(OVL_BA0(OMAP_DSS_VIDEO2)); |
@@ -432,8 +500,10 @@ void dispc_restore_context(void) | |||
432 | for (i = 0; i < 5; i++) | 500 | for (i = 0; i < 5; i++) |
433 | RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); | 501 | RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); |
434 | 502 | ||
435 | for (i = 0; i < 8; i++) | 503 | if (dss_has_feature(FEAT_FIR_COEF_V)) { |
436 | RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); | 504 | for (i = 0; i < 8; i++) |
505 | RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); | ||
506 | } | ||
437 | 507 | ||
438 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { | 508 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { |
439 | RR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); | 509 | RR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); |
@@ -454,7 +524,8 @@ void dispc_restore_context(void) | |||
454 | if (dss_has_feature(FEAT_ATTR2)) | 524 | if (dss_has_feature(FEAT_ATTR2)) |
455 | RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); | 525 | RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); |
456 | 526 | ||
457 | RR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); | 527 | if (dss_has_feature(FEAT_PRELOAD)) |
528 | RR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); | ||
458 | 529 | ||
459 | if (dss_has_feature(FEAT_CORE_CLK_DIV)) | 530 | if (dss_has_feature(FEAT_CORE_CLK_DIV)) |
460 | RR(DIVISOR); | 531 | RR(DIVISOR); |
@@ -471,19 +542,35 @@ void dispc_restore_context(void) | |||
471 | * the context is fully restored | 542 | * the context is fully restored |
472 | */ | 543 | */ |
473 | RR(IRQENABLE); | 544 | RR(IRQENABLE); |
545 | |||
546 | DSSDBG("context restored\n"); | ||
474 | } | 547 | } |
475 | 548 | ||
476 | #undef SR | 549 | #undef SR |
477 | #undef RR | 550 | #undef RR |
478 | 551 | ||
479 | static inline void enable_clocks(bool enable) | 552 | int dispc_runtime_get(void) |
480 | { | 553 | { |
481 | if (enable) | 554 | int r; |
482 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 555 | |
483 | else | 556 | DSSDBG("dispc_runtime_get\n"); |
484 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 557 | |
558 | r = pm_runtime_get_sync(&dispc.pdev->dev); | ||
559 | WARN_ON(r < 0); | ||
560 | return r < 0 ? r : 0; | ||
485 | } | 561 | } |
486 | 562 | ||
563 | void dispc_runtime_put(void) | ||
564 | { | ||
565 | int r; | ||
566 | |||
567 | DSSDBG("dispc_runtime_put\n"); | ||
568 | |||
569 | r = pm_runtime_put(&dispc.pdev->dev); | ||
570 | WARN_ON(r < 0); | ||
571 | } | ||
572 | |||
573 | |||
487 | bool dispc_go_busy(enum omap_channel channel) | 574 | bool dispc_go_busy(enum omap_channel channel) |
488 | { | 575 | { |
489 | int bit; | 576 | int bit; |
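Editor's note: the old enable_clocks()/dss_clk_enable() bracketing gives way to runtime PM; dispc_runtime_get() and dispc_runtime_put() (added above) wrap pm_runtime_get_sync()/pm_runtime_put() on the dispc platform device, and register-touching paths are expected to hold such a reference instead of toggling clocks directly. A hedged sketch of the caller-side pattern only; the function name below is illustrative and not from this patch.

    static int example_poke_dispc(void)
    {
        int r;

        r = dispc_runtime_get();    /* may sleep; powers and clocks DISPC */
        if (r)
            return r;

        /* ...dispc_read_reg()/dispc_write_reg() accesses go here... */

        dispc_runtime_put();        /* drop the reference again */
        return 0;
    }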
@@ -505,8 +592,6 @@ void dispc_go(enum omap_channel channel) | |||
505 | int bit; | 592 | int bit; |
506 | bool enable_bit, go_bit; | 593 | bool enable_bit, go_bit; |
507 | 594 | ||
508 | enable_clocks(1); | ||
509 | |||
510 | if (channel == OMAP_DSS_CHANNEL_LCD || | 595 | if (channel == OMAP_DSS_CHANNEL_LCD || |
511 | channel == OMAP_DSS_CHANNEL_LCD2) | 596 | channel == OMAP_DSS_CHANNEL_LCD2) |
512 | bit = 0; /* LCDENABLE */ | 597 | bit = 0; /* LCDENABLE */ |
@@ -520,7 +605,7 @@ void dispc_go(enum omap_channel channel) | |||
520 | enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; | 605 | enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; |
521 | 606 | ||
522 | if (!enable_bit) | 607 | if (!enable_bit) |
523 | goto end; | 608 | return; |
524 | 609 | ||
525 | if (channel == OMAP_DSS_CHANNEL_LCD || | 610 | if (channel == OMAP_DSS_CHANNEL_LCD || |
526 | channel == OMAP_DSS_CHANNEL_LCD2) | 611 | channel == OMAP_DSS_CHANNEL_LCD2) |
@@ -535,7 +620,7 @@ void dispc_go(enum omap_channel channel) | |||
535 | 620 | ||
536 | if (go_bit) { | 621 | if (go_bit) { |
537 | DSSERR("GO bit not down for channel %d\n", channel); | 622 | DSSERR("GO bit not down for channel %d\n", channel); |
538 | goto end; | 623 | return; |
539 | } | 624 | } |
540 | 625 | ||
541 | DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : | 626 | DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : |
@@ -545,8 +630,6 @@ void dispc_go(enum omap_channel channel) | |||
545 | REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); | 630 | REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); |
546 | else | 631 | else |
547 | REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); | 632 | REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); |
548 | end: | ||
549 | enable_clocks(0); | ||
550 | } | 633 | } |
551 | 634 | ||
552 | static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) | 635 | static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) |
@@ -920,7 +1003,7 @@ static void _dispc_set_color_mode(enum omap_plane plane, | |||
920 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); | 1003 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); |
921 | } | 1004 | } |
922 | 1005 | ||
923 | static void _dispc_set_channel_out(enum omap_plane plane, | 1006 | void dispc_set_channel_out(enum omap_plane plane, |
924 | enum omap_channel channel) | 1007 | enum omap_channel channel) |
925 | { | 1008 | { |
926 | int shift; | 1009 | int shift; |
@@ -967,13 +1050,10 @@ static void _dispc_set_channel_out(enum omap_plane plane, | |||
967 | dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); | 1050 | dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); |
968 | } | 1051 | } |
969 | 1052 | ||
970 | void dispc_set_burst_size(enum omap_plane plane, | 1053 | static void dispc_set_burst_size(enum omap_plane plane, |
971 | enum omap_burst_size burst_size) | 1054 | enum omap_burst_size burst_size) |
972 | { | 1055 | { |
973 | int shift; | 1056 | int shift; |
974 | u32 val; | ||
975 | |||
976 | enable_clocks(1); | ||
977 | 1057 | ||
978 | switch (plane) { | 1058 | switch (plane) { |
979 | case OMAP_DSS_GFX: | 1059 | case OMAP_DSS_GFX: |
@@ -988,11 +1068,24 @@ void dispc_set_burst_size(enum omap_plane plane, | |||
988 | return; | 1068 | return; |
989 | } | 1069 | } |
990 | 1070 | ||
991 | val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); | 1071 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift); |
992 | val = FLD_MOD(val, burst_size, shift+1, shift); | 1072 | } |
993 | dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); | ||
994 | 1073 | ||
995 | enable_clocks(0); | 1074 | static void dispc_configure_burst_sizes(void) |
1075 | { | ||
1076 | int i; | ||
1077 | const int burst_size = BURST_SIZE_X8; | ||
1078 | |||
1079 | /* Configure burst size always to maximum size */ | ||
1080 | for (i = 0; i < omap_dss_get_num_overlays(); ++i) | ||
1081 | dispc_set_burst_size(i, burst_size); | ||
1082 | } | ||
1083 | |||
1084 | u32 dispc_get_burst_size(enum omap_plane plane) | ||
1085 | { | ||
1086 | unsigned unit = dss_feat_get_burst_size_unit(); | ||
1087 | /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */ | ||
1088 | return unit * 8; | ||
996 | } | 1089 | } |
997 | 1090 | ||
998 | void dispc_enable_gamma_table(bool enable) | 1091 | void dispc_enable_gamma_table(bool enable) |
@@ -1009,6 +1102,40 @@ void dispc_enable_gamma_table(bool enable) | |||
1009 | REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); | 1102 | REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); |
1010 | } | 1103 | } |
1011 | 1104 | ||
1105 | void dispc_enable_cpr(enum omap_channel channel, bool enable) | ||
1106 | { | ||
1107 | u16 reg; | ||
1108 | |||
1109 | if (channel == OMAP_DSS_CHANNEL_LCD) | ||
1110 | reg = DISPC_CONFIG; | ||
1111 | else if (channel == OMAP_DSS_CHANNEL_LCD2) | ||
1112 | reg = DISPC_CONFIG2; | ||
1113 | else | ||
1114 | return; | ||
1115 | |||
1116 | REG_FLD_MOD(reg, enable, 15, 15); | ||
1117 | } | ||
1118 | |||
1119 | void dispc_set_cpr_coef(enum omap_channel channel, | ||
1120 | struct omap_dss_cpr_coefs *coefs) | ||
1121 | { | ||
1122 | u32 coef_r, coef_g, coef_b; | ||
1123 | |||
1124 | if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2) | ||
1125 | return; | ||
1126 | |||
1127 | coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) | | ||
1128 | FLD_VAL(coefs->rb, 9, 0); | ||
1129 | coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) | | ||
1130 | FLD_VAL(coefs->gb, 9, 0); | ||
1131 | coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) | | ||
1132 | FLD_VAL(coefs->bb, 9, 0); | ||
1133 | |||
1134 | dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r); | ||
1135 | dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g); | ||
1136 | dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b); | ||
1137 | } | ||
1138 | |||
1012 | static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable) | 1139 | static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable) |
1013 | { | 1140 | { |
1014 | u32 val; | 1141 | u32 val; |
@@ -1029,9 +1156,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable) | |||
1029 | else | 1156 | else |
1030 | bit = 10; | 1157 | bit = 10; |
1031 | 1158 | ||
1032 | enable_clocks(1); | ||
1033 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit); | 1159 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit); |
1034 | enable_clocks(0); | ||
1035 | } | 1160 | } |
1036 | 1161 | ||
1037 | void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) | 1162 | void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) |
@@ -1039,9 +1164,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) | |||
1039 | u32 val; | 1164 | u32 val; |
1040 | BUG_ON((width > (1 << 11)) || (height > (1 << 11))); | 1165 | BUG_ON((width > (1 << 11)) || (height > (1 << 11))); |
1041 | val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); | 1166 | val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); |
1042 | enable_clocks(1); | ||
1043 | dispc_write_reg(DISPC_SIZE_MGR(channel), val); | 1167 | dispc_write_reg(DISPC_SIZE_MGR(channel), val); |
1044 | enable_clocks(0); | ||
1045 | } | 1168 | } |
1046 | 1169 | ||
1047 | void dispc_set_digit_size(u16 width, u16 height) | 1170 | void dispc_set_digit_size(u16 width, u16 height) |
@@ -1049,9 +1172,7 @@ void dispc_set_digit_size(u16 width, u16 height) | |||
1049 | u32 val; | 1172 | u32 val; |
1050 | BUG_ON((width > (1 << 11)) || (height > (1 << 11))); | 1173 | BUG_ON((width > (1 << 11)) || (height > (1 << 11))); |
1051 | val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); | 1174 | val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); |
1052 | enable_clocks(1); | ||
1053 | dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); | 1175 | dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); |
1054 | enable_clocks(0); | ||
1055 | } | 1176 | } |
1056 | 1177 | ||
1057 | static void dispc_read_plane_fifo_sizes(void) | 1178 | static void dispc_read_plane_fifo_sizes(void) |
@@ -1059,18 +1180,17 @@ static void dispc_read_plane_fifo_sizes(void) | |||
1059 | u32 size; | 1180 | u32 size; |
1060 | int plane; | 1181 | int plane; |
1061 | u8 start, end; | 1182 | u8 start, end; |
1183 | u32 unit; | ||
1062 | 1184 | ||
1063 | enable_clocks(1); | 1185 | unit = dss_feat_get_buffer_size_unit(); |
1064 | 1186 | ||
1065 | dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); | 1187 | dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); |
1066 | 1188 | ||
1067 | for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { | 1189 | for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { |
1068 | size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)), | 1190 | size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end); |
1069 | start, end); | 1191 | size *= unit; |
1070 | dispc.fifo_size[plane] = size; | 1192 | dispc.fifo_size[plane] = size; |
1071 | } | 1193 | } |
1072 | |||
1073 | enable_clocks(0); | ||
1074 | } | 1194 | } |
1075 | 1195 | ||
1076 | u32 dispc_get_plane_fifo_size(enum omap_plane plane) | 1196 | u32 dispc_get_plane_fifo_size(enum omap_plane plane) |
@@ -1078,15 +1198,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane) | |||
1078 | return dispc.fifo_size[plane]; | 1198 | return dispc.fifo_size[plane]; |
1079 | } | 1199 | } |
1080 | 1200 | ||
1081 | void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) | 1201 | void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high) |
1082 | { | 1202 | { |
1083 | u8 hi_start, hi_end, lo_start, lo_end; | 1203 | u8 hi_start, hi_end, lo_start, lo_end; |
1204 | u32 unit; | ||
1205 | |||
1206 | unit = dss_feat_get_buffer_size_unit(); | ||
1207 | |||
1208 | WARN_ON(low % unit != 0); | ||
1209 | WARN_ON(high % unit != 0); | ||
1210 | |||
1211 | low /= unit; | ||
1212 | high /= unit; | ||
1084 | 1213 | ||
1085 | dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); | 1214 | dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); |
1086 | dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); | 1215 | dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); |
1087 | 1216 | ||
1088 | enable_clocks(1); | ||
1089 | |||
1090 | DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", | 1217 | DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", |
1091 | plane, | 1218 | plane, |
1092 | REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), | 1219 | REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), |
@@ -1098,18 +1225,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) | |||
1098 | dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane), | 1225 | dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane), |
1099 | FLD_VAL(high, hi_start, hi_end) | | 1226 | FLD_VAL(high, hi_start, hi_end) | |
1100 | FLD_VAL(low, lo_start, lo_end)); | 1227 | FLD_VAL(low, lo_start, lo_end)); |
1101 | |||
1102 | enable_clocks(0); | ||
1103 | } | 1228 | } |
1104 | 1229 | ||
1105 | void dispc_enable_fifomerge(bool enable) | 1230 | void dispc_enable_fifomerge(bool enable) |
1106 | { | 1231 | { |
1107 | enable_clocks(1); | ||
1108 | |||
1109 | DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); | 1232 | DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); |
1110 | REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14); | 1233 | REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14); |
1111 | |||
1112 | enable_clocks(0); | ||
1113 | } | 1234 | } |
1114 | 1235 | ||
1115 | static void _dispc_set_fir(enum omap_plane plane, | 1236 | static void _dispc_set_fir(enum omap_plane plane, |
@@ -1729,14 +1850,7 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width, | |||
1729 | return dispc_pclk_rate(channel) * vf * hf; | 1850 | return dispc_pclk_rate(channel) * vf * hf; |
1730 | } | 1851 | } |
1731 | 1852 | ||
1732 | void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out) | 1853 | int dispc_setup_plane(enum omap_plane plane, |
1733 | { | ||
1734 | enable_clocks(1); | ||
1735 | _dispc_set_channel_out(plane, channel_out); | ||
1736 | enable_clocks(0); | ||
1737 | } | ||
1738 | |||
1739 | static int _dispc_setup_plane(enum omap_plane plane, | ||
1740 | u32 paddr, u16 screen_width, | 1854 | u32 paddr, u16 screen_width, |
1741 | u16 pos_x, u16 pos_y, | 1855 | u16 pos_x, u16 pos_y, |
1742 | u16 width, u16 height, | 1856 | u16 width, u16 height, |
@@ -1744,7 +1858,7 @@ static int _dispc_setup_plane(enum omap_plane plane, | |||
1744 | enum omap_color_mode color_mode, | 1858 | enum omap_color_mode color_mode, |
1745 | bool ilace, | 1859 | bool ilace, |
1746 | enum omap_dss_rotation_type rotation_type, | 1860 | enum omap_dss_rotation_type rotation_type, |
1747 | u8 rotation, int mirror, | 1861 | u8 rotation, bool mirror, |
1748 | u8 global_alpha, u8 pre_mult_alpha, | 1862 | u8 global_alpha, u8 pre_mult_alpha, |
1749 | enum omap_channel channel, u32 puv_addr) | 1863 | enum omap_channel channel, u32 puv_addr) |
1750 | { | 1864 | { |
@@ -1758,6 +1872,14 @@ static int _dispc_setup_plane(enum omap_plane plane, | |||
1758 | u16 frame_height = height; | 1872 | u16 frame_height = height; |
1759 | unsigned int field_offset = 0; | 1873 | unsigned int field_offset = 0; |
1760 | 1874 | ||
1875 | DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> " | ||
1876 | "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", | ||
1877 | plane, paddr, screen_width, pos_x, pos_y, | ||
1878 | width, height, | ||
1879 | out_width, out_height, | ||
1880 | ilace, color_mode, | ||
1881 | rotation, mirror, channel); | ||
1882 | |||
1761 | if (paddr == 0) | 1883 | if (paddr == 0) |
1762 | return -EINVAL; | 1884 | return -EINVAL; |
1763 | 1885 | ||
@@ -1903,9 +2025,13 @@ static int _dispc_setup_plane(enum omap_plane plane, | |||
1903 | return 0; | 2025 | return 0; |
1904 | } | 2026 | } |
1905 | 2027 | ||
1906 | static void _dispc_enable_plane(enum omap_plane plane, bool enable) | 2028 | int dispc_enable_plane(enum omap_plane plane, bool enable) |
1907 | { | 2029 | { |
2030 | DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); | ||
2031 | |||
1908 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0); | 2032 | REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0); |
2033 | |||
2034 | return 0; | ||
1909 | } | 2035 | } |
1910 | 2036 | ||
1911 | static void dispc_disable_isr(void *data, u32 mask) | 2037 | static void dispc_disable_isr(void *data, u32 mask) |
@@ -1929,8 +2055,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable) | |||
1929 | int r; | 2055 | int r; |
1930 | u32 irq; | 2056 | u32 irq; |
1931 | 2057 | ||
1932 | enable_clocks(1); | ||
1933 | |||
1934 | /* When we disable LCD output, we need to wait until frame is done. | 2058 | /* When we disable LCD output, we need to wait until frame is done. |
1935 | * Otherwise the DSS is still working, and turning off the clocks | 2059 | * Otherwise the DSS is still working, and turning off the clocks |
1936 | * prevents DSS from going to OFF mode */ | 2060 | * prevents DSS from going to OFF mode */ |
@@ -1964,8 +2088,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable) | |||
1964 | if (r) | 2088 | if (r) |
1965 | DSSERR("failed to unregister FRAMEDONE isr\n"); | 2089 | DSSERR("failed to unregister FRAMEDONE isr\n"); |
1966 | } | 2090 | } |
1967 | |||
1968 | enable_clocks(0); | ||
1969 | } | 2091 | } |
1970 | 2092 | ||
1971 | static void _enable_digit_out(bool enable) | 2093 | static void _enable_digit_out(bool enable) |
@@ -1978,12 +2100,8 @@ static void dispc_enable_digit_out(bool enable) | |||
1978 | struct completion frame_done_completion; | 2100 | struct completion frame_done_completion; |
1979 | int r; | 2101 | int r; |
1980 | 2102 | ||
1981 | enable_clocks(1); | 2103 | if (REG_GET(DISPC_CONTROL, 1, 1) == enable) |
1982 | |||
1983 | if (REG_GET(DISPC_CONTROL, 1, 1) == enable) { | ||
1984 | enable_clocks(0); | ||
1985 | return; | 2104 | return; |
1986 | } | ||
1987 | 2105 | ||
1988 | if (enable) { | 2106 | if (enable) { |
1989 | unsigned long flags; | 2107 | unsigned long flags; |
@@ -2035,8 +2153,6 @@ static void dispc_enable_digit_out(bool enable) | |||
2035 | _omap_dispc_set_irqs(); | 2153 | _omap_dispc_set_irqs(); |
2036 | spin_unlock_irqrestore(&dispc.irq_lock, flags); | 2154 | spin_unlock_irqrestore(&dispc.irq_lock, flags); |
2037 | } | 2155 | } |
2038 | |||
2039 | enable_clocks(0); | ||
2040 | } | 2156 | } |
2041 | 2157 | ||
2042 | bool dispc_is_channel_enabled(enum omap_channel channel) | 2158 | bool dispc_is_channel_enabled(enum omap_channel channel) |
@@ -2067,9 +2183,7 @@ void dispc_lcd_enable_signal_polarity(bool act_high) | |||
2067 | if (!dss_has_feature(FEAT_LCDENABLEPOL)) | 2183 | if (!dss_has_feature(FEAT_LCDENABLEPOL)) |
2068 | return; | 2184 | return; |
2069 | 2185 | ||
2070 | enable_clocks(1); | ||
2071 | REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); | 2186 | REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); |
2072 | enable_clocks(0); | ||
2073 | } | 2187 | } |
2074 | 2188 | ||
2075 | void dispc_lcd_enable_signal(bool enable) | 2189 | void dispc_lcd_enable_signal(bool enable) |
@@ -2077,9 +2191,7 @@ void dispc_lcd_enable_signal(bool enable) | |||
2077 | if (!dss_has_feature(FEAT_LCDENABLESIGNAL)) | 2191 | if (!dss_has_feature(FEAT_LCDENABLESIGNAL)) |
2078 | return; | 2192 | return; |
2079 | 2193 | ||
2080 | enable_clocks(1); | ||
2081 | REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); | 2194 | REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); |
2082 | enable_clocks(0); | ||
2083 | } | 2195 | } |
2084 | 2196 | ||
2085 | void dispc_pck_free_enable(bool enable) | 2197 | void dispc_pck_free_enable(bool enable) |
@@ -2087,19 +2199,15 @@ void dispc_pck_free_enable(bool enable) | |||
2087 | if (!dss_has_feature(FEAT_PCKFREEENABLE)) | 2199 | if (!dss_has_feature(FEAT_PCKFREEENABLE)) |
2088 | return; | 2200 | return; |
2089 | 2201 | ||
2090 | enable_clocks(1); | ||
2091 | REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27); | 2202 | REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27); |
2092 | enable_clocks(0); | ||
2093 | } | 2203 | } |
2094 | 2204 | ||
2095 | void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable) | 2205 | void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable) |
2096 | { | 2206 | { |
2097 | enable_clocks(1); | ||
2098 | if (channel == OMAP_DSS_CHANNEL_LCD2) | 2207 | if (channel == OMAP_DSS_CHANNEL_LCD2) |
2099 | REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); | 2208 | REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); |
2100 | else | 2209 | else |
2101 | REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16); | 2210 | REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16); |
2102 | enable_clocks(0); | ||
2103 | } | 2211 | } |
2104 | 2212 | ||
2105 | 2213 | ||
@@ -2122,27 +2230,21 @@ void dispc_set_lcd_display_type(enum omap_channel channel, | |||
2122 | return; | 2230 | return; |
2123 | } | 2231 | } |
2124 | 2232 | ||
2125 | enable_clocks(1); | ||
2126 | if (channel == OMAP_DSS_CHANNEL_LCD2) | 2233 | if (channel == OMAP_DSS_CHANNEL_LCD2) |
2127 | REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); | 2234 | REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); |
2128 | else | 2235 | else |
2129 | REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); | 2236 | REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); |
2130 | enable_clocks(0); | ||
2131 | } | 2237 | } |
2132 | 2238 | ||
2133 | void dispc_set_loadmode(enum omap_dss_load_mode mode) | 2239 | void dispc_set_loadmode(enum omap_dss_load_mode mode) |
2134 | { | 2240 | { |
2135 | enable_clocks(1); | ||
2136 | REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1); | 2241 | REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1); |
2137 | enable_clocks(0); | ||
2138 | } | 2242 | } |
2139 | 2243 | ||
2140 | 2244 | ||
2141 | void dispc_set_default_color(enum omap_channel channel, u32 color) | 2245 | void dispc_set_default_color(enum omap_channel channel, u32 color) |
2142 | { | 2246 | { |
2143 | enable_clocks(1); | ||
2144 | dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color); | 2247 | dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color); |
2145 | enable_clocks(0); | ||
2146 | } | 2248 | } |
2147 | 2249 | ||
2148 | u32 dispc_get_default_color(enum omap_channel channel) | 2250 | u32 dispc_get_default_color(enum omap_channel channel) |
@@ -2153,9 +2255,7 @@ u32 dispc_get_default_color(enum omap_channel channel) | |||
2153 | channel != OMAP_DSS_CHANNEL_LCD && | 2255 | channel != OMAP_DSS_CHANNEL_LCD && |
2154 | channel != OMAP_DSS_CHANNEL_LCD2); | 2256 | channel != OMAP_DSS_CHANNEL_LCD2); |
2155 | 2257 | ||
2156 | enable_clocks(1); | ||
2157 | l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel)); | 2258 | l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel)); |
2158 | enable_clocks(0); | ||
2159 | 2259 | ||
2160 | return l; | 2260 | return l; |
2161 | } | 2261 | } |
@@ -2164,7 +2264,6 @@ void dispc_set_trans_key(enum omap_channel ch, | |||
2164 | enum omap_dss_trans_key_type type, | 2264 | enum omap_dss_trans_key_type type, |
2165 | u32 trans_key) | 2265 | u32 trans_key) |
2166 | { | 2266 | { |
2167 | enable_clocks(1); | ||
2168 | if (ch == OMAP_DSS_CHANNEL_LCD) | 2267 | if (ch == OMAP_DSS_CHANNEL_LCD) |
2169 | REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); | 2268 | REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); |
2170 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) | 2269 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) |
@@ -2173,14 +2272,12 @@ void dispc_set_trans_key(enum omap_channel ch, | |||
2173 | REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); | 2272 | REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); |
2174 | 2273 | ||
2175 | dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); | 2274 | dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); |
2176 | enable_clocks(0); | ||
2177 | } | 2275 | } |
2178 | 2276 | ||
2179 | void dispc_get_trans_key(enum omap_channel ch, | 2277 | void dispc_get_trans_key(enum omap_channel ch, |
2180 | enum omap_dss_trans_key_type *type, | 2278 | enum omap_dss_trans_key_type *type, |
2181 | u32 *trans_key) | 2279 | u32 *trans_key) |
2182 | { | 2280 | { |
2183 | enable_clocks(1); | ||
2184 | if (type) { | 2281 | if (type) { |
2185 | if (ch == OMAP_DSS_CHANNEL_LCD) | 2282 | if (ch == OMAP_DSS_CHANNEL_LCD) |
2186 | *type = REG_GET(DISPC_CONFIG, 11, 11); | 2283 | *type = REG_GET(DISPC_CONFIG, 11, 11); |
@@ -2194,33 +2291,28 @@ void dispc_get_trans_key(enum omap_channel ch, | |||
2194 | 2291 | ||
2195 | if (trans_key) | 2292 | if (trans_key) |
2196 | *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch)); | 2293 | *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch)); |
2197 | enable_clocks(0); | ||
2198 | } | 2294 | } |
2199 | 2295 | ||
2200 | void dispc_enable_trans_key(enum omap_channel ch, bool enable) | 2296 | void dispc_enable_trans_key(enum omap_channel ch, bool enable) |
2201 | { | 2297 | { |
2202 | enable_clocks(1); | ||
2203 | if (ch == OMAP_DSS_CHANNEL_LCD) | 2298 | if (ch == OMAP_DSS_CHANNEL_LCD) |
2204 | REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); | 2299 | REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); |
2205 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) | 2300 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) |
2206 | REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); | 2301 | REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); |
2207 | else /* OMAP_DSS_CHANNEL_LCD2 */ | 2302 | else /* OMAP_DSS_CHANNEL_LCD2 */ |
2208 | REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); | 2303 | REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); |
2209 | enable_clocks(0); | ||
2210 | } | 2304 | } |
2211 | void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) | 2305 | void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) |
2212 | { | 2306 | { |
2213 | if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) | 2307 | if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) |
2214 | return; | 2308 | return; |
2215 | 2309 | ||
2216 | enable_clocks(1); | ||
2217 | if (ch == OMAP_DSS_CHANNEL_LCD) | 2310 | if (ch == OMAP_DSS_CHANNEL_LCD) |
2218 | REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); | 2311 | REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); |
2219 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) | 2312 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) |
2220 | REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); | 2313 | REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); |
2221 | else /* OMAP_DSS_CHANNEL_LCD2 */ | 2314 | else /* OMAP_DSS_CHANNEL_LCD2 */ |
2222 | REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18); | 2315 | REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18); |
2223 | enable_clocks(0); | ||
2224 | } | 2316 | } |
2225 | bool dispc_alpha_blending_enabled(enum omap_channel ch) | 2317 | bool dispc_alpha_blending_enabled(enum omap_channel ch) |
2226 | { | 2318 | { |
@@ -2229,7 +2321,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch) | |||
2229 | if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) | 2321 | if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) |
2230 | return false; | 2322 | return false; |
2231 | 2323 | ||
2232 | enable_clocks(1); | ||
2233 | if (ch == OMAP_DSS_CHANNEL_LCD) | 2324 | if (ch == OMAP_DSS_CHANNEL_LCD) |
2234 | enabled = REG_GET(DISPC_CONFIG, 18, 18); | 2325 | enabled = REG_GET(DISPC_CONFIG, 18, 18); |
2235 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) | 2326 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) |
@@ -2238,7 +2329,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch) | |||
2238 | enabled = REG_GET(DISPC_CONFIG2, 18, 18); | 2329 | enabled = REG_GET(DISPC_CONFIG2, 18, 18); |
2239 | else | 2330 | else |
2240 | BUG(); | 2331 | BUG(); |
2241 | enable_clocks(0); | ||
2242 | 2332 | ||
2243 | return enabled; | 2333 | return enabled; |
2244 | } | 2334 | } |
@@ -2248,7 +2338,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch) | |||
2248 | { | 2338 | { |
2249 | bool enabled; | 2339 | bool enabled; |
2250 | 2340 | ||
2251 | enable_clocks(1); | ||
2252 | if (ch == OMAP_DSS_CHANNEL_LCD) | 2341 | if (ch == OMAP_DSS_CHANNEL_LCD) |
2253 | enabled = REG_GET(DISPC_CONFIG, 10, 10); | 2342 | enabled = REG_GET(DISPC_CONFIG, 10, 10); |
2254 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) | 2343 | else if (ch == OMAP_DSS_CHANNEL_DIGIT) |
@@ -2257,7 +2346,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch) | |||
2257 | enabled = REG_GET(DISPC_CONFIG2, 10, 10); | 2346 | enabled = REG_GET(DISPC_CONFIG2, 10, 10); |
2258 | else | 2347 | else |
2259 | BUG(); | 2348 | BUG(); |
2260 | enable_clocks(0); | ||
2261 | 2349 | ||
2262 | return enabled; | 2350 | return enabled; |
2263 | } | 2351 | } |
@@ -2285,12 +2373,10 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines) | |||
2285 | return; | 2373 | return; |
2286 | } | 2374 | } |
2287 | 2375 | ||
2288 | enable_clocks(1); | ||
2289 | if (channel == OMAP_DSS_CHANNEL_LCD2) | 2376 | if (channel == OMAP_DSS_CHANNEL_LCD2) |
2290 | REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); | 2377 | REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); |
2291 | else | 2378 | else |
2292 | REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); | 2379 | REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); |
2293 | enable_clocks(0); | ||
2294 | } | 2380 | } |
2295 | 2381 | ||
2296 | void dispc_set_parallel_interface_mode(enum omap_channel channel, | 2382 | void dispc_set_parallel_interface_mode(enum omap_channel channel, |
@@ -2322,8 +2408,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel, | |||
2322 | return; | 2408 | return; |
2323 | } | 2409 | } |
2324 | 2410 | ||
2325 | enable_clocks(1); | ||
2326 | |||
2327 | if (channel == OMAP_DSS_CHANNEL_LCD2) { | 2411 | if (channel == OMAP_DSS_CHANNEL_LCD2) { |
2328 | l = dispc_read_reg(DISPC_CONTROL2); | 2412 | l = dispc_read_reg(DISPC_CONTROL2); |
2329 | l = FLD_MOD(l, stallmode, 11, 11); | 2413 | l = FLD_MOD(l, stallmode, 11, 11); |
@@ -2335,8 +2419,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel, | |||
2335 | l = FLD_MOD(l, gpout1, 16, 16); | 2419 | l = FLD_MOD(l, gpout1, 16, 16); |
2336 | dispc_write_reg(DISPC_CONTROL, l); | 2420 | dispc_write_reg(DISPC_CONTROL, l); |
2337 | } | 2421 | } |
2338 | |||
2339 | enable_clocks(0); | ||
2340 | } | 2422 | } |
2341 | 2423 | ||
2342 | static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, | 2424 | static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, |
@@ -2389,10 +2471,8 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw, | |||
2389 | FLD_VAL(vbp, 31, 20); | 2471 | FLD_VAL(vbp, 31, 20); |
2390 | } | 2472 | } |
2391 | 2473 | ||
2392 | enable_clocks(1); | ||
2393 | dispc_write_reg(DISPC_TIMING_H(channel), timing_h); | 2474 | dispc_write_reg(DISPC_TIMING_H(channel), timing_h); |
2394 | dispc_write_reg(DISPC_TIMING_V(channel), timing_v); | 2475 | dispc_write_reg(DISPC_TIMING_V(channel), timing_v); |
2395 | enable_clocks(0); | ||
2396 | } | 2476 | } |
2397 | 2477 | ||
2398 | /* change name to mode? */ | 2478 | /* change name to mode? */ |
@@ -2435,10 +2515,8 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div, | |||
2435 | BUG_ON(lck_div < 1); | 2515 | BUG_ON(lck_div < 1); |
2436 | BUG_ON(pck_div < 2); | 2516 | BUG_ON(pck_div < 2); |
2437 | 2517 | ||
2438 | enable_clocks(1); | ||
2439 | dispc_write_reg(DISPC_DIVISORo(channel), | 2518 | dispc_write_reg(DISPC_DIVISORo(channel), |
2440 | FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); | 2519 | FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); |
2441 | enable_clocks(0); | ||
2442 | } | 2520 | } |
2443 | 2521 | ||
2444 | static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div, | 2522 | static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div, |
@@ -2457,7 +2535,7 @@ unsigned long dispc_fclk_rate(void) | |||
2457 | 2535 | ||
2458 | switch (dss_get_dispc_clk_source()) { | 2536 | switch (dss_get_dispc_clk_source()) { |
2459 | case OMAP_DSS_CLK_SRC_FCK: | 2537 | case OMAP_DSS_CLK_SRC_FCK: |
2460 | r = dss_clk_get_rate(DSS_CLK_FCK); | 2538 | r = clk_get_rate(dispc.dss_clk); |
2461 | break; | 2539 | break; |
2462 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: | 2540 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: |
2463 | dsidev = dsi_get_dsidev_from_id(0); | 2541 | dsidev = dsi_get_dsidev_from_id(0); |
@@ -2487,7 +2565,7 @@ unsigned long dispc_lclk_rate(enum omap_channel channel) | |||
2487 | 2565 | ||
2488 | switch (dss_get_lcd_clk_source(channel)) { | 2566 | switch (dss_get_lcd_clk_source(channel)) { |
2489 | case OMAP_DSS_CLK_SRC_FCK: | 2567 | case OMAP_DSS_CLK_SRC_FCK: |
2490 | r = dss_clk_get_rate(DSS_CLK_FCK); | 2568 | r = clk_get_rate(dispc.dss_clk); |
2491 | break; | 2569 | break; |
2492 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: | 2570 | case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: |
2493 | dsidev = dsi_get_dsidev_from_id(0); | 2571 | dsidev = dsi_get_dsidev_from_id(0); |
@@ -2526,7 +2604,8 @@ void dispc_dump_clocks(struct seq_file *s) | |||
2526 | enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); | 2604 | enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); |
2527 | enum omap_dss_clk_source lcd_clk_src; | 2605 | enum omap_dss_clk_source lcd_clk_src; |
2528 | 2606 | ||
2529 | enable_clocks(1); | 2607 | if (dispc_runtime_get()) |
2608 | return; | ||
2530 | 2609 | ||
2531 | seq_printf(s, "- DISPC -\n"); | 2610 | seq_printf(s, "- DISPC -\n"); |
2532 | 2611 | ||
@@ -2574,7 +2653,8 @@ void dispc_dump_clocks(struct seq_file *s) | |||
2574 | seq_printf(s, "pck\t\t%-16lupck div\t%u\n", | 2653 | seq_printf(s, "pck\t\t%-16lupck div\t%u\n", |
2575 | dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); | 2654 | dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); |
2576 | } | 2655 | } |
2577 | enable_clocks(0); | 2656 | |
2657 | dispc_runtime_put(); | ||
2578 | } | 2658 | } |
2579 | 2659 | ||
2580 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 2660 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
@@ -2629,7 +2709,8 @@ void dispc_dump_regs(struct seq_file *s) | |||
2629 | { | 2709 | { |
2630 | #define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r)) | 2710 | #define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r)) |
2631 | 2711 | ||
2632 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 2712 | if (dispc_runtime_get()) |
2713 | return; | ||
2633 | 2714 | ||
2634 | DUMPREG(DISPC_REVISION); | 2715 | DUMPREG(DISPC_REVISION); |
2635 | DUMPREG(DISPC_SYSCONFIG); | 2716 | DUMPREG(DISPC_SYSCONFIG); |
@@ -2649,7 +2730,8 @@ void dispc_dump_regs(struct seq_file *s) | |||
2649 | DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD)); | 2730 | DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD)); |
2650 | DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD)); | 2731 | DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD)); |
2651 | DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD)); | 2732 | DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD)); |
2652 | DUMPREG(DISPC_GLOBAL_ALPHA); | 2733 | if (dss_has_feature(FEAT_GLOBAL_ALPHA)) |
2734 | DUMPREG(DISPC_GLOBAL_ALPHA); | ||
2653 | DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); | 2735 | DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); |
2654 | DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); | 2736 | DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); |
2655 | if (dss_has_feature(FEAT_MGR_LCD2)) { | 2737 | if (dss_has_feature(FEAT_MGR_LCD2)) { |
@@ -2680,20 +2762,25 @@ void dispc_dump_regs(struct seq_file *s) | |||
2680 | DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); | 2762 | DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); |
2681 | DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); | 2763 | DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); |
2682 | 2764 | ||
2683 | DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); | 2765 | if (dss_has_feature(FEAT_CPR)) { |
2684 | DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); | 2766 | DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); |
2685 | DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); | 2767 | DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); |
2768 | DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); | ||
2769 | } | ||
2686 | if (dss_has_feature(FEAT_MGR_LCD2)) { | 2770 | if (dss_has_feature(FEAT_MGR_LCD2)) { |
2687 | DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); | 2771 | DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); |
2688 | DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); | 2772 | DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); |
2689 | DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); | 2773 | DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); |
2690 | 2774 | ||
2691 | DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); | 2775 | if (dss_has_feature(FEAT_CPR)) { |
2692 | DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); | 2776 | DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); |
2693 | DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); | 2777 | DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); |
2778 | DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); | ||
2779 | } | ||
2694 | } | 2780 | } |
2695 | 2781 | ||
2696 | DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX)); | 2782 | if (dss_has_feature(FEAT_PRELOAD)) |
2783 | DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX)); | ||
2697 | 2784 | ||
2698 | DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1)); | 2785 | DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1)); |
2699 | DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1)); | 2786 | DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1)); |
@@ -2744,14 +2831,16 @@ void dispc_dump_regs(struct seq_file *s) | |||
2744 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2)); | 2831 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2)); |
2745 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3)); | 2832 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3)); |
2746 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4)); | 2833 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4)); |
2747 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0)); | 2834 | if (dss_has_feature(FEAT_FIR_COEF_V)) { |
2748 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1)); | 2835 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0)); |
2749 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2)); | 2836 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1)); |
2750 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3)); | 2837 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2)); |
2751 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4)); | 2838 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3)); |
2752 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5)); | 2839 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4)); |
2753 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6)); | 2840 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5)); |
2754 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7)); | 2841 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6)); |
2842 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7)); | ||
2843 | } | ||
2755 | 2844 | ||
2756 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { | 2845 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { |
2757 | DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1)); | 2846 | DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1)); |
@@ -2812,14 +2901,17 @@ void dispc_dump_regs(struct seq_file *s) | |||
2812 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2)); | 2901 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2)); |
2813 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3)); | 2902 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3)); |
2814 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4)); | 2903 | DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4)); |
2815 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0)); | 2904 | |
2816 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1)); | 2905 | if (dss_has_feature(FEAT_FIR_COEF_V)) { |
2817 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2)); | 2906 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0)); |
2818 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3)); | 2907 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1)); |
2819 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4)); | 2908 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2)); |
2820 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5)); | 2909 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3)); |
2821 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6)); | 2910 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4)); |
2822 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7)); | 2911 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5)); |
2912 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6)); | ||
2913 | DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7)); | ||
2914 | } | ||
2823 | 2915 | ||
2824 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { | 2916 | if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { |
2825 | DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2)); | 2917 | DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2)); |
@@ -2858,10 +2950,12 @@ void dispc_dump_regs(struct seq_file *s) | |||
2858 | if (dss_has_feature(FEAT_ATTR2)) | 2950 | if (dss_has_feature(FEAT_ATTR2)) |
2859 | DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); | 2951 | DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); |
2860 | 2952 | ||
2861 | DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1)); | 2953 | if (dss_has_feature(FEAT_PRELOAD)) { |
2862 | DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2)); | 2954 | DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1)); |
2955 | DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2)); | ||
2956 | } | ||
2863 | 2957 | ||
2864 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 2958 | dispc_runtime_put(); |
2865 | #undef DUMPREG | 2959 | #undef DUMPREG |
2866 | } | 2960 | } |
2867 | 2961 | ||
@@ -2882,9 +2976,7 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf, | |||
2882 | l |= FLD_VAL(acbi, 11, 8); | 2976 | l |= FLD_VAL(acbi, 11, 8); |
2883 | l |= FLD_VAL(acb, 7, 0); | 2977 | l |= FLD_VAL(acb, 7, 0); |
2884 | 2978 | ||
2885 | enable_clocks(1); | ||
2886 | dispc_write_reg(DISPC_POL_FREQ(channel), l); | 2979 | dispc_write_reg(DISPC_POL_FREQ(channel), l); |
2887 | enable_clocks(0); | ||
2888 | } | 2980 | } |
2889 | 2981 | ||
2890 | void dispc_set_pol_freq(enum omap_channel channel, | 2982 | void dispc_set_pol_freq(enum omap_channel channel, |
@@ -3005,15 +3097,11 @@ static void _omap_dispc_set_irqs(void) | |||
3005 | mask |= isr_data->mask; | 3097 | mask |= isr_data->mask; |
3006 | } | 3098 | } |
3007 | 3099 | ||
3008 | enable_clocks(1); | ||
3009 | |||
3010 | old_mask = dispc_read_reg(DISPC_IRQENABLE); | 3100 | old_mask = dispc_read_reg(DISPC_IRQENABLE); |
3011 | /* clear the irqstatus for newly enabled irqs */ | 3101 | /* clear the irqstatus for newly enabled irqs */ |
3012 | dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask); | 3102 | dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask); |
3013 | 3103 | ||
3014 | dispc_write_reg(DISPC_IRQENABLE, mask); | 3104 | dispc_write_reg(DISPC_IRQENABLE, mask); |
3015 | |||
3016 | enable_clocks(0); | ||
3017 | } | 3105 | } |
3018 | 3106 | ||
3019 | int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) | 3107 | int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) |
@@ -3522,13 +3610,6 @@ static void _omap_dispc_initial_config(void) | |||
3522 | { | 3610 | { |
3523 | u32 l; | 3611 | u32 l; |
3524 | 3612 | ||
3525 | l = dispc_read_reg(DISPC_SYSCONFIG); | ||
3526 | l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */ | ||
3527 | l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */ | ||
3528 | l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */ | ||
3529 | l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */ | ||
3530 | dispc_write_reg(DISPC_SYSCONFIG, l); | ||
3531 | |||
3532 | /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */ | 3613 | /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */ |
3533 | if (dss_has_feature(FEAT_CORE_CLK_DIV)) { | 3614 | if (dss_has_feature(FEAT_CORE_CLK_DIV)) { |
3534 | l = dispc_read_reg(DISPC_DIVISOR); | 3615 | l = dispc_read_reg(DISPC_DIVISOR); |
@@ -3552,58 +3633,8 @@ static void _omap_dispc_initial_config(void) | |||
3552 | dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); | 3633 | dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); |
3553 | 3634 | ||
3554 | dispc_read_plane_fifo_sizes(); | 3635 | dispc_read_plane_fifo_sizes(); |
3555 | } | ||
3556 | 3636 | ||
3557 | int dispc_enable_plane(enum omap_plane plane, bool enable) | 3637 | dispc_configure_burst_sizes(); |
3558 | { | ||
3559 | DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); | ||
3560 | |||
3561 | enable_clocks(1); | ||
3562 | _dispc_enable_plane(plane, enable); | ||
3563 | enable_clocks(0); | ||
3564 | |||
3565 | return 0; | ||
3566 | } | ||
3567 | |||
3568 | int dispc_setup_plane(enum omap_plane plane, | ||
3569 | u32 paddr, u16 screen_width, | ||
3570 | u16 pos_x, u16 pos_y, | ||
3571 | u16 width, u16 height, | ||
3572 | u16 out_width, u16 out_height, | ||
3573 | enum omap_color_mode color_mode, | ||
3574 | bool ilace, | ||
3575 | enum omap_dss_rotation_type rotation_type, | ||
3576 | u8 rotation, bool mirror, u8 global_alpha, | ||
3577 | u8 pre_mult_alpha, enum omap_channel channel, | ||
3578 | u32 puv_addr) | ||
3579 | { | ||
3580 | int r = 0; | ||
3581 | |||
3582 | DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> " | ||
3583 | "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", | ||
3584 | plane, paddr, screen_width, pos_x, pos_y, | ||
3585 | width, height, | ||
3586 | out_width, out_height, | ||
3587 | ilace, color_mode, | ||
3588 | rotation, mirror, channel); | ||
3589 | |||
3590 | enable_clocks(1); | ||
3591 | |||
3592 | r = _dispc_setup_plane(plane, | ||
3593 | paddr, screen_width, | ||
3594 | pos_x, pos_y, | ||
3595 | width, height, | ||
3596 | out_width, out_height, | ||
3597 | color_mode, ilace, | ||
3598 | rotation_type, | ||
3599 | rotation, mirror, | ||
3600 | global_alpha, | ||
3601 | pre_mult_alpha, | ||
3602 | channel, puv_addr); | ||
3603 | |||
3604 | enable_clocks(0); | ||
3605 | |||
3606 | return r; | ||
3607 | } | 3638 | } |
3608 | 3639 | ||
3609 | /* DISPC HW IP initialisation */ | 3640 | /* DISPC HW IP initialisation */ |
@@ -3612,9 +3643,19 @@ static int omap_dispchw_probe(struct platform_device *pdev) | |||
3612 | u32 rev; | 3643 | u32 rev; |
3613 | int r = 0; | 3644 | int r = 0; |
3614 | struct resource *dispc_mem; | 3645 | struct resource *dispc_mem; |
3646 | struct clk *clk; | ||
3615 | 3647 | ||
3616 | dispc.pdev = pdev; | 3648 | dispc.pdev = pdev; |
3617 | 3649 | ||
3650 | clk = clk_get(&pdev->dev, "fck"); | ||
3651 | if (IS_ERR(clk)) { | ||
3652 | DSSERR("can't get fck\n"); | ||
3653 | r = PTR_ERR(clk); | ||
3654 | goto err_get_clk; | ||
3655 | } | ||
3656 | |||
3657 | dispc.dss_clk = clk; | ||
3658 | |||
3618 | spin_lock_init(&dispc.irq_lock); | 3659 | spin_lock_init(&dispc.irq_lock); |
3619 | 3660 | ||
3620 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 3661 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
@@ -3628,62 +3669,103 @@ static int omap_dispchw_probe(struct platform_device *pdev) | |||
3628 | if (!dispc_mem) { | 3669 | if (!dispc_mem) { |
3629 | DSSERR("can't get IORESOURCE_MEM DISPC\n"); | 3670 | DSSERR("can't get IORESOURCE_MEM DISPC\n"); |
3630 | r = -EINVAL; | 3671 | r = -EINVAL; |
3631 | goto fail0; | 3672 | goto err_ioremap; |
3632 | } | 3673 | } |
3633 | dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem)); | 3674 | dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem)); |
3634 | if (!dispc.base) { | 3675 | if (!dispc.base) { |
3635 | DSSERR("can't ioremap DISPC\n"); | 3676 | DSSERR("can't ioremap DISPC\n"); |
3636 | r = -ENOMEM; | 3677 | r = -ENOMEM; |
3637 | goto fail0; | 3678 | goto err_ioremap; |
3638 | } | 3679 | } |
3639 | dispc.irq = platform_get_irq(dispc.pdev, 0); | 3680 | dispc.irq = platform_get_irq(dispc.pdev, 0); |
3640 | if (dispc.irq < 0) { | 3681 | if (dispc.irq < 0) { |
3641 | DSSERR("platform_get_irq failed\n"); | 3682 | DSSERR("platform_get_irq failed\n"); |
3642 | r = -ENODEV; | 3683 | r = -ENODEV; |
3643 | goto fail1; | 3684 | goto err_irq; |
3644 | } | 3685 | } |
3645 | 3686 | ||
3646 | r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED, | 3687 | r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED, |
3647 | "OMAP DISPC", dispc.pdev); | 3688 | "OMAP DISPC", dispc.pdev); |
3648 | if (r < 0) { | 3689 | if (r < 0) { |
3649 | DSSERR("request_irq failed\n"); | 3690 | DSSERR("request_irq failed\n"); |
3650 | goto fail1; | 3691 | goto err_irq; |
3651 | } | 3692 | } |
3652 | 3693 | ||
3653 | enable_clocks(1); | 3694 | pm_runtime_enable(&pdev->dev); |
3695 | |||
3696 | r = dispc_runtime_get(); | ||
3697 | if (r) | ||
3698 | goto err_runtime_get; | ||
3654 | 3699 | ||
3655 | _omap_dispc_initial_config(); | 3700 | _omap_dispc_initial_config(); |
3656 | 3701 | ||
3657 | _omap_dispc_initialize_irq(); | 3702 | _omap_dispc_initialize_irq(); |
3658 | 3703 | ||
3659 | dispc_save_context(); | ||
3660 | |||
3661 | rev = dispc_read_reg(DISPC_REVISION); | 3704 | rev = dispc_read_reg(DISPC_REVISION); |
3662 | dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n", | 3705 | dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n", |
3663 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); | 3706 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); |
3664 | 3707 | ||
3665 | enable_clocks(0); | 3708 | dispc_runtime_put(); |
3666 | 3709 | ||
3667 | return 0; | 3710 | return 0; |
3668 | fail1: | 3711 | |
3712 | err_runtime_get: | ||
3713 | pm_runtime_disable(&pdev->dev); | ||
3714 | free_irq(dispc.irq, dispc.pdev); | ||
3715 | err_irq: | ||
3669 | iounmap(dispc.base); | 3716 | iounmap(dispc.base); |
3670 | fail0: | 3717 | err_ioremap: |
3718 | clk_put(dispc.dss_clk); | ||
3719 | err_get_clk: | ||
3671 | return r; | 3720 | return r; |
3672 | } | 3721 | } |
3673 | 3722 | ||
3674 | static int omap_dispchw_remove(struct platform_device *pdev) | 3723 | static int omap_dispchw_remove(struct platform_device *pdev) |
3675 | { | 3724 | { |
3725 | pm_runtime_disable(&pdev->dev); | ||
3726 | |||
3727 | clk_put(dispc.dss_clk); | ||
3728 | |||
3676 | free_irq(dispc.irq, dispc.pdev); | 3729 | free_irq(dispc.irq, dispc.pdev); |
3677 | iounmap(dispc.base); | 3730 | iounmap(dispc.base); |
3678 | return 0; | 3731 | return 0; |
3679 | } | 3732 | } |
3680 | 3733 | ||
3734 | static int dispc_runtime_suspend(struct device *dev) | ||
3735 | { | ||
3736 | dispc_save_context(); | ||
3737 | clk_disable(dispc.dss_clk); | ||
3738 | dss_runtime_put(); | ||
3739 | |||
3740 | return 0; | ||
3741 | } | ||
3742 | |||
3743 | static int dispc_runtime_resume(struct device *dev) | ||
3744 | { | ||
3745 | int r; | ||
3746 | |||
3747 | r = dss_runtime_get(); | ||
3748 | if (r < 0) | ||
3749 | return r; | ||
3750 | |||
3751 | clk_enable(dispc.dss_clk); | ||
3752 | dispc_restore_context(); | ||
3753 | |||
3754 | return 0; | ||
3755 | } | ||
3756 | |||
3757 | static const struct dev_pm_ops dispc_pm_ops = { | ||
3758 | .runtime_suspend = dispc_runtime_suspend, | ||
3759 | .runtime_resume = dispc_runtime_resume, | ||
3760 | }; | ||
3761 | |||
3681 | static struct platform_driver omap_dispchw_driver = { | 3762 | static struct platform_driver omap_dispchw_driver = { |
3682 | .probe = omap_dispchw_probe, | 3763 | .probe = omap_dispchw_probe, |
3683 | .remove = omap_dispchw_remove, | 3764 | .remove = omap_dispchw_remove, |
3684 | .driver = { | 3765 | .driver = { |
3685 | .name = "omapdss_dispc", | 3766 | .name = "omapdss_dispc", |
3686 | .owner = THIS_MODULE, | 3767 | .owner = THIS_MODULE, |
3768 | .pm = &dispc_pm_ops, | ||
3687 | }, | 3769 | }, |
3688 | }; | 3770 | }; |
3689 | 3771 | ||
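
Note on the dispc.c hunks above: they all follow one conversion pattern. The hand-rolled enable_clocks()/dss_clk_enable() bracketing around register accesses is dropped, the block is kept powered between dispc_runtime_get() and dispc_runtime_put() via runtime PM, and the register-context save/restore moves into the new runtime_suspend/runtime_resume callbacks hooked up through dev_pm_ops. The fragment below is a condensed, hedged sketch of that pattern; "mydisp" and its helpers are hypothetical stand-ins, not the exact omapdss symbols.

	/* Minimal sketch of the runtime-PM wrapper pattern, under the assumption
	 * that probe() has already called pm_runtime_enable(&pdev->dev). */
	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static struct {
		struct platform_device *pdev;
	} mydisp;

	static int mydisp_runtime_get(void)
	{
		int r;

		/* Power up the block (and its parents) before touching registers. */
		r = pm_runtime_get_sync(&mydisp.pdev->dev);
		WARN_ON(r < 0);
		return r < 0 ? r : 0;
	}

	static void mydisp_runtime_put(void)
	{
		int r;

		/* Drop the reference; the PM core may now runtime-suspend the block. */
		r = pm_runtime_put(&mydisp.pdev->dev);
		WARN_ON(r < 0);
	}

	static int mydisp_runtime_suspend(struct device *dev)
	{
		/* Save register context here, as dispc_runtime_suspend() does. */
		return 0;
	}

	static int mydisp_runtime_resume(struct device *dev)
	{
		/* Restore register context here, as dispc_runtime_resume() does. */
		return 0;
	}

	static const struct dev_pm_ops mydisp_pm_ops = {
		.runtime_suspend = mydisp_runtime_suspend,
		.runtime_resume = mydisp_runtime_resume,
	};

With this in place, callers wrap every register-access path in get/put instead of toggling clocks directly, which is exactly what the dispc_dump_clocks() and dispc_dump_regs() hunks above do.
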
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index c2dfc8c50057..94495e45ec5a 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include <video/omapdss.h> | 30 | #include <video/omapdss.h> |
31 | #include "dss.h" | 31 | #include "dss.h" |
32 | #include "dss_features.h" | ||
32 | 33 | ||
33 | static ssize_t display_enabled_show(struct device *dev, | 34 | static ssize_t display_enabled_show(struct device *dev, |
34 | struct device_attribute *attr, char *buf) | 35 | struct device_attribute *attr, char *buf) |
@@ -65,48 +66,6 @@ static ssize_t display_enabled_store(struct device *dev, | |||
65 | return size; | 66 | return size; |
66 | } | 67 | } |
67 | 68 | ||
68 | static ssize_t display_upd_mode_show(struct device *dev, | ||
69 | struct device_attribute *attr, char *buf) | ||
70 | { | ||
71 | struct omap_dss_device *dssdev = to_dss_device(dev); | ||
72 | enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO; | ||
73 | if (dssdev->driver->get_update_mode) | ||
74 | mode = dssdev->driver->get_update_mode(dssdev); | ||
75 | return snprintf(buf, PAGE_SIZE, "%d\n", mode); | ||
76 | } | ||
77 | |||
78 | static ssize_t display_upd_mode_store(struct device *dev, | ||
79 | struct device_attribute *attr, | ||
80 | const char *buf, size_t size) | ||
81 | { | ||
82 | struct omap_dss_device *dssdev = to_dss_device(dev); | ||
83 | int val, r; | ||
84 | enum omap_dss_update_mode mode; | ||
85 | |||
86 | if (!dssdev->driver->set_update_mode) | ||
87 | return -EINVAL; | ||
88 | |||
89 | r = kstrtoint(buf, 0, &val); | ||
90 | if (r) | ||
91 | return r; | ||
92 | |||
93 | switch (val) { | ||
94 | case OMAP_DSS_UPDATE_DISABLED: | ||
95 | case OMAP_DSS_UPDATE_AUTO: | ||
96 | case OMAP_DSS_UPDATE_MANUAL: | ||
97 | mode = (enum omap_dss_update_mode)val; | ||
98 | break; | ||
99 | default: | ||
100 | return -EINVAL; | ||
101 | } | ||
102 | |||
103 | r = dssdev->driver->set_update_mode(dssdev, mode); | ||
104 | if (r) | ||
105 | return r; | ||
106 | |||
107 | return size; | ||
108 | } | ||
109 | |||
110 | static ssize_t display_tear_show(struct device *dev, | 69 | static ssize_t display_tear_show(struct device *dev, |
111 | struct device_attribute *attr, char *buf) | 70 | struct device_attribute *attr, char *buf) |
112 | { | 71 | { |
@@ -294,8 +253,6 @@ static ssize_t display_wss_store(struct device *dev, | |||
294 | 253 | ||
295 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, | 254 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, |
296 | display_enabled_show, display_enabled_store); | 255 | display_enabled_show, display_enabled_store); |
297 | static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR, | ||
298 | display_upd_mode_show, display_upd_mode_store); | ||
299 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, | 256 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, |
300 | display_tear_show, display_tear_store); | 257 | display_tear_show, display_tear_store); |
301 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, | 258 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, |
@@ -309,7 +266,6 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, | |||
309 | 266 | ||
310 | static struct device_attribute *display_sysfs_attrs[] = { | 267 | static struct device_attribute *display_sysfs_attrs[] = { |
311 | &dev_attr_enabled, | 268 | &dev_attr_enabled, |
312 | &dev_attr_update_mode, | ||
313 | &dev_attr_tear_elim, | 269 | &dev_attr_tear_elim, |
314 | &dev_attr_timings, | 270 | &dev_attr_timings, |
315 | &dev_attr_rotate, | 271 | &dev_attr_rotate, |
@@ -327,16 +283,13 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev, | |||
327 | EXPORT_SYMBOL(omapdss_default_get_resolution); | 283 | EXPORT_SYMBOL(omapdss_default_get_resolution); |
328 | 284 | ||
329 | void default_get_overlay_fifo_thresholds(enum omap_plane plane, | 285 | void default_get_overlay_fifo_thresholds(enum omap_plane plane, |
330 | u32 fifo_size, enum omap_burst_size *burst_size, | 286 | u32 fifo_size, u32 burst_size, |
331 | u32 *fifo_low, u32 *fifo_high) | 287 | u32 *fifo_low, u32 *fifo_high) |
332 | { | 288 | { |
333 | unsigned burst_size_bytes; | 289 | unsigned buf_unit = dss_feat_get_buffer_size_unit(); |
334 | |||
335 | *burst_size = OMAP_DSS_BURST_16x32; | ||
336 | burst_size_bytes = 16 * 32 / 8; | ||
337 | 290 | ||
338 | *fifo_high = fifo_size - 1; | 291 | *fifo_high = fifo_size - buf_unit; |
339 | *fifo_low = fifo_size - burst_size_bytes; | 292 | *fifo_low = fifo_size - burst_size; |
340 | } | 293 | } |
341 | 294 | ||
342 | int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) | 295 | int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) |
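
Note on the display.c hunk above: default_get_overlay_fifo_thresholds() now works purely in bytes, leaving one buffer-size unit free below the high watermark and room for one full burst above the low watermark. A quick worked example with hypothetical values (not tied to any specific OMAP revision): a 1024-byte FIFO, a 16-byte buffer unit, and the x8 burst configured by dispc_configure_burst_sizes(), i.e. 128 bytes.

	/* Hypothetical numbers, for illustration only. */
	u32 fifo_size  = 1024;              /* bytes, from DISPC_OVL_FIFO_SIZE_STATUS * unit */
	u32 buf_unit   = 16;                /* dss_feat_get_buffer_size_unit() */
	u32 burst_size = 16 * 8;            /* dispc_get_burst_size(): burst unit x8 = 128 */

	u32 fifo_high = fifo_size - buf_unit;    /* 1008: stop refilling one unit below full */
	u32 fifo_low  = fifo_size - burst_size;  /*  896: start refilling with room for a burst */

dispc_set_fifo_threshold() then divides both values by the buffer-size unit before programming DISPC_OVL_FIFO_THRESHOLD, and warns if either value is not unit-aligned (both are, in this example).
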
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index ff6bd30132df..f053b180ecd7 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #define DSS_SUBSYS_NAME "DPI" | 23 | #define DSS_SUBSYS_NAME "DPI" |
24 | 24 | ||
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/clk.h> | ||
27 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
28 | #include <linux/err.h> | 27 | #include <linux/err.h> |
29 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
@@ -130,8 +129,6 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) | |||
130 | bool is_tft; | 129 | bool is_tft; |
131 | int r = 0; | 130 | int r = 0; |
132 | 131 | ||
133 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
134 | |||
135 | dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config, | 132 | dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config, |
136 | dssdev->panel.acbi, dssdev->panel.acb); | 133 | dssdev->panel.acbi, dssdev->panel.acb); |
137 | 134 | ||
@@ -144,7 +141,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) | |||
144 | r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, | 141 | r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, |
145 | &fck, &lck_div, &pck_div); | 142 | &fck, &lck_div, &pck_div); |
146 | if (r) | 143 | if (r) |
147 | goto err0; | 144 | return r; |
148 | 145 | ||
149 | pck = fck / lck_div / pck_div / 1000; | 146 | pck = fck / lck_div / pck_div / 1000; |
150 | 147 | ||
@@ -158,12 +155,10 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) | |||
158 | 155 | ||
159 | dispc_set_lcd_timings(dssdev->manager->id, t); | 156 | dispc_set_lcd_timings(dssdev->manager->id, t); |
160 | 157 | ||
161 | err0: | 158 | return 0; |
162 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
163 | return r; | ||
164 | } | 159 | } |
165 | 160 | ||
166 | static int dpi_basic_init(struct omap_dss_device *dssdev) | 161 | static void dpi_basic_init(struct omap_dss_device *dssdev) |
167 | { | 162 | { |
168 | bool is_tft; | 163 | bool is_tft; |
169 | 164 | ||
@@ -175,8 +170,6 @@ static int dpi_basic_init(struct omap_dss_device *dssdev) | |||
175 | OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); | 170 | OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); |
176 | dispc_set_tft_data_lines(dssdev->manager->id, | 171 | dispc_set_tft_data_lines(dssdev->manager->id, |
177 | dssdev->phy.dpi.data_lines); | 172 | dssdev->phy.dpi.data_lines); |
178 | |||
179 | return 0; | ||
180 | } | 173 | } |
181 | 174 | ||
182 | int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) | 175 | int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) |
@@ -186,31 +179,38 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) | |||
186 | r = omap_dss_start_device(dssdev); | 179 | r = omap_dss_start_device(dssdev); |
187 | if (r) { | 180 | if (r) { |
188 | DSSERR("failed to start device\n"); | 181 | DSSERR("failed to start device\n"); |
189 | goto err0; | 182 | goto err_start_dev; |
190 | } | 183 | } |
191 | 184 | ||
192 | if (cpu_is_omap34xx()) { | 185 | if (cpu_is_omap34xx()) { |
193 | r = regulator_enable(dpi.vdds_dsi_reg); | 186 | r = regulator_enable(dpi.vdds_dsi_reg); |
194 | if (r) | 187 | if (r) |
195 | goto err1; | 188 | goto err_reg_enable; |
196 | } | 189 | } |
197 | 190 | ||
198 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 191 | r = dss_runtime_get(); |
192 | if (r) | ||
193 | goto err_get_dss; | ||
199 | 194 | ||
200 | r = dpi_basic_init(dssdev); | 195 | r = dispc_runtime_get(); |
201 | if (r) | 196 | if (r) |
202 | goto err2; | 197 | goto err_get_dispc; |
198 | |||
199 | dpi_basic_init(dssdev); | ||
203 | 200 | ||
204 | if (dpi_use_dsi_pll(dssdev)) { | 201 | if (dpi_use_dsi_pll(dssdev)) { |
205 | dss_clk_enable(DSS_CLK_SYSCK); | 202 | r = dsi_runtime_get(dpi.dsidev); |
203 | if (r) | ||
204 | goto err_get_dsi; | ||
205 | |||
206 | r = dsi_pll_init(dpi.dsidev, 0, 1); | 206 | r = dsi_pll_init(dpi.dsidev, 0, 1); |
207 | if (r) | 207 | if (r) |
208 | goto err3; | 208 | goto err_dsi_pll_init; |
209 | } | 209 | } |
210 | 210 | ||
211 | r = dpi_set_mode(dssdev); | 211 | r = dpi_set_mode(dssdev); |
212 | if (r) | 212 | if (r) |
213 | goto err4; | 213 | goto err_set_mode; |
214 | 214 | ||
215 | mdelay(2); | 215 | mdelay(2); |
216 | 216 | ||
@@ -218,19 +218,22 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) | |||
218 | 218 | ||
219 | return 0; | 219 | return 0; |
220 | 220 | ||
221 | err4: | 221 | err_set_mode: |
222 | if (dpi_use_dsi_pll(dssdev)) | 222 | if (dpi_use_dsi_pll(dssdev)) |
223 | dsi_pll_uninit(dpi.dsidev, true); | 223 | dsi_pll_uninit(dpi.dsidev, true); |
224 | err3: | 224 | err_dsi_pll_init: |
225 | if (dpi_use_dsi_pll(dssdev)) | 225 | if (dpi_use_dsi_pll(dssdev)) |
226 | dss_clk_disable(DSS_CLK_SYSCK); | 226 | dsi_runtime_put(dpi.dsidev); |
227 | err2: | 227 | err_get_dsi: |
228 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 228 | dispc_runtime_put(); |
229 | err_get_dispc: | ||
230 | dss_runtime_put(); | ||
231 | err_get_dss: | ||
229 | if (cpu_is_omap34xx()) | 232 | if (cpu_is_omap34xx()) |
230 | regulator_disable(dpi.vdds_dsi_reg); | 233 | regulator_disable(dpi.vdds_dsi_reg); |
231 | err1: | 234 | err_reg_enable: |
232 | omap_dss_stop_device(dssdev); | 235 | omap_dss_stop_device(dssdev); |
233 | err0: | 236 | err_start_dev: |
234 | return r; | 237 | return r; |
235 | } | 238 | } |
236 | EXPORT_SYMBOL(omapdss_dpi_display_enable); | 239 | EXPORT_SYMBOL(omapdss_dpi_display_enable); |
@@ -242,10 +245,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev) | |||
242 | if (dpi_use_dsi_pll(dssdev)) { | 245 | if (dpi_use_dsi_pll(dssdev)) { |
243 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); | 246 | dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); |
244 | dsi_pll_uninit(dpi.dsidev, true); | 247 | dsi_pll_uninit(dpi.dsidev, true); |
245 | dss_clk_disable(DSS_CLK_SYSCK); | 248 | dsi_runtime_put(dpi.dsidev); |
246 | } | 249 | } |
247 | 250 | ||
248 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 251 | dispc_runtime_put(); |
252 | dss_runtime_put(); | ||
249 | 253 | ||
250 | if (cpu_is_omap34xx()) | 254 | if (cpu_is_omap34xx()) |
251 | regulator_disable(dpi.vdds_dsi_reg); | 255 | regulator_disable(dpi.vdds_dsi_reg); |
@@ -257,11 +261,26 @@ EXPORT_SYMBOL(omapdss_dpi_display_disable); | |||
257 | void dpi_set_timings(struct omap_dss_device *dssdev, | 261 | void dpi_set_timings(struct omap_dss_device *dssdev, |
258 | struct omap_video_timings *timings) | 262 | struct omap_video_timings *timings) |
259 | { | 263 | { |
264 | int r; | ||
265 | |||
260 | DSSDBG("dpi_set_timings\n"); | 266 | DSSDBG("dpi_set_timings\n"); |
261 | dssdev->panel.timings = *timings; | 267 | dssdev->panel.timings = *timings; |
262 | if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { | 268 | if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { |
269 | r = dss_runtime_get(); | ||
270 | if (r) | ||
271 | return; | ||
272 | |||
273 | r = dispc_runtime_get(); | ||
274 | if (r) { | ||
275 | dss_runtime_put(); | ||
276 | return; | ||
277 | } | ||
278 | |||
263 | dpi_set_mode(dssdev); | 279 | dpi_set_mode(dssdev); |
264 | dispc_go(dssdev->manager->id); | 280 | dispc_go(dssdev->manager->id); |
281 | |||
282 | dispc_runtime_put(); | ||
283 | dss_runtime_put(); | ||
265 | } | 284 | } |
266 | } | 285 | } |
267 | EXPORT_SYMBOL(dpi_set_timings); | 286 | EXPORT_SYMBOL(dpi_set_timings); |
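Note on the dpi.c hunks above: the old dss_clk_enable()/dss_clk_disable() calls are replaced by runtime PM references (dss_runtime_get(), dispc_runtime_get(), dsi_runtime_get()), and the numeric error labels become descriptive ones. Below is a minimal sketch of the unwind convention those labels encode; it uses the real pm_runtime API, but example_enable() and example_configure() are illustrative names, not part of the patch.

/*
 * Sketch only: resources are acquired top to bottom and released in
 * reverse order through a chain of descriptive labels, so each label
 * names the step whose acquisition failed.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_configure(struct device *dev)
{
        return 0;                       /* stand-in for dpi_set_mode() */
}

static int example_enable(struct device *dev)
{
        int r;

        r = pm_runtime_get_sync(dev);   /* what dss_runtime_get() wraps */
        if (r < 0)
                goto err_runtime_get;

        r = example_configure(dev);
        if (r)
                goto err_configure;

        return 0;

err_configure:
        pm_runtime_put(dev);            /* undo in reverse order */
err_runtime_get:
        return r;
}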
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 345757cfcbee..7adbbeb84334 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/debugfs.h> | 38 | #include <linux/debugfs.h> |
39 | #include <linux/pm_runtime.h> | ||
39 | 40 | ||
40 | #include <video/omapdss.h> | 41 | #include <video/omapdss.h> |
41 | #include <plat/clock.h> | 42 | #include <plat/clock.h> |
@@ -267,8 +268,12 @@ struct dsi_isr_tables { | |||
267 | struct dsi_data { | 268 | struct dsi_data { |
268 | struct platform_device *pdev; | 269 | struct platform_device *pdev; |
269 | void __iomem *base; | 270 | void __iomem *base; |
271 | |||
270 | int irq; | 272 | int irq; |
271 | 273 | ||
274 | struct clk *dss_clk; | ||
275 | struct clk *sys_clk; | ||
276 | |||
272 | void (*dsi_mux_pads)(bool enable); | 277 | void (*dsi_mux_pads)(bool enable); |
273 | 278 | ||
274 | struct dsi_clock_info current_cinfo; | 279 | struct dsi_clock_info current_cinfo; |
@@ -389,15 +394,6 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev, | |||
389 | return __raw_readl(dsi->base + idx.idx); | 394 | return __raw_readl(dsi->base + idx.idx); |
390 | } | 395 | } |
391 | 396 | ||
392 | |||
393 | void dsi_save_context(void) | ||
394 | { | ||
395 | } | ||
396 | |||
397 | void dsi_restore_context(void) | ||
398 | { | ||
399 | } | ||
400 | |||
401 | void dsi_bus_lock(struct omap_dss_device *dssdev) | 397 | void dsi_bus_lock(struct omap_dss_device *dssdev) |
402 | { | 398 | { |
403 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | 399 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); |
@@ -493,9 +489,18 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name) | |||
493 | total_bytes * 1000 / total_us); | 489 | total_bytes * 1000 / total_us); |
494 | } | 490 | } |
495 | #else | 491 | #else |
496 | #define dsi_perf_mark_setup(x) | 492 | static inline void dsi_perf_mark_setup(struct platform_device *dsidev) |
497 | #define dsi_perf_mark_start(x) | 493 | { |
498 | #define dsi_perf_show(x, y) | 494 | } |
495 | |||
496 | static inline void dsi_perf_mark_start(struct platform_device *dsidev) | ||
497 | { | ||
498 | } | ||
499 | |||
500 | static inline void dsi_perf_show(struct platform_device *dsidev, | ||
501 | const char *name) | ||
502 | { | ||
503 | } | ||
499 | #endif | 504 | #endif |
500 | 505 | ||
501 | static void print_irq_status(u32 status) | 506 | static void print_irq_status(u32 status) |
@@ -1039,13 +1044,27 @@ static u32 dsi_get_errors(struct platform_device *dsidev) | |||
1039 | return e; | 1044 | return e; |
1040 | } | 1045 | } |
1041 | 1046 | ||
1042 | /* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */ | 1047 | int dsi_runtime_get(struct platform_device *dsidev) |
1043 | static inline void enable_clocks(bool enable) | ||
1044 | { | 1048 | { |
1045 | if (enable) | 1049 | int r; |
1046 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 1050 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
1047 | else | 1051 | |
1048 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 1052 | DSSDBG("dsi_runtime_get\n"); |
1053 | |||
1054 | r = pm_runtime_get_sync(&dsi->pdev->dev); | ||
1055 | WARN_ON(r < 0); | ||
1056 | return r < 0 ? r : 0; | ||
1057 | } | ||
1058 | |||
1059 | void dsi_runtime_put(struct platform_device *dsidev) | ||
1060 | { | ||
1061 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | ||
1062 | int r; | ||
1063 | |||
1064 | DSSDBG("dsi_runtime_put\n"); | ||
1065 | |||
1066 | r = pm_runtime_put(&dsi->pdev->dev); | ||
1067 | WARN_ON(r < 0); | ||
1049 | } | 1068 | } |
1050 | 1069 | ||
1051 | /* source clock for DSI PLL. this could also be PCLKFREE */ | 1070 | /* source clock for DSI PLL. this could also be PCLKFREE */ |
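The new dsi_runtime_get()/dsi_runtime_put() helpers above wrap pm_runtime_get_sync()/pm_runtime_put() and fold the positive "already active" return of pm_runtime_get_sync() down to 0. A caller brackets register access with them, as in this sketch; the helper names come from the patch, while the caller and its body are illustrative and assume the helper prototypes are visible (in the real tree they live in dss.h).

/*
 * Sketch of a typical caller: hold a runtime PM reference for as long
 * as DSI registers are being touched, and bail out quietly if the
 * module cannot be powered up.
 */
#include <linux/platform_device.h>

static void example_dump_dsi_regs(struct platform_device *dsidev)
{
        if (dsi_runtime_get(dsidev))
                return;                 /* device could not be resumed */

        /* ... DSI register reads go here, the module is active ... */

        dsi_runtime_put(dsidev);
}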
@@ -1055,9 +1074,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev, | |||
1055 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 1074 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
1056 | 1075 | ||
1057 | if (enable) | 1076 | if (enable) |
1058 | dss_clk_enable(DSS_CLK_SYSCK); | 1077 | clk_enable(dsi->sys_clk); |
1059 | else | 1078 | else |
1060 | dss_clk_disable(DSS_CLK_SYSCK); | 1079 | clk_disable(dsi->sys_clk); |
1061 | 1080 | ||
1062 | if (enable && dsi->pll_locked) { | 1081 | if (enable && dsi->pll_locked) { |
1063 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) | 1082 | if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) |
@@ -1150,10 +1169,11 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev) | |||
1150 | { | 1169 | { |
1151 | unsigned long r; | 1170 | unsigned long r; |
1152 | int dsi_module = dsi_get_dsidev_id(dsidev); | 1171 | int dsi_module = dsi_get_dsidev_id(dsidev); |
1172 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | ||
1153 | 1173 | ||
1154 | if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { | 1174 | if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { |
1155 | /* DSI FCLK source is DSS_CLK_FCK */ | 1175 | /* DSI FCLK source is DSS_CLK_FCK */ |
1156 | r = dss_clk_get_rate(DSS_CLK_FCK); | 1176 | r = clk_get_rate(dsi->dss_clk); |
1157 | } else { | 1177 | } else { |
1158 | /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ | 1178 | /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ |
1159 | r = dsi_get_pll_hsdiv_dsi_rate(dsidev); | 1179 | r = dsi_get_pll_hsdiv_dsi_rate(dsidev); |
@@ -1262,7 +1282,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, | |||
1262 | return -EINVAL; | 1282 | return -EINVAL; |
1263 | 1283 | ||
1264 | if (cinfo->use_sys_clk) { | 1284 | if (cinfo->use_sys_clk) { |
1265 | cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK); | 1285 | cinfo->clkin = clk_get_rate(dsi->sys_clk); |
1266 | /* XXX it is unclear if highfreq should be used | 1286 | /* XXX it is unclear if highfreq should be used |
1267 | * with DSS_SYS_CLK source also */ | 1287 | * with DSS_SYS_CLK source also */ |
1268 | cinfo->highfreq = 0; | 1288 | cinfo->highfreq = 0; |
@@ -1311,7 +1331,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, | |||
1311 | int match = 0; | 1331 | int match = 0; |
1312 | unsigned long dss_sys_clk, max_dss_fck; | 1332 | unsigned long dss_sys_clk, max_dss_fck; |
1313 | 1333 | ||
1314 | dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK); | 1334 | dss_sys_clk = clk_get_rate(dsi->sys_clk); |
1315 | 1335 | ||
1316 | max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); | 1336 | max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); |
1317 | 1337 | ||
@@ -1601,7 +1621,6 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, | |||
1601 | dsi->vdds_dsi_reg = vdds_dsi; | 1621 | dsi->vdds_dsi_reg = vdds_dsi; |
1602 | } | 1622 | } |
1603 | 1623 | ||
1604 | enable_clocks(1); | ||
1605 | dsi_enable_pll_clock(dsidev, 1); | 1624 | dsi_enable_pll_clock(dsidev, 1); |
1606 | /* | 1625 | /* |
1607 | * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. | 1626 | * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. |
@@ -1653,7 +1672,6 @@ err1: | |||
1653 | } | 1672 | } |
1654 | err0: | 1673 | err0: |
1655 | dsi_disable_scp_clk(dsidev); | 1674 | dsi_disable_scp_clk(dsidev); |
1656 | enable_clocks(0); | ||
1657 | dsi_enable_pll_clock(dsidev, 0); | 1675 | dsi_enable_pll_clock(dsidev, 0); |
1658 | return r; | 1676 | return r; |
1659 | } | 1677 | } |
@@ -1671,7 +1689,6 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) | |||
1671 | } | 1689 | } |
1672 | 1690 | ||
1673 | dsi_disable_scp_clk(dsidev); | 1691 | dsi_disable_scp_clk(dsidev); |
1674 | enable_clocks(0); | ||
1675 | dsi_enable_pll_clock(dsidev, 0); | 1692 | dsi_enable_pll_clock(dsidev, 0); |
1676 | 1693 | ||
1677 | DSSDBG("PLL uninit done\n"); | 1694 | DSSDBG("PLL uninit done\n"); |
@@ -1688,7 +1705,8 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, | |||
1688 | dispc_clk_src = dss_get_dispc_clk_source(); | 1705 | dispc_clk_src = dss_get_dispc_clk_source(); |
1689 | dsi_clk_src = dss_get_dsi_clk_source(dsi_module); | 1706 | dsi_clk_src = dss_get_dsi_clk_source(dsi_module); |
1690 | 1707 | ||
1691 | enable_clocks(1); | 1708 | if (dsi_runtime_get(dsidev)) |
1709 | return; | ||
1692 | 1710 | ||
1693 | seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); | 1711 | seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); |
1694 | 1712 | ||
@@ -1731,7 +1749,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, | |||
1731 | 1749 | ||
1732 | seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); | 1750 | seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); |
1733 | 1751 | ||
1734 | enable_clocks(0); | 1752 | dsi_runtime_put(dsidev); |
1735 | } | 1753 | } |
1736 | 1754 | ||
1737 | void dsi_dump_clocks(struct seq_file *s) | 1755 | void dsi_dump_clocks(struct seq_file *s) |
@@ -1873,7 +1891,8 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev, | |||
1873 | { | 1891 | { |
1874 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) | 1892 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) |
1875 | 1893 | ||
1876 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 1894 | if (dsi_runtime_get(dsidev)) |
1895 | return; | ||
1877 | dsi_enable_scp_clk(dsidev); | 1896 | dsi_enable_scp_clk(dsidev); |
1878 | 1897 | ||
1879 | DUMPREG(DSI_REVISION); | 1898 | DUMPREG(DSI_REVISION); |
@@ -1947,7 +1966,7 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev, | |||
1947 | DUMPREG(DSI_PLL_CONFIGURATION2); | 1966 | DUMPREG(DSI_PLL_CONFIGURATION2); |
1948 | 1967 | ||
1949 | dsi_disable_scp_clk(dsidev); | 1968 | dsi_disable_scp_clk(dsidev); |
1950 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 1969 | dsi_runtime_put(dsidev); |
1951 | #undef DUMPREG | 1970 | #undef DUMPREG |
1952 | } | 1971 | } |
1953 | 1972 | ||
@@ -2463,28 +2482,6 @@ static void dsi_cio_uninit(struct platform_device *dsidev) | |||
2463 | dsi->dsi_mux_pads(false); | 2482 | dsi->dsi_mux_pads(false); |
2464 | } | 2483 | } |
2465 | 2484 | ||
2466 | static int _dsi_wait_reset(struct platform_device *dsidev) | ||
2467 | { | ||
2468 | int t = 0; | ||
2469 | |||
2470 | while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) { | ||
2471 | if (++t > 5) { | ||
2472 | DSSERR("soft reset failed\n"); | ||
2473 | return -ENODEV; | ||
2474 | } | ||
2475 | udelay(1); | ||
2476 | } | ||
2477 | |||
2478 | return 0; | ||
2479 | } | ||
2480 | |||
2481 | static int _dsi_reset(struct platform_device *dsidev) | ||
2482 | { | ||
2483 | /* Soft reset */ | ||
2484 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1); | ||
2485 | return _dsi_wait_reset(dsidev); | ||
2486 | } | ||
2487 | |||
2488 | static void dsi_config_tx_fifo(struct platform_device *dsidev, | 2485 | static void dsi_config_tx_fifo(struct platform_device *dsidev, |
2489 | enum fifo_size size1, enum fifo_size size2, | 2486 | enum fifo_size size1, enum fifo_size size2, |
2490 | enum fifo_size size3, enum fifo_size size4) | 2487 | enum fifo_size size3, enum fifo_size size4) |
@@ -3386,6 +3383,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev) | |||
3386 | dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, | 3383 | dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, |
3387 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); | 3384 | DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); |
3388 | 3385 | ||
3386 | /* Reset LANEx_ULPS_SIG2 */ | ||
3387 | REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2), | ||
3388 | 7, 5); | ||
3389 | |||
3389 | dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); | 3390 | dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); |
3390 | 3391 | ||
3391 | dsi_if_enable(dsidev, false); | 3392 | dsi_if_enable(dsidev, false); |
@@ -4198,22 +4199,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, | |||
4198 | dsi_pll_uninit(dsidev, disconnect_lanes); | 4199 | dsi_pll_uninit(dsidev, disconnect_lanes); |
4199 | } | 4200 | } |
4200 | 4201 | ||
4201 | static int dsi_core_init(struct platform_device *dsidev) | ||
4202 | { | ||
4203 | /* Autoidle */ | ||
4204 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0); | ||
4205 | |||
4206 | /* ENWAKEUP */ | ||
4207 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2); | ||
4208 | |||
4209 | /* SIDLEMODE smart-idle */ | ||
4210 | REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3); | ||
4211 | |||
4212 | _dsi_initialize_irq(dsidev); | ||
4213 | |||
4214 | return 0; | ||
4215 | } | ||
4216 | |||
4217 | int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) | 4202 | int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) |
4218 | { | 4203 | { |
4219 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | 4204 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); |
@@ -4229,37 +4214,37 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) | |||
4229 | r = omap_dss_start_device(dssdev); | 4214 | r = omap_dss_start_device(dssdev); |
4230 | if (r) { | 4215 | if (r) { |
4231 | DSSERR("failed to start device\n"); | 4216 | DSSERR("failed to start device\n"); |
4232 | goto err0; | 4217 | goto err_start_dev; |
4233 | } | 4218 | } |
4234 | 4219 | ||
4235 | enable_clocks(1); | 4220 | r = dsi_runtime_get(dsidev); |
4236 | dsi_enable_pll_clock(dsidev, 1); | ||
4237 | |||
4238 | r = _dsi_reset(dsidev); | ||
4239 | if (r) | 4221 | if (r) |
4240 | goto err1; | 4222 | goto err_get_dsi; |
4241 | 4223 | ||
4242 | dsi_core_init(dsidev); | 4224 | dsi_enable_pll_clock(dsidev, 1); |
4225 | |||
4226 | _dsi_initialize_irq(dsidev); | ||
4243 | 4227 | ||
4244 | r = dsi_display_init_dispc(dssdev); | 4228 | r = dsi_display_init_dispc(dssdev); |
4245 | if (r) | 4229 | if (r) |
4246 | goto err1; | 4230 | goto err_init_dispc; |
4247 | 4231 | ||
4248 | r = dsi_display_init_dsi(dssdev); | 4232 | r = dsi_display_init_dsi(dssdev); |
4249 | if (r) | 4233 | if (r) |
4250 | goto err2; | 4234 | goto err_init_dsi; |
4251 | 4235 | ||
4252 | mutex_unlock(&dsi->lock); | 4236 | mutex_unlock(&dsi->lock); |
4253 | 4237 | ||
4254 | return 0; | 4238 | return 0; |
4255 | 4239 | ||
4256 | err2: | 4240 | err_init_dsi: |
4257 | dsi_display_uninit_dispc(dssdev); | 4241 | dsi_display_uninit_dispc(dssdev); |
4258 | err1: | 4242 | err_init_dispc: |
4259 | enable_clocks(0); | ||
4260 | dsi_enable_pll_clock(dsidev, 0); | 4243 | dsi_enable_pll_clock(dsidev, 0); |
4244 | dsi_runtime_put(dsidev); | ||
4245 | err_get_dsi: | ||
4261 | omap_dss_stop_device(dssdev); | 4246 | omap_dss_stop_device(dssdev); |
4262 | err0: | 4247 | err_start_dev: |
4263 | mutex_unlock(&dsi->lock); | 4248 | mutex_unlock(&dsi->lock); |
4264 | DSSDBG("dsi_display_enable FAILED\n"); | 4249 | DSSDBG("dsi_display_enable FAILED\n"); |
4265 | return r; | 4250 | return r; |
@@ -4278,11 +4263,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, | |||
4278 | 4263 | ||
4279 | mutex_lock(&dsi->lock); | 4264 | mutex_lock(&dsi->lock); |
4280 | 4265 | ||
4266 | dsi_sync_vc(dsidev, 0); | ||
4267 | dsi_sync_vc(dsidev, 1); | ||
4268 | dsi_sync_vc(dsidev, 2); | ||
4269 | dsi_sync_vc(dsidev, 3); | ||
4270 | |||
4281 | dsi_display_uninit_dispc(dssdev); | 4271 | dsi_display_uninit_dispc(dssdev); |
4282 | 4272 | ||
4283 | dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); | 4273 | dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); |
4284 | 4274 | ||
4285 | enable_clocks(0); | 4275 | dsi_runtime_put(dsidev); |
4286 | dsi_enable_pll_clock(dsidev, 0); | 4276 | dsi_enable_pll_clock(dsidev, 0); |
4287 | 4277 | ||
4288 | omap_dss_stop_device(dssdev); | 4278 | omap_dss_stop_device(dssdev); |
@@ -4302,16 +4292,11 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) | |||
4302 | EXPORT_SYMBOL(omapdss_dsi_enable_te); | 4292 | EXPORT_SYMBOL(omapdss_dsi_enable_te); |
4303 | 4293 | ||
4304 | void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, | 4294 | void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, |
4305 | u32 fifo_size, enum omap_burst_size *burst_size, | 4295 | u32 fifo_size, u32 burst_size, |
4306 | u32 *fifo_low, u32 *fifo_high) | 4296 | u32 *fifo_low, u32 *fifo_high) |
4307 | { | 4297 | { |
4308 | unsigned burst_size_bytes; | 4298 | *fifo_high = fifo_size - burst_size; |
4309 | 4299 | *fifo_low = fifo_size - burst_size * 2; | |
4310 | *burst_size = OMAP_DSS_BURST_16x32; | ||
4311 | burst_size_bytes = 16 * 32 / 8; | ||
4312 | |||
4313 | *fifo_high = fifo_size - burst_size_bytes; | ||
4314 | *fifo_low = fifo_size - burst_size_bytes * 2; | ||
4315 | } | 4300 | } |
4316 | 4301 | ||
4317 | int dsi_init_display(struct omap_dss_device *dssdev) | 4302 | int dsi_init_display(struct omap_dss_device *dssdev) |
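dsi_get_overlay_fifo_thresholds() now takes the burst size in bytes instead of hard-coding a 16x32-bit (64-byte) burst. Assuming, purely for illustration, a 1024-byte FIFO and the same 64-byte burst, the thresholds come out as before:

/* Illustrative numbers only; they are not taken from the patch. */
#include <linux/types.h>

static void example_thresholds(void)
{
        u32 fifo_size = 1024, burst_size = 64;
        u32 fifo_high = fifo_size - burst_size;         /* 960 bytes */
        u32 fifo_low = fifo_size - burst_size * 2;      /* 896 bytes */

        (void)fifo_high;
        (void)fifo_low;
}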
@@ -4437,7 +4422,47 @@ static void dsi_calc_clock_param_ranges(struct platform_device *dsidev) | |||
4437 | dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); | 4422 | dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); |
4438 | } | 4423 | } |
4439 | 4424 | ||
4440 | static int dsi_init(struct platform_device *dsidev) | 4425 | static int dsi_get_clocks(struct platform_device *dsidev) |
4426 | { | ||
4427 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | ||
4428 | struct clk *clk; | ||
4429 | |||
4430 | clk = clk_get(&dsidev->dev, "fck"); | ||
4431 | if (IS_ERR(clk)) { | ||
4432 | DSSERR("can't get fck\n"); | ||
4433 | return PTR_ERR(clk); | ||
4434 | } | ||
4435 | |||
4436 | dsi->dss_clk = clk; | ||
4437 | |||
4438 | if (cpu_is_omap34xx() || cpu_is_omap3630()) | ||
4439 | clk = clk_get(&dsidev->dev, "dss2_alwon_fck"); | ||
4440 | else | ||
4441 | clk = clk_get(&dsidev->dev, "sys_clk"); | ||
4442 | if (IS_ERR(clk)) { | ||
4443 | DSSERR("can't get sys_clk\n"); | ||
4444 | clk_put(dsi->dss_clk); | ||
4445 | dsi->dss_clk = NULL; | ||
4446 | return PTR_ERR(clk); | ||
4447 | } | ||
4448 | |||
4449 | dsi->sys_clk = clk; | ||
4450 | |||
4451 | return 0; | ||
4452 | } | ||
4453 | |||
4454 | static void dsi_put_clocks(struct platform_device *dsidev) | ||
4455 | { | ||
4456 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | ||
4457 | |||
4458 | if (dsi->dss_clk) | ||
4459 | clk_put(dsi->dss_clk); | ||
4460 | if (dsi->sys_clk) | ||
4461 | clk_put(dsi->sys_clk); | ||
4462 | } | ||
4463 | |||
4464 | /* DSI1 HW IP initialisation */ | ||
4465 | static int omap_dsi1hw_probe(struct platform_device *dsidev) | ||
4441 | { | 4466 | { |
4442 | struct omap_display_platform_data *dss_plat_data; | 4467 | struct omap_display_platform_data *dss_plat_data; |
4443 | struct omap_dss_board_info *board_info; | 4468 | struct omap_dss_board_info *board_info; |
@@ -4449,7 +4474,7 @@ static int dsi_init(struct platform_device *dsidev) | |||
4449 | dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); | 4474 | dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); |
4450 | if (!dsi) { | 4475 | if (!dsi) { |
4451 | r = -ENOMEM; | 4476 | r = -ENOMEM; |
4452 | goto err0; | 4477 | goto err_alloc; |
4453 | } | 4478 | } |
4454 | 4479 | ||
4455 | dsi->pdev = dsidev; | 4480 | dsi->pdev = dsidev; |
@@ -4472,6 +4497,12 @@ static int dsi_init(struct platform_device *dsidev) | |||
4472 | mutex_init(&dsi->lock); | 4497 | mutex_init(&dsi->lock); |
4473 | sema_init(&dsi->bus_lock, 1); | 4498 | sema_init(&dsi->bus_lock, 1); |
4474 | 4499 | ||
4500 | r = dsi_get_clocks(dsidev); | ||
4501 | if (r) | ||
4502 | goto err_get_clk; | ||
4503 | |||
4504 | pm_runtime_enable(&dsidev->dev); | ||
4505 | |||
4475 | INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work, | 4506 | INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work, |
4476 | dsi_framedone_timeout_work_callback); | 4507 | dsi_framedone_timeout_work_callback); |
4477 | 4508 | ||
@@ -4484,26 +4515,26 @@ static int dsi_init(struct platform_device *dsidev) | |||
4484 | if (!dsi_mem) { | 4515 | if (!dsi_mem) { |
4485 | DSSERR("can't get IORESOURCE_MEM DSI\n"); | 4516 | DSSERR("can't get IORESOURCE_MEM DSI\n"); |
4486 | r = -EINVAL; | 4517 | r = -EINVAL; |
4487 | goto err1; | 4518 | goto err_ioremap; |
4488 | } | 4519 | } |
4489 | dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem)); | 4520 | dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem)); |
4490 | if (!dsi->base) { | 4521 | if (!dsi->base) { |
4491 | DSSERR("can't ioremap DSI\n"); | 4522 | DSSERR("can't ioremap DSI\n"); |
4492 | r = -ENOMEM; | 4523 | r = -ENOMEM; |
4493 | goto err1; | 4524 | goto err_ioremap; |
4494 | } | 4525 | } |
4495 | dsi->irq = platform_get_irq(dsi->pdev, 0); | 4526 | dsi->irq = platform_get_irq(dsi->pdev, 0); |
4496 | if (dsi->irq < 0) { | 4527 | if (dsi->irq < 0) { |
4497 | DSSERR("platform_get_irq failed\n"); | 4528 | DSSERR("platform_get_irq failed\n"); |
4498 | r = -ENODEV; | 4529 | r = -ENODEV; |
4499 | goto err2; | 4530 | goto err_get_irq; |
4500 | } | 4531 | } |
4501 | 4532 | ||
4502 | r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED, | 4533 | r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED, |
4503 | dev_name(&dsidev->dev), dsi->pdev); | 4534 | dev_name(&dsidev->dev), dsi->pdev); |
4504 | if (r < 0) { | 4535 | if (r < 0) { |
4505 | DSSERR("request_irq failed\n"); | 4536 | DSSERR("request_irq failed\n"); |
4506 | goto err2; | 4537 | goto err_get_irq; |
4507 | } | 4538 | } |
4508 | 4539 | ||
4509 | /* DSI VCs initialization */ | 4540 | /* DSI VCs initialization */ |
@@ -4515,7 +4546,9 @@ static int dsi_init(struct platform_device *dsidev) | |||
4515 | 4546 | ||
4516 | dsi_calc_clock_param_ranges(dsidev); | 4547 | dsi_calc_clock_param_ranges(dsidev); |
4517 | 4548 | ||
4518 | enable_clocks(1); | 4549 | r = dsi_runtime_get(dsidev); |
4550 | if (r) | ||
4551 | goto err_get_dsi; | ||
4519 | 4552 | ||
4520 | rev = dsi_read_reg(dsidev, DSI_REVISION); | 4553 | rev = dsi_read_reg(dsidev, DSI_REVISION); |
4521 | dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", | 4554 | dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", |
@@ -4523,21 +4556,32 @@ static int dsi_init(struct platform_device *dsidev) | |||
4523 | 4556 | ||
4524 | dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev); | 4557 | dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev); |
4525 | 4558 | ||
4526 | enable_clocks(0); | 4559 | dsi_runtime_put(dsidev); |
4527 | 4560 | ||
4528 | return 0; | 4561 | return 0; |
4529 | err2: | 4562 | |
4563 | err_get_dsi: | ||
4564 | free_irq(dsi->irq, dsi->pdev); | ||
4565 | err_get_irq: | ||
4530 | iounmap(dsi->base); | 4566 | iounmap(dsi->base); |
4531 | err1: | 4567 | err_ioremap: |
4568 | pm_runtime_disable(&dsidev->dev); | ||
4569 | err_get_clk: | ||
4532 | kfree(dsi); | 4570 | kfree(dsi); |
4533 | err0: | 4571 | err_alloc: |
4534 | return r; | 4572 | return r; |
4535 | } | 4573 | } |
4536 | 4574 | ||
4537 | static void dsi_exit(struct platform_device *dsidev) | 4575 | static int omap_dsi1hw_remove(struct platform_device *dsidev) |
4538 | { | 4576 | { |
4539 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 4577 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
4540 | 4578 | ||
4579 | WARN_ON(dsi->scp_clk_refcount > 0); | ||
4580 | |||
4581 | pm_runtime_disable(&dsidev->dev); | ||
4582 | |||
4583 | dsi_put_clocks(dsidev); | ||
4584 | |||
4541 | if (dsi->vdds_dsi_reg != NULL) { | 4585 | if (dsi->vdds_dsi_reg != NULL) { |
4542 | if (dsi->vdds_dsi_enabled) { | 4586 | if (dsi->vdds_dsi_enabled) { |
4543 | regulator_disable(dsi->vdds_dsi_reg); | 4587 | regulator_disable(dsi->vdds_dsi_reg); |
@@ -4553,38 +4597,56 @@ static void dsi_exit(struct platform_device *dsidev) | |||
4553 | 4597 | ||
4554 | kfree(dsi); | 4598 | kfree(dsi); |
4555 | 4599 | ||
4556 | DSSDBG("omap_dsi_exit\n"); | 4600 | return 0; |
4557 | } | 4601 | } |
4558 | 4602 | ||
4559 | /* DSI1 HW IP initialisation */ | 4603 | static int dsi_runtime_suspend(struct device *dev) |
4560 | static int omap_dsi1hw_probe(struct platform_device *dsidev) | ||
4561 | { | 4604 | { |
4562 | int r; | 4605 | struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev)); |
4563 | 4606 | ||
4564 | r = dsi_init(dsidev); | 4607 | clk_disable(dsi->dss_clk); |
4565 | if (r) { | 4608 | |
4566 | DSSERR("Failed to initialize DSI\n"); | 4609 | dispc_runtime_put(); |
4567 | goto err_dsi; | 4610 | dss_runtime_put(); |
4568 | } | 4611 | |
4569 | err_dsi: | 4612 | return 0; |
4570 | return r; | ||
4571 | } | 4613 | } |
4572 | 4614 | ||
4573 | static int omap_dsi1hw_remove(struct platform_device *dsidev) | 4615 | static int dsi_runtime_resume(struct device *dev) |
4574 | { | 4616 | { |
4575 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 4617 | struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev)); |
4618 | int r; | ||
4619 | |||
4620 | r = dss_runtime_get(); | ||
4621 | if (r) | ||
4622 | goto err_get_dss; | ||
4623 | |||
4624 | r = dispc_runtime_get(); | ||
4625 | if (r) | ||
4626 | goto err_get_dispc; | ||
4627 | |||
4628 | clk_enable(dsi->dss_clk); | ||
4576 | 4629 | ||
4577 | dsi_exit(dsidev); | ||
4578 | WARN_ON(dsi->scp_clk_refcount > 0); | ||
4579 | return 0; | 4630 | return 0; |
4631 | |||
4632 | err_get_dispc: | ||
4633 | dss_runtime_put(); | ||
4634 | err_get_dss: | ||
4635 | return r; | ||
4580 | } | 4636 | } |
4581 | 4637 | ||
4638 | static const struct dev_pm_ops dsi_pm_ops = { | ||
4639 | .runtime_suspend = dsi_runtime_suspend, | ||
4640 | .runtime_resume = dsi_runtime_resume, | ||
4641 | }; | ||
4642 | |||
4582 | static struct platform_driver omap_dsi1hw_driver = { | 4643 | static struct platform_driver omap_dsi1hw_driver = { |
4583 | .probe = omap_dsi1hw_probe, | 4644 | .probe = omap_dsi1hw_probe, |
4584 | .remove = omap_dsi1hw_remove, | 4645 | .remove = omap_dsi1hw_remove, |
4585 | .driver = { | 4646 | .driver = { |
4586 | .name = "omapdss_dsi1", | 4647 | .name = "omapdss_dsi1", |
4587 | .owner = THIS_MODULE, | 4648 | .owner = THIS_MODULE, |
4649 | .pm = &dsi_pm_ops, | ||
4588 | }, | 4650 | }, |
4589 | }; | 4651 | }; |
4590 | 4652 | ||
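Taken together, the dsi.c changes convert the driver to runtime PM: probe looks up its clocks, calls pm_runtime_enable(), and the runtime_suspend/runtime_resume callbacks are wired into the platform driver through dev_pm_ops. The sketch below shows only that wiring; the example_* names are placeholders, while the structures and calls are the standard kernel APIs the patch uses.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static struct clk *example_fck;

static int example_probe(struct platform_device *pdev)
{
        example_fck = clk_get(&pdev->dev, "fck");
        if (IS_ERR(example_fck))
                return PTR_ERR(example_fck);

        pm_runtime_enable(&pdev->dev);  /* callbacks below become active */
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        pm_runtime_disable(&pdev->dev);
        clk_put(example_fck);
        return 0;
}

static int example_runtime_suspend(struct device *dev)
{
        clk_disable(example_fck);       /* gate the functional clock */
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        clk_enable(example_fck);
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        .runtime_suspend = example_runtime_suspend,
        .runtime_resume = example_runtime_resume,
};

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
                .name   = "example",
                .owner  = THIS_MODULE,
                .pm     = &example_pm_ops,
        },
};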
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index d9489d5c4f08..0f9c3a6457a5 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
30 | #include <linux/clk.h> | 30 | #include <linux/clk.h> |
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/pm_runtime.h> | ||
31 | 33 | ||
32 | #include <video/omapdss.h> | 34 | #include <video/omapdss.h> |
33 | #include <plat/clock.h> | 35 | #include <plat/clock.h> |
@@ -59,15 +61,9 @@ struct dss_reg { | |||
59 | static struct { | 61 | static struct { |
60 | struct platform_device *pdev; | 62 | struct platform_device *pdev; |
61 | void __iomem *base; | 63 | void __iomem *base; |
62 | int ctx_id; | ||
63 | 64 | ||
64 | struct clk *dpll4_m4_ck; | 65 | struct clk *dpll4_m4_ck; |
65 | struct clk *dss_ick; | 66 | struct clk *dss_clk; |
66 | struct clk *dss_fck; | ||
67 | struct clk *dss_sys_clk; | ||
68 | struct clk *dss_tv_fck; | ||
69 | struct clk *dss_video_fck; | ||
70 | unsigned num_clks_enabled; | ||
71 | 67 | ||
72 | unsigned long cache_req_pck; | 68 | unsigned long cache_req_pck; |
73 | unsigned long cache_prate; | 69 | unsigned long cache_prate; |
@@ -78,6 +74,7 @@ static struct { | |||
78 | enum omap_dss_clk_source dispc_clk_source; | 74 | enum omap_dss_clk_source dispc_clk_source; |
79 | enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; | 75 | enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; |
80 | 76 | ||
77 | bool ctx_valid; | ||
81 | u32 ctx[DSS_SZ_REGS / sizeof(u32)]; | 78 | u32 ctx[DSS_SZ_REGS / sizeof(u32)]; |
82 | } dss; | 79 | } dss; |
83 | 80 | ||
@@ -87,13 +84,6 @@ static const char * const dss_generic_clk_source_names[] = { | |||
87 | [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", | 84 | [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", |
88 | }; | 85 | }; |
89 | 86 | ||
90 | static void dss_clk_enable_all_no_ctx(void); | ||
91 | static void dss_clk_disable_all_no_ctx(void); | ||
92 | static void dss_clk_enable_no_ctx(enum dss_clock clks); | ||
93 | static void dss_clk_disable_no_ctx(enum dss_clock clks); | ||
94 | |||
95 | static int _omap_dss_wait_reset(void); | ||
96 | |||
97 | static inline void dss_write_reg(const struct dss_reg idx, u32 val) | 87 | static inline void dss_write_reg(const struct dss_reg idx, u32 val) |
98 | { | 88 | { |
99 | __raw_writel(val, dss.base + idx.idx); | 89 | __raw_writel(val, dss.base + idx.idx); |
@@ -109,12 +99,10 @@ static inline u32 dss_read_reg(const struct dss_reg idx) | |||
109 | #define RR(reg) \ | 99 | #define RR(reg) \ |
110 | dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)]) | 100 | dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)]) |
111 | 101 | ||
112 | void dss_save_context(void) | 102 | static void dss_save_context(void) |
113 | { | 103 | { |
114 | if (cpu_is_omap24xx()) | 104 | DSSDBG("dss_save_context\n"); |
115 | return; | ||
116 | 105 | ||
117 | SR(SYSCONFIG); | ||
118 | SR(CONTROL); | 106 | SR(CONTROL); |
119 | 107 | ||
120 | if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & | 108 | if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & |
@@ -122,14 +110,19 @@ void dss_save_context(void) | |||
122 | SR(SDI_CONTROL); | 110 | SR(SDI_CONTROL); |
123 | SR(PLL_CONTROL); | 111 | SR(PLL_CONTROL); |
124 | } | 112 | } |
113 | |||
114 | dss.ctx_valid = true; | ||
115 | |||
116 | DSSDBG("context saved\n"); | ||
125 | } | 117 | } |
126 | 118 | ||
127 | void dss_restore_context(void) | 119 | static void dss_restore_context(void) |
128 | { | 120 | { |
129 | if (_omap_dss_wait_reset()) | 121 | DSSDBG("dss_restore_context\n"); |
130 | DSSERR("DSS not coming out of reset after sleep\n"); | 122 | |
123 | if (!dss.ctx_valid) | ||
124 | return; | ||
131 | 125 | ||
132 | RR(SYSCONFIG); | ||
133 | RR(CONTROL); | 126 | RR(CONTROL); |
134 | 127 | ||
135 | if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & | 128 | if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & |
@@ -137,6 +130,8 @@ void dss_restore_context(void) | |||
137 | RR(SDI_CONTROL); | 130 | RR(SDI_CONTROL); |
138 | RR(PLL_CONTROL); | 131 | RR(PLL_CONTROL); |
139 | } | 132 | } |
133 | |||
134 | DSSDBG("context restored\n"); | ||
140 | } | 135 | } |
141 | 136 | ||
142 | #undef SR | 137 | #undef SR |
@@ -234,6 +229,7 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) | |||
234 | return dss_generic_clk_source_names[clk_src]; | 229 | return dss_generic_clk_source_names[clk_src]; |
235 | } | 230 | } |
236 | 231 | ||
232 | |||
237 | void dss_dump_clocks(struct seq_file *s) | 233 | void dss_dump_clocks(struct seq_file *s) |
238 | { | 234 | { |
239 | unsigned long dpll4_ck_rate; | 235 | unsigned long dpll4_ck_rate; |
@@ -241,13 +237,14 @@ void dss_dump_clocks(struct seq_file *s) | |||
241 | const char *fclk_name, *fclk_real_name; | 237 | const char *fclk_name, *fclk_real_name; |
242 | unsigned long fclk_rate; | 238 | unsigned long fclk_rate; |
243 | 239 | ||
244 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 240 | if (dss_runtime_get()) |
241 | return; | ||
245 | 242 | ||
246 | seq_printf(s, "- DSS -\n"); | 243 | seq_printf(s, "- DSS -\n"); |
247 | 244 | ||
248 | fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); | 245 | fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); |
249 | fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); | 246 | fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); |
250 | fclk_rate = dss_clk_get_rate(DSS_CLK_FCK); | 247 | fclk_rate = clk_get_rate(dss.dss_clk); |
251 | 248 | ||
252 | if (dss.dpll4_m4_ck) { | 249 | if (dss.dpll4_m4_ck) { |
253 | dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck)); | 250 | dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck)); |
@@ -273,14 +270,15 @@ void dss_dump_clocks(struct seq_file *s) | |||
273 | fclk_rate); | 270 | fclk_rate); |
274 | } | 271 | } |
275 | 272 | ||
276 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 273 | dss_runtime_put(); |
277 | } | 274 | } |
278 | 275 | ||
279 | void dss_dump_regs(struct seq_file *s) | 276 | void dss_dump_regs(struct seq_file *s) |
280 | { | 277 | { |
281 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) | 278 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) |
282 | 279 | ||
283 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 280 | if (dss_runtime_get()) |
281 | return; | ||
284 | 282 | ||
285 | DUMPREG(DSS_REVISION); | 283 | DUMPREG(DSS_REVISION); |
286 | DUMPREG(DSS_SYSCONFIG); | 284 | DUMPREG(DSS_SYSCONFIG); |
@@ -294,7 +292,7 @@ void dss_dump_regs(struct seq_file *s) | |||
294 | DUMPREG(DSS_SDI_STATUS); | 292 | DUMPREG(DSS_SDI_STATUS); |
295 | } | 293 | } |
296 | 294 | ||
297 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 295 | dss_runtime_put(); |
298 | #undef DUMPREG | 296 | #undef DUMPREG |
299 | } | 297 | } |
300 | 298 | ||
@@ -437,7 +435,7 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo) | |||
437 | } else { | 435 | } else { |
438 | if (cinfo->fck_div != 0) | 436 | if (cinfo->fck_div != 0) |
439 | return -EINVAL; | 437 | return -EINVAL; |
440 | cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK); | 438 | cinfo->fck = clk_get_rate(dss.dss_clk); |
441 | } | 439 | } |
442 | 440 | ||
443 | return 0; | 441 | return 0; |
@@ -467,7 +465,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo) | |||
467 | 465 | ||
468 | int dss_get_clock_div(struct dss_clock_info *cinfo) | 466 | int dss_get_clock_div(struct dss_clock_info *cinfo) |
469 | { | 467 | { |
470 | cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK); | 468 | cinfo->fck = clk_get_rate(dss.dss_clk); |
471 | 469 | ||
472 | if (dss.dpll4_m4_ck) { | 470 | if (dss.dpll4_m4_ck) { |
473 | unsigned long prate; | 471 | unsigned long prate; |
@@ -512,7 +510,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck, | |||
512 | 510 | ||
513 | max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); | 511 | max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); |
514 | 512 | ||
515 | fck = dss_clk_get_rate(DSS_CLK_FCK); | 513 | fck = clk_get_rate(dss.dss_clk); |
516 | if (req_pck == dss.cache_req_pck && | 514 | if (req_pck == dss.cache_req_pck && |
517 | ((cpu_is_omap34xx() && prate == dss.cache_prate) || | 515 | ((cpu_is_omap34xx() && prate == dss.cache_prate) || |
518 | dss.cache_dss_cinfo.fck == fck)) { | 516 | dss.cache_dss_cinfo.fck == fck)) { |
@@ -539,7 +537,7 @@ retry: | |||
539 | if (dss.dpll4_m4_ck == NULL) { | 537 | if (dss.dpll4_m4_ck == NULL) { |
540 | struct dispc_clock_info cur_dispc; | 538 | struct dispc_clock_info cur_dispc; |
541 | /* XXX can we change the clock on omap2? */ | 539 | /* XXX can we change the clock on omap2? */ |
542 | fck = dss_clk_get_rate(DSS_CLK_FCK); | 540 | fck = clk_get_rate(dss.dss_clk); |
543 | fck_div = 1; | 541 | fck_div = 1; |
544 | 542 | ||
545 | dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); | 543 | dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); |
@@ -616,28 +614,6 @@ found: | |||
616 | return 0; | 614 | return 0; |
617 | } | 615 | } |
618 | 616 | ||
619 | static int _omap_dss_wait_reset(void) | ||
620 | { | ||
621 | int t = 0; | ||
622 | |||
623 | while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) { | ||
624 | if (++t > 1000) { | ||
625 | DSSERR("soft reset failed\n"); | ||
626 | return -ENODEV; | ||
627 | } | ||
628 | udelay(1); | ||
629 | } | ||
630 | |||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | static int _omap_dss_reset(void) | ||
635 | { | ||
636 | /* Soft reset */ | ||
637 | REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1); | ||
638 | return _omap_dss_wait_reset(); | ||
639 | } | ||
640 | |||
641 | void dss_set_venc_output(enum omap_dss_venc_type type) | 617 | void dss_set_venc_output(enum omap_dss_venc_type type) |
642 | { | 618 | { |
643 | int l = 0; | 619 | int l = 0; |
@@ -663,424 +639,88 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi) | |||
663 | REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */ | 639 | REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */ |
664 | } | 640 | } |
665 | 641 | ||
666 | static int dss_init(void) | 642 | static int dss_get_clocks(void) |
667 | { | 643 | { |
644 | struct clk *clk; | ||
668 | int r; | 645 | int r; |
669 | u32 rev; | ||
670 | struct resource *dss_mem; | ||
671 | struct clk *dpll4_m4_ck; | ||
672 | 646 | ||
673 | dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); | 647 | clk = clk_get(&dss.pdev->dev, "fck"); |
674 | if (!dss_mem) { | 648 | if (IS_ERR(clk)) { |
675 | DSSERR("can't get IORESOURCE_MEM DSS\n"); | 649 | DSSERR("can't get clock fck\n"); |
676 | r = -EINVAL; | 650 | r = PTR_ERR(clk); |
677 | goto fail0; | 651 | goto err; |
678 | } | ||
679 | dss.base = ioremap(dss_mem->start, resource_size(dss_mem)); | ||
680 | if (!dss.base) { | ||
681 | DSSERR("can't ioremap DSS\n"); | ||
682 | r = -ENOMEM; | ||
683 | goto fail0; | ||
684 | } | 652 | } |
685 | 653 | ||
686 | /* disable LCD and DIGIT output. This seems to fix the synclost | 654 | dss.dss_clk = clk; |
687 | * problem that we get, if the bootloader starts the DSS and | ||
688 | * the kernel resets it */ | ||
689 | omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); | ||
690 | |||
691 | #ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET | ||
692 | /* We need to wait here a bit, otherwise we sometimes start to | ||
693 | * get synclost errors, and after that only power cycle will | ||
694 | * restore DSS functionality. I have no idea why this happens. | ||
695 | * And we have to wait _before_ resetting the DSS, but after | ||
696 | * enabling clocks. | ||
697 | * | ||
698 | * This bug was at least present on OMAP3430. It's unknown | ||
699 | * if it happens on OMAP2 or OMAP3630. | ||
700 | */ | ||
701 | msleep(50); | ||
702 | #endif | ||
703 | |||
704 | _omap_dss_reset(); | ||
705 | 655 | ||
706 | /* autoidle */ | ||
707 | REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0); | ||
708 | |||
709 | /* Select DPLL */ | ||
710 | REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); | ||
711 | |||
712 | #ifdef CONFIG_OMAP2_DSS_VENC | ||
713 | REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ | ||
714 | REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ | ||
715 | REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ | ||
716 | #endif | ||
717 | if (cpu_is_omap34xx()) { | 656 | if (cpu_is_omap34xx()) { |
718 | dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck"); | 657 | clk = clk_get(NULL, "dpll4_m4_ck"); |
719 | if (IS_ERR(dpll4_m4_ck)) { | 658 | if (IS_ERR(clk)) { |
720 | DSSERR("Failed to get dpll4_m4_ck\n"); | 659 | DSSERR("Failed to get dpll4_m4_ck\n"); |
721 | r = PTR_ERR(dpll4_m4_ck); | 660 | r = PTR_ERR(clk); |
722 | goto fail1; | 661 | goto err; |
723 | } | 662 | } |
724 | } else if (cpu_is_omap44xx()) { | 663 | } else if (cpu_is_omap44xx()) { |
725 | dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck"); | 664 | clk = clk_get(NULL, "dpll_per_m5x2_ck"); |
726 | if (IS_ERR(dpll4_m4_ck)) { | 665 | if (IS_ERR(clk)) { |
727 | DSSERR("Failed to get dpll4_m4_ck\n"); | 666 | DSSERR("Failed to get dpll_per_m5x2_ck\n"); |
728 | r = PTR_ERR(dpll4_m4_ck); | 667 | r = PTR_ERR(clk); |
729 | goto fail1; | 668 | goto err; |
730 | } | 669 | } |
731 | } else { /* omap24xx */ | 670 | } else { /* omap24xx */ |
732 | dpll4_m4_ck = NULL; | 671 | clk = NULL; |
733 | } | 672 | } |
734 | 673 | ||
735 | dss.dpll4_m4_ck = dpll4_m4_ck; | 674 | dss.dpll4_m4_ck = clk; |
736 | |||
737 | dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; | ||
738 | dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; | ||
739 | dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; | ||
740 | dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; | ||
741 | dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; | ||
742 | |||
743 | dss_save_context(); | ||
744 | |||
745 | rev = dss_read_reg(DSS_REVISION); | ||
746 | printk(KERN_INFO "OMAP DSS rev %d.%d\n", | ||
747 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); | ||
748 | 675 | ||
749 | return 0; | 676 | return 0; |
750 | 677 | ||
751 | fail1: | 678 | err: |
752 | iounmap(dss.base); | 679 | if (dss.dss_clk) |
753 | fail0: | 680 | clk_put(dss.dss_clk); |
754 | return r; | ||
755 | } | ||
756 | |||
757 | static void dss_exit(void) | ||
758 | { | ||
759 | if (dss.dpll4_m4_ck) | 681 | if (dss.dpll4_m4_ck) |
760 | clk_put(dss.dpll4_m4_ck); | 682 | clk_put(dss.dpll4_m4_ck); |
761 | 683 | ||
762 | iounmap(dss.base); | ||
763 | } | ||
764 | |||
765 | /* CONTEXT */ | ||
766 | static int dss_get_ctx_id(void) | ||
767 | { | ||
768 | struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data; | ||
769 | int r; | ||
770 | |||
771 | if (!pdata->board_data->get_last_off_on_transaction_id) | ||
772 | return 0; | ||
773 | r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev); | ||
774 | if (r < 0) { | ||
775 | dev_err(&dss.pdev->dev, "getting transaction ID failed, " | ||
776 | "will force context restore\n"); | ||
777 | r = -1; | ||
778 | } | ||
779 | return r; | ||
780 | } | ||
781 | |||
782 | int dss_need_ctx_restore(void) | ||
783 | { | ||
784 | int id = dss_get_ctx_id(); | ||
785 | |||
786 | if (id < 0 || id != dss.ctx_id) { | ||
787 | DSSDBG("ctx id %d -> id %d\n", | ||
788 | dss.ctx_id, id); | ||
789 | dss.ctx_id = id; | ||
790 | return 1; | ||
791 | } else { | ||
792 | return 0; | ||
793 | } | ||
794 | } | ||
795 | |||
796 | static void save_all_ctx(void) | ||
797 | { | ||
798 | DSSDBG("save context\n"); | ||
799 | |||
800 | dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK); | ||
801 | |||
802 | dss_save_context(); | ||
803 | dispc_save_context(); | ||
804 | #ifdef CONFIG_OMAP2_DSS_DSI | ||
805 | dsi_save_context(); | ||
806 | #endif | ||
807 | |||
808 | dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK); | ||
809 | } | ||
810 | |||
811 | static void restore_all_ctx(void) | ||
812 | { | ||
813 | DSSDBG("restore context\n"); | ||
814 | |||
815 | dss_clk_enable_all_no_ctx(); | ||
816 | |||
817 | dss_restore_context(); | ||
818 | dispc_restore_context(); | ||
819 | #ifdef CONFIG_OMAP2_DSS_DSI | ||
820 | dsi_restore_context(); | ||
821 | #endif | ||
822 | |||
823 | dss_clk_disable_all_no_ctx(); | ||
824 | } | ||
825 | |||
826 | static int dss_get_clock(struct clk **clock, const char *clk_name) | ||
827 | { | ||
828 | struct clk *clk; | ||
829 | |||
830 | clk = clk_get(&dss.pdev->dev, clk_name); | ||
831 | |||
832 | if (IS_ERR(clk)) { | ||
833 | DSSERR("can't get clock %s", clk_name); | ||
834 | return PTR_ERR(clk); | ||
835 | } | ||
836 | |||
837 | *clock = clk; | ||
838 | |||
839 | DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk)); | ||
840 | |||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | static int dss_get_clocks(void) | ||
845 | { | ||
846 | int r; | ||
847 | struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data; | ||
848 | |||
849 | dss.dss_ick = NULL; | ||
850 | dss.dss_fck = NULL; | ||
851 | dss.dss_sys_clk = NULL; | ||
852 | dss.dss_tv_fck = NULL; | ||
853 | dss.dss_video_fck = NULL; | ||
854 | |||
855 | r = dss_get_clock(&dss.dss_ick, "ick"); | ||
856 | if (r) | ||
857 | goto err; | ||
858 | |||
859 | r = dss_get_clock(&dss.dss_fck, "fck"); | ||
860 | if (r) | ||
861 | goto err; | ||
862 | |||
863 | if (!pdata->opt_clock_available) { | ||
864 | r = -ENODEV; | ||
865 | goto err; | ||
866 | } | ||
867 | |||
868 | if (pdata->opt_clock_available("sys_clk")) { | ||
869 | r = dss_get_clock(&dss.dss_sys_clk, "sys_clk"); | ||
870 | if (r) | ||
871 | goto err; | ||
872 | } | ||
873 | |||
874 | if (pdata->opt_clock_available("tv_clk")) { | ||
875 | r = dss_get_clock(&dss.dss_tv_fck, "tv_clk"); | ||
876 | if (r) | ||
877 | goto err; | ||
878 | } | ||
879 | |||
880 | if (pdata->opt_clock_available("video_clk")) { | ||
881 | r = dss_get_clock(&dss.dss_video_fck, "video_clk"); | ||
882 | if (r) | ||
883 | goto err; | ||
884 | } | ||
885 | |||
886 | return 0; | ||
887 | |||
888 | err: | ||
889 | if (dss.dss_ick) | ||
890 | clk_put(dss.dss_ick); | ||
891 | if (dss.dss_fck) | ||
892 | clk_put(dss.dss_fck); | ||
893 | if (dss.dss_sys_clk) | ||
894 | clk_put(dss.dss_sys_clk); | ||
895 | if (dss.dss_tv_fck) | ||
896 | clk_put(dss.dss_tv_fck); | ||
897 | if (dss.dss_video_fck) | ||
898 | clk_put(dss.dss_video_fck); | ||
899 | |||
900 | return r; | 684 | return r; |
901 | } | 685 | } |
902 | 686 | ||
903 | static void dss_put_clocks(void) | 687 | static void dss_put_clocks(void) |
904 | { | 688 | { |
905 | if (dss.dss_video_fck) | 689 | if (dss.dpll4_m4_ck) |
906 | clk_put(dss.dss_video_fck); | 690 | clk_put(dss.dpll4_m4_ck); |
907 | if (dss.dss_tv_fck) | 691 | clk_put(dss.dss_clk); |
908 | clk_put(dss.dss_tv_fck); | ||
909 | if (dss.dss_sys_clk) | ||
910 | clk_put(dss.dss_sys_clk); | ||
911 | clk_put(dss.dss_fck); | ||
912 | clk_put(dss.dss_ick); | ||
913 | } | ||
914 | |||
915 | unsigned long dss_clk_get_rate(enum dss_clock clk) | ||
916 | { | ||
917 | switch (clk) { | ||
918 | case DSS_CLK_ICK: | ||
919 | return clk_get_rate(dss.dss_ick); | ||
920 | case DSS_CLK_FCK: | ||
921 | return clk_get_rate(dss.dss_fck); | ||
922 | case DSS_CLK_SYSCK: | ||
923 | return clk_get_rate(dss.dss_sys_clk); | ||
924 | case DSS_CLK_TVFCK: | ||
925 | return clk_get_rate(dss.dss_tv_fck); | ||
926 | case DSS_CLK_VIDFCK: | ||
927 | return clk_get_rate(dss.dss_video_fck); | ||
928 | } | ||
929 | |||
930 | BUG(); | ||
931 | return 0; | ||
932 | } | ||
933 | |||
934 | static unsigned count_clk_bits(enum dss_clock clks) | ||
935 | { | ||
936 | unsigned num_clks = 0; | ||
937 | |||
938 | if (clks & DSS_CLK_ICK) | ||
939 | ++num_clks; | ||
940 | if (clks & DSS_CLK_FCK) | ||
941 | ++num_clks; | ||
942 | if (clks & DSS_CLK_SYSCK) | ||
943 | ++num_clks; | ||
944 | if (clks & DSS_CLK_TVFCK) | ||
945 | ++num_clks; | ||
946 | if (clks & DSS_CLK_VIDFCK) | ||
947 | ++num_clks; | ||
948 | |||
949 | return num_clks; | ||
950 | } | ||
951 | |||
952 | static void dss_clk_enable_no_ctx(enum dss_clock clks) | ||
953 | { | ||
954 | unsigned num_clks = count_clk_bits(clks); | ||
955 | |||
956 | if (clks & DSS_CLK_ICK) | ||
957 | clk_enable(dss.dss_ick); | ||
958 | if (clks & DSS_CLK_FCK) | ||
959 | clk_enable(dss.dss_fck); | ||
960 | if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk) | ||
961 | clk_enable(dss.dss_sys_clk); | ||
962 | if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck) | ||
963 | clk_enable(dss.dss_tv_fck); | ||
964 | if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck) | ||
965 | clk_enable(dss.dss_video_fck); | ||
966 | |||
967 | dss.num_clks_enabled += num_clks; | ||
968 | } | ||
969 | |||
970 | void dss_clk_enable(enum dss_clock clks) | ||
971 | { | ||
972 | bool check_ctx = dss.num_clks_enabled == 0; | ||
973 | |||
974 | dss_clk_enable_no_ctx(clks); | ||
975 | |||
976 | /* | ||
977 | * HACK: On omap4 the registers may not be accessible right after | ||
978 | * enabling the clocks. At some point this will be handled by | ||
979 | * pm_runtime, but for the time begin this should make things work. | ||
980 | */ | ||
981 | if (cpu_is_omap44xx() && check_ctx) | ||
982 | udelay(10); | ||
983 | |||
984 | if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore()) | ||
985 | restore_all_ctx(); | ||
986 | } | 692 | } |
987 | 693 | ||
988 | static void dss_clk_disable_no_ctx(enum dss_clock clks) | 694 | struct clk *dss_get_ick(void) |
989 | { | 695 | { |
990 | unsigned num_clks = count_clk_bits(clks); | 696 | return clk_get(&dss.pdev->dev, "ick"); |
991 | |||
992 | if (clks & DSS_CLK_ICK) | ||
993 | clk_disable(dss.dss_ick); | ||
994 | if (clks & DSS_CLK_FCK) | ||
995 | clk_disable(dss.dss_fck); | ||
996 | if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk) | ||
997 | clk_disable(dss.dss_sys_clk); | ||
998 | if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck) | ||
999 | clk_disable(dss.dss_tv_fck); | ||
1000 | if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck) | ||
1001 | clk_disable(dss.dss_video_fck); | ||
1002 | |||
1003 | dss.num_clks_enabled -= num_clks; | ||
1004 | } | 697 | } |
1005 | 698 | ||
1006 | void dss_clk_disable(enum dss_clock clks) | 699 | int dss_runtime_get(void) |
1007 | { | 700 | { |
1008 | if (cpu_is_omap34xx()) { | 701 | int r; |
1009 | unsigned num_clks = count_clk_bits(clks); | ||
1010 | |||
1011 | BUG_ON(dss.num_clks_enabled < num_clks); | ||
1012 | 702 | ||
1013 | if (dss.num_clks_enabled == num_clks) | 703 | DSSDBG("dss_runtime_get\n"); |
1014 | save_all_ctx(); | ||
1015 | } | ||
1016 | 704 | ||
1017 | dss_clk_disable_no_ctx(clks); | 705 | r = pm_runtime_get_sync(&dss.pdev->dev); |
706 | WARN_ON(r < 0); | ||
707 | return r < 0 ? r : 0; | ||
1018 | } | 708 | } |
1019 | 709 | ||
1020 | static void dss_clk_enable_all_no_ctx(void) | 710 | void dss_runtime_put(void) |
1021 | { | 711 | { |
1022 | enum dss_clock clks; | 712 | int r; |
1023 | |||
1024 | clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK; | ||
1025 | if (cpu_is_omap34xx()) | ||
1026 | clks |= DSS_CLK_VIDFCK; | ||
1027 | dss_clk_enable_no_ctx(clks); | ||
1028 | } | ||
1029 | |||
1030 | static void dss_clk_disable_all_no_ctx(void) | ||
1031 | { | ||
1032 | enum dss_clock clks; | ||
1033 | 713 | ||
1034 | clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK; | 714 | DSSDBG("dss_runtime_put\n"); |
1035 | if (cpu_is_omap34xx()) | ||
1036 | clks |= DSS_CLK_VIDFCK; | ||
1037 | dss_clk_disable_no_ctx(clks); | ||
1038 | } | ||
1039 | 715 | ||
1040 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) | 716 | r = pm_runtime_put(&dss.pdev->dev); |
1041 | /* CLOCKS */ | 717 | WARN_ON(r < 0); |
1042 | static void core_dump_clocks(struct seq_file *s) | ||
1043 | { | ||
1044 | int i; | ||
1045 | struct clk *clocks[5] = { | ||
1046 | dss.dss_ick, | ||
1047 | dss.dss_fck, | ||
1048 | dss.dss_sys_clk, | ||
1049 | dss.dss_tv_fck, | ||
1050 | dss.dss_video_fck | ||
1051 | }; | ||
1052 | |||
1053 | const char *names[5] = { | ||
1054 | "ick", | ||
1055 | "fck", | ||
1056 | "sys_clk", | ||
1057 | "tv_fck", | ||
1058 | "video_fck" | ||
1059 | }; | ||
1060 | |||
1061 | seq_printf(s, "- CORE -\n"); | ||
1062 | |||
1063 | seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled); | ||
1064 | |||
1065 | for (i = 0; i < 5; i++) { | ||
1066 | if (!clocks[i]) | ||
1067 | continue; | ||
1068 | seq_printf(s, "%s (%s)%*s\t%lu\t%d\n", | ||
1069 | names[i], | ||
1070 | clocks[i]->name, | ||
1071 | 24 - strlen(names[i]) - strlen(clocks[i]->name), | ||
1072 | "", | ||
1073 | clk_get_rate(clocks[i]), | ||
1074 | clocks[i]->usecount); | ||
1075 | } | ||
1076 | } | 718 | } |
1077 | #endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */ | ||
1078 | 719 | ||
1079 | /* DEBUGFS */ | 720 | /* DEBUGFS */ |
1080 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) | 721 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) |
1081 | void dss_debug_dump_clocks(struct seq_file *s) | 722 | void dss_debug_dump_clocks(struct seq_file *s) |
1082 | { | 723 | { |
1083 | core_dump_clocks(s); | ||
1084 | dss_dump_clocks(s); | 724 | dss_dump_clocks(s); |
1085 | dispc_dump_clocks(s); | 725 | dispc_dump_clocks(s); |
1086 | #ifdef CONFIG_OMAP2_DSS_DSI | 726 | #ifdef CONFIG_OMAP2_DSS_DSI |
@@ -1089,28 +729,51 @@ void dss_debug_dump_clocks(struct seq_file *s) | |||
1089 | } | 729 | } |
1090 | #endif | 730 | #endif |
1091 | 731 | ||
1092 | |||
1093 | /* DSS HW IP initialisation */ | 732 | /* DSS HW IP initialisation */ |
1094 | static int omap_dsshw_probe(struct platform_device *pdev) | 733 | static int omap_dsshw_probe(struct platform_device *pdev) |
1095 | { | 734 | { |
735 | struct resource *dss_mem; | ||
736 | u32 rev; | ||
1096 | int r; | 737 | int r; |
1097 | 738 | ||
1098 | dss.pdev = pdev; | 739 | dss.pdev = pdev; |
1099 | 740 | ||
741 | dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); | ||
742 | if (!dss_mem) { | ||
743 | DSSERR("can't get IORESOURCE_MEM DSS\n"); | ||
744 | r = -EINVAL; | ||
745 | goto err_ioremap; | ||
746 | } | ||
747 | dss.base = ioremap(dss_mem->start, resource_size(dss_mem)); | ||
748 | if (!dss.base) { | ||
749 | DSSERR("can't ioremap DSS\n"); | ||
750 | r = -ENOMEM; | ||
751 | goto err_ioremap; | ||
752 | } | ||
753 | |||
1100 | r = dss_get_clocks(); | 754 | r = dss_get_clocks(); |
1101 | if (r) | 755 | if (r) |
1102 | goto err_clocks; | 756 | goto err_clocks; |
1103 | 757 | ||
1104 | dss_clk_enable_all_no_ctx(); | 758 | pm_runtime_enable(&pdev->dev); |
1105 | 759 | ||
1106 | dss.ctx_id = dss_get_ctx_id(); | 760 | r = dss_runtime_get(); |
1107 | DSSDBG("initial ctx id %u\n", dss.ctx_id); | 761 | if (r) |
762 | goto err_runtime_get; | ||
1108 | 763 | ||
1109 | r = dss_init(); | 764 | /* Select DPLL */ |
1110 | if (r) { | 765 | REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); |
1111 | DSSERR("Failed to initialize DSS\n"); | 766 | |
1112 | goto err_dss; | 767 | #ifdef CONFIG_OMAP2_DSS_VENC |
1113 | } | 768 | REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ |
769 | REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ | ||
770 | REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ | ||
771 | #endif | ||
772 | dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; | ||
773 | dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; | ||
774 | dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; | ||
775 | dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; | ||
776 | dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; | ||
1114 | 777 | ||
1115 | r = dpi_init(); | 778 | r = dpi_init(); |
1116 | if (r) { | 779 | if (r) { |
@@ -1124,42 +787,66 @@ static int omap_dsshw_probe(struct platform_device *pdev) | |||
1124 | goto err_sdi; | 787 | goto err_sdi; |
1125 | } | 788 | } |
1126 | 789 | ||
1127 | dss_clk_disable_all_no_ctx(); | 790 | rev = dss_read_reg(DSS_REVISION); |
791 | printk(KERN_INFO "OMAP DSS rev %d.%d\n", | ||
792 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); | ||
793 | |||
794 | dss_runtime_put(); | ||
795 | |||
1128 | return 0; | 796 | return 0; |
1129 | err_sdi: | 797 | err_sdi: |
1130 | dpi_exit(); | 798 | dpi_exit(); |
1131 | err_dpi: | 799 | err_dpi: |
1132 | dss_exit(); | 800 | dss_runtime_put(); |
1133 | err_dss: | 801 | err_runtime_get: |
1134 | dss_clk_disable_all_no_ctx(); | 802 | pm_runtime_disable(&pdev->dev); |
1135 | dss_put_clocks(); | 803 | dss_put_clocks(); |
1136 | err_clocks: | 804 | err_clocks: |
805 | iounmap(dss.base); | ||
806 | err_ioremap: | ||
1137 | return r; | 807 | return r; |
1138 | } | 808 | } |
1139 | 809 | ||
1140 | static int omap_dsshw_remove(struct platform_device *pdev) | 810 | static int omap_dsshw_remove(struct platform_device *pdev) |
1141 | { | 811 | { |
812 | dpi_exit(); | ||
813 | sdi_exit(); | ||
1142 | 814 | ||
1143 | dss_exit(); | 815 | iounmap(dss.base); |
1144 | 816 | ||
1145 | /* | 817 | pm_runtime_disable(&pdev->dev); |
1146 | * As part of hwmod changes, DSS is not the only controller of dss | ||
1147 | * clocks; hwmod framework itself will also enable clocks during hwmod | ||
1148 | * init for dss, and autoidle is set in h/w for DSS. Hence, there's no | ||
1149 | * need to disable clocks if their usecounts > 1. | ||
1150 | */ | ||
1151 | WARN_ON(dss.num_clks_enabled > 0); | ||
1152 | 818 | ||
1153 | dss_put_clocks(); | 819 | dss_put_clocks(); |
820 | |||
821 | return 0; | ||
822 | } | ||
823 | |||
824 | static int dss_runtime_suspend(struct device *dev) | ||
825 | { | ||
826 | dss_save_context(); | ||
827 | clk_disable(dss.dss_clk); | ||
1154 | return 0; | 828 | return 0; |
1155 | } | 829 | } |
1156 | 830 | ||
831 | static int dss_runtime_resume(struct device *dev) | ||
832 | { | ||
833 | clk_enable(dss.dss_clk); | ||
834 | dss_restore_context(); | ||
835 | return 0; | ||
836 | } | ||
837 | |||
838 | static const struct dev_pm_ops dss_pm_ops = { | ||
839 | .runtime_suspend = dss_runtime_suspend, | ||
840 | .runtime_resume = dss_runtime_resume, | ||
841 | }; | ||
842 | |||
1157 | static struct platform_driver omap_dsshw_driver = { | 843 | static struct platform_driver omap_dsshw_driver = { |
1158 | .probe = omap_dsshw_probe, | 844 | .probe = omap_dsshw_probe, |
1159 | .remove = omap_dsshw_remove, | 845 | .remove = omap_dsshw_remove, |
1160 | .driver = { | 846 | .driver = { |
1161 | .name = "omapdss_dss", | 847 | .name = "omapdss_dss", |
1162 | .owner = THIS_MODULE, | 848 | .owner = THIS_MODULE, |
849 | .pm = &dss_pm_ops, | ||
1163 | }, | 850 | }, |
1164 | }; | 851 | }; |
1165 | 852 | ||
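The probe and remove paths above now lean on dss_runtime_get() and dss_runtime_put(), whose definitions fall outside this hunk. A minimal sketch of what such wrappers typically look like, assuming they mirror the hdmi_runtime_get()/hdmi_runtime_put() helpers added further down in this same patch and that the file-scope dss state keeps the platform device pointer (here written dss.pdev); the real dss.c may differ in detail:

    /* needs <linux/pm_runtime.h>; dss.pdev is assumed to be stored in probe */
    int dss_runtime_get(void)
    {
        int r;

        DSSDBG("dss_runtime_get\n");

        /* resumes DSS through dss_runtime_resume() if it is currently suspended */
        r = pm_runtime_get_sync(&dss.pdev->dev);
        WARN_ON(r < 0);
        return r < 0 ? r : 0;
    }

    void dss_runtime_put(void)
    {
        int r;

        DSSDBG("dss_runtime_put\n");

        /* may let DSS suspend again once the last user drops its reference */
        r = pm_runtime_put(&dss.pdev->dev);
        WARN_ON(r < 0);
    }

pm_runtime_get_sync() returns 1 when the device was already active, which is why the wrapper folds any non-negative value to 0.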
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 8ab6d43329bb..9c94b1152c20 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h | |||
@@ -97,26 +97,12 @@ extern unsigned int dss_debug; | |||
97 | #define FLD_MOD(orig, val, start, end) \ | 97 | #define FLD_MOD(orig, val, start, end) \ |
98 | (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) | 98 | (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) |
99 | 99 | ||
100 | enum omap_burst_size { | ||
101 | OMAP_DSS_BURST_4x32 = 0, | ||
102 | OMAP_DSS_BURST_8x32 = 1, | ||
103 | OMAP_DSS_BURST_16x32 = 2, | ||
104 | }; | ||
105 | |||
106 | enum omap_parallel_interface_mode { | 100 | enum omap_parallel_interface_mode { |
107 | OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */ | 101 | OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */ |
108 | OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */ | 102 | OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */ |
109 | OMAP_DSS_PARALLELMODE_DSI, | 103 | OMAP_DSS_PARALLELMODE_DSI, |
110 | }; | 104 | }; |
111 | 105 | ||
112 | enum dss_clock { | ||
113 | DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */ | ||
114 | DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */ | ||
115 | DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */ | ||
116 | DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */ | ||
117 | DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/ | ||
118 | }; | ||
119 | |||
120 | enum dss_hdmi_venc_clk_source_select { | 106 | enum dss_hdmi_venc_clk_source_select { |
121 | DSS_VENC_TV_CLK = 0, | 107 | DSS_VENC_TV_CLK = 0, |
122 | DSS_HDMI_M_PCLK = 1, | 108 | DSS_HDMI_M_PCLK = 1, |
@@ -194,7 +180,7 @@ void dss_uninit_device(struct platform_device *pdev, | |||
194 | bool dss_use_replication(struct omap_dss_device *dssdev, | 180 | bool dss_use_replication(struct omap_dss_device *dssdev, |
195 | enum omap_color_mode mode); | 181 | enum omap_color_mode mode); |
196 | void default_get_overlay_fifo_thresholds(enum omap_plane plane, | 182 | void default_get_overlay_fifo_thresholds(enum omap_plane plane, |
197 | u32 fifo_size, enum omap_burst_size *burst_size, | 183 | u32 fifo_size, u32 burst_size, |
198 | u32 *fifo_low, u32 *fifo_high); | 184 | u32 *fifo_low, u32 *fifo_high); |
199 | 185 | ||
200 | /* manager */ | 186 | /* manager */ |
@@ -220,13 +206,12 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force); | |||
220 | int dss_init_platform_driver(void); | 206 | int dss_init_platform_driver(void); |
221 | void dss_uninit_platform_driver(void); | 207 | void dss_uninit_platform_driver(void); |
222 | 208 | ||
209 | int dss_runtime_get(void); | ||
210 | void dss_runtime_put(void); | ||
211 | |||
212 | struct clk *dss_get_ick(void); | ||
213 | |||
223 | void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); | 214 | void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); |
224 | void dss_save_context(void); | ||
225 | void dss_restore_context(void); | ||
226 | void dss_clk_enable(enum dss_clock clks); | ||
227 | void dss_clk_disable(enum dss_clock clks); | ||
228 | unsigned long dss_clk_get_rate(enum dss_clock clk); | ||
229 | int dss_need_ctx_restore(void); | ||
230 | const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); | 215 | const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); |
231 | void dss_dump_clocks(struct seq_file *s); | 216 | void dss_dump_clocks(struct seq_file *s); |
232 | 217 | ||
@@ -283,15 +268,15 @@ struct file_operations; | |||
283 | int dsi_init_platform_driver(void); | 268 | int dsi_init_platform_driver(void); |
284 | void dsi_uninit_platform_driver(void); | 269 | void dsi_uninit_platform_driver(void); |
285 | 270 | ||
271 | int dsi_runtime_get(struct platform_device *dsidev); | ||
272 | void dsi_runtime_put(struct platform_device *dsidev); | ||
273 | |||
286 | void dsi_dump_clocks(struct seq_file *s); | 274 | void dsi_dump_clocks(struct seq_file *s); |
287 | void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, | 275 | void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, |
288 | const struct file_operations *debug_fops); | 276 | const struct file_operations *debug_fops); |
289 | void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, | 277 | void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, |
290 | const struct file_operations *debug_fops); | 278 | const struct file_operations *debug_fops); |
291 | 279 | ||
292 | void dsi_save_context(void); | ||
293 | void dsi_restore_context(void); | ||
294 | |||
295 | int dsi_init_display(struct omap_dss_device *display); | 280 | int dsi_init_display(struct omap_dss_device *display); |
296 | void dsi_irq_handler(void); | 281 | void dsi_irq_handler(void); |
297 | unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); | 282 | unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); |
@@ -304,7 +289,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, | |||
304 | bool enable_hsdiv); | 289 | bool enable_hsdiv); |
305 | void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes); | 290 | void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes); |
306 | void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, | 291 | void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, |
307 | u32 fifo_size, enum omap_burst_size *burst_size, | 292 | u32 fifo_size, u32 burst_size, |
308 | u32 *fifo_low, u32 *fifo_high); | 293 | u32 *fifo_low, u32 *fifo_high); |
309 | void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); | 294 | void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); |
310 | void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); | 295 | void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); |
@@ -317,6 +302,13 @@ static inline int dsi_init_platform_driver(void) | |||
317 | static inline void dsi_uninit_platform_driver(void) | 302 | static inline void dsi_uninit_platform_driver(void) |
318 | { | 303 | { |
319 | } | 304 | } |
305 | static inline int dsi_runtime_get(struct platform_device *dsidev) | ||
306 | { | ||
307 | return 0; | ||
308 | } | ||
309 | static inline void dsi_runtime_put(struct platform_device *dsidev) | ||
310 | { | ||
311 | } | ||
320 | static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) | 312 | static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) |
321 | { | 313 | { |
322 | WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); | 314 | WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); |
@@ -384,8 +376,8 @@ void dispc_dump_regs(struct seq_file *s); | |||
384 | void dispc_irq_handler(void); | 376 | void dispc_irq_handler(void); |
385 | void dispc_fake_vsync_irq(void); | 377 | void dispc_fake_vsync_irq(void); |
386 | 378 | ||
387 | void dispc_save_context(void); | 379 | int dispc_runtime_get(void); |
388 | void dispc_restore_context(void); | 380 | void dispc_runtime_put(void); |
389 | 381 | ||
390 | void dispc_enable_sidle(void); | 382 | void dispc_enable_sidle(void); |
391 | void dispc_disable_sidle(void); | 383 | void dispc_disable_sidle(void); |
@@ -398,10 +390,12 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable); | |||
398 | void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height); | 390 | void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height); |
399 | void dispc_set_digit_size(u16 width, u16 height); | 391 | void dispc_set_digit_size(u16 width, u16 height); |
400 | u32 dispc_get_plane_fifo_size(enum omap_plane plane); | 392 | u32 dispc_get_plane_fifo_size(enum omap_plane plane); |
401 | void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high); | 393 | void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high); |
402 | void dispc_enable_fifomerge(bool enable); | 394 | void dispc_enable_fifomerge(bool enable); |
403 | void dispc_set_burst_size(enum omap_plane plane, | 395 | u32 dispc_get_burst_size(enum omap_plane plane); |
404 | enum omap_burst_size burst_size); | 396 | void dispc_enable_cpr(enum omap_channel channel, bool enable); |
397 | void dispc_set_cpr_coef(enum omap_channel channel, | ||
398 | struct omap_dss_cpr_coefs *coefs); | ||
405 | 399 | ||
406 | void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr); | 400 | void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr); |
407 | void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr); | 401 | void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr); |
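With dss_save_context()/dss_restore_context() and the dss_clk_enable()/dss_clk_disable() entry points gone from this header, callers are expected to hold a runtime reference around DSS or DISPC register access instead, as manager.c and overlay.c do later in this patch. A caller-side sketch; the function name and the register access are illustrative only:

    /* Illustrative pattern for code that needs DISPC powered while it runs. */
    static int example_touch_dispc(void)
    {
        int r;

        r = dispc_runtime_get();    /* powers DISPC up if it was suspended */
        if (r)
            return r;

        /* ... read or write DISPC registers here ... */

        dispc_runtime_put();        /* balance the reference; DISPC may idle again */
        return 0;
    }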
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index 1c18888e5df3..b415c4ee621d 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c | |||
@@ -49,6 +49,9 @@ struct omap_dss_features { | |||
49 | const enum omap_color_mode *supported_color_modes; | 49 | const enum omap_color_mode *supported_color_modes; |
50 | const char * const *clksrc_names; | 50 | const char * const *clksrc_names; |
51 | const struct dss_param_range *dss_params; | 51 | const struct dss_param_range *dss_params; |
52 | |||
53 | const u32 buffer_size_unit; | ||
54 | const u32 burst_size_unit; | ||
52 | }; | 55 | }; |
53 | 56 | ||
54 | /* This struct is assigned to one of the below during initialization */ | 57 | /* This struct is assigned to one of the below during initialization */ |
@@ -274,6 +277,8 @@ static const struct omap_dss_features omap2_dss_features = { | |||
274 | .supported_color_modes = omap2_dss_supported_color_modes, | 277 | .supported_color_modes = omap2_dss_supported_color_modes, |
275 | .clksrc_names = omap2_dss_clk_source_names, | 278 | .clksrc_names = omap2_dss_clk_source_names, |
276 | .dss_params = omap2_dss_param_range, | 279 | .dss_params = omap2_dss_param_range, |
280 | .buffer_size_unit = 1, | ||
281 | .burst_size_unit = 8, | ||
277 | }; | 282 | }; |
278 | 283 | ||
279 | /* OMAP3 DSS Features */ | 284 | /* OMAP3 DSS Features */ |
@@ -286,7 +291,9 @@ static const struct omap_dss_features omap3430_dss_features = { | |||
286 | FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | | 291 | FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | |
287 | FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | | 292 | FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | |
288 | FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF | | 293 | FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF | |
289 | FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC, | 294 | FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC | |
295 | FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD | | ||
296 | FEAT_FIR_COEF_V, | ||
290 | 297 | ||
291 | .num_mgrs = 2, | 298 | .num_mgrs = 2, |
292 | .num_ovls = 3, | 299 | .num_ovls = 3, |
@@ -294,6 +301,8 @@ static const struct omap_dss_features omap3430_dss_features = { | |||
294 | .supported_color_modes = omap3_dss_supported_color_modes, | 301 | .supported_color_modes = omap3_dss_supported_color_modes, |
295 | .clksrc_names = omap3_dss_clk_source_names, | 302 | .clksrc_names = omap3_dss_clk_source_names, |
296 | .dss_params = omap3_dss_param_range, | 303 | .dss_params = omap3_dss_param_range, |
304 | .buffer_size_unit = 1, | ||
305 | .burst_size_unit = 8, | ||
297 | }; | 306 | }; |
298 | 307 | ||
299 | static const struct omap_dss_features omap3630_dss_features = { | 308 | static const struct omap_dss_features omap3630_dss_features = { |
@@ -306,7 +315,8 @@ static const struct omap_dss_features omap3630_dss_features = { | |||
306 | FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | | 315 | FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | |
307 | FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | | 316 | FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | |
308 | FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG | | 317 | FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG | |
309 | FEAT_DSI_PLL_FREQSEL, | 318 | FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD | |
319 | FEAT_FIR_COEF_V, | ||
310 | 320 | ||
311 | .num_mgrs = 2, | 321 | .num_mgrs = 2, |
312 | .num_ovls = 3, | 322 | .num_ovls = 3, |
@@ -314,6 +324,8 @@ static const struct omap_dss_features omap3630_dss_features = { | |||
314 | .supported_color_modes = omap3_dss_supported_color_modes, | 324 | .supported_color_modes = omap3_dss_supported_color_modes, |
315 | .clksrc_names = omap3_dss_clk_source_names, | 325 | .clksrc_names = omap3_dss_clk_source_names, |
316 | .dss_params = omap3_dss_param_range, | 326 | .dss_params = omap3_dss_param_range, |
327 | .buffer_size_unit = 1, | ||
328 | .burst_size_unit = 8, | ||
317 | }; | 329 | }; |
318 | 330 | ||
319 | /* OMAP4 DSS Features */ | 331 | /* OMAP4 DSS Features */ |
@@ -327,7 +339,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { | |||
327 | FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | | 339 | FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | |
328 | FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | | 340 | FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | |
329 | FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | | 341 | FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | |
330 | FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, | 342 | FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | |
343 | FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V, | ||
331 | 344 | ||
332 | .num_mgrs = 3, | 345 | .num_mgrs = 3, |
333 | .num_ovls = 3, | 346 | .num_ovls = 3, |
@@ -335,6 +348,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { | |||
335 | .supported_color_modes = omap4_dss_supported_color_modes, | 348 | .supported_color_modes = omap4_dss_supported_color_modes, |
336 | .clksrc_names = omap4_dss_clk_source_names, | 349 | .clksrc_names = omap4_dss_clk_source_names, |
337 | .dss_params = omap4_dss_param_range, | 350 | .dss_params = omap4_dss_param_range, |
351 | .buffer_size_unit = 16, | ||
352 | .burst_size_unit = 16, | ||
338 | }; | 353 | }; |
339 | 354 | ||
340 | /* For all the other OMAP4 versions */ | 355 | /* For all the other OMAP4 versions */ |
@@ -348,7 +363,8 @@ static const struct omap_dss_features omap4_dss_features = { | |||
348 | FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | | 363 | FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | |
349 | FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | | 364 | FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | |
350 | FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE | | 365 | FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE | |
351 | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, | 366 | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR | |
367 | FEAT_PRELOAD | FEAT_FIR_COEF_V, | ||
352 | 368 | ||
353 | .num_mgrs = 3, | 369 | .num_mgrs = 3, |
354 | .num_ovls = 3, | 370 | .num_ovls = 3, |
@@ -356,6 +372,8 @@ static const struct omap_dss_features omap4_dss_features = { | |||
356 | .supported_color_modes = omap4_dss_supported_color_modes, | 372 | .supported_color_modes = omap4_dss_supported_color_modes, |
357 | .clksrc_names = omap4_dss_clk_source_names, | 373 | .clksrc_names = omap4_dss_clk_source_names, |
358 | .dss_params = omap4_dss_param_range, | 374 | .dss_params = omap4_dss_param_range, |
375 | .buffer_size_unit = 16, | ||
376 | .burst_size_unit = 16, | ||
359 | }; | 377 | }; |
360 | 378 | ||
361 | /* Functions returning values related to a DSS feature */ | 379 | /* Functions returning values related to a DSS feature */ |
@@ -401,6 +419,16 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) | |||
401 | return omap_current_dss_features->clksrc_names[id]; | 419 | return omap_current_dss_features->clksrc_names[id]; |
402 | } | 420 | } |
403 | 421 | ||
422 | u32 dss_feat_get_buffer_size_unit(void) | ||
423 | { | ||
424 | return omap_current_dss_features->buffer_size_unit; | ||
425 | } | ||
426 | |||
427 | u32 dss_feat_get_burst_size_unit(void) | ||
428 | { | ||
429 | return omap_current_dss_features->burst_size_unit; | ||
430 | } | ||
431 | |||
404 | /* DSS has_feature check */ | 432 | /* DSS has_feature check */ |
405 | bool dss_has_feature(enum dss_feat_id id) | 433 | bool dss_has_feature(enum dss_feat_id id) |
406 | { | 434 | { |
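The new buffer_size_unit and burst_size_unit fields record that OMAP4 DISPC counts FIFO sizes and DMA bursts in 16-byte units, while OMAP2/3 use 1-byte buffer units and 8-byte bursts; dss_feat_get_buffer_size_unit() and dss_feat_get_burst_size_unit() expose these so threshold code no longer hard-codes a unit. A small sketch of how a byte value might be aligned to the unit before being handed to dispc_set_fifo_threshold(); the helper name and the round-down policy are assumptions for illustration, not part of this patch:

    /* Align a byte count to the FIFO size unit of the current SoC. */
    static u32 example_align_to_buffer_unit(u32 bytes)
    {
        u32 unit = dss_feat_get_buffer_size_unit(); /* 1 on OMAP2/3, 16 on OMAP4 */

        /* round down so the programmed threshold never exceeds the request */
        return (bytes / unit) * unit;
    }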
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index 07b346f7d916..b7398cbcda5f 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h | |||
@@ -51,6 +51,10 @@ enum dss_feat_id { | |||
51 | FEAT_HDMI_CTS_SWMODE = 1 << 19, | 51 | FEAT_HDMI_CTS_SWMODE = 1 << 19, |
52 | FEAT_HANDLE_UV_SEPARATE = 1 << 20, | 52 | FEAT_HANDLE_UV_SEPARATE = 1 << 20, |
53 | FEAT_ATTR2 = 1 << 21, | 53 | FEAT_ATTR2 = 1 << 21, |
54 | FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22, | ||
55 | FEAT_CPR = 1 << 23, | ||
56 | FEAT_PRELOAD = 1 << 24, | ||
57 | FEAT_FIR_COEF_V = 1 << 25, | ||
54 | }; | 58 | }; |
55 | 59 | ||
56 | /* DSS register field id */ | 60 | /* DSS register field id */ |
@@ -90,6 +94,9 @@ bool dss_feat_color_mode_supported(enum omap_plane plane, | |||
90 | enum omap_color_mode color_mode); | 94 | enum omap_color_mode color_mode); |
91 | const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); | 95 | const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); |
92 | 96 | ||
97 | u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ | ||
98 | u32 dss_feat_get_burst_size_unit(void); /* in bytes */ | ||
99 | |||
93 | bool dss_has_feature(enum dss_feat_id id); | 100 | bool dss_has_feature(enum dss_feat_id id); |
94 | void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); | 101 | void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); |
95 | void dss_features_init(void); | 102 | void dss_features_init(void); |
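The four new feature bits keep the existing dss_feat_id convention of gating hardware-specific paths at run time with dss_has_feature() rather than with compile-time switches. The sketch below mirrors what configure_manager() does for FEAT_CPR later in this patch; FEAT_PRELOAD, FEAT_FIR_COEF_V and FEAT_VENC_REQUIRES_TV_DAC_CLK would be checked the same way before the corresponding registers or clocks are touched (the wrapper function itself is illustrative):

    static void example_apply_cpr(enum omap_channel channel,
            struct omap_overlay_manager_info *mi)
    {
        /* only DISPC revisions that advertise the CPR block get programmed */
        if (!dss_has_feature(FEAT_CPR))
            return;

        dispc_enable_cpr(channel, mi->cpr_enable);
        dispc_set_cpr_coef(channel, &mi->cpr_coefs);
    }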
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index b0555f4f0a78..256f27a9064a 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c | |||
@@ -29,6 +29,9 @@ | |||
29 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/pm_runtime.h> | ||
34 | #include <linux/clk.h> | ||
32 | #include <video/omapdss.h> | 35 | #include <video/omapdss.h> |
33 | #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ | 36 | #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ |
34 | defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) | 37 | defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) |
@@ -51,6 +54,9 @@ static struct { | |||
51 | u8 edid_set; | 54 | u8 edid_set; |
52 | bool custom_set; | 55 | bool custom_set; |
53 | struct hdmi_config cfg; | 56 | struct hdmi_config cfg; |
57 | |||
58 | struct clk *sys_clk; | ||
59 | struct clk *hdmi_clk; | ||
54 | } hdmi; | 60 | } hdmi; |
55 | 61 | ||
56 | /* | 62 | /* |
@@ -162,6 +168,27 @@ static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx, | |||
162 | return val; | 168 | return val; |
163 | } | 169 | } |
164 | 170 | ||
171 | static int hdmi_runtime_get(void) | ||
172 | { | ||
173 | int r; | ||
174 | |||
175 | DSSDBG("hdmi_runtime_get\n"); | ||
176 | |||
177 | r = pm_runtime_get_sync(&hdmi.pdev->dev); | ||
178 | WARN_ON(r < 0); | ||
179 | return r < 0 ? r : 0; | ||
180 | } | ||
181 | |||
182 | static void hdmi_runtime_put(void) | ||
183 | { | ||
184 | int r; | ||
185 | |||
186 | DSSDBG("hdmi_runtime_put\n"); | ||
187 | |||
188 | r = pm_runtime_put(&hdmi.pdev->dev); | ||
189 | WARN_ON(r < 0); | ||
190 | } | ||
191 | |||
165 | int hdmi_init_display(struct omap_dss_device *dssdev) | 192 | int hdmi_init_display(struct omap_dss_device *dssdev) |
166 | { | 193 | { |
167 | DSSDBG("init_display\n"); | 194 | DSSDBG("init_display\n"); |
@@ -311,30 +338,11 @@ static int hdmi_phy_init(void) | |||
311 | return 0; | 338 | return 0; |
312 | } | 339 | } |
313 | 340 | ||
314 | static int hdmi_wait_softreset(void) | ||
315 | { | ||
316 | /* reset W1 */ | ||
317 | REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0); | ||
318 | |||
319 | /* wait till SOFTRESET == 0 */ | ||
320 | if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) { | ||
321 | DSSERR("sysconfig reset failed\n"); | ||
322 | return -ETIMEDOUT; | ||
323 | } | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | static int hdmi_pll_program(struct hdmi_pll_info *fmt) | 341 | static int hdmi_pll_program(struct hdmi_pll_info *fmt) |
329 | { | 342 | { |
330 | u16 r = 0; | 343 | u16 r = 0; |
331 | enum hdmi_clk_refsel refsel; | 344 | enum hdmi_clk_refsel refsel; |
332 | 345 | ||
333 | /* wait for wrapper reset */ | ||
334 | r = hdmi_wait_softreset(); | ||
335 | if (r) | ||
336 | return r; | ||
337 | |||
338 | r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); | 346 | r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); |
339 | if (r) | 347 | if (r) |
340 | return r; | 348 | return r; |
@@ -1064,7 +1072,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, | |||
1064 | unsigned long clkin, refclk; | 1072 | unsigned long clkin, refclk; |
1065 | u32 mf; | 1073 | u32 mf; |
1066 | 1074 | ||
1067 | clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000; | 1075 | clkin = clk_get_rate(hdmi.sys_clk) / 10000; |
1068 | /* | 1076 | /* |
1069 | * Input clock is predivided by N + 1 | 1077 | * Input clock is predivided by N + 1 |
1070 | * output of which is the reference clk | 1078 | * output of which is the reference clk |

@@ -1098,16 +1106,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, | |||
1098 | DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); | 1106 | DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); |
1099 | } | 1107 | } |
1100 | 1108 | ||
1101 | static void hdmi_enable_clocks(int enable) | ||
1102 | { | ||
1103 | if (enable) | ||
1104 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | | ||
1105 | DSS_CLK_SYSCK | DSS_CLK_VIDFCK); | ||
1106 | else | ||
1107 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | | ||
1108 | DSS_CLK_SYSCK | DSS_CLK_VIDFCK); | ||
1109 | } | ||
1110 | |||
1111 | static int hdmi_power_on(struct omap_dss_device *dssdev) | 1109 | static int hdmi_power_on(struct omap_dss_device *dssdev) |
1112 | { | 1110 | { |
1113 | int r, code = 0; | 1111 | int r, code = 0; |
@@ -1115,7 +1113,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) | |||
1115 | struct omap_video_timings *p; | 1113 | struct omap_video_timings *p; |
1116 | unsigned long phy; | 1114 | unsigned long phy; |
1117 | 1115 | ||
1118 | hdmi_enable_clocks(1); | 1116 | r = hdmi_runtime_get(); |
1117 | if (r) | ||
1118 | return r; | ||
1119 | 1119 | ||
1120 | dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0); | 1120 | dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0); |
1121 | 1121 | ||
@@ -1180,7 +1180,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) | |||
1180 | 1180 | ||
1181 | return 0; | 1181 | return 0; |
1182 | err: | 1182 | err: |
1183 | hdmi_enable_clocks(0); | 1183 | hdmi_runtime_put(); |
1184 | return -EIO; | 1184 | return -EIO; |
1185 | } | 1185 | } |
1186 | 1186 | ||
@@ -1191,7 +1191,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev) | |||
1191 | hdmi_wp_video_start(0); | 1191 | hdmi_wp_video_start(0); |
1192 | hdmi_phy_off(); | 1192 | hdmi_phy_off(); |
1193 | hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); | 1193 | hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); |
1194 | hdmi_enable_clocks(0); | 1194 | hdmi_runtime_put(); |
1195 | 1195 | ||
1196 | hdmi.edid_set = 0; | 1196 | hdmi.edid_set = 0; |
1197 | } | 1197 | } |
@@ -1686,14 +1686,43 @@ static struct snd_soc_dai_driver hdmi_codec_dai_drv = { | |||
1686 | }; | 1686 | }; |
1687 | #endif | 1687 | #endif |
1688 | 1688 | ||
1689 | static int hdmi_get_clocks(struct platform_device *pdev) | ||
1690 | { | ||
1691 | struct clk *clk; | ||
1692 | |||
1693 | clk = clk_get(&pdev->dev, "sys_clk"); | ||
1694 | if (IS_ERR(clk)) { | ||
1695 | DSSERR("can't get sys_clk\n"); | ||
1696 | return PTR_ERR(clk); | ||
1697 | } | ||
1698 | |||
1699 | hdmi.sys_clk = clk; | ||
1700 | |||
1701 | clk = clk_get(&pdev->dev, "dss_48mhz_clk"); | ||
1702 | if (IS_ERR(clk)) { | ||
1703 | DSSERR("can't get hdmi_clk\n"); | ||
1704 | clk_put(hdmi.sys_clk); | ||
1705 | return PTR_ERR(clk); | ||
1706 | } | ||
1707 | |||
1708 | hdmi.hdmi_clk = clk; | ||
1709 | |||
1710 | return 0; | ||
1711 | } | ||
1712 | |||
1713 | static void hdmi_put_clocks(void) | ||
1714 | { | ||
1715 | if (hdmi.sys_clk) | ||
1716 | clk_put(hdmi.sys_clk); | ||
1717 | if (hdmi.hdmi_clk) | ||
1718 | clk_put(hdmi.hdmi_clk); | ||
1719 | } | ||
1720 | |||
1689 | /* HDMI HW IP initialisation */ | 1721 | /* HDMI HW IP initialisation */ |
1690 | static int omapdss_hdmihw_probe(struct platform_device *pdev) | 1722 | static int omapdss_hdmihw_probe(struct platform_device *pdev) |
1691 | { | 1723 | { |
1692 | struct resource *hdmi_mem; | 1724 | struct resource *hdmi_mem; |
1693 | #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ | 1725 | int r; |
1694 | defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) | ||
1695 | int ret; | ||
1696 | #endif | ||
1697 | 1726 | ||
1698 | hdmi.pdata = pdev->dev.platform_data; | 1727 | hdmi.pdata = pdev->dev.platform_data; |
1699 | hdmi.pdev = pdev; | 1728 | hdmi.pdev = pdev; |
@@ -1713,17 +1742,25 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) | |||
1713 | return -ENOMEM; | 1742 | return -ENOMEM; |
1714 | } | 1743 | } |
1715 | 1744 | ||
1745 | r = hdmi_get_clocks(pdev); | ||
1746 | if (r) { | ||
1747 | iounmap(hdmi.base_wp); | ||
1748 | return r; | ||
1749 | } | ||
1750 | |||
1751 | pm_runtime_enable(&pdev->dev); | ||
1752 | |||
1716 | hdmi_panel_init(); | 1753 | hdmi_panel_init(); |
1717 | 1754 | ||
1718 | #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ | 1755 | #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ |
1719 | defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) | 1756 | defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) |
1720 | 1757 | ||
1721 | /* Register ASoC codec DAI */ | 1758 | /* Register ASoC codec DAI */ |
1722 | ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, | 1759 | r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, |
1723 | &hdmi_codec_dai_drv, 1); | 1760 | &hdmi_codec_dai_drv, 1); |
1724 | if (ret) { | 1761 | if (r) { |
1725 | DSSERR("can't register ASoC HDMI audio codec\n"); | 1762 | DSSERR("can't register ASoC HDMI audio codec\n"); |
1726 | return ret; | 1763 | return r; |
1727 | } | 1764 | } |
1728 | #endif | 1765 | #endif |
1729 | return 0; | 1766 | return 0; |
@@ -1738,17 +1775,62 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev) | |||
1738 | snd_soc_unregister_codec(&pdev->dev); | 1775 | snd_soc_unregister_codec(&pdev->dev); |
1739 | #endif | 1776 | #endif |
1740 | 1777 | ||
1778 | pm_runtime_disable(&pdev->dev); | ||
1779 | |||
1780 | hdmi_put_clocks(); | ||
1781 | |||
1741 | iounmap(hdmi.base_wp); | 1782 | iounmap(hdmi.base_wp); |
1742 | 1783 | ||
1743 | return 0; | 1784 | return 0; |
1744 | } | 1785 | } |
1745 | 1786 | ||
1787 | static int hdmi_runtime_suspend(struct device *dev) | ||
1788 | { | ||
1789 | clk_disable(hdmi.hdmi_clk); | ||
1790 | clk_disable(hdmi.sys_clk); | ||
1791 | |||
1792 | dispc_runtime_put(); | ||
1793 | dss_runtime_put(); | ||
1794 | |||
1795 | return 0; | ||
1796 | } | ||
1797 | |||
1798 | static int hdmi_runtime_resume(struct device *dev) | ||
1799 | { | ||
1800 | int r; | ||
1801 | |||
1802 | r = dss_runtime_get(); | ||
1803 | if (r < 0) | ||
1804 | goto err_get_dss; | ||
1805 | |||
1806 | r = dispc_runtime_get(); | ||
1807 | if (r < 0) | ||
1808 | goto err_get_dispc; | ||
1809 | |||
1810 | |||
1811 | clk_enable(hdmi.sys_clk); | ||
1812 | clk_enable(hdmi.hdmi_clk); | ||
1813 | |||
1814 | return 0; | ||
1815 | |||
1816 | err_get_dispc: | ||
1817 | dss_runtime_put(); | ||
1818 | err_get_dss: | ||
1819 | return r; | ||
1820 | } | ||
1821 | |||
1822 | static const struct dev_pm_ops hdmi_pm_ops = { | ||
1823 | .runtime_suspend = hdmi_runtime_suspend, | ||
1824 | .runtime_resume = hdmi_runtime_resume, | ||
1825 | }; | ||
1826 | |||
1746 | static struct platform_driver omapdss_hdmihw_driver = { | 1827 | static struct platform_driver omapdss_hdmihw_driver = { |
1747 | .probe = omapdss_hdmihw_probe, | 1828 | .probe = omapdss_hdmihw_probe, |
1748 | .remove = omapdss_hdmihw_remove, | 1829 | .remove = omapdss_hdmihw_remove, |
1749 | .driver = { | 1830 | .driver = { |
1750 | .name = "omapdss_hdmi", | 1831 | .name = "omapdss_hdmi", |
1751 | .owner = THIS_MODULE, | 1832 | .owner = THIS_MODULE, |
1833 | .pm = &hdmi_pm_ops, | ||
1752 | }, | 1834 | }, |
1753 | }; | 1835 | }; |
1754 | 1836 | ||
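Like dss.c, hdmi.c fills in its dev_pm_ops by hand. An equivalent form uses the SET_RUNTIME_PM_OPS() helper from <linux/pm.h>; the main behavioural difference is that the macro compiles the callbacks out entirely when runtime PM is disabled in Kconfig, so treat this purely as an alternative sketch rather than a suggested change to the patch:

    /* Alternative spelling of hdmi_pm_ops using the <linux/pm.h> helper;
     * .runtime_idle is left unset (NULL), as in the open-coded version. */
    static const struct dev_pm_ops hdmi_pm_ops_alt = {
        SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
    };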
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index 9aeea50e33ff..13d72d5c714b 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c | |||
@@ -275,6 +275,108 @@ static ssize_t manager_alpha_blending_enabled_store( | |||
275 | return size; | 275 | return size; |
276 | } | 276 | } |
277 | 277 | ||
278 | static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr, | ||
279 | char *buf) | ||
280 | { | ||
281 | return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable); | ||
282 | } | ||
283 | |||
284 | static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr, | ||
285 | const char *buf, size_t size) | ||
286 | { | ||
287 | struct omap_overlay_manager_info info; | ||
288 | int v; | ||
289 | int r; | ||
290 | bool enable; | ||
291 | |||
292 | if (!dss_has_feature(FEAT_CPR)) | ||
293 | return -ENODEV; | ||
294 | |||
295 | r = kstrtoint(buf, 0, &v); | ||
296 | if (r) | ||
297 | return r; | ||
298 | |||
299 | enable = !!v; | ||
300 | |||
301 | mgr->get_manager_info(mgr, &info); | ||
302 | |||
303 | if (info.cpr_enable == enable) | ||
304 | return size; | ||
305 | |||
306 | info.cpr_enable = enable; | ||
307 | |||
308 | r = mgr->set_manager_info(mgr, &info); | ||
309 | if (r) | ||
310 | return r; | ||
311 | |||
312 | r = mgr->apply(mgr); | ||
313 | if (r) | ||
314 | return r; | ||
315 | |||
316 | return size; | ||
317 | } | ||
318 | |||
319 | static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr, | ||
320 | char *buf) | ||
321 | { | ||
322 | struct omap_overlay_manager_info info; | ||
323 | |||
324 | mgr->get_manager_info(mgr, &info); | ||
325 | |||
326 | return snprintf(buf, PAGE_SIZE, | ||
327 | "%d %d %d %d %d %d %d %d %d\n", | ||
328 | info.cpr_coefs.rr, | ||
329 | info.cpr_coefs.rg, | ||
330 | info.cpr_coefs.rb, | ||
331 | info.cpr_coefs.gr, | ||
332 | info.cpr_coefs.gg, | ||
333 | info.cpr_coefs.gb, | ||
334 | info.cpr_coefs.br, | ||
335 | info.cpr_coefs.bg, | ||
336 | info.cpr_coefs.bb); | ||
337 | } | ||
338 | |||
339 | static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr, | ||
340 | const char *buf, size_t size) | ||
341 | { | ||
342 | struct omap_overlay_manager_info info; | ||
343 | struct omap_dss_cpr_coefs coefs; | ||
344 | int r, i; | ||
345 | s16 *arr; | ||
346 | |||
347 | if (!dss_has_feature(FEAT_CPR)) | ||
348 | return -ENODEV; | ||
349 | |||
350 | if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd", | ||
351 | &coefs.rr, &coefs.rg, &coefs.rb, | ||
352 | &coefs.gr, &coefs.gg, &coefs.gb, | ||
353 | &coefs.br, &coefs.bg, &coefs.bb) != 9) | ||
354 | return -EINVAL; | ||
355 | |||
356 | arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb, | ||
357 | coefs.gr, coefs.gg, coefs.gb, | ||
358 | coefs.br, coefs.bg, coefs.bb }; | ||
359 | |||
360 | for (i = 0; i < 9; ++i) { | ||
361 | if (arr[i] < -512 || arr[i] > 511) | ||
362 | return -EINVAL; | ||
363 | } | ||
364 | |||
365 | mgr->get_manager_info(mgr, &info); | ||
366 | |||
367 | info.cpr_coefs = coefs; | ||
368 | |||
369 | r = mgr->set_manager_info(mgr, &info); | ||
370 | if (r) | ||
371 | return r; | ||
372 | |||
373 | r = mgr->apply(mgr); | ||
374 | if (r) | ||
375 | return r; | ||
376 | |||
377 | return size; | ||
378 | } | ||
379 | |||
278 | struct manager_attribute { | 380 | struct manager_attribute { |
279 | struct attribute attr; | 381 | struct attribute attr; |
280 | ssize_t (*show)(struct omap_overlay_manager *, char *); | 382 | ssize_t (*show)(struct omap_overlay_manager *, char *); |
@@ -300,6 +402,12 @@ static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR, | |||
300 | static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR, | 402 | static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR, |
301 | manager_alpha_blending_enabled_show, | 403 | manager_alpha_blending_enabled_show, |
302 | manager_alpha_blending_enabled_store); | 404 | manager_alpha_blending_enabled_store); |
405 | static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR, | ||
406 | manager_cpr_enable_show, | ||
407 | manager_cpr_enable_store); | ||
408 | static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR, | ||
409 | manager_cpr_coef_show, | ||
410 | manager_cpr_coef_store); | ||
303 | 411 | ||
304 | 412 | ||
305 | static struct attribute *manager_sysfs_attrs[] = { | 413 | static struct attribute *manager_sysfs_attrs[] = { |
@@ -310,6 +418,8 @@ static struct attribute *manager_sysfs_attrs[] = { | |||
310 | &manager_attr_trans_key_value.attr, | 418 | &manager_attr_trans_key_value.attr, |
311 | &manager_attr_trans_key_enabled.attr, | 419 | &manager_attr_trans_key_enabled.attr, |
312 | &manager_attr_alpha_blending_enabled.attr, | 420 | &manager_attr_alpha_blending_enabled.attr, |
421 | &manager_attr_cpr_enable.attr, | ||
422 | &manager_attr_cpr_coef.attr, | ||
313 | NULL | 423 | NULL |
314 | }; | 424 | }; |
315 | 425 | ||
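The two new attributes expose CPR control per overlay manager through sysfs: cpr_enable takes 0/1 and cpr_coef takes nine signed coefficients in the rr rg rb gr gg gb br bg bb order parsed by the store handlers above, each limited to -512..511. A userspace sketch; the sysfs path is a guess at a typical omapdss layout and the "256 encodes 1.0" scale is an assumption discussed after the manager.c hunk below:

    /* Userspace sketch: load an (assumed) identity CPR matrix, then enable CPR. */
    #include <stdio.h>

    int main(void)
    {
        const char *mgr = "/sys/devices/platform/omapdss/manager0"; /* path is an assumption */
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "%s/cpr_coef", mgr);
        f = fopen(path, "w");
        if (!f)
            return 1;
        fprintf(f, "256 0 0 0 256 0 0 0 256\n"); /* rr rg rb gr gg gb br bg bb */
        fclose(f);

        snprintf(path, sizeof(path), "%s/cpr_enable", mgr);
        f = fopen(path, "w");
        if (!f)
            return 1;
        fprintf(f, "1\n");
        fclose(f);

        return 0;
    }

Writing either file on hardware without the CPR block returns -ENODEV, matching the dss_has_feature(FEAT_CPR) guards in the store handlers.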
@@ -391,33 +501,14 @@ struct overlay_cache_data { | |||
391 | 501 | ||
392 | bool enabled; | 502 | bool enabled; |
393 | 503 | ||
394 | u32 paddr; | 504 | struct omap_overlay_info info; |
395 | void __iomem *vaddr; | ||
396 | u32 p_uv_addr; /* relevant for NV12 format only */ | ||
397 | u16 screen_width; | ||
398 | u16 width; | ||
399 | u16 height; | ||
400 | enum omap_color_mode color_mode; | ||
401 | u8 rotation; | ||
402 | enum omap_dss_rotation_type rotation_type; | ||
403 | bool mirror; | ||
404 | |||
405 | u16 pos_x; | ||
406 | u16 pos_y; | ||
407 | u16 out_width; /* if 0, out_width == width */ | ||
408 | u16 out_height; /* if 0, out_height == height */ | ||
409 | u8 global_alpha; | ||
410 | u8 pre_mult_alpha; | ||
411 | 505 | ||
412 | enum omap_channel channel; | 506 | enum omap_channel channel; |
413 | bool replication; | 507 | bool replication; |
414 | bool ilace; | 508 | bool ilace; |
415 | 509 | ||
416 | enum omap_burst_size burst_size; | ||
417 | u32 fifo_low; | 510 | u32 fifo_low; |
418 | u32 fifo_high; | 511 | u32 fifo_high; |
419 | |||
420 | bool manual_update; | ||
421 | }; | 512 | }; |
422 | 513 | ||
423 | struct manager_cache_data { | 514 | struct manager_cache_data { |
@@ -429,15 +520,8 @@ struct manager_cache_data { | |||
429 | * VSYNC/EVSYNC */ | 520 | * VSYNC/EVSYNC */ |
430 | bool shadow_dirty; | 521 | bool shadow_dirty; |
431 | 522 | ||
432 | u32 default_color; | 523 | struct omap_overlay_manager_info info; |
433 | |||
434 | enum omap_dss_trans_key_type trans_key_type; | ||
435 | u32 trans_key; | ||
436 | bool trans_enabled; | ||
437 | |||
438 | bool alpha_enabled; | ||
439 | 524 | ||
440 | bool manual_upd_display; | ||
441 | bool manual_update; | 525 | bool manual_update; |
442 | bool do_manual_update; | 526 | bool do_manual_update; |
443 | 527 | ||
@@ -539,24 +623,15 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) | |||
539 | if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) | 623 | if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) |
540 | return 0; | 624 | return 0; |
541 | 625 | ||
626 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) | ||
627 | return 0; | ||
628 | |||
542 | if (dssdev->type == OMAP_DISPLAY_TYPE_VENC | 629 | if (dssdev->type == OMAP_DISPLAY_TYPE_VENC |
543 | || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { | 630 | || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { |
544 | irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; | 631 | irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; |
545 | } else { | 632 | } else { |
546 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | 633 | irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? |
547 | enum omap_dss_update_mode mode; | 634 | DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2; |
548 | mode = dssdev->driver->get_update_mode(dssdev); | ||
549 | if (mode != OMAP_DSS_UPDATE_AUTO) | ||
550 | return 0; | ||
551 | |||
552 | irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? | ||
553 | DISPC_IRQ_FRAMEDONE | ||
554 | : DISPC_IRQ_FRAMEDONE2; | ||
555 | } else { | ||
556 | irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? | ||
557 | DISPC_IRQ_VSYNC | ||
558 | : DISPC_IRQ_VSYNC2; | ||
559 | } | ||
560 | } | 635 | } |
561 | 636 | ||
562 | mc = &dss_cache.manager_cache[mgr->id]; | 637 | mc = &dss_cache.manager_cache[mgr->id]; |
@@ -617,24 +692,15 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) | |||
617 | if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) | 692 | if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) |
618 | return 0; | 693 | return 0; |
619 | 694 | ||
695 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) | ||
696 | return 0; | ||
697 | |||
620 | if (dssdev->type == OMAP_DISPLAY_TYPE_VENC | 698 | if (dssdev->type == OMAP_DISPLAY_TYPE_VENC |
621 | || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { | 699 | || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { |
622 | irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; | 700 | irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; |
623 | } else { | 701 | } else { |
624 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | 702 | irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? |
625 | enum omap_dss_update_mode mode; | 703 | DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2; |
626 | mode = dssdev->driver->get_update_mode(dssdev); | ||
627 | if (mode != OMAP_DSS_UPDATE_AUTO) | ||
628 | return 0; | ||
629 | |||
630 | irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? | ||
631 | DISPC_IRQ_FRAMEDONE | ||
632 | : DISPC_IRQ_FRAMEDONE2; | ||
633 | } else { | ||
634 | irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? | ||
635 | DISPC_IRQ_VSYNC | ||
636 | : DISPC_IRQ_VSYNC2; | ||
637 | } | ||
638 | } | 704 | } |
639 | 705 | ||
640 | oc = &dss_cache.overlay_cache[ovl->id]; | 706 | oc = &dss_cache.overlay_cache[ovl->id]; |
@@ -720,10 +786,12 @@ static bool rectangle_intersects(int x1, int y1, int w1, int h1, | |||
720 | 786 | ||
721 | static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc) | 787 | static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc) |
722 | { | 788 | { |
723 | if (oc->out_width != 0 && oc->width != oc->out_width) | 789 | struct omap_overlay_info *oi = &oc->info; |
790 | |||
791 | if (oi->out_width != 0 && oi->width != oi->out_width) | ||
724 | return true; | 792 | return true; |
725 | 793 | ||
726 | if (oc->out_height != 0 && oc->height != oc->out_height) | 794 | if (oi->out_height != 0 && oi->height != oi->out_height) |
727 | return true; | 795 | return true; |
728 | 796 | ||
729 | return false; | 797 | return false; |
@@ -733,6 +801,8 @@ static int configure_overlay(enum omap_plane plane) | |||
733 | { | 801 | { |
734 | struct overlay_cache_data *c; | 802 | struct overlay_cache_data *c; |
735 | struct manager_cache_data *mc; | 803 | struct manager_cache_data *mc; |
804 | struct omap_overlay_info *oi; | ||
805 | struct omap_overlay_manager_info *mi; | ||
736 | u16 outw, outh; | 806 | u16 outw, outh; |
737 | u16 x, y, w, h; | 807 | u16 x, y, w, h; |
738 | u32 paddr; | 808 | u32 paddr; |
@@ -742,6 +812,7 @@ static int configure_overlay(enum omap_plane plane) | |||
742 | DSSDBGF("%d", plane); | 812 | DSSDBGF("%d", plane); |
743 | 813 | ||
744 | c = &dss_cache.overlay_cache[plane]; | 814 | c = &dss_cache.overlay_cache[plane]; |
815 | oi = &c->info; | ||
745 | 816 | ||
746 | if (!c->enabled) { | 817 | if (!c->enabled) { |
747 | dispc_enable_plane(plane, 0); | 818 | dispc_enable_plane(plane, 0); |
@@ -749,21 +820,22 @@ static int configure_overlay(enum omap_plane plane) | |||
749 | } | 820 | } |
750 | 821 | ||
751 | mc = &dss_cache.manager_cache[c->channel]; | 822 | mc = &dss_cache.manager_cache[c->channel]; |
823 | mi = &mc->info; | ||
752 | 824 | ||
753 | x = c->pos_x; | 825 | x = oi->pos_x; |
754 | y = c->pos_y; | 826 | y = oi->pos_y; |
755 | w = c->width; | 827 | w = oi->width; |
756 | h = c->height; | 828 | h = oi->height; |
757 | outw = c->out_width == 0 ? c->width : c->out_width; | 829 | outw = oi->out_width == 0 ? oi->width : oi->out_width; |
758 | outh = c->out_height == 0 ? c->height : c->out_height; | 830 | outh = oi->out_height == 0 ? oi->height : oi->out_height; |
759 | paddr = c->paddr; | 831 | paddr = oi->paddr; |
760 | 832 | ||
761 | orig_w = w; | 833 | orig_w = w; |
762 | orig_h = h; | 834 | orig_h = h; |
763 | orig_outw = outw; | 835 | orig_outw = outw; |
764 | orig_outh = outh; | 836 | orig_outh = outh; |
765 | 837 | ||
766 | if (c->manual_update && mc->do_manual_update) { | 838 | if (mc->manual_update && mc->do_manual_update) { |
767 | unsigned bpp; | 839 | unsigned bpp; |
768 | unsigned scale_x_m = w, scale_x_d = outw; | 840 | unsigned scale_x_m = w, scale_x_d = outw; |
769 | unsigned scale_y_m = h, scale_y_d = outh; | 841 | unsigned scale_y_m = h, scale_y_d = outh; |
@@ -775,7 +847,7 @@ static int configure_overlay(enum omap_plane plane) | |||
775 | return 0; | 847 | return 0; |
776 | } | 848 | } |
777 | 849 | ||
778 | switch (c->color_mode) { | 850 | switch (oi->color_mode) { |
779 | case OMAP_DSS_COLOR_NV12: | 851 | case OMAP_DSS_COLOR_NV12: |
780 | bpp = 8; | 852 | bpp = 8; |
781 | break; | 853 | break; |
@@ -805,23 +877,23 @@ static int configure_overlay(enum omap_plane plane) | |||
805 | BUG(); | 877 | BUG(); |
806 | } | 878 | } |
807 | 879 | ||
808 | if (mc->x > c->pos_x) { | 880 | if (mc->x > oi->pos_x) { |
809 | x = 0; | 881 | x = 0; |
810 | outw -= (mc->x - c->pos_x); | 882 | outw -= (mc->x - oi->pos_x); |
811 | paddr += (mc->x - c->pos_x) * | 883 | paddr += (mc->x - oi->pos_x) * |
812 | scale_x_m / scale_x_d * bpp / 8; | 884 | scale_x_m / scale_x_d * bpp / 8; |
813 | } else { | 885 | } else { |
814 | x = c->pos_x - mc->x; | 886 | x = oi->pos_x - mc->x; |
815 | } | 887 | } |
816 | 888 | ||
817 | if (mc->y > c->pos_y) { | 889 | if (mc->y > oi->pos_y) { |
818 | y = 0; | 890 | y = 0; |
819 | outh -= (mc->y - c->pos_y); | 891 | outh -= (mc->y - oi->pos_y); |
820 | paddr += (mc->y - c->pos_y) * | 892 | paddr += (mc->y - oi->pos_y) * |
821 | scale_y_m / scale_y_d * | 893 | scale_y_m / scale_y_d * |
822 | c->screen_width * bpp / 8; | 894 | oi->screen_width * bpp / 8; |
823 | } else { | 895 | } else { |
824 | y = c->pos_y - mc->y; | 896 | y = oi->pos_y - mc->y; |
825 | } | 897 | } |
826 | 898 | ||
827 | if (mc->w < (x + outw)) | 899 | if (mc->w < (x + outw)) |
@@ -840,8 +912,8 @@ static int configure_overlay(enum omap_plane plane) | |||
840 | * the width if the original width was bigger. | 912 | * the width if the original width was bigger. |
841 | */ | 913 | */ |
842 | if ((w & 1) && | 914 | if ((w & 1) && |
843 | (c->color_mode == OMAP_DSS_COLOR_YUV2 || | 915 | (oi->color_mode == OMAP_DSS_COLOR_YUV2 || |
844 | c->color_mode == OMAP_DSS_COLOR_UYVY)) { | 916 | oi->color_mode == OMAP_DSS_COLOR_UYVY)) { |
845 | if (orig_w > w) | 917 | if (orig_w > w) |
846 | w += 1; | 918 | w += 1; |
847 | else | 919 | else |
@@ -851,19 +923,19 @@ static int configure_overlay(enum omap_plane plane) | |||
851 | 923 | ||
852 | r = dispc_setup_plane(plane, | 924 | r = dispc_setup_plane(plane, |
853 | paddr, | 925 | paddr, |
854 | c->screen_width, | 926 | oi->screen_width, |
855 | x, y, | 927 | x, y, |
856 | w, h, | 928 | w, h, |
857 | outw, outh, | 929 | outw, outh, |
858 | c->color_mode, | 930 | oi->color_mode, |
859 | c->ilace, | 931 | c->ilace, |
860 | c->rotation_type, | 932 | oi->rotation_type, |
861 | c->rotation, | 933 | oi->rotation, |
862 | c->mirror, | 934 | oi->mirror, |
863 | c->global_alpha, | 935 | oi->global_alpha, |
864 | c->pre_mult_alpha, | 936 | oi->pre_mult_alpha, |
865 | c->channel, | 937 | c->channel, |
866 | c->p_uv_addr); | 938 | oi->p_uv_addr); |
867 | 939 | ||
868 | if (r) { | 940 | if (r) { |
869 | /* this shouldn't happen */ | 941 | /* this shouldn't happen */ |
@@ -874,8 +946,7 @@ static int configure_overlay(enum omap_plane plane) | |||
874 | 946 | ||
875 | dispc_enable_replication(plane, c->replication); | 947 | dispc_enable_replication(plane, c->replication); |
876 | 948 | ||
877 | dispc_set_burst_size(plane, c->burst_size); | 949 | dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high); |
878 | dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high); | ||
879 | 950 | ||
880 | dispc_enable_plane(plane, 1); | 951 | dispc_enable_plane(plane, 1); |
881 | 952 | ||
@@ -884,16 +955,21 @@ static int configure_overlay(enum omap_plane plane) | |||
884 | 955 | ||
885 | static void configure_manager(enum omap_channel channel) | 956 | static void configure_manager(enum omap_channel channel) |
886 | { | 957 | { |
887 | struct manager_cache_data *c; | 958 | struct omap_overlay_manager_info *mi; |
888 | 959 | ||
889 | DSSDBGF("%d", channel); | 960 | DSSDBGF("%d", channel); |
890 | 961 | ||
891 | c = &dss_cache.manager_cache[channel]; | 962 | /* picking info from the cache */ |
963 | mi = &dss_cache.manager_cache[channel].info; | ||
892 | 964 | ||
893 | dispc_set_default_color(channel, c->default_color); | 965 | dispc_set_default_color(channel, mi->default_color); |
894 | dispc_set_trans_key(channel, c->trans_key_type, c->trans_key); | 966 | dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key); |
895 | dispc_enable_trans_key(channel, c->trans_enabled); | 967 | dispc_enable_trans_key(channel, mi->trans_enabled); |
896 | dispc_enable_alpha_blending(channel, c->alpha_enabled); | 968 | dispc_enable_alpha_blending(channel, mi->alpha_enabled); |
969 | if (dss_has_feature(FEAT_CPR)) { | ||
970 | dispc_enable_cpr(channel, mi->cpr_enable); | ||
971 | dispc_set_cpr_coef(channel, &mi->cpr_coefs); | ||
972 | } | ||
897 | } | 973 | } |
898 | 974 | ||
899 | /* configure_dispc() tries to write values from cache to shadow registers. | 975 | /* configure_dispc() tries to write values from cache to shadow registers. |
@@ -928,7 +1004,7 @@ static int configure_dispc(void) | |||
928 | if (!oc->dirty) | 1004 | if (!oc->dirty) |
929 | continue; | 1005 | continue; |
930 | 1006 | ||
931 | if (oc->manual_update && !mc->do_manual_update) | 1007 | if (mc->manual_update && !mc->do_manual_update) |
932 | continue; | 1008 | continue; |
933 | 1009 | ||
934 | if (mgr_busy[oc->channel]) { | 1010 | if (mgr_busy[oc->channel]) { |
@@ -976,7 +1052,7 @@ static int configure_dispc(void) | |||
976 | /* We don't need GO with manual update display. LCD iface will | 1052 | /* We don't need GO with manual update display. LCD iface will |
977 | * always be turned off after frame, and new settings will be | 1053 | * always be turned off after frame, and new settings will be |
978 | * taken into use at the next update */ | 1054 | * taken into use at the next update */ |
979 | if (!mc->manual_upd_display) | 1055 | if (!mc->manual_update) |
980 | dispc_go(i); | 1056 | dispc_go(i); |
981 | } | 1057 | } |
982 | 1058 | ||
@@ -1011,6 +1087,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev, | |||
1011 | { | 1087 | { |
1012 | struct overlay_cache_data *oc; | 1088 | struct overlay_cache_data *oc; |
1013 | struct manager_cache_data *mc; | 1089 | struct manager_cache_data *mc; |
1090 | struct omap_overlay_info *oi; | ||
1014 | const int num_ovls = dss_feat_get_num_ovls(); | 1091 | const int num_ovls = dss_feat_get_num_ovls(); |
1015 | struct omap_overlay_manager *mgr; | 1092 | struct omap_overlay_manager *mgr; |
1016 | int i; | 1093 | int i; |
@@ -1053,6 +1130,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev, | |||
1053 | unsigned outw, outh; | 1130 | unsigned outw, outh; |
1054 | 1131 | ||
1055 | oc = &dss_cache.overlay_cache[i]; | 1132 | oc = &dss_cache.overlay_cache[i]; |
1133 | oi = &oc->info; | ||
1056 | 1134 | ||
1057 | if (oc->channel != mgr->id) | 1135 | if (oc->channel != mgr->id) |
1058 | continue; | 1136 | continue; |
@@ -1068,39 +1146,39 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev, | |||
1068 | if (!dispc_is_overlay_scaled(oc)) | 1146 | if (!dispc_is_overlay_scaled(oc)) |
1069 | continue; | 1147 | continue; |
1070 | 1148 | ||
1071 | outw = oc->out_width == 0 ? | 1149 | outw = oi->out_width == 0 ? |
1072 | oc->width : oc->out_width; | 1150 | oi->width : oi->out_width; |
1073 | outh = oc->out_height == 0 ? | 1151 | outh = oi->out_height == 0 ? |
1074 | oc->height : oc->out_height; | 1152 | oi->height : oi->out_height; |
1075 | 1153 | ||
1076 | /* is the overlay outside the update region? */ | 1154 | /* is the overlay outside the update region? */ |
1077 | if (!rectangle_intersects(x, y, w, h, | 1155 | if (!rectangle_intersects(x, y, w, h, |
1078 | oc->pos_x, oc->pos_y, | 1156 | oi->pos_x, oi->pos_y, |
1079 | outw, outh)) | 1157 | outw, outh)) |
1080 | continue; | 1158 | continue; |
1081 | 1159 | ||
1082 | /* is the overlay totally inside the update region? */ | 1160 | /* is the overlay totally inside the update region? */ |
1083 | if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh, | 1161 | if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh, |
1084 | x, y, w, h)) | 1162 | x, y, w, h)) |
1085 | continue; | 1163 | continue; |
1086 | 1164 | ||
1087 | if (x > oc->pos_x) | 1165 | if (x > oi->pos_x) |
1088 | x1 = oc->pos_x; | 1166 | x1 = oi->pos_x; |
1089 | else | 1167 | else |
1090 | x1 = x; | 1168 | x1 = x; |
1091 | 1169 | ||
1092 | if (y > oc->pos_y) | 1170 | if (y > oi->pos_y) |
1093 | y1 = oc->pos_y; | 1171 | y1 = oi->pos_y; |
1094 | else | 1172 | else |
1095 | y1 = y; | 1173 | y1 = y; |
1096 | 1174 | ||
1097 | if ((x + w) < (oc->pos_x + outw)) | 1175 | if ((x + w) < (oi->pos_x + outw)) |
1098 | x2 = oc->pos_x + outw; | 1176 | x2 = oi->pos_x + outw; |
1099 | else | 1177 | else |
1100 | x2 = x + w; | 1178 | x2 = x + w; |
1101 | 1179 | ||
1102 | if ((y + h) < (oc->pos_y + outh)) | 1180 | if ((y + h) < (oi->pos_y + outh)) |
1103 | y2 = oc->pos_y + outh; | 1181 | y2 = oi->pos_y + outh; |
1104 | else | 1182 | else |
1105 | y2 = y + h; | 1183 | y2 = y + h; |
1106 | 1184 | ||
@@ -1236,6 +1314,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1236 | 1314 | ||
1237 | DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); | 1315 | DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); |
1238 | 1316 | ||
1317 | r = dispc_runtime_get(); | ||
1318 | if (r) | ||
1319 | return r; | ||
1320 | |||
1239 | spin_lock_irqsave(&dss_cache.lock, flags); | 1321 | spin_lock_irqsave(&dss_cache.lock, flags); |
1240 | 1322 | ||
1241 | /* Configure overlays */ | 1323 | /* Configure overlays */ |
@@ -1275,23 +1357,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1275 | 1357 | ||
1276 | ovl->info_dirty = false; | 1358 | ovl->info_dirty = false; |
1277 | oc->dirty = true; | 1359 | oc->dirty = true; |
1278 | 1360 | oc->info = ovl->info; | |
1279 | oc->paddr = ovl->info.paddr; | ||
1280 | oc->vaddr = ovl->info.vaddr; | ||
1281 | oc->p_uv_addr = ovl->info.p_uv_addr; | ||
1282 | oc->screen_width = ovl->info.screen_width; | ||
1283 | oc->width = ovl->info.width; | ||
1284 | oc->height = ovl->info.height; | ||
1285 | oc->color_mode = ovl->info.color_mode; | ||
1286 | oc->rotation = ovl->info.rotation; | ||
1287 | oc->rotation_type = ovl->info.rotation_type; | ||
1288 | oc->mirror = ovl->info.mirror; | ||
1289 | oc->pos_x = ovl->info.pos_x; | ||
1290 | oc->pos_y = ovl->info.pos_y; | ||
1291 | oc->out_width = ovl->info.out_width; | ||
1292 | oc->out_height = ovl->info.out_height; | ||
1293 | oc->global_alpha = ovl->info.global_alpha; | ||
1294 | oc->pre_mult_alpha = ovl->info.pre_mult_alpha; | ||
1295 | 1361 | ||
1296 | oc->replication = | 1362 | oc->replication = |
1297 | dss_use_replication(dssdev, ovl->info.color_mode); | 1363 | dss_use_replication(dssdev, ovl->info.color_mode); |
@@ -1302,11 +1368,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1302 | 1368 | ||
1303 | oc->enabled = true; | 1369 | oc->enabled = true; |
1304 | 1370 | ||
1305 | oc->manual_update = | ||
1306 | dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE && | ||
1307 | dssdev->driver->get_update_mode(dssdev) != | ||
1308 | OMAP_DSS_UPDATE_AUTO; | ||
1309 | |||
1310 | ++num_planes_enabled; | 1371 | ++num_planes_enabled; |
1311 | } | 1372 | } |
1312 | 1373 | ||
@@ -1334,20 +1395,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1334 | 1395 | ||
1335 | mgr->info_dirty = false; | 1396 | mgr->info_dirty = false; |
1336 | mc->dirty = true; | 1397 | mc->dirty = true; |
1337 | 1398 | mc->info = mgr->info; | |
1338 | mc->default_color = mgr->info.default_color; | ||
1339 | mc->trans_key_type = mgr->info.trans_key_type; | ||
1340 | mc->trans_key = mgr->info.trans_key; | ||
1341 | mc->trans_enabled = mgr->info.trans_enabled; | ||
1342 | mc->alpha_enabled = mgr->info.alpha_enabled; | ||
1343 | |||
1344 | mc->manual_upd_display = | ||
1345 | dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; | ||
1346 | 1399 | ||
1347 | mc->manual_update = | 1400 | mc->manual_update = |
1348 | dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE && | 1401 | dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; |
1349 | dssdev->driver->get_update_mode(dssdev) != | ||
1350 | OMAP_DSS_UPDATE_AUTO; | ||
1351 | } | 1402 | } |
1352 | 1403 | ||
1353 | /* XXX TODO: Try to get fifomerge working. The problem is that it | 1404 | /* XXX TODO: Try to get fifomerge working. The problem is that it |
@@ -1368,7 +1419,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1368 | /* Configure overlay fifos */ | 1419 | /* Configure overlay fifos */ |
1369 | for (i = 0; i < omap_dss_get_num_overlays(); ++i) { | 1420 | for (i = 0; i < omap_dss_get_num_overlays(); ++i) { |
1370 | struct omap_dss_device *dssdev; | 1421 | struct omap_dss_device *dssdev; |
1371 | u32 size; | 1422 | u32 size, burst_size; |
1372 | 1423 | ||
1373 | ovl = omap_dss_get_overlay(i); | 1424 | ovl = omap_dss_get_overlay(i); |
1374 | 1425 | ||
@@ -1386,6 +1437,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1386 | if (use_fifomerge) | 1437 | if (use_fifomerge) |
1387 | size *= 3; | 1438 | size *= 3; |
1388 | 1439 | ||
1440 | burst_size = dispc_get_burst_size(ovl->id); | ||
1441 | |||
1389 | switch (dssdev->type) { | 1442 | switch (dssdev->type) { |
1390 | case OMAP_DISPLAY_TYPE_DPI: | 1443 | case OMAP_DISPLAY_TYPE_DPI: |
1391 | case OMAP_DISPLAY_TYPE_DBI: | 1444 | case OMAP_DISPLAY_TYPE_DBI: |
@@ -1393,13 +1446,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1393 | case OMAP_DISPLAY_TYPE_VENC: | 1446 | case OMAP_DISPLAY_TYPE_VENC: |
1394 | case OMAP_DISPLAY_TYPE_HDMI: | 1447 | case OMAP_DISPLAY_TYPE_HDMI: |
1395 | default_get_overlay_fifo_thresholds(ovl->id, size, | 1448 | default_get_overlay_fifo_thresholds(ovl->id, size, |
1396 | &oc->burst_size, &oc->fifo_low, | 1449 | burst_size, &oc->fifo_low, |
1397 | &oc->fifo_high); | 1450 | &oc->fifo_high); |
1398 | break; | 1451 | break; |
1399 | #ifdef CONFIG_OMAP2_DSS_DSI | 1452 | #ifdef CONFIG_OMAP2_DSS_DSI |
1400 | case OMAP_DISPLAY_TYPE_DSI: | 1453 | case OMAP_DISPLAY_TYPE_DSI: |
1401 | dsi_get_overlay_fifo_thresholds(ovl->id, size, | 1454 | dsi_get_overlay_fifo_thresholds(ovl->id, size, |
1402 | &oc->burst_size, &oc->fifo_low, | 1455 | burst_size, &oc->fifo_low, |
1403 | &oc->fifo_high); | 1456 | &oc->fifo_high); |
1404 | break; | 1457 | break; |
1405 | #endif | 1458 | #endif |
@@ -1409,7 +1462,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1409 | } | 1462 | } |
1410 | 1463 | ||
1411 | r = 0; | 1464 | r = 0; |
1412 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
1413 | if (!dss_cache.irq_enabled) { | 1465 | if (!dss_cache.irq_enabled) { |
1414 | u32 mask; | 1466 | u32 mask; |
1415 | 1467 | ||
@@ -1422,10 +1474,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) | |||
1422 | dss_cache.irq_enabled = true; | 1474 | dss_cache.irq_enabled = true; |
1423 | } | 1475 | } |
1424 | configure_dispc(); | 1476 | configure_dispc(); |
1425 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
1426 | 1477 | ||
1427 | spin_unlock_irqrestore(&dss_cache.lock, flags); | 1478 | spin_unlock_irqrestore(&dss_cache.lock, flags); |
1428 | 1479 | ||
1480 | dispc_runtime_put(); | ||
1481 | |||
1429 | return r; | 1482 | return r; |
1430 | } | 1483 | } |
1431 | 1484 | ||
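The manager.c hunks above drop the old dss_clk_enable()/dss_clk_disable() bracket around configure_dispc() and rely on runtime PM instead: the dispc_runtime_put() added after spin_unlock_irqrestore() balances a dispc_runtime_get() taken earlier in omap_dss_mgr_apply(), outside the quoted context. As a rough sketch of that bracket only (not the driver's actual code; configure_hw() is a hypothetical stand-in for the DISPC register programming):

#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

/* Sketch: hold a runtime PM reference while the shadow registers are
 * written, then let the block autosuspend again. */
static int apply_with_runtime_pm(struct device *dispc_dev, spinlock_t *lock,
				 void (*configure_hw)(void))
{
	unsigned long flags;
	int r;

	/* pm_runtime_get_sync() may sleep, so take it before the spinlock */
	r = pm_runtime_get_sync(dispc_dev);
	if (r < 0) {
		pm_runtime_put_noidle(dispc_dev);	/* balance the failed get */
		return r;
	}

	spin_lock_irqsave(lock, flags);
	configure_hw();				/* register writes happen here */
	spin_unlock_irqrestore(lock, flags);

	pm_runtime_put(dispc_dev);		/* allow autosuspend again */
	return 0;
}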
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index 0f08025b1f0e..c84380c53c39 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c | |||
@@ -84,32 +84,42 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf, | |||
84 | 84 | ||
85 | old_mgr = ovl->manager; | 85 | old_mgr = ovl->manager; |
86 | 86 | ||
87 | r = dispc_runtime_get(); | ||
88 | if (r) | ||
89 | return r; | ||
90 | |||
87 | /* detach old manager */ | 91 | /* detach old manager */ |
88 | if (old_mgr) { | 92 | if (old_mgr) { |
89 | r = ovl->unset_manager(ovl); | 93 | r = ovl->unset_manager(ovl); |
90 | if (r) { | 94 | if (r) { |
91 | DSSERR("detach failed\n"); | 95 | DSSERR("detach failed\n"); |
92 | return r; | 96 | goto err; |
93 | } | 97 | } |
94 | 98 | ||
95 | r = old_mgr->apply(old_mgr); | 99 | r = old_mgr->apply(old_mgr); |
96 | if (r) | 100 | if (r) |
97 | return r; | 101 | goto err; |
98 | } | 102 | } |
99 | 103 | ||
100 | if (mgr) { | 104 | if (mgr) { |
101 | r = ovl->set_manager(ovl, mgr); | 105 | r = ovl->set_manager(ovl, mgr); |
102 | if (r) { | 106 | if (r) { |
103 | DSSERR("Failed to attach overlay\n"); | 107 | DSSERR("Failed to attach overlay\n"); |
104 | return r; | 108 | goto err; |
105 | } | 109 | } |
106 | 110 | ||
107 | r = mgr->apply(mgr); | 111 | r = mgr->apply(mgr); |
108 | if (r) | 112 | if (r) |
109 | return r; | 113 | goto err; |
110 | } | 114 | } |
111 | 115 | ||
116 | dispc_runtime_put(); | ||
117 | |||
112 | return size; | 118 | return size; |
119 | |||
120 | err: | ||
121 | dispc_runtime_put(); | ||
122 | return r; | ||
113 | } | 123 | } |
114 | 124 | ||
115 | static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) | 125 | static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) |
@@ -238,6 +248,9 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, | |||
238 | u8 alpha; | 248 | u8 alpha; |
239 | struct omap_overlay_info info; | 249 | struct omap_overlay_info info; |
240 | 250 | ||
251 | if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) | ||
252 | return -ENODEV; | ||
253 | |||
241 | r = kstrtou8(buf, 0, &alpha); | 254 | r = kstrtou8(buf, 0, &alpha); |
242 | if (r) | 255 | if (r) |
243 | return r; | 256 | return r; |
@@ -504,7 +517,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl, | |||
504 | 517 | ||
505 | ovl->manager = mgr; | 518 | ovl->manager = mgr; |
506 | 519 | ||
507 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
508 | /* XXX: When there is an overlay on a DSI manual update display, and | 520 | /* XXX: When there is an overlay on a DSI manual update display, and |
509 | * the overlay is first disabled, then moved to tv, and enabled, we | 521 | * the overlay is first disabled, then moved to tv, and enabled, we |
510 | * seem to get SYNC_LOST_DIGIT error. | 522 | * seem to get SYNC_LOST_DIGIT error. |
@@ -518,7 +530,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl, | |||
518 | * the overlay, but before moving the overlay to TV. | 530 | * the overlay, but before moving the overlay to TV. |
519 | */ | 531 | */ |
520 | dispc_set_channel_out(ovl->id, mgr->id); | 532 | dispc_set_channel_out(ovl->id, mgr->id); |
521 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | ||
522 | 533 | ||
523 | return 0; | 534 | return 0; |
524 | } | 535 | } |
@@ -719,6 +730,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) | |||
719 | } | 730 | } |
720 | 731 | ||
721 | if (mgr) { | 732 | if (mgr) { |
733 | dispc_runtime_get(); | ||
734 | |||
722 | for (i = 0; i < dss_feat_get_num_ovls(); i++) { | 735 | for (i = 0; i < dss_feat_get_num_ovls(); i++) { |
723 | struct omap_overlay *ovl; | 736 | struct omap_overlay *ovl; |
724 | ovl = omap_dss_get_overlay(i); | 737 | ovl = omap_dss_get_overlay(i); |
@@ -728,6 +741,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) | |||
728 | omap_dss_set_manager(ovl, mgr); | 741 | omap_dss_set_manager(ovl, mgr); |
729 | } | 742 | } |
730 | } | 743 | } |
744 | |||
745 | dispc_runtime_put(); | ||
731 | } | 746 | } |
732 | } | 747 | } |
733 | 748 | ||
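overlay_manager_store() above now takes a dispc_runtime_get() reference before detaching and re-attaching managers, so its early returns become jumps to a common err: label that always drops the reference. The same shape in isolation, with detach/attach as hypothetical callbacks rather than the real unset_manager/set_manager paths:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

/* Sketch of the single-exit unwind: every failure after the runtime PM
 * reference is taken funnels through one label, so the reference cannot
 * be leaked on an error path. */
static ssize_t store_manager_sketch(struct device *dispc_dev,
				    int (*detach)(void), int (*attach)(void),
				    size_t size)
{
	int r;

	r = pm_runtime_get_sync(dispc_dev);
	if (r < 0) {
		pm_runtime_put_noidle(dispc_dev);
		return r;
	}

	r = detach();
	if (r)
		goto err;

	r = attach();
	if (r)
		goto err;

	pm_runtime_put(dispc_dev);
	return size;		/* sysfs store: whole buffer consumed */

err:
	pm_runtime_put(dispc_dev);
	return r;
}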
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index c06fbe0bc678..39f4c597026a 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/hrtimer.h> | 33 | #include <linux/hrtimer.h> |
34 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
35 | #include <linux/semaphore.h> | 35 | #include <linux/semaphore.h> |
36 | #include <linux/platform_device.h> | ||
37 | #include <linux/pm_runtime.h> | ||
36 | 38 | ||
37 | #include <video/omapdss.h> | 39 | #include <video/omapdss.h> |
38 | #include "dss.h" | 40 | #include "dss.h" |
@@ -120,12 +122,25 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx) | |||
120 | return __raw_readl(rfbi.base + idx.idx); | 122 | return __raw_readl(rfbi.base + idx.idx); |
121 | } | 123 | } |
122 | 124 | ||
123 | static void rfbi_enable_clocks(bool enable) | 125 | static int rfbi_runtime_get(void) |
124 | { | 126 | { |
125 | if (enable) | 127 | int r; |
126 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 128 | |
127 | else | 129 | DSSDBG("rfbi_runtime_get\n"); |
128 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 130 | |
131 | r = pm_runtime_get_sync(&rfbi.pdev->dev); | ||
132 | WARN_ON(r < 0); | ||
133 | return r < 0 ? r : 0; | ||
134 | } | ||
135 | |||
136 | static void rfbi_runtime_put(void) | ||
137 | { | ||
138 | int r; | ||
139 | |||
140 | DSSDBG("rfbi_runtime_put\n"); | ||
141 | |||
142 | r = pm_runtime_put(&rfbi.pdev->dev); | ||
143 | WARN_ON(r < 0); | ||
129 | } | 144 | } |
130 | 145 | ||
131 | void rfbi_bus_lock(void) | 146 | void rfbi_bus_lock(void) |
@@ -805,7 +820,8 @@ void rfbi_dump_regs(struct seq_file *s) | |||
805 | { | 820 | { |
806 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) | 821 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) |
807 | 822 | ||
808 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 823 | if (rfbi_runtime_get()) |
824 | return; | ||
809 | 825 | ||
810 | DUMPREG(RFBI_REVISION); | 826 | DUMPREG(RFBI_REVISION); |
811 | DUMPREG(RFBI_SYSCONFIG); | 827 | DUMPREG(RFBI_SYSCONFIG); |
@@ -836,7 +852,7 @@ void rfbi_dump_regs(struct seq_file *s) | |||
836 | DUMPREG(RFBI_VSYNC_WIDTH); | 852 | DUMPREG(RFBI_VSYNC_WIDTH); |
837 | DUMPREG(RFBI_HSYNC_WIDTH); | 853 | DUMPREG(RFBI_HSYNC_WIDTH); |
838 | 854 | ||
839 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 855 | rfbi_runtime_put(); |
840 | #undef DUMPREG | 856 | #undef DUMPREG |
841 | } | 857 | } |
842 | 858 | ||
@@ -844,7 +860,9 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) | |||
844 | { | 860 | { |
845 | int r; | 861 | int r; |
846 | 862 | ||
847 | rfbi_enable_clocks(1); | 863 | r = rfbi_runtime_get(); |
864 | if (r) | ||
865 | return r; | ||
848 | 866 | ||
849 | r = omap_dss_start_device(dssdev); | 867 | r = omap_dss_start_device(dssdev); |
850 | if (r) { | 868 | if (r) { |
@@ -879,6 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) | |||
879 | err1: | 897 | err1: |
880 | omap_dss_stop_device(dssdev); | 898 | omap_dss_stop_device(dssdev); |
881 | err0: | 899 | err0: |
900 | rfbi_runtime_put(); | ||
882 | return r; | 901 | return r; |
883 | } | 902 | } |
884 | EXPORT_SYMBOL(omapdss_rfbi_display_enable); | 903 | EXPORT_SYMBOL(omapdss_rfbi_display_enable); |
@@ -889,7 +908,7 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev) | |||
889 | DISPC_IRQ_FRAMEDONE); | 908 | DISPC_IRQ_FRAMEDONE); |
890 | omap_dss_stop_device(dssdev); | 909 | omap_dss_stop_device(dssdev); |
891 | 910 | ||
892 | rfbi_enable_clocks(0); | 911 | rfbi_runtime_put(); |
893 | } | 912 | } |
894 | EXPORT_SYMBOL(omapdss_rfbi_display_disable); | 913 | EXPORT_SYMBOL(omapdss_rfbi_display_disable); |
895 | 914 | ||
@@ -904,8 +923,9 @@ int rfbi_init_display(struct omap_dss_device *dssdev) | |||
904 | static int omap_rfbihw_probe(struct platform_device *pdev) | 923 | static int omap_rfbihw_probe(struct platform_device *pdev) |
905 | { | 924 | { |
906 | u32 rev; | 925 | u32 rev; |
907 | u32 l; | ||
908 | struct resource *rfbi_mem; | 926 | struct resource *rfbi_mem; |
927 | struct clk *clk; | ||
928 | int r; | ||
909 | 929 | ||
910 | rfbi.pdev = pdev; | 930 | rfbi.pdev = pdev; |
911 | 931 | ||
@@ -914,46 +934,102 @@ static int omap_rfbihw_probe(struct platform_device *pdev) | |||
914 | rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); | 934 | rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); |
915 | if (!rfbi_mem) { | 935 | if (!rfbi_mem) { |
916 | DSSERR("can't get IORESOURCE_MEM RFBI\n"); | 936 | DSSERR("can't get IORESOURCE_MEM RFBI\n"); |
917 | return -EINVAL; | 937 | r = -EINVAL; |
938 | goto err_ioremap; | ||
918 | } | 939 | } |
919 | rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem)); | 940 | rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem)); |
920 | if (!rfbi.base) { | 941 | if (!rfbi.base) { |
921 | DSSERR("can't ioremap RFBI\n"); | 942 | DSSERR("can't ioremap RFBI\n"); |
922 | return -ENOMEM; | 943 | r = -ENOMEM; |
944 | goto err_ioremap; | ||
923 | } | 945 | } |
924 | 946 | ||
925 | rfbi_enable_clocks(1); | 947 | pm_runtime_enable(&pdev->dev); |
948 | |||
949 | r = rfbi_runtime_get(); | ||
950 | if (r) | ||
951 | goto err_get_rfbi; | ||
926 | 952 | ||
927 | msleep(10); | 953 | msleep(10); |
928 | 954 | ||
929 | rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000; | 955 | if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630()) |
956 | clk = dss_get_ick(); | ||
957 | else | ||
958 | clk = clk_get(&pdev->dev, "ick"); | ||
959 | if (IS_ERR(clk)) { | ||
960 | DSSERR("can't get ick\n"); | ||
961 | r = PTR_ERR(clk); | ||
962 | goto err_get_ick; | ||
963 | } | ||
964 | |||
965 | rfbi.l4_khz = clk_get_rate(clk) / 1000; | ||
930 | 966 | ||
931 | /* Enable autoidle and smart-idle */ | 967 | clk_put(clk); |
932 | l = rfbi_read_reg(RFBI_SYSCONFIG); | ||
933 | l |= (1 << 0) | (2 << 3); | ||
934 | rfbi_write_reg(RFBI_SYSCONFIG, l); | ||
935 | 968 | ||
936 | rev = rfbi_read_reg(RFBI_REVISION); | 969 | rev = rfbi_read_reg(RFBI_REVISION); |
937 | dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n", | 970 | dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n", |
938 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); | 971 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); |
939 | 972 | ||
940 | rfbi_enable_clocks(0); | 973 | rfbi_runtime_put(); |
941 | 974 | ||
942 | return 0; | 975 | return 0; |
976 | |||
977 | err_get_ick: | ||
978 | rfbi_runtime_put(); | ||
979 | err_get_rfbi: | ||
980 | pm_runtime_disable(&pdev->dev); | ||
981 | iounmap(rfbi.base); | ||
982 | err_ioremap: | ||
983 | return r; | ||
943 | } | 984 | } |
944 | 985 | ||
945 | static int omap_rfbihw_remove(struct platform_device *pdev) | 986 | static int omap_rfbihw_remove(struct platform_device *pdev) |
946 | { | 987 | { |
988 | pm_runtime_disable(&pdev->dev); | ||
947 | iounmap(rfbi.base); | 989 | iounmap(rfbi.base); |
948 | return 0; | 990 | return 0; |
949 | } | 991 | } |
950 | 992 | ||
993 | static int rfbi_runtime_suspend(struct device *dev) | ||
994 | { | ||
995 | dispc_runtime_put(); | ||
996 | dss_runtime_put(); | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static int rfbi_runtime_resume(struct device *dev) | ||
1002 | { | ||
1003 | int r; | ||
1004 | |||
1005 | r = dss_runtime_get(); | ||
1006 | if (r < 0) | ||
1007 | goto err_get_dss; | ||
1008 | |||
1009 | r = dispc_runtime_get(); | ||
1010 | if (r < 0) | ||
1011 | goto err_get_dispc; | ||
1012 | |||
1013 | return 0; | ||
1014 | |||
1015 | err_get_dispc: | ||
1016 | dss_runtime_put(); | ||
1017 | err_get_dss: | ||
1018 | return r; | ||
1019 | } | ||
1020 | |||
1021 | static const struct dev_pm_ops rfbi_pm_ops = { | ||
1022 | .runtime_suspend = rfbi_runtime_suspend, | ||
1023 | .runtime_resume = rfbi_runtime_resume, | ||
1024 | }; | ||
1025 | |||
951 | static struct platform_driver omap_rfbihw_driver = { | 1026 | static struct platform_driver omap_rfbihw_driver = { |
952 | .probe = omap_rfbihw_probe, | 1027 | .probe = omap_rfbihw_probe, |
953 | .remove = omap_rfbihw_remove, | 1028 | .remove = omap_rfbihw_remove, |
954 | .driver = { | 1029 | .driver = { |
955 | .name = "omapdss_rfbi", | 1030 | .name = "omapdss_rfbi", |
956 | .owner = THIS_MODULE, | 1031 | .owner = THIS_MODULE, |
1032 | .pm = &rfbi_pm_ops, | ||
957 | }, | 1033 | }, |
958 | }; | 1034 | }; |
959 | 1035 | ||
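rfbi.c replaces the rfbi_enable_clocks() helper with thin wrappers around the runtime PM API, and the probe path now calls pm_runtime_enable() before the first rfbi_runtime_get(). The wrapper idiom used throughout these patches (WARN_ON on failure, normalise the positive "already active" return of pm_runtime_get_sync() to 0) looks like this in isolation; the my_ip singleton and probe body are placeholders, not the driver's code:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Placeholder module state, mirroring the 'rfbi' singleton. */
static struct {
	struct platform_device *pdev;
} my_ip;

static int my_ip_runtime_get(void)
{
	int r;

	r = pm_runtime_get_sync(&my_ip.pdev->dev);
	WARN_ON(r < 0);			/* complain loudly, but keep going */
	return r < 0 ? r : 0;		/* 1 ("already active") is not an error */
}

static void my_ip_runtime_put(void)
{
	int r;

	r = pm_runtime_put(&my_ip.pdev->dev);
	WARN_ON(r < 0);
}

/* Probe-time ordering as in omap_rfbihw_probe(): enable runtime PM on the
 * device first, then take a reference while reading the revision. */
static int my_ip_probe(struct platform_device *pdev)
{
	int r;

	my_ip.pdev = pdev;
	pm_runtime_enable(&pdev->dev);

	r = my_ip_runtime_get();
	if (r) {
		pm_runtime_disable(&pdev->dev);
		return r;
	}

	/* ... read revision register, query interface clock rate ... */

	my_ip_runtime_put();
	return 0;
}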
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 0bd4b0350f80..3a688c871a45 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c | |||
@@ -20,13 +20,11 @@ | |||
20 | #define DSS_SUBSYS_NAME "SDI" | 20 | #define DSS_SUBSYS_NAME "SDI" |
21 | 21 | ||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/clk.h> | ||
24 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
25 | #include <linux/err.h> | 24 | #include <linux/err.h> |
26 | #include <linux/regulator/consumer.h> | 25 | #include <linux/regulator/consumer.h> |
27 | 26 | ||
28 | #include <video/omapdss.h> | 27 | #include <video/omapdss.h> |
29 | #include <plat/cpu.h> | ||
30 | #include "dss.h" | 28 | #include "dss.h" |
31 | 29 | ||
32 | static struct { | 30 | static struct { |
@@ -60,14 +58,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) | |||
60 | r = omap_dss_start_device(dssdev); | 58 | r = omap_dss_start_device(dssdev); |
61 | if (r) { | 59 | if (r) { |
62 | DSSERR("failed to start device\n"); | 60 | DSSERR("failed to start device\n"); |
63 | goto err0; | 61 | goto err_start_dev; |
64 | } | 62 | } |
65 | 63 | ||
66 | r = regulator_enable(sdi.vdds_sdi_reg); | 64 | r = regulator_enable(sdi.vdds_sdi_reg); |
67 | if (r) | 65 | if (r) |
68 | goto err1; | 66 | goto err_reg_enable; |
69 | 67 | ||
70 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); | 68 | r = dss_runtime_get(); |
69 | if (r) | ||
70 | goto err_get_dss; | ||
71 | |||
72 | r = dispc_runtime_get(); | ||
73 | if (r) | ||
74 | goto err_get_dispc; | ||
71 | 75 | ||
72 | sdi_basic_init(dssdev); | 76 | sdi_basic_init(dssdev); |
73 | 77 | ||
@@ -80,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) | |||
80 | r = dss_calc_clock_div(1, t->pixel_clock * 1000, | 84 | r = dss_calc_clock_div(1, t->pixel_clock * 1000, |
81 | &dss_cinfo, &dispc_cinfo); | 85 | &dss_cinfo, &dispc_cinfo); |
82 | if (r) | 86 | if (r) |
83 | goto err2; | 87 | goto err_calc_clock_div; |
84 | 88 | ||
85 | fck = dss_cinfo.fck; | 89 | fck = dss_cinfo.fck; |
86 | lck_div = dispc_cinfo.lck_div; | 90 | lck_div = dispc_cinfo.lck_div; |
@@ -101,27 +105,34 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) | |||
101 | 105 | ||
102 | r = dss_set_clock_div(&dss_cinfo); | 106 | r = dss_set_clock_div(&dss_cinfo); |
103 | if (r) | 107 | if (r) |
104 | goto err2; | 108 | goto err_set_dss_clock_div; |
105 | 109 | ||
106 | r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); | 110 | r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); |
107 | if (r) | 111 | if (r) |
108 | goto err2; | 112 | goto err_set_dispc_clock_div; |
109 | 113 | ||
110 | dss_sdi_init(dssdev->phy.sdi.datapairs); | 114 | dss_sdi_init(dssdev->phy.sdi.datapairs); |
111 | r = dss_sdi_enable(); | 115 | r = dss_sdi_enable(); |
112 | if (r) | 116 | if (r) |
113 | goto err1; | 117 | goto err_sdi_enable; |
114 | mdelay(2); | 118 | mdelay(2); |
115 | 119 | ||
116 | dssdev->manager->enable(dssdev->manager); | 120 | dssdev->manager->enable(dssdev->manager); |
117 | 121 | ||
118 | return 0; | 122 | return 0; |
119 | err2: | 123 | |
120 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 124 | err_sdi_enable: |
125 | err_set_dispc_clock_div: | ||
126 | err_set_dss_clock_div: | ||
127 | err_calc_clock_div: | ||
128 | dispc_runtime_put(); | ||
129 | err_get_dispc: | ||
130 | dss_runtime_put(); | ||
131 | err_get_dss: | ||
121 | regulator_disable(sdi.vdds_sdi_reg); | 132 | regulator_disable(sdi.vdds_sdi_reg); |
122 | err1: | 133 | err_reg_enable: |
123 | omap_dss_stop_device(dssdev); | 134 | omap_dss_stop_device(dssdev); |
124 | err0: | 135 | err_start_dev: |
125 | return r; | 136 | return r; |
126 | } | 137 | } |
127 | EXPORT_SYMBOL(omapdss_sdi_display_enable); | 138 | EXPORT_SYMBOL(omapdss_sdi_display_enable); |
@@ -132,7 +143,8 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev) | |||
132 | 143 | ||
133 | dss_sdi_disable(); | 144 | dss_sdi_disable(); |
134 | 145 | ||
135 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); | 146 | dispc_runtime_put(); |
147 | dss_runtime_put(); | ||
136 | 148 | ||
137 | regulator_disable(sdi.vdds_sdi_reg); | 149 | regulator_disable(sdi.vdds_sdi_reg); |
138 | 150 | ||
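The sdi.c enable path now manages one more pair of resources (the dss and dispc runtime PM references in place of the single clock enable), and the numbered error labels are renamed after the step that failed. The general shape, acquiring resources in order and releasing them in reverse on failure, with hypothetical acquire/release helpers standing in for regulator_enable(), dss_runtime_get(), dispc_runtime_get() and the clock setup:

/* Declarations only; these are placeholders for the real helpers. */
int acquire_regulator(void);
void release_regulator(void);
int acquire_dss(void);
void release_dss(void);
int acquire_dispc(void);
void release_dispc(void);
int program_clocks_and_timings(void);

/* Sketch of reverse-order unwinding with descriptive labels: each label
 * undoes exactly the steps that succeeded before the failure. */
static int enable_display_sketch(void)
{
	int r;

	r = acquire_regulator();
	if (r)
		goto err_reg_enable;

	r = acquire_dss();
	if (r)
		goto err_get_dss;

	r = acquire_dispc();
	if (r)
		goto err_get_dispc;

	r = program_clocks_and_timings();
	if (r)
		goto err_configure;

	return 0;

err_configure:
	release_dispc();
err_get_dispc:
	release_dss();
err_get_dss:
	release_regulator();
err_reg_enable:
	return r;
}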
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 980f919ed987..173c66430dad 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c | |||
@@ -33,11 +33,13 @@ | |||
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
35 | #include <linux/regulator/consumer.h> | 35 | #include <linux/regulator/consumer.h> |
36 | #include <linux/pm_runtime.h> | ||
36 | 37 | ||
37 | #include <video/omapdss.h> | 38 | #include <video/omapdss.h> |
38 | #include <plat/cpu.h> | 39 | #include <plat/cpu.h> |
39 | 40 | ||
40 | #include "dss.h" | 41 | #include "dss.h" |
42 | #include "dss_features.h" | ||
41 | 43 | ||
42 | /* Venc registers */ | 44 | /* Venc registers */ |
43 | #define VENC_REV_ID 0x00 | 45 | #define VENC_REV_ID 0x00 |
@@ -292,6 +294,9 @@ static struct { | |||
292 | struct mutex venc_lock; | 294 | struct mutex venc_lock; |
293 | u32 wss_data; | 295 | u32 wss_data; |
294 | struct regulator *vdda_dac_reg; | 296 | struct regulator *vdda_dac_reg; |
297 | |||
298 | struct clk *tv_clk; | ||
299 | struct clk *tv_dac_clk; | ||
295 | } venc; | 300 | } venc; |
296 | 301 | ||
297 | static inline void venc_write_reg(int idx, u32 val) | 302 | static inline void venc_write_reg(int idx, u32 val) |
@@ -380,14 +385,25 @@ static void venc_reset(void) | |||
380 | #endif | 385 | #endif |
381 | } | 386 | } |
382 | 387 | ||
383 | static void venc_enable_clocks(int enable) | 388 | static int venc_runtime_get(void) |
389 | { | ||
390 | int r; | ||
391 | |||
392 | DSSDBG("venc_runtime_get\n"); | ||
393 | |||
394 | r = pm_runtime_get_sync(&venc.pdev->dev); | ||
395 | WARN_ON(r < 0); | ||
396 | return r < 0 ? r : 0; | ||
397 | } | ||
398 | |||
399 | static void venc_runtime_put(void) | ||
384 | { | 400 | { |
385 | if (enable) | 401 | int r; |
386 | dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK | | 402 | |
387 | DSS_CLK_VIDFCK); | 403 | DSSDBG("venc_runtime_put\n"); |
388 | else | 404 | |
389 | dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK | | 405 | r = pm_runtime_put(&venc.pdev->dev); |
390 | DSS_CLK_VIDFCK); | 406 | WARN_ON(r < 0); |
391 | } | 407 | } |
392 | 408 | ||
393 | static const struct venc_config *venc_timings_to_config( | 409 | static const struct venc_config *venc_timings_to_config( |
@@ -406,8 +422,6 @@ static void venc_power_on(struct omap_dss_device *dssdev) | |||
406 | { | 422 | { |
407 | u32 l; | 423 | u32 l; |
408 | 424 | ||
409 | venc_enable_clocks(1); | ||
410 | |||
411 | venc_reset(); | 425 | venc_reset(); |
412 | venc_write_config(venc_timings_to_config(&dssdev->panel.timings)); | 426 | venc_write_config(venc_timings_to_config(&dssdev->panel.timings)); |
413 | 427 | ||
@@ -448,8 +462,6 @@ static void venc_power_off(struct omap_dss_device *dssdev) | |||
448 | dssdev->platform_disable(dssdev); | 462 | dssdev->platform_disable(dssdev); |
449 | 463 | ||
450 | regulator_disable(venc.vdda_dac_reg); | 464 | regulator_disable(venc.vdda_dac_reg); |
451 | |||
452 | venc_enable_clocks(0); | ||
453 | } | 465 | } |
454 | 466 | ||
455 | 467 | ||
@@ -487,6 +499,10 @@ static int venc_panel_enable(struct omap_dss_device *dssdev) | |||
487 | goto err1; | 499 | goto err1; |
488 | } | 500 | } |
489 | 501 | ||
502 | r = venc_runtime_get(); | ||
503 | if (r) | ||
504 | goto err1; | ||
505 | |||
490 | venc_power_on(dssdev); | 506 | venc_power_on(dssdev); |
491 | 507 | ||
492 | venc.wss_data = 0; | 508 | venc.wss_data = 0; |
@@ -520,6 +536,8 @@ static void venc_panel_disable(struct omap_dss_device *dssdev) | |||
520 | 536 | ||
521 | venc_power_off(dssdev); | 537 | venc_power_off(dssdev); |
522 | 538 | ||
539 | venc_runtime_put(); | ||
540 | |||
523 | dssdev->state = OMAP_DSS_DISPLAY_DISABLED; | 541 | dssdev->state = OMAP_DSS_DISPLAY_DISABLED; |
524 | 542 | ||
525 | omap_dss_stop_device(dssdev); | 543 | omap_dss_stop_device(dssdev); |
@@ -538,20 +556,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev) | |||
538 | return venc_panel_enable(dssdev); | 556 | return venc_panel_enable(dssdev); |
539 | } | 557 | } |
540 | 558 | ||
541 | static enum omap_dss_update_mode venc_get_update_mode( | ||
542 | struct omap_dss_device *dssdev) | ||
543 | { | ||
544 | return OMAP_DSS_UPDATE_AUTO; | ||
545 | } | ||
546 | |||
547 | static int venc_set_update_mode(struct omap_dss_device *dssdev, | ||
548 | enum omap_dss_update_mode mode) | ||
549 | { | ||
550 | if (mode != OMAP_DSS_UPDATE_AUTO) | ||
551 | return -EINVAL; | ||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | static void venc_get_timings(struct omap_dss_device *dssdev, | 559 | static void venc_get_timings(struct omap_dss_device *dssdev, |
556 | struct omap_video_timings *timings) | 560 | struct omap_video_timings *timings) |
557 | { | 561 | { |
@@ -598,6 +602,7 @@ static u32 venc_get_wss(struct omap_dss_device *dssdev) | |||
598 | static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) | 602 | static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) |
599 | { | 603 | { |
600 | const struct venc_config *config; | 604 | const struct venc_config *config; |
605 | int r; | ||
601 | 606 | ||
602 | DSSDBG("venc_set_wss\n"); | 607 | DSSDBG("venc_set_wss\n"); |
603 | 608 | ||
@@ -608,16 +613,19 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) | |||
608 | /* Invert due to VENC_L21_WC_CTL:INV=1 */ | 613 | /* Invert due to VENC_L21_WC_CTL:INV=1 */ |
609 | venc.wss_data = (wss ^ 0xfffff) << 8; | 614 | venc.wss_data = (wss ^ 0xfffff) << 8; |
610 | 615 | ||
611 | venc_enable_clocks(1); | 616 | r = venc_runtime_get(); |
617 | if (r) | ||
618 | goto err; | ||
612 | 619 | ||
613 | venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | | 620 | venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | |
614 | venc.wss_data); | 621 | venc.wss_data); |
615 | 622 | ||
616 | venc_enable_clocks(0); | 623 | venc_runtime_put(); |
617 | 624 | ||
625 | err: | ||
618 | mutex_unlock(&venc.venc_lock); | 626 | mutex_unlock(&venc.venc_lock); |
619 | 627 | ||
620 | return 0; | 628 | return r; |
621 | } | 629 | } |
622 | 630 | ||
623 | static struct omap_dss_driver venc_driver = { | 631 | static struct omap_dss_driver venc_driver = { |
@@ -632,9 +640,6 @@ static struct omap_dss_driver venc_driver = { | |||
632 | .get_resolution = omapdss_default_get_resolution, | 640 | .get_resolution = omapdss_default_get_resolution, |
633 | .get_recommended_bpp = omapdss_default_get_recommended_bpp, | 641 | .get_recommended_bpp = omapdss_default_get_recommended_bpp, |
634 | 642 | ||
635 | .set_update_mode = venc_set_update_mode, | ||
636 | .get_update_mode = venc_get_update_mode, | ||
637 | |||
638 | .get_timings = venc_get_timings, | 643 | .get_timings = venc_get_timings, |
639 | .set_timings = venc_set_timings, | 644 | .set_timings = venc_set_timings, |
640 | .check_timings = venc_check_timings, | 645 | .check_timings = venc_check_timings, |
@@ -673,7 +678,8 @@ void venc_dump_regs(struct seq_file *s) | |||
673 | { | 678 | { |
674 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) | 679 | #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) |
675 | 680 | ||
676 | venc_enable_clocks(1); | 681 | if (venc_runtime_get()) |
682 | return; | ||
677 | 683 | ||
678 | DUMPREG(VENC_F_CONTROL); | 684 | DUMPREG(VENC_F_CONTROL); |
679 | DUMPREG(VENC_VIDOUT_CTRL); | 685 | DUMPREG(VENC_VIDOUT_CTRL); |
@@ -717,16 +723,56 @@ void venc_dump_regs(struct seq_file *s) | |||
717 | DUMPREG(VENC_OUTPUT_CONTROL); | 723 | DUMPREG(VENC_OUTPUT_CONTROL); |
718 | DUMPREG(VENC_OUTPUT_TEST); | 724 | DUMPREG(VENC_OUTPUT_TEST); |
719 | 725 | ||
720 | venc_enable_clocks(0); | 726 | venc_runtime_put(); |
721 | 727 | ||
722 | #undef DUMPREG | 728 | #undef DUMPREG |
723 | } | 729 | } |
724 | 730 | ||
731 | static int venc_get_clocks(struct platform_device *pdev) | ||
732 | { | ||
733 | struct clk *clk; | ||
734 | |||
735 | clk = clk_get(&pdev->dev, "fck"); | ||
736 | if (IS_ERR(clk)) { | ||
737 | DSSERR("can't get fck\n"); | ||
738 | return PTR_ERR(clk); | ||
739 | } | ||
740 | |||
741 | venc.tv_clk = clk; | ||
742 | |||
743 | if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) { | ||
744 | if (cpu_is_omap34xx() || cpu_is_omap3630()) | ||
745 | clk = clk_get(&pdev->dev, "dss_96m_fck"); | ||
746 | else | ||
747 | clk = clk_get(&pdev->dev, "tv_dac_clk"); | ||
748 | if (IS_ERR(clk)) { | ||
749 | DSSERR("can't get tv_dac_clk\n"); | ||
750 | clk_put(venc.tv_clk); | ||
751 | return PTR_ERR(clk); | ||
752 | } | ||
753 | } else { | ||
754 | clk = NULL; | ||
755 | } | ||
756 | |||
757 | venc.tv_dac_clk = clk; | ||
758 | |||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | static void venc_put_clocks(void) | ||
763 | { | ||
764 | if (venc.tv_clk) | ||
765 | clk_put(venc.tv_clk); | ||
766 | if (venc.tv_dac_clk) | ||
767 | clk_put(venc.tv_dac_clk); | ||
768 | } | ||
769 | |||
725 | /* VENC HW IP initialisation */ | 770 | /* VENC HW IP initialisation */ |
726 | static int omap_venchw_probe(struct platform_device *pdev) | 771 | static int omap_venchw_probe(struct platform_device *pdev) |
727 | { | 772 | { |
728 | u8 rev_id; | 773 | u8 rev_id; |
729 | struct resource *venc_mem; | 774 | struct resource *venc_mem; |
775 | int r; | ||
730 | 776 | ||
731 | venc.pdev = pdev; | 777 | venc.pdev = pdev; |
732 | 778 | ||
@@ -737,22 +783,40 @@ static int omap_venchw_probe(struct platform_device *pdev) | |||
737 | venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0); | 783 | venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0); |
738 | if (!venc_mem) { | 784 | if (!venc_mem) { |
739 | DSSERR("can't get IORESOURCE_MEM VENC\n"); | 785 | DSSERR("can't get IORESOURCE_MEM VENC\n"); |
740 | return -EINVAL; | 786 | r = -EINVAL; |
787 | goto err_ioremap; | ||
741 | } | 788 | } |
742 | venc.base = ioremap(venc_mem->start, resource_size(venc_mem)); | 789 | venc.base = ioremap(venc_mem->start, resource_size(venc_mem)); |
743 | if (!venc.base) { | 790 | if (!venc.base) { |
744 | DSSERR("can't ioremap VENC\n"); | 791 | DSSERR("can't ioremap VENC\n"); |
745 | return -ENOMEM; | 792 | r = -ENOMEM; |
793 | goto err_ioremap; | ||
746 | } | 794 | } |
747 | 795 | ||
748 | venc_enable_clocks(1); | 796 | r = venc_get_clocks(pdev); |
797 | if (r) | ||
798 | goto err_get_clk; | ||
799 | |||
800 | pm_runtime_enable(&pdev->dev); | ||
801 | |||
802 | r = venc_runtime_get(); | ||
803 | if (r) | ||
804 | goto err_get_venc; | ||
749 | 805 | ||
750 | rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); | 806 | rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); |
751 | dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); | 807 | dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); |
752 | 808 | ||
753 | venc_enable_clocks(0); | 809 | venc_runtime_put(); |
754 | 810 | ||
755 | return omap_dss_register_driver(&venc_driver); | 811 | return omap_dss_register_driver(&venc_driver); |
812 | |||
813 | err_get_venc: | ||
814 | pm_runtime_disable(&pdev->dev); | ||
815 | venc_put_clocks(); | ||
816 | err_get_clk: | ||
817 | iounmap(venc.base); | ||
818 | err_ioremap: | ||
819 | return r; | ||
756 | } | 820 | } |
757 | 821 | ||
758 | static int omap_venchw_remove(struct platform_device *pdev) | 822 | static int omap_venchw_remove(struct platform_device *pdev) |
@@ -763,16 +827,61 @@ static int omap_venchw_remove(struct platform_device *pdev) | |||
763 | } | 827 | } |
764 | omap_dss_unregister_driver(&venc_driver); | 828 | omap_dss_unregister_driver(&venc_driver); |
765 | 829 | ||
830 | pm_runtime_disable(&pdev->dev); | ||
831 | venc_put_clocks(); | ||
832 | |||
766 | iounmap(venc.base); | 833 | iounmap(venc.base); |
767 | return 0; | 834 | return 0; |
768 | } | 835 | } |
769 | 836 | ||
837 | static int venc_runtime_suspend(struct device *dev) | ||
838 | { | ||
839 | if (venc.tv_dac_clk) | ||
840 | clk_disable(venc.tv_dac_clk); | ||
841 | clk_disable(venc.tv_clk); | ||
842 | |||
843 | dispc_runtime_put(); | ||
844 | dss_runtime_put(); | ||
845 | |||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | static int venc_runtime_resume(struct device *dev) | ||
850 | { | ||
851 | int r; | ||
852 | |||
853 | r = dss_runtime_get(); | ||
854 | if (r < 0) | ||
855 | goto err_get_dss; | ||
856 | |||
857 | r = dispc_runtime_get(); | ||
858 | if (r < 0) | ||
859 | goto err_get_dispc; | ||
860 | |||
861 | clk_enable(venc.tv_clk); | ||
862 | if (venc.tv_dac_clk) | ||
863 | clk_enable(venc.tv_dac_clk); | ||
864 | |||
865 | return 0; | ||
866 | |||
867 | err_get_dispc: | ||
868 | dss_runtime_put(); | ||
869 | err_get_dss: | ||
870 | return r; | ||
871 | } | ||
872 | |||
873 | static const struct dev_pm_ops venc_pm_ops = { | ||
874 | .runtime_suspend = venc_runtime_suspend, | ||
875 | .runtime_resume = venc_runtime_resume, | ||
876 | }; | ||
877 | |||
770 | static struct platform_driver omap_venchw_driver = { | 878 | static struct platform_driver omap_venchw_driver = { |
771 | .probe = omap_venchw_probe, | 879 | .probe = omap_venchw_probe, |
772 | .remove = omap_venchw_remove, | 880 | .remove = omap_venchw_remove, |
773 | .driver = { | 881 | .driver = { |
774 | .name = "omapdss_venc", | 882 | .name = "omapdss_venc", |
775 | .owner = THIS_MODULE, | 883 | .owner = THIS_MODULE, |
884 | .pm = &venc_pm_ops, | ||
776 | }, | 885 | }, |
777 | }; | 886 | }; |
778 | 887 | ||
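venc.c follows the same runtime-PM conversion, and venc_get_clocks() above shows the usual clk framework idiom: clk_get() returns an ERR_PTR() on failure, so the result is checked with IS_ERR() and converted back with PTR_ERR(), and any clock already obtained is released with clk_put() before bailing out. The same idiom in isolation, for a hypothetical device needing a mandatory "fck" and an optional "opt_ck" (names invented here):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical clock bookkeeping for one mandatory and one optional clock,
 * modelled on the venc_get_clocks()/venc_put_clocks() pair. */
static struct clk *my_fck;
static struct clk *my_opt_ck;

static int my_get_clocks(struct platform_device *pdev, bool need_opt)
{
	struct clk *clk;

	clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "can't get fck\n");
		return PTR_ERR(clk);		/* ERR_PTR carries the errno */
	}
	my_fck = clk;

	if (need_opt) {
		clk = clk_get(&pdev->dev, "opt_ck");
		if (IS_ERR(clk)) {
			dev_err(&pdev->dev, "can't get opt_ck\n");
			clk_put(my_fck);	/* undo the first clk_get() */
			return PTR_ERR(clk);
		}
	} else {
		clk = NULL;			/* optional clock not needed */
	}
	my_opt_ck = clk;

	return 0;
}

static void my_put_clocks(void)
{
	if (my_opt_ck)
		clk_put(my_opt_ck);
	if (my_fck)
		clk_put(my_fck);
}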
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index cff450392b79..6b1ac23dbbd3 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c | |||
@@ -316,67 +316,67 @@ int omapfb_update_window(struct fb_info *fbi, | |||
316 | } | 316 | } |
317 | EXPORT_SYMBOL(omapfb_update_window); | 317 | EXPORT_SYMBOL(omapfb_update_window); |
318 | 318 | ||
319 | static int omapfb_set_update_mode(struct fb_info *fbi, | 319 | int omapfb_set_update_mode(struct fb_info *fbi, |
320 | enum omapfb_update_mode mode) | 320 | enum omapfb_update_mode mode) |
321 | { | 321 | { |
322 | struct omap_dss_device *display = fb2display(fbi); | 322 | struct omap_dss_device *display = fb2display(fbi); |
323 | enum omap_dss_update_mode um; | 323 | struct omapfb_info *ofbi = FB2OFB(fbi); |
324 | struct omapfb2_device *fbdev = ofbi->fbdev; | ||
325 | struct omapfb_display_data *d; | ||
324 | int r; | 326 | int r; |
325 | 327 | ||
326 | if (!display || !display->driver->set_update_mode) | 328 | if (!display) |
327 | return -EINVAL; | 329 | return -EINVAL; |
328 | 330 | ||
329 | switch (mode) { | 331 | if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE) |
330 | case OMAPFB_UPDATE_DISABLED: | 332 | return -EINVAL; |
331 | um = OMAP_DSS_UPDATE_DISABLED; | ||
332 | break; | ||
333 | 333 | ||
334 | case OMAPFB_AUTO_UPDATE: | 334 | omapfb_lock(fbdev); |
335 | um = OMAP_DSS_UPDATE_AUTO; | ||
336 | break; | ||
337 | 335 | ||
338 | case OMAPFB_MANUAL_UPDATE: | 336 | d = get_display_data(fbdev, display); |
339 | um = OMAP_DSS_UPDATE_MANUAL; | ||
340 | break; | ||
341 | 337 | ||
342 | default: | 338 | if (d->update_mode == mode) { |
343 | return -EINVAL; | 339 | omapfb_unlock(fbdev); |
340 | return 0; | ||
344 | } | 341 | } |
345 | 342 | ||
346 | r = display->driver->set_update_mode(display, um); | 343 | r = 0; |
344 | |||
345 | if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | ||
346 | if (mode == OMAPFB_AUTO_UPDATE) | ||
347 | omapfb_start_auto_update(fbdev, display); | ||
348 | else /* MANUAL_UPDATE */ | ||
349 | omapfb_stop_auto_update(fbdev, display); | ||
350 | |||
351 | d->update_mode = mode; | ||
352 | } else { /* AUTO_UPDATE */ | ||
353 | if (mode == OMAPFB_MANUAL_UPDATE) | ||
354 | r = -EINVAL; | ||
355 | } | ||
356 | |||
357 | omapfb_unlock(fbdev); | ||
347 | 358 | ||
348 | return r; | 359 | return r; |
349 | } | 360 | } |
350 | 361 | ||
351 | static int omapfb_get_update_mode(struct fb_info *fbi, | 362 | int omapfb_get_update_mode(struct fb_info *fbi, |
352 | enum omapfb_update_mode *mode) | 363 | enum omapfb_update_mode *mode) |
353 | { | 364 | { |
354 | struct omap_dss_device *display = fb2display(fbi); | 365 | struct omap_dss_device *display = fb2display(fbi); |
355 | enum omap_dss_update_mode m; | 366 | struct omapfb_info *ofbi = FB2OFB(fbi); |
367 | struct omapfb2_device *fbdev = ofbi->fbdev; | ||
368 | struct omapfb_display_data *d; | ||
356 | 369 | ||
357 | if (!display) | 370 | if (!display) |
358 | return -EINVAL; | 371 | return -EINVAL; |
359 | 372 | ||
360 | if (!display->driver->get_update_mode) { | 373 | omapfb_lock(fbdev); |
361 | *mode = OMAPFB_AUTO_UPDATE; | ||
362 | return 0; | ||
363 | } | ||
364 | 374 | ||
365 | m = display->driver->get_update_mode(display); | 375 | d = get_display_data(fbdev, display); |
366 | 376 | ||
367 | switch (m) { | 377 | *mode = d->update_mode; |
368 | case OMAP_DSS_UPDATE_DISABLED: | 378 | |
369 | *mode = OMAPFB_UPDATE_DISABLED; | 379 | omapfb_unlock(fbdev); |
370 | break; | ||
371 | case OMAP_DSS_UPDATE_AUTO: | ||
372 | *mode = OMAPFB_AUTO_UPDATE; | ||
373 | break; | ||
374 | case OMAP_DSS_UPDATE_MANUAL: | ||
375 | *mode = OMAPFB_MANUAL_UPDATE; | ||
376 | break; | ||
377 | default: | ||
378 | BUG(); | ||
379 | } | ||
380 | 380 | ||
381 | return 0; | 381 | return 0; |
382 | } | 382 | } |
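The reworked omapfb_set_update_mode() no longer calls into the panel driver at all: it validates the requested mode, takes the omapfb lock, short-circuits if the cached per-display mode is unchanged, and only then starts or stops the auto-update worker. Reduced to its skeleton, with a made-up per-display struct and callbacks rather than the real omapfb types:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical per-display bookkeeping, loosely mirroring
 * omapfb_display_data. */
enum upd_mode { UPD_AUTO, UPD_MANUAL };

struct display_state {
	struct mutex lock;
	enum upd_mode mode;
	bool can_manual_update;
};

/* "Validate, lock, no-op if unchanged, apply" pattern. */
static int set_update_mode(struct display_state *d, enum upd_mode mode,
			   void (*start_auto)(void), void (*stop_auto)(void))
{
	int r = 0;

	if (mode != UPD_AUTO && mode != UPD_MANUAL)
		return -EINVAL;

	mutex_lock(&d->lock);

	if (d->mode == mode) {		/* nothing to do */
		mutex_unlock(&d->lock);
		return 0;
	}

	if (d->can_manual_update) {
		if (mode == UPD_AUTO)
			start_auto();
		else
			stop_auto();
		d->mode = mode;
	} else if (mode == UPD_MANUAL) {
		r = -EINVAL;		/* auto-update-only display */
	}

	mutex_unlock(&d->lock);
	return r;
}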
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 505bc12a3031..602b71a92d3c 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c | |||
@@ -46,6 +46,10 @@ static char *def_vram; | |||
46 | static int def_vrfb; | 46 | static int def_vrfb; |
47 | static int def_rotate; | 47 | static int def_rotate; |
48 | static int def_mirror; | 48 | static int def_mirror; |
49 | static bool auto_update; | ||
50 | static unsigned int auto_update_freq; | ||
51 | module_param(auto_update, bool, 0); | ||
52 | module_param(auto_update_freq, uint, 0644); | ||
49 | 53 | ||
50 | #ifdef DEBUG | 54 | #ifdef DEBUG |
51 | unsigned int omapfb_debug; | 55 | unsigned int omapfb_debug; |
@@ -1242,6 +1246,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi) | |||
1242 | struct omapfb_info *ofbi = FB2OFB(fbi); | 1246 | struct omapfb_info *ofbi = FB2OFB(fbi); |
1243 | struct omapfb2_device *fbdev = ofbi->fbdev; | 1247 | struct omapfb2_device *fbdev = ofbi->fbdev; |
1244 | struct omap_dss_device *display = fb2display(fbi); | 1248 | struct omap_dss_device *display = fb2display(fbi); |
1249 | struct omapfb_display_data *d; | ||
1245 | int r = 0; | 1250 | int r = 0; |
1246 | 1251 | ||
1247 | if (!display) | 1252 | if (!display) |
@@ -1249,6 +1254,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi) | |||
1249 | 1254 | ||
1250 | omapfb_lock(fbdev); | 1255 | omapfb_lock(fbdev); |
1251 | 1256 | ||
1257 | d = get_display_data(fbdev, display); | ||
1258 | |||
1252 | switch (blank) { | 1259 | switch (blank) { |
1253 | case FB_BLANK_UNBLANK: | 1260 | case FB_BLANK_UNBLANK: |
1254 | if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) | 1261 | if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) |
@@ -1257,6 +1264,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi) | |||
1257 | if (display->driver->resume) | 1264 | if (display->driver->resume) |
1258 | r = display->driver->resume(display); | 1265 | r = display->driver->resume(display); |
1259 | 1266 | ||
1267 | if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) && | ||
1268 | d->update_mode == OMAPFB_AUTO_UPDATE && | ||
1269 | !d->auto_update_work_enabled) | ||
1270 | omapfb_start_auto_update(fbdev, display); | ||
1271 | |||
1260 | break; | 1272 | break; |
1261 | 1273 | ||
1262 | case FB_BLANK_NORMAL: | 1274 | case FB_BLANK_NORMAL: |
@@ -1268,6 +1280,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi) | |||
1268 | if (display->state != OMAP_DSS_DISPLAY_ACTIVE) | 1280 | if (display->state != OMAP_DSS_DISPLAY_ACTIVE) |
1269 | goto exit; | 1281 | goto exit; |
1270 | 1282 | ||
1283 | if (d->auto_update_work_enabled) | ||
1284 | omapfb_stop_auto_update(fbdev, display); | ||
1285 | |||
1271 | if (display->driver->suspend) | 1286 | if (display->driver->suspend) |
1272 | r = display->driver->suspend(display); | 1287 | r = display->driver->suspend(display); |
1273 | 1288 | ||
@@ -1724,6 +1739,78 @@ err: | |||
1724 | return r; | 1739 | return r; |
1725 | } | 1740 | } |
1726 | 1741 | ||
1742 | static void omapfb_auto_update_work(struct work_struct *work) | ||
1743 | { | ||
1744 | struct omap_dss_device *dssdev; | ||
1745 | struct omap_dss_driver *dssdrv; | ||
1746 | struct omapfb_display_data *d; | ||
1747 | u16 w, h; | ||
1748 | unsigned int freq; | ||
1749 | struct omapfb2_device *fbdev; | ||
1750 | |||
1751 | d = container_of(work, struct omapfb_display_data, | ||
1752 | auto_update_work.work); | ||
1753 | |||
1754 | dssdev = d->dssdev; | ||
1755 | dssdrv = dssdev->driver; | ||
1756 | fbdev = d->fbdev; | ||
1757 | |||
1758 | if (!dssdrv || !dssdrv->update) | ||
1759 | return; | ||
1760 | |||
1761 | if (dssdrv->sync) | ||
1762 | dssdrv->sync(dssdev); | ||
1763 | |||
1764 | dssdrv->get_resolution(dssdev, &w, &h); | ||
1765 | dssdrv->update(dssdev, 0, 0, w, h); | ||
1766 | |||
1767 | freq = auto_update_freq; | ||
1768 | if (freq == 0) | ||
1769 | freq = 20; | ||
1770 | queue_delayed_work(fbdev->auto_update_wq, | ||
1771 | &d->auto_update_work, HZ / freq); | ||
1772 | } | ||
1773 | |||
1774 | void omapfb_start_auto_update(struct omapfb2_device *fbdev, | ||
1775 | struct omap_dss_device *display) | ||
1776 | { | ||
1777 | struct omapfb_display_data *d; | ||
1778 | |||
1779 | if (fbdev->auto_update_wq == NULL) { | ||
1780 | struct workqueue_struct *wq; | ||
1781 | |||
1782 | wq = create_singlethread_workqueue("omapfb_auto_update"); | ||
1783 | |||
1784 | if (wq == NULL) { | ||
1785 | dev_err(fbdev->dev, "Failed to create workqueue for " | ||
1786 | "auto-update\n"); | ||
1787 | return; | ||
1788 | } | ||
1789 | |||
1790 | fbdev->auto_update_wq = wq; | ||
1791 | } | ||
1792 | |||
1793 | d = get_display_data(fbdev, display); | ||
1794 | |||
1795 | INIT_DELAYED_WORK(&d->auto_update_work, omapfb_auto_update_work); | ||
1796 | |||
1797 | d->auto_update_work_enabled = true; | ||
1798 | |||
1799 | omapfb_auto_update_work(&d->auto_update_work.work); | ||
1800 | } | ||
1801 | |||
1802 | void omapfb_stop_auto_update(struct omapfb2_device *fbdev, | ||
1803 | struct omap_dss_device *display) | ||
1804 | { | ||
1805 | struct omapfb_display_data *d; | ||
1806 | |||
1807 | d = get_display_data(fbdev, display); | ||
1808 | |||
1809 | cancel_delayed_work_sync(&d->auto_update_work); | ||
1810 | |||
1811 | d->auto_update_work_enabled = false; | ||
1812 | } | ||
1813 | |||
1727 | /* initialize fb_info, var, fix to something sane based on the display */ | 1814 | /* initialize fb_info, var, fix to something sane based on the display */ |
1728 | static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) | 1815 | static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) |
1729 | { | 1816 | { |
@@ -1858,10 +1945,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev) | |||
1858 | } | 1945 | } |
1859 | 1946 | ||
1860 | for (i = 0; i < fbdev->num_displays; i++) { | 1947 | for (i = 0; i < fbdev->num_displays; i++) { |
1861 | if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED) | 1948 | struct omap_dss_device *dssdev = fbdev->displays[i].dssdev; |
1862 | fbdev->displays[i]->driver->disable(fbdev->displays[i]); | 1949 | |
1950 | if (fbdev->displays[i].auto_update_work_enabled) | ||
1951 | omapfb_stop_auto_update(fbdev, dssdev); | ||
1952 | |||
1953 | if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) | ||
1954 | dssdev->driver->disable(dssdev); | ||
1955 | |||
1956 | omap_dss_put_device(dssdev); | ||
1957 | } | ||
1863 | 1958 | ||
1864 | omap_dss_put_device(fbdev->displays[i]); | 1959 | if (fbdev->auto_update_wq != NULL) { |
1960 | flush_workqueue(fbdev->auto_update_wq); | ||
1961 | destroy_workqueue(fbdev->auto_update_wq); | ||
1962 | fbdev->auto_update_wq = NULL; | ||
1865 | } | 1963 | } |
1866 | 1964 | ||
1867 | dev_set_drvdata(fbdev->dev, NULL); | 1965 | dev_set_drvdata(fbdev->dev, NULL); |
@@ -2084,14 +2182,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev, | |||
2084 | int r; | 2182 | int r; |
2085 | u8 bpp; | 2183 | u8 bpp; |
2086 | struct omap_video_timings timings, temp_timings; | 2184 | struct omap_video_timings timings, temp_timings; |
2185 | struct omapfb_display_data *d; | ||
2087 | 2186 | ||
2088 | r = omapfb_mode_to_timings(mode_str, &timings, &bpp); | 2187 | r = omapfb_mode_to_timings(mode_str, &timings, &bpp); |
2089 | if (r) | 2188 | if (r) |
2090 | return r; | 2189 | return r; |
2091 | 2190 | ||
2092 | fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display; | 2191 | d = get_display_data(fbdev, display); |
2093 | fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp; | 2192 | d->bpp_override = bpp; |
2094 | ++fbdev->num_bpp_overrides; | ||
2095 | 2193 | ||
2096 | if (display->driver->check_timings) { | 2194 | if (display->driver->check_timings) { |
2097 | r = display->driver->check_timings(display, &timings); | 2195 | r = display->driver->check_timings(display, &timings); |
@@ -2117,14 +2215,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev, | |||
2117 | static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev, | 2215 | static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev, |
2118 | struct omap_dss_device *dssdev) | 2216 | struct omap_dss_device *dssdev) |
2119 | { | 2217 | { |
2120 | int i; | 2218 | struct omapfb_display_data *d; |
2121 | 2219 | ||
2122 | BUG_ON(dssdev->driver->get_recommended_bpp == NULL); | 2220 | BUG_ON(dssdev->driver->get_recommended_bpp == NULL); |
2123 | 2221 | ||
2124 | for (i = 0; i < fbdev->num_bpp_overrides; ++i) { | 2222 | d = get_display_data(fbdev, dssdev); |
2125 | if (dssdev == fbdev->bpp_overrides[i].dssdev) | 2223 | |
2126 | return fbdev->bpp_overrides[i].bpp; | 2224 | if (d->bpp_override != 0) |
2127 | } | 2225 | return d->bpp_override; |
2128 | 2226 | ||
2129 | return dssdev->driver->get_recommended_bpp(dssdev); | 2227 | return dssdev->driver->get_recommended_bpp(dssdev); |
2130 | } | 2228 | } |
@@ -2156,9 +2254,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) | |||
2156 | 2254 | ||
2157 | display = NULL; | 2255 | display = NULL; |
2158 | for (i = 0; i < fbdev->num_displays; ++i) { | 2256 | for (i = 0; i < fbdev->num_displays; ++i) { |
2159 | if (strcmp(fbdev->displays[i]->name, | 2257 | if (strcmp(fbdev->displays[i].dssdev->name, |
2160 | display_str) == 0) { | 2258 | display_str) == 0) { |
2161 | display = fbdev->displays[i]; | 2259 | display = fbdev->displays[i].dssdev; |
2162 | break; | 2260 | break; |
2163 | } | 2261 | } |
2164 | } | 2262 | } |
@@ -2182,6 +2280,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, | |||
2182 | struct omap_dss_device *dssdev) | 2280 | struct omap_dss_device *dssdev) |
2183 | { | 2281 | { |
2184 | struct omap_dss_driver *dssdrv = dssdev->driver; | 2282 | struct omap_dss_driver *dssdrv = dssdev->driver; |
2283 | struct omapfb_display_data *d; | ||
2185 | int r; | 2284 | int r; |
2186 | 2285 | ||
2187 | r = dssdrv->enable(dssdev); | 2286 | r = dssdrv->enable(dssdev); |
@@ -2191,8 +2290,20 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, | |||
2191 | return r; | 2290 | return r; |
2192 | } | 2291 | } |
2193 | 2292 | ||
2293 | d = get_display_data(fbdev, dssdev); | ||
2294 | |||
2295 | d->fbdev = fbdev; | ||
2296 | |||
2194 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | 2297 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { |
2195 | u16 w, h; | 2298 | u16 w, h; |
2299 | |||
2300 | if (auto_update) { | ||
2301 | omapfb_start_auto_update(fbdev, dssdev); | ||
2302 | d->update_mode = OMAPFB_AUTO_UPDATE; | ||
2303 | } else { | ||
2304 | d->update_mode = OMAPFB_MANUAL_UPDATE; | ||
2305 | } | ||
2306 | |||
2196 | if (dssdrv->enable_te) { | 2307 | if (dssdrv->enable_te) { |
2197 | r = dssdrv->enable_te(dssdev, 1); | 2308 | r = dssdrv->enable_te(dssdev, 1); |
2198 | if (r) { | 2309 | if (r) { |
@@ -2201,16 +2312,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, | |||
2201 | } | 2312 | } |
2202 | } | 2313 | } |
2203 | 2314 | ||
2204 | if (dssdrv->set_update_mode) { | ||
2205 | r = dssdrv->set_update_mode(dssdev, | ||
2206 | OMAP_DSS_UPDATE_MANUAL); | ||
2207 | if (r) { | ||
2208 | dev_err(fbdev->dev, | ||
2209 | "Failed to set update mode\n"); | ||
2210 | return r; | ||
2211 | } | ||
2212 | } | ||
2213 | |||
2214 | dssdrv->get_resolution(dssdev, &w, &h); | 2315 | dssdrv->get_resolution(dssdev, &w, &h); |
2215 | r = dssdrv->update(dssdev, 0, 0, w, h); | 2316 | r = dssdrv->update(dssdev, 0, 0, w, h); |
2216 | if (r) { | 2317 | if (r) { |
@@ -2219,15 +2320,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, | |||
2219 | return r; | 2320 | return r; |
2220 | } | 2321 | } |
2221 | } else { | 2322 | } else { |
2222 | if (dssdrv->set_update_mode) { | 2323 | d->update_mode = OMAPFB_AUTO_UPDATE; |
2223 | r = dssdrv->set_update_mode(dssdev, | ||
2224 | OMAP_DSS_UPDATE_AUTO); | ||
2225 | if (r) { | ||
2226 | dev_err(fbdev->dev, | ||
2227 | "Failed to set update mode\n"); | ||
2228 | return r; | ||
2229 | } | ||
2230 | } | ||
2231 | } | 2324 | } |
2232 | 2325 | ||
2233 | return 0; | 2326 | return 0; |
@@ -2275,6 +2368,8 @@ static int omapfb_probe(struct platform_device *pdev) | |||
2275 | fbdev->num_displays = 0; | 2368 | fbdev->num_displays = 0; |
2276 | dssdev = NULL; | 2369 | dssdev = NULL; |
2277 | for_each_dss_dev(dssdev) { | 2370 | for_each_dss_dev(dssdev) { |
2371 | struct omapfb_display_data *d; | ||
2372 | |||
2278 | omap_dss_get_device(dssdev); | 2373 | omap_dss_get_device(dssdev); |
2279 | 2374 | ||
2280 | if (!dssdev->driver) { | 2375 | if (!dssdev->driver) { |
@@ -2282,7 +2377,12 @@ static int omapfb_probe(struct platform_device *pdev) | |||
2282 | r = -ENODEV; | 2377 | r = -ENODEV; |
2283 | } | 2378 | } |
2284 | 2379 | ||
2285 | fbdev->displays[fbdev->num_displays++] = dssdev; | 2380 | d = &fbdev->displays[fbdev->num_displays++]; |
2381 | d->dssdev = dssdev; | ||
2382 | if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) | ||
2383 | d->update_mode = OMAPFB_MANUAL_UPDATE; | ||
2384 | else | ||
2385 | d->update_mode = OMAPFB_AUTO_UPDATE; | ||
2286 | } | 2386 | } |
2287 | 2387 | ||
2288 | if (r) | 2388 | if (r) |
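The new auto-update path in omapfb-main.c emulates automatic refresh for manual-update panels: a delayed work item syncs the panel, pushes a full-screen update and re-queues itself at HZ / freq jiffies (20 Hz by default). A self-rescheduling delayed work of that shape, with a hypothetical push_frame() in place of the dssdrv->sync()/update() calls:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical self-rescheduling refresh worker, modelled on
 * omapfb_auto_update_work(). */
static struct workqueue_struct *refresh_wq;
static struct delayed_work refresh_work;
static unsigned int refresh_freq = 20;		/* frames per second */

static void push_frame(void)
{
	/* placeholder for dssdrv->sync() + dssdrv->update(0, 0, w, h) */
}

static void refresh_work_fn(struct work_struct *work)
{
	push_frame();
	/* re-arm ourselves; HZ / freq jiffies between updates */
	queue_delayed_work(refresh_wq, &refresh_work, HZ / refresh_freq);
}

static int refresh_start(void)
{
	refresh_wq = create_singlethread_workqueue("refresh");
	if (!refresh_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&refresh_work, refresh_work_fn);
	refresh_work_fn(&refresh_work.work);	/* push the first frame now */
	return 0;
}

static void refresh_stop(void)
{
	cancel_delayed_work_sync(&refresh_work);	/* no work left in flight */
	destroy_workqueue(refresh_wq);
}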
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c index 2f5e817b2a9a..153bf1aceebc 100644 --- a/drivers/video/omap2/omapfb/omapfb-sysfs.c +++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c | |||
@@ -518,6 +518,39 @@ static ssize_t show_virt(struct device *dev, | |||
518 | return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); | 518 | return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); |
519 | } | 519 | } |
520 | 520 | ||
521 | static ssize_t show_upd_mode(struct device *dev, | ||
522 | struct device_attribute *attr, char *buf) | ||
523 | { | ||
524 | struct fb_info *fbi = dev_get_drvdata(dev); | ||
525 | enum omapfb_update_mode mode; | ||
526 | int r; | ||
527 | |||
528 | r = omapfb_get_update_mode(fbi, &mode); | ||
529 | |||
530 | if (r) | ||
531 | return r; | ||
532 | |||
533 | return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode); | ||
534 | } | ||
535 | |||
536 | static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr, | ||
537 | const char *buf, size_t count) | ||
538 | { | ||
539 | struct fb_info *fbi = dev_get_drvdata(dev); | ||
540 | unsigned mode; | ||
541 | int r; | ||
542 | |||
543 | r = kstrtouint(buf, 0, &mode); | ||
544 | if (r) | ||
545 | return r; | ||
546 | |||
547 | r = omapfb_set_update_mode(fbi, mode); | ||
548 | if (r) | ||
549 | return r; | ||
550 | |||
551 | return count; | ||
552 | } | ||
553 | |||
521 | static struct device_attribute omapfb_attrs[] = { | 554 | static struct device_attribute omapfb_attrs[] = { |
522 | __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, | 555 | __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, |
523 | store_rotate_type), | 556 | store_rotate_type), |
@@ -528,6 +561,7 @@ static struct device_attribute omapfb_attrs[] = { | |||
528 | store_overlays_rotate), | 561 | store_overlays_rotate), |
529 | __ATTR(phys_addr, S_IRUGO, show_phys, NULL), | 562 | __ATTR(phys_addr, S_IRUGO, show_phys, NULL), |
530 | __ATTR(virt_addr, S_IRUGO, show_virt, NULL), | 563 | __ATTR(virt_addr, S_IRUGO, show_virt, NULL), |
564 | __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode), | ||
531 | }; | 565 | }; |
532 | 566 | ||
533 | int omapfb_create_sysfs(struct omapfb2_device *fbdev) | 567 | int omapfb_create_sysfs(struct omapfb2_device *fbdev) |
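The new update_mode sysfs attribute is a thin show/store pair: store parses the value with kstrtouint() and hands it to omapfb_set_update_mode(), show prints the cached mode back. The same sysfs idiom for a hypothetical integer attribute, just as an illustration of the pattern:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

/* Hypothetical backing store for the attribute. */
static unsigned int my_value;

static ssize_t show_my_value(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", my_value);	/* buf is a full page */
}

static ssize_t store_my_value(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int v;
	int r;

	r = kstrtouint(buf, 0, &v);	/* accepts decimal, 0x... and 0... */
	if (r)
		return r;

	my_value = v;
	return count;			/* whole write consumed */
}

static DEVICE_ATTR(my_value, S_IRUGO | S_IWUSR, show_my_value, store_my_value);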
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h index aa1b1d974276..fdf0edeccf4e 100644 --- a/drivers/video/omap2/omapfb/omapfb.h +++ b/drivers/video/omap2/omapfb/omapfb.h | |||
@@ -73,6 +73,15 @@ struct omapfb_info { | |||
73 | bool mirror; | 73 | bool mirror; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct omapfb_display_data { | ||
77 | struct omapfb2_device *fbdev; | ||
78 | struct omap_dss_device *dssdev; | ||
79 | u8 bpp_override; | ||
80 | enum omapfb_update_mode update_mode; | ||
81 | bool auto_update_work_enabled; | ||
82 | struct delayed_work auto_update_work; | ||
83 | }; | ||
84 | |||
76 | struct omapfb2_device { | 85 | struct omapfb2_device { |
77 | struct device *dev; | 86 | struct device *dev; |
78 | struct mutex mtx; | 87 | struct mutex mtx; |
@@ -86,17 +95,13 @@ struct omapfb2_device { | |||
86 | struct omapfb2_mem_region regions[10]; | 95 | struct omapfb2_mem_region regions[10]; |
87 | 96 | ||
88 | unsigned num_displays; | 97 | unsigned num_displays; |
89 | struct omap_dss_device *displays[10]; | 98 | struct omapfb_display_data displays[10]; |
90 | unsigned num_overlays; | 99 | unsigned num_overlays; |
91 | struct omap_overlay *overlays[10]; | 100 | struct omap_overlay *overlays[10]; |
92 | unsigned num_managers; | 101 | unsigned num_managers; |
93 | struct omap_overlay_manager *managers[10]; | 102 | struct omap_overlay_manager *managers[10]; |
94 | 103 | ||
95 | unsigned num_bpp_overrides; | 104 | struct workqueue_struct *auto_update_wq; |
96 | struct { | ||
97 | struct omap_dss_device *dssdev; | ||
98 | u8 bpp; | ||
99 | } bpp_overrides[10]; | ||
100 | }; | 105 | }; |
101 | 106 | ||
102 | struct omapfb_colormode { | 107 | struct omapfb_colormode { |
@@ -128,6 +133,13 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode, | |||
128 | int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, | 133 | int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, |
129 | u16 posx, u16 posy, u16 outw, u16 outh); | 134 | u16 posx, u16 posy, u16 outw, u16 outh); |
130 | 135 | ||
136 | void omapfb_start_auto_update(struct omapfb2_device *fbdev, | ||
137 | struct omap_dss_device *display); | ||
138 | void omapfb_stop_auto_update(struct omapfb2_device *fbdev, | ||
139 | struct omap_dss_device *display); | ||
140 | int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode); | ||
141 | int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode); | ||
142 | |||
131 | /* find the display connected to this fb, if any */ | 143 | /* find the display connected to this fb, if any */ |
132 | static inline struct omap_dss_device *fb2display(struct fb_info *fbi) | 144 | static inline struct omap_dss_device *fb2display(struct fb_info *fbi) |
133 | { | 145 | { |
@@ -143,6 +155,19 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi) | |||
143 | return NULL; | 155 | return NULL; |
144 | } | 156 | } |
145 | 157 | ||
158 | static inline struct omapfb_display_data *get_display_data( | ||
159 | struct omapfb2_device *fbdev, struct omap_dss_device *dssdev) | ||
160 | { | ||
161 | int i; | ||
162 | |||
163 | for (i = 0; i < fbdev->num_displays; ++i) | ||
164 | if (fbdev->displays[i].dssdev == dssdev) | ||
165 | return &fbdev->displays[i]; | ||
166 | |||
167 | /* This should never happen */ | ||
168 | BUG(); | ||
169 | } | ||
170 | |||
146 | static inline void omapfb_lock(struct omapfb2_device *fbdev) | 171 | static inline void omapfb_lock(struct omapfb2_device *fbdev) |
147 | { | 172 | { |
148 | mutex_lock(&fbdev->mtx); | 173 | mutex_lock(&fbdev->mtx); |
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h index 32549d177b19..dcaab9012ca2 100644 --- a/drivers/video/savage/savagefb.h +++ b/drivers/video/savage/savagefb.h | |||
@@ -55,7 +55,7 @@ | |||
55 | 55 | ||
56 | #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) | 56 | #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) |
57 | 57 | ||
58 | #define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR)) | 58 | #define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR)) |
59 | 59 | ||
60 | #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) | 60 | #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) |
61 | 61 | ||
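The savagefb.h change is a one-character logic fix: with ||, S3_SAVAGE4_SERIES() was true for every chip, since any value is either >= S3_SAVAGE4 or <= S3_PROSAVAGEDDR; a range test needs &&. A quick standalone check (the enum values here are made up, not the driver's chip IDs):

#include <stdio.h>

/* Made-up chip ordering, only to show why the range test needs &&. */
enum { CHIP_OLD, CHIP_SAVAGE4, CHIP_PROSAVAGE, CHIP_PROSAVAGEDDR, CHIP_NEW };

#define IN_SAVAGE4_SERIES_BROKEN(c) ((c) >= CHIP_SAVAGE4 || (c) <= CHIP_PROSAVAGEDDR)
#define IN_SAVAGE4_SERIES_FIXED(c)  ((c) >= CHIP_SAVAGE4 && (c) <= CHIP_PROSAVAGEDDR)

int main(void)
{
	/* CHIP_OLD is below the range, CHIP_NEW is above it; the broken
	 * macro still reports both as members of the series. */
	printf("broken: old=%d new=%d\n",
	       IN_SAVAGE4_SERIES_BROKEN(CHIP_OLD),
	       IN_SAVAGE4_SERIES_BROKEN(CHIP_NEW));	/* prints 1 1 */
	printf("fixed:  old=%d new=%d\n",
	       IN_SAVAGE4_SERIES_FIXED(CHIP_OLD),
	       IN_SAVAGE4_SERIES_FIXED(CHIP_NEW));	/* prints 0 0 */
	return 0;
}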
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index f441726ddf2b..86b0735e6aa0 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -36,9 +36,6 @@ config WATCHDOG_CORE | |||
36 | and gives them the /dev/watchdog interface (and later also the | 36 | and gives them the /dev/watchdog interface (and later also the |
37 | sysfs interface). | 37 | sysfs interface). |
38 | 38 | ||
39 | To compile this driver as a module, choose M here: the module will | ||
40 | be called watchdog. | ||
41 | |||
42 | config WATCHDOG_NOWAYOUT | 39 | config WATCHDOG_NOWAYOUT |
43 | bool "Disable watchdog shutdown on close" | 40 | bool "Disable watchdog shutdown on close" |
44 | help | 41 | help |
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index afa78a54711e..809f41c30c44 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c | |||
@@ -458,7 +458,15 @@ static int __devexit nv_tco_remove(struct platform_device *dev) | |||
458 | 458 | ||
459 | static void nv_tco_shutdown(struct platform_device *dev) | 459 | static void nv_tco_shutdown(struct platform_device *dev) |
460 | { | 460 | { |
461 | u32 val; | ||
462 | |||
461 | tco_timer_stop(); | 463 | tco_timer_stop(); |
464 | |||
465 | /* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not | ||
466 | * unset during shutdown. */ | ||
467 | pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val); | ||
468 | val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT; | ||
469 | pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val); | ||
462 | } | 470 | } |
463 | 471 | ||
464 | static struct platform_driver nv_tco_driver = { | 472 | static struct platform_driver nv_tco_driver = { |
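The nv_tco shutdown hook now clears the no-reboot bit with a classic PCI config-space read-modify-write, since some BIOSes otherwise fail the next POST. The idiom in isolation; the register offset and bit below are placeholders, not the real MCP51_SMBUS_SETUP_B definitions:

#include <linux/pci.h>

/* Placeholder offset/bit standing in for MCP51_SMBUS_SETUP_B and its
 * TCO_REBOOT bit. */
#define MY_SETUP_REG	0xe8
#define MY_NO_REBOOT	(1 << 25)

/* Clear a single control bit in PCI config space: read, modify, write. */
static void clear_no_reboot(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, MY_SETUP_REG, &val);
	val &= ~MY_NO_REBOOT;
	pci_write_config_dword(pdev, MY_SETUP_REG, val);
}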
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index db84f2322d1a..a267dc078daf 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c | |||
@@ -64,7 +64,7 @@ | |||
64 | * misses its deadline, the kernel timer will allow the WDT to overflow. | 64 | * misses its deadline, the kernel timer will allow the WDT to overflow. |
65 | */ | 65 | */ |
66 | static int clock_division_ratio = WTCSR_CKS_4096; | 66 | static int clock_division_ratio = WTCSR_CKS_4096; |
67 | #define next_ping_period(cks) msecs_to_jiffies(cks - 4) | 67 | #define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4)) |
68 | 68 | ||
69 | static const struct watchdog_info sh_wdt_info; | 69 | static const struct watchdog_info sh_wdt_info; |
70 | static struct platform_device *sh_wdt_dev; | 70 | static struct platform_device *sh_wdt_dev; |
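The shwdt change fixes next_ping_period(): msecs_to_jiffies() only converts a duration, so the macro has to add the current jiffies to yield an absolute expiry suitable for mod_timer() rather than a bare delay. A simplified sketch of the corrected usage (period handling reduced here; the driver's macro also subtracts a small margin):

#include <linux/jiffies.h>
#include <linux/timer.h>

/* An absolute expiry is "now + delay"; msecs_to_jiffies() alone only
 * gives the delay part. */
#define next_ping_period(ms)	(jiffies + msecs_to_jiffies(ms))

/* Hypothetical heartbeat timer showing how the macro is consumed. */
static struct timer_list ping_timer;

static void arm_next_ping(unsigned int period_ms)
{
	mod_timer(&ping_timer, next_ping_period(period_ms));
}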
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index f815283667af..5f7ff8e2fc14 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
@@ -11,7 +11,7 @@ config XEN_BALLOON | |||
11 | 11 | ||
12 | config XEN_SELFBALLOONING | 12 | config XEN_SELFBALLOONING |
13 | bool "Dynamically self-balloon kernel memory to target" | 13 | bool "Dynamically self-balloon kernel memory to target" |
14 | depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP | 14 | depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM |
15 | default n | 15 | default n |
16 | help | 16 | help |
17 | Self-ballooning dynamically balloons available kernel memory driven | 17 | Self-ballooning dynamically balloons available kernel memory driven |